| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_A64) |
| 31 | 31 |
| 32 #include "ic-inl.h" | 32 #include "ic-inl.h" |
| 33 #include "codegen.h" | 33 #include "codegen.h" |
| 34 #include "stub-cache.h" | 34 #include "stub-cache.h" |
| 35 | 35 |
| 36 namespace v8 { | 36 namespace v8 { |
| 37 namespace internal { | 37 namespace internal { |
| 38 | 38 |
| 39 |
| 39 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
| 40 | 41 |
| 41 | 42 |
| 42 static void ProbeTable(Isolate* isolate, | |
| 43 MacroAssembler* masm, | |
| 44 Code::Flags flags, | |
| 45 StubCache::Table table, | |
| 46 Register receiver, | |
| 47 Register name, | |
| 48 // Number of the cache entry, not scaled. | |
| 49 Register offset, | |
| 50 Register scratch, | |
| 51 Register scratch2, | |
| 52 Register offset_scratch) { | |
| 53 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | |
| 54 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | |
| 55 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | |
| 56 | |
| 57 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | |
| 58 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | |
| 59 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | |
| 60 | |
| 61 // Check the relative positions of the address fields. | |
| 62 ASSERT(value_off_addr > key_off_addr); | |
| 63 ASSERT((value_off_addr - key_off_addr) % 4 == 0); | |
| 64 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); | |
| 65 ASSERT(map_off_addr > key_off_addr); | |
| 66 ASSERT((map_off_addr - key_off_addr) % 4 == 0); | |
| 67 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); | |
| 68 | |
| 69 Label miss; | |
| 70 Register base_addr = scratch; | |
| 71 scratch = no_reg; | |
| 72 | |
| 73 // Multiply by 3 because there are 3 fields per entry (name, code, map). | |
| 74 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | |
| 75 | |
| 76 // Calculate the base address of the entry. | |
| 77 __ mov(base_addr, Operand(key_offset)); | |
| 78 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); | |
| 79 | |
| 80 // Check that the key in the entry matches the name. | |
| 81 __ ldr(ip, MemOperand(base_addr, 0)); | |
| 82 __ cmp(name, ip); | |
| 83 __ b(ne, &miss); | |
| 84 | |
| 85 // Check the map matches. | |
| 86 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | |
| 87 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
| 88 __ cmp(ip, scratch2); | |
| 89 __ b(ne, &miss); | |
| 90 | |
| 91 // Get the code entry from the cache. | |
| 92 Register code = scratch2; | |
| 93 scratch2 = no_reg; | |
| 94 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | |
| 95 | |
| 96 // Check that the flags match what we're looking for. | |
| 97 Register flags_reg = base_addr; | |
| 98 base_addr = no_reg; | |
| 99 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | |
| 100 // It's a nice optimization if this constant is encodable in the bic insn. | |
| 101 | |
| 102 uint32_t mask = Code::kFlagsNotUsedInLookup; | |
| 103 ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); | |
| 104 __ bic(flags_reg, flags_reg, Operand(mask)); | |
| 105 __ cmp(flags_reg, Operand(flags)); | |
| 106 __ b(ne, &miss); | |
| 107 | |
| 108 #ifdef DEBUG | |
| 109 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | |
| 110 __ jmp(&miss); | |
| 111 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | |
| 112 __ jmp(&miss); | |
| 113 } | |
| 114 #endif | |
| 115 | |
| 116 // Jump to the first instruction in the code stub. | |
| 117 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 118 | |
| 119 // Miss: fall through. | |
| 120 __ bind(&miss); | |
| 121 } | |
| 122 | |
| 123 | |
| 124 // Helper function used to check that the dictionary doesn't contain | 43 // Helper function used to check that the dictionary doesn't contain |
| 125 // the property. This function may return false negatives, so miss_label | 44 // the property. This function may return false negatives, so miss_label |
| 126 // must always call a backup property check that is complete. | 45 // must always call a backup property check that is complete. |
| 127 // This function is safe to call if the receiver has fast properties. | 46 // This function is safe to call if the receiver has fast properties. |
| 128 // Name must be unique and receiver must be a heap object. | 47 // Name must be unique and receiver must be a heap object. |
| 129 static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, | 48 static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
| 130 Label* miss_label, | 49 Label* miss_label, |
| 131 Register receiver, | 50 Register receiver, |
| 132 Handle<Name> name, | 51 Handle<Name> name, |
| 133 Register scratch0, | 52 Register scratch0, |
| 134 Register scratch1) { | 53 Register scratch1) { |
| 54 ASSERT(!AreAliased(scratch0, scratch1)); |
| 135 ASSERT(name->IsUniqueName()); | 55 ASSERT(name->IsUniqueName()); |
| 136 Counters* counters = masm->isolate()->counters(); | 56 Counters* counters = masm->isolate()->counters(); |
| 137 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); | 57 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
| 138 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 58 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
| 139 | 59 |
| 140 Label done; | 60 Label done; |
| 141 | 61 |
| 142 const int kInterceptorOrAccessCheckNeededMask = | 62 const int kInterceptorOrAccessCheckNeededMask = |
| 143 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); | 63 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); |
| 144 | 64 |
| 145 // Bail out if the receiver has a named interceptor or requires access checks. | 65 // Bail out if the receiver has a named interceptor or requires access checks. |
| 146 Register map = scratch1; | 66 Register map = scratch1; |
| 147 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 67 __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 148 __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); | 68 __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 149 __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); | 69 __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask); |
| 150 __ b(ne, miss_label); | 70 __ B(ne, miss_label); |
| 151 | 71 |
| 152 // Check that receiver is a JSObject. | 72 // Check that receiver is a JSObject. |
| 153 __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 73 __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 154 __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 74 __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE); |
| 155 __ b(lt, miss_label); | 75 __ B(lt, miss_label); |
| 156 | 76 |
| 157 // Load properties array. | 77 // Load properties array. |
| 158 Register properties = scratch0; | 78 Register properties = scratch0; |
| 159 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 79 __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 160 // Check that the properties array is a dictionary. | 80 // Check that the properties array is a dictionary. |
| 161 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); | 81 __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
| 162 Register tmp = properties; | 82 __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label); |
| 163 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); | |
| 164 __ cmp(map, tmp); | |
| 165 __ b(ne, miss_label); | |
| 166 | |
| 167 // Restore the temporarily used register. | |
| 168 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
| 169 | |
| 170 | 83 |
| 171 NameDictionaryLookupStub::GenerateNegativeLookup(masm, | 84 NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
| 172 miss_label, | 85 miss_label, |
| 173 &done, | 86 &done, |
| 174 receiver, | 87 receiver, |
| 175 properties, | 88 properties, |
| 176 name, | 89 name, |
| 177 scratch1); | 90 scratch1); |
| 178 __ bind(&done); | 91 __ Bind(&done); |
| 179 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 92 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
| 180 } | 93 } |
| 181 | 94 |
| 182 | 95 |
| 96 // Probe primary or secondary table. |
| 97 // If the entry is found in the cache, the generated code jumps to the first |
| 98 // instruction of the stub in the cache. |
| 99 // If there is a miss, the code falls through. |
| 100 // |
| 101 // 'receiver', 'name' and 'offset' registers are preserved on miss. |
| 102 static void ProbeTable(Isolate* isolate, |
| 103 MacroAssembler* masm, |
| 104 Code::Flags flags, |
| 105 StubCache::Table table, |
| 106 Register receiver, |
| 107 Register name, |
| 108 Register offset, |
| 109 Register scratch, |
| 110 Register scratch2, |
| 111 Register scratch3) { |
| 112 // Some code below relies on the fact that the Entry struct contains |
| 113 // 3 pointers (name, code, map). |
| 114 STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize)); |
| 115 |
| 116 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
| 117 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
| 118 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
| 119 |
| 120 uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); |
| 121 uintptr_t value_off_addr = |
| 122 reinterpret_cast<uintptr_t>(value_offset.address()); |
| 123 uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); |
| 124 |
| 125 Label miss; |
| 126 |
| 127 ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3)); |
| 128 |
| 129 // Multiply by 3 because there are 3 fields per entry. |
| 130 __ Add(scratch3, offset, Operand(offset, LSL, 1)); |
| 131 |
| 132 // Calculate the base address of the entry. |
| 133 __ Mov(scratch, Operand(key_offset)); |
| 134 __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2)); |
| 135 |
| 136 // Check that the key in the entry matches the name. |
| 137 __ Ldr(scratch2, MemOperand(scratch)); |
| 138 __ Cmp(name, scratch2); |
| 139 __ B(ne, &miss); |
| 140 |
| 141 // Check the map matches. |
| 142 __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr)); |
| 143 __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 144 __ Cmp(scratch2, scratch3); |
| 145 __ B(ne, &miss); |
| 146 |
| 147 // Get the code entry from the cache. |
| 148 __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr)); |
| 149 |
| 150 // Check that the flags match what we're looking for. |
| 151 __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset)); |
| 152 __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup); |
| 153 __ Cmp(scratch2.W(), flags); |
| 154 __ B(ne, &miss); |
| 155 |
| 156 #ifdef DEBUG |
| 157 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
| 158 __ B(&miss); |
| 159 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
| 160 __ B(&miss); |
| 161 } |
| 162 #endif |
| 163 |
| 164 // Jump to the first instruction in the code stub. |
| 165 __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); |
| 166 __ Br(scratch); |
| 167 |
| 168 // Miss: fall through. |
| 169 __ Bind(&miss); |
| 170 } |
| 171 |
| 172 |
| 173 // Check if key is a smi or can be converted into a smi. |
| 174 // If not jump on 'fail' and fall-through otherwise. |
| 175 static void GenerateSmiKeyCheck(MacroAssembler* masm, |
| 176 Register key, |
| 177 Register scratch0, |
| 178 FPRegister double_scratch0, |
| 179 FPRegister double_scratch1, |
| 180 Label* fail) { |
| 181 Label key_ok; |
| 182 __ JumpIfSmi(key, &key_ok); |
| 183 |
| 184 // The key is not a smi. Check for a smi inside a heap number. |
| 185 __ CheckMap(key, |
| 186 scratch0, |
| 187 masm->isolate()->factory()->heap_number_map(), |
| 188 fail, |
| 189 DONT_DO_SMI_CHECK); |
| 190 |
| 191 __ Ldr(scratch0, FieldMemOperand(key, HeapNumber::kValueOffset)); |
| 192 __ Fmov(double_scratch0, scratch0); |
| 193 __ TryConvertDoubleToInt32(scratch0.W(), |
| 194 double_scratch0, |
| 195 double_scratch1, |
| 196 NULL, |
| 197 fail); |
| 198 // The double value has been converted to a 32-bit signed integer. |
| 199 // We just need to tag it. |
| 200 __ SmiTag(key, scratch0); |
| 201 |
| 202 __ Bind(&key_ok); |
| 203 } |
| 204 |
| 205 |
| 183 void StubCache::GenerateProbe(MacroAssembler* masm, | 206 void StubCache::GenerateProbe(MacroAssembler* masm, |
| 184 Code::Flags flags, | 207 Code::Flags flags, |
| 185 Register receiver, | 208 Register receiver, |
| 186 Register name, | 209 Register name, |
| 187 Register scratch, | 210 Register scratch, |
| 188 Register extra, | 211 Register extra, |
| 189 Register extra2, | 212 Register extra2, |
| 190 Register extra3) { | 213 Register extra3) { |
| 191 Isolate* isolate = masm->isolate(); | 214 Isolate* isolate = masm->isolate(); |
| 192 Label miss; | 215 Label miss; |
| 193 | 216 |
| 194 // Make sure that code is valid. The multiplying code relies on the | |
| 195 // entry size being 12. | |
| 196 ASSERT(sizeof(Entry) == 12); | |
| 197 | |
| 198 // Make sure the flags does not name a specific type. | 217 // Make sure the flags does not name a specific type. |
| 199 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); | 218 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); |
| 200 | 219 |
| 201 // Make sure that there are no register conflicts. | 220 // Make sure that there are no register conflicts. |
| 202 ASSERT(!scratch.is(receiver)); | 221 ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); |
| 203 ASSERT(!scratch.is(name)); | |
| 204 ASSERT(!extra.is(receiver)); | |
| 205 ASSERT(!extra.is(name)); | |
| 206 ASSERT(!extra.is(scratch)); | |
| 207 ASSERT(!extra2.is(receiver)); | |
| 208 ASSERT(!extra2.is(name)); | |
| 209 ASSERT(!extra2.is(scratch)); | |
| 210 ASSERT(!extra2.is(extra)); | |
| 211 | 222 |
| 212 // Check scratch, extra and extra2 registers are valid. | 223 // Make sure extra and extra2 registers are valid. |
| 213 ASSERT(!scratch.is(no_reg)); | |
| 214 ASSERT(!extra.is(no_reg)); | 224 ASSERT(!extra.is(no_reg)); |
| 215 ASSERT(!extra2.is(no_reg)); | 225 ASSERT(!extra2.is(no_reg)); |
| 216 ASSERT(!extra3.is(no_reg)); | 226 ASSERT(!extra3.is(no_reg)); |
| 217 | 227 |
| 218 Counters* counters = masm->isolate()->counters(); | 228 Counters* counters = masm->isolate()->counters(); |
| 219 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | 229 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
| 220 extra2, extra3); | 230 extra2, extra3); |
| 221 | 231 |
| 222 // Check that the receiver isn't a smi. | 232 // Check that the receiver isn't a smi. |
| 223 __ JumpIfSmi(receiver, &miss); | 233 __ JumpIfSmi(receiver, &miss); |
| 224 | 234 |
| 225 // Get the map of the receiver and compute the hash. | 235 // Compute the hash for primary table. |
| 226 __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | 236 __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 227 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 237 __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 228 __ add(scratch, scratch, Operand(ip)); | 238 __ Add(scratch, scratch, extra); |
| 229 uint32_t mask = kPrimaryTableSize - 1; | 239 __ Eor(scratch, scratch, flags); |
| 230 // We shift out the last two bits because they are not part of the hash and | 240 // We shift out the last two bits because they are not part of the hash. |
| 231 // they are always 01 for maps. | 241 __ Ubfx(scratch, scratch, kHeapObjectTagSize, |
| 232 __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); | 242 CountTrailingZeros(kPrimaryTableSize, 64)); |
| 233 // Mask down the eor argument to the minimum to keep the immediate | |
| 234 // ARM-encodable. | |
| 235 __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); | |
| 236 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | |
| 237 __ and_(scratch, scratch, Operand(mask)); | |
| 238 | 243 |
| 239 // Probe the primary table. | 244 // Probe the primary table. |
| 240 ProbeTable(isolate, | 245 ProbeTable(isolate, masm, flags, kPrimary, receiver, name, |
| 241 masm, | 246 scratch, extra, extra2, extra3); |
| 242 flags, | |
| 243 kPrimary, | |
| 244 receiver, | |
| 245 name, | |
| 246 scratch, | |
| 247 extra, | |
| 248 extra2, | |
| 249 extra3); | |
| 250 | 247 |
| 251 // Primary miss: Compute hash for secondary probe. | 248 // Primary miss: Compute hash for secondary table. |
| 252 __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); | 249 __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); |
| 253 uint32_t mask2 = kSecondaryTableSize - 1; | 250 __ Add(scratch, scratch, flags >> kHeapObjectTagSize); |
| 254 __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); | 251 __ And(scratch, scratch, kSecondaryTableSize - 1); |
| 255 __ and_(scratch, scratch, Operand(mask2)); | |
| 256 | 252 |
| 257 // Probe the secondary table. | 253 // Probe the secondary table. |
| 258 ProbeTable(isolate, | 254 ProbeTable(isolate, masm, flags, kSecondary, receiver, name, |
| 259 masm, | 255 scratch, extra, extra2, extra3); |
| 260 flags, | |
| 261 kSecondary, | |
| 262 receiver, | |
| 263 name, | |
| 264 scratch, | |
| 265 extra, | |
| 266 extra2, | |
| 267 extra3); | |
| 268 | 256 |
| 269 // Cache miss: Fall-through and let caller handle the miss by | 257 // Cache miss: Fall-through and let caller handle the miss by |
| 270 // entering the runtime system. | 258 // entering the runtime system. |
| 271 __ bind(&miss); | 259 __ Bind(&miss); |
| 272 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, | 260 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, |
| 273 extra2, extra3); | 261 extra2, extra3); |
| 274 } | 262 } |
| 275 | 263 |
| 276 | 264 |
| 277 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, | 265 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, |
| 278 int index, | 266 int index, |
| 279 Register prototype) { | 267 Register prototype) { |
| 280 // Load the global or builtins object from the current context. | 268 // Load the global or builtins object from the current context. |
| 281 __ ldr(prototype, | 269 __ Ldr(prototype, GlobalObjectMemOperand()); |
| 282 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | |
| 283 // Load the native context from the global or builtins object. | 270 // Load the native context from the global or builtins object. |
| 284 __ ldr(prototype, | 271 __ Ldr(prototype, |
| 285 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); | 272 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); |
| 286 // Load the function from the native context. | 273 // Load the function from the native context. |
| 287 __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index))); | 274 __ Ldr(prototype, ContextMemOperand(prototype, index)); |
| 288 // Load the initial map. The global functions all have initial maps. | 275 // Load the initial map. The global functions all have initial maps. |
| 289 __ ldr(prototype, | 276 __ Ldr(prototype, |
| 290 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); | 277 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); |
| 291 // Load the prototype from the initial map. | 278 // Load the prototype from the initial map. |
| 292 __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 279 __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
| 293 } | 280 } |
| 294 | 281 |
| 295 | 282 |
| 296 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( | 283 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
| 297 MacroAssembler* masm, | 284 MacroAssembler* masm, |
| 298 int index, | 285 int index, |
| 299 Register prototype, | 286 Register prototype, |
| 300 Label* miss) { | 287 Label* miss) { |
| 301 Isolate* isolate = masm->isolate(); | 288 Isolate* isolate = masm->isolate(); |
| 302 // Check we're still in the same context. | 289 // Check we're still in the same context. |
| 303 __ ldr(prototype, | 290 __ Ldr(prototype, GlobalObjectMemOperand()); |
| 304 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 291 __ Cmp(prototype, Operand(isolate->global_object())); |
| 305 __ Move(ip, isolate->global_object()); | 292 __ B(ne, miss); |
| 306 __ cmp(prototype, ip); | |
| 307 __ b(ne, miss); | |
| 308 // Get the global function with the given index. | 293 // Get the global function with the given index. |
| 309 Handle<JSFunction> function( | 294 Handle<JSFunction> function( |
| 310 JSFunction::cast(isolate->native_context()->get(index))); | 295 JSFunction::cast(isolate->native_context()->get(index))); |
| 311 // Load its initial map. The global functions all have initial maps. | 296 // Load its initial map. The global functions all have initial maps. |
| 312 __ Move(prototype, Handle<Map>(function->initial_map())); | 297 __ Mov(prototype, Operand(Handle<Map>(function->initial_map()))); |
| 313 // Load the prototype from the initial map. | 298 // Load the prototype from the initial map. |
| 314 __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 299 __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
| 315 } | 300 } |
| 316 | 301 |
| 317 | 302 |
| 318 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, | 303 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, |
| 319 Register dst, | 304 Register dst, |
| 320 Register src, | 305 Register src, |
| 321 bool inobject, | 306 bool inobject, |
| 322 int index, | 307 int index, |
| 323 Representation representation) { | 308 Representation representation) { |
| 324 ASSERT(!FLAG_track_double_fields || !representation.IsDouble()); | 309 ASSERT(!FLAG_track_double_fields || !representation.IsDouble()); |
| 325 int offset = index * kPointerSize; | 310 USE(representation); |
| 326 if (!inobject) { | 311 if (inobject) { |
| 312 int offset = index * kPointerSize; |
| 313 __ Ldr(dst, FieldMemOperand(src, offset)); |
| 314 } else { |
| 327 // Calculate the offset into the properties array. | 315 // Calculate the offset into the properties array. |
| 328 offset = offset + FixedArray::kHeaderSize; | 316 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
| 329 __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); | 317 __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); |
| 330 src = dst; | 318 __ Ldr(dst, FieldMemOperand(dst, offset)); |
| 331 } | 319 } |
| 332 __ ldr(dst, FieldMemOperand(src, offset)); | |
| 333 } | 320 } |
| 334 | 321 |
| 335 | 322 |
| 336 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, | 323 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
| 337 Register receiver, | 324 Register receiver, |
| 338 Register scratch, | 325 Register scratch, |
| 339 Label* miss_label) { | 326 Label* miss_label) { |
| 327 ASSERT(!AreAliased(receiver, scratch)); |
| 328 |
| 340 // Check that the receiver isn't a smi. | 329 // Check that the receiver isn't a smi. |
| 341 __ JumpIfSmi(receiver, miss_label); | 330 __ JumpIfSmi(receiver, miss_label); |
| 342 | 331 |
| 343 // Check that the object is a JS array. | 332 // Check that the object is a JS array. |
| 344 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); | 333 __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, |
| 345 __ b(ne, miss_label); | 334 miss_label); |
| 346 | 335 |
| 347 // Load length directly from the JS array. | 336 // Load length directly from the JS array. |
| 348 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 337 __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 349 __ Ret(); | 338 __ Ret(); |
| 350 } | 339 } |
| 351 | 340 |
| 352 | 341 |
| 353 // Generate code to check if an object is a string. If the object is a | 342 // Generate code to check if an object is a string. If the object is a |
| 354 // heap object, its map's instance type is left in the scratch1 register. | 343 // heap object, its map's instance type is left in the scratch1 register. |
| 355 // If this is not needed, scratch1 and scratch2 may be the same register. | |
| 356 static void GenerateStringCheck(MacroAssembler* masm, | 344 static void GenerateStringCheck(MacroAssembler* masm, |
| 357 Register receiver, | 345 Register receiver, |
| 358 Register scratch1, | 346 Register scratch1, |
| 359 Register scratch2, | |
| 360 Label* smi, | 347 Label* smi, |
| 361 Label* non_string_object) { | 348 Label* non_string_object) { |
| 362 // Check that the receiver isn't a smi. | 349 // Check that the receiver isn't a smi. |
| 363 __ JumpIfSmi(receiver, smi); | 350 __ JumpIfSmi(receiver, smi); |
| 364 | 351 |
| 365 // Check that the object is a string. | 352 // Get the object's instance type field. |
| 366 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 353 __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 367 __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 354 __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
| 368 __ and_(scratch2, scratch1, Operand(kIsNotStringMask)); | 355 // Check if the "not string" bit is set. |
| 369 // The cast is to resolve the overload for the argument of 0x0. | 356 __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object); |
| 370 __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); | |
| 371 __ b(ne, non_string_object); | |
| 372 } | 357 } |
| 373 | 358 |
| 374 | 359 |
| 375 // Generate code to load the length from a string object and return the length. | 360 // Generate code to load the length from a string object and return the length. |
| 376 // If the receiver object is not a string or a wrapped string object the | 361 // If the receiver object is not a string or a wrapped string object the |
| 377 // execution continues at the miss label. The register containing the | 362 // execution continues at the miss label. The register containing the |
| 378 // receiver is potentially clobbered. | 363 // receiver is not clobbered if the receiver is not a string. |
| 379 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, | 364 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, |
| 380 Register receiver, | 365 Register receiver, |
| 381 Register scratch1, | 366 Register scratch1, |
| 382 Register scratch2, | 367 Register scratch2, |
| 383 Label* miss, | 368 Label* miss, |
| 384 bool support_wrappers) { | 369 bool support_wrappers) { |
| 370 // Input registers can't alias because we don't want to clobber the |
| 371 // receiver register if the object is not a string. |
| 372 ASSERT(!AreAliased(receiver, scratch1, scratch2)); |
| 373 |
| 385 Label check_wrapper; | 374 Label check_wrapper; |
| 386 | 375 |
| 387 // Check if the object is a string leaving the instance type in the | 376 // Check if the object is a string leaving the instance type in the |
| 388 // scratch1 register. | 377 // scratch1 register. |
| 389 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, | 378 GenerateStringCheck(masm, receiver, scratch1, miss, |
| 390 support_wrappers ? &check_wrapper : miss); | 379 support_wrappers ? &check_wrapper : miss); |
| 391 | 380 |
| 392 // Load length directly from the string. | 381 // Load length directly from the string. |
| 393 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); | 382 __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset)); |
| 394 __ Ret(); | 383 __ Ret(); |
| 395 | 384 |
| 396 if (support_wrappers) { | 385 if (support_wrappers) { |
| 397 // Check if the object is a JSValue wrapper. | 386 // Check if the object is a JSValue wrapper. |
| 398 __ bind(&check_wrapper); | 387 __ Bind(&check_wrapper); |
| 399 __ cmp(scratch1, Operand(JS_VALUE_TYPE)); | 388 __ Cmp(scratch1, Operand(JS_VALUE_TYPE)); |
| 400 __ b(ne, miss); | 389 __ B(ne, miss); |
| 401 | 390 |
| 402 // Unwrap the value and check if the wrapped value is a string. | 391 // Unwrap the value and check if the wrapped value is a string. |
| 403 __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); | 392 __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); |
| 404 GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); | 393 GenerateStringCheck(masm, scratch1, scratch2, miss, miss); |
| 405 __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); | 394 __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset)); |
| 406 __ Ret(); | 395 __ Ret(); |
| 407 } | 396 } |
| 408 } | 397 } |
| 409 | 398 |
| 410 | 399 |
| 411 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, | 400 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, |
| 412 Register receiver, | 401 Register receiver, |
| 413 Register scratch1, | 402 Register scratch1, |
| 414 Register scratch2, | 403 Register scratch2, |
| 415 Label* miss_label) { | 404 Label* miss_label) { |
| 416 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); | 405 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); |
| 417 __ mov(r0, scratch1); | 406 // TryGetFunctionPrototype can't put the result directly in x0 because the |
| 407 // 3 input registers can't alias and we call this function from |
| 408 // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly |
| 409 // move the result in x0. |
| 410 __ Mov(x0, scratch1); |
| 418 __ Ret(); | 411 __ Ret(); |
| 419 } | 412 } |
| 420 | 413 |
| 421 | 414 |
| 422 // Generate code to check that a global property cell is empty. Create | 415 // Generate code to check that a global property cell is empty. Create |
| 423 // the property cell at compilation time if no cell exists for the | 416 // the property cell at compilation time if no cell exists for the |
| 424 // property. | 417 // property. |
| 425 static void GenerateCheckPropertyCell(MacroAssembler* masm, | 418 static void GenerateCheckPropertyCell(MacroAssembler* masm, |
| 426 Handle<GlobalObject> global, | 419 Handle<GlobalObject> global, |
| 427 Handle<Name> name, | 420 Handle<Name> name, |
| 428 Register scratch, | 421 Register scratch, |
| 422 Register the_hole, |
| 429 Label* miss) { | 423 Label* miss) { |
| 430 Handle<JSGlobalPropertyCell> cell = | 424 Handle<JSGlobalPropertyCell> cell = |
| 431 GlobalObject::EnsurePropertyCell(global, name); | 425 GlobalObject::EnsurePropertyCell(global, name); |
| 432 ASSERT(cell->value()->IsTheHole()); | 426 ASSERT(cell->value()->IsTheHole()); |
| 433 __ mov(scratch, Operand(cell)); | 427 __ Mov(scratch, Operand(cell)); |
| 434 __ ldr(scratch, | 428 __ Ldr(scratch, |
| 435 FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); | 429 FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); |
| 436 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 430 __ Cmp(scratch, the_hole); |
| 437 __ cmp(scratch, ip); | 431 __ B(ne, miss); |
| 438 __ b(ne, miss); | |
| 439 } | 432 } |
| 440 | 433 |
| 441 | 434 |
| 442 // Generate StoreTransition code, value is passed in r0 register. | 435 // Generate StoreTransition code, value is passed in x0 register. |
| 443 // When leaving generated code after success, the receiver_reg and name_reg | 436 // When leaving generated code after success, the receiver_reg and name_reg may |
| 444 // may be clobbered. Upon branch to miss_label, the receiver and name | 437 // be clobbered. Upon branch to miss_label, the receiver and name registers have |
| 445 // registers have their original values. | 438 // their original values. |
| 446 void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, | 439 void StubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
| 447 Handle<JSObject> object, | 440 Handle<JSObject> object, |
| 448 LookupResult* lookup, | 441 LookupResult* lookup, |
| 449 Handle<Map> transition, | 442 Handle<Map> transition, |
| 450 Handle<Name> name, | 443 Handle<Name> name, |
| 451 Register receiver_reg, | 444 Register receiver_reg, |
| 452 Register name_reg, | 445 Register name_reg, |
| 453 Register value_reg, | 446 Register value_reg, |
| 454 Register scratch1, | 447 Register scratch1, |
| 455 Register scratch2, | 448 Register scratch2, |
| 456 Register scratch3, | 449 Register scratch3, |
| 457 Label* miss_label, | 450 Label* miss_label, |
| 458 Label* miss_restore_name, | 451 Label* miss_restore_name, |
| 459 Label* slow) { | 452 Label* slow) { |
| 460 // r0 : value | |
| 461 Label exit; | 453 Label exit; |
| 462 | 454 |
| 455 ASSERT(!AreAliased(receiver_reg, name_reg, value_reg, |
| 456 scratch1, scratch2, scratch3)); |
| 457 |
| 458 // We don't need scratch3. |
| 459 scratch3 = NoReg; |
| 460 |
| 463 // Check that the map of the object hasn't changed. | 461 // Check that the map of the object hasn't changed. |
| 464 __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, | 462 __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, |
| 465 DO_SMI_CHECK); | 463 DO_SMI_CHECK); |
| 466 | 464 |
| 467 // Perform global security token check if needed. | 465 // Perform global security token check if needed. |
| 468 if (object->IsJSGlobalProxy()) { | 466 if (object->IsJSGlobalProxy()) { |
| 469 __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); | 467 __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); |
| 470 } | 468 } |
| 471 | 469 |
| 472 int descriptor = transition->LastAdded(); | 470 int descriptor = transition->LastAdded(); |
| (...skipping 19 matching lines...) Expand all Loading... |
| 492 } while (holder->GetPrototype()->IsJSObject()); | 490 } while (holder->GetPrototype()->IsJSObject()); |
| 493 } | 491 } |
| 494 Register holder_reg = CheckPrototypes( | 492 Register holder_reg = CheckPrototypes( |
| 495 object, receiver_reg, Handle<JSObject>(holder), name_reg, | 493 object, receiver_reg, Handle<JSObject>(holder), name_reg, |
| 496 scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER); | 494 scratch1, scratch2, name, miss_restore_name, SKIP_RECEIVER); |
| 497 // If no property was found, and the holder (the last object in the | 495 // If no property was found, and the holder (the last object in the |
| 498 // prototype chain) is in slow mode, we need to do a negative lookup on the | 496 // prototype chain) is in slow mode, we need to do a negative lookup on the |
| 499 // holder. | 497 // holder. |
| 500 if (lookup->holder() == *object) { | 498 if (lookup->holder() == *object) { |
| 501 if (holder->IsJSGlobalObject()) { | 499 if (holder->IsJSGlobalObject()) { |
| 500 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); |
| 502 GenerateCheckPropertyCell( | 501 GenerateCheckPropertyCell( |
| 503 masm, | 502 masm, |
| 504 Handle<GlobalObject>(GlobalObject::cast(holder)), | 503 Handle<GlobalObject>(GlobalObject::cast(holder)), |
| 505 name, | 504 name, |
| 506 scratch1, | 505 scratch1, |
| 506 scratch2, // The hole. |
| 507 miss_restore_name); | 507 miss_restore_name); |
| 508 } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { | 508 } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) { |
| 509 GenerateDictionaryNegativeLookup( | 509 GenerateDictionaryNegativeLookup( |
| 510 masm, miss_restore_name, holder_reg, name, scratch1, scratch2); | 510 masm, miss_restore_name, holder_reg, name, scratch1, scratch2); |
| 511 } | 511 } |
| 512 } | 512 } |
| 513 } | 513 } |
| 514 | 514 |
| 515 // We've possibly already clobbered name_reg at this point, so use it for |
| 516 // storage_reg. |
| 515 Register storage_reg = name_reg; | 517 Register storage_reg = name_reg; |
| 516 | 518 |
| 517 if (FLAG_track_fields && representation.IsSmi()) { | 519 if (FLAG_track_fields && representation.IsSmi()) { |
| 518 __ JumpIfNotSmi(value_reg, miss_restore_name); | 520 __ JumpIfNotSmi(value_reg, miss_restore_name); |
| 519 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | 521 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
| 520 __ JumpIfSmi(value_reg, miss_restore_name); | 522 __ JumpIfSmi(value_reg, miss_restore_name); |
| 521 } else if (FLAG_track_double_fields && representation.IsDouble()) { | 523 } else if (FLAG_track_double_fields && representation.IsDouble()) { |
| 522 Label do_store, heap_number; | 524 Label do_store, heap_number; |
| 523 __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); | 525 __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2); |
| 524 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); | |
| 525 | 526 |
| 527 // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
| 528 // It's only used in Fcmp, but it's not really safe to use it like this. |
| 526 __ JumpIfNotSmi(value_reg, &heap_number); | 529 __ JumpIfNotSmi(value_reg, &heap_number); |
| 527 __ SmiUntag(scratch1, value_reg); | 530 __ SmiUntagToDouble(fp_scratch, value_reg); |
| 528 __ vmov(s0, scratch1); | 531 __ B(&do_store); |
| 529 __ vcvt_f64_s32(d0, s0); | |
| 530 __ jmp(&do_store); | |
| 531 | 532 |
| 532 __ bind(&heap_number); | 533 __ Bind(&heap_number); |
| 533 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, | 534 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, |
| 534 miss_restore_name, DONT_DO_SMI_CHECK); | 535 miss_restore_name, DONT_DO_SMI_CHECK); |
| 535 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 536 __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
| 536 | 537 |
| 537 __ bind(&do_store); | 538 __ Bind(&do_store); |
| 538 __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); | 539 __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
| 539 } | 540 } |
| 540 | 541 |
| 541 // Stub never generated for non-global objects that require access | 542 // Stub never generated for non-global objects that require access checks. |
| 542 // checks. | |
| 543 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 543 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
| 544 | 544 |
| 545 // Perform map transition for the receiver if necessary. | 545 // Perform map transition for the receiver if necessary. |
| 546 if (object->map()->unused_property_fields() == 0) { | 546 if (object->map()->unused_property_fields() == 0) { |
| 547 // The properties must be extended before we can store the value. | 547 // The properties must be extended before we can store the value. |
| 548 // We jump to a runtime call that extends the properties array. | 548 // We jump to a runtime call that extends the properties array. |
| 549 __ push(receiver_reg); | 549 __ Mov(scratch1, Operand(transition)); |
| 550 __ mov(r2, Operand(transition)); | 550 __ Push(receiver_reg, scratch1, value_reg); |
| 551 __ Push(r2, r0); | |
| 552 __ TailCallExternalReference( | 551 __ TailCallExternalReference( |
| 553 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), | 552 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
| 554 masm->isolate()), | 553 masm->isolate()), |
| 555 3, | 554 3, |
| 556 1); | 555 1); |
| 557 return; | 556 return; |
| 558 } | 557 } |
| 559 | 558 |
| 560 // Update the map of the object. | 559 // Update the map of the object. |
| 561 __ mov(scratch1, Operand(transition)); | 560 __ Mov(scratch1, Operand(transition)); |
| 562 __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); | 561 __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
| 563 | 562 |
| 564 // Update the write barrier for the map field and pass the now unused | 563 // Update the write barrier for the map field and pass the now unused |
| 565 // name_reg as scratch register. | 564 // name_reg as scratch register. |
| 566 __ RecordWriteField(receiver_reg, | 565 __ RecordWriteField(receiver_reg, |
| 567 HeapObject::kMapOffset, | 566 HeapObject::kMapOffset, |
| 568 scratch1, | 567 scratch1, |
| 569 scratch2, | 568 scratch2, |
| 570 kLRHasNotBeenSaved, | 569 kLRHasNotBeenSaved, |
| 571 kDontSaveFPRegs, | 570 kDontSaveFPRegs, |
| 572 OMIT_REMEMBERED_SET, | 571 OMIT_REMEMBERED_SET, |
| 573 OMIT_SMI_CHECK); | 572 OMIT_SMI_CHECK); |
| 574 | |
| 575 int index = transition->instance_descriptors()->GetFieldIndex( | 573 int index = transition->instance_descriptors()->GetFieldIndex( |
| 576 transition->LastAdded()); | 574 transition->LastAdded()); |
| 577 | 575 |
| 578 // Adjust for the number of properties stored in the object. Even in the | 576 // Adjust for the number of properties stored in the object. Even in the |
| 579 // face of a transition we can use the old map here because the size of the | 577 // face of a transition we can use the old map here because the size of the |
| 580 // object and the number of in-object properties is not going to change. | 578 // object and the number of in-object properties is not going to change. |
| 581 index -= object->map()->inobject_properties(); | 579 index -= object->map()->inobject_properties(); |
| 582 | 580 |
| 583 // TODO(verwaest): Share this code as a code stub. | 581 // TODO(verwaest): Share this code as a code stub. |
| 584 SmiCheck smi_check = representation.IsTagged() | 582 SmiCheck smi_check = representation.IsTagged() |
| 585 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 583 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
| 586 if (index < 0) { | 584 if (index < 0) { |
| 587 // Set the property straight into the object. | 585 // Set the property straight into the object. |
| 588 int offset = object->map()->instance_size() + (index * kPointerSize); | 586 int offset = object->map()->instance_size() + (index * kPointerSize); |
| 587 // TODO(jbramley): This construct appears in several places in this |
| 588 // function. Try to clean it up, perhaps using a result_reg. |
| 589 if (FLAG_track_double_fields && representation.IsDouble()) { | 589 if (FLAG_track_double_fields && representation.IsDouble()) { |
| 590 __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); | 590 __ Str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
| 591 } else { | 591 } else { |
| 592 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); | 592 __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
| 593 } | 593 } |
| 594 | 594 |
| 595 if (!FLAG_track_fields || !representation.IsSmi()) { | 595 if (!FLAG_track_fields || !representation.IsSmi()) { |
| 596 // Skip updating write barrier if storing a smi. | 596 // Skip updating write barrier if storing a smi. |
| 597 __ JumpIfSmi(value_reg, &exit); | 597 __ JumpIfSmi(value_reg, &exit); |
| 598 | 598 |
| 599 // Update the write barrier for the array address. | 599 // Update the write barrier for the array address. |
| 600 // Pass the now unused name_reg as a scratch register. | 600 // Pass the now unused name_reg as a scratch register. |
| 601 if (!FLAG_track_double_fields || !representation.IsDouble()) { | 601 if (!FLAG_track_double_fields || !representation.IsDouble()) { |
| 602 __ mov(name_reg, value_reg); | 602 __ Mov(name_reg, value_reg); |
| 603 } else { | 603 } else { |
| 604 ASSERT(storage_reg.is(name_reg)); | 604 ASSERT(storage_reg.is(name_reg)); |
| 605 } | 605 } |
| 606 __ RecordWriteField(receiver_reg, | 606 __ RecordWriteField(receiver_reg, |
| 607 offset, | 607 offset, |
| 608 name_reg, | 608 name_reg, |
| 609 scratch1, | 609 scratch1, |
| 610 kLRHasNotBeenSaved, | 610 kLRHasNotBeenSaved, |
| 611 kDontSaveFPRegs, | 611 kDontSaveFPRegs, |
| 612 EMIT_REMEMBERED_SET, | 612 EMIT_REMEMBERED_SET, |
| 613 smi_check); | 613 smi_check); |
| 614 } | 614 } |
| 615 } else { | 615 } else { |
| 616 // Write to the properties array. | 616 // Write to the properties array. |
| 617 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 617 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
| 618 // Get the properties array | 618 // Get the properties array |
| 619 __ ldr(scratch1, | 619 __ Ldr(scratch1, |
| 620 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 620 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
| 621 if (FLAG_track_double_fields && representation.IsDouble()) { | 621 if (FLAG_track_double_fields && representation.IsDouble()) { |
| 622 __ str(storage_reg, FieldMemOperand(scratch1, offset)); | 622 __ Str(storage_reg, FieldMemOperand(scratch1, offset)); |
| 623 } else { | 623 } else { |
| 624 __ str(value_reg, FieldMemOperand(scratch1, offset)); | 624 __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
| 625 } | 625 } |
| 626 | 626 |
| 627 if (!FLAG_track_fields || !representation.IsSmi()) { | 627 if (!FLAG_track_fields || !representation.IsSmi()) { |
| 628 // Skip updating write barrier if storing a smi. | 628 // Skip updating write barrier if storing a smi. |
| 629 __ JumpIfSmi(value_reg, &exit); | 629 __ JumpIfSmi(value_reg, &exit); |
| 630 | 630 |
| 631 // Update the write barrier for the array address. | 631 // Update the write barrier for the array address. |
| 632 // Ok to clobber receiver_reg and name_reg, since we return. | 632 // Ok to clobber receiver_reg and name_reg, since we return. |
| 633 if (!FLAG_track_double_fields || !representation.IsDouble()) { | 633 if (!FLAG_track_double_fields || !representation.IsDouble()) { |
| 634 __ mov(name_reg, value_reg); | 634 __ Mov(name_reg, value_reg); |
| 635 } else { | 635 } else { |
| 636 ASSERT(storage_reg.is(name_reg)); | 636 ASSERT(storage_reg.is(name_reg)); |
| 637 } | 637 } |
| 638 __ RecordWriteField(scratch1, | 638 __ RecordWriteField(scratch1, |
| 639 offset, | 639 offset, |
| 640 name_reg, | 640 name_reg, |
| 641 receiver_reg, | 641 receiver_reg, |
| 642 kLRHasNotBeenSaved, | 642 kLRHasNotBeenSaved, |
| 643 kDontSaveFPRegs, | 643 kDontSaveFPRegs, |
| 644 EMIT_REMEMBERED_SET, | 644 EMIT_REMEMBERED_SET, |
| 645 smi_check); | 645 smi_check); |
| 646 } | 646 } |
| 647 } | 647 } |
| 648 | 648 |
| 649 // Return the value (register r0). | 649 __ Bind(&exit); |
| 650 ASSERT(value_reg.is(r0)); | 650 // Return the value (register x0). |
| 651 __ bind(&exit); | 651 ASSERT(value_reg.is(x0)); |
| 652 __ Ret(); | 652 __ Ret(); |
| 653 } | 653 } |
| 654 | 654 |
| 655 | 655 |
| 656 // Generate StoreField code, value is passed in r0 register. | 656 // Generate StoreField code, value is passed in x0 register. |
| 657 // When leaving generated code after success, the receiver_reg and name_reg | 657 // When leaving generated code after success, the receiver_reg and name_reg may |
| 658 // may be clobbered. Upon branch to miss_label, the receiver and name | 658 // be clobbered. Upon branch to miss_label, the receiver and name registers have |
| 659 // registers have their original values. | 659 // their original values. |
| 660 void StubCompiler::GenerateStoreField(MacroAssembler* masm, | 660 void StubCompiler::GenerateStoreField(MacroAssembler* masm, |
| 661 Handle<JSObject> object, | 661 Handle<JSObject> object, |
| 662 LookupResult* lookup, | 662 LookupResult* lookup, |
| 663 Register receiver_reg, | 663 Register receiver_reg, |
| 664 Register name_reg, | 664 Register name_reg, |
| 665 Register value_reg, | 665 Register value_reg, |
| 666 Register scratch1, | 666 Register scratch1, |
| 667 Register scratch2, | 667 Register scratch2, |
| 668 Label* miss_label) { | 668 Label* miss_label) { |
| 669 // r0 : value | 669 // x0 : value |
| 670 Label exit; | 670 Label exit; |
| 671 | 671 |
| 672 // Check that the map of the object hasn't changed. | 672 // Check that the map of the object hasn't changed. |
| 673 __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, | 673 __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label, |
| 674 DO_SMI_CHECK); | 674 DO_SMI_CHECK); |
| 675 | 675 |
| 676 // Perform global security token check if needed. | 676 // Perform global security token check if needed. |
| 677 if (object->IsJSGlobalProxy()) { | 677 if (object->IsJSGlobalProxy()) { |
| 678 __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); | 678 __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label); |
| 679 } | 679 } |
| (...skipping 11 matching lines...) Expand all Loading... |
| 691 | 691 |
| 692 Representation representation = lookup->representation(); | 692 Representation representation = lookup->representation(); |
| 693 ASSERT(!representation.IsNone()); | 693 ASSERT(!representation.IsNone()); |
| 694 if (FLAG_track_fields && representation.IsSmi()) { | 694 if (FLAG_track_fields && representation.IsSmi()) { |
| 695 __ JumpIfNotSmi(value_reg, miss_label); | 695 __ JumpIfNotSmi(value_reg, miss_label); |
| 696 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | 696 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
| 697 __ JumpIfSmi(value_reg, miss_label); | 697 __ JumpIfSmi(value_reg, miss_label); |
| 698 } else if (FLAG_track_double_fields && representation.IsDouble()) { | 698 } else if (FLAG_track_double_fields && representation.IsDouble()) { |
| 699 // Load the double storage. | 699 // Load the double storage. |
| 700 if (index < 0) { | 700 if (index < 0) { |
| 701 int offset = object->map()->instance_size() + (index * kPointerSize); | 701 int offset = (index * kPointerSize) + object->map()->instance_size(); |
| 702 __ ldr(scratch1, FieldMemOperand(receiver_reg, offset)); | 702 __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset)); |
| 703 } else { | 703 } else { |
| 704 __ ldr(scratch1, | 704 int offset = (index * kPointerSize) + FixedArray::kHeaderSize; |
| 705 __ Ldr(scratch1, |
| 705 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 706 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
| 706 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 707 __ Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
| 707 __ ldr(scratch1, FieldMemOperand(scratch1, offset)); | |
| 708 } | 708 } |
| 709 | 709 |
| 710 // Store the value into the storage. | 710 // Store the value into the storage. |
| 711 Label do_store, heap_number; | 711 Label do_store, heap_number; |
| 712 // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
| 713 // It's only used in Fcmp, but it's not really safe to use it like this. |
| 712 __ JumpIfNotSmi(value_reg, &heap_number); | 714 __ JumpIfNotSmi(value_reg, &heap_number); |
| 713 __ SmiUntag(scratch2, value_reg); | 715 __ SmiUntagToDouble(fp_scratch, value_reg); |
| 714 __ vmov(s0, scratch2); | 716 __ B(&do_store); |
| 715 __ vcvt_f64_s32(d0, s0); | |
| 716 __ jmp(&do_store); | |
| 717 | 717 |
| 718 __ bind(&heap_number); | 718 __ Bind(&heap_number); |
| 719 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, | 719 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, |
| 720 miss_label, DONT_DO_SMI_CHECK); | 720 miss_label, DONT_DO_SMI_CHECK); |
| 721 __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 721 __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
| 722 | 722 |
| 723 __ bind(&do_store); | 723 __ Bind(&do_store); |
| 724 __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); | 724 __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); |
| 725 // Return the value (register r0). | 725 |
| 726 ASSERT(value_reg.is(r0)); | 726 // Return the value (register x0). |
| 727 ASSERT(value_reg.is(x0)); |
| 727 __ Ret(); | 728 __ Ret(); |
| 728 return; | 729 return; |
| 729 } | 730 } |
| 730 | 731 |
| 731 // TODO(verwaest): Share this code as a code stub. | 732 // TODO(verwaest): Share this code as a code stub. |
| 732 SmiCheck smi_check = representation.IsTagged() | 733 SmiCheck smi_check = representation.IsTagged() |
| 733 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 734 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
| 734 if (index < 0) { | 735 if (index < 0) { |
| 735 // Set the property straight into the object. | 736 // Set the property straight into the object. |
| 736 int offset = object->map()->instance_size() + (index * kPointerSize); | 737 int offset = object->map()->instance_size() + (index * kPointerSize); |
| 737 __ str(value_reg, FieldMemOperand(receiver_reg, offset)); | 738 __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
| 738 | 739 |
| 739 if (!FLAG_track_fields || !representation.IsSmi()) { | 740 if (!FLAG_track_fields || !representation.IsSmi()) { |
| 740 // Skip updating write barrier if storing a smi. | 741 // Skip updating write barrier if storing a smi. |
| 741 __ JumpIfSmi(value_reg, &exit); | 742 __ JumpIfSmi(value_reg, &exit); |
| 742 | 743 |
| 743 // Update the write barrier for the array address. | 744 // Update the write barrier for the array address. |
| 744 // Pass the now unused name_reg as a scratch register. | 745 // Pass the now unused name_reg as a scratch register. |
| 745 __ mov(name_reg, value_reg); | 746 __ Mov(name_reg, value_reg); |
| 746 __ RecordWriteField(receiver_reg, | 747 __ RecordWriteField(receiver_reg, |
| 747 offset, | 748 offset, |
| 748 name_reg, | 749 name_reg, |
| 749 scratch1, | 750 scratch1, |
| 750 kLRHasNotBeenSaved, | 751 kLRHasNotBeenSaved, |
| 751 kDontSaveFPRegs, | 752 kDontSaveFPRegs, |
| 752 EMIT_REMEMBERED_SET, | 753 EMIT_REMEMBERED_SET, |
| 753 smi_check); | 754 smi_check); |
| 754 } | 755 } |
| 755 } else { | 756 } else { |
| 756 // Write to the properties array. | 757 // Write to the properties array. |
| 757 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 758 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
| 758 // Get the properties array | 759 // Get the properties array |
| 759 __ ldr(scratch1, | 760 __ Ldr(scratch1, |
| 760 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 761 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
| 761 __ str(value_reg, FieldMemOperand(scratch1, offset)); | 762 __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
| 762 | 763 |
| 763 if (!FLAG_track_fields || !representation.IsSmi()) { | 764 if (!FLAG_track_fields || !representation.IsSmi()) { |
| 764 // Skip updating write barrier if storing a smi. | 765 // Skip updating write barrier if storing a smi. |
| 765 __ JumpIfSmi(value_reg, &exit); | 766 __ JumpIfSmi(value_reg, &exit); |
| 766 | 767 |
| 767 // Update the write barrier for the array address. | 768 // Update the write barrier for the array address. |
| 768 // Ok to clobber receiver_reg and name_reg, since we return. | 769 // Ok to clobber receiver_reg and name_reg, since we return. |
| 769 __ mov(name_reg, value_reg); | 770 __ Mov(name_reg, value_reg); |
| 770 __ RecordWriteField(scratch1, | 771 __ RecordWriteField(scratch1, |
| 771 offset, | 772 offset, |
| 772 name_reg, | 773 name_reg, |
| 773 receiver_reg, | 774 receiver_reg, |
| 774 kLRHasNotBeenSaved, | 775 kLRHasNotBeenSaved, |
| 775 kDontSaveFPRegs, | 776 kDontSaveFPRegs, |
| 776 EMIT_REMEMBERED_SET, | 777 EMIT_REMEMBERED_SET, |
| 777 smi_check); | 778 smi_check); |
| 778 } | 779 } |
| 779 } | 780 } |
| 780 | 781 |
| 781 // Return the value (register r0). | 782 __ Bind(&exit); |
| 782 ASSERT(value_reg.is(r0)); | 783 // Return the value (register x0). |
| 783 __ bind(&exit); | 784 ASSERT(value_reg.is(x0)); |
| 784 __ Ret(); | 785 __ Ret(); |
| 785 } | 786 } |
| 786 | 787 |
| 787 | 788 |
| 788 void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, | 789 void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, |
| 789 Label* label, | 790 Label* label, |
| 790 Handle<Name> name) { | 791 Handle<Name> name) { |
| 791 if (!label->is_unused()) { | 792 if (!label->is_unused()) { |
| 792 __ bind(label); | 793 __ Bind(label); |
| 793 __ mov(this->name(), Operand(name)); | 794 __ Mov(this->name(), Operand(name)); |
| 794 } | 795 } |
| 795 } | 796 } |
| 796 | 797 |
| 797 | 798 |
| 799 // Calls GenerateCheckPropertyCell for each global object in the prototype chain |
| 800 // from object to (but not including) holder. |
| 801 static void GenerateCheckPropertyCells(MacroAssembler* masm, |
| 802 Handle<JSObject> object, |
| 803 Handle<JSObject> holder, |
| 804 Handle<Name> name, |
| 805 Register scratch1, |
| 806 Register scratch2, |
| 807 Label* miss) { |
| 808 bool the_hole_is_loaded = false; |
| 809 Handle<JSObject> current = object; |
| 810 while (!current.is_identical_to(holder)) { |
| 811 if (current->IsGlobalObject()) { |
| 812 if (!the_hole_is_loaded) { |
| 813 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); |
| 814 the_hole_is_loaded = true; |
| 815 } |
| 816 GenerateCheckPropertyCell(masm, |
| 817 Handle<GlobalObject>::cast(current), |
| 818 name, |
| 819 scratch1, |
| 820 scratch2, |
| 821 miss); |
| 822 } |
| 823 current = Handle<JSObject>(JSObject::cast(current->GetPrototype())); |
| 824 } |
| 825 } |
| 826 |
| 827 |
| 828 // The function to be called must be passed in x1. |
| 798 static void GenerateCallFunction(MacroAssembler* masm, | 829 static void GenerateCallFunction(MacroAssembler* masm, |
| 799 Handle<Object> object, | 830 Handle<Object> object, |
| 800 const ParameterCount& arguments, | 831 const ParameterCount& arguments, |
| 801 Label* miss, | 832 Label* miss, |
| 802 Code::ExtraICState extra_ic_state) { | 833 Code::ExtraICState extra_ic_state, |
| 803 // ----------- S t a t e ------------- | 834 Register function, |
| 804 // -- r0: receiver | 835 Register receiver, |
| 805 // -- r1: function to call | 836 Register scratch) { |
| 806 // ----------------------------------- | 837 ASSERT(!AreAliased(function, receiver, scratch)); |
| 838 ASSERT(function.Is(x1)); |
| 807 | 839 |
| 808 // Check that the function really is a function. | 840 // Check that the function really is a function. |
| 809 __ JumpIfSmi(r1, miss); | 841 __ JumpIfSmi(function, miss); |
| 810 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 842 __ JumpIfNotObjectType(function, scratch, scratch, JS_FUNCTION_TYPE, miss); |
| 811 __ b(ne, miss); | |
| 812 | 843 |
| 813 // Patch the receiver on the stack with the global proxy if | 844 // Patch the receiver on the stack with the global proxy if necessary. |
| 814 // necessary. | |
| 815 if (object->IsGlobalObject()) { | 845 if (object->IsGlobalObject()) { |
| 816 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); | 846 __ Ldr(scratch, |
| 817 __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize)); | 847 FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset)); |
| 848 __ Poke(scratch, arguments.immediate() * kPointerSize); |
| 818 } | 849 } |
| 819 | 850 |
| 820 // Invoke the function. | 851 // Invoke the function. |
| 821 CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state) | 852 CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state) |
| 822 ? CALL_AS_FUNCTION | 853 ? CALL_AS_FUNCTION |
| 823 : CALL_AS_METHOD; | 854 : CALL_AS_METHOD; |
| 824 __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind); | 855 __ InvokeFunction( |
| 856 function, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind); |
| 825 } | 857 } |
| 826 | 858 |
| 827 | 859 |
| 828 static void PushInterceptorArguments(MacroAssembler* masm, | 860 static void PushInterceptorArguments(MacroAssembler* masm, |
| 829 Register receiver, | 861 Register receiver, |
| 830 Register holder, | 862 Register holder, |
| 831 Register name, | 863 Register name, |
| 832 Handle<JSObject> holder_obj) { | 864 Handle<JSObject> holder_obj) { |
| 833 __ push(name); | 865 __ Push(name); |
| 834 Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); | 866 Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); |
| 835 ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); | 867 ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); |
| 836 Register scratch = name; | 868 Register scratch = name; |
| 837 __ mov(scratch, Operand(interceptor)); | 869 __ Mov(scratch, Operand(interceptor)); |
| 838 __ push(scratch); | 870 __ Push(scratch, receiver, holder); |
| 839 __ push(receiver); | 871 __ Ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); |
| 840 __ push(holder); | 872 __ Push(scratch); |
| 841 __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset)); | 873 __ Mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 842 __ push(scratch); | 874 __ Push(scratch); |
| 843 __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); | |
| 844 __ push(scratch); | |
| 845 } | 875 } |
| 846 | 876 |
| 847 | 877 |
| 848 static void CompileCallLoadPropertyWithInterceptor( | 878 static void CompileCallLoadPropertyWithInterceptor( |
| 849 MacroAssembler* masm, | 879 MacroAssembler* masm, |
| 850 Register receiver, | 880 Register receiver, |
| 851 Register holder, | 881 Register holder, |
| 852 Register name, | 882 Register name, |
| 853 Handle<JSObject> holder_obj) { | 883 Handle<JSObject> holder_obj) { |
| 854 PushInterceptorArguments(masm, receiver, holder, name, holder_obj); | 884 PushInterceptorArguments(masm, receiver, holder, name, holder_obj); |
| 855 | 885 |
| 856 ExternalReference ref = | 886 ExternalReference ref = |
| 857 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), | 887 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly), |
| 858 masm->isolate()); | 888 masm->isolate()); |
| 859 __ mov(r0, Operand(6)); | 889 // Put the number of on-stack arguments for runtime call in x0. |
| 860 __ mov(r1, Operand(ref)); | 890 // These arguments have been pushed by PushInterceptorArguments. |
| 891 __ Mov(x0, 6); |
| 892 __ Mov(x1, Operand(ref)); |
| 861 | 893 |
| 862 CEntryStub stub(1); | 894 CEntryStub stub(1); |
| 863 __ CallStub(&stub); | 895 __ CallStub(&stub); |
| 864 } | 896 } |
| 865 | 897 |
| 866 | 898 |
| 867 static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength; | 899 static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength; |
| 868 | 900 |
| 869 // Reserves space for the extra arguments to API function in the | 901 // Reserves space for the extra arguments to API function in the |
| 870 // caller's frame. | 902 // caller's frame. |
| 871 // | 903 // |
| 872 // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. | 904 // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall. |
| 873 static void ReserveSpaceForFastApiCall(MacroAssembler* masm, | 905 static void ReserveSpaceForFastApiCall(MacroAssembler* masm, |
| 874 Register scratch) { | 906 Register scratch) { |
| 875 __ mov(scratch, Operand(Smi::FromInt(0))); | 907 ASSERT(Smi::FromInt(0) == 0); |
| 876 for (int i = 0; i < kFastApiCallArguments; i++) { | 908 __ PushMultipleTimes(kFastApiCallArguments, xzr); |
| 877 __ push(scratch); | |
| 878 } | |
| 879 } | 909 } |
| 880 | 910 |
| 881 | 911 |
| 882 // Undoes the effects of ReserveSpaceForFastApiCall. | 912 // Undoes the effects of ReserveSpaceForFastApiCall. |
| 883 static void FreeSpaceForFastApiCall(MacroAssembler* masm) { | 913 static void FreeSpaceForFastApiCall(MacroAssembler* masm) { |
| 884 __ Drop(kFastApiCallArguments); | 914 __ Drop(kFastApiCallArguments); |
| 885 } | 915 } |
| 886 | 916 |
| 887 | 917 |
| 888 static void GenerateFastApiDirectCall(MacroAssembler* masm, | 918 static void GenerateFastApiDirectCall(MacroAssembler* masm, |
| 889 const CallOptimization& optimization, | 919 const CallOptimization& optimization, |
| 890 int argc) { | 920 int argc) { |
| 891 // ----------- S t a t e ------------- | 921 // ----------- S t a t e ------------- |
| 892 // -- sp[0] : holder (set by CheckPrototypes) | 922 // -- sp[0] : holder (set by CheckPrototypes) |
| 893 // -- sp[4] : callee JS function | 923 // -- sp[8] : callee JS function |
| 894 // -- sp[8] : call data | 924 // -- sp[16] : call data |
| 895 // -- sp[12] : isolate | 925 // -- sp[24] : isolate |
| 896 // -- sp[16] : ReturnValue default value | 926 // -- sp[32] : ReturnValue default value |
| 897 // -- sp[20] : ReturnValue | 927 // -- sp[40] : ReturnValue |
| 898 // -- sp[24] : last JS argument | 928 // -- sp[48] : last JS argument |
| 899 // -- ... | 929 // -- ... |
| 900 // -- sp[(argc + 5) * 4] : first JS argument | 930 // -- sp[(argc + 5) * 8] : first JS argument |
| 901 // -- sp[(argc + 6) * 4] : receiver | 931 // -- sp[(argc + 6) * 8] : receiver |
| 902 // ----------------------------------- | 932 // ----------------------------------- |
| 903 // Get the function and setup the context. | 933 // Get the function and setup the context. |
| 904 Handle<JSFunction> function = optimization.constant_function(); | 934 Handle<JSFunction> function = optimization.constant_function(); |
| 905 __ LoadHeapObject(r5, function); | 935 Register function_reg = x5; |
| 906 __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset)); | 936 __ LoadHeapObject(function_reg, function); |
| 937 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); |
| 907 | 938 |
| 908 // Pass the additional arguments. | 939 // Pass the additional arguments. |
| 909 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); | 940 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); |
| 910 Handle<Object> call_data(api_call_info->data(), masm->isolate()); | 941 Handle<Object> call_data(api_call_info->data(), masm->isolate()); |
| 942 Register call_data_reg = x6; |
| 911 if (masm->isolate()->heap()->InNewSpace(*call_data)) { | 943 if (masm->isolate()->heap()->InNewSpace(*call_data)) { |
| 912 __ Move(r0, api_call_info); | 944 __ Mov(x0, Operand(api_call_info)); |
| 913 __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); | 945 __ Ldr(call_data_reg, FieldMemOperand(x0, CallHandlerInfo::kDataOffset)); |
| 914 } else { | 946 } else { |
| 915 __ Move(r6, call_data); | 947 __ Mov(call_data_reg, Operand(call_data)); |
| 916 } | 948 } |
| 917 __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); | |
| 918 // Store JS function, call data, isolate ReturnValue default and ReturnValue. | |
| 919 __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); | |
| 920 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); | |
| 921 __ str(r5, MemOperand(sp, 4 * kPointerSize)); | |
| 922 __ str(r5, MemOperand(sp, 5 * kPointerSize)); | |
| 923 | 949 |
| 924 // Prepare arguments. | 950 Register isolate_reg = x7; |
| 925 __ add(r2, sp, Operand(5 * kPointerSize)); | 951 __ Mov(isolate_reg, |
| 952 Operand(ExternalReference::isolate_address(masm->isolate()))); |
| 926 | 953 |
| 927 // Allocate the v8::Arguments structure in the arguments' space since | 954 Register undefined_reg = x8; |
| 928 // it's not controlled by GC. | 955 __ LoadRoot(undefined_reg, Heap::kUndefinedValueRootIndex); |
| 929 const int kApiStackSpace = 4; | 956 |
| 957 // Store JS function, call data, isolate, ReturnValue default and ReturnValue. |
| 958 // TODO(jbramley): Try to combine these accesses using stp. |
| 959 __ Poke(function_reg, 1 * kXRegSizeInBytes); |
| 960 __ Poke(call_data_reg, 2 * kXRegSizeInBytes); |
| 961 __ Poke(isolate_reg, 3 * kXRegSizeInBytes); |
| 962 __ Poke(undefined_reg, 4 * kXRegSizeInBytes); |
| 963 __ Poke(undefined_reg, 5 * kXRegSizeInBytes); |
| 964 |
| 965 Register implicit_args = x2; |
| 966 __ Add(implicit_args, masm->StackPointer(), 5 * kXRegSizeInBytes); |
| 930 | 967 |
| 931 FrameScope frame_scope(masm, StackFrame::MANUAL); | 968 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 932 __ EnterExitFrame(false, kApiStackSpace); | 969 // Allocate the v8::Arguments structure inside the ExitFrame since it's not |
| 970 // controlled by GC. |
| 971 const int kApiArgsStackSpace = 4; |
| 972 __ EnterExitFrame( |
| 973 false, |
| 974 x3, |
| 975 kApiArgsStackSpace + MacroAssembler::kCallApiFunctionSpillSpace); |
| 933 | 976 |
| 934 // r0 = v8::Arguments& | 977 // Arguments structure is after the return address. |
| 935 // Arguments is after the return address. | 978 Register args = x0; |
| 936 __ add(r0, sp, Operand(1 * kPointerSize)); | 979 __ Add(args, masm->StackPointer(), kPointerSize); |
| 980 |
| 937 // v8::Arguments::implicit_args_ | 981 // v8::Arguments::implicit_args_ |
| 938 __ str(r2, MemOperand(r0, 0 * kPointerSize)); | 982 __ Str(implicit_args, MemOperand(args, 0 * kPointerSize)); |
| 939 // v8::Arguments::values_ | 983 // v8::Arguments::values_ |
| 940 __ add(ip, r2, Operand(argc * kPointerSize)); | 984 __ Add(x3, implicit_args, argc * kPointerSize); |
| 941 __ str(ip, MemOperand(r0, 1 * kPointerSize)); | 985 __ Str(x3, MemOperand(args, 1 * kPointerSize)); |
| 942 // v8::Arguments::length_ = argc | 986 // v8::Arguments::length_ = argc |
| 943 __ mov(ip, Operand(argc)); | 987 __ Mov(x3, argc); |
| 944 __ str(ip, MemOperand(r0, 2 * kPointerSize)); | 988 __ Str(x3, MemOperand(args, 2 * kPointerSize)); |
| 945 // v8::Arguments::is_construct_call = 0 | 989 // v8::Arguments::is_construct_call = 0 |
| 946 __ mov(ip, Operand::Zero()); | 990 __ Mov(x3, 0); |
| 947 __ str(ip, MemOperand(r0, 3 * kPointerSize)); | 991 __ Str(x3, MemOperand(args, 3 * kPointerSize)); |
| 948 | 992 |
| 993 // After the call to the API function we need to free memory used for: |
| 994 // - JS arguments |
| 995 // - the receiver |
| 996 // - the space allocated by ReserveSpaceForFastApiCall. |
| 997 // |
| 998 // The memory allocated for v8::Arguments structure will be freed when we'll |
| 999 // leave the ExitFrame. |
| 949 const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; | 1000 const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; |
| 1001 |
| 950 Address function_address = v8::ToCData<Address>(api_call_info->callback()); | 1002 Address function_address = v8::ToCData<Address>(api_call_info->callback()); |
| 951 bool returns_handle = | 1003 bool returns_handle = |
| 952 !CallbackTable::ReturnsVoid(masm->isolate(), function_address); | 1004 !CallbackTable::ReturnsVoid(masm->isolate(), function_address); |
| 953 ApiFunction fun(function_address); | 1005 ApiFunction fun(function_address); |
| 954 ExternalReference::Type type = | 1006 ExternalReference::Type type = |
| 955 returns_handle ? | 1007 returns_handle ? |
| 956 ExternalReference::DIRECT_API_CALL : | 1008 ExternalReference::DIRECT_API_CALL : |
| 957 ExternalReference::DIRECT_API_CALL_NEW; | 1009 ExternalReference::DIRECT_API_CALL_NEW; |
| 958 ExternalReference ref = ExternalReference(&fun, | 1010 ExternalReference ref = ExternalReference(&fun, type, masm->isolate()); |
| 959 type, | |
| 960 masm->isolate()); | |
| 961 AllowExternalCallThatCantCauseGC scope(masm); | 1011 AllowExternalCallThatCantCauseGC scope(masm); |
| 1012 // CallApiFunctionAndReturn can spill registers inside the exit frame, |
| 1013 // after the return address and the v8::Arguments structure. |
| 1014 const int spill_offset = 1 + kApiArgsStackSpace; |
| 962 __ CallApiFunctionAndReturn(ref, | 1015 __ CallApiFunctionAndReturn(ref, |
| 963 kStackUnwindSpace, | 1016 kStackUnwindSpace, |
| 1017 spill_offset, |
| 964 returns_handle, | 1018 returns_handle, |
| 965 kFastApiCallArguments + 1); | 1019 kFastApiCallArguments + 1); |
| 966 } | 1020 } |
| 967 | 1021 |
| 968 | 1022 |
| 969 class CallInterceptorCompiler BASE_EMBEDDED { | 1023 class CallInterceptorCompiler BASE_EMBEDDED { |
| 970 public: | 1024 public: |
| 971 CallInterceptorCompiler(StubCompiler* stub_compiler, | 1025 CallInterceptorCompiler(StubCompiler* stub_compiler, |
| 972 const ParameterCount& arguments, | 1026 const ParameterCount& arguments, |
| 973 Register name, | 1027 Register name, |
| (...skipping 11 matching lines...) Expand all Loading... |
| 985 Register receiver, | 1039 Register receiver, |
| 986 Register scratch1, | 1040 Register scratch1, |
| 987 Register scratch2, | 1041 Register scratch2, |
| 988 Register scratch3, | 1042 Register scratch3, |
| 989 Label* miss) { | 1043 Label* miss) { |
| 990 ASSERT(holder->HasNamedInterceptor()); | 1044 ASSERT(holder->HasNamedInterceptor()); |
| 991 ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); | 1045 ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined()); |
| 992 | 1046 |
| 993 // Check that the receiver isn't a smi. | 1047 // Check that the receiver isn't a smi. |
| 994 __ JumpIfSmi(receiver, miss); | 1048 __ JumpIfSmi(receiver, miss); |
| 1049 |
| 995 CallOptimization optimization(lookup); | 1050 CallOptimization optimization(lookup); |
| 996 if (optimization.is_constant_call()) { | 1051 if (optimization.is_constant_call()) { |
| 997 CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3, | 1052 CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3, |
| 998 holder, lookup, name, optimization, miss); | 1053 holder, lookup, name, optimization, miss); |
| 999 } else { | 1054 } else { |
| 1000 CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3, | 1055 CompileRegular(masm, object, receiver, scratch1, scratch2, scratch3, |
| 1001 name, holder, miss); | 1056 name, holder, miss); |
| 1002 } | 1057 } |
| 1003 } | 1058 } |
| 1004 | 1059 |
| 1005 private: | 1060 private: |
| 1006 void CompileCacheable(MacroAssembler* masm, | 1061 void CompileCacheable(MacroAssembler* masm, |
| 1007 Handle<JSObject> object, | 1062 Handle<JSObject> object, |
| 1008 Register receiver, | 1063 Register receiver, |
| 1009 Register scratch1, | 1064 Register scratch1, |
| 1010 Register scratch2, | 1065 Register scratch2, |
| 1011 Register scratch3, | 1066 Register scratch3, |
| 1012 Handle<JSObject> interceptor_holder, | 1067 Handle<JSObject> interceptor_holder, |
| 1013 LookupResult* lookup, | 1068 LookupResult* lookup, |
| 1014 Handle<Name> name, | 1069 Handle<Name> name, |
| 1015 const CallOptimization& optimization, | 1070 const CallOptimization& optimization, |
| 1016 Label* miss_label) { | 1071 Label* miss_label) { |
| 1017 ASSERT(optimization.is_constant_call()); | 1072 ASSERT(optimization.is_constant_call()); |
| 1018 ASSERT(!lookup->holder()->IsGlobalObject()); | 1073 ASSERT(!lookup->holder()->IsGlobalObject()); |
| 1074 |
| 1019 Counters* counters = masm->isolate()->counters(); | 1075 Counters* counters = masm->isolate()->counters(); |
| 1020 int depth1 = kInvalidProtoDepth; | 1076 int depth1 = kInvalidProtoDepth; |
| 1021 int depth2 = kInvalidProtoDepth; | 1077 int depth2 = kInvalidProtoDepth; |
| 1022 bool can_do_fast_api_call = false; | 1078 bool can_do_fast_api_call = false; |
| 1079 |
| 1023 if (optimization.is_simple_api_call() && | 1080 if (optimization.is_simple_api_call() && |
| 1024 !lookup->holder()->IsGlobalObject()) { | 1081 !lookup->holder()->IsGlobalObject()) { |
| 1025 depth1 = optimization.GetPrototypeDepthOfExpectedType( | 1082 depth1 = optimization.GetPrototypeDepthOfExpectedType( |
| 1026 object, interceptor_holder); | 1083 object, interceptor_holder); |
| 1027 if (depth1 == kInvalidProtoDepth) { | 1084 if (depth1 == kInvalidProtoDepth) { |
| 1028 depth2 = optimization.GetPrototypeDepthOfExpectedType( | 1085 depth2 = optimization.GetPrototypeDepthOfExpectedType( |
| 1029 interceptor_holder, Handle<JSObject>(lookup->holder())); | 1086 interceptor_holder, Handle<JSObject>(lookup->holder())); |
| 1030 } | 1087 } |
| 1031 can_do_fast_api_call = | 1088 can_do_fast_api_call = |
| 1032 depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth; | 1089 depth1 != kInvalidProtoDepth || depth2 != kInvalidProtoDepth; |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1080 } else { | 1137 } else { |
| 1081 CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) | 1138 CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state_) |
| 1082 ? CALL_AS_FUNCTION | 1139 ? CALL_AS_FUNCTION |
| 1083 : CALL_AS_METHOD; | 1140 : CALL_AS_METHOD; |
| 1084 Handle<JSFunction> function = optimization.constant_function(); | 1141 Handle<JSFunction> function = optimization.constant_function(); |
| 1085 ParameterCount expected(function); | 1142 ParameterCount expected(function); |
| 1086 __ InvokeFunction(function, expected, arguments_, | 1143 __ InvokeFunction(function, expected, arguments_, |
| 1087 JUMP_FUNCTION, NullCallWrapper(), call_kind); | 1144 JUMP_FUNCTION, NullCallWrapper(), call_kind); |
| 1088 } | 1145 } |
| 1089 | 1146 |
| 1090 // Deferred code for fast API call case---clean preallocated space. | 1147 // Deferred code for fast API call case, clean preallocated space. |
| 1091 if (can_do_fast_api_call) { | 1148 if (can_do_fast_api_call) { |
| 1092 __ bind(&miss_cleanup); | 1149 __ Bind(&miss_cleanup); |
| 1093 FreeSpaceForFastApiCall(masm); | 1150 FreeSpaceForFastApiCall(masm); |
| 1094 __ b(miss_label); | 1151 __ B(miss_label); |
| 1095 } | 1152 } |
| 1096 | 1153 |
| 1097 // Invoke a regular function. | 1154 // Invoke a regular function. |
| 1098 __ bind(®ular_invoke); | 1155 __ Bind(®ular_invoke); |
| 1099 if (can_do_fast_api_call) { | 1156 if (can_do_fast_api_call) { |
| 1100 FreeSpaceForFastApiCall(masm); | 1157 FreeSpaceForFastApiCall(masm); |
| 1101 } | 1158 } |
| 1102 } | 1159 } |
| 1103 | 1160 |
| 1104 void CompileRegular(MacroAssembler* masm, | 1161 void CompileRegular(MacroAssembler* masm, |
| 1105 Handle<JSObject> object, | 1162 Handle<JSObject> object, |
| 1106 Register receiver, | 1163 Register receiver, |
| 1107 Register scratch1, | 1164 Register scratch1, |
| 1108 Register scratch2, | 1165 Register scratch2, |
| 1109 Register scratch3, | 1166 Register scratch3, |
| 1110 Handle<Name> name, | 1167 Handle<Name> name, |
| 1111 Handle<JSObject> interceptor_holder, | 1168 Handle<JSObject> interceptor_holder, |
| 1112 Label* miss_label) { | 1169 Label* miss_label) { |
| 1113 Register holder = | 1170 Register holder = |
| 1114 stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, | 1171 stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder, |
| 1115 scratch1, scratch2, scratch3, | 1172 scratch1, scratch2, scratch3, |
| 1116 name, miss_label); | 1173 name, miss_label); |
| 1117 | 1174 |
| 1118 // Call a runtime function to load the interceptor property. | 1175 // Call a runtime function to load the interceptor property. |
| 1119 FrameScope scope(masm, StackFrame::INTERNAL); | 1176 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1120 // Save the name_ register across the call. | 1177 // The name_ register must be preserved across the call. |
| 1121 __ push(name_); | 1178 __ Push(name_); |
| 1122 PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder); | 1179 PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder); |
| 1123 __ CallExternalReference( | 1180 __ CallExternalReference( |
| 1124 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), | 1181 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall), |
| 1125 masm->isolate()), | 1182 masm->isolate()), |
| 1126 6); | 1183 6); |
| 1127 // Restore the name_ register. | 1184 __ Pop(name_); |
| 1128 __ pop(name_); | |
| 1129 // Leave the internal frame. | |
| 1130 } | 1185 } |
| 1131 | 1186 |
| 1187 |
| 1132 void LoadWithInterceptor(MacroAssembler* masm, | 1188 void LoadWithInterceptor(MacroAssembler* masm, |
| 1133 Register receiver, | 1189 Register receiver, |
| 1134 Register holder, | 1190 Register holder, |
| 1135 Handle<JSObject> holder_obj, | 1191 Handle<JSObject> holder_obj, |
| 1136 Register scratch, | 1192 Register scratch, |
| 1137 Label* interceptor_succeeded) { | 1193 Label* interceptor_succeeded) { |
| 1138 { | 1194 { |
| 1139 FrameScope scope(masm, StackFrame::INTERNAL); | 1195 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1140 __ Push(holder, name_); | 1196 __ Push(holder, name_); |
| 1141 CompileCallLoadPropertyWithInterceptor(masm, | 1197 CompileCallLoadPropertyWithInterceptor(masm, |
| 1142 receiver, | 1198 receiver, |
| 1143 holder, | 1199 holder, |
| 1144 name_, | 1200 name_, |
| 1145 holder_obj); | 1201 holder_obj); |
| 1146 __ pop(name_); // Restore the name. | 1202 __ Pop(name_, receiver); |
| 1147 __ pop(receiver); // Restore the holder. | |
| 1148 } | 1203 } |
| 1204 |
| 1149 // If interceptor returns no-result sentinel, call the constant function. | 1205 // If interceptor returns no-result sentinel, call the constant function. |
| 1150 __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex); | 1206 __ JumpIfNotRoot(x0, |
| 1151 __ cmp(r0, scratch); | 1207 Heap::kNoInterceptorResultSentinelRootIndex, |
| 1152 __ b(ne, interceptor_succeeded); | 1208 interceptor_succeeded); |
| 1153 } | 1209 } |
| 1154 | 1210 |
| 1155 StubCompiler* stub_compiler_; | 1211 StubCompiler* stub_compiler_; |
| 1156 const ParameterCount& arguments_; | 1212 const ParameterCount& arguments_; |
| 1157 Register name_; | 1213 Register name_; |
| 1158 Code::ExtraICState extra_ic_state_; | 1214 Code::ExtraICState extra_ic_state_; |
| 1159 }; | 1215 }; |
| 1160 | 1216 |
| 1161 | |
| 1162 // Calls GenerateCheckPropertyCell for each global object in the prototype chain | |
| 1163 // from object to (but not including) holder. | |
| 1164 static void GenerateCheckPropertyCells(MacroAssembler* masm, | |
| 1165 Handle<JSObject> object, | |
| 1166 Handle<JSObject> holder, | |
| 1167 Handle<Name> name, | |
| 1168 Register scratch, | |
| 1169 Label* miss) { | |
| 1170 Handle<JSObject> current = object; | |
| 1171 while (!current.is_identical_to(holder)) { | |
| 1172 if (current->IsGlobalObject()) { | |
| 1173 GenerateCheckPropertyCell(masm, | |
| 1174 Handle<GlobalObject>::cast(current), | |
| 1175 name, | |
| 1176 scratch, | |
| 1177 miss); | |
| 1178 } | |
| 1179 current = Handle<JSObject>(JSObject::cast(current->GetPrototype())); | |
| 1180 } | |
| 1181 } | |
| 1182 | |
| 1183 | |
| 1184 // Convert and store int passed in register ival to IEEE 754 single precision | |
| 1185 // floating point value at memory location (dst + 4 * wordoffset) | |
| 1186 // If VFP3 is available use it for conversion. | |
| 1187 static void StoreIntAsFloat(MacroAssembler* masm, | |
| 1188 Register dst, | |
| 1189 Register wordoffset, | |
| 1190 Register ival, | |
| 1191 Register scratch1) { | |
| 1192 __ vmov(s0, ival); | |
| 1193 __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); | |
| 1194 __ vcvt_f32_s32(s0, s0); | |
| 1195 __ vstr(s0, scratch1, 0); | |
| 1196 } | |
| 1197 | |
| 1198 | |
| 1199 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { | 1217 void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { |
| 1200 __ Jump(code, RelocInfo::CODE_TARGET); | 1218 __ Jump(code, RelocInfo::CODE_TARGET); |
| 1201 } | 1219 } |
| 1202 | 1220 |
| 1203 | 1221 |
| 1204 #undef __ | 1222 #undef __ |
| 1205 #define __ ACCESS_MASM(masm()) | 1223 #define __ ACCESS_MASM(masm()) |
| 1206 | 1224 |
| 1207 | 1225 |
| 1208 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, | 1226 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, |
| 1209 Register object_reg, | 1227 Register object_reg, |
| 1210 Handle<JSObject> holder, | 1228 Handle<JSObject> holder, |
| 1211 Register holder_reg, | 1229 Register holder_reg, |
| 1212 Register scratch1, | 1230 Register scratch1, |
| 1213 Register scratch2, | 1231 Register scratch2, |
| 1214 Handle<Name> name, | 1232 Handle<Name> name, |
| 1215 int save_at_depth, | 1233 int save_at_depth, |
| 1216 Label* miss, | 1234 Label* miss, |
| 1217 PrototypeCheckType check) { | 1235 PrototypeCheckType check) { |
| 1218 Handle<JSObject> first = object; | 1236 Handle<JSObject> first = object; |
| 1219 // Make sure there's no overlap between holder and object registers. | 1237 |
| 1220 ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); | 1238 // object_reg and holder_reg registers can alias. |
| 1221 ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) | 1239 ASSERT(!AreAliased(object_reg, scratch1, scratch2)); |
| 1222 && !scratch2.is(scratch1)); | 1240 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
| 1223 | 1241 |
| 1224 // Keep track of the current object in register reg. | 1242 // Keep track of the current object in register reg. |
| 1225 Register reg = object_reg; | 1243 Register reg = object_reg; |
| 1226 int depth = 0; | 1244 int depth = 0; |
| 1227 | 1245 |
| 1228 if (save_at_depth == depth) { | 1246 if (save_at_depth == depth) { |
| 1229 __ str(reg, MemOperand(sp)); | 1247 __ Poke(reg, 0); |
| 1230 } | 1248 } |
| 1231 | 1249 |
| 1232 // Check the maps in the prototype chain. | 1250 // Check the maps in the prototype chain. |
| 1233 // Traverse the prototype chain from the object and do map checks. | 1251 // Traverse the prototype chain from the object and do map checks. |
| 1234 Handle<JSObject> current = object; | 1252 Handle<JSObject> current = object; |
| 1235 while (!current.is_identical_to(holder)) { | 1253 while (!current.is_identical_to(holder)) { |
| 1236 ++depth; | 1254 ++depth; |
| 1237 | 1255 |
| 1238 // Only global objects and objects that do not require access | 1256 // Only global objects and objects that do not require access |
| 1239 // checks are allowed in stubs. | 1257 // checks are allowed in stubs. |
| 1240 ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); | 1258 ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); |
| 1241 | 1259 |
| 1242 Handle<JSObject> prototype(JSObject::cast(current->GetPrototype())); | 1260 Handle<JSObject> prototype(JSObject::cast(current->GetPrototype())); |
| 1243 if (!current->HasFastProperties() && | 1261 if (!current->HasFastProperties() && |
| 1244 !current->IsJSGlobalObject() && | 1262 !current->IsJSGlobalObject() && |
| 1245 !current->IsJSGlobalProxy()) { | 1263 !current->IsJSGlobalProxy()) { |
| 1246 if (!name->IsUniqueName()) { | 1264 if (!name->IsUniqueName()) { |
| 1247 ASSERT(name->IsString()); | 1265 ASSERT(name->IsString()); |
| 1248 name = factory()->InternalizeString(Handle<String>::cast(name)); | 1266 name = factory()->InternalizeString(Handle<String>::cast(name)); |
| 1249 } | 1267 } |
| 1250 ASSERT(current->property_dictionary()->FindEntry(*name) == | 1268 ASSERT(current->property_dictionary()->FindEntry(*name) == |
| 1251 NameDictionary::kNotFound); | 1269 NameDictionary::kNotFound); |
| 1252 | 1270 |
| 1253 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, | 1271 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, |
| 1254 scratch1, scratch2); | 1272 scratch1, scratch2); |
| 1255 | 1273 |
| 1256 __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); | 1274 __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1257 reg = holder_reg; // From now on the object will be in holder_reg. | 1275 reg = holder_reg; // From now on the object will be in holder_reg. |
| 1258 __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); | 1276 __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
| 1259 } else { | 1277 } else { |
| 1260 Register map_reg = scratch1; | 1278 Register map_reg = scratch1; |
| 1261 if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { | 1279 // TODO(jbramley): Skip this load when we don't need the map. |
| 1280 __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 1281 |
| 1282 if (!current.is_identical_to(first) || (check == CHECK_ALL_MAPS)) { |
| 1262 Handle<Map> current_map(current->map()); | 1283 Handle<Map> current_map(current->map()); |
| 1263 // CheckMap implicitly loads the map of |reg| into |map_reg|. | 1284 __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
| 1264 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); | |
| 1265 } else { | |
| 1266 __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | |
| 1267 } | 1285 } |
| 1268 | 1286 |
| 1269 // Check access rights to the global object. This has to happen after | 1287 // Check access rights to the global object. This has to happen after |
| 1270 // the map check so that we know that the object is actually a global | 1288 // the map check so that we know that the object is actually a global |
| 1271 // object. | 1289 // object. |
| 1272 if (current->IsJSGlobalProxy()) { | 1290 if (current->IsJSGlobalProxy()) { |
| 1273 __ CheckAccessGlobalProxy(reg, scratch2, miss); | 1291 __ CheckAccessGlobalProxy(reg, scratch2, miss); |
| 1274 } | 1292 } |
| 1275 reg = holder_reg; // From now on the object will be in holder_reg. | 1293 reg = holder_reg; // From now on the object will be in holder_reg. |
| 1276 | 1294 |
| 1277 if (heap()->InNewSpace(*prototype)) { | 1295 if (heap()->InNewSpace(*prototype)) { |
| 1278 // The prototype is in new space; we cannot store a reference to it | 1296 // The prototype is in new space; we cannot store a reference to it |
| 1279 // in the code. Load it from the map. | 1297 // in the code. Load it from the map. |
| 1280 __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); | 1298 __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
| 1281 } else { | 1299 } else { |
| 1282 // The prototype is in old space; load it directly. | 1300 // The prototype is in old space; load it directly. |
| 1283 __ mov(reg, Operand(prototype)); | 1301 __ Mov(reg, Operand(prototype)); |
| 1284 } | 1302 } |
| 1285 } | 1303 } |
| 1286 | 1304 |
| 1287 if (save_at_depth == depth) { | 1305 if (save_at_depth == depth) { |
| 1288 __ str(reg, MemOperand(sp)); | 1306 __ Poke(reg, 0); |
| 1289 } | 1307 } |
| 1290 | 1308 |
| 1291 // Go to the next object in the prototype chain. | 1309 // Go to the next object in the prototype chain. |
| 1292 current = prototype; | 1310 current = prototype; |
| 1293 } | 1311 } |
| 1294 | 1312 |
| 1295 // Log the check depth. | 1313 // Log the check depth. |
| 1296 LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); | 1314 LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); |
| 1297 | 1315 |
| 1316 // Check the holder map. |
| 1298 if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { | 1317 if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { |
| 1299 // Check the holder map. | 1318 // Check the holder map. |
| 1300 __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss, | 1319 __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss, |
| 1301 DONT_DO_SMI_CHECK); | 1320 DONT_DO_SMI_CHECK); |
| 1302 } | 1321 } |
| 1303 | 1322 |
| 1304 // Perform security check for access to the global object. | 1323 // Perform security check for access to the global object. |
| 1305 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); | 1324 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); |
| 1306 if (holder->IsJSGlobalProxy()) { | 1325 if (holder->IsJSGlobalProxy()) { |
| 1307 __ CheckAccessGlobalProxy(reg, scratch1, miss); | 1326 __ CheckAccessGlobalProxy(reg, scratch1, miss); |
| 1308 } | 1327 } |
| 1309 | 1328 |
| 1310 // If we've skipped any global objects, it's not enough to verify that | 1329 // If we've skipped any global objects, it's not enough to verify that |
| 1311 // their maps haven't changed. We also need to check that the property | 1330 // their maps haven't changed. We also need to check that the property |
| 1312 // cell for the property is still empty. | 1331 // cell for the property is still empty. |
| 1313 GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss); | 1332 GenerateCheckPropertyCells(masm(), object, holder, name, |
| 1333 scratch1, scratch2, miss); |
| 1314 | 1334 |
| 1315 // Return the register containing the holder. | 1335 // Return the register containing the holder. |
| 1316 return reg; | 1336 return reg; |
| 1317 } | 1337 } |
| 1318 | 1338 |
| 1319 | 1339 |
| 1320 void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, | 1340 void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, |
| 1321 Label* miss) { | 1341 Label* miss) { |
| 1322 if (!miss->is_unused()) { | 1342 if (!miss->is_unused()) { |
| 1323 __ b(success); | 1343 __ B(success); |
| 1324 __ bind(miss); | 1344 __ Bind(miss); |
| 1325 TailCallBuiltin(masm(), MissBuiltin(kind())); | 1345 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 1326 } | 1346 } |
| 1327 } | 1347 } |
| 1328 | 1348 |
| 1329 | 1349 |
| 1330 Register BaseLoadStubCompiler::CallbackHandlerFrontend( | 1350 Register BaseLoadStubCompiler::CallbackHandlerFrontend( |
| 1331 Handle<JSObject> object, | 1351 Handle<JSObject> object, |
| 1332 Register object_reg, | 1352 Register object_reg, |
| 1333 Handle<JSObject> holder, | 1353 Handle<JSObject> holder, |
| 1334 Handle<Name> name, | 1354 Handle<Name> name, |
| 1335 Label* success, | 1355 Label* success, |
| 1336 Handle<ExecutableAccessorInfo> callback) { | 1356 Handle<ExecutableAccessorInfo> callback) { |
| 1337 Label miss; | 1357 Label miss; |
| 1338 | 1358 |
| 1339 Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); | 1359 Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); |
| 1340 | 1360 |
| 1361 // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so |
| 1362 // we can't use it below, but that isn't very obvious. Is there a better way |
| 1363 // of handling this? |
| 1364 |
| 1341 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { | 1365 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { |
| 1342 ASSERT(!reg.is(scratch2())); | 1366 ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4())); |
| 1343 ASSERT(!reg.is(scratch3())); | |
| 1344 ASSERT(!reg.is(scratch4())); | |
| 1345 | 1367 |
| 1346 // Load the properties dictionary. | 1368 // Load the properties dictionary. |
| 1347 Register dictionary = scratch4(); | 1369 Register dictionary = scratch4(); |
| 1348 __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); | 1370 __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); |
| 1349 | 1371 |
| 1350 // Probe the dictionary. | 1372 // Probe the dictionary. |
| 1351 Label probe_done; | 1373 Label probe_done; |
| 1352 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), | 1374 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), |
| 1353 &miss, | 1375 &miss, |
| 1354 &probe_done, | 1376 &probe_done, |
| 1355 dictionary, | 1377 dictionary, |
| 1356 this->name(), | 1378 this->name(), |
| 1357 scratch2(), | 1379 scratch2(), |
| 1358 scratch3()); | 1380 scratch3()); |
| 1359 __ bind(&probe_done); | 1381 __ Bind(&probe_done); |
| 1360 | 1382 |
| 1361 // If probing finds an entry in the dictionary, scratch3 contains the | 1383 // If probing finds an entry in the dictionary, scratch3 contains the |
| 1362 // pointer into the dictionary. Check that the value is the callback. | 1384 // pointer into the dictionary. Check that the value is the callback. |
| 1363 Register pointer = scratch3(); | 1385 Register pointer = scratch3(); |
| 1364 const int kElementsStartOffset = NameDictionary::kHeaderSize + | 1386 const int kElementsStartOffset = NameDictionary::kHeaderSize + |
| 1365 NameDictionary::kElementsStartIndex * kPointerSize; | 1387 NameDictionary::kElementsStartIndex * kPointerSize; |
| 1366 const int kValueOffset = kElementsStartOffset + kPointerSize; | 1388 const int kValueOffset = kElementsStartOffset + kPointerSize; |
| 1367 __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); | 1389 __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); |
| 1368 __ cmp(scratch2(), Operand(callback)); | 1390 __ Cmp(scratch2(), Operand(callback)); |
| 1369 __ b(ne, &miss); | 1391 __ B(ne, &miss); |
| 1370 } | 1392 } |
| 1371 | 1393 |
| 1372 HandlerFrontendFooter(success, &miss); | 1394 HandlerFrontendFooter(success, &miss); |
| 1373 return reg; | 1395 return reg; |
| 1374 } | 1396 } |
| 1375 | 1397 |
| 1376 | 1398 |
| 1377 void BaseLoadStubCompiler::NonexistentHandlerFrontend( | 1399 void BaseLoadStubCompiler::NonexistentHandlerFrontend( |
| 1378 Handle<JSObject> object, | 1400 Handle<JSObject> object, |
| 1379 Handle<JSObject> last, | 1401 Handle<JSObject> last, |
| 1380 Handle<Name> name, | 1402 Handle<Name> name, |
| 1381 Label* success, | 1403 Label* success, |
| 1382 Handle<GlobalObject> global) { | 1404 Handle<GlobalObject> global) { |
| 1383 Label miss; | 1405 Label miss; |
| 1384 | 1406 |
| 1385 HandlerFrontendHeader(object, receiver(), last, name, &miss); | 1407 HandlerFrontendHeader(object, receiver(), last, name, &miss); |
| 1386 | 1408 |
| 1387 // If the last object in the prototype chain is a global object, | 1409 // If the last object in the prototype chain is a global object, |
| 1388 // check that the global property cell is empty. | 1410 // check that the global property cell is empty. |
| 1389 if (!global.is_null()) { | 1411 if (!global.is_null()) { |
| 1390 GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); | 1412 GenerateCheckPropertyCell(masm(), global, name, |
| 1413 scratch1(), scratch2(), &miss); |
| 1391 } | 1414 } |
| 1392 | 1415 |
| 1393 HandlerFrontendFooter(success, &miss); | 1416 HandlerFrontendFooter(success, &miss); |
| 1394 } | 1417 } |
| 1395 | 1418 |
| 1396 | 1419 |
| 1397 void BaseLoadStubCompiler::GenerateLoadField(Register reg, | 1420 void BaseLoadStubCompiler::GenerateLoadField(Register reg, |
| 1398 Handle<JSObject> holder, | 1421 Handle<JSObject> holder, |
| 1399 PropertyIndex field, | 1422 PropertyIndex field, |
| 1400 Representation representation) { | 1423 Representation representation) { |
| 1401 if (!reg.is(receiver())) __ mov(receiver(), reg); | 1424 __ Mov(receiver(), reg); |
| 1402 if (kind() == Code::LOAD_IC) { | 1425 if (kind() == Code::LOAD_IC) { |
| 1403 LoadFieldStub stub(field.is_inobject(holder), | 1426 LoadFieldStub stub(field.is_inobject(holder), |
| 1404 field.translate(holder), | 1427 field.translate(holder), |
| 1405 representation); | 1428 representation); |
| 1406 GenerateTailCall(masm(), stub.GetCode(isolate())); | 1429 GenerateTailCall(masm(), stub.GetCode(isolate())); |
| 1407 } else { | 1430 } else { |
| 1408 KeyedLoadFieldStub stub(field.is_inobject(holder), | 1431 KeyedLoadFieldStub stub(field.is_inobject(holder), |
| 1409 field.translate(holder), | 1432 field.translate(holder), |
| 1410 representation); | 1433 representation); |
| 1411 GenerateTailCall(masm(), stub.GetCode(isolate())); | 1434 GenerateTailCall(masm(), stub.GetCode(isolate())); |
| 1412 } | 1435 } |
| 1413 } | 1436 } |
| 1414 | 1437 |
| 1415 | 1438 |
| 1416 void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { | 1439 void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { |
| 1417 // Return the constant value. | 1440 // Return the constant value. |
| 1418 __ LoadHeapObject(r0, value); | 1441 __ LoadHeapObject(x0, value); |
| 1419 __ Ret(); | 1442 __ Ret(); |
| 1420 } | 1443 } |
| 1421 | 1444 |
| 1422 | 1445 |
| 1423 void BaseLoadStubCompiler::GenerateLoadCallback( | 1446 void BaseLoadStubCompiler::GenerateLoadCallback( |
| 1424 Register reg, | 1447 Register reg, |
| 1425 Handle<ExecutableAccessorInfo> callback) { | 1448 Handle<ExecutableAccessorInfo> callback) { |
| 1426 // Build AccessorInfo::args_ list on the stack and push property name below | 1449 // Build ExecutableAccessorInfo::args_ list on the stack and push property |
| 1427 // the exit frame to make GC aware of them and store pointers to them. | 1450 // name below the exit frame to make GC aware of them and store pointers to |
| 1428 __ push(receiver()); | 1451 // them. |
| 1429 __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_ | 1452 __ Push(receiver()); |
| 1453 Register args_addr = scratch2(); |
| 1454 __ Mov(args_addr, __ StackPointer()); |
| 1455 |
| 1430 if (heap()->InNewSpace(callback->data())) { | 1456 if (heap()->InNewSpace(callback->data())) { |
| 1431 __ Move(scratch3(), callback); | 1457 __ Mov(scratch3(), Operand(callback)); |
| 1432 __ ldr(scratch3(), FieldMemOperand(scratch3(), | 1458 __ Ldr(scratch3(), FieldMemOperand(scratch3(), |
| 1433 ExecutableAccessorInfo::kDataOffset)); | 1459 ExecutableAccessorInfo::kDataOffset)); |
| 1434 } else { | 1460 } else { |
| 1435 __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); | 1461 __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate()))); |
| 1436 } | 1462 } |
| 1463 // TODO(jbramley): Find another scratch register and combine the pushes |
| 1464 // together. Can we use scratch1() and scratch2() here? |
| 1437 __ Push(reg, scratch3()); | 1465 __ Push(reg, scratch3()); |
| 1438 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); | 1466 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); |
| 1439 __ mov(scratch4(), scratch3()); | 1467 __ Mov(scratch4(), Operand(ExternalReference::isolate_address(isolate()))); |
| 1440 __ Push(scratch3(), scratch4()); | 1468 __ Push(scratch3(), scratch3(), scratch4(), name()); |
| 1441 __ mov(scratch4(), | |
| 1442 Operand(ExternalReference::isolate_address(isolate()))); | |
| 1443 __ Push(scratch4(), name()); | |
| 1444 __ mov(r0, sp); // r0 = Handle<Name> | |
| 1445 | 1469 |
| 1470 // Pass the Handle<Name> of the property name to the runtime. |
| 1471 __ Mov(x0, __ StackPointer()); |
| 1472 |
| 1473 FrameScope frame_scope(masm(), StackFrame::MANUAL); |
| 1446 const int kApiStackSpace = 1; | 1474 const int kApiStackSpace = 1; |
| 1447 FrameScope frame_scope(masm(), StackFrame::MANUAL); | 1475 __ EnterExitFrame(false, scratch4(), |
| 1448 __ EnterExitFrame(false, kApiStackSpace); | 1476 kApiStackSpace + MacroAssembler::kCallApiFunctionSpillSpace); |
| 1449 | 1477 |
| 1450 // Create AccessorInfo instance on the stack above the exit frame with | 1478 // Create ExecutableAccessorInfo instance on the stack above the exit frame |
| 1451 // scratch2 (internal::Object** args_) as the data. | 1479 // before the return address. ExecutableAccessorInfo has only one field: the |
| 1452 __ str(scratch2(), MemOperand(sp, 1 * kPointerSize)); | 1480 // address of args_. |
| 1453 __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& | 1481 __ Poke(args_addr, 1 * kPointerSize); |
| 1454 | 1482 |
| 1483 // Get the address of ExecutableAccessorInfo instance and pass it to the |
| 1484 // runtime. |
| 1485 __ Add(x1, __ StackPointer(), 1 * kPointerSize); |
| 1486 |
| 1487 // CallApiFunctionAndReturn can spill registers inside the exit frame, after |
| 1488 // the return address and the ExecutableAccessorInfo instance. |
| 1489 const int spill_offset = 1 + kApiStackSpace; |
| 1490 |
| 1491 // After the call to the API function we need to free memory used for: |
| 1492 // - the holder |
| 1493 // - the callback data |
| 1494 // - the isolate |
| 1495 // - the property name |
| 1496 // - the receiver. |
| 1497 // |
| 1498 // The memory allocated inside the ExitFrame will be freed when we leave |
| 1499 // the ExitFrame in CallApiFunctionAndReturn. |
| 1455 const int kStackUnwindSpace = kFastApiCallArguments + 1; | 1500 const int kStackUnwindSpace = kFastApiCallArguments + 1; |
| 1501 |
| 1502 // Do the API call. |
| 1456 Address getter_address = v8::ToCData<Address>(callback->getter()); | 1503 Address getter_address = v8::ToCData<Address>(callback->getter()); |
| 1457 bool returns_handle = | 1504 bool returns_handle = !CallbackTable::ReturnsVoid(isolate(), getter_address); |
| 1458 !CallbackTable::ReturnsVoid(isolate(), getter_address); | |
| 1459 ApiFunction fun(getter_address); | 1505 ApiFunction fun(getter_address); |
| 1460 ExternalReference::Type type = | 1506 ExternalReference::Type type = |
| 1461 returns_handle ? | 1507 returns_handle ? |
| 1462 ExternalReference::DIRECT_GETTER_CALL : | 1508 ExternalReference::DIRECT_GETTER_CALL : |
| 1463 ExternalReference::DIRECT_GETTER_CALL_NEW; | 1509 ExternalReference::DIRECT_GETTER_CALL_NEW; |
| 1464 | |
| 1465 ExternalReference ref = ExternalReference(&fun, type, isolate()); | 1510 ExternalReference ref = ExternalReference(&fun, type, isolate()); |
| 1511 // TODO(jbramley): I don't know where '5' comes from, but this goes away at |
| 1512 // some point. |
| 1466 __ CallApiFunctionAndReturn(ref, | 1513 __ CallApiFunctionAndReturn(ref, |
| 1467 kStackUnwindSpace, | 1514 kStackUnwindSpace, |
| 1515 spill_offset, |
| 1468 returns_handle, | 1516 returns_handle, |
| 1469 5); | 1517 5); |
| 1470 } | 1518 } |
| 1471 | 1519 |
| 1472 | 1520 |
| 1473 void BaseLoadStubCompiler::GenerateLoadInterceptor( | 1521 void BaseLoadStubCompiler::GenerateLoadInterceptor( |
| 1474 Register holder_reg, | 1522 Register holder_reg, |
| 1475 Handle<JSObject> object, | 1523 Handle<JSObject> object, |
| 1476 Handle<JSObject> interceptor_holder, | 1524 Handle<JSObject> interceptor_holder, |
| 1477 LookupResult* lookup, | 1525 LookupResult* lookup, |
| 1478 Handle<Name> name) { | 1526 Handle<Name> name) { |
| 1527 ASSERT(!AreAliased(receiver(), this->name(), |
| 1528 scratch1(), scratch2(), scratch3())); |
| 1479 ASSERT(interceptor_holder->HasNamedInterceptor()); | 1529 ASSERT(interceptor_holder->HasNamedInterceptor()); |
| 1480 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); | 1530 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); |
| 1481 | 1531 |
| 1482 // So far the most popular follow ups for interceptor loads are FIELD | 1532 // So far the most popular follow ups for interceptor loads are FIELD |
| 1483 // and CALLBACKS, so inline only them, other cases may be added | 1533 // and CALLBACKS, so inline only them, other cases may be added later. |
| 1484 // later. | |
| 1485 bool compile_followup_inline = false; | 1534 bool compile_followup_inline = false; |
| 1486 if (lookup->IsFound() && lookup->IsCacheable()) { | 1535 if (lookup->IsFound() && lookup->IsCacheable()) { |
| 1487 if (lookup->IsField()) { | 1536 if (lookup->IsField()) { |
| 1488 compile_followup_inline = true; | 1537 compile_followup_inline = true; |
| 1489 } else if (lookup->type() == CALLBACKS && | 1538 } else if (lookup->type() == CALLBACKS && |
| 1490 lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { | 1539 lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { |
| 1491 ExecutableAccessorInfo* callback = | 1540 ExecutableAccessorInfo* callback = |
| 1492 ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); | 1541 ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); |
| 1493 compile_followup_inline = callback->getter() != NULL && | 1542 compile_followup_inline = callback->getter() != NULL && |
| 1494 callback->IsCompatibleReceiver(*object); | 1543 callback->IsCompatibleReceiver(*object); |
| 1495 } | 1544 } |
| 1496 } | 1545 } |
| 1497 | 1546 |
| 1498 if (compile_followup_inline) { | 1547 if (compile_followup_inline) { |
| 1499 // Compile the interceptor call, followed by inline code to load the | 1548 // Compile the interceptor call, followed by inline code to load the |
| 1500 // property from further up the prototype chain if the call fails. | 1549 // property from further up the prototype chain if the call fails. |
| 1501 // Check that the maps haven't changed. | 1550 // Check that the maps haven't changed. |
| 1502 ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); | 1551 ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); |
| 1503 | 1552 |
| 1504 // Preserve the receiver register explicitly whenever it is different from | 1553 // Preserve the receiver register explicitly whenever it is different from |
| 1505 // the holder and it is needed should the interceptor return without any | 1554 // the holder and it is needed should the interceptor return without any |
| 1506 // result. The CALLBACKS case needs the receiver to be passed into C++ code, | 1555 // result. The CALLBACKS case needs the receiver to be passed into C++ code, |
| 1507 // the FIELD case might cause a miss during the prototype check. | 1556 // the FIELD case might cause a miss during the prototype check. |
| 1508 bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); | 1557 bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); |
| 1509 bool must_preserve_receiver_reg = !receiver().is(holder_reg) && | 1558 bool must_preserve_receiver_reg = !receiver().Is(holder_reg) && |
| 1510 (lookup->type() == CALLBACKS || must_perfrom_prototype_check); | 1559 (lookup->type() == CALLBACKS || must_perfrom_prototype_check); |
| 1511 | 1560 |
| 1512 // Save necessary data before invoking an interceptor. | 1561 // Save necessary data before invoking an interceptor. |
| 1513 // Requires a frame to make GC aware of pushed pointers. | 1562 // Requires a frame to make GC aware of pushed pointers. |
| 1514 { | 1563 { |
| 1515 FrameScope frame_scope(masm(), StackFrame::INTERNAL); | 1564 FrameScope frame_scope(masm(), StackFrame::INTERNAL); |
| 1516 if (must_preserve_receiver_reg) { | 1565 if (must_preserve_receiver_reg) { |
| 1517 __ Push(receiver(), holder_reg, this->name()); | 1566 __ Push(receiver(), holder_reg, this->name()); |
| 1518 } else { | 1567 } else { |
| 1519 __ Push(holder_reg, this->name()); | 1568 __ Push(holder_reg, this->name()); |
| 1520 } | 1569 } |
| 1521 // Invoke an interceptor. Note: map checks from receiver to | 1570 // Invoke an interceptor. Note: map checks from receiver to |
| 1522 // interceptor's holder has been compiled before (see a caller | 1571 // interceptor's holder has been compiled before (see a caller |
| 1523 // of this method.) | 1572 // of this method.) |
| 1524 CompileCallLoadPropertyWithInterceptor(masm(), | 1573 CompileCallLoadPropertyWithInterceptor(masm(), |
| 1525 receiver(), | 1574 receiver(), |
| 1526 holder_reg, | 1575 holder_reg, |
| 1527 this->name(), | 1576 this->name(), |
| 1528 interceptor_holder); | 1577 interceptor_holder); |
| 1529 // Check if interceptor provided a value for property. If it's | 1578 // Check if interceptor provided a value for property. If it's |
| 1530 // the case, return immediately. | 1579 // the case, return immediately. |
| 1531 Label interceptor_failed; | 1580 Label interceptor_failed; |
| 1532 __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); | 1581 __ JumpIfRoot(x0, |
| 1533 __ cmp(r0, scratch1()); | 1582 Heap::kNoInterceptorResultSentinelRootIndex, |
| 1534 __ b(eq, &interceptor_failed); | 1583 &interceptor_failed); |
| 1535 frame_scope.GenerateLeaveFrame(); | 1584 frame_scope.GenerateLeaveFrame(); |
| 1536 __ Ret(); | 1585 __ Ret(); |
| 1537 | 1586 |
| 1538 __ bind(&interceptor_failed); | 1587 __ Bind(&interceptor_failed); |
| 1539 __ pop(this->name()); | |
| 1540 __ pop(holder_reg); | |
| 1541 if (must_preserve_receiver_reg) { | 1588 if (must_preserve_receiver_reg) { |
| 1542 __ pop(receiver()); | 1589 __ Pop(this->name(), holder_reg, receiver()); |
| 1590 } else { |
| 1591 __ Pop(this->name(), holder_reg); |
| 1543 } | 1592 } |
| 1544 // Leave the internal frame. | 1593 // Leave the internal frame. |
| 1545 } | 1594 } |
| 1546 | |
| 1547 GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); | 1595 GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); |
| 1548 } else { // !compile_followup_inline | 1596 } else { // !compile_followup_inline |
| 1549 // Call the runtime system to load the interceptor. | 1597 // Call the runtime system to load the interceptor. |
| 1550 // Check that the maps haven't changed. | 1598 // Check that the maps haven't changed. |
| 1551 PushInterceptorArguments(masm(), receiver(), holder_reg, | 1599 PushInterceptorArguments( |
| 1552 this->name(), interceptor_holder); | 1600 masm(), receiver(), holder_reg, this->name(), interceptor_holder); |
| 1553 | 1601 |
| 1554 ExternalReference ref = | 1602 ExternalReference ref = |
| 1555 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), | 1603 ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), |
| 1556 isolate()); | 1604 isolate()); |
| 1557 __ TailCallExternalReference(ref, 6, 1); | 1605 __ TailCallExternalReference(ref, 6, 1); |
| 1558 } | 1606 } |
| 1559 } | 1607 } |
| 1560 | 1608 |
| 1561 | 1609 |
| 1562 void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { | 1610 void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { |
| 1611 Register name_reg = x2; |
| 1612 |
| 1563 if (kind_ == Code::KEYED_CALL_IC) { | 1613 if (kind_ == Code::KEYED_CALL_IC) { |
| 1564 __ cmp(r2, Operand(name)); | 1614 __ Cmp(name_reg, Operand(name)); |
| 1565 __ b(ne, miss); | 1615 __ B(ne, miss); |
| 1566 } | 1616 } |
| 1567 } | 1617 } |
| 1568 | 1618 |
| 1569 | 1619 |
| 1620 // The receiver is loaded from the stack and left in the x0 register. |
| 1570 void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, | 1621 void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, |
| 1571 Handle<JSObject> holder, | 1622 Handle<JSObject> holder, |
| 1572 Handle<Name> name, | 1623 Handle<Name> name, |
| 1573 Label* miss) { | 1624 Label* miss) { |
| 1574 ASSERT(holder->IsGlobalObject()); | 1625 ASSERT(holder->IsGlobalObject()); |
| 1575 | 1626 |
| 1576 // Get the number of arguments. | |
| 1577 const int argc = arguments().immediate(); | 1627 const int argc = arguments().immediate(); |
| 1578 | 1628 |
| 1579 // Get the receiver from the stack. | 1629 // Get the receiver from the stack. |
| 1580 __ ldr(r0, MemOperand(sp, argc * kPointerSize)); | 1630 Register receiver = x0; |
| 1631 __ Peek(receiver, argc * kPointerSize); |
| 1581 | 1632 |
| 1582 // Check that the maps haven't changed. | 1633 // Check that the maps haven't changed. |
| 1583 __ JumpIfSmi(r0, miss); | 1634 __ JumpIfSmi(receiver, miss); |
| 1584 CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss); | 1635 CheckPrototypes(object, receiver, holder, x3, x1, x4, name, miss); |
| 1585 } | 1636 } |
| 1586 | 1637 |
| 1587 | 1638 |
| 1639 // Load the function object into x1 register. |
| 1588 void CallStubCompiler::GenerateLoadFunctionFromCell( | 1640 void CallStubCompiler::GenerateLoadFunctionFromCell( |
| 1589 Handle<JSGlobalPropertyCell> cell, | 1641 Handle<JSGlobalPropertyCell> cell, |
| 1590 Handle<JSFunction> function, | 1642 Handle<JSFunction> function, |
| 1591 Label* miss) { | 1643 Label* miss) { |
| 1592 // Get the value from the cell. | 1644 // Get the value from the cell. |
| 1593 __ mov(r3, Operand(cell)); | 1645 __ Mov(x3, Operand(cell)); |
| 1594 __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); | 1646 Register function_reg = x1; |
| 1647 __ Ldr(function_reg, FieldMemOperand(x3, JSGlobalPropertyCell::kValueOffset)); |
| 1595 | 1648 |
| 1596 // Check that the cell contains the same function. | 1649 // Check that the cell contains the same function. |
| 1597 if (heap()->InNewSpace(*function)) { | 1650 if (heap()->InNewSpace(*function)) { |
| 1598 // We can't embed a pointer to a function in new space so we have | 1651 // We can't embed a pointer to a function in new space so we have |
| 1599 // to verify that the shared function info is unchanged. This has | 1652 // to verify that the shared function info is unchanged. This has |
| 1600 // the nice side effect that multiple closures based on the same | 1653 // the nice side effect that multiple closures based on the same |
| 1601 // function can all use this call IC. Before we load through the | 1654 // function can all use this call IC. Before we load through the |
| 1602 // function, we have to verify that it still is a function. | 1655 // function, we have to verify that it still is a function. |
| 1603 __ JumpIfSmi(r1, miss); | 1656 __ JumpIfSmi(function_reg, miss); |
| 1604 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 1657 __ JumpIfNotObjectType(function_reg, x3, x3, JS_FUNCTION_TYPE, miss); |
| 1605 __ b(ne, miss); | |
| 1606 | 1658 |
| 1607 // Check the shared function info. Make sure it hasn't changed. | 1659 // Check the shared function info. Make sure it hasn't changed. |
| 1608 __ Move(r3, Handle<SharedFunctionInfo>(function->shared())); | 1660 __ Mov(x3, Operand(Handle<SharedFunctionInfo>(function->shared()))); |
| 1609 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1661 __ Ldr(x4, |
| 1610 __ cmp(r4, r3); | 1662 FieldMemOperand(function_reg, JSFunction::kSharedFunctionInfoOffset)); |
| 1663 __ Cmp(x4, x3); |
| 1611 } else { | 1664 } else { |
| 1612 __ cmp(r1, Operand(function)); | 1665 __ Cmp(function_reg, Operand(function)); |
| 1613 } | 1666 } |
| 1614 __ b(ne, miss); | 1667 __ B(ne, miss); |
| 1615 } | 1668 } |
| 1616 | 1669 |
| 1617 | 1670 |
| 1618 void CallStubCompiler::GenerateMissBranch() { | 1671 void CallStubCompiler::GenerateMissBranch() { |
| 1619 Handle<Code> code = | 1672 Handle<Code> code = |
| 1620 isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(), | 1673 isolate()->stub_cache()->ComputeCallMiss(arguments().immediate(), |
| 1621 kind_, | 1674 kind_, |
| 1622 extra_state_); | 1675 extra_state_); |
| 1623 __ Jump(code, RelocInfo::CODE_TARGET); | 1676 __ Jump(code, RelocInfo::CODE_TARGET); |
| 1624 } | 1677 } |
| 1625 | 1678 |
| 1626 | 1679 |
| 1627 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, | 1680 Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, |
| 1628 Handle<JSObject> holder, | 1681 Handle<JSObject> holder, |
| 1629 PropertyIndex index, | 1682 PropertyIndex index, |
| 1630 Handle<Name> name) { | 1683 Handle<Name> name) { |
| 1631 // ----------- S t a t e ------------- | 1684 // ----------- S t a t e ------------- |
| 1632 // -- r2 : name | 1685 // -- x2 : name |
| 1633 // -- lr : return address | 1686 // -- lr : return address |
| 1634 // ----------------------------------- | 1687 // ----------------------------------- |
| 1635 Label miss; | 1688 Label miss; |
| 1689 const int argc = arguments().immediate(); |
| 1636 | 1690 |
| 1637 GenerateNameCheck(name, &miss); | 1691 GenerateNameCheck(name, &miss); |
| 1638 | 1692 |
| 1639 const int argc = arguments().immediate(); | 1693 // Get the receiver of the function from the stack. |
| 1640 | 1694 Register receiver = x0; |
| 1641 // Get the receiver of the function from the stack into r0. | 1695 __ Peek(receiver, argc * kXRegSizeInBytes); |
| 1642 __ ldr(r0, MemOperand(sp, argc * kPointerSize)); | |
| 1643 // Check that the receiver isn't a smi. | 1696 // Check that the receiver isn't a smi. |
| 1644 __ JumpIfSmi(r0, &miss); | 1697 __ JumpIfSmi(receiver, &miss); |
| 1645 | 1698 |
| 1646 // Do the right check and compute the holder register. | 1699 // Do the right check and compute the holder register. |
| 1647 Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss); | 1700 Register holder_reg = CheckPrototypes( |
| 1648 GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder), | 1701 object, receiver, holder, x1, x3, x4, name, &miss); |
| 1649 index.translate(holder), Representation::Tagged()); | 1702 Register function = x1; |
| 1703 GenerateFastPropertyLoad(masm(), function, holder_reg, |
| 1704 index.is_inobject(holder), |
| 1705 index.translate(holder), |
| 1706 Representation::Tagged()); |
| 1650 | 1707 |
| 1651 GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_); | 1708 GenerateCallFunction( |
| 1709 masm(), object, arguments(), &miss, extra_state_, function, receiver, x3); |
| 1652 | 1710 |
| 1653 // Handle call cache miss. | 1711 // Handle call cache miss. |
| 1654 __ bind(&miss); | 1712 __ Bind(&miss); |
| 1655 GenerateMissBranch(); | 1713 GenerateMissBranch(); |
| 1656 | 1714 |
| 1657 // Return the generated code. | 1715 // Return the generated code. |
| 1658 return GetCode(Code::FIELD, name); | 1716 return GetCode(Code::FIELD, name); |
| 1659 } | 1717 } |
| 1660 | 1718 |
| 1661 | 1719 |
| 1662 Handle<Code> CallStubCompiler::CompileArrayPushCall( | 1720 Handle<Code> CallStubCompiler::CompileArrayPushCall( |
| 1663 Handle<Object> object, | 1721 Handle<Object> object, |
| 1664 Handle<JSObject> holder, | 1722 Handle<JSObject> holder, |
| 1665 Handle<JSGlobalPropertyCell> cell, | 1723 Handle<JSGlobalPropertyCell> cell, |
| 1666 Handle<JSFunction> function, | 1724 Handle<JSFunction> function, |
| 1667 Handle<String> name) { | 1725 Handle<String> name) { |
| 1668 // ----------- S t a t e ------------- | 1726 // ----------- S t a t e ------------- |
| 1669 // -- r2 : name | 1727 // -- x2 : name (Must be preserved on miss.) |
| 1670 // -- lr : return address | 1728 // -- lr : return address |
| 1671 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 1729 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 1672 // -- ... | 1730 // -- ... |
| 1673 // -- sp[argc * 4] : receiver | 1731 // -- sp[argc * 8] : receiver |
| 1674 // ----------------------------------- | 1732 // ----------------------------------- |
| 1675 | 1733 |
| 1676 // If object is not an array, bail out to regular call. | 1734 // If object is not an array, bail out to regular call. |
| 1677 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null(); | 1735 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null(); |
| 1678 | 1736 |
| 1679 Label miss; | 1737 Label miss; |
| 1738 Register result = x0; |
| 1739 const int argc = arguments().immediate(); |
| 1740 |
| 1680 GenerateNameCheck(name, &miss); | 1741 GenerateNameCheck(name, &miss); |
| 1681 | 1742 |
| 1682 Register receiver = r1; | |
| 1683 // Get the receiver from the stack | 1743 // Get the receiver from the stack |
| 1684 const int argc = arguments().immediate(); | 1744 Register receiver = x1; |
| 1685 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); | 1745 __ Peek(receiver, argc * kPointerSize); |
| 1686 | 1746 |
| 1687 // Check that the receiver isn't a smi. | 1747 // Check that the receiver isn't a smi. |
| 1688 __ JumpIfSmi(receiver, &miss); | 1748 __ JumpIfSmi(receiver, &miss); |
| 1689 | 1749 |
| 1690 // Check that the maps haven't changed. | 1750 // Check that the maps haven't changed. |
| 1691 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4, | 1751 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, x3, x0, x4, |
| 1692 name, &miss); | 1752 name, &miss); |
| 1693 | 1753 |
| 1694 if (argc == 0) { | 1754 if (argc == 0) { |
| 1695 // Nothing to do, just return the length. | 1755 // Nothing to do, just return the length. |
| 1696 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1756 __ Ldr(result, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1697 __ Drop(argc + 1); | 1757 __ Drop(argc + 1); |
| 1698 __ Ret(); | 1758 __ Ret(); |
| 1699 } else { | 1759 } else { |
| 1700 Label call_builtin; | 1760 Label call_builtin; |
| 1701 | 1761 |
| 1702 if (argc == 1) { // Otherwise fall through to call the builtin. | 1762 if (argc == 1) { // Otherwise fall through to call the builtin. |
| 1703 Label attempt_to_grow_elements, with_write_barrier, check_double; | 1763 Label attempt_to_grow_elements, with_write_barrier, check_double; |
| 1704 | 1764 |
| 1705 Register elements = r6; | 1765 // Note that even though we assign the array length to x0 and the value |
| 1706 Register end_elements = r5; | 1766 // to push in x4, they are not always live. Both x0 and x4 can be locally |
| 1767 // reused as scratch registers. |
| 1768 Register length = x0; |
| 1769 Register value = x4; |
| 1770 Register elements = x6; |
| 1771 Register end_elements = x5; |
| 1707 // Get the elements array of the object. | 1772 // Get the elements array of the object. |
| 1708 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); | 1773 __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| 1709 | 1774 |
| 1710 // Check that the elements are in fast mode and writable. | 1775 // Check that the elements are in fast mode and writable. |
| 1711 __ CheckMap(elements, | 1776 __ CheckMap(elements, |
| 1712 r0, | 1777 x0, |
| 1713 Heap::kFixedArrayMapRootIndex, | 1778 Heap::kFixedArrayMapRootIndex, |
| 1714 &check_double, | 1779 &check_double, |
| 1715 DONT_DO_SMI_CHECK); | 1780 DONT_DO_SMI_CHECK); |
| 1716 | 1781 |
| 1717 // Get the array's length into r0 and calculate new length. | 1782 // Get the array's length and calculate new length. |
| 1718 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1783 __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1719 __ add(r0, r0, Operand(Smi::FromInt(argc))); | 1784 STATIC_ASSERT(kSmiTag == 0); |
| 1720 | 1785 __ Add(length, length, Operand(Smi::FromInt(argc))); |
| 1721 // Get the elements' length. | |
| 1722 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
| 1723 | 1786 |
| 1724 // Check if we could survive without allocation. | 1787 // Check if we could survive without allocation. |
| 1725 __ cmp(r0, r4); | 1788 __ Ldr(x4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1726 __ b(gt, &attempt_to_grow_elements); | 1789 __ Cmp(length, x4); |
| 1790 __ B(gt, &attempt_to_grow_elements); |
| 1727 | 1791 |
| 1728 // Check if value is a smi. | 1792 // Check if value is a smi. |
| 1729 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); | 1793 __ Peek(value, (argc - 1) * kPointerSize); |
| 1730 __ JumpIfNotSmi(r4, &with_write_barrier); | 1794 __ JumpIfNotSmi(value, &with_write_barrier); |
| 1731 | 1795 |
| 1732 // Save new length. | 1796 // Save new length. |
| 1733 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1797 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1734 | 1798 |
| 1735 // Store the value. | 1799 // Store the value. |
| 1736 // We may need a register containing the address end_elements below, | 1800 // We may need a register containing the address end_elements below, |
| 1737 // so write back the value in end_elements. | 1801 // so write back the value in end_elements. |
| 1738 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); | 1802 __ Add(end_elements, elements, |
| 1803 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); |
| 1739 const int kEndElementsOffset = | 1804 const int kEndElementsOffset = |
| 1740 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; | 1805 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; |
| 1741 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | 1806 __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
| 1742 | 1807 |
| 1743 // Check for a smi. | 1808 // Check for a smi. |
| 1744 __ Drop(argc + 1); | 1809 __ Drop(argc + 1); |
| 1745 __ Ret(); | 1810 __ Ret(); |
| 1746 | 1811 |
| 1747 __ bind(&check_double); | 1812 __ Bind(&check_double); |
| 1748 | |
| 1749 // Check that the elements are in fast mode and writable. | 1813 // Check that the elements are in fast mode and writable. |
| 1750 __ CheckMap(elements, | 1814 __ CheckMap(elements, |
| 1751 r0, | 1815 x0, |
| 1752 Heap::kFixedDoubleArrayMapRootIndex, | 1816 Heap::kFixedDoubleArrayMapRootIndex, |
| 1753 &call_builtin, | 1817 &call_builtin, |
| 1754 DONT_DO_SMI_CHECK); | 1818 DONT_DO_SMI_CHECK); |
| 1755 | 1819 |
| 1756 // Get the array's length into r0 and calculate new length. | 1820 // Get the array's length and calculate new length. |
| 1757 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1821 Register old_length = x5; |
| 1758 __ add(r0, r0, Operand(Smi::FromInt(argc))); | 1822 __ Ldr(old_length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1759 | 1823 STATIC_ASSERT(kSmiTag == 0); |
| 1760 // Get the elements' length. | 1824 __ Add(length, old_length, Operand(Smi::FromInt(argc))); |
| 1761 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
| 1762 | 1825 |
| 1763 // Check if we could survive without allocation. | 1826 // Check if we could survive without allocation. |
| 1764 __ cmp(r0, r4); | 1827 __ Ldr(x4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1765 __ b(gt, &call_builtin); | 1828 __ Cmp(length, x4); |
| 1829 __ B(gt, &call_builtin); |
| 1766 | 1830 |
| 1767 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); | 1831 __ Peek(value, (argc - 1) * kPointerSize); |
| 1768 __ StoreNumberToDoubleElements(r4, r0, elements, r5, | 1832 __ StoreNumberToDoubleElements( |
| 1769 &call_builtin, argc * kDoubleSize); | 1833 value, old_length, elements, x3, d0, d1, |
| 1834 &call_builtin); |
| 1770 | 1835 |
| 1771 // Save new length. | 1836 // Save new length. |
| 1772 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1837 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1773 | 1838 |
| 1774 // Check for a smi. | 1839 // Check for a smi. |
| 1775 __ Drop(argc + 1); | 1840 __ Drop(argc + 1); |
| 1776 __ Ret(); | 1841 __ Ret(); |
| 1777 | 1842 |
| 1778 __ bind(&with_write_barrier); | |
| 1779 | 1843 |
| 1780 __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 1844 __ Bind(&with_write_barrier); |
| 1845 Register map = x3; |
| 1846 __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 1781 | 1847 |
| 1782 if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { | 1848 if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { |
| 1783 Label fast_object, not_fast_object; | 1849 Label fast_object, not_fast_object; |
| 1784 __ CheckFastObjectElements(r3, r7, ¬_fast_object); | 1850 __ CheckFastObjectElements(map, x7, ¬_fast_object); |
| 1785 __ jmp(&fast_object); | 1851 __ B(&fast_object); |
| 1852 |
| 1786 // In case of fast smi-only, convert to fast object, otherwise bail out. | 1853 // In case of fast smi-only, convert to fast object, otherwise bail out. |
| 1787 __ bind(¬_fast_object); | 1854 __ Bind(¬_fast_object); |
| 1788 __ CheckFastSmiElements(r3, r7, &call_builtin); | 1855 __ CheckFastSmiElements(map, x7, &call_builtin); |
| 1789 | 1856 |
| 1790 __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); | 1857 __ Ldr(x7, FieldMemOperand(x4, HeapObject::kMapOffset)); |
| 1791 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 1858 __ JumpIfRoot(x7, Heap::kHeapNumberMapRootIndex, &call_builtin); |
| 1792 __ cmp(r7, ip); | 1859 |
| 1793 __ b(eq, &call_builtin); | |
| 1794 // edx: receiver | |
| 1795 // r3: map | |
| 1796 Label try_holey_map; | 1860 Label try_holey_map; |
| 1797 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, | 1861 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
| 1798 FAST_ELEMENTS, | 1862 FAST_ELEMENTS, |
| 1799 r3, | 1863 map, |
| 1800 r7, | 1864 x7, |
| 1801 &try_holey_map); | 1865 &try_holey_map); |
| 1802 __ mov(r2, receiver); | 1866 // GenerateMapChangeElementsTransition expects the receiver to be in x2. |
| 1867 // Since from this point we cannot jump on 'miss' it is ok to clobber |
| | 1868 // x2 (which initially contained the called function name). |
| 1869 __ Mov(x2, receiver); |
| 1803 ElementsTransitionGenerator:: | 1870 ElementsTransitionGenerator:: |
| 1804 GenerateMapChangeElementsTransition(masm(), | 1871 GenerateMapChangeElementsTransition(masm(), |
| 1805 DONT_TRACK_ALLOCATION_SITE, | 1872 DONT_TRACK_ALLOCATION_SITE, |
| 1806 NULL); | 1873 NULL); |
| 1807 __ jmp(&fast_object); | 1874 __ B(&fast_object); |
| 1808 | 1875 |
| 1809 __ bind(&try_holey_map); | 1876 __ Bind(&try_holey_map); |
| 1810 __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, | 1877 __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, |
| 1811 FAST_HOLEY_ELEMENTS, | 1878 FAST_HOLEY_ELEMENTS, |
| 1812 r3, | 1879 map, |
| 1813 r7, | 1880 x7, |
| 1814 &call_builtin); | 1881 &call_builtin); |
| 1815 __ mov(r2, receiver); | 1882 // The previous comment about x2 usage also applies here. |
| 1883 __ Mov(x2, receiver); |
| 1816 ElementsTransitionGenerator:: | 1884 ElementsTransitionGenerator:: |
| 1817 GenerateMapChangeElementsTransition(masm(), | 1885 GenerateMapChangeElementsTransition(masm(), |
| 1818 DONT_TRACK_ALLOCATION_SITE, | 1886 DONT_TRACK_ALLOCATION_SITE, |
| 1819 NULL); | 1887 NULL); |
| 1820 __ bind(&fast_object); | 1888 __ Bind(&fast_object); |
| 1821 } else { | 1889 } else { |
| 1822 __ CheckFastObjectElements(r3, r3, &call_builtin); | 1890 __ CheckFastObjectElements(map, x3, &call_builtin); |
| 1823 } | 1891 } |
| 1824 | 1892 |
| 1825 // Save new length. | 1893 // Save new length. |
| 1826 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1894 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1827 | 1895 |
| 1828 // Store the value. | 1896 // Store the value. |
| 1829 // We may need a register containing the address end_elements below, | 1897 // We may need a register containing the address end_elements below, |
| 1830 // so write back the value in end_elements. | 1898 // so write back the value in end_elements. |
| 1831 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); | 1899 __ Add(end_elements, elements, |
| 1832 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | 1900 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); |
| 1901 __ Str(x4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
| 1833 | 1902 |
| 1834 __ RecordWrite(elements, | 1903 __ RecordWrite(elements, |
| 1835 end_elements, | 1904 end_elements, |
| 1836 r4, | 1905 x4, |
| 1837 kLRHasNotBeenSaved, | 1906 kLRHasNotBeenSaved, |
| 1838 kDontSaveFPRegs, | 1907 kDontSaveFPRegs, |
| 1839 EMIT_REMEMBERED_SET, | 1908 EMIT_REMEMBERED_SET, |
| 1840 OMIT_SMI_CHECK); | 1909 OMIT_SMI_CHECK); |
| 1841 __ Drop(argc + 1); | 1910 __ Drop(argc + 1); |
| 1842 __ Ret(); | 1911 __ Ret(); |
| 1843 | 1912 |
| 1844 __ bind(&attempt_to_grow_elements); | 1913 |
| 1845 // r0: array's length + 1. | 1914 __ Bind(&attempt_to_grow_elements); |
| 1846 // r4: elements' length. | 1915 // When we jump here, x4 must hold the length of elements. |
| 1916 Register elements_length = x4; |
| 1847 | 1917 |
| 1848 if (!FLAG_inline_new) { | 1918 if (!FLAG_inline_new) { |
| 1849 __ b(&call_builtin); | 1919 __ B(&call_builtin); |
| 1850 } | 1920 } |
| 1851 | 1921 |
| 1852 __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize)); | 1922 __ Peek(x2, (argc - 1) * kPointerSize); |
| 1853 // Growing elements that are SMI-only requires special handling in case | 1923 // Growing elements that are SMI-only requires special handling in case |
| 1854 // the new element is non-Smi. For now, delegate to the builtin. | 1924 // the new element is non-Smi. For now, delegate to the builtin. |
| 1855 Label no_fast_elements_check; | 1925 Label no_fast_elements_check; |
| 1856 __ JumpIfSmi(r2, &no_fast_elements_check); | 1926 __ JumpIfSmi(x2, &no_fast_elements_check); |
| 1857 __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 1927 __ Ldr(x7, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 1858 __ CheckFastObjectElements(r7, r7, &call_builtin); | 1928 __ CheckFastObjectElements(x7, x7, &call_builtin); |
| 1859 __ bind(&no_fast_elements_check); | 1929 __ Bind(&no_fast_elements_check); |
| 1860 | 1930 |
| 1861 ExternalReference new_space_allocation_top = | 1931 ExternalReference new_space_allocation_top = |
| 1862 ExternalReference::new_space_allocation_top_address(isolate()); | 1932 ExternalReference::new_space_allocation_top_address(isolate()); |
| 1863 ExternalReference new_space_allocation_limit = | 1933 ExternalReference new_space_allocation_limit = |
| 1864 ExternalReference::new_space_allocation_limit_address(isolate()); | 1934 ExternalReference::new_space_allocation_limit_address(isolate()); |
| 1865 | 1935 |
| 1866 const int kAllocationDelta = 4; | 1936 const int kAllocationDelta = 4; |
| 1867 // Load top and check if it is the end of elements. | 1937 // Load top and check if it is the end of elements. |
| 1868 __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); | 1938 __ Add(end_elements, elements, |
| 1869 __ add(end_elements, end_elements, Operand(kEndElementsOffset)); | 1939 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); |
| 1870 __ mov(r7, Operand(new_space_allocation_top)); | 1940 __ Add(end_elements, end_elements, kEndElementsOffset); |
| 1871 __ ldr(r3, MemOperand(r7)); | 1941 __ Mov(x7, Operand(new_space_allocation_top)); |
| 1872 __ cmp(end_elements, r3); | 1942 __ Ldr(x3, MemOperand(x7)); |
| 1873 __ b(ne, &call_builtin); | 1943 __ Cmp(end_elements, x3); |
| 1944 __ B(ne, &call_builtin); |
| 1874 | 1945 |
| 1875 __ mov(r9, Operand(new_space_allocation_limit)); | 1946 __ Mov(x10, Operand(new_space_allocation_limit)); |
| 1876 __ ldr(r9, MemOperand(r9)); | 1947 __ Ldr(x10, MemOperand(x10)); |
| 1877 __ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); | 1948 __ Add(x3, x3, kAllocationDelta * kPointerSize); |
| 1878 __ cmp(r3, r9); | 1949 __ Cmp(x3, x10); |
| 1879 __ b(hi, &call_builtin); | 1950 __ B(hi, &call_builtin); |
| 1880 | 1951 |
| 1881 // We fit and could grow elements. | 1952 // We fit and could grow elements. |
| 1882 // Update new_space_allocation_top. | 1953 // Update new_space_allocation_top. |
| 1883 __ str(r3, MemOperand(r7)); | 1954 __ Str(x3, MemOperand(x7)); |
| 1884 // Push the argument. | 1955 // Push the argument. |
| 1885 __ str(r2, MemOperand(end_elements)); | 1956 __ Str(x2, MemOperand(end_elements)); |
| 1886 // Fill the rest with holes. | 1957 // Fill the rest with holes. |
| 1887 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | 1958 __ LoadRoot(x3, Heap::kTheHoleValueRootIndex); |
| 1888 for (int i = 1; i < kAllocationDelta; i++) { | 1959 for (int i = 1; i < kAllocationDelta; i++) { |
| 1889 __ str(r3, MemOperand(end_elements, i * kPointerSize)); | 1960 __ Str(x3, MemOperand(end_elements, i * kPointerSize)); |
| 1890 } | 1961 } |
| 1891 | 1962 |
| 1892 // Update elements' and array's sizes. | 1963 // Update elements' and array's sizes. |
| 1893 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1964 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1894 __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta))); | 1965 __ Add(elements_length, |
| 1895 __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 1966 elements_length, |
| 1967 Operand(Smi::FromInt(kAllocationDelta))); |
| 1968 __ Str(elements_length, |
| 1969 FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1896 | 1970 |
| 1897 // Elements are in new space, so write barrier is not required. | 1971 // Elements are in new space, so write barrier is not required. |
| 1898 __ Drop(argc + 1); | 1972 __ Drop(argc + 1); |
| 1899 __ Ret(); | 1973 __ Ret(); |
| 1900 } | 1974 } |
| 1901 __ bind(&call_builtin); | 1975 __ Bind(&call_builtin); |
| 1902 __ TailCallExternalReference( | 1976 __ TailCallExternalReference( |
| 1903 ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1); | 1977 ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1); |
| 1904 } | 1978 } |
| 1905 | 1979 |
| 1906 // Handle call cache miss. | 1980 // Handle call cache miss. |
| 1907 __ bind(&miss); | 1981 __ Bind(&miss); |
| 1908 GenerateMissBranch(); | 1982 GenerateMissBranch(); |
| 1909 | 1983 |
| 1910 // Return the generated code. | 1984 // Return the generated code. |
| 1911 return GetCode(function); | 1985 return GetCode(function); |
| 1912 } | 1986 } |
| 1913 | 1987 |
| 1914 | 1988 |
| 1915 Handle<Code> CallStubCompiler::CompileArrayPopCall( | 1989 Handle<Code> CallStubCompiler::CompileArrayPopCall( |
| 1916 Handle<Object> object, | 1990 Handle<Object> object, |
| 1917 Handle<JSObject> holder, | 1991 Handle<JSObject> holder, |
| 1918 Handle<JSGlobalPropertyCell> cell, | 1992 Handle<JSGlobalPropertyCell> cell, |
| 1919 Handle<JSFunction> function, | 1993 Handle<JSFunction> function, |
| 1920 Handle<String> name) { | 1994 Handle<String> name) { |
| 1921 // ----------- S t a t e ------------- | 1995 // ----------- S t a t e ------------- |
| 1922 // -- r2 : name | 1996 // -- x2 : name |
| 1923 // -- lr : return address | 1997 // -- lr : return address |
| 1924 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 1998 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 1925 // -- ... | 1999 // -- ... |
| 1926 // -- sp[argc * 4] : receiver | 2000 // -- sp[argc * 8] : receiver |
| 1927 // ----------------------------------- | 2001 // ----------------------------------- |
| 1928 | 2002 |
| 1929 // If object is not an array, bail out to regular call. | 2003 // If object is not an array, bail out to regular call. |
| 1930 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null(); | 2004 if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null(); |
| 1931 | 2005 |
| 2006 const int argc = arguments().immediate(); |
| 2007 Register result = x0; |
| 1932 Label miss, return_undefined, call_builtin; | 2008 Label miss, return_undefined, call_builtin; |
| 1933 Register receiver = r1; | 2009 |
| 1934 Register elements = r3; | |
| 1935 GenerateNameCheck(name, &miss); | 2010 GenerateNameCheck(name, &miss); |
| 1936 | 2011 |
| 1937 // Get the receiver from the stack | 2012 // Get the receiver from the stack |
| 1938 const int argc = arguments().immediate(); | 2013 Register receiver = x1; |
| 1939 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); | 2014 __ Peek(receiver, argc * kPointerSize); |
| 1940 // Check that the receiver isn't a smi. | 2015 // Check that the receiver isn't a smi. |
| 1941 __ JumpIfSmi(receiver, &miss); | 2016 __ JumpIfSmi(receiver, &miss); |
| 1942 | 2017 |
| 1943 // Check that the maps haven't changed. | 2018 // Check that the maps haven't changed. |
| 1944 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements, | 2019 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, |
| 1945 r4, r0, name, &miss); | 2020 x3, x4, x0, name, &miss); |
| 1946 | 2021 |
| 1947 // Get the elements array of the object. | 2022 // Get the elements array of the object. |
| 1948 __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); | 2023 Register elements = x3; |
| 2024 __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| 1949 | 2025 |
| 1950 // Check that the elements are in fast mode and writable. | 2026 // Check that the elements are in fast mode and writable. |
| 1951 __ CheckMap(elements, | 2027 __ CheckMap(elements, |
| 1952 r0, | 2028 x0, |
| 1953 Heap::kFixedArrayMapRootIndex, | 2029 Heap::kFixedArrayMapRootIndex, |
| 1954 &call_builtin, | 2030 &call_builtin, |
| 1955 DONT_DO_SMI_CHECK); | 2031 DONT_DO_SMI_CHECK); |
| 1956 | 2032 |
| 1957 // Get the array's length into r4 and calculate new length. | 2033 // Get the array's length and calculate new length. |
| 1958 __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 2034 Register length = x4; |
| 1959 __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC); | 2035 __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1960 __ b(lt, &return_undefined); | 2036 __ Subs(length, length, Operand(Smi::FromInt(1))); |
| 2037 __ B(lt, &return_undefined); |
| 1961 | 2038 |
| 1962 // Get the last element. | 2039 // Get the last element. |
| 1963 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); | 2040 __ Add(elements, elements, |
| 1964 // We can't address the last element in one operation. Compute the more | 2041 Operand::UntagSmiAndScale(length, kPointerSizeLog2)); |
| 1965 // expensive shift first, and use an offset later on. | 2042 __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
| 1966 __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4)); | 2043 __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &call_builtin); |
| 1967 __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); | |
| 1968 __ cmp(r0, r6); | |
| 1969 __ b(eq, &call_builtin); | |
| 1970 | 2044 |
| 1971 // Set the array's length. | 2045 // Set the array's length. |
| 1972 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 2046 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1973 | 2047 |
| 1974 // Fill with the hole. | 2048 // Fill with the hole. |
| 1975 __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize)); | 2049 Register hole_value = x6; |
| 2050 __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex); |
| 2051 __ Str(hole_value, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
| 1976 __ Drop(argc + 1); | 2052 __ Drop(argc + 1); |
| 1977 __ Ret(); | 2053 __ Ret(); |
| 1978 | 2054 |
| 1979 __ bind(&return_undefined); | 2055 __ Bind(&return_undefined); |
| 1980 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | 2056 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
| 1981 __ Drop(argc + 1); | 2057 __ Drop(argc + 1); |
| 1982 __ Ret(); | 2058 __ Ret(); |
| 1983 | 2059 |
| 1984 __ bind(&call_builtin); | 2060 __ Bind(&call_builtin); |
| 1985 __ TailCallExternalReference( | 2061 __ TailCallExternalReference( |
| 1986 ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1); | 2062 ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1); |
| 1987 | 2063 |
| 1988 // Handle call cache miss. | 2064 // Handle call cache miss. |
| 1989 __ bind(&miss); | 2065 __ Bind(&miss); |
| 1990 GenerateMissBranch(); | 2066 GenerateMissBranch(); |
| 1991 | 2067 |
| 1992 // Return the generated code. | 2068 // Return the generated code. |
| 1993 return GetCode(function); | 2069 return GetCode(function); |
| 1994 } | 2070 } |
| 1995 | 2071 |
| 1996 | 2072 |
| 1997 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( | 2073 Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( |
| 1998 Handle<Object> object, | 2074 Handle<Object> object, |
| 1999 Handle<JSObject> holder, | 2075 Handle<JSObject> holder, |
| 2000 Handle<JSGlobalPropertyCell> cell, | 2076 Handle<JSGlobalPropertyCell> cell, |
| 2001 Handle<JSFunction> function, | 2077 Handle<JSFunction> function, |
| 2002 Handle<String> name) { | 2078 Handle<String> name) { |
| 2003 // ----------- S t a t e ------------- | 2079 // ----------- S t a t e ------------- |
| 2004 // -- r2 : function name | 2080 // -- x2 : function name |
| 2005 // -- lr : return address | 2081 // -- lr : return address |
| 2006 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2082 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 2007 // -- ... | 2083 // -- ... |
| 2008 // -- sp[argc * 4] : receiver | 2084 // -- sp[argc * 8] : receiver |
| 2009 // ----------------------------------- | 2085 // ----------------------------------- |
| 2010 | 2086 |
| 2011 // If object is not a string, bail out to regular call. | 2087 // If object is not a string, bail out to regular call. |
| 2012 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null(); | 2088 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null(); |
| 2013 | 2089 |
| 2014 const int argc = arguments().immediate(); | 2090 const int argc = arguments().immediate(); |
| 2015 Label miss; | 2091 Label miss; |
| 2016 Label name_miss; | 2092 Label name_miss; |
| 2017 Label index_out_of_range; | 2093 Label index_out_of_range; |
| 2018 Label* index_out_of_range_label = &index_out_of_range; | 2094 Label* index_out_of_range_label = &index_out_of_range; |
| 2019 | 2095 |
| 2020 if (kind_ == Code::CALL_IC && | 2096 if (kind_ == Code::CALL_IC && |
| 2021 (CallICBase::StringStubState::decode(extra_state_) == | 2097 (CallICBase::StringStubState::decode(extra_state_) == |
| 2022 DEFAULT_STRING_STUB)) { | 2098 DEFAULT_STRING_STUB)) { |
| 2023 index_out_of_range_label = &miss; | 2099 index_out_of_range_label = &miss; |
| 2024 } | 2100 } |
| 2025 GenerateNameCheck(name, &name_miss); | 2101 GenerateNameCheck(name, &name_miss); |
| 2026 | 2102 |
| 2027 // Check that the maps starting from the prototype haven't changed. | 2103 // Check that the maps starting from the prototype haven't changed. |
| 2104 Register prototype = x0; |
| 2028 GenerateDirectLoadGlobalFunctionPrototype(masm(), | 2105 GenerateDirectLoadGlobalFunctionPrototype(masm(), |
| 2029 Context::STRING_FUNCTION_INDEX, | 2106 Context::STRING_FUNCTION_INDEX, |
| 2030 r0, | 2107 prototype, |
| 2031 &miss); | 2108 &miss); |
| 2032 ASSERT(!object.is_identical_to(holder)); | 2109 ASSERT(!object.is_identical_to(holder)); |
| 2033 CheckPrototypes( | 2110 CheckPrototypes( |
| 2034 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2111 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2035 r0, holder, r1, r3, r4, name, &miss); | 2112 prototype, holder, x1, x3, x4, name, &miss); |
| 2036 | 2113 |
| 2037 Register receiver = r1; | 2114 Register result = x0; |
| 2038 Register index = r4; | 2115 Register receiver = x1; |
| 2039 Register result = r0; | 2116 Register index = x4; |
| 2040 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); | 2117 |
| 2118 __ Peek(receiver, argc * kPointerSize); |
| 2041 if (argc > 0) { | 2119 if (argc > 0) { |
| 2042 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); | 2120 __ Peek(index, (argc - 1) * kPointerSize); |
| 2043 } else { | 2121 } else { |
| 2044 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); | 2122 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); |
| 2045 } | 2123 } |
| 2046 | 2124 |
| 2047 StringCharCodeAtGenerator generator(receiver, | 2125 StringCharCodeAtGenerator generator(receiver, |
| 2048 index, | 2126 index, |
| 2049 result, | 2127 result, |
| 2050 &miss, // When not a string. | 2128 &miss, // When not a string. |
| 2051 &miss, // When not a number. | 2129 &miss, // When not a number. |
| 2052 index_out_of_range_label, | 2130 index_out_of_range_label, |
| 2053 STRING_INDEX_IS_NUMBER); | 2131 STRING_INDEX_IS_NUMBER); |
| 2054 generator.GenerateFast(masm()); | 2132 generator.GenerateFast(masm()); |
| 2055 __ Drop(argc + 1); | 2133 __ Drop(argc + 1); |
| 2056 __ Ret(); | 2134 __ Ret(); |
| 2057 | 2135 |
| 2058 StubRuntimeCallHelper call_helper; | 2136 StubRuntimeCallHelper call_helper; |
| 2059 generator.GenerateSlow(masm(), call_helper); | 2137 generator.GenerateSlow(masm(), call_helper); |
| 2060 | 2138 |
| 2061 if (index_out_of_range.is_linked()) { | 2139 if (index_out_of_range.is_linked()) { |
| 2062 __ bind(&index_out_of_range); | 2140 __ Bind(&index_out_of_range); |
| 2063 __ LoadRoot(r0, Heap::kNanValueRootIndex); | 2141 __ LoadRoot(result, Heap::kNanValueRootIndex); |
| 2064 __ Drop(argc + 1); | 2142 __ Drop(argc + 1); |
| 2065 __ Ret(); | 2143 __ Ret(); |
| 2066 } | 2144 } |
| 2067 | 2145 |
| 2068 __ bind(&miss); | 2146 __ Bind(&miss); |
| 2069 // Restore function name in r2. | 2147 // Restore function name in x2. |
| 2070 __ Move(r2, name); | 2148 __ Mov(x2, Operand(name)); |
| 2071 __ bind(&name_miss); | 2149 __ Bind(&name_miss); |
| 2072 GenerateMissBranch(); | 2150 GenerateMissBranch(); |
| 2073 | 2151 |
| 2074 // Return the generated code. | 2152 // Return the generated code. |
| 2075 return GetCode(function); | 2153 return GetCode(function); |
| 2076 } | 2154 } |
| 2077 | 2155 |
| 2078 | 2156 |
| 2079 Handle<Code> CallStubCompiler::CompileStringCharAtCall( | 2157 Handle<Code> CallStubCompiler::CompileStringCharAtCall( |
| 2080 Handle<Object> object, | 2158 Handle<Object> object, |
| 2081 Handle<JSObject> holder, | 2159 Handle<JSObject> holder, |
| 2082 Handle<JSGlobalPropertyCell> cell, | 2160 Handle<JSGlobalPropertyCell> cell, |
| 2083 Handle<JSFunction> function, | 2161 Handle<JSFunction> function, |
| 2084 Handle<String> name) { | 2162 Handle<String> name) { |
| 2085 // ----------- S t a t e ------------- | 2163 // ----------- S t a t e ------------- |
| 2086 // -- r2 : function name | 2164 // -- x2 : function name |
| 2087 // -- lr : return address | 2165 // -- lr : return address |
| 2088 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2166 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 2089 // -- ... | 2167 // -- ... |
| 2090 // -- sp[argc * 4] : receiver | 2168 // -- sp[argc * 8] : receiver |
| 2091 // ----------------------------------- | 2169 // ----------------------------------- |
| 2092 | 2170 |
| 2093 // If object is not a string, bail out to regular call. | 2171 // If object is not a string, bail out to regular call. |
| 2094 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null(); | 2172 if (!object->IsString() || !cell.is_null()) return Handle<Code>::null(); |
| 2095 | 2173 |
| 2096 const int argc = arguments().immediate(); | 2174 const int argc = arguments().immediate(); |
| 2097 Label miss; | 2175 Label miss; |
| 2098 Label name_miss; | 2176 Label name_miss; |
| 2099 Label index_out_of_range; | 2177 Label index_out_of_range; |
| 2100 Label* index_out_of_range_label = &index_out_of_range; | 2178 Label* index_out_of_range_label = &index_out_of_range; |
| 2179 |
| 2101 if (kind_ == Code::CALL_IC && | 2180 if (kind_ == Code::CALL_IC && |
| 2102 (CallICBase::StringStubState::decode(extra_state_) == | 2181 (CallICBase::StringStubState::decode(extra_state_) == |
| 2103 DEFAULT_STRING_STUB)) { | 2182 DEFAULT_STRING_STUB)) { |
| 2104 index_out_of_range_label = &miss; | 2183 index_out_of_range_label = &miss; |
| 2105 } | 2184 } |
| 2106 GenerateNameCheck(name, &name_miss); | 2185 GenerateNameCheck(name, &name_miss); |
| 2107 | 2186 |
| 2108 // Check that the maps starting from the prototype haven't changed. | 2187 // Check that the maps starting from the prototype haven't changed. |
| 2188 Register prototype = x0; |
| 2109 GenerateDirectLoadGlobalFunctionPrototype(masm(), | 2189 GenerateDirectLoadGlobalFunctionPrototype(masm(), |
| 2110 Context::STRING_FUNCTION_INDEX, | 2190 Context::STRING_FUNCTION_INDEX, |
| 2111 r0, | 2191 prototype, |
| 2112 &miss); | 2192 &miss); |
| 2113 ASSERT(!object.is_identical_to(holder)); | 2193 ASSERT(!object.is_identical_to(holder)); |
| 2114 CheckPrototypes( | 2194 CheckPrototypes( |
| 2115 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2195 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2116 r0, holder, r1, r3, r4, name, &miss); | 2196 prototype, holder, x1, x3, x4, name, &miss); |
| 2117 | 2197 |
| 2118 Register receiver = r0; | 2198 Register receiver = x0; |
| 2119 Register index = r4; | 2199 Register index = x4; |
| 2120 Register scratch = r3; | 2200 Register scratch = x3; |
| 2121 Register result = r0; | 2201 Register result = x0; |
| 2122 __ ldr(receiver, MemOperand(sp, argc * kPointerSize)); | 2202 |
| 2203 __ Peek(receiver, argc * kPointerSize); |
| 2123 if (argc > 0) { | 2204 if (argc > 0) { |
| 2124 __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize)); | 2205 __ Peek(index, (argc - 1) * kPointerSize); |
| 2125 } else { | 2206 } else { |
| 2126 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); | 2207 __ LoadRoot(index, Heap::kUndefinedValueRootIndex); |
| 2127 } | 2208 } |
| 2128 | 2209 |
| 2129 StringCharAtGenerator generator(receiver, | 2210 StringCharAtGenerator generator(receiver, |
| 2130 index, | 2211 index, |
| 2131 scratch, | 2212 scratch, |
| 2132 result, | 2213 result, |
| 2133 &miss, // When not a string. | 2214 &miss, // When not a string. |
| 2134 &miss, // When not a number. | 2215 &miss, // When not a number. |
| 2135 index_out_of_range_label, | 2216 index_out_of_range_label, |
| 2136 STRING_INDEX_IS_NUMBER); | 2217 STRING_INDEX_IS_NUMBER); |
| 2137 generator.GenerateFast(masm()); | 2218 generator.GenerateFast(masm()); |
| 2138 __ Drop(argc + 1); | 2219 __ Drop(argc + 1); |
| 2139 __ Ret(); | 2220 __ Ret(); |
| 2140 | 2221 |
| 2141 StubRuntimeCallHelper call_helper; | 2222 StubRuntimeCallHelper call_helper; |
| 2142 generator.GenerateSlow(masm(), call_helper); | 2223 generator.GenerateSlow(masm(), call_helper); |
| 2143 | 2224 |
| 2144 if (index_out_of_range.is_linked()) { | 2225 if (index_out_of_range.is_linked()) { |
| 2145 __ bind(&index_out_of_range); | 2226 __ Bind(&index_out_of_range); |
| 2146 __ LoadRoot(r0, Heap::kempty_stringRootIndex); | 2227 __ LoadRoot(result, Heap::kempty_stringRootIndex); |
| 2147 __ Drop(argc + 1); | 2228 __ Drop(argc + 1); |
| 2148 __ Ret(); | 2229 __ Ret(); |
| 2149 } | 2230 } |
| 2150 | 2231 |
| 2151 __ bind(&miss); | 2232 __ Bind(&miss); |
| 2152 // Restore function name in r2. | 2233 // Restore function name in x2. |
| 2153 __ Move(r2, name); | 2234 __ Mov(x2, Operand(name)); |
| 2154 __ bind(&name_miss); | 2235 __ Bind(&name_miss); |
| 2155 GenerateMissBranch(); | 2236 GenerateMissBranch(); |
| 2156 | 2237 |
| 2157 // Return the generated code. | 2238 // Return the generated code. |
| 2158 return GetCode(function); | 2239 return GetCode(function); |
| 2159 } | 2240 } |
| 2160 | 2241 |
| 2161 | 2242 |
| 2162 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( | 2243 Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall( |
| 2163 Handle<Object> object, | 2244 Handle<Object> object, |
| 2164 Handle<JSObject> holder, | 2245 Handle<JSObject> holder, |
| 2165 Handle<JSGlobalPropertyCell> cell, | 2246 Handle<JSGlobalPropertyCell> cell, |
| 2166 Handle<JSFunction> function, | 2247 Handle<JSFunction> function, |
| 2167 Handle<String> name) { | 2248 Handle<String> name) { |
| 2168 // ----------- S t a t e ------------- | 2249 // ----------- S t a t e ------------- |
| 2169 // -- r2 : function name | 2250 // -- x2 : function name |
| 2170 // -- lr : return address | 2251 // -- lr : return address |
| 2171 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2252 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 2172 // -- ... | 2253 // -- ... |
| 2173 // -- sp[argc * 4] : receiver | 2254 // -- sp[argc * 8] : receiver |
| 2174 // ----------------------------------- | 2255 // ----------------------------------- |
| 2175 | |
| 2176 const int argc = arguments().immediate(); | 2256 const int argc = arguments().immediate(); |
| 2177 | 2257 |
| 2178 // If the object is not a JSObject or we got an unexpected number of | 2258 // If the object is not a JSObject or we got an unexpected number of |
| 2179 // arguments, bail out to the regular call. | 2259 // arguments, bail out to the regular call. |
| 2180 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | 2260 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); |
| 2181 | 2261 |
| 2182 Label miss; | 2262 Label miss; |
| 2183 GenerateNameCheck(name, &miss); | 2263 GenerateNameCheck(name, &miss); |
| 2184 | 2264 |
| 2185 if (cell.is_null()) { | 2265 if (cell.is_null()) { |
| 2186 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); | 2266 Register receiver = x1; |
| 2267 __ Peek(receiver, kPointerSize); |
| 2268 __ JumpIfSmi(receiver, &miss); |
| 2187 | 2269 |
| 2188 __ JumpIfSmi(r1, &miss); | 2270 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, |
| 2189 | 2271 x0, x3, x4, name, &miss); |
| 2190 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, | |
| 2191 name, &miss); | |
| 2192 } else { | 2272 } else { |
| 2193 ASSERT(cell->value() == *function); | 2273 ASSERT(cell->value() == *function); |
| 2194 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, | 2274 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, |
| 2195 &miss); | 2275 &miss); |
| 2196 GenerateLoadFunctionFromCell(cell, function, &miss); | 2276 GenerateLoadFunctionFromCell(cell, function, &miss); |
| 2197 } | 2277 } |
| 2198 | 2278 |
| 2199 // Load the char code argument. | 2279 // Load the char code argument. |
| 2200 Register code = r1; | 2280 Register code = x1; |
| 2201 __ ldr(code, MemOperand(sp, 0 * kPointerSize)); | 2281 __ Peek(code, 0); |
| 2202 | 2282 |
| 2203 // Check the code is a smi. | 2283 // Check the code is a smi. |
| 2204 Label slow; | 2284 Label slow; |
| 2205 __ JumpIfNotSmi(code, &slow); | 2285 __ JumpIfNotSmi(code, &slow); |
| 2206 | 2286 |
| 2207 // Convert the smi code to uint16. | 2287 // Make sure the smi code is a uint16. |
| 2208 __ and_(code, code, Operand(Smi::FromInt(0xffff))); | 2288 __ And(code, code, Operand(Smi::FromInt(0xffff))); |
| 2209 | 2289 |
| 2210 StringCharFromCodeGenerator generator(code, r0); | 2290 Register result = x0; |
| 2291 StringCharFromCodeGenerator generator(code, result); |
| 2211 generator.GenerateFast(masm()); | 2292 generator.GenerateFast(masm()); |
| 2212 __ Drop(argc + 1); | 2293 __ Drop(argc + 1); |
| 2213 __ Ret(); | 2294 __ Ret(); |
| 2214 | 2295 |
| 2215 StubRuntimeCallHelper call_helper; | 2296 StubRuntimeCallHelper call_helper; |
| 2216 generator.GenerateSlow(masm(), call_helper); | 2297 generator.GenerateSlow(masm(), call_helper); |
| 2217 | 2298 |
| 2218 // Tail call the full function. We do not have to patch the receiver | 2299 // Tail call the full function. We do not have to patch the receiver |
| 2219 // because the function makes no use of it. | 2300 // because the function makes no use of it. |
| 2220 __ bind(&slow); | 2301 __ Bind(&slow); |
| 2221 ParameterCount expected(function); | 2302 ParameterCount expected(function); |
| 2222 __ InvokeFunction(function, expected, arguments(), | 2303 __ InvokeFunction(function, expected, arguments(), |
| 2223 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | 2304 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
| 2224 | 2305 |
| 2225 __ bind(&miss); | 2306 __ Bind(&miss); |
| 2226 // r2: function name. | |
| 2227 GenerateMissBranch(); | 2307 GenerateMissBranch(); |
| 2228 | 2308 |
| 2229 // Return the generated code. | 2309 // Return the generated code. |
| 2230 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); | 2310 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); |
| 2231 } | 2311 } |
| 2232 | 2312 |
| 2233 | 2313 |
| 2234 Handle<Code> CallStubCompiler::CompileMathFloorCall( | 2314 Handle<Code> CallStubCompiler::CompileMathFloorCall( |
| 2235 Handle<Object> object, | 2315 Handle<Object> object, |
| 2236 Handle<JSObject> holder, | 2316 Handle<JSObject> holder, |
| 2237 Handle<JSGlobalPropertyCell> cell, | 2317 Handle<JSGlobalPropertyCell> cell, |
| 2238 Handle<JSFunction> function, | 2318 Handle<JSFunction> function, |
| 2239 Handle<String> name) { | 2319 Handle<String> name) { |
| 2240 // ----------- S t a t e ------------- | 2320 // ----------- S t a t e ------------- |
| 2241 // -- r2 : function name | 2321 // -- x2 : function name (must be preserved on miss) |
| 2242 // -- lr : return address | 2322 // -- lr : return address |
| 2243 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2323 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 2244 // -- ... | 2324 // -- ... |
| 2245 // -- sp[argc * 4] : receiver | 2325 // -- sp[argc * 8] : receiver |
| 2246 // ----------------------------------- | 2326 // ----------------------------------- |
| 2327 Label miss; |
| 2328 Label return_result; |
| 2329 Register result = x0; |
| 2330 const int argc = arguments().immediate(); |
| 2247 | 2331 |
| 2248 const int argc = arguments().immediate(); | |
| 2249 // If the object is not a JSObject or we got an unexpected number of | 2332 // If the object is not a JSObject or we got an unexpected number of |
| 2250 // arguments, bail out to the regular call. | 2333 // arguments, bail out to the regular call. |
| 2251 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | 2334 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); |
| 2252 | 2335 |
| 2253 Label miss, slow; | |
| 2254 GenerateNameCheck(name, &miss); | 2336 GenerateNameCheck(name, &miss); |
| 2255 | 2337 |
| 2256 if (cell.is_null()) { | 2338 if (cell.is_null()) { |
| 2257 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); | 2339 Register receiver = x1; |
| 2258 __ JumpIfSmi(r1, &miss); | 2340 __ Peek(receiver, kPointerSize); |
| 2259 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, | 2341 __ JumpIfSmi(receiver, &miss); |
| 2260 name, &miss); | 2342 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, |
| 2343 x0, x3, x4, name, &miss); |
| 2261 } else { | 2344 } else { |
| 2262 ASSERT(cell->value() == *function); | 2345 ASSERT(cell->value() == *function); |
| 2263 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, | 2346 GenerateGlobalReceiverCheck( |
| 2264 &miss); | 2347 Handle<JSObject>::cast(object), holder, name, &miss); |
| 2265 GenerateLoadFunctionFromCell(cell, function, &miss); | 2348 GenerateLoadFunctionFromCell(cell, function, &miss); |
| 2266 } | 2349 } |
| 2267 | 2350 |
| 2268 // Load the (only) argument into r0. | 2351 // Load the (only) argument. |
| 2269 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); | 2352 Register arg = x0; |
| 2353 __ Peek(arg, 0); |
| 2270 | 2354 |
| 2271 // If the argument is a smi, just return. | 2355 // If the argument is a smi, just return. |
| 2272 __ SmiTst(r0); | 2356 __ JumpIfSmi(arg, &return_result); |
| 2273 __ Drop(argc + 1, eq); | |
| 2274 __ Ret(eq); | |
| 2275 | 2357 |
| 2276 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | 2358 // Load the HeapNumber. |
| 2359 Label slow; |
| 2360 __ CheckMap(arg, x1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); |
| 2277 | 2361 |
| 2278 Label smi_check, just_return; | 2362 FPRegister double_value = d0; |
| 2363 __ Ldr(double_value, FieldMemOperand(arg, HeapNumber::kValueOffset)); |
| 2279 | 2364 |
| 2280 // Load the HeapNumber value. | 2365 // Try to do the conversion and check for overflow. |
| 2281 // We will need access to the value in the core registers, so we load it | 2366 Label zero_or_overflow; |
| 2282 // with ldrd and move it to the fpu. It also spares a sub instruction for | 2367 Register int_value = x3; |
| 2283 // updating the HeapNumber value address, as vldr expects a multiple | 2368 __ Fcvtms(int_value, double_value); |
| 2284 // of 4 offset. | 2369 __ Cmp(int_value, Smi::kMaxValue); |
| 2285 __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 2370 __ Ccmp(int_value, Smi::kMinValue, NFlag, le); |
| 2286 __ vmov(d1, r4, r5); | 2371 // If the second comparison is skipped, we will have N=1 and V=0, this will |
| 2372 // force the following "lt" condition to be true. |
| 2373 __ B(lt, &zero_or_overflow); |
| 2287 | 2374 |
| 2288 // Check for NaN, Infinities and -0. | 2375 Label smi_result; |
| 2289 // They are invariant through a Math.Floor call, so just | 2376 __ Cbnz(int_value, &smi_result); |
| 2290 // return the original argument. | |
| 2291 __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
| 2292 __ cmp(r3, Operand(-1)); | |
| 2293 __ b(eq, &just_return); | |
| 2294 __ eor(r3, r5, Operand(0x80000000u)); | |
| 2295 __ orr(r3, r3, r4, SetCC); | |
| 2296 __ b(eq, &just_return); | |
| 2297 // Test for values that can be exactly represented as a | |
| 2298 // signed 32-bit integer. | |
| 2299 __ TryDoubleToInt32Exact(r0, d1, d2); | |
| 2300 // If exact, check smi | |
| 2301 __ b(eq, &smi_check); | |
| 2302 __ cmp(r5, Operand(0)); | |
| 2303 | 2377 |
| 2304 // If input is in ]+0, +inf[, the cmp has cleared overflow and negative | 2378 __ Bind(&zero_or_overflow); |
| 2305 // (V=0 and N=0), the two following instructions won't execute and | 2379 Register value = x1; |
| 2306 // we fall through smi_check to check if the result can fit into a smi. | 2380 __ Fmov(value, double_value); |
| 2307 | 2381 |
| 2308 // If input is in ]-inf, -0[, sub one and, go to slow if we have | 2382 // Extract the exponent. |
| 2309 // an overflow. Else we fall through smi check. | 2383 // TODO(all): The constants in the HeapNumber class assume that the double |
| 2310 // Hint: if x is a negative, non integer number, | 2384 // is stored in two 32-bit registers. They should assume offset within a |
| 2311 // floor(x) <=> round_to_zero(x) - 1. | 2385 // 64-bit register on 64-bit systems. However if we want to change that we |
| 2312 __ sub(r0, r0, Operand(1), SetCC, mi); | 2386 // have to make some changes in x64 back-end. |
| 2313 __ b(vs, &slow); | 2387 static const int exponent_shift = |
| 2388 CountTrailingZeros(Double::kExponentMask, 64); |
| 2389 static const int exponent_width = CountSetBits(Double::kExponentMask, 64); |
| 2390 Register exponent = x3; |
| 2391 __ Ubfx(exponent, value, exponent_shift, exponent_width); |
| 2314 | 2392 |
| 2315 __ bind(&smi_check); | 2393 // Check for NaN, Infinity, and -Infinity. They are invariant through |
| 2316 // Check if the result can fit into an smi. If we had an overflow, | 2394 // a Math.Floor call, so just return the original argument. |
| 2317 // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi. | 2395 __ Cmp(exponent, Double::kExponentMask >> exponent_shift); |
| 2318 // If result doesn't fit into an smi, branch to slow. | 2396 __ B(&return_result, eq); |
| 2319 __ SmiTag(r0, SetCC); | |
| 2320 __ b(vs, &slow); | |
| 2321 | 2397 |
| 2322 __ bind(&just_return); | 2398 // If the exponent is null, the number was 0 or -0. Otherwise the result |
| 2399 // can't fit in a smi and we go to the slow path. |
| 2400 __ Cbnz(exponent, &slow); |
| 2401 |
| 2402 // Check for -0. |
| 2403 // If our HeapNumber is negative it was -0, so we just return it. |
| 2404 __ TestAndBranchIfAnySet(value, Double::kSignMask, &return_result); |
| 2405 |
| 2406 __ Bind(&smi_result); |
| 2407 // Tag and return the result. |
| 2408 __ SmiTag(result, int_value); |
| 2409 |
| 2410 __ Bind(&return_result); |
| 2323 __ Drop(argc + 1); | 2411 __ Drop(argc + 1); |
| 2324 __ Ret(); | 2412 __ Ret(); |
| 2325 | 2413 |
| 2326 __ bind(&slow); | 2414 __ Bind(&slow); |
| 2327 // Tail call the full function. We do not have to patch the receiver | 2415 // Tail call the full function. We do not have to patch the receiver |
| 2328 // because the function makes no use of it. | 2416 // because the function makes no use of it. |
| 2329 ParameterCount expected(function); | 2417 ParameterCount expected(function); |
| 2330 __ InvokeFunction(function, expected, arguments(), | 2418 __ InvokeFunction(function, expected, arguments(), |
| 2331 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | 2419 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
| 2332 | 2420 |
| 2333 __ bind(&miss); | 2421 __ Bind(&miss); |
| 2334 // r2: function name. | |
| 2335 GenerateMissBranch(); | 2422 GenerateMissBranch(); |
| 2336 | 2423 |
| 2337 // Return the generated code. | 2424 // Return the generated code. |
| 2338 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); | 2425 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); |
| 2339 } | 2426 } |
| 2340 | 2427 |
| 2341 | 2428 |
| 2342 Handle<Code> CallStubCompiler::CompileMathAbsCall( | 2429 Handle<Code> CallStubCompiler::CompileMathAbsCall( |
| 2343 Handle<Object> object, | 2430 Handle<Object> object, |
| 2344 Handle<JSObject> holder, | 2431 Handle<JSObject> holder, |
| 2345 Handle<JSGlobalPropertyCell> cell, | 2432 Handle<JSGlobalPropertyCell> cell, |
| 2346 Handle<JSFunction> function, | 2433 Handle<JSFunction> function, |
| 2347 Handle<String> name) { | 2434 Handle<String> name) { |
| 2348 // ----------- S t a t e ------------- | 2435 // ----------- S t a t e ------------- |
| 2349 // -- r2 : function name | 2436 // -- x2 : function name |
| 2350 // -- lr : return address | 2437 // -- lr : return address |
| 2351 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2438 // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based) |
| 2352 // -- ... | 2439 // -- ... |
| 2353 // -- sp[argc * 4] : receiver | 2440 // -- sp[argc * 8] : receiver |
| 2354 // ----------------------------------- | 2441 // ----------------------------------- |
| 2355 | 2442 |
| 2356 const int argc = arguments().immediate(); | 2443 const int argc = arguments().immediate(); |
| 2444 |
| 2357 // If the object is not a JSObject or we got an unexpected number of | 2445 // If the object is not a JSObject or we got an unexpected number of |
| 2358 // arguments, bail out to the regular call. | 2446 // arguments, bail out to the regular call. |
| 2359 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | 2447 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); |
| 2360 | 2448 |
| 2361 Label miss; | 2449 Register result = x0; |
| 2450 Label miss, slow; |
| 2362 GenerateNameCheck(name, &miss); | 2451 GenerateNameCheck(name, &miss); |
| 2452 |
| 2363 if (cell.is_null()) { | 2453 if (cell.is_null()) { |
| 2364 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); | 2454 Register receiver = x1; |
| 2365 __ JumpIfSmi(r1, &miss); | 2455 __ Peek(receiver, kPointerSize); |
| 2366 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, | 2456 __ JumpIfSmi(receiver, &miss); |
| 2367 name, &miss); | 2457 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, |
| 2458 x0, x3, x4, name, &miss); |
| 2368 } else { | 2459 } else { |
| 2369 ASSERT(cell->value() == *function); | 2460 ASSERT(cell->value() == *function); |
| 2370 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, | 2461 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, |
| 2371 &miss); | 2462 &miss); |
| 2372 GenerateLoadFunctionFromCell(cell, function, &miss); | 2463 GenerateLoadFunctionFromCell(cell, function, &miss); |
| 2373 } | 2464 } |
| 2374 | 2465 |
| 2375 // Load the (only) argument into r0. | 2466 // Load the (only) argument. |
| 2376 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); | 2467 Register arg = x0; |
| 2468 __ Peek(arg, 0); |
| 2377 | 2469 |
| 2378 // Check if the argument is a smi. | 2470 // Check if the argument is a smi. |
| 2379 Label not_smi; | 2471 Label not_smi; |
| 2380 __ JumpIfNotSmi(r0, ¬_smi); | 2472 __ JumpIfNotSmi(arg, ¬_smi); |
| 2381 | 2473 |
| 2382 // Do bitwise not or do nothing depending on the sign of the | 2474 __ SmiAbs(arg, x1, &slow); |
| 2383 // argument. | |
| 2384 __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1)); | |
| 2385 | |
| 2386 // Add 1 or do nothing depending on the sign of the argument. | |
| 2387 __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC); | |
| 2388 | |
| 2389 // If the result is still negative, go to the slow case. | |
| 2390 // This only happens for the most negative smi. | |
| 2391 Label slow; | |
| 2392 __ b(mi, &slow); | |
| 2393 | |
| 2394 // Smi case done. | 2475 // Smi case done. |
| 2395 __ Drop(argc + 1); | 2476 __ Drop(argc + 1); |
| 2396 __ Ret(); | 2477 __ Ret(); |
| 2397 | 2478 |
| 2398 // Check if the argument is a heap number and load its exponent and | 2479 // Check if the argument is a heap number and load its value. |
| 2399 // sign. | 2480 __ Bind(¬_smi); |
| 2400 __ bind(¬_smi); | 2481 __ CheckMap( |
| 2401 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | 2482 arg, x1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); |
| 2402 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 2483 Register value = x1; |
| 2484 __ Ldr(value, FieldMemOperand(arg, HeapNumber::kValueOffset)); |
| 2403 | 2485 |
| 2404 // Check the sign of the argument. If the argument is positive, | 2486 // Check the sign of the argument. If the argument is positive, return it. |
| 2405 // just return it. | |
| 2406 Label negative_sign; | 2487 Label negative_sign; |
| 2407 __ tst(r1, Operand(HeapNumber::kSignMask)); | 2488 __ TestAndBranchIfAnySet(value, Double::kSignMask, &negative_sign); |
| 2408 __ b(ne, &negative_sign); | |
| 2409 __ Drop(argc + 1); | 2489 __ Drop(argc + 1); |
| 2410 __ Ret(); | 2490 __ Ret(); |
| 2411 | 2491 |
| 2412 // If the argument is negative, clear the sign, and return a new | 2492 __ Bind(&negative_sign); |
| 2413 // number. | 2493 FPRegister double_value = d0; |
| 2414 __ bind(&negative_sign); | 2494 __ Fmov(double_value, value); |
| 2415 __ eor(r1, r1, Operand(HeapNumber::kSignMask)); | 2495 __ Fabs(double_value, double_value); |
| 2416 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 2496 __ AllocateHeapNumberWithValue(result, double_value, &slow, x1, x3); |
| 2417 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 2418 __ AllocateHeapNumber(r0, r4, r5, r6, &slow); | |
| 2419 __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 2420 __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
| 2421 __ Drop(argc + 1); | 2497 __ Drop(argc + 1); |
| 2422 __ Ret(); | 2498 __ Ret(); |
| 2423 | 2499 |
| 2424 // Tail call the full function. We do not have to patch the receiver | 2500 // Tail call the full function. We do not have to patch the receiver |
| 2425 // because the function makes no use of it. | 2501 // because the function makes no use of it. |
| 2426 __ bind(&slow); | 2502 __ Bind(&slow); |
| 2427 ParameterCount expected(function); | 2503 ParameterCount expected(function); |
| 2428 __ InvokeFunction(function, expected, arguments(), | 2504 __ InvokeFunction(function, expected, arguments(), |
| 2429 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | 2505 JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
| 2430 | 2506 |
| 2431 __ bind(&miss); | 2507 __ Bind(&miss); |
| 2432 // r2: function name. | |
| 2433 GenerateMissBranch(); | 2508 GenerateMissBranch(); |
| 2434 | 2509 |
| 2435 // Return the generated code. | 2510 // Return the generated code. |
| 2436 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); | 2511 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); |
| 2437 } | 2512 } |
| 2438 | 2513 |
| 2439 | 2514 |
| 2440 Handle<Code> CallStubCompiler::CompileFastApiCall( | 2515 Handle<Code> CallStubCompiler::CompileFastApiCall( |
| 2441 const CallOptimization& optimization, | 2516 const CallOptimization& optimization, |
| 2442 Handle<Object> object, | 2517 Handle<Object> object, |
| 2443 Handle<JSObject> holder, | 2518 Handle<JSObject> holder, |
| 2444 Handle<JSGlobalPropertyCell> cell, | 2519 Handle<JSGlobalPropertyCell> cell, |
| 2445 Handle<JSFunction> function, | 2520 Handle<JSFunction> function, |
| 2446 Handle<String> name) { | 2521 Handle<String> name) { |
| 2447 Counters* counters = isolate()->counters(); | 2522 Counters* counters = isolate()->counters(); |
| 2448 | 2523 |
| 2449 ASSERT(optimization.is_simple_api_call()); | 2524 ASSERT(optimization.is_simple_api_call()); |
| 2450 // Bail out if object is a global object as we don't want to | 2525 // Bail out if object is a global object as we don't want to |
| 2451 // repatch it to global receiver. | 2526 // repatch it to global receiver. |
| 2452 if (object->IsGlobalObject()) return Handle<Code>::null(); | 2527 if (object->IsGlobalObject()) return Handle<Code>::null(); |
| 2453 if (!cell.is_null()) return Handle<Code>::null(); | 2528 if (!cell.is_null()) return Handle<Code>::null(); |
| 2454 if (!object->IsJSObject()) return Handle<Code>::null(); | 2529 if (!object->IsJSObject()) return Handle<Code>::null(); |
| 2455 int depth = optimization.GetPrototypeDepthOfExpectedType( | 2530 int depth = optimization.GetPrototypeDepthOfExpectedType( |
| 2456 Handle<JSObject>::cast(object), holder); | 2531 Handle<JSObject>::cast(object), holder); |
| 2457 if (depth == kInvalidProtoDepth) return Handle<Code>::null(); | 2532 if (depth == kInvalidProtoDepth) return Handle<Code>::null(); |
| 2458 | 2533 |
| 2459 Label miss, miss_before_stack_reserved; | 2534 Label miss, miss_before_stack_reserved; |
| 2460 GenerateNameCheck(name, &miss_before_stack_reserved); | 2535 GenerateNameCheck(name, &miss_before_stack_reserved); |
| 2461 | 2536 |
| 2537 const int argc = arguments().immediate(); |
| 2538 |
| 2462 // Get the receiver from the stack. | 2539 // Get the receiver from the stack. |
| 2463 const int argc = arguments().immediate(); | 2540 Register receiver = x1; |
| 2464 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); | 2541 __ Peek(receiver, argc * kPointerSize); |
| 2465 | 2542 |
| 2466 // Check that the receiver isn't a smi. | 2543 // Check that the receiver isn't a smi. |
| 2467 __ JumpIfSmi(r1, &miss_before_stack_reserved); | 2544 __ JumpIfSmi(receiver, &miss_before_stack_reserved); |
| 2468 | 2545 |
| 2469 __ IncrementCounter(counters->call_const(), 1, r0, r3); | 2546 __ IncrementCounter(counters->call_const(), 1, x0, x3); |
| 2470 __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3); | 2547 __ IncrementCounter(counters->call_const_fast_api(), 1, x0, x3); |
| 2471 | 2548 |
| 2472 ReserveSpaceForFastApiCall(masm(), r0); | 2549 ReserveSpaceForFastApiCall(masm(), x0); |
| 2473 | 2550 |
| 2474 // Check that the maps haven't changed and find a Holder as a side effect. | 2551 // Check that the maps haven't changed and find a Holder as a side effect. |
| 2475 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name, | 2552 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, x0, x3, x4, |
| 2476 depth, &miss); | 2553 name, depth, &miss); |
| 2477 | 2554 |
| 2478 GenerateFastApiDirectCall(masm(), optimization, argc); | 2555 GenerateFastApiDirectCall(masm(), optimization, argc); |
| 2479 | 2556 |
| 2480 __ bind(&miss); | 2557 __ Bind(&miss); |
| 2481 FreeSpaceForFastApiCall(masm()); | 2558 FreeSpaceForFastApiCall(masm()); |
| 2482 | 2559 |
| 2483 __ bind(&miss_before_stack_reserved); | 2560 __ Bind(&miss_before_stack_reserved); |
| 2484 GenerateMissBranch(); | 2561 GenerateMissBranch(); |
| 2485 | 2562 |
| 2486 // Return the generated code. | 2563 // Return the generated code. |
| 2487 return GetCode(function); | 2564 return GetCode(function); |
| 2488 } | 2565 } |
| 2489 | 2566 |
| 2490 | 2567 |
| 2491 void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, | 2568 void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, |
| 2492 Handle<JSObject> holder, | 2569 Handle<JSObject> holder, |
| 2493 Handle<Name> name, | 2570 Handle<Name> name, |
| 2494 CheckType check, | 2571 CheckType check, |
| 2495 Label* success) { | 2572 Label* success) { |
| 2496 // ----------- S t a t e ------------- | 2573 // ----------- S t a t e ------------- |
| 2497 // -- r2 : name | 2574 // -- x2 : name |
| 2498 // -- lr : return address | 2575 // -- lr : return address |
| 2499 // ----------------------------------- | 2576 // ----------------------------------- |
| 2500 Label miss; | 2577 Label miss; |
| 2501 GenerateNameCheck(name, &miss); | 2578 GenerateNameCheck(name, &miss); |
| 2502 | 2579 |
| 2503 // Get the receiver from the stack | 2580 // Get the receiver from the stack. |
| 2504 const int argc = arguments().immediate(); | 2581 const int argc = arguments().immediate(); |
| 2505 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); | 2582 Register receiver = x1; |
| 2583 __ Peek(receiver, argc * kPointerSize); |
| 2506 | 2584 |
| 2507 // Check that the receiver isn't a smi. | 2585 // Check that the receiver isn't a smi. |
| 2508 if (check != NUMBER_CHECK) { | 2586 if (check != NUMBER_CHECK) { |
| 2509 __ JumpIfSmi(r1, &miss); | 2587 __ JumpIfSmi(receiver, &miss); |
| 2510 } | 2588 } |
| 2511 | 2589 |
| 2512 // Make sure that it's okay not to patch the on stack receiver | 2590 // Make sure that it's okay not to patch the on stack receiver |
| 2513 // unless we're doing a receiver map check. | 2591 // unless we're doing a receiver map check. |
| 2514 ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); | 2592 ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK); |
| 2593 |
| 2515 switch (check) { | 2594 switch (check) { |
| 2516 case RECEIVER_MAP_CHECK: | 2595 case RECEIVER_MAP_CHECK: { |
| 2517 __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3); | 2596 __ IncrementCounter(isolate()->counters()->call_const(), 1, x0, x3); |
| 2518 | 2597 |
| 2519 // Check that the maps haven't changed. | 2598 // Check that the maps haven't changed. |
| 2520 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, | 2599 CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, |
| 2521 name, &miss); | 2600 x0, x3, x4, name, &miss); |
| 2522 | 2601 |
| 2523 // Patch the receiver on the stack with the global proxy if | 2602 // Patch the receiver on the stack with the global proxy if necessary. |
| 2524 // necessary. | |
| 2525 if (object->IsGlobalObject()) { | 2603 if (object->IsGlobalObject()) { |
| 2526 __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); | 2604 __ Ldr(x3, |
| 2527 __ str(r3, MemOperand(sp, argc * kPointerSize)); | 2605 FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset)); |
| 2606 __ Poke(x3, argc * kPointerSize); |
| 2528 } | 2607 } |
| 2529 break; | 2608 break; |
| 2530 | 2609 } |
| 2531 case STRING_CHECK: | 2610 case STRING_CHECK: { |
| 2532 // Check that the object is a string. | 2611 // Check that the object is a string. |
| 2533 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); | 2612 __ JumpIfObjectType(receiver, x3, x3, FIRST_NONSTRING_TYPE, &miss, ge); |
| 2534 __ b(ge, &miss); | |
| 2535 // Check that the maps starting from the prototype haven't changed. | 2613 // Check that the maps starting from the prototype haven't changed. |
| 2614 Register prototype = x0; |
| 2536 GenerateDirectLoadGlobalFunctionPrototype( | 2615 GenerateDirectLoadGlobalFunctionPrototype( |
| 2537 masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); | 2616 masm(), Context::STRING_FUNCTION_INDEX, prototype, &miss); |
| 2538 CheckPrototypes( | 2617 CheckPrototypes( |
| 2539 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2618 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2540 r0, holder, r3, r1, r4, name, &miss); | 2619 prototype, holder, x3, x1, x4, name, &miss); |
| 2541 break; | 2620 break; |
| 2542 | 2621 } |
| 2543 case SYMBOL_CHECK: | 2622 case SYMBOL_CHECK: { |
| 2544 // Check that the object is a symbol. | 2623 // Check that the object is a symbol. |
| 2545 __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE); | 2624 __ JumpIfNotObjectType(receiver, x3, x3, SYMBOL_TYPE, &miss); |
| 2546 __ b(ne, &miss); | |
| 2547 // Check that the maps starting from the prototype haven't changed. | 2625 // Check that the maps starting from the prototype haven't changed. |
| 2626 Register prototype = x0; |
| 2548 GenerateDirectLoadGlobalFunctionPrototype( | 2627 GenerateDirectLoadGlobalFunctionPrototype( |
| 2549 masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss); | 2628 masm(), Context::SYMBOL_FUNCTION_INDEX, prototype, &miss); |
| 2550 CheckPrototypes( | 2629 CheckPrototypes( |
| 2551 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2630 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2552 r0, holder, r3, r1, r4, name, &miss); | 2631 prototype, holder, x3, x1, x4, name, &miss); |
| 2553 break; | 2632 break; |
| 2554 | 2633 } |
| 2555 case NUMBER_CHECK: { | 2634 case NUMBER_CHECK: { |
| 2556 Label fast; | 2635 Label fast; |
| 2557 // Check that the object is a smi or a heap number. | 2636 // Check that the object is a smi or a heap number. |
| 2558 __ JumpIfSmi(r1, &fast); | 2637 __ JumpIfSmi(receiver, &fast); |
| 2559 __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); | 2638 __ JumpIfNotObjectType(receiver, x0, x0, HEAP_NUMBER_TYPE, &miss); |
| 2560 __ b(ne, &miss); | 2639 |
| 2561 __ bind(&fast); | 2640 __ Bind(&fast); |
| 2562 // Check that the maps starting from the prototype haven't changed. | 2641 // Check that the maps starting from the prototype haven't changed. |
| 2642 Register prototype = x0; |
| 2563 GenerateDirectLoadGlobalFunctionPrototype( | 2643 GenerateDirectLoadGlobalFunctionPrototype( |
| 2564 masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss); | 2644 masm(), Context::NUMBER_FUNCTION_INDEX, prototype, &miss); |
| 2565 CheckPrototypes( | 2645 CheckPrototypes( |
| 2566 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2646 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2567 r0, holder, r3, r1, r4, name, &miss); | 2647 prototype, holder, x3, x1, x4, name, &miss); |
| 2568 break; | 2648 break; |
| 2569 } | 2649 } |
| 2570 case BOOLEAN_CHECK: { | 2650 case BOOLEAN_CHECK: { |
| 2571 Label fast; | 2651 Label fast; |
| 2572 // Check that the object is a boolean. | 2652 // Check that the object is a boolean. |
| 2573 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 2653 __ JumpIfRoot(receiver, Heap::kTrueValueRootIndex, &fast); |
| 2574 __ cmp(r1, ip); | 2654 __ JumpIfNotRoot(receiver, Heap::kFalseValueRootIndex, &miss); |
| 2575 __ b(eq, &fast); | 2655 |
| 2576 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 2656 __ Bind(&fast); |
| 2577 __ cmp(r1, ip); | |
| 2578 __ b(ne, &miss); | |
| 2579 __ bind(&fast); | |
| 2580 // Check that the maps starting from the prototype haven't changed. | 2657 // Check that the maps starting from the prototype haven't changed. |
| 2658 Register prototype = x0; |
| 2581 GenerateDirectLoadGlobalFunctionPrototype( | 2659 GenerateDirectLoadGlobalFunctionPrototype( |
| 2582 masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss); | 2660 masm(), Context::BOOLEAN_FUNCTION_INDEX, prototype, &miss); |
| 2583 CheckPrototypes( | 2661 CheckPrototypes( |
| 2584 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), | 2662 Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), |
| 2585 r0, holder, r3, r1, r4, name, &miss); | 2663 prototype, holder, x3, x1, x4, name, &miss); |
| 2586 break; | 2664 break; |
| 2587 } | 2665 } |
| 2588 } | 2666 } |
| 2589 | 2667 |
| 2590 __ b(success); | 2668 __ B(success); |
| 2591 | 2669 |
| 2592 // Handle call cache miss. | 2670 // Handle call cache miss. |
| 2593 __ bind(&miss); | 2671 __ Bind(&miss); |
| 2594 GenerateMissBranch(); | 2672 GenerateMissBranch(); |
| 2595 } | 2673 } |
| 2596 | 2674 |
| 2597 | 2675 |
| 2598 void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { | 2676 void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { |
| 2599 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) | 2677 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) |
| 2600 ? CALL_AS_FUNCTION | 2678 ? CALL_AS_FUNCTION |
| 2601 : CALL_AS_METHOD; | 2679 : CALL_AS_METHOD; |
| 2602 ParameterCount expected(function); | 2680 ParameterCount expected(function); |
| 2603 __ InvokeFunction(function, expected, arguments(), | 2681 __ InvokeFunction(function, expected, arguments(), |
| (...skipping 11 matching lines...) Expand all Loading... |
| 2615 Handle<Code> code = CompileCustomCall(object, holder, | 2693 Handle<Code> code = CompileCustomCall(object, holder, |
| 2616 Handle<JSGlobalPropertyCell>::null(), | 2694 Handle<JSGlobalPropertyCell>::null(), |
| 2617 function, Handle<String>::cast(name)); | 2695 function, Handle<String>::cast(name)); |
| 2618 // A null handle means bail out to the regular compiler code below. | 2696 // A null handle means bail out to the regular compiler code below. |
| 2619 if (!code.is_null()) return code; | 2697 if (!code.is_null()) return code; |
| 2620 } | 2698 } |
| 2621 | 2699 |
| 2622 Label success; | 2700 Label success; |
| 2623 | 2701 |
| 2624 CompileHandlerFrontend(object, holder, name, check, &success); | 2702 CompileHandlerFrontend(object, holder, name, check, &success); |
| 2625 __ bind(&success); | 2703 __ Bind(&success); |
| 2626 CompileHandlerBackend(function); | 2704 CompileHandlerBackend(function); |
| 2627 | 2705 |
| 2628 // Return the generated code. | 2706 // Return the generated code. |
| 2629 return GetCode(function); | 2707 return GetCode(function); |
| 2630 } | 2708 } |
| 2631 | 2709 |
| 2632 | 2710 |
| 2633 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, | 2711 Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, |
| 2634 Handle<JSObject> holder, | 2712 Handle<JSObject> holder, |
| 2635 Handle<Name> name) { | 2713 Handle<Name> name) { |
| 2636 // ----------- S t a t e ------------- | 2714 // ----------- S t a t e ------------- |
| 2637 // -- r2 : name | 2715 // -- x2 : name |
| 2638 // -- lr : return address | 2716 // -- lr : return address |
| 2639 // ----------------------------------- | 2717 // ----------------------------------- |
| 2640 Label miss; | 2718 Label miss; |
| 2719 Register name_reg = x2; |
| 2720 |
| 2641 GenerateNameCheck(name, &miss); | 2721 GenerateNameCheck(name, &miss); |
| 2642 | 2722 |
| 2643 // Get the number of arguments. | |
| 2644 const int argc = arguments().immediate(); | 2723 const int argc = arguments().immediate(); |
| 2645 LookupResult lookup(isolate()); | 2724 LookupResult lookup(isolate()); |
| 2646 LookupPostInterceptor(holder, name, &lookup); | 2725 LookupPostInterceptor(holder, name, &lookup); |
| 2647 | 2726 |
| 2648 // Get the receiver from the stack. | 2727 // Get the receiver from the stack. |
| 2649 __ ldr(r1, MemOperand(sp, argc * kPointerSize)); | 2728 Register receiver = x5; |
| 2729 __ Peek(receiver, argc * kPointerSize); |
| 2650 | 2730 |
| 2651 CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_); | 2731 CallInterceptorCompiler compiler(this, arguments(), name_reg, extra_state_); |
| 2652 compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0, | 2732 compiler.Compile( |
| 2653 &miss); | 2733 masm(), object, holder, name, &lookup, receiver, x3, x4, x0, &miss); |
| 2654 | 2734 |
| 2655 // Move returned value, the function to call, to r1. | 2735 // Move returned value, the function to call, to x1 (this is required by |
| 2656 __ mov(r1, r0); | 2736 // GenerateCallFunction). |
| 2737 Register function = x1; |
| 2738 __ Mov(function, x0); |
| 2739 |
| 2657 // Restore receiver. | 2740 // Restore receiver. |
| 2658 __ ldr(r0, MemOperand(sp, argc * kPointerSize)); | 2741 __ Peek(receiver, argc * kPointerSize); |
| 2659 | 2742 |
| 2660 GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_); | 2743 GenerateCallFunction( |
| 2744 masm(), object, arguments(), &miss, extra_state_, function, receiver, x3); |
| 2661 | 2745 |
| 2662 // Handle call cache miss. | 2746 // Handle call cache miss. |
| 2663 __ bind(&miss); | 2747 __ Bind(&miss); |
| 2664 GenerateMissBranch(); | 2748 GenerateMissBranch(); |
| 2665 | 2749 |
| 2666 // Return the generated code. | 2750 // Return the generated code. |
| 2667 return GetCode(Code::INTERCEPTOR, name); | 2751 return GetCode(Code::INTERCEPTOR, name); |
| 2668 } | 2752 } |
| 2669 | 2753 |
| 2670 | 2754 |
| 2671 Handle<Code> CallStubCompiler::CompileCallGlobal( | 2755 Handle<Code> CallStubCompiler::CompileCallGlobal( |
| 2672 Handle<JSObject> object, | 2756 Handle<JSObject> object, |
| 2673 Handle<GlobalObject> holder, | 2757 Handle<GlobalObject> holder, |
| 2674 Handle<JSGlobalPropertyCell> cell, | 2758 Handle<JSGlobalPropertyCell> cell, |
| 2675 Handle<JSFunction> function, | 2759 Handle<JSFunction> function, |
| 2676 Handle<Name> name) { | 2760 Handle<Name> name) { |
| 2677 // ----------- S t a t e ------------- | 2761 // ----------- S t a t e ------------- |
| 2678 // -- r2 : name | 2762 // -- x2 : name |
| 2679 // -- lr : return address | 2763 // -- lr : return address |
| 2680 // ----------------------------------- | 2764 // ----------------------------------- |
| 2681 if (HasCustomCallGenerator(function)) { | 2765 if (HasCustomCallGenerator(function)) { |
| 2682 Handle<Code> code = CompileCustomCall( | 2766 Handle<Code> code = CompileCustomCall( |
| 2683 object, holder, cell, function, Handle<String>::cast(name)); | 2767 object, holder, cell, function, Handle<String>::cast(name)); |
| 2684 // A null handle means bail out to the regular compiler code below. | 2768 // A null handle means bail out to the regular compiler code below. |
| 2685 if (!code.is_null()) return code; | 2769 if (!code.is_null()) return code; |
| 2686 } | 2770 } |
| 2687 | 2771 |
| 2688 Label miss; | 2772 Label miss; |
| 2689 GenerateNameCheck(name, &miss); | 2773 GenerateNameCheck(name, &miss); |
| 2690 | 2774 |
| 2691 // Get the number of arguments. | 2775 // Get the number of arguments. |
| 2692 const int argc = arguments().immediate(); | 2776 const int argc = arguments().immediate(); |
| 2777 |
| 2693 GenerateGlobalReceiverCheck(object, holder, name, &miss); | 2778 GenerateGlobalReceiverCheck(object, holder, name, &miss); |
| 2694 GenerateLoadFunctionFromCell(cell, function, &miss); | 2779 GenerateLoadFunctionFromCell(cell, function, &miss); |
| 2780 // After these two calls the receiver is left in x0 and the function in x1. |
| 2781 Register receiver_reg = x0; |
| 2782 Register function_reg = x1; |
| 2695 | 2783 |
| 2696 // Patch the receiver on the stack with the global proxy if | 2784 // Patch the receiver on the stack with the global proxy if necessary. |
| 2697 // necessary. | |
| 2698 if (object->IsGlobalObject()) { | 2785 if (object->IsGlobalObject()) { |
| 2699 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); | 2786 __ Ldr(x3, |
| 2700 __ str(r3, MemOperand(sp, argc * kPointerSize)); | 2787 FieldMemOperand(receiver_reg, GlobalObject::kGlobalReceiverOffset)); |
| 2788 __ Poke(x3, argc * kPointerSize); |
| 2701 } | 2789 } |
| 2702 | 2790 |
| 2703 // Set up the context (function already in r1). | 2791 // Set up the context. |
| 2704 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 2792 __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset)); |
| 2705 | 2793 |
| 2706 // Jump to the cached code (tail call). | 2794 // Jump to the cached code (tail call). |
| 2707 Counters* counters = isolate()->counters(); | 2795 Counters* counters = isolate()->counters(); |
| 2708 __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); | 2796 __ IncrementCounter(counters->call_global_inline(), 1, x3, x4); |
| 2709 ParameterCount expected(function->shared()->formal_parameter_count()); | 2797 ParameterCount expected(function->shared()->formal_parameter_count()); |
| 2710 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) | 2798 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) |
| 2711 ? CALL_AS_FUNCTION | 2799 ? CALL_AS_FUNCTION |
| 2712 : CALL_AS_METHOD; | 2800 : CALL_AS_METHOD; |
| 2713 // We call indirectly through the code field in the function to | 2801 // We call indirectly through the code field in the function to |
| 2714 // allow recompilation to take effect without changing any of the | 2802 // allow recompilation to take effect without changing any of the |
| 2715 // call sites. | 2803 // call sites. |
| 2716 __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2804 __ Ldr(x3, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset)); |
| 2717 __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION, | 2805 __ InvokeCode( |
| 2718 NullCallWrapper(), call_kind); | 2806 x3, expected, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); |
| 2719 | 2807 |
| 2720 // Handle call cache miss. | 2808 // Handle call cache miss. |
| 2721 __ bind(&miss); | 2809 __ Bind(&miss); |
| 2722 __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3); | 2810 __ IncrementCounter(counters->call_global_inline_miss(), 1, x1, x3); |
| 2723 GenerateMissBranch(); | 2811 GenerateMissBranch(); |
| 2724 | 2812 |
| 2725 // Return the generated code. | 2813 // Return the generated code. |
| 2726 return GetCode(Code::NORMAL, name); | 2814 return GetCode(Code::NORMAL, name); |
| 2727 } | 2815 } |
| 2728 | 2816 |
| 2729 | 2817 |
| 2730 Handle<Code> StoreStubCompiler::CompileStoreCallback( | 2818 Handle<Code> StoreStubCompiler::CompileStoreCallback( |
| 2731 Handle<Name> name, | 2819 Handle<Name> name, |
| 2732 Handle<JSObject> object, | 2820 Handle<JSObject> object, |
| 2733 Handle<JSObject> holder, | 2821 Handle<JSObject> holder, |
| 2734 Handle<ExecutableAccessorInfo> callback) { | 2822 Handle<ExecutableAccessorInfo> callback) { |
| 2735 Label miss; | 2823 Label miss; |
| 2824 |
| 2825 ASM_LOCATION("StoreStubCompiler::CompileStoreCallback"); |
| 2826 |
| 2736 // Check that the maps haven't changed. | 2827 // Check that the maps haven't changed. |
| 2737 __ JumpIfSmi(receiver(), &miss); | 2828 __ JumpIfSmi(receiver(), &miss); |
| 2738 CheckPrototypes(object, receiver(), holder, | 2829 CheckPrototypes(object, receiver(), holder, |
| 2739 scratch1(), scratch2(), scratch3(), name, &miss); | 2830 scratch1(), scratch2(), scratch3(), name, &miss); |
| 2740 | 2831 |
| 2741 // Stub never generated for non-global objects that require access checks. | 2832 // Stub never generated for non-global objects that require access checks. |
| 2742 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); | 2833 ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); |
| 2743 | 2834 |
| 2744 __ push(receiver()); // receiver | 2835 __ Mov(scratch1(), Operand(callback)); |
| 2745 __ mov(ip, Operand(callback)); // callback info | 2836 __ Push(receiver(), scratch1(), this->name(), value()); |
| 2746 __ Push(ip, this->name(), value()); | |
| 2747 | 2837 |
| 2748 // Do tail-call to the runtime system. | 2838 // Do tail-call to the runtime system. |
| 2749 ExternalReference store_callback_property = | 2839 ExternalReference store_callback_property = |
| 2750 ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); | 2840 ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate()); |
| 2751 __ TailCallExternalReference(store_callback_property, 4, 1); | 2841 __ TailCallExternalReference(store_callback_property, 4, 1); |
| 2752 | 2842 |
| 2753 // Handle store cache miss. | 2843 // Handle store cache miss. |
| 2754 __ bind(&miss); | 2844 __ Bind(&miss); |
| 2755 TailCallBuiltin(masm(), MissBuiltin(kind())); | 2845 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 2756 | 2846 |
| 2757 // Return the generated code. | 2847 // Return the generated code. |
| 2758 return GetICCode(kind(), Code::CALLBACKS, name); | 2848 return GetICCode(kind(), Code::CALLBACKS, name); |
| 2759 } | 2849 } |
| 2760 | 2850 |
| 2761 | 2851 |
| 2762 #undef __ | 2852 #undef __ |
| 2763 #define __ ACCESS_MASM(masm) | 2853 #define __ ACCESS_MASM(masm) |
| 2764 | 2854 |
| 2765 | 2855 |
| 2766 void StoreStubCompiler::GenerateStoreViaSetter( | 2856 void StoreStubCompiler::GenerateStoreViaSetter( |
| 2767 MacroAssembler* masm, | 2857 MacroAssembler* masm, |
| 2768 Handle<JSFunction> setter) { | 2858 Handle<JSFunction> setter) { |
| 2769 // ----------- S t a t e ------------- | 2859 // ----------- S t a t e ------------- |
| 2770 // -- r0 : value | 2860 // -- x0 : value |
| 2771 // -- r1 : receiver | 2861 // -- x1 : receiver |
| 2772 // -- r2 : name | 2862 // -- x2 : name |
| 2773 // -- lr : return address | 2863 // -- lr : return address |
| 2774 // ----------------------------------- | 2864 // ----------------------------------- |
| 2865 Register value_reg = x0; |
| 2866 Register receiver_reg = x1; |
| 2867 Label miss; |
| 2868 |
| 2775 { | 2869 { |
| 2776 FrameScope scope(masm, StackFrame::INTERNAL); | 2870 FrameScope scope(masm, StackFrame::INTERNAL); |
| 2777 | 2871 |
| 2778 // Save value register, so we can restore it later. | 2872 // Save value register, so we can restore it later. |
| 2779 __ push(r0); | 2873 __ Push(value_reg); |
| 2780 | 2874 |
| 2781 if (!setter.is_null()) { | 2875 if (!setter.is_null()) { |
| 2782 // Call the JavaScript setter with receiver and value on the stack. | 2876 // Call the JavaScript setter with receiver and value on the stack. |
| 2783 __ Push(r1, r0); | 2877 __ Push(receiver_reg, value_reg); |
| 2784 ParameterCount actual(1); | 2878 ParameterCount actual(1); |
| 2785 ParameterCount expected(setter); | 2879 ParameterCount expected(setter); |
| 2786 __ InvokeFunction(setter, expected, actual, | 2880 __ InvokeFunction(setter, expected, actual, |
| 2787 CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | 2881 CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
| 2788 } else { | 2882 } else { |
| 2789 // If we generate a global code snippet for deoptimization only, remember | 2883 // If we generate a global code snippet for deoptimization only, remember |
| 2790 // the place to continue after deoptimization. | 2884 // the place to continue after deoptimization. |
| 2791 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); | 2885 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); |
| 2792 } | 2886 } |
| 2793 | 2887 |
| 2794 // We have to return the passed value, not the return value of the setter. | 2888 // We have to return the passed value, not the return value of the setter. |
| 2795 __ pop(r0); | 2889 __ Pop(value_reg); |
| 2796 | 2890 |
| 2797 // Restore context register. | 2891 // Restore context register. |
| 2798 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 2892 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2799 } | 2893 } |
| 2800 __ Ret(); | 2894 __ Ret(); |
| 2801 } | 2895 } |
| 2802 | 2896 |
| 2803 | 2897 |
| 2804 #undef __ | 2898 #undef __ |
| 2805 #define __ ACCESS_MASM(masm()) | 2899 #define __ ACCESS_MASM(masm()) |
| 2806 | 2900 |
| 2807 | 2901 |
| 2808 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( | 2902 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
| 2809 Handle<JSObject> object, | 2903 Handle<JSObject> object, |
| 2810 Handle<Name> name) { | 2904 Handle<Name> name) { |
| 2811 Label miss; | 2905 Label miss; |
| 2812 | 2906 |
| 2907 ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor"); |
| 2908 |
| 2813 // Check that the map of the object hasn't changed. | 2909 // Check that the map of the object hasn't changed. |
| 2814 __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, | 2910 __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, |
| 2815 DO_SMI_CHECK); | 2911 DO_SMI_CHECK); |
| 2816 | 2912 |
| 2817 // Perform global security token check if needed. | 2913 // Perform global security token check if needed. |
| 2818 if (object->IsJSGlobalProxy()) { | 2914 if (object->IsJSGlobalProxy()) { |
| 2819 __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); | 2915 __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); |
| 2820 } | 2916 } |
| 2821 | 2917 |
| 2822 // Stub is never generated for non-global objects that require access | 2918 // Stub is never generated for non-global objects that require access checks. |
| 2823 // checks. | |
| 2824 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); | 2919 ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
| 2825 | 2920 |
| 2826 __ Push(receiver(), this->name(), value()); | 2921 __ Mov(scratch1(), Operand(Smi::FromInt(strict_mode()))); |
| 2827 | 2922 __ Push(receiver(), this->name(), value(), scratch1()); |
| 2828 __ mov(scratch1(), Operand(Smi::FromInt(strict_mode()))); | |
| 2829 __ push(scratch1()); // strict mode | |
| 2830 | 2923 |
| 2831 // Do tail-call to the runtime system. | 2924 // Do tail-call to the runtime system. |
| 2832 ExternalReference store_ic_property = | 2925 ExternalReference store_ic_property = |
| 2833 ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); | 2926 ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate()); |
| 2834 __ TailCallExternalReference(store_ic_property, 4, 1); | 2927 __ TailCallExternalReference(store_ic_property, 4, 1); |
| 2835 | 2928 |
| 2836 // Handle store cache miss. | 2929 // Handle store cache miss. |
| 2837 __ bind(&miss); | 2930 __ Bind(&miss); |
| 2838 TailCallBuiltin(masm(), MissBuiltin(kind())); | 2931 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 2839 | 2932 |
| 2840 // Return the generated code. | 2933 // Return the generated code. |
| 2841 return GetICCode(kind(), Code::INTERCEPTOR, name); | 2934 return GetICCode(kind(), Code::INTERCEPTOR, name); |
| 2842 } | 2935 } |
| 2843 | 2936 |
| 2844 | 2937 |
| 2845 Handle<Code> StoreStubCompiler::CompileStoreGlobal( | 2938 Handle<Code> StoreStubCompiler::CompileStoreGlobal( |
| 2846 Handle<GlobalObject> object, | 2939 Handle<GlobalObject> object, |
| 2847 Handle<JSGlobalPropertyCell> cell, | 2940 Handle<JSGlobalPropertyCell> cell, |
| 2848 Handle<Name> name) { | 2941 Handle<Name> name) { |
| 2849 Label miss; | 2942 Label miss; |
| 2850 | 2943 |
| 2944 ASM_LOCATION("StoreStubCompiler::CompileStoreGlobal"); |
| 2945 |
| 2851 // Check that the map of the global has not changed. | 2946 // Check that the map of the global has not changed. |
| 2852 __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); | 2947 __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
| 2853 __ cmp(scratch1(), Operand(Handle<Map>(object->map()))); | 2948 __ Cmp(scratch1(), Operand(Handle<Map>(object->map()))); |
| 2854 __ b(ne, &miss); | 2949 __ B(ne, &miss); |
| 2855 | 2950 |
| 2856 // Check that the value in the cell is not the hole. If it is, this | 2951 // Check that the value in the cell is not the hole. If it is, this |
| 2857 // cell could have been deleted and reintroducing the global needs | 2952 // cell could have been deleted and reintroducing the global needs |
| 2858 // to update the property details in the property dictionary of the | 2953 // to update the property details in the property dictionary of the |
| 2859 // global object. We bail out to the runtime system to do that. | 2954 // global object. We bail out to the runtime system to do that. |
| 2860 __ mov(scratch1(), Operand(cell)); | 2955 __ Mov(scratch1(), Operand(cell)); |
| 2861 __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex); | 2956 __ Ldr(scratch2(), FieldMemOperand(scratch1(), |
| 2862 __ ldr(scratch3(), | 2957 JSGlobalPropertyCell::kValueOffset)); |
| 2863 FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); | 2958 __ JumpIfRoot(scratch2(), Heap::kTheHoleValueRootIndex, &miss); |
| 2864 __ cmp(scratch3(), scratch2()); | |
| 2865 __ b(eq, &miss); | |
| 2866 | 2959 |
| 2867 // Store the value in the cell. | 2960 // Store the value in the cell. |
| 2868 __ str(value(), | 2961 __ Str(value(), FieldMemOperand(scratch1(), |
| 2869 FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); | 2962 JSGlobalPropertyCell::kValueOffset)); |
| 2870 // Cells are always rescanned, so no write barrier here. | 2963 // Cells are always rescanned, so no write barrier here. |
| 2871 | 2964 |
| 2872 Counters* counters = isolate()->counters(); | 2965 Counters* counters = isolate()->counters(); |
| 2873 __ IncrementCounter( | 2966 __ IncrementCounter(counters->named_store_global_inline(), 1, |
| 2874 counters->named_store_global_inline(), 1, scratch1(), scratch2()); | 2967 scratch1(), scratch2()); |
| 2875 __ Ret(); | 2968 __ Ret(); |
| 2876 | 2969 |
| 2877 // Handle store cache miss. | 2970 // Handle store cache miss. |
| 2878 __ bind(&miss); | 2971 __ Bind(&miss); |
| 2879 __ IncrementCounter( | 2972 __ IncrementCounter(counters->named_store_global_inline_miss(), 1, |
| 2880 counters->named_store_global_inline_miss(), 1, scratch1(), scratch2()); | 2973 scratch1(), scratch2()); |
| 2881 TailCallBuiltin(masm(), MissBuiltin(kind())); | 2974 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 2882 | 2975 |
| 2883 // Return the generated code. | 2976 // Return the generated code. |
| 2884 return GetICCode(kind(), Code::NORMAL, name); | 2977 return GetICCode(kind(), Code::NORMAL, name); |
| 2885 } | 2978 } |
| 2886 | 2979 |
| 2887 | 2980 |
| 2888 Handle<Code> LoadStubCompiler::CompileLoadNonexistent( | 2981 Handle<Code> LoadStubCompiler::CompileLoadNonexistent( |
| 2889 Handle<JSObject> object, | 2982 Handle<JSObject> object, |
| 2890 Handle<JSObject> last, | 2983 Handle<JSObject> last, |
| 2891 Handle<Name> name, | 2984 Handle<Name> name, |
| 2892 Handle<GlobalObject> global) { | 2985 Handle<GlobalObject> global) { |
| 2893 Label success; | 2986 Label success; |
| 2894 | |
| 2895 NonexistentHandlerFrontend(object, last, name, &success, global); | 2987 NonexistentHandlerFrontend(object, last, name, &success, global); |
| 2896 | 2988 |
| 2897 __ bind(&success); | 2989 __ Bind(&success); |
| 2898 // Return undefined if maps of the full prototype chain are still the | 2990 // Return undefined if maps of the full prototype chain are still the |
| 2899 // same and no global property with this name contains a value. | 2991 // same and no global property with this name contains a value. |
| 2900 __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | 2992 __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
| 2901 __ Ret(); | 2993 __ Ret(); |
| 2902 | 2994 |
| 2903 // Return the generated code. | 2995 // Return the generated code. |
| 2904 return GetCode(kind(), Code::NONEXISTENT, name); | 2996 return GetCode(kind(), Code::NONEXISTENT, name); |
| 2905 } | 2997 } |
| 2906 | 2998 |
| 2907 | 2999 |
| 3000 // TODO(all): The so-called scratch registers are significant in some cases. For |
| 3001 // example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for |
| 3002 // KeyedStoreCompiler::transition_map(). We should verify which registers are |
| 3003 // actually scratch registers, and which are important. For now, we use the same |
| 3004 // assignments as ARM to remain on the safe side. |
| 3005 |
| 2908 Register* LoadStubCompiler::registers() { | 3006 Register* LoadStubCompiler::registers() { |
| 2909 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 3007 // receiver, name, scratch1, scratch2, scratch3, scratch4. |
| 2910 static Register registers[] = { r0, r2, r3, r1, r4, r5 }; | 3008 static Register registers[] = { x0, x2, x3, x1, x4, x5 }; |
| 2911 return registers; | 3009 return registers; |
| 2912 } | 3010 } |
| 2913 | 3011 |
| 2914 | |
| 2915 Register* KeyedLoadStubCompiler::registers() { | 3012 Register* KeyedLoadStubCompiler::registers() { |
| 2916 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 3013 // receiver, name/key, scratch1, scratch2, scratch3, scratch4. |
| 2917 static Register registers[] = { r1, r0, r2, r3, r4, r5 }; | 3014 static Register registers[] = { x1, x0, x2, x3, x4, x5 }; |
| 2918 return registers; | 3015 return registers; |
| 2919 } | 3016 } |
| 2920 | 3017 |
| 2921 | 3018 |
| 2922 Register* StoreStubCompiler::registers() { | 3019 Register* StoreStubCompiler::registers() { |
| 2923 // receiver, name, value, scratch1, scratch2, scratch3. | 3020 // receiver, name, value, scratch1, scratch2, scratch3. |
| 2924 static Register registers[] = { r1, r2, r0, r3, r4, r5 }; | 3021 static Register registers[] = { x1, x2, x0, x3, x4, x5 }; |
| 2925 return registers; | 3022 return registers; |
| 2926 } | 3023 } |
| 2927 | 3024 |
| 2928 | 3025 |
| 2929 Register* KeyedStoreStubCompiler::registers() { | 3026 Register* KeyedStoreStubCompiler::registers() { |
| 2930 // receiver, name, value, scratch1, scratch2, scratch3. | 3027 // receiver, name, value, scratch1, scratch2, scratch3. |
| 2931 static Register registers[] = { r2, r1, r0, r3, r4, r5 }; | 3028 static Register registers[] = { x2, x1, x0, x3, x4, x5 }; |
| 2932 return registers; | 3029 return registers; |
| 2933 } | 3030 } |
| 2934 | 3031 |
| 2935 | 3032 |
| 2936 void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, | 3033 void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, |
| 2937 Register name_reg, | 3034 Register name_reg, |
| 2938 Label* miss) { | 3035 Label* miss) { |
| 2939 __ cmp(name_reg, Operand(name)); | 3036 __ Cmp(name_reg, Operand(name)); |
| 2940 __ b(ne, miss); | 3037 __ B(ne, miss); |
| 2941 } | 3038 } |
| 2942 | 3039 |
| 2943 | 3040 |
| 2944 void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, | 3041 void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, |
| 2945 Register name_reg, | 3042 Register name_reg, |
| 2946 Label* miss) { | 3043 Label* miss) { |
| 2947 __ cmp(name_reg, Operand(name)); | 3044 __ Cmp(name_reg, Operand(name)); |
| 2948 __ b(ne, miss); | 3045 __ B(ne, miss); |
| 2949 } | 3046 } |
| 2950 | 3047 |
| 2951 | 3048 |
| 2952 #undef __ | 3049 #undef __ |
| 2953 #define __ ACCESS_MASM(masm) | 3050 #define __ ACCESS_MASM(masm) |
| 2954 | 3051 |
| 2955 | |
| 2956 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, | 3052 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
| 2957 Handle<JSFunction> getter) { | 3053 Handle<JSFunction> getter) { |
| 2958 // ----------- S t a t e ------------- | 3054 // ----------- S t a t e ------------- |
| 2959 // -- r0 : receiver | 3055 // -- x0 : receiver |
| 2960 // -- r2 : name | 3056 // -- x2 : name |
| 2961 // -- lr : return address | 3057 // -- lr : return address |
| 2962 // ----------------------------------- | 3058 // ----------------------------------- |
| 2963 { | 3059 { |
| 2964 FrameScope scope(masm, StackFrame::INTERNAL); | 3060 FrameScope scope(masm, StackFrame::INTERNAL); |
| 2965 | 3061 |
| 2966 if (!getter.is_null()) { | 3062 if (!getter.is_null()) { |
| 2967 // Call the JavaScript getter with the receiver on the stack. | 3063 // Call the JavaScript getter with the receiver on the stack. |
| 2968 __ push(r0); | 3064 __ Push(x0); |
| 2969 ParameterCount actual(0); | 3065 ParameterCount actual(0); |
| 2970 ParameterCount expected(getter); | 3066 ParameterCount expected(getter); |
| 2971 __ InvokeFunction(getter, expected, actual, | 3067 __ InvokeFunction(getter, expected, actual, |
| 2972 CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | 3068 CALL_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); |
| 2973 } else { | 3069 } else { |
| 2974 // If we generate a global code snippet for deoptimization only, remember | 3070 // If we generate a global code snippet for deoptimization only, remember |
| 2975 // the place to continue after deoptimization. | 3071 // the place to continue after deoptimization. |
| 2976 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); | 3072 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); |
| 2977 } | 3073 } |
| 2978 | 3074 |
| 2979 // Restore context register. | 3075 // Restore context register. |
| 2980 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 3076 __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 2981 } | 3077 } |
| 2982 __ Ret(); | 3078 __ Ret(); |
| 2983 } | 3079 } |
| 2984 | 3080 |
| 2985 | 3081 |
| 2986 #undef __ | 3082 #undef __ |
| 2987 #define __ ACCESS_MASM(masm()) | 3083 #define __ ACCESS_MASM(masm()) |
| 2988 | 3084 |
| 2989 | 3085 |
| 2990 Handle<Code> LoadStubCompiler::CompileLoadGlobal( | 3086 Handle<Code> LoadStubCompiler::CompileLoadGlobal( |
| 2991 Handle<JSObject> object, | 3087 Handle<JSObject> object, |
| 2992 Handle<GlobalObject> global, | 3088 Handle<GlobalObject> global, |
| 2993 Handle<JSGlobalPropertyCell> cell, | 3089 Handle<JSGlobalPropertyCell> cell, |
| 2994 Handle<Name> name, | 3090 Handle<Name> name, |
| 2995 bool is_dont_delete) { | 3091 bool is_dont_delete) { |
| 2996 Label success, miss; | 3092 Label success, miss; |
| 2997 | 3093 |
| 2998 __ CheckMap( | 3094 __ CheckMap( |
| 2999 receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); | 3095 receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); |
| 3000 HandlerFrontendHeader( | 3096 HandlerFrontendHeader( |
| 3001 object, receiver(), Handle<JSObject>::cast(global), name, &miss); | 3097 object, receiver(), Handle<JSObject>::cast(global), name, &miss); |
| 3002 | 3098 |
| 3003 // Get the value from the cell. | 3099 // Get the value from the cell. |
| 3004 __ mov(r3, Operand(cell)); | 3100 __ Mov(x3, Operand(cell)); |
| 3005 __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); | 3101 __ Ldr(x4, FieldMemOperand(x3, JSGlobalPropertyCell::kValueOffset)); |
| 3006 | 3102 |
| 3007 // Check for deleted property if property can actually be deleted. | 3103 // Check for deleted property if property can actually be deleted. |
| 3008 if (!is_dont_delete) { | 3104 if (!is_dont_delete) { |
| 3009 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3105 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss); |
| 3010 __ cmp(r4, ip); | |
| 3011 __ b(eq, &miss); | |
| 3012 } | 3106 } |
| 3013 | 3107 |
| 3014 HandlerFrontendFooter(&success, &miss); | 3108 HandlerFrontendFooter(&success, &miss); |
| 3015 __ bind(&success); | 3109 __ bind(&success); |
| 3016 | 3110 |
| 3017 Counters* counters = isolate()->counters(); | 3111 Counters* counters = isolate()->counters(); |
| 3018 __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); | 3112 __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3); |
| 3019 __ mov(r0, r4); | 3113 __ Mov(x0, x4); |
| 3020 __ Ret(); | 3114 __ Ret(); |
| 3021 | 3115 |
| 3022 // Return the generated code. | 3116 // Return the generated code. |
| 3023 return GetICCode(kind(), Code::NORMAL, name); | 3117 return GetICCode(kind(), Code::NORMAL, name); |
| 3024 } | 3118 } |
| 3025 | 3119 |
| 3026 | 3120 |
| 3027 Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( | 3121 Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( |
| 3028 MapHandleList* receiver_maps, | 3122 MapHandleList* receiver_maps, |
| 3029 CodeHandleList* handlers, | 3123 CodeHandleList* handlers, |
| 3030 Handle<Name> name, | 3124 Handle<Name> name, |
| 3031 Code::StubType type, | 3125 Code::StubType type, |
| 3032 IcCheckType check) { | 3126 IcCheckType check) { |
| 3033 Label miss; | 3127 Label miss; |
| 3034 | 3128 |
| 3035 if (check == PROPERTY) { | 3129 if (check == PROPERTY) { |
| 3036 GenerateNameCheck(name, this->name(), &miss); | 3130 GenerateNameCheck(name, this->name(), &miss); |
| 3037 } | 3131 } |
| 3038 | 3132 |
| 3039 __ JumpIfSmi(receiver(), &miss); | 3133 __ JumpIfSmi(receiver(), &miss); |
| 3134 |
| 3040 Register map_reg = scratch1(); | 3135 Register map_reg = scratch1(); |
| 3041 | 3136 __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
| 3042 int receiver_count = receiver_maps->length(); | 3137 int receiver_count = receiver_maps->length(); |
| 3043 int number_of_handled_maps = 0; | 3138 int number_of_handled_maps = 0; |
| 3044 __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); | |
| 3045 for (int current = 0; current < receiver_count; ++current) { | 3139 for (int current = 0; current < receiver_count; ++current) { |
| 3046 Handle<Map> map = receiver_maps->at(current); | 3140 Handle<Map> map = receiver_maps->at(current); |
| 3047 if (!map->is_deprecated()) { | 3141 if (!map->is_deprecated()) { |
| 3048 number_of_handled_maps++; | 3142 number_of_handled_maps++; |
| 3049 __ mov(ip, Operand(receiver_maps->at(current))); | 3143 Label try_next; |
| 3050 __ cmp(map_reg, ip); | 3144 __ Cmp(map_reg, Operand(receiver_maps->at(current))); |
| 3051 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); | 3145 __ B(ne, &try_next); |
| 3146 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); |
| 3147 __ Bind(&try_next); |
| 3052 } | 3148 } |
| 3053 } | 3149 } |
| 3054 ASSERT(number_of_handled_maps != 0); | 3150 ASSERT(number_of_handled_maps != 0); |
| 3055 | 3151 |
| 3056 __ bind(&miss); | 3152 __ Bind(&miss); |
| 3057 TailCallBuiltin(masm(), MissBuiltin(kind())); | 3153 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 3058 | 3154 |
| 3059 // Return the generated code. | 3155 // Return the generated code. |
| 3060 InlineCacheState state = | 3156 InlineCacheState state = |
| 3061 number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; | 3157 (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC; |
| 3062 return GetICCode(kind(), type, name, state); | 3158 return GetICCode(kind(), type, name, state); |
| 3063 } | 3159 } |
| 3064 | 3160 |
| 3065 | 3161 |
| 3066 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( | 3162 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( |
| 3067 MapHandleList* receiver_maps, | 3163 MapHandleList* receiver_maps, |
| 3068 CodeHandleList* handler_stubs, | 3164 CodeHandleList* handler_stubs, |
| 3069 MapHandleList* transitioned_maps) { | 3165 MapHandleList* transitioned_maps) { |
| 3070 Label miss; | 3166 Label miss; |
| 3167 |
| 3168 ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic"); |
| 3169 |
| 3071 __ JumpIfSmi(receiver(), &miss); | 3170 __ JumpIfSmi(receiver(), &miss); |
| 3072 | 3171 |
| 3073 int receiver_count = receiver_maps->length(); | 3172 int receiver_count = receiver_maps->length(); |
| 3074 __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); | 3173 __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
| 3075 for (int i = 0; i < receiver_count; ++i) { | 3174 for (int i = 0; i < receiver_count; i++) { |
| 3076 __ mov(ip, Operand(receiver_maps->at(i))); | 3175 __ Cmp(scratch1(), Operand(receiver_maps->at(i))); |
| 3077 __ cmp(scratch1(), ip); | 3176 |
| 3078 if (transitioned_maps->at(i).is_null()) { | 3177 Label skip; |
| 3079 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); | 3178 __ B(&skip, ne); |
| 3080 } else { | 3179 if (!transitioned_maps->at(i).is_null()) { |
| 3081 Label next_map; | 3180 // This argument is used by the handler stub. For example, see |
| 3082 __ b(ne, &next_map); | 3181 // ElementsTransitionGenerator::GenerateMapChangeElementsTransition. |
| 3083 __ mov(transition_map(), Operand(transitioned_maps->at(i))); | 3182 __ Mov(transition_map(), Operand(transitioned_maps->at(i))); |
| 3084 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); | |
| 3085 __ bind(&next_map); | |
| 3086 } | 3183 } |
| 3184 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
| 3185 __ Bind(&skip); |
| 3087 } | 3186 } |
| 3088 | 3187 |
| 3089 __ bind(&miss); | 3188 __ Bind(&miss); |
| 3090 TailCallBuiltin(masm(), MissBuiltin(kind())); | 3189 TailCallBuiltin(masm(), MissBuiltin(kind())); |
| 3091 | 3190 |
| 3092 // Return the generated code. | |
| 3093 return GetICCode( | 3191 return GetICCode( |
| 3094 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); | 3192 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); |
| 3095 } | 3193 } |
| 3096 | 3194 |
| 3097 | 3195 |
| 3098 #undef __ | 3196 #undef __ |
| 3099 #define __ ACCESS_MASM(masm) | 3197 #define __ ACCESS_MASM(masm) |
| 3100 | 3198 |
| 3101 | |
| 3102 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( | 3199 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
| 3103 MacroAssembler* masm) { | 3200 MacroAssembler* masm) { |
| 3104 // ---------- S t a t e -------------- | 3201 // ---------- S t a t e -------------- |
| 3105 // -- lr : return address | 3202 // -- lr : return address |
| 3106 // -- r0 : key | 3203 // -- x0 : key |
| 3107 // -- r1 : receiver | 3204 // -- x1 : receiver |
| 3108 // ----------------------------------- | 3205 // ----------------------------------- |
| 3109 Label slow, miss_force_generic; | 3206 Label slow, miss_force_generic; |
| 3110 | 3207 |
| 3111 Register key = r0; | 3208 Register result = x0; |
| 3112 Register receiver = r1; | 3209 Register key = x0; |
| 3210 Register receiver = x1; |
| 3113 | 3211 |
| 3114 __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic); | 3212 __ JumpIfNotSmi(key, &miss_force_generic); |
| 3115 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 3213 __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3116 __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); | 3214 __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6); |
| 3117 __ Ret(); | 3215 __ Ret(); |
| 3118 | 3216 |
| 3119 __ bind(&slow); | 3217 __ Bind(&slow); |
| 3120 __ IncrementCounter( | 3218 __ IncrementCounter( |
| 3121 masm->isolate()->counters()->keyed_load_external_array_slow(), | 3219 masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3); |
| 3122 1, r2, r3); | |
| 3123 | |
| 3124 // ---------- S t a t e -------------- | |
| 3125 // -- lr : return address | |
| 3126 // -- r0 : key | |
| 3127 // -- r1 : receiver | |
| 3128 // ----------------------------------- | |
| 3129 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); | 3220 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
| 3130 | 3221 |
| 3131 // Miss case, call the runtime. | 3222 // Miss case, call the runtime. |
| 3132 __ bind(&miss_force_generic); | 3223 __ Bind(&miss_force_generic); |
| 3133 | |
| 3134 // ---------- S t a t e -------------- | |
| 3135 // -- lr : return address | |
| 3136 // -- r0 : key | |
| 3137 // -- r1 : receiver | |
| 3138 // ----------------------------------- | |
| 3139 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); | 3224 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); |
| 3140 } | 3225 } |
| 3141 | 3226 |
| 3142 | 3227 |
| 3143 static void GenerateSmiKeyCheck(MacroAssembler* masm, | 3228 static void GenerateStoreSmiToExternalArray( |
| 3144 Register key, | 3229 MacroAssembler* masm, |
| 3145 Register scratch0, | 3230 ElementsKind elements_kind, |
| 3146 DwVfpRegister double_scratch0, | 3231 Register value, |
| 3147 DwVfpRegister double_scratch1, | 3232 Register key_raw, // Untagged 'key'. |
| 3148 Label* fail) { | 3233 Register elements_ext, // elements[ExternalArray::kExternalPointerOffset] |
| 3149 Label key_ok; | 3234 Register scratch, |
| 3150 // Check for smi or a smi inside a heap number. We convert the heap | 3235 FPRegister double_scratch) { |
| 3151 // number and check if the conversion is exact and fits into the smi | 3236 // Convert the smi in value (x0) to the specified element kind, and store it |
| 3152 // range. | 3237 // in the external array. No input registers are clobbered by this helper, |
| 3153 __ JumpIfSmi(key, &key_ok); | 3238 // other than the scratch registers. |
| 3154 __ CheckMap(key, | |
| 3155 scratch0, | |
| 3156 Heap::kHeapNumberMapRootIndex, | |
| 3157 fail, | |
| 3158 DONT_DO_SMI_CHECK); | |
| 3159 __ sub(ip, key, Operand(kHeapObjectTag)); | |
| 3160 __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); | |
| 3161 __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); | |
| 3162 __ b(ne, fail); | |
| 3163 __ TrySmiTag(key, scratch0, fail); | |
| 3164 __ bind(&key_ok); | |
| 3165 } | |
| 3166 | 3239 |
| 3240 ASSERT(!AreAliased(value, key_raw, elements_ext, scratch, double_scratch)); |
| 3167 | 3241 |
| 3168 void KeyedStoreStubCompiler::GenerateStoreExternalArray( | |
| 3169 MacroAssembler* masm, | |
| 3170 ElementsKind elements_kind) { | |
| 3171 // ---------- S t a t e -------------- | |
| 3172 // -- r0 : value | |
| 3173 // -- r1 : key | |
| 3174 // -- r2 : receiver | |
| 3175 // -- lr : return address | |
| 3176 // ----------------------------------- | |
| 3177 Label slow, check_heap_number, miss_force_generic; | |
| 3178 | |
| 3179 // Register usage. | |
| 3180 Register value = r0; | |
| 3181 Register key = r1; | |
| 3182 Register receiver = r2; | |
| 3183 // r3 mostly holds the elements array or the destination external array. | |
| 3184 | |
| 3185 // This stub is meant to be tail-jumped to, the receiver must already | |
| 3186 // have been verified by the caller to not be a smi. | |
| 3187 | |
| 3188 // Check that the key is a smi or a heap number convertible to a smi. | |
| 3189 GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic); | |
| 3190 | |
| 3191 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 3192 | |
| 3193 // Check that the index is in range | |
| 3194 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
| 3195 __ cmp(key, ip); | |
| 3196 // Unsigned comparison catches both negative and too-large values. | |
| 3197 __ b(hs, &miss_force_generic); | |
| 3198 | |
| 3199 // Handle both smis and HeapNumbers in the fast path. Go to the | |
| 3200 // runtime for all other kinds of values. | |
| 3201 // r3: external array. | |
| 3202 if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { | |
| 3203 // Double to pixel conversion is only implemented in the runtime for now. | |
| 3204 __ UntagAndJumpIfNotSmi(r5, value, &slow); | |
| 3205 } else { | |
| 3206 __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number); | |
| 3207 } | |
| 3208 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
| 3209 | |
| 3210 // r3: base pointer of external storage. | |
| 3211 // r5: value (integer). | |
| 3212 switch (elements_kind) { | 3242 switch (elements_kind) { |
| 3213 case EXTERNAL_PIXEL_ELEMENTS: | 3243 case EXTERNAL_PIXEL_ELEMENTS: |
| 3244 __ SmiUntag(scratch, value); |
| 3214 // Clamp the value to [0..255]. | 3245 // Clamp the value to [0..255]. |
| 3215 __ Usat(r5, 8, Operand(r5)); | 3246 __ Cmp(scratch, Operand(scratch, UXTB)); |
| 3216 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 3247 // If scratch < scratch & 0xff, it must be < 0, so saturate to 0. |
| 3248 __ CzeroX(scratch, lt); |
| 3249 // If scratch > scratch & 0xff, it must be > 255, so saturate to 255. |
| 3250 // This actually generates ~0, but it doesn't matter if we use strb. |
| 3251 __ Csinv(scratch, scratch, xzr, le); |
| 3252 __ Strb(scratch.W(), MemOperand(elements_ext, key_raw)); |
| 3217 break; | 3253 break; |
| 3218 case EXTERNAL_BYTE_ELEMENTS: | 3254 case EXTERNAL_BYTE_ELEMENTS: |
| 3219 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3255 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3220 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 3256 __ SmiUntag(scratch, value); |
| 3257 __ Strb(scratch.W(), MemOperand(elements_ext, key_raw)); |
| 3221 break; | 3258 break; |
| 3222 case EXTERNAL_SHORT_ELEMENTS: | 3259 case EXTERNAL_SHORT_ELEMENTS: |
| 3223 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 3260 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 3224 __ strh(r5, MemOperand(r3, key, LSL, 0)); | 3261 __ SmiUntag(scratch, value); |
| 3262 __ Strh(scratch.W(), |
| 3263 MemOperand(elements_ext, key_raw, LSL, kHalfWordSizeInBytesLog2)); |
| 3225 break; | 3264 break; |
| 3226 case EXTERNAL_INT_ELEMENTS: | 3265 case EXTERNAL_INT_ELEMENTS: |
| 3227 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 3266 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 3228 __ str(r5, MemOperand(r3, key, LSL, 1)); | 3267 __ SmiUntag(scratch, value); |
| 3268 __ Str(scratch.W(), |
| 3269 MemOperand(elements_ext, key_raw, LSL, kWordSizeInBytesLog2)); |
| 3229 break; | 3270 break; |
| 3230 case EXTERNAL_FLOAT_ELEMENTS: | 3271 case EXTERNAL_FLOAT_ELEMENTS: |
| 3231 // Perform int-to-float conversion and store to memory. | 3272 __ SmiUntagToFloat(double_scratch.S(), value); |
| 3232 __ SmiUntag(r4, key); | 3273 __ Str(double_scratch.S(), |
| 3233 StoreIntAsFloat(masm, r3, r4, r5, r7); | 3274 MemOperand(elements_ext, key_raw, LSL, kSRegSizeInBytesLog2)); |
| 3234 break; | 3275 break; |
| 3235 case EXTERNAL_DOUBLE_ELEMENTS: | 3276 case EXTERNAL_DOUBLE_ELEMENTS: |
| 3236 __ vmov(s2, r5); | 3277 __ SmiUntagToDouble(double_scratch, value); |
| 3237 __ vcvt_f64_s32(d0, s2); | 3278 __ Str(double_scratch, |
| 3238 __ add(r3, r3, Operand(key, LSL, 2)); | 3279 MemOperand(elements_ext, key_raw, LSL, kDRegSizeInBytesLog2)); |
| 3239 // r3: effective address of the double element | |
| 3240 __ vstr(d0, r3, 0); | |
| 3241 break; | 3280 break; |
| 3242 case FAST_ELEMENTS: | 3281 case FAST_ELEMENTS: |
| 3243 case FAST_SMI_ELEMENTS: | 3282 case FAST_SMI_ELEMENTS: |
| 3244 case FAST_DOUBLE_ELEMENTS: | 3283 case FAST_DOUBLE_ELEMENTS: |
| 3245 case FAST_HOLEY_ELEMENTS: | 3284 case FAST_HOLEY_ELEMENTS: |
| 3246 case FAST_HOLEY_SMI_ELEMENTS: | 3285 case FAST_HOLEY_SMI_ELEMENTS: |
| 3247 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3286 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3248 case DICTIONARY_ELEMENTS: | 3287 case DICTIONARY_ELEMENTS: |
| 3249 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3288 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3250 UNREACHABLE(); | 3289 UNREACHABLE(); |
| 3251 break; | 3290 break; |
| 3252 } | 3291 } |
| 3253 | 3292 } |
| 3254 // Entry registers are intact, r0 holds the value which is the return value. | 3293 |
| 3294 |
| 3295 static void GenerateStoreHeapNumberToExternalArray( |
| 3296 MacroAssembler* masm, |
| 3297 ElementsKind elements_kind, |
| 3298 Register value, |
| 3299 Register key_raw, // Untagged 'key'. |
| 3300 Register elements_ext, // elements[ExternalArray::kExternalPointerOffset] |
| 3301 Register scratch, |
| 3302 FPRegister double_scratch1, |
| 3303 FPRegister double_scratch2) { |
| 3304 // Convert the heap number in value (x0) to the specified element kind, and |
| 3305 // store it in the external array. No input registers are clobbered by this |
| 3306 // helper, other than the scratch registers. |
| 3307 |
| 3308 ASSERT(!AreAliased(value, key_raw, elements_ext, scratch, |
| 3309 double_scratch1, double_scratch2)); |
| 3310 |
| 3311 FPRegister value_d = double_scratch1; |
| 3312 __ Ldr(value_d, FieldMemOperand(value, HeapNumber::kValueOffset)); |
| 3313 |
| 3314 // Convert the (double) input to an integral type. |
| 3315 switch (elements_kind) { |
| 3316 case EXTERNAL_FLOAT_ELEMENTS: |
| 3317 __ Fcvt(s16, value_d); |
| 3318 __ Str(s16, MemOperand(elements_ext, key_raw, LSL, 2)); |
| 3319 break; |
| 3320 case EXTERNAL_DOUBLE_ELEMENTS: |
| 3321 __ Str(value_d, MemOperand(elements_ext, key_raw, LSL, 3)); |
| 3322 break; |
| 3323 case EXTERNAL_PIXEL_ELEMENTS: |
| 3324 // This conversion follows the WebIDL "[Clamp]" rules: |
| 3325 // - Inputs lower than 0 (including -infinity) produce 0. |
| 3326 // - Inputs higher than 255 (including +infinity) produce 255. |
| 3327 // Also, it seems that PIXEL types use round-to-nearest rather than |
| 3328 // round-towards-zero. |
| 3329 |
| 3330 // Squash +infinity before the conversion, since Fcvtnu will normally |
| 3331 // convert it to 0. |
| 3332 __ Fmov(double_scratch2, 255); |
| 3333 __ Fmin(double_scratch2, double_scratch2, value_d); |
| 3334 |
| 3335 // Convert double to unsigned integer. Values less than zero become zero. |
| 3336 // Values greater than 255 have already been clamped to 255. |
| 3337 __ Fcvtnu(scratch.W(), double_scratch2); |
| 3338 |
| 3339 __ Strb(scratch.W(), MemOperand(elements_ext, key_raw)); |
| 3340 break; |
| 3341 case EXTERNAL_BYTE_ELEMENTS: |
| 3342 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3343 __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W); |
| 3344 __ Strb(scratch.W(), MemOperand(elements_ext, key_raw)); |
| 3345 break; |
| 3346 case EXTERNAL_SHORT_ELEMENTS: |
| 3347 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 3348 __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W); |
| 3349 __ Strh(scratch.W(), |
| 3350 MemOperand(elements_ext, key_raw, LSL, kHalfWordSizeInBytesLog2)); |
| 3351 break; |
| 3352 case EXTERNAL_INT_ELEMENTS: |
| 3353 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 3354 __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W); |
| 3355 __ Str(scratch.W(), |
| 3356 MemOperand(elements_ext, key_raw, LSL, kWordSizeInBytesLog2)); |
| 3357 break; |
| 3358 case FAST_ELEMENTS: |
| 3359 case FAST_SMI_ELEMENTS: |
| 3360 case FAST_DOUBLE_ELEMENTS: |
| 3361 case FAST_HOLEY_ELEMENTS: |
| 3362 case FAST_HOLEY_SMI_ELEMENTS: |
| 3363 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3364 case DICTIONARY_ELEMENTS: |
| 3365 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3366 UNREACHABLE(); |
| 3367 break; |
| 3368 } |
| 3369 } |
| 3370 |
| 3371 |
| 3372 void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
| 3373 MacroAssembler* masm, |
| 3374 ElementsKind elements_kind) { |
| 3375 // ---------- S t a t e -------------- |
| 3376 // -- lr : return address |
| 3377 // -- x0 : value |
| 3378 // -- x1 : key |
| 3379 // -- x2 : receiver |
| 3380 // ----------------------------------- |
| 3381 Label slow, check_heap_number, miss_force_generic; |
| 3382 |
| 3383 // Register usage. |
| 3384 Register value = x0; |
| 3385 Register key = x1; |
| 3386 Register receiver = x2; |
| 3387 |
| 3388 // This stub is meant to be tail-jumped to, the receiver must already |
| 3389 // have been verified by the caller to not be a smi. |
| 3390 if (__ emit_debug_code()) { |
| 3391 Label ok; |
| 3392 __ JumpIfNotSmi(receiver, &ok); |
| 3393 __ Abort("KeyedStoreStubCompiler::GenerateStoreExternalArray: " |
| 3394 "receiver is a SMI\n"); |
| 3395 __ Bind(&ok); |
| 3396 } |
| 3397 |
| 3398 // Check that the key is a smi or a heap number convertible to a smi. |
| 3399 GenerateSmiKeyCheck(masm, key, x10, d16, d17, &miss_force_generic); |
| 3400 |
| 3401 Register elements = x3; |
| 3402 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3403 |
| 3404 Register key_raw = x4; |
| 3405 __ SmiUntag(key_raw, key); |
| 3406 |
| 3407 // Check that the key is within bounds. An unsigned comparison catches both |
| 3408 // negative and out-of-bound indexes. |
| 3409 __ Ldrsw(x10, |
| 3410 UntagSmiFieldMemOperand(elements, ExternalArray::kLengthOffset)); |
| 3411 __ Cmp(key_raw.W(), w10); |
| 3412 __ B(&miss_force_generic, hs); |
| 3413 |
| 3414 // Get the externally-stored elements. |
| 3415 Register elements_ext = x5; |
| 3416 __ Ldr(elements_ext, |
| 3417 FieldMemOperand(elements, ExternalArray::kExternalPointerOffset)); |
| 3418 |
| 3419 // x0: value |
| 3420 // x1: key |
| 3421 // x2: receiver |
| 3422 // x3: elements |
| 3423 // x4: key_raw Untagged 'key'. |
| 3424 // x5: elements_ext From elements[ExternalArray::kExternalPointerOffset]. |
| 3425 |
| 3426 // Handle both smis and HeapNumbers in the fast path. Go to the |
| 3427 // runtime for all other kinds of values. |
| 3428 __ JumpIfNotSmi(value, &check_heap_number); |
| 3429 |
| 3430 GenerateStoreSmiToExternalArray( |
| 3431 masm, elements_kind, value, key_raw, elements_ext, x10, d16); |
| 3432 // Entry registers are intact and x0 holds 'value', which is the return value. |
| 3255 __ Ret(); | 3433 __ Ret(); |
| 3256 | 3434 |
| 3257 if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { | 3435 __ Bind(&check_heap_number); |
| 3258 // r3: external array. | 3436 // Convert the double at 'value' to the specified element kind. |
| 3259 __ bind(&check_heap_number); | 3437 // |
| 3260 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); | 3438 // x0: value |
| 3261 __ b(ne, &slow); | 3439 // x1: key |
| 3262 | 3440 // x2: receiver |
| 3263 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | 3441 // x3: elements |
| 3264 | 3442 // x4: key_raw Untagged 'key'. |
| 3265 // r3: base pointer of external storage. | 3443 // x5: elements_ext From elements[ExternalArray::kExternalPointerOffset]. |
| 3266 | 3444 __ JumpIfNotObjectType(value, x10, x11, HEAP_NUMBER_TYPE, &slow); |
| 3267 // The WebGL specification leaves the behavior of storing NaN and | 3445 |
| 3268 // +/-Infinity into integer arrays basically undefined. For more | 3446 GenerateStoreHeapNumberToExternalArray( |
| 3269 // reproducible behavior, convert these to zero. | 3447 masm, elements_kind, value, key_raw, elements_ext, x10, d16, d17); |
| 3270 | 3448 // Entry registers are intact and x0 holds 'value', which is the return value. |
| 3271 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3449 __ Ret(); |
| 3272 // vldr requires offset to be a multiple of 4 so we can not | 3450 |
| 3273 // include -kHeapObjectTag into it. | 3451 __ Bind(&slow); |
| 3274 __ sub(r5, r0, Operand(kHeapObjectTag)); | 3452 // ---------- S t a t e -------------- |
| 3275 __ vldr(d0, r5, HeapNumber::kValueOffset); | 3453 // -- lr : return address |
| 3276 __ add(r5, r3, Operand(key, LSL, 1)); | 3454 // -- x0 : value |
| 3277 __ vcvt_f32_f64(s0, d0); | 3455 // -- x1 : key |
| 3278 __ vstr(s0, r5, 0); | 3456 // -- x2 : receiver |
| 3279 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3457 // ----------------------------------- |
| 3280 __ sub(r5, r0, Operand(kHeapObjectTag)); | 3458 __ IncrementCounter( |
| 3281 __ vldr(d0, r5, HeapNumber::kValueOffset); | 3459 masm->isolate()->counters()->keyed_load_external_array_slow(), |
| 3282 __ add(r5, r3, Operand(key, LSL, 2)); | 3460 1, x10, x11); |
| 3283 __ vstr(d0, r5, 0); | 3461 |
| 3462 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); |
| 3463 |
| 3464 // Miss case, call the runtime. |
| 3465 __ Bind(&miss_force_generic); |
| 3466 // ---------- S t a t e -------------- |
| 3467 // -- lr : return address |
| 3468 // -- x0 : value |
| 3469 // -- x1 : key |
| 3470 // -- x2 : receiver |
| 3471 // ----------------------------------- |
| 3472 |
| 3473 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); |
| 3474 } |
| 3475 |
| 3476 |
| 3477 static void GenerateStoreFastSmiOrDoubleElement( |
| 3478 MacroAssembler* masm, |
| 3479 bool is_js_array, |
| 3480 ElementsKind elements_kind, |
| 3481 KeyedAccessStoreMode store_mode, |
| 3482 bool store_double) { |
| 3483 Label miss_force_generic, transition_elements_kind, grow, slow; |
| 3484 Label finish_store, check_capacity; |
| 3485 |
| 3486 Register value = x0; |
| 3487 Register key = x1; |
| 3488 Register receiver = x2; |
| 3489 |
| 3490 // This stub is meant to be tail-jumped to, the receiver must already |
| 3491 // have been verified by the caller to not be a smi. |
| 3492 if (__ emit_debug_code()) { |
| 3493 Label ok; |
| 3494 __ JumpIfNotSmi(receiver, &ok); |
| 3495 __ Abort("GenerateStoreFastSmiOrDoubleElement: receiver is a SMI\n"); |
| 3496 __ Bind(&ok); |
| 3497 } |
| 3498 |
| 3499 // Check that the key is a smi or a heap number convertible to a smi. |
| 3500 GenerateSmiKeyCheck(masm, key, x10, d16, d17, &miss_force_generic); |
| 3501 |
| 3502 if (!store_double && IsFastSmiElementsKind(elements_kind)) { |
| 3503 __ JumpIfNotSmi(value, &transition_elements_kind); |
| 3504 } |
| 3505 |
| 3506 Register elements = x3; |
| 3507 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3508 |
| 3509 // Check that the key is within bounds. |
| 3510 Register length = x4; |
| 3511 if (is_js_array) { |
| 3512 __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 3513 } else { |
| 3514 __ Ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 3515 } |
| 3516 // Compare smis. An unsigned comparison catches both negative and out-of-bound |
| 3517 // indexes. |
| 3518 __ Cmp(key, length); |
| 3519 if (is_js_array && IsGrowStoreMode(store_mode)) { |
| 3520 // We can handle the case where the array needs to grow by a single element |
| 3521 // without falling back to run-time. |
| 3522 __ B(&grow, eq); |
| 3523 } |
| 3524 // Fall back to the run-time if the key is out of bounds. |
| 3525 __ B(&miss_force_generic, hs); |
| 3526 |
| 3527 if (store_double) { |
| 3528 __ Bind(&finish_store); |
| 3529 __ StoreNumberToDoubleElements(value, key, elements, x10, d16, d17, |
| 3530 &transition_elements_kind); |
| 3531 } else { |
| 3532 // Make sure elements is a fast element array, not 'cow'. |
| 3533 // TODO(jbramley): Why is this only done when storing a smi? |
| 3534 __ CheckMap(elements, x10, |
| 3535 Heap::kFixedArrayMapRootIndex, |
| 3536 &miss_force_generic, |
| 3537 DONT_DO_SMI_CHECK); |
| 3538 |
| 3539 __ Bind(&finish_store); |
| 3540 |
| 3541 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); |
| 3542 __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
| 3543 __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2)); |
| 3544 __ Str(value, MemOperand(x10)); |
| 3545 if (!IsFastSmiElementsKind(elements_kind)) { |
| 3546 ASSERT(IsFastObjectElementsKind(elements_kind)); |
| 3547 __ Mov(receiver, value); |
| 3548 __ RecordWrite(elements, // Object. |
| 3549 x10, // Address. |
| 3550 receiver, // Value. |
| 3551 kLRHasNotBeenSaved, |
| 3552 kDontSaveFPRegs, |
| 3553 EMIT_REMEMBERED_SET, |
| 3554 INLINE_SMI_CHECK, |
| 3555 EXPECT_PREGENERATED); |
| 3556 } |
| 3557 // Value (x0) is preserved. |
| 3558 } |
| 3559 __ Ret(); |
| 3560 |
| 3561 __ Bind(&miss_force_generic); |
| 3562 KeyedStoreStubCompiler::TailCallBuiltin( |
| 3563 masm, Builtins::kKeyedStoreIC_MissForceGeneric); |
| 3564 |
| 3565 __ Bind(&transition_elements_kind); |
| 3566 KeyedStoreStubCompiler::TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); |
| 3567 |
| 3568 if (is_js_array && IsGrowStoreMode(store_mode)) { |
| 3569 // Grow a JSArray by a single element. |
| 3570 __ Bind(&grow); |
| 3571 |
| 3572 // x1: key |
| 3573 // x2: receiver |
| 3574 // x3: elements From receiver[JSObject::kElementsOffset]. |
| 3575 // x4: length From receiver[JSArray::kLengthOffset]. |
| 3576 |
| 3577 if (__ emit_debug_code()) { |
| 3578 // Check that 'elements' and 'length' are pre-loaded. |
| 3579 __ Ldr(x10, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3580 __ Cmp(x10, x3); |
| 3581 __ Ldr(x11, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 3582 __ Ccmp(x11, x4, NoFlag, eq); |
| 3583 |
| 3584 // Check that the key is equal to length, so we need to extend the array |
| 3585 // by one element. |
| 3586 __ Ccmp(x1, x4, NoFlag, eq); |
| 3587 |
| 3588 __ Check(eq, "GenerateStoreFastSmiOrDoubleElement [grow]: " |
| 3589 "Preconditions were not met."); |
| 3590 } |
| 3591 |
| 3592 __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, |
| 3593 &check_capacity); |
| 3594 |
| 3595 // The array is currently empty, so allocate a new backing store. |
| 3596 int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); |
| 3597 __ Allocate(size, elements, x10, x11, &slow, TAG_OBJECT); |
| 3598 Heap::RootListIndex root_index = store_double |
| 3599 ? Heap::kFixedDoubleArrayMapRootIndex |
| 3600 : Heap::kFixedArrayMapRootIndex; |
| 3601 __ LoadRoot(x12, root_index); |
| 3602 __ Str(x12, FieldMemOperand(elements, JSObject::kMapOffset)); |
| 3603 __ Mov(x13, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); |
| 3604 __ Str(x13, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 3605 |
| 3606 // Store the element at index zero, and fill the rest with the hole value. |
| 3607 if (store_double) { |
| 3608 __ StoreNumberToDoubleElements(value, |
| 3609 key, |
| 3610 elements, |
| 3611 x10, |
| 3612 d16, |
| 3613 d17, |
| 3614 &transition_elements_kind); |
| 3615 __ Fmov(d16, rawbits_to_double(kHoleNanInt64)); |
| 3616 for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { |
| 3617 __ Str(d16, FieldMemOperand(elements, |
| 3618 FixedDoubleArray::OffsetOfElementAt(i))); |
| 3619 } |
| 3284 } else { | 3620 } else { |
| 3285 // Hoisted load. vldr requires offset to be a multiple of 4 so we can | 3621 __ Str(value, FieldMemOperand(elements, FixedArray::SizeFor(0))); |
| 3286 // not include -kHeapObjectTag into it. | 3622 __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); |
| 3287 __ sub(r5, value, Operand(kHeapObjectTag)); | 3623 for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { |
| 3288 __ vldr(d0, r5, HeapNumber::kValueOffset); | 3624 __ Str(x10, FieldMemOperand(elements, |
| 3289 __ ECMAToInt32(r5, d0, r6, r7, r9, d1); | 3625 FixedArray::OffsetOfElementAt(i))); |
| 3290 | |
| 3291 switch (elements_kind) { | |
| 3292 case EXTERNAL_BYTE_ELEMENTS: | |
| 3293 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | |
| 3294 __ strb(r5, MemOperand(r3, key, LSR, 1)); | |
| 3295 break; | |
| 3296 case EXTERNAL_SHORT_ELEMENTS: | |
| 3297 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | |
| 3298 __ strh(r5, MemOperand(r3, key, LSL, 0)); | |
| 3299 break; | |
| 3300 case EXTERNAL_INT_ELEMENTS: | |
| 3301 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | |
| 3302 __ str(r5, MemOperand(r3, key, LSL, 1)); | |
| 3303 break; | |
| 3304 case EXTERNAL_PIXEL_ELEMENTS: | |
| 3305 case EXTERNAL_FLOAT_ELEMENTS: | |
| 3306 case EXTERNAL_DOUBLE_ELEMENTS: | |
| 3307 case FAST_ELEMENTS: | |
| 3308 case FAST_SMI_ELEMENTS: | |
| 3309 case FAST_DOUBLE_ELEMENTS: | |
| 3310 case FAST_HOLEY_ELEMENTS: | |
| 3311 case FAST_HOLEY_SMI_ELEMENTS: | |
| 3312 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
| 3313 case DICTIONARY_ELEMENTS: | |
| 3314 case NON_STRICT_ARGUMENTS_ELEMENTS: | |
| 3315 UNREACHABLE(); | |
| 3316 break; | |
| 3317 } | 3626 } |
| 3318 } | 3627 } |
| 3319 | 3628 |
| 3320 // Entry registers are intact, r0 holds the value which is the return | 3629 // Install the new backing store in the JSArray. |
| 3321 // value. | 3630 __ Str(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3631 __ RecordWriteField(receiver, JSObject::kElementsOffset, elements, |
| 3632 x10, kLRHasNotBeenSaved, kDontSaveFPRegs, |
| 3633 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK, |
| 3634 EXPECT_PREGENERATED); |
| 3635 |
| 3636 // Increment the length of the array. |
| 3637 __ Mov(length, Operand(Smi::FromInt(1))); |
| 3638 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 3322 __ Ret(); | 3639 __ Ret(); |
| 3323 } | 3640 |
| 3324 | 3641 __ Bind(&check_capacity); |
| 3325 // Slow case, key and receiver still in r0 and r1. | 3642 |
| 3326 __ bind(&slow); | 3643 if (!store_double) { |
| 3327 __ IncrementCounter( | 3644 // Check for cow elements, in general they are not handled by this stub |
| 3328 masm->isolate()->counters()->keyed_load_external_array_slow(), | 3645 // TODO(jbramley): Why is this only done when storing a smi? |
| 3329 1, r2, r3); | 3646 __ CheckMap(elements, x10, |
| 3330 | 3647 Heap::kFixedCOWArrayMapRootIndex, |
| 3331 // ---------- S t a t e -------------- | 3648 &miss_force_generic, |
| 3332 // -- lr : return address | 3649 DONT_DO_SMI_CHECK); |
| 3333 // -- r0 : key | 3650 } |
| 3334 // -- r1 : receiver | 3651 |
| 3335 // ----------------------------------- | 3652 // See if there are any free preallocated slots. If not, defer to the |
| 3336 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); | 3653 // runtime to extend the backing store. |
| 3337 | 3654 __ Ldr(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 3338 // Miss case, call the runtime. | 3655 __ Cmp(length, x10); |
| 3339 __ bind(&miss_force_generic); | 3656 __ B(&slow, hs); |
| 3340 | 3657 |
| 3341 // ---------- S t a t e -------------- | 3658 // Grow the array and finish the store. |
| 3342 // -- lr : return address | 3659 __ Add(length, length, Operand(Smi::FromInt(1))); |
| 3343 // -- r0 : key | 3660 __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 3344 // -- r1 : receiver | 3661 __ B(&finish_store); |
| 3345 // ----------------------------------- | 3662 |
| 3346 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); | 3663 __ Bind(&slow); |
| 3664 KeyedStoreStubCompiler::TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); |
| 3665 } |
| 3347 } | 3666 } |
| 3348 | 3667 |
| 3349 | 3668 |
| 3350 void KeyedStoreStubCompiler::GenerateStoreFastElement( | 3669 void KeyedStoreStubCompiler::GenerateStoreFastElement( |
| 3351 MacroAssembler* masm, | 3670 MacroAssembler* masm, |
| 3352 bool is_js_array, | 3671 bool is_js_array, |
| 3353 ElementsKind elements_kind, | 3672 ElementsKind elements_kind, |
| 3354 KeyedAccessStoreMode store_mode) { | 3673 KeyedAccessStoreMode store_mode) { |
| 3674 |
| 3355 // ----------- S t a t e ------------- | 3675 // ----------- S t a t e ------------- |
| 3356 // -- r0 : value | |
| 3357 // -- r1 : key | |
| 3358 // -- r2 : receiver | |
| 3359 // -- lr : return address | 3676 // -- lr : return address |
| 3360 // -- r3 : scratch | 3677 // -- x0 : value |
| 3361 // -- r4 : scratch (elements) | 3678 // -- x1 : key |
| 3679 // -- x2 : receiver |
| 3362 // ----------------------------------- | 3680 // ----------------------------------- |
| 3363 Label miss_force_generic, transition_elements_kind, grow, slow; | 3681 |
| 3364 Label finish_store, check_capacity; | 3682 GenerateStoreFastSmiOrDoubleElement(masm, is_js_array, elements_kind, |
| 3365 | 3683 store_mode, false); |
| 3366 Register value_reg = r0; | |
| 3367 Register key_reg = r1; | |
| 3368 Register receiver_reg = r2; | |
| 3369 Register scratch = r4; | |
| 3370 Register elements_reg = r3; | |
| 3371 Register length_reg = r5; | |
| 3372 Register scratch2 = r6; | |
| 3373 | |
| 3374 // This stub is meant to be tail-jumped to, the receiver must already | |
| 3375 // have been verified by the caller to not be a smi. | |
| 3376 | |
| 3377 // Check that the key is a smi or a heap number convertible to a smi. | |
| 3378 GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic); | |
| 3379 | |
| 3380 if (IsFastSmiElementsKind(elements_kind)) { | |
| 3381 __ JumpIfNotSmi(value_reg, &transition_elements_kind); | |
| 3382 } | |
| 3383 | |
| 3384 // Check that the key is within bounds. | |
| 3385 __ ldr(elements_reg, | |
| 3386 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3387 if (is_js_array) { | |
| 3388 __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3389 } else { | |
| 3390 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | |
| 3391 } | |
| 3392 // Compare smis. | |
| 3393 __ cmp(key_reg, scratch); | |
| 3394 if (is_js_array && IsGrowStoreMode(store_mode)) { | |
| 3395 __ b(hs, &grow); | |
| 3396 } else { | |
| 3397 __ b(hs, &miss_force_generic); | |
| 3398 } | |
| 3399 | |
| 3400 // Make sure elements is a fast element array, not 'cow'. | |
| 3401 __ CheckMap(elements_reg, | |
| 3402 scratch, | |
| 3403 Heap::kFixedArrayMapRootIndex, | |
| 3404 &miss_force_generic, | |
| 3405 DONT_DO_SMI_CHECK); | |
| 3406 | |
| 3407 __ bind(&finish_store); | |
| 3408 if (IsFastSmiElementsKind(elements_kind)) { | |
| 3409 __ add(scratch, | |
| 3410 elements_reg, | |
| 3411 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 3412 __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg)); | |
| 3413 __ str(value_reg, MemOperand(scratch)); | |
| 3414 } else { | |
| 3415 ASSERT(IsFastObjectElementsKind(elements_kind)); | |
| 3416 __ add(scratch, | |
| 3417 elements_reg, | |
| 3418 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 3419 __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg)); | |
| 3420 __ str(value_reg, MemOperand(scratch)); | |
| 3421 __ mov(receiver_reg, value_reg); | |
| 3422 __ RecordWrite(elements_reg, // Object. | |
| 3423 scratch, // Address. | |
| 3424 receiver_reg, // Value. | |
| 3425 kLRHasNotBeenSaved, | |
| 3426 kDontSaveFPRegs); | |
| 3427 } | |
| 3428 // value_reg (r0) is preserved. | |
| 3429 // Done. | |
| 3430 __ Ret(); | |
| 3431 | |
| 3432 __ bind(&miss_force_generic); | |
| 3433 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); | |
| 3434 | |
| 3435 __ bind(&transition_elements_kind); | |
| 3436 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); | |
| 3437 | |
| 3438 if (is_js_array && IsGrowStoreMode(store_mode)) { | |
| 3439 // Grow the array by a single element if possible. | |
| 3440 __ bind(&grow); | |
| 3441 | |
| 3442 // Make sure the array is only growing by a single element, anything else | |
| 3443 // must be handled by the runtime. Flags already set by previous compare. | |
| 3444 __ b(ne, &miss_force_generic); | |
| 3445 | |
| 3446 // Check for the empty array, and preallocate a small backing store if | |
| 3447 // possible. | |
| 3448 __ ldr(length_reg, | |
| 3449 FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3450 __ ldr(elements_reg, | |
| 3451 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3452 __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex); | |
| 3453 __ b(ne, &check_capacity); | |
| 3454 | |
| 3455 int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); | |
| 3456 __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT); | |
| 3457 | |
| 3458 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); | |
| 3459 __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset)); | |
| 3460 __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); | |
| 3461 __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | |
| 3462 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | |
| 3463 for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) { | |
| 3464 __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i))); | |
| 3465 } | |
| 3466 | |
| 3467 // Store the element at index zero. | |
| 3468 __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0))); | |
| 3469 | |
| 3470 // Install the new backing store in the JSArray. | |
| 3471 __ str(elements_reg, | |
| 3472 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3473 __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, | |
| 3474 scratch, kLRHasNotBeenSaved, kDontSaveFPRegs, | |
| 3475 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 3476 | |
| 3477 // Increment the length of the array. | |
| 3478 __ mov(length_reg, Operand(Smi::FromInt(1))); | |
| 3479 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3480 __ Ret(); | |
| 3481 | |
| 3482 __ bind(&check_capacity); | |
| 3483 // Check for cow elements, in general they are not handled by this stub | |
| 3484 __ CheckMap(elements_reg, | |
| 3485 scratch, | |
| 3486 Heap::kFixedCOWArrayMapRootIndex, | |
| 3487 &miss_force_generic, | |
| 3488 DONT_DO_SMI_CHECK); | |
| 3489 | |
| 3490 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | |
| 3491 __ cmp(length_reg, scratch); | |
| 3492 __ b(hs, &slow); | |
| 3493 | |
| 3494 // Grow the array and finish the store. | |
| 3495 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); | |
| 3496 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3497 __ jmp(&finish_store); | |
| 3498 | |
| 3499 __ bind(&slow); | |
| 3500 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); | |
| 3501 } | |
| 3502 } | 3684 } |
| 3503 | 3685 |
| 3504 | 3686 |
| 3505 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( | 3687 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
| 3506 MacroAssembler* masm, | 3688 MacroAssembler* masm, |
| 3507 bool is_js_array, | 3689 bool is_js_array, |
| 3508 KeyedAccessStoreMode store_mode) { | 3690 KeyedAccessStoreMode store_mode) { |
| 3691 |
| 3509 // ----------- S t a t e ------------- | 3692 // ----------- S t a t e ------------- |
| 3510 // -- r0 : value | |
| 3511 // -- r1 : key | |
| 3512 // -- r2 : receiver | |
| 3513 // -- lr : return address | 3693 // -- lr : return address |
| 3514 // -- r3 : scratch (elements backing store) | 3694 // -- x0 : value |
| 3515 // -- r4 : scratch | 3695 // -- x1 : key |
| 3516 // -- r5 : scratch | 3696 // -- x2 : receiver |
| 3517 // ----------------------------------- | 3697 // ----------- S t a t e ------------- |
| 3518 Label miss_force_generic, transition_elements_kind, grow, slow; | 3698 |
| 3519 Label finish_store, check_capacity; | 3699 GenerateStoreFastSmiOrDoubleElement(masm, is_js_array, FAST_DOUBLE_ELEMENTS, |
| 3520 | 3700 store_mode, true); |
| 3521 Register value_reg = r0; | 3701 } |
| 3522 Register key_reg = r1; | 3702 |
| 3523 Register receiver_reg = r2; | |
| 3524 Register elements_reg = r3; | |
| 3525 Register scratch1 = r4; | |
| 3526 Register scratch2 = r5; | |
| 3527 Register length_reg = r7; | |
| 3528 | |
| 3529 // This stub is meant to be tail-jumped to, the receiver must already | |
| 3530 // have been verified by the caller to not be a smi. | |
| 3531 | |
| 3532 // Check that the key is a smi or a heap number convertible to a smi. | |
| 3533 GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic); | |
| 3534 | |
| 3535 __ ldr(elements_reg, | |
| 3536 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3537 | |
| 3538 // Check that the key is within bounds. | |
| 3539 if (is_js_array) { | |
| 3540 __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3541 } else { | |
| 3542 __ ldr(scratch1, | |
| 3543 FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | |
| 3544 } | |
| 3545 // Compare smis, unsigned compare catches both negative and out-of-bound | |
| 3546 // indexes. | |
| 3547 __ cmp(key_reg, scratch1); | |
| 3548 if (IsGrowStoreMode(store_mode)) { | |
| 3549 __ b(hs, &grow); | |
| 3550 } else { | |
| 3551 __ b(hs, &miss_force_generic); | |
| 3552 } | |
| 3553 | |
| 3554 __ bind(&finish_store); | |
| 3555 __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg, | |
| 3556 scratch1, &transition_elements_kind); | |
| 3557 __ Ret(); | |
| 3558 | |
| 3559 // Handle store cache miss, replacing the ic with the generic stub. | |
| 3560 __ bind(&miss_force_generic); | |
| 3561 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); | |
| 3562 | |
| 3563 __ bind(&transition_elements_kind); | |
| 3564 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); | |
| 3565 | |
| 3566 if (is_js_array && IsGrowStoreMode(store_mode)) { | |
| 3567 // Grow the array by a single element if possible. | |
| 3568 __ bind(&grow); | |
| 3569 | |
| 3570 // Make sure the array is only growing by a single element, anything else | |
| 3571 // must be handled by the runtime. Flags already set by previous compare. | |
| 3572 __ b(ne, &miss_force_generic); | |
| 3573 | |
| 3574 // Transition on values that can't be stored in a FixedDoubleArray. | |
| 3575 Label value_is_smi; | |
| 3576 __ JumpIfSmi(value_reg, &value_is_smi); | |
| 3577 __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); | |
| 3578 __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex); | |
| 3579 __ b(ne, &transition_elements_kind); | |
| 3580 __ bind(&value_is_smi); | |
| 3581 | |
| 3582 // Check for the empty array, and preallocate a small backing store if | |
| 3583 // possible. | |
| 3584 __ ldr(length_reg, | |
| 3585 FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3586 __ ldr(elements_reg, | |
| 3587 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3588 __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex); | |
| 3589 __ b(ne, &check_capacity); | |
| 3590 | |
| 3591 int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); | |
| 3592 __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); | |
| 3593 | |
| 3594 // Initialize the new FixedDoubleArray. | |
| 3595 __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); | |
| 3596 __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); | |
| 3597 __ mov(scratch1, | |
| 3598 Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); | |
| 3599 __ str(scratch1, | |
| 3600 FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); | |
| 3601 | |
| 3602 __ mov(scratch1, elements_reg); | |
| 3603 __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1, | |
| 3604 scratch2, &transition_elements_kind); | |
| 3605 | |
| 3606 __ mov(scratch1, Operand(kHoleNanLower32)); | |
| 3607 __ mov(scratch2, Operand(kHoleNanUpper32)); | |
| 3608 for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { | |
| 3609 int offset = FixedDoubleArray::OffsetOfElementAt(i); | |
| 3610 __ str(scratch1, FieldMemOperand(elements_reg, offset)); | |
| 3611 __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); | |
| 3612 } | |
| 3613 | |
| 3614 // Install the new backing store in the JSArray. | |
| 3615 __ str(elements_reg, | |
| 3616 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3617 __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg, | |
| 3618 scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs, | |
| 3619 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 3620 | |
| 3621 // Increment the length of the array. | |
| 3622 __ mov(length_reg, Operand(Smi::FromInt(1))); | |
| 3623 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3624 __ ldr(elements_reg, | |
| 3625 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
| 3626 __ Ret(); | |
| 3627 | |
| 3628 __ bind(&check_capacity); | |
| 3629 // Make sure that the backing store can hold additional elements. | |
| 3630 __ ldr(scratch1, | |
| 3631 FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); | |
| 3632 __ cmp(length_reg, scratch1); | |
| 3633 __ b(hs, &slow); | |
| 3634 | |
| 3635 // Grow the array and finish the store. | |
| 3636 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); | |
| 3637 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | |
| 3638 __ jmp(&finish_store); | |
| 3639 | |
| 3640 __ bind(&slow); | |
| 3641 TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); | |
| 3642 } | |
| 3643 } | |
| 3644 | |
| 3645 | |
| 3646 #undef __ | |
| 3647 | 3703 |
| 3648 } } // namespace v8::internal | 3704 } } // namespace v8::internal |
| 3649 | 3705 |
| 3650 #endif // V8_TARGET_ARCH_ARM | 3706 #endif // V8_TARGET_ARCH_A64 |
| OLD | NEW |