OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
10 #include "src/ic-inl.h" | 10 #include "src/ic-inl.h" |
11 #include "src/stub-cache.h" | 11 #include "src/stub-cache.h" |
12 | 12 |
13 namespace v8 { | 13 namespace v8 { |
14 namespace internal { | 14 namespace internal { |
15 | 15 |
16 #define __ ACCESS_MASM(masm) | 16 #define __ ACCESS_MASM(masm) |
17 | 17 |
18 | 18 |
19 static void ProbeTable(Isolate* isolate, | 19 static void ProbeTable(Isolate* isolate, |
20 MacroAssembler* masm, | 20 MacroAssembler* masm, |
21 Code::Flags flags, | 21 Code::Flags flags, |
22 StubCache::Table table, | 22 StubCache::Table table, |
23 Register receiver, | 23 Register receiver, |
24 Register name, | 24 Register name, |
25 // Number of the cache entry, not scaled. | 25 // Number of the cache entry, not scaled. |
26 Register offset, | 26 Register offset, |
27 Register scratch, | 27 Register scratch, |
28 Register scratch2, | 28 Register scratch2, |
29 Register offset_scratch) { | 29 Register offset_scratch) { |
30 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 30 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
31 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 31 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
32 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | 32 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
33 | 33 |
34 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | 34 uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address()); |
35 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | 35 uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address()); |
36 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | 36 uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address()); |
37 | 37 |
38 // Check the relative positions of the address fields. | 38 // Check the relative positions of the address fields. |
39 ASSERT(value_off_addr > key_off_addr); | 39 ASSERT(value_off_addr > key_off_addr); |
40 ASSERT((value_off_addr - key_off_addr) % 4 == 0); | 40 ASSERT((value_off_addr - key_off_addr) % 4 == 0); |
41 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); | 41 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); |
42 ASSERT(map_off_addr > key_off_addr); | 42 ASSERT(map_off_addr > key_off_addr); |
43 ASSERT((map_off_addr - key_off_addr) % 4 == 0); | 43 ASSERT((map_off_addr - key_off_addr) % 4 == 0); |
44 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); | 44 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); |
45 | 45 |
46 Label miss; | 46 Label miss; |
47 Register base_addr = scratch; | 47 Register base_addr = scratch; |
48 scratch = no_reg; | 48 scratch = no_reg; |
49 | 49 |
50 // Multiply by 3 because there are 3 fields per entry (name, code, map). | 50 // Multiply by 3 because there are 3 fields per entry (name, code, map). |
51 __ sll(offset_scratch, offset, 1); | 51 __ dsll(offset_scratch, offset, 1); |
52 __ Addu(offset_scratch, offset_scratch, offset); | 52 __ Daddu(offset_scratch, offset_scratch, offset); |
53 | 53 |
54 // Calculate the base address of the entry. | 54 // Calculate the base address of the entry. |
55 __ li(base_addr, Operand(key_offset)); | 55 __ li(base_addr, Operand(key_offset)); |
56 __ sll(at, offset_scratch, kPointerSizeLog2); | 56 __ dsll(at, offset_scratch, kPointerSizeLog2); |
57 __ Addu(base_addr, base_addr, at); | 57 __ Daddu(base_addr, base_addr, at); |
58 | 58 |
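The dsll/Daddu pair above forms offset * 3 as (offset << 1) + offset, and the second dsll scales the field count by the pointer size. A minimal C++ sketch of the same arithmetic, with hypothetical names (three pointer-sized fields per entry):

    uint64_t EntryByteOffset(uint64_t entry_number, int pointer_size_log2) {
      uint64_t fields = (entry_number << 1) + entry_number;  // entry * 3
      return fields << pointer_size_log2;                    // fields * 8 on MIPS64
    }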
59 // Check that the key in the entry matches the name. | 59 // Check that the key in the entry matches the name. |
60 __ lw(at, MemOperand(base_addr, 0)); | 60 __ ld(at, MemOperand(base_addr, 0)); |
61 __ Branch(&miss, ne, name, Operand(at)); | 61 __ Branch(&miss, ne, name, Operand(at)); |
62 | 62 |
63 // Check the map matches. | 63 // Check the map matches. |
64 __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr)); | 64 __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr)); |
65 __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 65 __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
66 __ Branch(&miss, ne, at, Operand(scratch2)); | 66 __ Branch(&miss, ne, at, Operand(scratch2)); |
67 | 67 |
68 // Get the code entry from the cache. | 68 // Get the code entry from the cache. |
69 Register code = scratch2; | 69 Register code = scratch2; |
70 scratch2 = no_reg; | 70 scratch2 = no_reg; |
71 __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | 71 __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
72 | 72 |
73 // Check that the flags match what we're looking for. | 73 // Check that the flags match what we're looking for. |
74 Register flags_reg = base_addr; | 74 Register flags_reg = base_addr; |
75 base_addr = no_reg; | 75 base_addr = no_reg; |
76 __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | 76 __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
77 __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup)); | 77 __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup)); |
78 __ Branch(&miss, ne, flags_reg, Operand(flags)); | 78 __ Branch(&miss, ne, flags_reg, Operand(flags)); |
79 | 79 |
80 #ifdef DEBUG | 80 #ifdef DEBUG |
81 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | 81 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
82 __ jmp(&miss); | 82 __ jmp(&miss); |
83 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | 83 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
84 __ jmp(&miss); | 84 __ jmp(&miss); |
85 } | 85 } |
86 #endif | 86 #endif |
87 | 87 |
88 // Jump to the first instruction in the code stub. | 88 // Jump to the first instruction in the code stub. |
89 __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 89 __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
90 __ Jump(at); | 90 __ Jump(at); |
91 | 91 |
92 // Miss: fall through. | 92 // Miss: fall through. |
93 __ bind(&miss); | 93 __ bind(&miss); |
94 } | 94 } |
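For reference, the whole probe sequence above corresponds to roughly this C++ (a sketch, not the real helper; the Entry layout and the flags masking mirror the emitted instructions):

    struct Entry { Name* key; Code* value; Map* map; };  // 3 * kPointerSize

    Code* TryProbe(Entry* table, uint64_t index, Name* name,
                   Map* receiver_map, uint32_t lookup_flags) {
      Entry* e = &table[index];
      if (e->key != name) return NULL;          // key mismatch -> miss
      if (e->map != receiver_map) return NULL;  // map mismatch -> miss
      uint32_t flags = e->value->flags() & ~Code::kFlagsNotUsedInLookup;
      if (flags != lookup_flags) return NULL;   // flags mismatch -> miss
      return e->value;  // caller jumps to the code object's first instruction
    }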
95 | 95 |
96 | 96 |
97 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, | 97 void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
98 Label* miss_label, | 98 Label* miss_label, |
99 Register receiver, | 99 Register receiver, |
100 Handle<Name> name, | 100 Handle<Name> name, |
101 Register scratch0, | 101 Register scratch0, |
102 Register scratch1) { | 102 Register scratch1) { |
103 ASSERT(name->IsUniqueName()); | 103 ASSERT(name->IsUniqueName()); |
104 ASSERT(!receiver.is(scratch0)); | 104 ASSERT(!receiver.is(scratch0)); |
105 Counters* counters = masm->isolate()->counters(); | 105 Counters* counters = masm->isolate()->counters(); |
106 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); | 106 __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
107 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 107 __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
108 | 108 |
109 Label done; | 109 Label done; |
110 | 110 |
111 const int kInterceptorOrAccessCheckNeededMask = | 111 const int kInterceptorOrAccessCheckNeededMask = |
112 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); | 112 (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); |
113 | 113 |
114 // Bail out if the receiver has a named interceptor or requires access checks. | 114 // Bail out if the receiver has a named interceptor or requires access checks. |
115 Register map = scratch1; | 115 Register map = scratch1; |
116 __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 116 __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
117 __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); | 117 __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
118 __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); | 118 __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); |
119 __ Branch(miss_label, ne, scratch0, Operand(zero_reg)); | 119 __ Branch(miss_label, ne, scratch0, Operand(zero_reg)); |
120 | 120 |
121 // Check that receiver is a JSObject. | 121 // Check that receiver is a JSObject. |
122 __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 122 __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
123 __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 123 __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
124 | 124 |
125 // Load properties array. | 125 // Load properties array. |
126 Register properties = scratch0; | 126 Register properties = scratch0; |
127 __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 127 __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
128 // Check that the properties array is a dictionary. | 128 // Check that the properties array is a dictionary. |
129 __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset)); | 129 __ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
130 Register tmp = properties; | 130 Register tmp = properties; |
131 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); | 131 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); |
132 __ Branch(miss_label, ne, map, Operand(tmp)); | 132 __ Branch(miss_label, ne, map, Operand(tmp)); |
133 | 133 |
134 // Restore the temporarily used register. | 134 // Restore the temporarily used register. |
135 __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 135 __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
136 | 136 |
137 | 137 |
138 NameDictionaryLookupStub::GenerateNegativeLookup(masm, | 138 NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
139 miss_label, | 139 miss_label, |
140 &done, | 140 &done, |
141 receiver, | 141 receiver, |
142 properties, | 142 properties, |
143 name, | 143 name, |
144 scratch1); | 144 scratch1); |
145 __ bind(&done); | 145 __ bind(&done); |
146 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); | 146 __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
147 } | 147 } |
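The bit test near the top of this function bails out to the miss label whenever either map bit is set; in C++ terms (a sketch reusing the real Map bit names):

    bool RequiresGenericLookup(uint8_t map_bit_field) {
      const int kMask =
          (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
      return (map_bit_field & kMask) != 0;  // ne zero_reg -> miss_label
    }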
148 | 148 |
149 | 149 |
150 void StubCache::GenerateProbe(MacroAssembler* masm, | 150 void StubCache::GenerateProbe(MacroAssembler* masm, |
151 Code::Flags flags, | 151 Code::Flags flags, |
152 Register receiver, | 152 Register receiver, |
153 Register name, | 153 Register name, |
154 Register scratch, | 154 Register scratch, |
155 Register extra, | 155 Register extra, |
156 Register extra2, | 156 Register extra2, |
157 Register extra3) { | 157 Register extra3) { |
158 Isolate* isolate = masm->isolate(); | 158 Isolate* isolate = masm->isolate(); |
159 Label miss; | 159 Label miss; |
160 | 160 |
161 // Make sure that code is valid. The multiplying code relies on the | 161 // Make sure that code is valid. The multiplying code relies on the |
162 // entry size being 12. | 162 // entry size being 3 * kPointerSize. |
163 ASSERT(sizeof(Entry) == 12); | 163 ASSERT(sizeof(Entry) == 3 * kPointerSize); |
164 | 165 |
165 // Make sure the flags do not name a specific type. | 166 // Make sure the flags do not name a specific type. |
166 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); | 167 ASSERT(Code::ExtractTypeFromFlags(flags) == 0); |
167 | 168 |
168 // Make sure that there are no register conflicts. | 169 // Make sure that there are no register conflicts. |
169 ASSERT(!scratch.is(receiver)); | 170 ASSERT(!scratch.is(receiver)); |
170 ASSERT(!scratch.is(name)); | 171 ASSERT(!scratch.is(name)); |
171 ASSERT(!extra.is(receiver)); | 172 ASSERT(!extra.is(receiver)); |
172 ASSERT(!extra.is(name)); | 173 ASSERT(!extra.is(name)); |
173 ASSERT(!extra.is(scratch)); | 174 ASSERT(!extra.is(scratch)); |
174 ASSERT(!extra2.is(receiver)); | 175 ASSERT(!extra2.is(receiver)); |
175 ASSERT(!extra2.is(name)); | 176 ASSERT(!extra2.is(name)); |
176 ASSERT(!extra2.is(scratch)); | 177 ASSERT(!extra2.is(scratch)); |
177 ASSERT(!extra2.is(extra)); | 178 ASSERT(!extra2.is(extra)); |
178 | 179 |
179 // Check register validity. | 180 // Check register validity. |
180 ASSERT(!scratch.is(no_reg)); | 181 ASSERT(!scratch.is(no_reg)); |
181 ASSERT(!extra.is(no_reg)); | 182 ASSERT(!extra.is(no_reg)); |
182 ASSERT(!extra2.is(no_reg)); | 183 ASSERT(!extra2.is(no_reg)); |
183 ASSERT(!extra3.is(no_reg)); | 184 ASSERT(!extra3.is(no_reg)); |
184 | 185 |
185 Counters* counters = masm->isolate()->counters(); | 186 Counters* counters = masm->isolate()->counters(); |
186 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | 187 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
187 extra2, extra3); | 188 extra2, extra3); |
188 | 189 |
189 // Check that the receiver isn't a smi. | 190 // Check that the receiver isn't a smi. |
190 __ JumpIfSmi(receiver, &miss); | 191 __ JumpIfSmi(receiver, &miss); |
191 | 192 |
192 // Get the map of the receiver and compute the hash. | 193 // Get the map of the receiver and compute the hash. |
193 __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); | 194 __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
194 __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 195 __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
195 __ Addu(scratch, scratch, at); | 196 __ Daddu(scratch, scratch, at); |
196 uint32_t mask = kPrimaryTableSize - 1; | 197 uint64_t mask = kPrimaryTableSize - 1; |
197 // We shift out the last two bits because they are not part of the hash and | 198 // We shift out the last two bits because they are not part of the hash and |
198 // they are always 01 for maps. | 199 // they are always 01 for maps. |
199 __ srl(scratch, scratch, kHeapObjectTagSize); | 200 __ dsrl(scratch, scratch, kHeapObjectTagSize); |
200 __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); | 201 __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); |
201 __ And(scratch, scratch, Operand(mask)); | 202 __ And(scratch, scratch, Operand(mask)); |
202 | 203 |
203 // Probe the primary table. | 204 // Probe the primary table. |
204 ProbeTable(isolate, | 205 ProbeTable(isolate, |
205 masm, | 206 masm, |
206 flags, | 207 flags, |
207 kPrimary, | 208 kPrimary, |
208 receiver, | 209 receiver, |
209 name, | 210 name, |
210 scratch, | 211 scratch, |
211 extra, | 212 extra, |
212 extra2, | 213 extra2, |
213 extra3); | 214 extra3); |
214 | 215 |
215 // Primary miss: Compute hash for secondary probe. | 216 // Primary miss: Compute hash for secondary probe. |
216 __ srl(at, name, kHeapObjectTagSize); | 217 __ dsrl(at, name, kHeapObjectTagSize); |
217 __ Subu(scratch, scratch, at); | 218 __ Dsubu(scratch, scratch, at); |
218 uint32_t mask2 = kSecondaryTableSize - 1; | 219 uint64_t mask2 = kSecondaryTableSize - 1; |
219 __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); | 220 __ Daddu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); |
220 __ And(scratch, scratch, Operand(mask2)); | 221 __ And(scratch, scratch, Operand(mask2)); |
221 | 222 |
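Spelled out in C++, the two table offsets computed above look roughly like this (a sketch derived from the emitted instructions; the canonical versions are the static helpers in StubCache):

    uint64_t PrimaryOffset(uint64_t hash_field, uint64_t map_bits, uint32_t flags) {
      uint64_t h = (hash_field + map_bits) >> kHeapObjectTagSize;
      h ^= (flags >> kHeapObjectTagSize) & (kPrimaryTableSize - 1);
      return h & (kPrimaryTableSize - 1);
    }

    uint64_t SecondaryOffset(uint64_t primary, uint64_t name_bits, uint32_t flags) {
      uint64_t h = primary - (name_bits >> kHeapObjectTagSize);
      h += (flags >> kHeapObjectTagSize) & (kSecondaryTableSize - 1);
      return h & (kSecondaryTableSize - 1);
    }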
222 // Probe the secondary table. | 223 // Probe the secondary table. |
223 ProbeTable(isolate, | 224 ProbeTable(isolate, |
224 masm, | 225 masm, |
225 flags, | 226 flags, |
226 kSecondary, | 227 kSecondary, |
227 receiver, | 228 receiver, |
228 name, | 229 name, |
229 scratch, | 230 scratch, |
230 extra, | 231 extra, |
231 extra2, | 232 extra2, |
232 extra3); | 233 extra3); |
233 | 234 |
234 // Cache miss: Fall-through and let caller handle the miss by | 235 // Cache miss: Fall-through and let caller handle the miss by |
235 // entering the runtime system. | 236 // entering the runtime system. |
236 __ bind(&miss); | 237 __ bind(&miss); |
237 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, | 238 __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, |
238 extra2, extra3); | 239 extra2, extra3); |
239 } | 240 } |
240 | 241 |
241 | 242 |
242 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, | 243 void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, |
243 int index, | 244 int index, |
244 Register prototype) { | 245 Register prototype) { |
245 // Load the global or builtins object from the current context. | 246 // Load the global or builtins object from the current context. |
246 __ lw(prototype, | 247 __ ld(prototype, |
247 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 248 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
248 // Load the native context from the global or builtins object. | 249 // Load the native context from the global or builtins object. |
249 __ lw(prototype, | 250 __ ld(prototype, |
250 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); | 251 FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); |
251 // Load the function from the native context. | 252 // Load the function from the native context. |
252 __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index))); | 253 __ ld(prototype, MemOperand(prototype, Context::SlotOffset(index))); |
253 // Load the initial map. The global functions all have initial maps. | 254 // Load the initial map. The global functions all have initial maps. |
254 __ lw(prototype, | 255 __ ld(prototype, |
255 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); | 256 FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); |
256 // Load the prototype from the initial map. | 257 // Load the prototype from the initial map. |
257 __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 258 __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
258 } | 259 } |
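The five dependent ld instructions amount to the following pointer chase (a sketch with informal accessors):

    // prototype = cp[GLOBAL_OBJECT_INDEX]   // global object of current context
    //               .native_context          // per-context builtins
    //               [index]                  // the global JSFunction
    //               .initial_map             // global functions always have one
    //               .prototype;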
259 | 260 |
260 | 261 |
261 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( | 262 void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
262 MacroAssembler* masm, | 263 MacroAssembler* masm, |
263 int index, | 264 int index, |
264 Register prototype, | 265 Register prototype, |
265 Label* miss) { | 266 Label* miss) { |
266 Isolate* isolate = masm->isolate(); | 267 Isolate* isolate = masm->isolate(); |
267 // Get the global function with the given index. | 268 // Get the global function with the given index. |
268 Handle<JSFunction> function( | 269 Handle<JSFunction> function( |
269 JSFunction::cast(isolate->native_context()->get(index))); | 270 JSFunction::cast(isolate->native_context()->get(index))); |
270 | 271 |
271 // Check we're still in the same context. | 272 // Check we're still in the same context. |
272 Register scratch = prototype; | 273 Register scratch = prototype; |
273 const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); | 274 const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); |
274 __ lw(scratch, MemOperand(cp, offset)); | 275 __ ld(scratch, MemOperand(cp, offset)); |
275 __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); | 276 __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
276 __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index))); | 277 __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index))); |
277 __ li(at, function); | 278 __ li(at, function); |
278 __ Branch(miss, ne, at, Operand(scratch)); | 279 __ Branch(miss, ne, at, Operand(scratch)); |
279 | 280 |
280 // Load its initial map. The global functions all have initial maps. | 281 // Load its initial map. The global functions all have initial maps. |
281 __ li(prototype, Handle<Map>(function->initial_map())); | 282 __ li(prototype, Handle<Map>(function->initial_map())); |
282 // Load the prototype from the initial map. | 283 // Load the prototype from the initial map. |
283 __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); | 284 __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
284 } | 285 } |
285 | 286 |
286 | 287 |
287 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, | 288 void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, |
288 Register dst, | 289 Register dst, |
289 Register src, | 290 Register src, |
290 bool inobject, | 291 bool inobject, |
291 int index, | 292 int index, |
292 Representation representation) { | 293 Representation representation) { |
293 ASSERT(!representation.IsDouble()); | 294 ASSERT(!representation.IsDouble()); |
294 int offset = index * kPointerSize; | 295 int offset = index * kPointerSize; |
295 if (!inobject) { | 296 if (!inobject) { |
296 // Calculate the offset into the properties array. | 297 // Calculate the offset into the properties array. |
297 offset = offset + FixedArray::kHeaderSize; | 298 offset = offset + FixedArray::kHeaderSize; |
298 __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); | 299 __ ld(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); |
299 src = dst; | 300 src = dst; |
300 } | 301 } |
301 __ lw(dst, FieldMemOperand(src, offset)); | 302 __ ld(dst, FieldMemOperand(src, offset)); |
302 } | 303 } |
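The offset logic reduces to the following (a sketch; kPointerSize is 8 on MIPS64, and FieldMemOperand subtracts kHeapObjectTag):

    int FieldByteOffset(bool inobject, int index) {
      int offset = index * kPointerSize;
      if (!inobject) offset += FixedArray::kHeaderSize;  // indirect via properties
      return offset;
    }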
303 | 304 |
304 | 305 |
305 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, | 306 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
306 Register receiver, | 307 Register receiver, |
307 Register scratch, | 308 Register scratch, |
308 Label* miss_label) { | 309 Label* miss_label) { |
309 // Check that the receiver isn't a smi. | 310 // Check that the receiver isn't a smi. |
310 __ JumpIfSmi(receiver, miss_label); | 311 __ JumpIfSmi(receiver, miss_label); |
311 | 312 |
312 // Check that the object is a JS array. | 313 // Check that the object is a JS array. |
313 __ GetObjectType(receiver, scratch, scratch); | 314 __ GetObjectType(receiver, scratch, scratch); |
314 __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE)); | 315 __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE)); |
315 | 316 |
316 // Load length directly from the JS array. | 317 // Load length directly from the JS array. |
317 __ Ret(USE_DELAY_SLOT); | 318 __ Ret(USE_DELAY_SLOT); |
318 __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 319 __ ld(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
319 } | 320 } |
320 | 321 |
321 | 322 |
322 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, | 323 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, |
323 Register receiver, | 324 Register receiver, |
324 Register scratch1, | 325 Register scratch1, |
325 Register scratch2, | 326 Register scratch2, |
326 Label* miss_label) { | 327 Label* miss_label) { |
327 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); | 328 __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); |
328 __ Ret(USE_DELAY_SLOT); | 329 __ Ret(USE_DELAY_SLOT); |
329 __ mov(v0, scratch1); | 330 __ mov(v0, scratch1); |
330 } | 331 } |
331 | 332 |
332 | 333 |
333 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, | 334 void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, |
334 Handle<JSGlobalObject> global, | 335 Handle<JSGlobalObject> global, |
335 Handle<Name> name, | 336 Handle<Name> name, |
336 Register scratch, | 337 Register scratch, |
337 Label* miss) { | 338 Label* miss) { |
338 Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); | 339 Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); |
339 ASSERT(cell->value()->IsTheHole()); | 340 ASSERT(cell->value()->IsTheHole()); |
340 __ li(scratch, Operand(cell)); | 341 __ li(scratch, Operand(cell)); |
341 __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); | 342 __ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
342 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 343 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
343 __ Branch(miss, ne, scratch, Operand(at)); | 344 __ Branch(miss, ne, scratch, Operand(at)); |
344 } | 345 } |
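The cell load encodes "this global property is still absent": EnsurePropertyCell creates the cell holding the hole (hence the ASSERT above), and the stub re-checks it on every hit (a sketch of the condition):

    //   if (cell->value() != the_hole) goto miss;  // property was defined later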
345 | 346 |
346 | 347 |
347 void StoreStubCompiler::GenerateNegativeHolderLookup( | 348 void StoreStubCompiler::GenerateNegativeHolderLookup( |
348 MacroAssembler* masm, | 349 MacroAssembler* masm, |
349 Handle<JSObject> holder, | 350 Handle<JSObject> holder, |
350 Register holder_reg, | 351 Register holder_reg, |
351 Handle<Name> name, | 352 Handle<Name> name, |
(...skipping 38 matching lines...)
390 __ li(scratch1, constant); | 391 __ li(scratch1, constant); |
391 __ Branch(miss_label, ne, value_reg, Operand(scratch1)); | 392 __ Branch(miss_label, ne, value_reg, Operand(scratch1)); |
392 } else if (representation.IsSmi()) { | 393 } else if (representation.IsSmi()) { |
393 __ JumpIfNotSmi(value_reg, miss_label); | 394 __ JumpIfNotSmi(value_reg, miss_label); |
394 } else if (representation.IsHeapObject()) { | 395 } else if (representation.IsHeapObject()) { |
395 __ JumpIfSmi(value_reg, miss_label); | 396 __ JumpIfSmi(value_reg, miss_label); |
396 HeapType* field_type = descriptors->GetFieldType(descriptor); | 397 HeapType* field_type = descriptors->GetFieldType(descriptor); |
397 HeapType::Iterator<Map> it = field_type->Classes(); | 398 HeapType::Iterator<Map> it = field_type->Classes(); |
398 Handle<Map> current; | 399 Handle<Map> current; |
399 if (!it.Done()) { | 400 if (!it.Done()) { |
400 __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); | 401 __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
401 Label do_store; | 402 Label do_store; |
402 while (true) { | 403 while (true) { |
403 // Do the CompareMap() directly within the Branch() functions. | 404 // Do the CompareMap() directly within the Branch() functions. |
404 current = it.Current(); | 405 current = it.Current(); |
405 it.Advance(); | 406 it.Advance(); |
406 if (it.Done()) { | 407 if (it.Done()) { |
407 __ Branch(miss_label, ne, scratch1, Operand(current)); | 408 __ Branch(miss_label, ne, scratch1, Operand(current)); |
408 break; | 409 break; |
409 } | 410 } |
410 __ Branch(&do_store, eq, scratch1, Operand(current)); | 411 __ Branch(&do_store, eq, scratch1, Operand(current)); |
411 } | 412 } |
412 __ bind(&do_store); | 413 __ bind(&do_store); |
413 } | 414 } |
414 } else if (representation.IsDouble()) { | 415 } else if (representation.IsDouble()) { |
415 Label do_store, heap_number; | 416 Label do_store, heap_number; |
416 __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex); | 417 __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); |
417 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow, | 418 __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); |
418 TAG_RESULT, MUTABLE); | |
419 | 419 |
420 __ JumpIfNotSmi(value_reg, &heap_number); | 420 __ JumpIfNotSmi(value_reg, &heap_number); |
421 __ SmiUntag(scratch1, value_reg); | 421 __ SmiUntag(scratch1, value_reg); |
422 __ mtc1(scratch1, f6); | 422 __ mtc1(scratch1, f6); |
423 __ cvt_d_w(f4, f6); | 423 __ cvt_d_w(f4, f6); |
424 __ jmp(&do_store); | 424 __ jmp(&do_store); |
425 | 425 |
426 __ bind(&heap_number); | 426 __ bind(&heap_number); |
427 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, | 427 __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, |
428 miss_label, DONT_DO_SMI_CHECK); | 428 miss_label, DONT_DO_SMI_CHECK); |
(...skipping 17 matching lines...)
446 __ Push(a2, a0); | 446 __ Push(a2, a0); |
447 __ TailCallExternalReference( | 447 __ TailCallExternalReference( |
448 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), | 448 ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
449 masm->isolate()), | 449 masm->isolate()), |
450 3, 1); | 450 3, 1); |
451 return; | 451 return; |
452 } | 452 } |
453 | 453 |
454 // Update the map of the object. | 454 // Update the map of the object. |
455 __ li(scratch1, Operand(transition)); | 455 __ li(scratch1, Operand(transition)); |
456 __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); | 456 __ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
457 | 457 |
458 // Update the write barrier for the map field. | 458 // Update the write barrier for the map field. |
459 __ RecordWriteField(receiver_reg, | 459 __ RecordWriteField(receiver_reg, |
460 HeapObject::kMapOffset, | 460 HeapObject::kMapOffset, |
461 scratch1, | 461 scratch1, |
462 scratch2, | 462 scratch2, |
463 kRAHasNotBeenSaved, | 463 kRAHasNotBeenSaved, |
464 kDontSaveFPRegs, | 464 kDontSaveFPRegs, |
465 OMIT_REMEMBERED_SET, | 465 OMIT_REMEMBERED_SET, |
466 OMIT_SMI_CHECK); | 466 OMIT_SMI_CHECK); |
(...skipping 13 matching lines...)
480 // object and the number of in-object properties is not going to change. | 480 // object and the number of in-object properties is not going to change. |
481 index -= object->map()->inobject_properties(); | 481 index -= object->map()->inobject_properties(); |
482 | 482 |
483 // TODO(verwaest): Share this code as a code stub. | 483 // TODO(verwaest): Share this code as a code stub. |
484 SmiCheck smi_check = representation.IsTagged() | 484 SmiCheck smi_check = representation.IsTagged() |
485 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 485 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
486 if (index < 0) { | 486 if (index < 0) { |
487 // Set the property straight into the object. | 487 // Set the property straight into the object. |
488 int offset = object->map()->instance_size() + (index * kPointerSize); | 488 int offset = object->map()->instance_size() + (index * kPointerSize); |
489 if (representation.IsDouble()) { | 489 if (representation.IsDouble()) { |
490 __ sw(storage_reg, FieldMemOperand(receiver_reg, offset)); | 490 __ sd(storage_reg, FieldMemOperand(receiver_reg, offset)); |
491 } else { | 491 } else { |
492 __ sw(value_reg, FieldMemOperand(receiver_reg, offset)); | 492 __ sd(value_reg, FieldMemOperand(receiver_reg, offset)); |
493 } | 493 } |
494 | 494 |
495 if (!representation.IsSmi()) { | 495 if (!representation.IsSmi()) { |
496 // Update the write barrier for the array address. | 496 // Update the write barrier for the array address. |
497 if (!representation.IsDouble()) { | 497 if (!representation.IsDouble()) { |
498 __ mov(storage_reg, value_reg); | 498 __ mov(storage_reg, value_reg); |
499 } | 499 } |
500 __ RecordWriteField(receiver_reg, | 500 __ RecordWriteField(receiver_reg, |
501 offset, | 501 offset, |
502 storage_reg, | 502 storage_reg, |
503 scratch1, | 503 scratch1, |
504 kRAHasNotBeenSaved, | 504 kRAHasNotBeenSaved, |
505 kDontSaveFPRegs, | 505 kDontSaveFPRegs, |
506 EMIT_REMEMBERED_SET, | 506 EMIT_REMEMBERED_SET, |
507 smi_check); | 507 smi_check); |
508 } | 508 } |
509 } else { | 509 } else { |
510 // Write to the properties array. | 510 // Write to the properties array. |
511 int offset = index * kPointerSize + FixedArray::kHeaderSize; | 511 int offset = index * kPointerSize + FixedArray::kHeaderSize; |
512 // Get the properties array. | 512 // Get the properties array. |
513 __ lw(scratch1, | 513 __ ld(scratch1, |
514 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 514 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
515 if (representation.IsDouble()) { | 515 if (representation.IsDouble()) { |
516 __ sw(storage_reg, FieldMemOperand(scratch1, offset)); | 516 __ sd(storage_reg, FieldMemOperand(scratch1, offset)); |
517 } else { | 517 } else { |
518 __ sw(value_reg, FieldMemOperand(scratch1, offset)); | 518 __ sd(value_reg, FieldMemOperand(scratch1, offset)); |
519 } | 519 } |
520 | 520 |
521 if (!representation.IsSmi()) { | 521 if (!representation.IsSmi()) { |
522 // Update the write barrier for the array address. | 522 // Update the write barrier for the array address. |
523 if (!representation.IsDouble()) { | 523 if (!representation.IsDouble()) { |
524 __ mov(storage_reg, value_reg); | 524 __ mov(storage_reg, value_reg); |
525 } | 525 } |
526 __ RecordWriteField(scratch1, | 526 __ RecordWriteField(scratch1, |
527 offset, | 527 offset, |
528 storage_reg, | 528 storage_reg, |
(...skipping 37 matching lines...)
566 | 566 |
567 Representation representation = lookup->representation(); | 567 Representation representation = lookup->representation(); |
568 ASSERT(!representation.IsNone()); | 568 ASSERT(!representation.IsNone()); |
569 if (representation.IsSmi()) { | 569 if (representation.IsSmi()) { |
570 __ JumpIfNotSmi(value_reg, miss_label); | 570 __ JumpIfNotSmi(value_reg, miss_label); |
571 } else if (representation.IsHeapObject()) { | 571 } else if (representation.IsHeapObject()) { |
572 __ JumpIfSmi(value_reg, miss_label); | 572 __ JumpIfSmi(value_reg, miss_label); |
573 HeapType* field_type = lookup->GetFieldType(); | 573 HeapType* field_type = lookup->GetFieldType(); |
574 HeapType::Iterator<Map> it = field_type->Classes(); | 574 HeapType::Iterator<Map> it = field_type->Classes(); |
575 if (!it.Done()) { | 575 if (!it.Done()) { |
576 __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); | 576 __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
577 Label do_store; | 577 Label do_store; |
578 Handle<Map> current; | 578 Handle<Map> current; |
579 while (true) { | 579 while (true) { |
580 // Do the CompareMap() directly within the Branch() functions. | 580 // Do the CompareMap() directly within the Branch() functions. |
581 current = it.Current(); | 581 current = it.Current(); |
582 it.Advance(); | 582 it.Advance(); |
583 if (it.Done()) { | 583 if (it.Done()) { |
584 __ Branch(miss_label, ne, scratch1, Operand(current)); | 584 __ Branch(miss_label, ne, scratch1, Operand(current)); |
585 break; | 585 break; |
586 } | 586 } |
587 __ Branch(&do_store, eq, scratch1, Operand(current)); | 587 __ Branch(&do_store, eq, scratch1, Operand(current)); |
588 } | 588 } |
589 __ bind(&do_store); | 589 __ bind(&do_store); |
590 } | 590 } |
591 } else if (representation.IsDouble()) { | 591 } else if (representation.IsDouble()) { |
592 // Load the double storage. | 592 // Load the double storage. |
593 if (index.is_inobject()) { | 593 if (index.is_inobject()) { |
594 __ lw(scratch1, FieldMemOperand(receiver_reg, index.offset())); | 594 __ ld(scratch1, FieldMemOperand(receiver_reg, index.offset())); |
595 } else { | 595 } else { |
596 __ lw(scratch1, | 596 __ ld(scratch1, |
597 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 597 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
598 __ lw(scratch1, FieldMemOperand(scratch1, index.offset())); | 598 __ ld(scratch1, FieldMemOperand(scratch1, index.offset())); |
599 } | 599 } |
600 | 600 |
601 // Store the value into the storage. | 601 // Store the value into the storage. |
602 Label do_store, heap_number; | 602 Label do_store, heap_number; |
603 __ JumpIfNotSmi(value_reg, &heap_number); | 603 __ JumpIfNotSmi(value_reg, &heap_number); |
604 __ SmiUntag(scratch2, value_reg); | 604 __ SmiUntag(scratch2, value_reg); |
605 __ mtc1(scratch2, f6); | 605 __ mtc1(scratch2, f6); |
606 __ cvt_d_w(f4, f6); | 606 __ cvt_d_w(f4, f6); |
607 __ jmp(&do_store); | 607 __ jmp(&do_store); |
608 | 608 |
609 __ bind(&heap_number); | 609 __ bind(&heap_number); |
610 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, | 610 __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, |
611 miss_label, DONT_DO_SMI_CHECK); | 611 miss_label, DONT_DO_SMI_CHECK); |
612 __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 612 __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
613 | 613 |
614 __ bind(&do_store); | 614 __ bind(&do_store); |
615 __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); | 615 __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); |
616 // Return the value (register v0). | 616 // Return the value (register v0). |
617 ASSERT(value_reg.is(a0)); | 617 ASSERT(value_reg.is(a0)); |
618 __ Ret(USE_DELAY_SLOT); | 618 __ Ret(USE_DELAY_SLOT); |
619 __ mov(v0, a0); | 619 __ mov(v0, a0); |
620 return; | 620 return; |
621 } | 621 } |
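Both branches of the double path above funnel the value into f4 before the single sdc1 store; roughly (a sketch, with IsSmi/SmiToInt as stand-ins for the real tagging helpers):

    double LoadAsDouble(Object* value) {
      if (IsSmi(value)) return static_cast<double>(SmiToInt(value));  // mtc1 + cvt_d_w
      return HeapNumber::cast(value)->value();                        // ldc1
    }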
622 | 622 |
623 // TODO(verwaest): Share this code as a code stub. | 623 // TODO(verwaest): Share this code as a code stub. |
624 SmiCheck smi_check = representation.IsTagged() | 624 SmiCheck smi_check = representation.IsTagged() |
625 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; | 625 ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
626 if (index.is_inobject()) { | 626 if (index.is_inobject()) { |
627 // Set the property straight into the object. | 627 // Set the property straight into the object. |
628 __ sw(value_reg, FieldMemOperand(receiver_reg, index.offset())); | 628 __ sd(value_reg, FieldMemOperand(receiver_reg, index.offset())); |
629 | 629 |
630 if (!representation.IsSmi()) { | 630 if (!representation.IsSmi()) { |
631 // Skip updating write barrier if storing a smi. | 631 // Skip updating write barrier if storing a smi. |
632 __ JumpIfSmi(value_reg, &exit); | 632 __ JumpIfSmi(value_reg, &exit); |
633 | 633 |
634 // Update the write barrier for the array address. | 634 // Update the write barrier for the array address. |
635 // Pass the now unused name_reg as a scratch register. | 635 // Pass the now unused name_reg as a scratch register. |
636 __ mov(name_reg, value_reg); | 636 __ mov(name_reg, value_reg); |
637 __ RecordWriteField(receiver_reg, | 637 __ RecordWriteField(receiver_reg, |
638 index.offset(), | 638 index.offset(), |
639 name_reg, | 639 name_reg, |
640 scratch1, | 640 scratch1, |
641 kRAHasNotBeenSaved, | 641 kRAHasNotBeenSaved, |
642 kDontSaveFPRegs, | 642 kDontSaveFPRegs, |
643 EMIT_REMEMBERED_SET, | 643 EMIT_REMEMBERED_SET, |
644 smi_check); | 644 smi_check); |
645 } | 645 } |
646 } else { | 646 } else { |
647 // Write to the properties array. | 647 // Write to the properties array. |
648 // Get the properties array. | 648 // Get the properties array. |
649 __ lw(scratch1, | 649 __ ld(scratch1, |
650 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); | 650 FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
651 __ sw(value_reg, FieldMemOperand(scratch1, index.offset())); | 651 __ sd(value_reg, FieldMemOperand(scratch1, index.offset())); |
652 | 652 |
653 if (!representation.IsSmi()) { | 653 if (!representation.IsSmi()) { |
654 // Skip updating write barrier if storing a smi. | 654 // Skip updating write barrier if storing a smi. |
655 __ JumpIfSmi(value_reg, &exit); | 655 __ JumpIfSmi(value_reg, &exit); |
656 | 656 |
657 // Update the write barrier for the array address. | 657 // Update the write barrier for the array address. |
658 // Ok to clobber receiver_reg and name_reg, since we return. | 658 // Ok to clobber receiver_reg and name_reg, since we return. |
659 __ mov(name_reg, value_reg); | 659 __ mov(name_reg, value_reg); |
660 __ RecordWriteField(scratch1, | 660 __ RecordWriteField(scratch1, |
661 index.offset(), | 661 index.offset(), |
(...skipping 61 matching lines...)
723 void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, | 723 void StubCompiler::GenerateFastApiCall(MacroAssembler* masm, |
724 const CallOptimization& optimization, | 724 const CallOptimization& optimization, |
725 Handle<Map> receiver_map, | 725 Handle<Map> receiver_map, |
726 Register receiver, | 726 Register receiver, |
727 Register scratch_in, | 727 Register scratch_in, |
728 bool is_store, | 728 bool is_store, |
729 int argc, | 729 int argc, |
730 Register* values) { | 730 Register* values) { |
731 ASSERT(!receiver.is(scratch_in)); | 731 ASSERT(!receiver.is(scratch_in)); |
732 // Preparing to push, adjust sp. | 732 // Preparing to push, adjust sp. |
733 __ Subu(sp, sp, Operand((argc + 1) * kPointerSize)); | 733 __ Dsubu(sp, sp, Operand((argc + 1) * kPointerSize)); |
734 __ sw(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver. | 734 __ sd(receiver, MemOperand(sp, argc * kPointerSize)); // Push receiver. |
735 // Write the arguments to stack frame. | 735 // Write the arguments to stack frame. |
736 for (int i = 0; i < argc; i++) { | 736 for (int i = 0; i < argc; i++) { |
737 Register arg = values[argc-1-i]; | 737 Register arg = values[argc-1-i]; |
738 ASSERT(!receiver.is(arg)); | 738 ASSERT(!receiver.is(arg)); |
739 ASSERT(!scratch_in.is(arg)); | 739 ASSERT(!scratch_in.is(arg)); |
740 __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg. | 740 __ sd(arg, MemOperand(sp, (argc-1-i) * kPointerSize)); // Push arg. |
741 } | 741 } |
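After this loop the outgoing stack holds (a sketch; slot offsets assume kPointerSize == 8):

    //   sp[0 * 8]          = values[0]
    //   ...
    //   sp[(argc - 1) * 8] = values[argc - 1]
    //   sp[argc * 8]       = receiver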
742 ASSERT(optimization.is_simple_api_call()); | 742 ASSERT(optimization.is_simple_api_call()); |
743 | 743 |
744 // Abi for CallApiFunctionStub. | 744 // Abi for CallApiFunctionStub. |
745 Register callee = a0; | 745 Register callee = a0; |
746 Register call_data = t0; | 746 Register call_data = a4; |
747 Register holder = a2; | 747 Register holder = a2; |
748 Register api_function_address = a1; | 748 Register api_function_address = a1; |
749 | 749 |
750 // Put holder in place. | 750 // Put holder in place. |
751 CallOptimization::HolderLookup holder_lookup; | 751 CallOptimization::HolderLookup holder_lookup; |
752 Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( | 752 Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( |
753 receiver_map, | 753 receiver_map, |
754 &holder_lookup); | 754 &holder_lookup); |
755 switch (holder_lookup) { | 755 switch (holder_lookup) { |
756 case CallOptimization::kHolderIsReceiver: | 756 case CallOptimization::kHolderIsReceiver: |
(...skipping 12 matching lines...)
769 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); | 769 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); |
770 Handle<Object> call_data_obj(api_call_info->data(), isolate); | 770 Handle<Object> call_data_obj(api_call_info->data(), isolate); |
771 | 771 |
772 // Put callee in place. | 772 // Put callee in place. |
773 __ li(callee, function); | 773 __ li(callee, function); |
774 | 774 |
775 bool call_data_undefined = false; | 775 bool call_data_undefined = false; |
776 // Put call_data in place. | 776 // Put call_data in place. |
777 if (isolate->heap()->InNewSpace(*call_data_obj)) { | 777 if (isolate->heap()->InNewSpace(*call_data_obj)) { |
778 __ li(call_data, api_call_info); | 778 __ li(call_data, api_call_info); |
779 __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); | 779 __ ld(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
780 } else if (call_data_obj->IsUndefined()) { | 780 } else if (call_data_obj->IsUndefined()) { |
781 call_data_undefined = true; | 781 call_data_undefined = true; |
782 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); | 782 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); |
783 } else { | 783 } else { |
784 __ li(call_data, call_data_obj); | 784 __ li(call_data, call_data_obj); |
785 } | 785 } |
786 // Put api_function_address in place. | 786 // Put api_function_address in place. |
787 Address function_address = v8::ToCData<Address>(api_call_info->callback()); | 787 Address function_address = v8::ToCData<Address>(api_call_info->callback()); |
788 ApiFunction fun(function_address); | 788 ApiFunction fun(function_address); |
789 ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; | 789 ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; |
(...skipping 63 matching lines...)
853 ASSERT(name->IsString()); | 853 ASSERT(name->IsString()); |
854 name = factory()->InternalizeString(Handle<String>::cast(name)); | 854 name = factory()->InternalizeString(Handle<String>::cast(name)); |
855 } | 855 } |
856 ASSERT(current.is_null() || | 856 ASSERT(current.is_null() || |
857 current->property_dictionary()->FindEntry(name) == | 857 current->property_dictionary()->FindEntry(name) == |
858 NameDictionary::kNotFound); | 858 NameDictionary::kNotFound); |
859 | 859 |
860 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, | 860 GenerateDictionaryNegativeLookup(masm(), miss, reg, name, |
861 scratch1, scratch2); | 861 scratch1, scratch2); |
862 | 862 |
863 __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); | 863 __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
864 reg = holder_reg; // From now on the object will be in holder_reg. | 864 reg = holder_reg; // From now on the object will be in holder_reg. |
865 __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); | 865 __ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
866 } else { | 866 } else { |
867 Register map_reg = scratch1; | 867 Register map_reg = scratch1; |
868 if (depth != 1 || check == CHECK_ALL_MAPS) { | 868 if (depth != 1 || check == CHECK_ALL_MAPS) { |
869 // CheckMap implicitly loads the map of |reg| into |map_reg|. | 869 // CheckMap implicitly loads the map of |reg| into |map_reg|. |
870 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); | 870 __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
871 } else { | 871 } else { |
872 __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); | 872 __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
873 } | 873 } |
874 | 874 |
875 // Check access rights to the global object. This has to happen after | 875 // Check access rights to the global object. This has to happen after |
876 // the map check so that we know that the object is actually a global | 876 // the map check so that we know that the object is actually a global |
877 // object. | 877 // object. |
878 if (current_map->IsJSGlobalProxyMap()) { | 878 if (current_map->IsJSGlobalProxyMap()) { |
879 __ CheckAccessGlobalProxy(reg, scratch2, miss); | 879 __ CheckAccessGlobalProxy(reg, scratch2, miss); |
880 } else if (current_map->IsJSGlobalObjectMap()) { | 880 } else if (current_map->IsJSGlobalObjectMap()) { |
881 GenerateCheckPropertyCell( | 881 GenerateCheckPropertyCell( |
882 masm(), Handle<JSGlobalObject>::cast(current), name, | 882 masm(), Handle<JSGlobalObject>::cast(current), name, |
883 scratch2, miss); | 883 scratch2, miss); |
884 } | 884 } |
885 | 885 |
886 reg = holder_reg; // From now on the object will be in holder_reg. | 886 reg = holder_reg; // From now on the object will be in holder_reg. |
887 | 887 |
888 if (heap()->InNewSpace(*prototype)) { | 888 if (heap()->InNewSpace(*prototype)) { |
889 // The prototype is in new space; we cannot store a reference to it | 889 // The prototype is in new space; we cannot store a reference to it |
890 // in the code. Load it from the map. | 890 // in the code. Load it from the map. |
891 __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); | 891 __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
892 } else { | 892 } else { |
893 // The prototype is in old space; load it directly. | 893 // The prototype is in old space; load it directly. |
894 __ li(reg, Operand(prototype)); | 894 __ li(reg, Operand(prototype)); |
895 } | 895 } |
896 } | 896 } |
897 | 897 |
898 // Go to the next object in the prototype chain. | 898 // Go to the next object in the prototype chain. |
899 current = prototype; | 899 current = prototype; |
900 current_map = handle(current->map()); | 900 current_map = handle(current->map()); |
901 } | 901 } |
(...skipping 50 matching lines...)
952 | 952 |
953 Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); | 953 Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); |
954 | 954 |
955 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { | 955 if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { |
956 ASSERT(!reg.is(scratch2())); | 956 ASSERT(!reg.is(scratch2())); |
957 ASSERT(!reg.is(scratch3())); | 957 ASSERT(!reg.is(scratch3())); |
958 ASSERT(!reg.is(scratch4())); | 958 ASSERT(!reg.is(scratch4())); |
959 | 959 |
960 // Load the properties dictionary. | 960 // Load the properties dictionary. |
961 Register dictionary = scratch4(); | 961 Register dictionary = scratch4(); |
962 __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); | 962 __ ld(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); |
963 | 963 |
964 // Probe the dictionary. | 964 // Probe the dictionary. |
965 Label probe_done; | 965 Label probe_done; |
966 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), | 966 NameDictionaryLookupStub::GeneratePositiveLookup(masm(), |
967 &miss, | 967 &miss, |
968 &probe_done, | 968 &probe_done, |
969 dictionary, | 969 dictionary, |
970 this->name(), | 970 this->name(), |
971 scratch2(), | 971 scratch2(), |
972 scratch3()); | 972 scratch3()); |
973 __ bind(&probe_done); | 973 __ bind(&probe_done); |
974 | 974 |
975 // If probing finds an entry in the dictionary, scratch3 contains the | 975 // If probing finds an entry in the dictionary, scratch3 contains the |
976 // pointer into the dictionary. Check that the value is the callback. | 976 // pointer into the dictionary. Check that the value is the callback. |
977 Register pointer = scratch3(); | 977 Register pointer = scratch3(); |
978 const int kElementsStartOffset = NameDictionary::kHeaderSize + | 978 const int kElementsStartOffset = NameDictionary::kHeaderSize + |
979 NameDictionary::kElementsStartIndex * kPointerSize; | 979 NameDictionary::kElementsStartIndex * kPointerSize; |
980 const int kValueOffset = kElementsStartOffset + kPointerSize; | 980 const int kValueOffset = kElementsStartOffset + kPointerSize; |
981 __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset)); | 981 __ ld(scratch2(), FieldMemOperand(pointer, kValueOffset)); |
982 __ Branch(&miss, ne, scratch2(), Operand(callback)); | 982 __ Branch(&miss, ne, scratch2(), Operand(callback)); |
983 } | 983 } |
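kValueOffset above relies on NameDictionary entries being (key, value, details) triples, so the value slot sits one pointer past the key slot that GeneratePositiveLookup located (a sketch of the assumed layout):

    //   entry[0] -> key      (the matched name)
    //   entry[1] -> value    (checked against the callback here)
    //   entry[2] -> details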
984 | 984 |
985 HandlerFrontendFooter(name, &miss); | 985 HandlerFrontendFooter(name, &miss); |
986 return reg; | 986 return reg; |
987 } | 987 } |
988 | 988 |
989 | 989 |
990 void LoadStubCompiler::GenerateLoadField(Register reg, | 990 void LoadStubCompiler::GenerateLoadField(Register reg, |
991 Handle<JSObject> holder, | 991 Handle<JSObject> holder, |
(...skipping 28 matching lines...)
1020 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); | 1020 STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3); |
1021 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); | 1021 STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); |
1022 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); | 1022 STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); |
1023 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); | 1023 STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); |
1024 ASSERT(!scratch2().is(reg)); | 1024 ASSERT(!scratch2().is(reg)); |
1025 ASSERT(!scratch3().is(reg)); | 1025 ASSERT(!scratch3().is(reg)); |
1026 ASSERT(!scratch4().is(reg)); | 1026 ASSERT(!scratch4().is(reg)); |
1027 __ push(receiver()); | 1027 __ push(receiver()); |
1028 if (heap()->InNewSpace(callback->data())) { | 1028 if (heap()->InNewSpace(callback->data())) { |
1029 __ li(scratch3(), callback); | 1029 __ li(scratch3(), callback); |
1030 __ lw(scratch3(), FieldMemOperand(scratch3(), | 1030 __ ld(scratch3(), FieldMemOperand(scratch3(), |
1031 ExecutableAccessorInfo::kDataOffset)); | 1031 ExecutableAccessorInfo::kDataOffset)); |
1032 } else { | 1032 } else { |
1033 __ li(scratch3(), Handle<Object>(callback->data(), isolate())); | 1033 __ li(scratch3(), Handle<Object>(callback->data(), isolate())); |
1034 } | 1034 } |
1035 __ Subu(sp, sp, 6 * kPointerSize); | 1035 __ Dsubu(sp, sp, 6 * kPointerSize); |
1036 __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize)); | 1036 __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize)); |
1037 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); | 1037 __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); |
1038 __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize)); | 1038 __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize)); |
1039 __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize)); | 1039 __ sd(scratch3(), MemOperand(sp, 3 * kPointerSize)); |
1040 __ li(scratch4(), | 1040 __ li(scratch4(), |
1041 Operand(ExternalReference::isolate_address(isolate()))); | 1041 Operand(ExternalReference::isolate_address(isolate()))); |
1042 __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize)); | 1042 __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize)); |
1043 __ sw(reg, MemOperand(sp, 1 * kPointerSize)); | 1043 __ sd(reg, MemOperand(sp, 1 * kPointerSize)); |
1044 __ sw(name(), MemOperand(sp, 0 * kPointerSize)); | 1044 __ sd(name(), MemOperand(sp, 0 * kPointerSize)); |
1045 __ Addu(scratch2(), sp, 1 * kPointerSize); | 1045 __ Daddu(scratch2(), sp, 1 * kPointerSize); |
1046 | 1046 |
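Together with the earlier push of the receiver, these stores build the PropertyCallbackArguments array bottom-up; the resulting layout (a sketch matching the STATIC_ASSERTed indices; args base = scratch2() = sp + 8):

    //   sp[0 * 8] = name           (extra argument below the args array)
    //   sp[1 * 8] = holder (reg)   -> kHolderIndex (0)
    //   sp[2 * 8] = isolate        -> kIsolateIndex (1)
    //   sp[3 * 8] = undefined      -> kReturnValueDefaultValueIndex (2)
    //   sp[4 * 8] = undefined      -> kReturnValueOffset (3)
    //   sp[5 * 8] = callback data  -> kDataIndex (4)
    //   sp[6 * 8] = receiver       -> kThisIndex (5), pushed before the Dsubu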
1047 __ mov(a2, scratch2()); // Saved in case scratch2 == a1. | 1047 __ mov(a2, scratch2()); // Saved in case scratch2 == a1. |
1048 // Abi for CallApiGetter. | 1048 // Abi for CallApiGetter. |
1049 Register getter_address_reg = a2; | 1049 Register getter_address_reg = a2; |
1050 | 1050 |
1051 Address getter_address = v8::ToCData<Address>(callback->getter()); | 1051 Address getter_address = v8::ToCData<Address>(callback->getter()); |
1052 ApiFunction fun(getter_address); | 1052 ApiFunction fun(getter_address); |
1053 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; | 1053 ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; |
1054 ExternalReference ref = ExternalReference(&fun, type, isolate()); | 1054 ExternalReference ref = ExternalReference(&fun, type, isolate()); |
1055 __ li(getter_address_reg, Operand(ref)); | 1055 __ li(getter_address_reg, Operand(ref)); |
(...skipping 131 matching lines...)
1187 { | 1187 { |
1188 FrameScope scope(masm, StackFrame::INTERNAL); | 1188 FrameScope scope(masm, StackFrame::INTERNAL); |
1189 | 1189 |
1190 // Save value register, so we can restore it later. | 1190 // Save value register, so we can restore it later. |
1191 __ push(value()); | 1191 __ push(value()); |
1192 | 1192 |
1193 if (!setter.is_null()) { | 1193 if (!setter.is_null()) { |
1194 // Call the JavaScript setter with receiver and value on the stack. | 1194 // Call the JavaScript setter with receiver and value on the stack. |
1195 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 1195 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
1196 // Swap in the global receiver. | 1196 // Swap in the global receiver. |
1197 __ lw(receiver, | 1197 __ ld(receiver, |
1198 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); | 1198 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
1199 } | 1199 } |
1200 __ Push(receiver, value()); | 1200 __ Push(receiver, value()); |
1201 ParameterCount actual(1); | 1201 ParameterCount actual(1); |
1202 ParameterCount expected(setter); | 1202 ParameterCount expected(setter); |
1203 __ InvokeFunction(setter, expected, actual, | 1203 __ InvokeFunction(setter, expected, actual, |
1204 CALL_FUNCTION, NullCallWrapper()); | 1204 CALL_FUNCTION, NullCallWrapper()); |
1205 } else { | 1205 } else { |
1206 // If we generate a global code snippet for deoptimization only, remember | 1206 // If we generate a global code snippet for deoptimization only, remember |
1207 // the place to continue after deoptimization. | 1207 // the place to continue after deoptimization. |
1208 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); | 1208 masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset()); |
1209 } | 1209 } |
1210 | 1210 |
1211 // We have to return the passed value, not the return value of the setter. | 1211 // We have to return the passed value, not the return value of the setter. |
1212 __ pop(v0); | 1212 __ pop(v0); |
1213 | 1213 |
1214 // Restore context register. | 1214 // Restore context register. |
1215 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1215 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1216 } | 1216 } |
1217 __ Ret(); | 1217 __ Ret(); |
1218 } | 1218 } |
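Note on GenerateStoreViaSetter: an assignment expression evaluates to the assigned value, never to the setter's return value, which is why value() is pushed before the call and popped into v0 at the end; and a JSGlobalObject receiver is first swapped for its global proxy, since JS code must never observe the global object itself. The contract in miniature:

    // Store IC contract: the result of (o.p = v) is v.
    __ push(value());   // keep v alive across the JS setter call
    // ... swap global object for its proxy, Push(receiver, value()), InvokeFunction ...
    __ pop(v0);         // v0 <- v; whatever the setter returned is discarded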
1219 | 1219 |
1220 | 1220 |
1221 #undef __ | 1221 #undef __ |
1222 #define __ ACCESS_MASM(masm()) | 1222 #define __ ACCESS_MASM(masm()) |
1223 | 1223 |
1224 | 1224 |
1225 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( | 1225 Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
(...skipping 22 matching lines...)
1248 | 1248 |
1249 // Return the generated code. | 1249 // Return the generated code. |
1250 return GetCode(kind(), Code::FAST, name); | 1250 return GetCode(kind(), Code::FAST, name); |
1251 } | 1251 } |
1252 | 1252 |
1253 | 1253 |
1254 Register* LoadStubCompiler::registers() { | 1254 Register* LoadStubCompiler::registers() { |
1255 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 1255 // receiver, name, scratch1, scratch2, scratch3, scratch4. |
1256 Register receiver = LoadIC::ReceiverRegister(); | 1256 Register receiver = LoadIC::ReceiverRegister(); |
1257 Register name = LoadIC::NameRegister(); | 1257 Register name = LoadIC::NameRegister(); |
1258 static Register registers[] = { receiver, name, a3, a0, t0, t1 }; | 1258 static Register registers[] = { receiver, name, a3, a0, a4, a5 }; |
1259 return registers; | 1259 return registers; |
1260 } | 1260 } |
1261 | 1261 |
1262 | 1262 |
1263 Register* KeyedLoadStubCompiler::registers() { | 1263 Register* KeyedLoadStubCompiler::registers() { |
1264 // receiver, name, scratch1, scratch2, scratch3, scratch4. | 1264 // receiver, name, scratch1, scratch2, scratch3, scratch4. |
1265 Register receiver = LoadIC::ReceiverRegister(); | 1265 Register receiver = LoadIC::ReceiverRegister(); |
1266 Register name = LoadIC::NameRegister(); | 1266 Register name = LoadIC::NameRegister(); |
1267 static Register registers[] = { receiver, name, a3, a0, t0, t1 }; | 1267 static Register registers[] = { receiver, name, a3, a0, a4, a5 }; |
1268 return registers; | 1268 return registers; |
1269 } | 1269 } |
1270 | 1270 |
1271 | 1271 |
1272 Register StoreStubCompiler::value() { | 1272 Register StoreStubCompiler::value() { |
1273 return a0; | 1273 return a0; |
1274 } | 1274 } |
1275 | 1275 |
1276 | 1276 |
1277 Register* StoreStubCompiler::registers() { | 1277 Register* StoreStubCompiler::registers() { |
1278 // receiver, name, scratch1, scratch2, scratch3. | 1278 // receiver, name, scratch1, scratch2, scratch3. |
1279 static Register registers[] = { a1, a2, a3, t0, t1 }; | 1279 static Register registers[] = { a1, a2, a3, a4, a5 }; |
1280 return registers; | 1280 return registers; |
1281 } | 1281 } |
1282 | 1282 |
1283 | 1283 |
1284 Register* KeyedStoreStubCompiler::registers() { | 1284 Register* KeyedStoreStubCompiler::registers() { |
1285 // receiver, name, scratch1, scratch2, scratch3. | 1285 // receiver, name, scratch1, scratch2, scratch3. |
1286 static Register registers[] = { a2, a1, a3, t0, t1 }; | 1286 static Register registers[] = { a2, a1, a3, a4, a5 }; |
1287 return registers; | 1287 return registers; |
1288 } | 1288 } |
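Note on the register tables: the only substantive change across all four is t0/t1 -> a4/a5. Under the n64 ABI, a4..a7 occupy the same hardware registers ($8..$11) that the o32 ABI names t0..t3, so the 32-bit port's scratch assignments carry over one-for-one. The mapping (per the MIPS calling conventions, not stated in this file):

    // o32 name : n64 name : hardware register
    //    t0    :    a4    :    $8
    //    t1    :    a5    :    $9
    static Register registers[] = { a1, a2, a3, a4, a5 };  // was { a1, a2, a3, t0, t1 }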
1289 | 1289 |
1290 | 1290 |
1291 #undef __ | 1291 #undef __ |
1292 #define __ ACCESS_MASM(masm) | 1292 #define __ ACCESS_MASM(masm) |
1293 | 1293 |
1294 | 1294 |
1295 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, | 1295 void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
1296 Handle<HeapType> type, | 1296 Handle<HeapType> type, |
1297 Register receiver, | 1297 Register receiver, |
1298 Handle<JSFunction> getter) { | 1298 Handle<JSFunction> getter) { |
1299 // ----------- S t a t e ------------- | 1299 // ----------- S t a t e ------------- |
1300 // -- a0 : receiver | 1300 // -- a0 : receiver |
1301 // -- a2 : name | 1301 // -- a2 : name |
1302 // -- ra : return address | 1302 // -- ra : return address |
1303 // ----------------------------------- | 1303 // ----------------------------------- |
1304 { | 1304 { |
1305 FrameScope scope(masm, StackFrame::INTERNAL); | 1305 FrameScope scope(masm, StackFrame::INTERNAL); |
1306 | 1306 |
1307 if (!getter.is_null()) { | 1307 if (!getter.is_null()) { |
1308 // Call the JavaScript getter with the receiver on the stack. | 1308 // Call the JavaScript getter with the receiver on the stack. |
1309 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { | 1309 if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
1310 // Swap in the global receiver. | 1310 // Swap in the global receiver. |
1311 __ lw(receiver, | 1311 __ ld(receiver, |
1312 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); | 1312 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
1313 } | 1313 } |
1314 __ push(receiver); | 1314 __ push(receiver); |
1315 ParameterCount actual(0); | 1315 ParameterCount actual(0); |
1316 ParameterCount expected(getter); | 1316 ParameterCount expected(getter); |
1317 __ InvokeFunction(getter, expected, actual, | 1317 __ InvokeFunction(getter, expected, actual, |
1318 CALL_FUNCTION, NullCallWrapper()); | 1318 CALL_FUNCTION, NullCallWrapper()); |
1319 } else { | 1319 } else { |
1320 // If we generate a global code snippet for deoptimization only, remember | 1320 // If we generate a global code snippet for deoptimization only, remember |
1321 // the place to continue after deoptimization. | 1321 // the place to continue after deoptimization. |
1322 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); | 1322 masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset()); |
1323 } | 1323 } |
1324 | 1324 |
1325 // Restore context register. | 1325 // Restore context register. |
1326 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1326 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1327 } | 1327 } |
1328 __ Ret(); | 1328 __ Ret(); |
1329 } | 1329 } |
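Note on the null-getter arm: when getter.is_null(), no call is emitted at all; the stub body serves only as the continuation point for optimized code that inlined the property load and then deoptimized, so recording the current pc_offset is the entire job:

    // Deopt continuation: optimized code that inlined the getter resumes here.
    masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());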
1330 | 1330 |
1331 | 1331 |
1332 #undef __ | 1332 #undef __ |
1333 #define __ ACCESS_MASM(masm()) | 1333 #define __ ACCESS_MASM(masm()) |
1334 | 1334 |
1335 | 1335 |
1336 Handle<Code> LoadStubCompiler::CompileLoadGlobal( | 1336 Handle<Code> LoadStubCompiler::CompileLoadGlobal( |
1337 Handle<HeapType> type, | 1337 Handle<HeapType> type, |
1338 Handle<GlobalObject> global, | 1338 Handle<GlobalObject> global, |
1339 Handle<PropertyCell> cell, | 1339 Handle<PropertyCell> cell, |
1340 Handle<Name> name, | 1340 Handle<Name> name, |
1341 bool is_dont_delete) { | 1341 bool is_dont_delete) { |
1342 Label miss; | 1342 Label miss; |
1343 | 1343 |
1344 HandlerFrontendHeader(type, receiver(), global, name, &miss); | 1344 HandlerFrontendHeader(type, receiver(), global, name, &miss); |
1345 | 1345 |
1346 // Get the value from the cell. | 1346 // Get the value from the cell. |
1347 __ li(a3, Operand(cell)); | 1347 __ li(a3, Operand(cell)); |
1348 __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset)); | 1348 __ ld(a4, FieldMemOperand(a3, Cell::kValueOffset)); |
1349 | 1349 |
1350 // Check for deleted property if property can actually be deleted. | 1350 // Check for deleted property if property can actually be deleted. |
1351 if (!is_dont_delete) { | 1351 if (!is_dont_delete) { |
1352 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 1352 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
1353 __ Branch(&miss, eq, t0, Operand(at)); | 1353 __ Branch(&miss, eq, a4, Operand(at)); |
1354 } | 1354 } |
1355 | 1355 |
1356 Counters* counters = isolate()->counters(); | 1356 Counters* counters = isolate()->counters(); |
1357 __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); | 1357 __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); |
1358 __ Ret(USE_DELAY_SLOT); | 1358 __ Ret(USE_DELAY_SLOT); |
1359 __ mov(v0, t0); | 1359 __ mov(v0, a4); |
1360 | 1360 |
1361 HandlerFrontendFooter(name, &miss); | 1361 HandlerFrontendFooter(name, &miss); |
1362 | 1362 |
1363 // Return the generated code. | 1363 // Return the generated code. |
1364 return GetCode(kind(), Code::NORMAL, name); | 1364 return GetCode(kind(), Code::NORMAL, name); |
1365 } | 1365 } |
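Note on the return sequence in CompileLoadGlobal: on MIPS the instruction following a jump executes in the branch delay slot. Ret(USE_DELAY_SLOT) emits jr ra and deliberately leaves the slot unfilled, so the mov that follows it in program order still runs before the return completes:

    __ Ret(USE_DELAY_SLOT);   // jr ra; the next instruction occupies the delay slot
    __ mov(v0, a4);           // executes in the slot: return value set before ra is taken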
1366 | 1366 |
1367 | 1367 |
1368 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( | 1368 Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( |
1369 TypeHandleList* types, | 1369 TypeHandleList* types, |
(...skipping 10 matching lines...)
1380 | 1380 |
1381 Label number_case; | 1381 Label number_case; |
1382 Register match = scratch1(); | 1382 Register match = scratch1(); |
1383 Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; | 1383 Label* smi_target = IncludesNumberType(types) ? &number_case : &miss; |
1384 __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. | 1384 __ JumpIfSmi(receiver(), smi_target, match); // Reg match is 0 if Smi. |
1385 | 1385 |
1386 Register map_reg = scratch2(); | 1386 Register map_reg = scratch2(); |
1387 | 1387 |
1388 int receiver_count = types->length(); | 1388 int receiver_count = types->length(); |
1389 int number_of_handled_maps = 0; | 1389 int number_of_handled_maps = 0; |
1390 __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); | 1390 __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
1391 for (int current = 0; current < receiver_count; ++current) { | 1391 for (int current = 0; current < receiver_count; ++current) { |
1392 Handle<HeapType> type = types->at(current); | 1392 Handle<HeapType> type = types->at(current); |
1393 Handle<Map> map = IC::TypeToMap(*type, isolate()); | 1393 Handle<Map> map = IC::TypeToMap(*type, isolate()); |
1394 if (!map->is_deprecated()) { | 1394 if (!map->is_deprecated()) { |
1395 number_of_handled_maps++; | 1395 number_of_handled_maps++; |
1396 // Check map and tail call if there's a match. | 1396 // Check map and tail call if there's a match. |
1397 // Separate compare from branch, to provide path for above JumpIfSmi(). | 1397 // Separate compare from branch, to provide path for above JumpIfSmi(). |
1398 __ Subu(match, map_reg, Operand(map)); | 1398 __ Dsubu(match, map_reg, Operand(map)); |
1399 if (type->Is(HeapType::Number())) { | 1399 if (type->Is(HeapType::Number())) { |
1400 ASSERT(!number_case.is_unused()); | 1400 ASSERT(!number_case.is_unused()); |
1401 __ bind(&number_case); | 1401 __ bind(&number_case); |
1402 } | 1402 } |
1403 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, | 1403 __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, |
1404 eq, match, Operand(zero_reg)); | 1404 eq, match, Operand(zero_reg)); |
1405 } | 1405 } |
1406 } | 1406 } |
1407 ASSERT(number_of_handled_maps != 0); | 1407 ASSERT(number_of_handled_maps != 0); |
1408 | 1408 |
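Note on the dispatch loop above: the map check is a subtract rather than a fused compare-and-branch so that 'match' is zero exactly on a hit. JumpIfSmi earlier clears 'match' when the receiver is a smi, and number_case is bound immediately before the conditional Jump, so smi receivers share the Number handler's dispatch path. The idiom, with 'map' and 'handler' standing for the current list entries:

    __ Dsubu(match, map_reg, Operand(map));  // match == 0  iff  receiver map == map
    __ bind(&number_case);                   // smi path arrives here with match == 0
    __ Jump(handler, RelocInfo::CODE_TARGET,
            eq, match, Operand(zero_reg));   // tail-call the handler on a hit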
(...skipping 19 matching lines...)
1428 | 1428 |
1429 | 1429 |
1430 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( | 1430 Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( |
1431 MapHandleList* receiver_maps, | 1431 MapHandleList* receiver_maps, |
1432 CodeHandleList* handler_stubs, | 1432 CodeHandleList* handler_stubs, |
1433 MapHandleList* transitioned_maps) { | 1433 MapHandleList* transitioned_maps) { |
1434 Label miss; | 1434 Label miss; |
1435 __ JumpIfSmi(receiver(), &miss); | 1435 __ JumpIfSmi(receiver(), &miss); |
1436 | 1436 |
1437 int receiver_count = receiver_maps->length(); | 1437 int receiver_count = receiver_maps->length(); |
1438 __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); | 1438 __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
1439 for (int i = 0; i < receiver_count; ++i) { | 1439 for (int i = 0; i < receiver_count; ++i) { |
1440 if (transitioned_maps->at(i).is_null()) { | 1440 if (transitioned_maps->at(i).is_null()) { |
1441 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, | 1441 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, |
1442 scratch1(), Operand(receiver_maps->at(i))); | 1442 scratch1(), Operand(receiver_maps->at(i))); |
1443 } else { | 1443 } else { |
1444 Label next_map; | 1444 Label next_map; |
1445 __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i))); | 1445 __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i))); |
1446 __ li(transition_map(), Operand(transitioned_maps->at(i))); | 1446 __ li(transition_map(), Operand(transitioned_maps->at(i))); |
1447 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); | 1447 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
1448 __ bind(&next_map); | 1448 __ bind(&next_map); |
1449 } | 1449 } |
1450 } | 1450 } |
1451 | 1451 |
1452 __ bind(&miss); | 1452 __ bind(&miss); |
1453 TailCallBuiltin(masm(), MissBuiltin(kind())); | 1453 TailCallBuiltin(masm(), MissBuiltin(kind())); |
1454 | 1454 |
1455 // Return the generated code. | 1455 // Return the generated code. |
1456 return GetICCode( | 1456 return GetICCode( |
1457 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); | 1457 kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); |
1458 } | 1458 } |
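Note on CompileStorePolymorphic: each receiver map takes one of two arms. A null entry in transitioned_maps means the store leaves the map unchanged, so a map match can jump straight to the handler; a non-null entry means the store migrates the object, so the target map is placed in transition_map() for the handler to install. Sketch of the transitioning arm, with receiver_map/new_map/handler standing for the list entries:

    Label next_map;
    __ Branch(&next_map, ne, scratch1(), Operand(receiver_map));  // not this map
    __ li(transition_map(), Operand(new_map));  // handler installs new_map with the store
    __ Jump(handler, RelocInfo::CODE_TARGET);   // unconditional: the map already matched
    __ bind(&next_map);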
1459 | 1459 |
1460 | 1460 |
1461 #undef __ | 1461 #undef __ |
1462 #define __ ACCESS_MASM(masm) | 1462 #define __ ACCESS_MASM(masm) |
1463 | 1463 |
1464 | 1464 |
1465 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( | 1465 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
1466 MacroAssembler* masm) { | 1466 MacroAssembler* masm) { |
1467 // The return address is in ra. | 1467 // The return address is in ra. |
1468 Label slow, miss; | 1468 Label slow, miss; |
1469 | 1469 |
1470 Register key = LoadIC::NameRegister(); | 1470 Register key = LoadIC::NameRegister(); |
1471 Register receiver = LoadIC::ReceiverRegister(); | 1471 Register receiver = LoadIC::ReceiverRegister(); |
1472 ASSERT(receiver.is(a1)); | 1472 ASSERT(receiver.is(a1)); |
1473 ASSERT(key.is(a2)); | 1473 ASSERT(key.is(a2)); |
1474 | 1474 |
1475 __ UntagAndJumpIfNotSmi(t2, key, &miss); | 1475 __ UntagAndJumpIfNotSmi(a6, key, &miss); |
1476 __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 1476 __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
1477 __ LoadFromNumberDictionary(&slow, t0, key, v0, t2, a3, t1); | 1477 ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
| 1478 __ LoadFromNumberDictionary(&slow, a4, key, v0, a6, a3, a5); |
1478 __ Ret(); | 1479 __ Ret(); |
1479 | 1480 |
1480 // Slow case, key and receiver still unmodified. | 1481 // Slow case, key and receiver still unmodified. |
1481 __ bind(&slow); | 1482 __ bind(&slow); |
1482 __ IncrementCounter( | 1483 __ IncrementCounter( |
1483 masm->isolate()->counters()->keyed_load_external_array_slow(), | 1484 masm->isolate()->counters()->keyed_load_external_array_slow(), |
1484 1, a2, a3); | 1485 1, a2, a3); |
1485 | 1486 |
1486 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); | 1487 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
1487 | 1488 |
1488 // Miss case, call the runtime. | 1489 // Miss case, call the runtime. |
1489 __ bind(&miss); | 1490 __ bind(&miss); |
1490 | 1491 |
1491 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); | 1492 TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); |
1492 } | 1493 } |
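Note on the ASSERT added above: it pins the 64-bit smi layout this stub relies on. With a one-bit tag and a 31-bit shift, the 32-bit payload sits exactly in the upper word, so UntagAndJumpIfNotSmi can both untag and recover the key with a single arithmetic shift by 32:

    // 64-bit smi layout:  bits 63..32 = signed 32-bit payload, bits 31..0 = 0
    ASSERT(kSmiTagSize + kSmiShiftSize == 32);  // payload begins exactly at bit 32
    // untagging is therefore one instruction:  dsra32 dst, src, 0  (shift right 32)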
1493 | 1494 |
1494 | 1495 |
1495 #undef __ | 1496 #undef __ |
1496 | 1497 |
1497 } } // namespace v8::internal | 1498 } } // namespace v8::internal |
1498 | 1499 |
1499 #endif // V8_TARGET_ARCH_MIPS | 1500 #endif // V8_TARGET_ARCH_MIPS64 |