Chromium Code Reviews
Unified Diff: src/s390/codegen-s390.cc

Issue 2523473002: [cleanup] Drop handwritten KeyedStoreIC code (Closed)
Patch Set: rebased | Created 4 years ago
1 1 // Copyright 2015 the V8 project authors. All rights reserved.
2 2 // Use of this source code is governed by a BSD-style license that can be
3 3 // found in the LICENSE file.
4 4
5 5 #include "src/s390/codegen-s390.h"
6 6
7 7 #if V8_TARGET_ARCH_S390
8 8
9 9 #include <memory>
10 10
(...skipping 48 matching lines...)
59 59 masm->LeaveFrame(StackFrame::INTERNAL);
60 60 DCHECK(masm->has_frame());
61 61 masm->set_has_frame(false);
62 62 }
63 63
64 64 // -------------------------------------------------------------------------
65 65 // Code generators
66 66
67 67 #define __ ACCESS_MASM(masm)
68 68
69 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
70 MacroAssembler* masm, Register receiver, Register key, Register value,
71 Register target_map, AllocationSiteMode mode,
72 Label* allocation_memento_found) {
73 Register scratch_elements = r6;
74 DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
75
76 if (mode == TRACK_ALLOCATION_SITE) {
77 DCHECK(allocation_memento_found != NULL);
78 __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements, r1,
79 allocation_memento_found);
80 }
81
82 // Set transitioned map.
83 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
84 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
85 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
86 OMIT_SMI_CHECK);
87 }
88
89 void ElementsTransitionGenerator::GenerateSmiToDouble(
90 MacroAssembler* masm, Register receiver, Register key, Register value,
91 Register target_map, AllocationSiteMode mode, Label* fail) {
92 // lr contains the return address
93 Label loop, entry, convert_hole, gc_required, only_change_map, done;
94 Register elements = r6;
95 Register length = r7;
96 Register array = r8;
97 Register array_end = array;
98
99 // target_map parameter can be clobbered.
100 Register scratch1 = target_map;
101 Register scratch2 = r1;
102
103 // Verify input registers don't conflict with locals.
104 DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
105 scratch2));
106
107 if (mode == TRACK_ALLOCATION_SITE) {
108 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch2, fail);
109 }
110
111 // Check for empty arrays, which only require a map transition and no changes
112 // to the backing store.
113 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
114 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
115 __ beq(&only_change_map, Label::kNear);
116
117 // Preserve lr and use r14 as a temporary register.
118 __ push(r14);
119
120 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
121 // length: number of elements (smi-tagged)
122
123 // Allocate new FixedDoubleArray.
124 __ SmiToDoubleArrayOffset(r14, length);
125 __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
126 __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
127 __ SubP(array, array, Operand(kHeapObjectTag));
128 // Set destination FixedDoubleArray's length and map.
129 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
130 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
131 // Update receiver's map.
132 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
133
134 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
135 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
136 kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
137 OMIT_SMI_CHECK);
138 // Replace receiver's backing store with newly created FixedDoubleArray.
139 __ AddP(scratch1, array, Operand(kHeapObjectTag));
140 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
141 __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
142 kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
143 OMIT_SMI_CHECK);
144
145 // Prepare for conversion loop.
146 __ AddP(target_map, elements,
147 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
148 __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
149 __ SmiToDoubleArrayOffset(array, length);
150 __ AddP(array_end, r9, array);
151 // Repurpose registers no longer in use.
152 #if V8_TARGET_ARCH_S390X
153 Register hole_int64 = elements;
154 #else
155 Register hole_lower = elements;
156 Register hole_upper = length;
157 #endif
158 // scratch1: begin of source FixedArray element fields, not tagged
159 // hole_lower: kHoleNanLower32 OR hole_int64
160 // hole_upper: kHoleNanUpper32
161 // array_end: end of destination FixedDoubleArray, not tagged
162 // scratch2: begin of FixedDoubleArray element fields, not tagged
163
164 __ b(&entry, Label::kNear);
165
166 __ bind(&only_change_map);
167 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
168 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
169 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
170 OMIT_SMI_CHECK);
171 __ b(&done, Label::kNear);
172
173 // Call into runtime if GC is required.
174 __ bind(&gc_required);
175 __ pop(r14);
176 __ b(fail);
177
178 // Convert and copy elements.
179 __ bind(&loop);
180 __ LoadP(r14, MemOperand(scratch1));
181 __ la(scratch1, MemOperand(scratch1, kPointerSize));
182 // r14: current element
183 __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
184
185 // Normal smi, convert to double and store.
186 __ ConvertIntToDouble(r14, d0);
187 __ StoreDouble(d0, MemOperand(r9, 0));
188 __ la(r9, MemOperand(r9, 8));
189
190 __ b(&entry, Label::kNear);
191
192 // Hole found, store the-hole NaN.
193 __ bind(&convert_hole);
194 if (FLAG_debug_code) {
195 // Restore a "smi-untagged" heap object.
196 __ LoadP(r1, MemOperand(r5, -kPointerSize));
197 __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
198 __ Assert(eq, kObjectFoundInSmiOnlyArray);
199 }
200 #if V8_TARGET_ARCH_S390X
201 __ stg(hole_int64, MemOperand(r9, 0));
202 #else
203 __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
204 __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
205 #endif
206 __ AddP(r9, Operand(8));
207
208 __ bind(&entry);
209 __ CmpP(r9, array_end);
210 __ blt(&loop);
211
212 __ pop(r14);
213 __ bind(&done);
214 }
215
216 void ElementsTransitionGenerator::GenerateDoubleToObject(
217 MacroAssembler* masm, Register receiver, Register key, Register value,
218 Register target_map, AllocationSiteMode mode, Label* fail) {
219 // Register lr contains the return address.
220 Label loop, convert_hole, gc_required, only_change_map;
221 Register elements = r6;
222 Register array = r8;
223 Register length = r7;
224 Register scratch = r1;
225 Register scratch3 = r9;
226 Register hole_value = r9;
227
228 // Verify input registers don't conflict with locals.
229 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
230 scratch));
231
232 if (mode == TRACK_ALLOCATION_SITE) {
233 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, scratch3, fail);
234 }
235
236 // Check for empty arrays, which only require a map transition and no changes
237 // to the backing store.
238 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
239 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
240 __ beq(&only_change_map);
241
242 __ Push(target_map, receiver, key, value);
243 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
244 // elements: source FixedDoubleArray
245 // length: number of elements (smi-tagged)
246
247 // Allocate new FixedArray.
248 // Re-use value and target_map registers, as they have been saved on the
249 // stack.
250 Register array_size = value;
251 Register allocate_scratch = target_map;
252 __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
253 __ SmiToPtrArrayOffset(r0, length);
254 __ AddP(array_size, r0);
255 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
256 NO_ALLOCATION_FLAGS);
257 // array: destination FixedArray, tagged as heap object
258 // Set destination FixedDoubleArray's length and map.
259 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
260 __ StoreP(length, FieldMemOperand(array, FixedDoubleArray::kLengthOffset),
261 r0);
262 __ StoreP(scratch, FieldMemOperand(array, HeapObject::kMapOffset), r0);
263
264 // Prepare for conversion loop.
265 Register src_elements = elements;
266 Register dst_elements = target_map;
267 Register dst_end = length;
268 Register heap_number_map = scratch;
269 __ AddP(src_elements,
270 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
271 __ SmiToPtrArrayOffset(length, length);
272 __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
273
274 Label initialization_loop, loop_done;
275 __ ShiftRightP(scratch, length, Operand(kPointerSizeLog2));
276 __ beq(&loop_done, Label::kNear);
277
278 // Allocating heap numbers in the loop below can fail and cause a jump to
279 // gc_required. We can't leave a partly initialized FixedArray behind,
280 // so pessimistically fill it with holes now.
281 __ AddP(dst_elements, array,
282 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
283 __ bind(&initialization_loop);
284 __ StoreP(hole_value, MemOperand(dst_elements, kPointerSize));
285 __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
286 __ BranchOnCount(scratch, &initialization_loop);
287
288 __ AddP(dst_elements, array,
289 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
290 __ AddP(dst_end, dst_elements, length);
291 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
292 // Using offsetted addresses in src_elements to fully take advantage of
293 // post-indexing.
294 // dst_elements: begin of destination FixedArray element fields, not tagged
295 // src_elements: begin of source FixedDoubleArray element fields,
296 // not tagged, +4
297 // dst_end: end of destination FixedArray, not tagged
298 // array: destination FixedArray
299 // hole_value: the-hole pointer
300 // heap_number_map: heap number map
301 __ b(&loop, Label::kNear);
302
303 // Call into runtime if GC is required.
304 __ bind(&gc_required);
305 __ Pop(target_map, receiver, key, value);
306 __ b(fail);
307
308 // Replace the-hole NaN with the-hole pointer.
309 __ bind(&convert_hole);
310 __ StoreP(hole_value, MemOperand(dst_elements));
311 __ AddP(dst_elements, Operand(kPointerSize));
312 __ CmpLogicalP(dst_elements, dst_end);
313 __ bge(&loop_done);
314
315 __ bind(&loop);
316 Register upper_bits = key;
317 __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
318 __ AddP(src_elements, Operand(kDoubleSize));
319 // upper_bits: current element's upper 32 bit
320 // src_elements: address of next element's upper 32 bit
321 __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
322 __ beq(&convert_hole, Label::kNear);
323
324 // Non-hole double, copy value into a heap number.
325 Register heap_number = receiver;
326 Register scratch2 = value;
327 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
328 &gc_required);
329 // heap_number: new heap number
330 #if V8_TARGET_ARCH_S390X
331 __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
332 // subtract tag for std
333 __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
334 __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
335 #else
336 __ LoadlW(scratch2,
337 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
338 __ LoadlW(upper_bits,
339 MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
340 __ StoreW(scratch2,
341 FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
342 __ StoreW(upper_bits,
343 FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
344 #endif
345 __ LoadRR(scratch2, dst_elements);
346 __ StoreP(heap_number, MemOperand(dst_elements));
347 __ AddP(dst_elements, Operand(kPointerSize));
348 __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
349 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
350 __ CmpLogicalP(dst_elements, dst_end);
351 __ blt(&loop);
352 __ bind(&loop_done);
353
354 __ Pop(target_map, receiver, key, value);
355 // Replace receiver's backing store with newly created and filled FixedArray.
356 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
357 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
358 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
359 OMIT_SMI_CHECK);
360
361 __ bind(&only_change_map);
362 // Update receiver's map.
363 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
364 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
365 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
366 OMIT_SMI_CHECK);
367 }
368
369 69 // assume ip can be used as a scratch register below
370 70 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
371 71 Register index, Register result,
372 72 Label* call_runtime) {
373 73 // Fetch the instance type of the receiver into result register.
374 74 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
375 75 __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
376 76
377 77 // We need special handling for indirect strings.
378 78 Label check_sequential;
(...skipping 150 matching lines...)
529 229 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
530 230 patcher.masm()->nop(); // 2-byte nops().
531 231 }
532 232 }
533 233 }
534 234
535 235 } // namespace internal
536 236 } // namespace v8
537 237
538 238 #endif // V8_TARGET_ARCH_S390