OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <assert.h> // For assert | 5 #include <assert.h> // For assert |
6 #include <limits.h> // For LONG_MIN, LONG_MAX. | 6 #include <limits.h> // For LONG_MIN, LONG_MAX. |
7 | 7 |
8 #if V8_TARGET_ARCH_PPC | 8 #if V8_TARGET_ARCH_S390 |
9 | 9 |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
11 #include "src/base/division-by-constant.h" | 11 #include "src/base/division-by-constant.h" |
12 #include "src/bootstrapper.h" | 12 #include "src/bootstrapper.h" |
13 #include "src/codegen.h" | 13 #include "src/codegen.h" |
14 #include "src/debug/debug.h" | 14 #include "src/debug/debug.h" |
15 #include "src/register-configuration.h" | 15 #include "src/register-configuration.h" |
16 #include "src/runtime/runtime.h" | 16 #include "src/runtime/runtime.h" |
17 | 17 |
18 #include "src/ppc/macro-assembler-ppc.h" | 18 #include "src/s390/macro-assembler-s390.h" |
19 | 19 |
20 namespace v8 { | 20 namespace v8 { |
21 namespace internal { | 21 namespace internal { |
22 | 22 |
23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size, | 23 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size, |
24 CodeObjectRequired create_code_object) | 24 CodeObjectRequired create_code_object) |
25 : Assembler(arg_isolate, buffer, size), | 25 : Assembler(arg_isolate, buffer, size), |
26 generating_stub_(false), | 26 generating_stub_(false), |
27 has_frame_(false) { | 27 has_frame_(false) { |
28 if (create_code_object == CodeObjectRequired::kYes) { | 28 if (create_code_object == CodeObjectRequired::kYes) { |
29 code_object_ = | 29 code_object_ = |
30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); | 30 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate()); |
31 } | 31 } |
32 } | 32 } |
33 | 33 |
34 | 34 void MacroAssembler::Jump(Register target) { b(target); } |
35 void MacroAssembler::Jump(Register target) { | |
36 mtctr(target); | |
37 bctr(); | |
38 } | |
39 | |
40 | 35 |
41 void MacroAssembler::JumpToJSEntry(Register target) { | 36 void MacroAssembler::JumpToJSEntry(Register target) { |
42 Move(ip, target); | 37 Move(ip, target); |
43 Jump(ip); | 38 Jump(ip); |
44 } | 39 } |
45 | 40 |
46 | |
47 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, | 41 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
48 Condition cond, CRegister cr) { | 42 Condition cond, CRegister) { |
49 Label skip; | 43 Label skip; |
50 | 44 |
51 if (cond != al) b(NegateCondition(cond), &skip, cr); | 45 if (cond != al) b(NegateCondition(cond), &skip); |
52 | 46 |
53 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY); | 47 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY); |
54 | 48 |
55 mov(ip, Operand(target, rmode)); | 49 mov(ip, Operand(target, rmode)); |
56 mtctr(ip); | 50 b(ip); |
57 bctr(); | |
58 | 51 |
59 bind(&skip); | 52 bind(&skip); |
60 } | 53 } |
61 | 54 |
62 | |
63 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, | 55 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond, |
64 CRegister cr) { | 56 CRegister cr) { |
65 DCHECK(!RelocInfo::IsCodeTarget(rmode)); | 57 DCHECK(!RelocInfo::IsCodeTarget(rmode)); |
66 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr); | 58 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr); |
67 } | 59 } |
68 | 60 |
69 | |
70 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, | 61 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
71 Condition cond) { | 62 Condition cond) { |
72 DCHECK(RelocInfo::IsCodeTarget(rmode)); | 63 DCHECK(RelocInfo::IsCodeTarget(rmode)); |
73 // 'code' is always generated ppc code, never THUMB code | 64 jump(code, rmode, cond); |
74 AllowDeferredHandleDereference embedding_raw_address; | |
75 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); | |
76 } | 65 } |
77 | 66 |
78 | 67 int MacroAssembler::CallSize(Register target) { return 2; } // BASR |
79 int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; } | |
80 | |
81 | 68 |
82 void MacroAssembler::Call(Register target) { | 69 void MacroAssembler::Call(Register target) { |
83 BlockTrampolinePoolScope block_trampoline_pool(this); | |
84 Label start; | 70 Label start; |
85 bind(&start); | 71 bind(&start); |
86 | 72 |
87 // Statement positions are expected to be recorded when the target | 73 // Statement positions are expected to be recorded when the target |
88 // address is loaded. | 74 // address is loaded. |
89 positions_recorder()->WriteRecordedPositions(); | 75 positions_recorder()->WriteRecordedPositions(); |
90 | 76 |
91 // branch via link register and set LK bit for return point | 77 // Branch to target via indirect branch |
92 mtctr(target); | 78 basr(r14, target); |
93 bctrl(); | |
94 | 79 |
95 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start)); | 80 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start)); |
96 } | 81 } |
97 | 82 |
98 | |
99 void MacroAssembler::CallJSEntry(Register target) { | 83 void MacroAssembler::CallJSEntry(Register target) { |
100 DCHECK(target.is(ip)); | 84 DCHECK(target.is(ip)); |
101 Call(target); | 85 Call(target); |
102 } | 86 } |
103 | 87 |
104 | |
105 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode, | 88 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode, |
106 Condition cond) { | 89 Condition cond) { |
107 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode); | 90 // S390 Assembler::move sequence is IILF / IIHF |
108 return (2 + instructions_required_for_mov(ip, mov_operand)) * kInstrSize; | 91 int size; |
| 92 #if V8_TARGET_ARCH_S390X |
| 93 size = 14; // IILF + IIHF + BASR |
| 94 #else |
| 95 size = 8; // IILF + BASR |
| 96 #endif |
| 97 return size; |
109 } | 98 } |
110 | 99 |
111 | |
112 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target, | 100 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target, |
113 RelocInfo::Mode rmode, | 101 RelocInfo::Mode rmode, |
114 Condition cond) { | 102 Condition cond) { |
115 return (2 + kMovInstructionsNoConstantPool) * kInstrSize; | 103 // S390 Assembler::move sequence is IILF / IIHF |
| 104 int size; |
| 105 #if V8_TARGET_ARCH_S390X |
| 106 size = 14; // IILF + IIHF + BASR |
| 107 #else |
| 108 size = 8; // IILF + BASR |
| 109 #endif |
| 110 return size; |
116 } | 111 } |
117 | 112 |
118 | |
119 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, | 113 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode, |
120 Condition cond) { | 114 Condition cond) { |
121 BlockTrampolinePoolScope block_trampoline_pool(this); | |
122 DCHECK(cond == al); | 115 DCHECK(cond == al); |
123 | 116 |
124 #ifdef DEBUG | 117 #ifdef DEBUG |
125 // Check the expected size before generating code to ensure we assume the same | 118 // Check the expected size before generating code to ensure we assume the same |
126 // constant pool availability (e.g., whether constant pool is full or not). | 119 // constant pool availability (e.g., whether constant pool is full or not). |
127 int expected_size = CallSize(target, rmode, cond); | 120 int expected_size = CallSize(target, rmode, cond); |
128 Label start; | 121 Label start; |
129 bind(&start); | 122 bind(&start); |
130 #endif | 123 #endif |
131 | 124 |
132 // Statement positions are expected to be recorded when the target | 125 // Statement positions are expected to be recorded when the target |
133 // address is loaded. | 126 // address is loaded. |
134 positions_recorder()->WriteRecordedPositions(); | 127 positions_recorder()->WriteRecordedPositions(); |
135 | 128 |
136 // This can likely be optimized to make use of bc() with 24bit relative | |
137 // | |
138 // RecordRelocInfo(x.rmode_, x.imm_); | |
139 // bc( BA, .... offset, LKset); | |
140 // | |
141 | |
142 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode)); | 129 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode)); |
143 mtctr(ip); | 130 basr(r14, ip); |
144 bctrl(); | |
145 | 131 |
146 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); | 132 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
147 } | 133 } |
148 | 134 |
149 | |
150 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode, | 135 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode, |
151 TypeFeedbackId ast_id, Condition cond) { | 136 TypeFeedbackId ast_id, Condition cond) { |
152 AllowDeferredHandleDereference using_raw_address; | 137 return 6; // BRASL |
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond); | |
154 } | 138 } |
155 | 139 |
156 | |
157 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, | 140 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, |
158 TypeFeedbackId ast_id, Condition cond) { | 141 TypeFeedbackId ast_id, Condition cond) { |
159 BlockTrampolinePoolScope block_trampoline_pool(this); | 142 DCHECK(RelocInfo::IsCodeTarget(rmode) && cond == al); |
160 DCHECK(RelocInfo::IsCodeTarget(rmode)); | |
161 | 143 |
162 #ifdef DEBUG | 144 #ifdef DEBUG |
163 // Check the expected size before generating code to ensure we assume the same | 145 // Check the expected size before generating code to ensure we assume the same |
164 // constant pool availability (e.g., whether constant pool is full or not). | 146 // constant pool availability (e.g., whether constant pool is full or not). |
165 int expected_size = CallSize(code, rmode, ast_id, cond); | 147 int expected_size = CallSize(code, rmode, ast_id, cond); |
166 Label start; | 148 Label start; |
167 bind(&start); | 149 bind(&start); |
168 #endif | 150 #endif |
169 | 151 call(code, rmode, ast_id); |
170 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) { | |
171 SetRecordedAstId(ast_id); | |
172 rmode = RelocInfo::CODE_TARGET_WITH_ID; | |
173 } | |
174 AllowDeferredHandleDereference using_raw_address; | |
175 Call(reinterpret_cast<Address>(code.location()), rmode, cond); | |
176 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); | 152 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start)); |
177 } | 153 } |
178 | 154 |
179 | |
180 void MacroAssembler::Drop(int count) { | 155 void MacroAssembler::Drop(int count) { |
181 if (count > 0) { | 156 if (count > 0) { |
182 Add(sp, sp, count * kPointerSize, r0); | 157 la(sp, MemOperand(sp, count * kPointerSize)); |
183 } | 158 } |
184 } | 159 } |
185 | 160 |
186 void MacroAssembler::Drop(Register count, Register scratch) { | 161 void MacroAssembler::Drop(Register count, Register scratch) { |
187 ShiftLeftImm(scratch, count, Operand(kPointerSizeLog2)); | 162 ShiftLeftP(scratch, count, Operand(kPointerSizeLog2)); |
188 add(sp, sp, scratch); | 163 AddP(sp, sp, scratch); |
189 } | 164 } |
190 | 165 |
191 void MacroAssembler::Call(Label* target) { b(target, SetLK); } | 166 void MacroAssembler::Call(Label* target) { b(r14, target); } |
192 | |
193 | 167 |
194 void MacroAssembler::Push(Handle<Object> handle) { | 168 void MacroAssembler::Push(Handle<Object> handle) { |
195 mov(r0, Operand(handle)); | 169 mov(r0, Operand(handle)); |
196 push(r0); | 170 push(r0); |
197 } | 171 } |
198 | 172 |
199 | |
200 void MacroAssembler::Move(Register dst, Handle<Object> value) { | 173 void MacroAssembler::Move(Register dst, Handle<Object> value) { |
201 AllowDeferredHandleDereference smi_check; | 174 AllowDeferredHandleDereference smi_check; |
202 if (value->IsSmi()) { | 175 if (value->IsSmi()) { |
203 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value)); | 176 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value)); |
204 } else { | 177 } else { |
205 DCHECK(value->IsHeapObject()); | 178 DCHECK(value->IsHeapObject()); |
206 if (isolate()->heap()->InNewSpace(*value)) { | 179 if (isolate()->heap()->InNewSpace(*value)) { |
207 Handle<Cell> cell = isolate()->factory()->NewCell(value); | 180 Handle<Cell> cell = isolate()->factory()->NewCell(value); |
208 mov(dst, Operand(cell)); | 181 mov(dst, Operand(cell)); |
209 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset)); | 182 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset)); |
210 } else { | 183 } else { |
211 mov(dst, Operand(value)); | 184 mov(dst, Operand(value)); |
212 } | 185 } |
213 } | 186 } |
214 } | 187 } |
215 | 188 |
216 | |
217 void MacroAssembler::Move(Register dst, Register src, Condition cond) { | 189 void MacroAssembler::Move(Register dst, Register src, Condition cond) { |
218 DCHECK(cond == al); | |
219 if (!dst.is(src)) { | 190 if (!dst.is(src)) { |
220 mr(dst, src); | 191 LoadRR(dst, src); |
221 } | 192 } |
222 } | 193 } |
223 | 194 |
224 | |
225 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { | 195 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { |
226 if (!dst.is(src)) { | 196 if (!dst.is(src)) { |
227 fmr(dst, src); | 197 ldr(dst, src); |
228 } | 198 } |
229 } | 199 } |
230 | 200 |
| 201 void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src) { |
| 202 StoreDouble(dst, MemOperand(sp, -kDoubleSize)); |
| 203 #if V8_TARGET_LITTLE_ENDIAN |
| 204 StoreW(src, MemOperand(sp, -kDoubleSize)); |
| 205 #else |
| 206 StoreW(src, MemOperand(sp, -kDoubleSize / 2)); |
| 207 #endif |
| 208 ldy(dst, MemOperand(sp, -kDoubleSize)); |
| 209 } |
| 210 |
| 211 void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src) { |
| 212 StoreDouble(dst, MemOperand(sp, -kDoubleSize)); |
| 213 #if V8_TARGET_LITTLE_ENDIAN |
| 214 StoreW(src, MemOperand(sp, -kDoubleSize / 2)); |
| 215 #else |
| 216 StoreW(src, MemOperand(sp, -kDoubleSize)); |
| 217 #endif |
| 218 ldy(dst, MemOperand(sp, -kDoubleSize)); |
| 219 } |
231 | 220 |
232 void MacroAssembler::MultiPush(RegList regs, Register location) { | 221 void MacroAssembler::MultiPush(RegList regs, Register location) { |
233 int16_t num_to_push = NumberOfBitsSet(regs); | 222 int16_t num_to_push = NumberOfBitsSet(regs); |
234 int16_t stack_offset = num_to_push * kPointerSize; | 223 int16_t stack_offset = num_to_push * kPointerSize; |
235 | 224 |
236 subi(location, location, Operand(stack_offset)); | 225 SubP(location, location, Operand(stack_offset)); |
237 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) { | 226 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) { |
238 if ((regs & (1 << i)) != 0) { | 227 if ((regs & (1 << i)) != 0) { |
239 stack_offset -= kPointerSize; | 228 stack_offset -= kPointerSize; |
240 StoreP(ToRegister(i), MemOperand(location, stack_offset)); | 229 StoreP(ToRegister(i), MemOperand(location, stack_offset)); |
241 } | 230 } |
242 } | 231 } |
243 } | 232 } |
244 | 233 |
245 | |
246 void MacroAssembler::MultiPop(RegList regs, Register location) { | 234 void MacroAssembler::MultiPop(RegList regs, Register location) { |
247 int16_t stack_offset = 0; | 235 int16_t stack_offset = 0; |
248 | 236 |
249 for (int16_t i = 0; i < Register::kNumRegisters; i++) { | 237 for (int16_t i = 0; i < Register::kNumRegisters; i++) { |
250 if ((regs & (1 << i)) != 0) { | 238 if ((regs & (1 << i)) != 0) { |
251 LoadP(ToRegister(i), MemOperand(location, stack_offset)); | 239 LoadP(ToRegister(i), MemOperand(location, stack_offset)); |
252 stack_offset += kPointerSize; | 240 stack_offset += kPointerSize; |
253 } | 241 } |
254 } | 242 } |
255 addi(location, location, Operand(stack_offset)); | 243 AddP(location, location, Operand(stack_offset)); |
256 } | 244 } |
257 | 245 |
258 | |
259 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) { | 246 void MacroAssembler::MultiPushDoubles(RegList dregs, Register location) { |
260 int16_t num_to_push = NumberOfBitsSet(dregs); | 247 int16_t num_to_push = NumberOfBitsSet(dregs); |
261 int16_t stack_offset = num_to_push * kDoubleSize; | 248 int16_t stack_offset = num_to_push * kDoubleSize; |
262 | 249 |
263 subi(location, location, Operand(stack_offset)); | 250 SubP(location, location, Operand(stack_offset)); |
264 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) { | 251 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) { |
265 if ((dregs & (1 << i)) != 0) { | 252 if ((dregs & (1 << i)) != 0) { |
266 DoubleRegister dreg = DoubleRegister::from_code(i); | 253 DoubleRegister dreg = DoubleRegister::from_code(i); |
267 stack_offset -= kDoubleSize; | 254 stack_offset -= kDoubleSize; |
268 stfd(dreg, MemOperand(location, stack_offset)); | 255 StoreDouble(dreg, MemOperand(location, stack_offset)); |
269 } | 256 } |
270 } | 257 } |
271 } | 258 } |
272 | 259 |
273 | |
274 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) { | 260 void MacroAssembler::MultiPopDoubles(RegList dregs, Register location) { |
275 int16_t stack_offset = 0; | 261 int16_t stack_offset = 0; |
276 | 262 |
277 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) { | 263 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) { |
278 if ((dregs & (1 << i)) != 0) { | 264 if ((dregs & (1 << i)) != 0) { |
279 DoubleRegister dreg = DoubleRegister::from_code(i); | 265 DoubleRegister dreg = DoubleRegister::from_code(i); |
280 lfd(dreg, MemOperand(location, stack_offset)); | 266 LoadDouble(dreg, MemOperand(location, stack_offset)); |
281 stack_offset += kDoubleSize; | 267 stack_offset += kDoubleSize; |
282 } | 268 } |
283 } | 269 } |
284 addi(location, location, Operand(stack_offset)); | 270 AddP(location, location, Operand(stack_offset)); |
285 } | 271 } |
286 | 272 |
287 | |
288 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, | 273 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, |
289 Condition cond) { | 274 Condition) { |
290 DCHECK(cond == al); | |
291 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); | 275 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); |
292 } | 276 } |
293 | 277 |
294 | |
295 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index, | 278 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index, |
296 Condition cond) { | 279 Condition) { |
297 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); | 280 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); |
298 DCHECK(cond == al); | 281 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2)); |
299 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0); | |
300 } | 282 } |
301 | 283 |
302 | |
303 void MacroAssembler::InNewSpace(Register object, Register scratch, | 284 void MacroAssembler::InNewSpace(Register object, Register scratch, |
304 Condition cond, Label* branch) { | 285 Condition cond, Label* branch) { |
305 DCHECK(cond == eq || cond == ne); | 286 DCHECK(cond == eq || cond == ne); |
| 287 // TODO(joransiu): check if we can merge mov Operand into AndP. |
306 const int mask = | 288 const int mask = |
307 (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE); | 289 (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE); |
308 CheckPageFlag(object, scratch, mask, cond, branch); | 290 CheckPageFlag(object, scratch, mask, cond, branch); |
309 } | 291 } |
310 | 292 |
311 | |
312 void MacroAssembler::RecordWriteField( | 293 void MacroAssembler::RecordWriteField( |
313 Register object, int offset, Register value, Register dst, | 294 Register object, int offset, Register value, Register dst, |
314 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, | 295 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, |
315 RememberedSetAction remembered_set_action, SmiCheck smi_check, | 296 RememberedSetAction remembered_set_action, SmiCheck smi_check, |
316 PointersToHereCheck pointers_to_here_check_for_value) { | 297 PointersToHereCheck pointers_to_here_check_for_value) { |
317 // First, check if a write barrier is even needed. The tests below | 298 // First, check if a write barrier is even needed. The tests below |
318 // catch stores of Smis. | 299 // catch stores of Smis. |
319 Label done; | 300 Label done; |
320 | 301 |
321 // Skip barrier if writing a smi. | 302 // Skip barrier if writing a smi. |
322 if (smi_check == INLINE_SMI_CHECK) { | 303 if (smi_check == INLINE_SMI_CHECK) { |
323 JumpIfSmi(value, &done); | 304 JumpIfSmi(value, &done); |
324 } | 305 } |
325 | 306 |
326 // Although the object register is tagged, the offset is relative to the start | 307 // Although the object register is tagged, the offset is relative to the start |
327 // of the object, so offset must be a multiple of kPointerSize. | 308 // of the object, so offset must be a multiple of kPointerSize. |
328 DCHECK(IsAligned(offset, kPointerSize)); | 309 DCHECK(IsAligned(offset, kPointerSize)); |
329 | 310 |
330 Add(dst, object, offset - kHeapObjectTag, r0); | 311 lay(dst, MemOperand(object, offset - kHeapObjectTag)); |
331 if (emit_debug_code()) { | 312 if (emit_debug_code()) { |
332 Label ok; | 313 Label ok; |
333 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); | 314 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
334 beq(&ok, cr0); | 315 beq(&ok, Label::kNear); |
335 stop("Unaligned cell in write barrier"); | 316 stop("Unaligned cell in write barrier"); |
336 bind(&ok); | 317 bind(&ok); |
337 } | 318 } |
338 | 319 |
339 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action, | 320 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action, |
340 OMIT_SMI_CHECK, pointers_to_here_check_for_value); | 321 OMIT_SMI_CHECK, pointers_to_here_check_for_value); |
341 | 322 |
342 bind(&done); | 323 bind(&done); |
343 | 324 |
344 // Clobber clobbered input registers when running with the debug-code flag | 325 // Clobber clobbered input registers when running with the debug-code flag |
345 // turned on to provoke errors. | 326 // turned on to provoke errors. |
346 if (emit_debug_code()) { | 327 if (emit_debug_code()) { |
347 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4))); | 328 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4))); |
348 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8))); | 329 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8))); |
349 } | 330 } |
350 } | 331 } |
351 | 332 |
352 | |
353 // Will clobber 4 registers: object, map, dst, ip. The | 333 // Will clobber 4 registers: object, map, dst, ip. The |
354 // register 'object' contains a heap object pointer. | 334 // register 'object' contains a heap object pointer. |
355 void MacroAssembler::RecordWriteForMap(Register object, Register map, | 335 void MacroAssembler::RecordWriteForMap(Register object, Register map, |
356 Register dst, | 336 Register dst, |
357 LinkRegisterStatus lr_status, | 337 LinkRegisterStatus lr_status, |
358 SaveFPRegsMode fp_mode) { | 338 SaveFPRegsMode fp_mode) { |
359 if (emit_debug_code()) { | 339 if (emit_debug_code()) { |
360 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset)); | 340 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset)); |
361 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0); | 341 CmpP(dst, Operand(isolate()->factory()->meta_map())); |
362 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 342 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
363 } | 343 } |
364 | 344 |
365 if (!FLAG_incremental_marking) { | 345 if (!FLAG_incremental_marking) { |
366 return; | 346 return; |
367 } | 347 } |
368 | 348 |
369 if (emit_debug_code()) { | 349 if (emit_debug_code()) { |
370 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset)); | 350 CmpP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
371 cmp(ip, map); | |
372 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 351 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
373 } | 352 } |
374 | 353 |
375 Label done; | 354 Label done; |
376 | 355 |
377 // A single check of the map's pages interesting flag suffices, since it is | 356 // A single check of the map's pages interesting flag suffices, since it is |
378 // only set during incremental collection, and then it's also guaranteed that | 357 // only set during incremental collection, and then it's also guaranteed that |
379 // the from object's page's interesting flag is also set. This optimization | 358 // the from object's page's interesting flag is also set. This optimization |
380 // relies on the fact that maps can never be in new space. | 359 // relies on the fact that maps can never be in new space. |
381 CheckPageFlag(map, | 360 CheckPageFlag(map, |
382 map, // Used as scratch. | 361 map, // Used as scratch. |
383 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); | 362 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); |
384 | 363 |
385 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); | 364 lay(dst, MemOperand(object, HeapObject::kMapOffset - kHeapObjectTag)); |
386 if (emit_debug_code()) { | 365 if (emit_debug_code()) { |
387 Label ok; | 366 Label ok; |
388 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); | 367 AndP(r0, dst, Operand((1 << kPointerSizeLog2) - 1)); |
389 beq(&ok, cr0); | 368 beq(&ok, Label::kNear); |
390 stop("Unaligned cell in write barrier"); | 369 stop("Unaligned cell in write barrier"); |
391 bind(&ok); | 370 bind(&ok); |
392 } | 371 } |
393 | 372 |
394 // Record the actual write. | 373 // Record the actual write. |
395 if (lr_status == kLRHasNotBeenSaved) { | 374 if (lr_status == kLRHasNotBeenSaved) { |
396 mflr(r0); | 375 push(r14); |
397 push(r0); | |
398 } | 376 } |
399 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, | 377 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, |
400 fp_mode); | 378 fp_mode); |
401 CallStub(&stub); | 379 CallStub(&stub); |
402 if (lr_status == kLRHasNotBeenSaved) { | 380 if (lr_status == kLRHasNotBeenSaved) { |
403 pop(r0); | 381 pop(r14); |
404 mtlr(r0); | |
405 } | 382 } |
406 | 383 |
407 bind(&done); | 384 bind(&done); |
408 | 385 |
409 // Count number of write barriers in generated code. | 386 // Count number of write barriers in generated code. |
410 isolate()->counters()->write_barriers_static()->Increment(); | 387 isolate()->counters()->write_barriers_static()->Increment(); |
411 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); | 388 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst); |
412 | 389 |
413 // Clobber clobbered registers when running with the debug-code flag | 390 // Clobber clobbered registers when running with the debug-code flag |
414 // turned on to provoke errors. | 391 // turned on to provoke errors. |
415 if (emit_debug_code()) { | 392 if (emit_debug_code()) { |
416 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12))); | 393 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12))); |
417 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16))); | 394 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16))); |
418 } | 395 } |
419 } | 396 } |
420 | 397 |
421 | |
422 // Will clobber 4 registers: object, address, scratch, ip. The | 398 // Will clobber 4 registers: object, address, scratch, ip. The |
423 // register 'object' contains a heap object pointer. The heap object | 399 // register 'object' contains a heap object pointer. The heap object |
424 // tag is shifted away. | 400 // tag is shifted away. |
425 void MacroAssembler::RecordWrite( | 401 void MacroAssembler::RecordWrite( |
426 Register object, Register address, Register value, | 402 Register object, Register address, Register value, |
427 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode, | 403 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode, |
428 RememberedSetAction remembered_set_action, SmiCheck smi_check, | 404 RememberedSetAction remembered_set_action, SmiCheck smi_check, |
429 PointersToHereCheck pointers_to_here_check_for_value) { | 405 PointersToHereCheck pointers_to_here_check_for_value) { |
430 DCHECK(!object.is(value)); | 406 DCHECK(!object.is(value)); |
431 if (emit_debug_code()) { | 407 if (emit_debug_code()) { |
432 LoadP(r0, MemOperand(address)); | 408 CmpP(value, MemOperand(address)); |
433 cmp(r0, value); | |
434 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 409 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
435 } | 410 } |
436 | 411 |
437 if (remembered_set_action == OMIT_REMEMBERED_SET && | 412 if (remembered_set_action == OMIT_REMEMBERED_SET && |
438 !FLAG_incremental_marking) { | 413 !FLAG_incremental_marking) { |
439 return; | 414 return; |
440 } | 415 } |
441 | |
442 // First, check if a write barrier is even needed. The tests below | 416 // First, check if a write barrier is even needed. The tests below |
443 // catch stores of smis and stores into the young generation. | 417 // catch stores of smis and stores into the young generation. |
444 Label done; | 418 Label done; |
445 | 419 |
446 if (smi_check == INLINE_SMI_CHECK) { | 420 if (smi_check == INLINE_SMI_CHECK) { |
447 JumpIfSmi(value, &done); | 421 JumpIfSmi(value, &done); |
448 } | 422 } |
449 | 423 |
450 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { | 424 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { |
451 CheckPageFlag(value, | 425 CheckPageFlag(value, |
452 value, // Used as scratch. | 426 value, // Used as scratch. |
453 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); | 427 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); |
454 } | 428 } |
455 CheckPageFlag(object, | 429 CheckPageFlag(object, |
456 value, // Used as scratch. | 430 value, // Used as scratch. |
457 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); | 431 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); |
458 | 432 |
459 // Record the actual write. | 433 // Record the actual write. |
460 if (lr_status == kLRHasNotBeenSaved) { | 434 if (lr_status == kLRHasNotBeenSaved) { |
461 mflr(r0); | 435 push(r14); |
462 push(r0); | |
463 } | 436 } |
464 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, | 437 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action, |
465 fp_mode); | 438 fp_mode); |
466 CallStub(&stub); | 439 CallStub(&stub); |
467 if (lr_status == kLRHasNotBeenSaved) { | 440 if (lr_status == kLRHasNotBeenSaved) { |
468 pop(r0); | 441 pop(r14); |
469 mtlr(r0); | |
470 } | 442 } |
471 | 443 |
472 bind(&done); | 444 bind(&done); |
473 | 445 |
474 // Count number of write barriers in generated code. | 446 // Count number of write barriers in generated code. |
475 isolate()->counters()->write_barriers_static()->Increment(); | 447 isolate()->counters()->write_barriers_static()->Increment(); |
476 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, | 448 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, |
477 value); | 449 value); |
478 | 450 |
479 // Clobber clobbered registers when running with the debug-code flag | 451 // Clobber clobbered registers when running with the debug-code flag |
480 // turned on to provoke errors. | 452 // turned on to provoke errors. |
481 if (emit_debug_code()) { | 453 if (emit_debug_code()) { |
482 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12))); | 454 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12))); |
483 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16))); | 455 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16))); |
484 } | 456 } |
485 } | 457 } |
486 | 458 |
487 void MacroAssembler::RecordWriteCodeEntryField(Register js_function, | 459 void MacroAssembler::RecordWriteCodeEntryField(Register js_function, |
488 Register code_entry, | 460 Register code_entry, |
489 Register scratch) { | 461 Register scratch) { |
490 const int offset = JSFunction::kCodeEntryOffset; | 462 const int offset = JSFunction::kCodeEntryOffset; |
491 | 463 |
492 // Since a code entry (value) is always in old space, we don't need to update | 464 // Since a code entry (value) is always in old space, we don't need to update |
493 // remembered set. If incremental marking is off, there is nothing for us to | 465 // remembered set. If incremental marking is off, there is nothing for us to |
494 // do. | 466 // do. |
495 if (!FLAG_incremental_marking) return; | 467 if (!FLAG_incremental_marking) return; |
496 | 468 |
497 DCHECK(js_function.is(r4)); | 469 DCHECK(js_function.is(r3)); |
498 DCHECK(code_entry.is(r7)); | 470 DCHECK(code_entry.is(r6)); |
499 DCHECK(scratch.is(r8)); | 471 DCHECK(scratch.is(r7)); |
500 AssertNotSmi(js_function); | 472 AssertNotSmi(js_function); |
501 | 473 |
502 if (emit_debug_code()) { | 474 if (emit_debug_code()) { |
503 addi(scratch, js_function, Operand(offset - kHeapObjectTag)); | 475 AddP(scratch, js_function, Operand(offset - kHeapObjectTag)); |
504 LoadP(ip, MemOperand(scratch)); | 476 LoadP(ip, MemOperand(scratch)); |
505 cmp(ip, code_entry); | 477 CmpP(ip, code_entry); |
506 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 478 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
507 } | 479 } |
508 | 480 |
509 // First, check if a write barrier is even needed. The tests below | 481 // First, check if a write barrier is even needed. The tests below |
510 // catch stores of Smis and stores into young gen. | 482 // catch stores of Smis and stores into young gen. |
511 Label done; | 483 Label done; |
512 | 484 |
513 CheckPageFlag(code_entry, scratch, | 485 CheckPageFlag(code_entry, scratch, |
514 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); | 486 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); |
515 CheckPageFlag(js_function, scratch, | 487 CheckPageFlag(js_function, scratch, |
516 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); | 488 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done); |
517 | 489 |
518 const Register dst = scratch; | 490 const Register dst = scratch; |
519 addi(dst, js_function, Operand(offset - kHeapObjectTag)); | 491 AddP(dst, js_function, Operand(offset - kHeapObjectTag)); |
520 | 492 |
521 // Save caller-saved registers. js_function and code_entry are in the | 493 // Save caller-saved registers. js_function and code_entry are in the |
522 // caller-saved register list. | 494 // caller-saved register list. |
523 DCHECK(kJSCallerSaved & js_function.bit()); | 495 DCHECK(kJSCallerSaved & js_function.bit()); |
524 DCHECK(kJSCallerSaved & code_entry.bit()); | 496 DCHECK(kJSCallerSaved & code_entry.bit()); |
525 mflr(r0); | 497 MultiPush(kJSCallerSaved | r14.bit()); |
526 MultiPush(kJSCallerSaved | r0.bit()); | |
527 | 498 |
528 int argument_count = 3; | 499 int argument_count = 3; |
529 PrepareCallCFunction(argument_count, code_entry); | 500 PrepareCallCFunction(argument_count, code_entry); |
530 | 501 |
531 mr(r3, js_function); | 502 LoadRR(r2, js_function); |
532 mr(r4, dst); | 503 LoadRR(r3, dst); |
533 mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 504 mov(r4, Operand(ExternalReference::isolate_address(isolate()))); |
534 | 505 |
535 { | 506 { |
536 AllowExternalCallThatCantCauseGC scope(this); | 507 AllowExternalCallThatCantCauseGC scope(this); |
537 CallCFunction( | 508 CallCFunction( |
538 ExternalReference::incremental_marking_record_write_code_entry_function( | 509 ExternalReference::incremental_marking_record_write_code_entry_function( |
539 isolate()), | 510 isolate()), |
540 argument_count); | 511 argument_count); |
541 } | 512 } |
542 | 513 |
543 // Restore caller-saved registers (including js_function and code_entry). | 514 // Restore caller-saved registers (including js_function and code_entry). |
544 MultiPop(kJSCallerSaved | r0.bit()); | 515 MultiPop(kJSCallerSaved | r14.bit()); |
545 mtlr(r0); | |
546 | 516 |
547 bind(&done); | 517 bind(&done); |
548 } | 518 } |
549 | 519 |
550 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 520 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
551 Register address, Register scratch, | 521 Register address, Register scratch, |
552 SaveFPRegsMode fp_mode, | 522 SaveFPRegsMode fp_mode, |
553 RememberedSetFinalAction and_then) { | 523 RememberedSetFinalAction and_then) { |
554 Label done; | 524 Label done; |
555 if (emit_debug_code()) { | 525 if (emit_debug_code()) { |
556 Label ok; | 526 Label ok; |
557 JumpIfNotInNewSpace(object, scratch, &ok); | 527 JumpIfNotInNewSpace(object, scratch, &ok); |
558 stop("Remembered set pointer is in new space"); | 528 stop("Remembered set pointer is in new space"); |
559 bind(&ok); | 529 bind(&ok); |
560 } | 530 } |
561 // Load store buffer top. | 531 // Load store buffer top. |
562 ExternalReference store_buffer = | 532 ExternalReference store_buffer = |
563 ExternalReference::store_buffer_top(isolate()); | 533 ExternalReference::store_buffer_top(isolate()); |
564 mov(ip, Operand(store_buffer)); | 534 mov(ip, Operand(store_buffer)); |
565 LoadP(scratch, MemOperand(ip)); | 535 LoadP(scratch, MemOperand(ip)); |
566 // Store pointer to buffer and increment buffer top. | 536 // Store pointer to buffer and increment buffer top. |
567 StoreP(address, MemOperand(scratch)); | 537 StoreP(address, MemOperand(scratch)); |
568 addi(scratch, scratch, Operand(kPointerSize)); | 538 AddP(scratch, Operand(kPointerSize)); |
569 // Write back new top of buffer. | 539 // Write back new top of buffer. |
570 StoreP(scratch, MemOperand(ip)); | 540 StoreP(scratch, MemOperand(ip)); |
571 // Call stub on end of buffer. | 541 // Call stub on end of buffer. |
572 // Check for end of buffer. | 542 // Check for end of buffer. |
573 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit)); | 543 AndP(scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
574 and_(r0, scratch, r0, SetRC); | |
575 | 544 |
576 if (and_then == kFallThroughAtEnd) { | 545 if (and_then == kFallThroughAtEnd) { |
577 beq(&done, cr0); | 546 beq(&done, Label::kNear); |
578 } else { | 547 } else { |
579 DCHECK(and_then == kReturnAtEnd); | 548 DCHECK(and_then == kReturnAtEnd); |
580 Ret(eq, cr0); | 549 beq(&done, Label::kNear); |
581 } | 550 } |
582 mflr(r0); | 551 push(r14); |
583 push(r0); | |
584 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode); | 552 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode); |
585 CallStub(&store_buffer_overflow); | 553 CallStub(&store_buffer_overflow); |
586 pop(r0); | 554 pop(r14); |
587 mtlr(r0); | |
588 bind(&done); | 555 bind(&done); |
589 if (and_then == kReturnAtEnd) { | 556 if (and_then == kReturnAtEnd) { |
590 Ret(); | 557 Ret(); |
591 } | 558 } |
592 } | 559 } |
593 | 560 |
594 | |
595 void MacroAssembler::PushFixedFrame(Register marker_reg) { | 561 void MacroAssembler::PushFixedFrame(Register marker_reg) { |
596 mflr(r0); | 562 CleanseP(r14); |
597 if (FLAG_enable_embedded_constant_pool) { | 563 if (marker_reg.is_valid()) { |
598 if (marker_reg.is_valid()) { | 564 Push(r14, fp, cp, marker_reg); |
599 Push(r0, fp, kConstantPoolRegister, cp, marker_reg); | |
600 } else { | |
601 Push(r0, fp, kConstantPoolRegister, cp); | |
602 } | |
603 } else { | 565 } else { |
604 if (marker_reg.is_valid()) { | 566 Push(r14, fp, cp); |
605 Push(r0, fp, cp, marker_reg); | |
606 } else { | |
607 Push(r0, fp, cp); | |
608 } | |
609 } | 567 } |
610 } | 568 } |
611 | 569 |
612 | |
613 void MacroAssembler::PopFixedFrame(Register marker_reg) { | 570 void MacroAssembler::PopFixedFrame(Register marker_reg) { |
614 if (FLAG_enable_embedded_constant_pool) { | 571 if (marker_reg.is_valid()) { |
615 if (marker_reg.is_valid()) { | 572 Pop(r14, fp, cp, marker_reg); |
616 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg); | |
617 } else { | |
618 Pop(r0, fp, kConstantPoolRegister, cp); | |
619 } | |
620 } else { | 573 } else { |
621 if (marker_reg.is_valid()) { | 574 Pop(r14, fp, cp); |
622 Pop(r0, fp, cp, marker_reg); | |
623 } else { | |
624 Pop(r0, fp, cp); | |
625 } | |
626 } | 575 } |
627 mtlr(r0); | |
628 } | 576 } |
629 | 577 |
630 void MacroAssembler::RestoreFrameStateForTailCall() { | 578 void MacroAssembler::RestoreFrameStateForTailCall() { |
631 if (FLAG_enable_embedded_constant_pool) { | 579 // if (FLAG_enable_embedded_constant_pool) { |
632 LoadP(kConstantPoolRegister, | 580 // LoadP(kConstantPoolRegister, |
633 MemOperand(fp, StandardFrameConstants::kConstantPoolOffset)); | 581 // MemOperand(fp, StandardFrameConstants::kConstantPoolOffset)); |
634 set_constant_pool_available(false); | 582 // set_constant_pool_available(false); |
635 } | 583 // } |
636 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); | 584 DCHECK(!FLAG_enable_embedded_constant_pool); |
| 585 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); |
637 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 586 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
638 mtlr(r0); | |
639 } | 587 } |
640 | 588 |
641 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable; | 589 const RegList MacroAssembler::kSafepointSavedRegisters = Register::kAllocatable; |
642 const int MacroAssembler::kNumSafepointSavedRegisters = | 590 const int MacroAssembler::kNumSafepointSavedRegisters = |
643 Register::kNumAllocatable; | 591 Register::kNumAllocatable; |
644 | 592 |
645 // Push and pop all registers that can hold pointers. | 593 // Push and pop all registers that can hold pointers. |
646 void MacroAssembler::PushSafepointRegisters() { | 594 void MacroAssembler::PushSafepointRegisters() { |
647 // Safepoints expect a block of kNumSafepointRegisters values on the | 595 // Safepoints expect a block of kNumSafepointRegisters values on the |
648 // stack, so adjust the stack for unsaved registers. | 596 // stack, so adjust the stack for unsaved registers. |
649 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 597 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
650 DCHECK(num_unsaved >= 0); | 598 DCHECK(num_unsaved >= 0); |
651 if (num_unsaved > 0) { | 599 if (num_unsaved > 0) { |
652 subi(sp, sp, Operand(num_unsaved * kPointerSize)); | 600 lay(sp, MemOperand(sp, -(num_unsaved * kPointerSize))); |
653 } | 601 } |
654 MultiPush(kSafepointSavedRegisters); | 602 MultiPush(kSafepointSavedRegisters); |
655 } | 603 } |
656 | 604 |
657 | |
658 void MacroAssembler::PopSafepointRegisters() { | 605 void MacroAssembler::PopSafepointRegisters() { |
659 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 606 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
660 MultiPop(kSafepointSavedRegisters); | 607 MultiPop(kSafepointSavedRegisters); |
661 if (num_unsaved > 0) { | 608 if (num_unsaved > 0) { |
662 addi(sp, sp, Operand(num_unsaved * kPointerSize)); | 609 la(sp, MemOperand(sp, num_unsaved * kPointerSize)); |
663 } | 610 } |
664 } | 611 } |
665 | 612 |
666 | |
667 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | 613 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { |
668 StoreP(src, SafepointRegisterSlot(dst)); | 614 StoreP(src, SafepointRegisterSlot(dst)); |
669 } | 615 } |
670 | 616 |
671 | |
672 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 617 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
673 LoadP(dst, SafepointRegisterSlot(src)); | 618 LoadP(dst, SafepointRegisterSlot(src)); |
674 } | 619 } |
675 | 620 |
676 | |
677 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 621 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
678 // The registers are pushed starting with the highest encoding, | 622 // The registers are pushed starting with the highest encoding, |
679 // which means that lowest encodings are closest to the stack pointer. | 623 // which means that lowest encodings are closest to the stack pointer. |
680 RegList regs = kSafepointSavedRegisters; | 624 RegList regs = kSafepointSavedRegisters; |
681 int index = 0; | 625 int index = 0; |
682 | 626 |
683 DCHECK(reg_code >= 0 && reg_code < kNumRegisters); | 627 DCHECK(reg_code >= 0 && reg_code < kNumRegisters); |
684 | 628 |
685 for (int16_t i = 0; i < reg_code; i++) { | 629 for (int16_t i = 0; i < reg_code; i++) { |
686 if ((regs & (1 << i)) != 0) { | 630 if ((regs & (1 << i)) != 0) { |
687 index++; | 631 index++; |
688 } | 632 } |
689 } | 633 } |
690 | 634 |
691 return index; | 635 return index; |
692 } | 636 } |
693 | 637 |
694 | |
695 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 638 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
696 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 639 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
697 } | 640 } |
698 | 641 |
699 | |
700 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | 642 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { |
701 // General purpose registers are pushed last on the stack. | 643 // General purpose registers are pushed last on the stack. |
702 const RegisterConfiguration* config = | 644 const RegisterConfiguration* config = |
703 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); | 645 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); |
704 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize; | 646 int doubles_size = config->num_allocatable_double_registers() * kDoubleSize; |
705 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 647 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
706 return MemOperand(sp, doubles_size + register_offset); | 648 return MemOperand(sp, doubles_size + register_offset); |
707 } | 649 } |
708 | 650 |
709 | |
710 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, | 651 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, |
711 const DoubleRegister src) { | 652 const DoubleRegister src) { |
712 // Turn potential sNaN into qNaN. | 653 // Turn potential sNaN into qNaN |
713 fsub(dst, src, kDoubleRegZero); | 654 if (!dst.is(src)) ldr(dst, src); |
| 655 lzdr(kDoubleRegZero); |
| 656 sdbr(dst, kDoubleRegZero); |
714 } | 657 } |
715 | 658 |
716 void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { | 659 void MacroAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) { |
717 MovIntToDouble(dst, src, r0); | 660 cdfbr(dst, src); |
718 fcfid(dst, dst); | |
719 } | 661 } |
720 | 662 |
721 void MacroAssembler::ConvertUnsignedIntToDouble(Register src, | 663 void MacroAssembler::ConvertUnsignedIntToDouble(Register src, |
722 DoubleRegister dst) { | 664 DoubleRegister dst) { |
723 MovUnsignedIntToDouble(dst, src, r0); | 665 if (CpuFeatures::IsSupported(FLOATING_POINT_EXT)) { |
724 fcfid(dst, dst); | 666 cdlfbr(Condition(5), Condition(0), dst, src); |
| 667 } else { |
| 668 // zero-extend src |
| 669 llgfr(src, src); |
| 670 // convert to double |
| 671 cdgbr(dst, src); |
| 672 } |
725 } | 673 } |
726 | 674 |
727 void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { | 675 void MacroAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) { |
728 MovIntToDouble(dst, src, r0); | 676 cefbr(dst, src); |
729 fcfids(dst, dst); | |
730 } | 677 } |
731 | 678 |
732 void MacroAssembler::ConvertUnsignedIntToFloat(Register src, | 679 void MacroAssembler::ConvertUnsignedIntToFloat(Register src, |
733 DoubleRegister dst) { | 680 DoubleRegister dst) { |
734 MovUnsignedIntToDouble(dst, src, r0); | 681 celfbr(Condition(0), Condition(0), dst, src); |
735 fcfids(dst, dst); | 682 } |
736 } | 683 |
737 | 684 #if V8_TARGET_ARCH_S390X |
738 #if V8_TARGET_ARCH_PPC64 | |
739 void MacroAssembler::ConvertInt64ToDouble(Register src, | 685 void MacroAssembler::ConvertInt64ToDouble(Register src, |
740 DoubleRegister double_dst) { | 686 DoubleRegister double_dst) { |
741 MovInt64ToDouble(double_dst, src); | 687 cdgbr(double_dst, src); |
742 fcfid(double_dst, double_dst); | 688 } |
743 } | |
744 | |
745 | 689 |
746 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src, | 690 void MacroAssembler::ConvertUnsignedInt64ToFloat(Register src, |
747 DoubleRegister double_dst) { | 691 DoubleRegister double_dst) { |
748 MovInt64ToDouble(double_dst, src); | 692 celgbr(Condition(0), Condition(0), double_dst, src); |
749 fcfidus(double_dst, double_dst); | 693 } |
750 } | |
751 | |
752 | 694 |
753 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src, | 695 void MacroAssembler::ConvertUnsignedInt64ToDouble(Register src, |
754 DoubleRegister double_dst) { | 696 DoubleRegister double_dst) { |
755 MovInt64ToDouble(double_dst, src); | 697 cdlgbr(Condition(0), Condition(0), double_dst, src); |
756 fcfidu(double_dst, double_dst); | 698 } |
757 } | |
758 | |
759 | 699 |
760 void MacroAssembler::ConvertInt64ToFloat(Register src, | 700 void MacroAssembler::ConvertInt64ToFloat(Register src, |
761 DoubleRegister double_dst) { | 701 DoubleRegister double_dst) { |
762 MovInt64ToDouble(double_dst, src); | 702 cegbr(double_dst, src); |
763 fcfids(double_dst, double_dst); | 703 } |
764 } | 704 #endif |
765 #endif | 705 |
766 | 706 void MacroAssembler::ConvertFloat32ToInt64(const DoubleRegister double_input, |
| 707 #if !V8_TARGET_ARCH_S390X |
| 708 const Register dst_hi, |
| 709 #endif |
| 710 const Register dst, |
| 711 const DoubleRegister double_dst, |
| 712 FPRoundingMode rounding_mode) { |
| 713 Condition m = Condition(0); |
| 714 switch (rounding_mode) { |
| 715 case kRoundToZero: |
| 716 m = Condition(5); |
| 717 break; |
| 718 case kRoundToNearest: |
| 719 UNIMPLEMENTED(); |
| 720 break; |
| 721 case kRoundToPlusInf: |
| 722 m = Condition(6); |
| 723 break; |
| 724 case kRoundToMinusInf: |
| 725 m = Condition(7); |
| 726 break; |
| 727 default: |
| 728 UNIMPLEMENTED(); |
| 729 break; |
| 730 } |
| 731 cgebr(m, dst, double_input); |
| 732 ldgr(double_dst, dst); |
| 733 #if !V8_TARGET_ARCH_S390X |
| 734 srlg(dst_hi, dst, Operand(32)); |
| 735 #endif |
| 736 } |
767 | 737 |
768 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, | 738 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input, |
769 #if !V8_TARGET_ARCH_PPC64 | 739 #if !V8_TARGET_ARCH_S390X |
770 const Register dst_hi, | 740 const Register dst_hi, |
771 #endif | 741 #endif |
772 const Register dst, | 742 const Register dst, |
773 const DoubleRegister double_dst, | 743 const DoubleRegister double_dst, |
774 FPRoundingMode rounding_mode) { | 744 FPRoundingMode rounding_mode) { |
775 if (rounding_mode == kRoundToZero) { | 745 Condition m = Condition(0); |
776 fctidz(double_dst, double_input); | 746 switch (rounding_mode) { |
777 } else { | 747 case kRoundToZero: |
778 SetRoundingMode(rounding_mode); | 748 m = Condition(5); |
779 fctid(double_dst, double_input); | 749 break; |
780 ResetRoundingMode(); | 750 case kRoundToNearest: |
781 } | 751 UNIMPLEMENTED(); |
782 | 752 break; |
783 MovDoubleToInt64( | 753 case kRoundToPlusInf: |
784 #if !V8_TARGET_ARCH_PPC64 | 754 m = Condition(6); |
785 dst_hi, | 755 break; |
786 #endif | 756 case kRoundToMinusInf: |
787 dst, double_dst); | 757 m = Condition(7); |
788 } | 758 break; |
789 | 759 default: |
790 #if V8_TARGET_ARCH_PPC64 | 760 UNIMPLEMENTED(); |
| 761 break; |
| 762 } |
| 763 cgdbr(m, dst, double_input); |
| 764 ldgr(double_dst, dst); |
| 765 #if !V8_TARGET_ARCH_S390X |
| 766 srlg(dst_hi, dst, Operand(32)); |
| 767 #endif |
| 768 } |
| 769 |
| 770 void MacroAssembler::ConvertFloat32ToInt32(const DoubleRegister double_input, |
| 771 const Register dst, |
| 772 const DoubleRegister double_dst, |
| 773 FPRoundingMode rounding_mode) { |
| 774 Condition m = Condition(0); |
| 775 switch (rounding_mode) { |
| 776 case kRoundToZero: |
| 777 m = Condition(5); |
| 778 break; |
| 779 case kRoundToNearest: |
| 780 UNIMPLEMENTED(); |
| 781 break; |
| 782 case kRoundToPlusInf: |
| 783 m = Condition(6); |
| 784 break; |
| 785 case kRoundToMinusInf: |
| 786 m = Condition(7); |
| 787 break; |
| 788 default: |
| 789 UNIMPLEMENTED(); |
| 790 break; |
| 791 } |
| 792 cfebr(m, dst, double_input); |
| 793 ldgr(double_dst, dst); |
| 794 } |
| 795 |
| 796 void MacroAssembler::ConvertFloat32ToUnsignedInt32( |
| 797 const DoubleRegister double_input, const Register dst, |
| 798 const DoubleRegister double_dst, FPRoundingMode rounding_mode) { |
| 799 Condition m = Condition(0); |
| 800 switch (rounding_mode) { |
| 801 case kRoundToZero: |
| 802 m = Condition(5); |
| 803 break; |
| 804 case kRoundToNearest: |
| 805 UNIMPLEMENTED(); |
| 806 break; |
| 807 case kRoundToPlusInf: |
| 808 m = Condition(6); |
| 809 break; |
| 810 case kRoundToMinusInf: |
| 811 m = Condition(7); |
| 812 break; |
| 813 default: |
| 814 UNIMPLEMENTED(); |
| 815 break; |
| 816 } |
| 817 clfebr(m, Condition(0), dst, double_input); |
| 818 ldgr(double_dst, dst); |
| 819 } |
| 820 |
| 821 #if V8_TARGET_ARCH_S390X |
| 822 void MacroAssembler::ConvertFloat32ToUnsignedInt64( |
| 823 const DoubleRegister double_input, const Register dst, |
| 824 const DoubleRegister double_dst, FPRoundingMode rounding_mode) { |
| 825 Condition m = Condition(0); |
| 826 switch (rounding_mode) { |
| 827 case kRoundToZero: |
| 828 m = Condition(5); |
| 829 break; |
| 830 case kRoundToNearest: |
| 831 UNIMPLEMENTED(); |
| 832 break; |
| 833 case kRoundToPlusInf: |
| 834 m = Condition(6); |
| 835 break; |
| 836 case kRoundToMinusInf: |
| 837 m = Condition(7); |
| 838 break; |
| 839 default: |
| 840 UNIMPLEMENTED(); |
| 841 break; |
| 842 } |
| 843 clgebr(m, Condition(0), dst, double_input); |
| 844 ldgr(double_dst, dst); |
| 845 } |
| 846 |
791 void MacroAssembler::ConvertDoubleToUnsignedInt64( | 847 void MacroAssembler::ConvertDoubleToUnsignedInt64( |
792 const DoubleRegister double_input, const Register dst, | 848 const DoubleRegister double_input, const Register dst, |
793 const DoubleRegister double_dst, FPRoundingMode rounding_mode) { | 849 const DoubleRegister double_dst, FPRoundingMode rounding_mode) { |
794 if (rounding_mode == kRoundToZero) { | 850 Condition m = Condition(0); |
795 fctiduz(double_dst, double_input); | 851 switch (rounding_mode) { |
796 } else { | 852 case kRoundToZero: |
797 SetRoundingMode(rounding_mode); | 853 m = Condition(5); |
798 fctidu(double_dst, double_input); | 854 break; |
799 ResetRoundingMode(); | 855 case kRoundToNearest: |
800 } | 856 UNIMPLEMENTED(); |
801 | 857 break; |
802 MovDoubleToInt64(dst, double_dst); | 858 case kRoundToPlusInf: |
803 } | 859 m = Condition(6); |
804 #endif | 860 break; |
805 | 861 case kRoundToMinusInf: |
806 | 862 m = Condition(7); |
807 void MacroAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress( | 863 break; |
808 Register code_target_address) { | 864 default: |
809 lwz(kConstantPoolRegister, | 865 UNIMPLEMENTED(); |
810 MemOperand(code_target_address, | 866 break; |
811 Code::kConstantPoolOffset - Code::kHeaderSize)); | 867 } |
812 add(kConstantPoolRegister, kConstantPoolRegister, code_target_address); | 868 clgdbr(m, Condition(0), dst, double_input); |
813 } | 869 ldgr(double_dst, dst); |
814 | 870 } |
815 | 871 #endif |
816 void MacroAssembler::LoadConstantPoolPointerRegister(Register base, | 872 |
817 int code_start_delta) { | 873 void MacroAssembler::MovDoubleToInt64(Register dst, DoubleRegister src) { |
818 add_label_offset(kConstantPoolRegister, base, ConstantPoolPosition(), | 874 lgdr(dst, src); |
819 code_start_delta); | 875 } |
820 } | 876 |
821 | 877 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, Register src) { |
822 | 878 ldgr(dst, src); |
823 void MacroAssembler::LoadConstantPoolPointerRegister() { | 879 } |
824 mov_label_addr(kConstantPoolRegister, ConstantPoolPosition()); | |
825 } | |
826 | |
827 | 880 |
828 void MacroAssembler::StubPrologue(Register base, int prologue_offset) { | 881 void MacroAssembler::StubPrologue(Register base, int prologue_offset) { |
829 LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB)); | 882 PushFixedFrame(); |
830 PushFixedFrame(r11); | 883 Push(Smi::FromInt(StackFrame::STUB)); |
831 // Adjust FP to point to saved FP. | 884 // Adjust FP to point to saved FP. |
832 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 885 la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp)); |
833 if (FLAG_enable_embedded_constant_pool) { | 886 } |
834 if (!base.is(no_reg)) { | |
835 // base contains prologue address | |
836 LoadConstantPoolPointerRegister(base, -prologue_offset); | |
837 } else { | |
838 LoadConstantPoolPointerRegister(); | |
839 } | |
840 set_constant_pool_available(true); | |
841 } | |
842 } | |
843 | |
844 | 887 |
845 void MacroAssembler::Prologue(bool code_pre_aging, Register base, | 888 void MacroAssembler::Prologue(bool code_pre_aging, Register base, |
846 int prologue_offset) { | 889 int prologue_offset) { |
847 DCHECK(!base.is(no_reg)); | 890 DCHECK(!base.is(no_reg)); |
848 { | 891 { |
849 PredictableCodeSizeScope predictible_code_size_scope( | 892 PredictableCodeSizeScope predictible_code_size_scope( |
850 this, kNoCodeAgeSequenceLength); | 893 this, kNoCodeAgeSequenceLength); |
851 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this); | |
852 // The following instructions must remain together and unmodified | 894 // The following instructions must remain together and unmodified |
853 // for code aging to work properly. | 895 // for code aging to work properly. |
854 if (code_pre_aging) { | 896 if (code_pre_aging) { |
855 // Pre-age the code. | 897 // Pre-age the code. |
856 // This matches the code found in PatchPlatformCodeAge() | 898 // This matches the code found in PatchPlatformCodeAge() |
857 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | 899 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); |
858 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start()); | 900 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start()); |
859 // Don't use Call -- we need to preserve ip and lr | 901 nop(); |
860 nop(); // marker to detect sequence (see IsOld) | 902 CleanseP(r14); |
861 mov(r3, Operand(target)); | 903 Push(r14); |
862 Jump(r3); | 904 mov(r2, Operand(target)); |
863 for (int i = 0; i < kCodeAgingSequenceNops; i++) { | 905 Call(r2); |
864 nop(); | 906 for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength; |
| 907 i += 2) { |
| 908 // TODO(joransiu): Create nop function to pad |
| 909 // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes. |
| 910 nop(); // 2-byte nops(). |
865 } | 911 } |
866 } else { | 912 } else { |
867 // This matches the code found in GetNoCodeAgeSequence() | 913 // This matches the code found in GetNoCodeAgeSequence() |
868 PushFixedFrame(r4); | 914 PushFixedFrame(r3); |
869 // Adjust fp to point to saved fp. | 915 // Adjust fp to point to saved fp. |
870 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | 916 la(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp)); |
871 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) { | |
872 nop(); | |
873 } | |
874 } | 917 } |
875 } | 918 } |
876 if (FLAG_enable_embedded_constant_pool) { | |
877 // base contains prologue address | |
878 LoadConstantPoolPointerRegister(base, -prologue_offset); | |
879 set_constant_pool_available(true); | |
880 } | |
881 } | 919 } |
882 | 920 |
883 | |
884 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) { | 921 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) { |
885 LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 922 LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
886 LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset)); | 923 LoadP(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset)); |
887 LoadP(vector, | 924 LoadP(vector, |
888 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset)); | 925 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset)); |
889 } | 926 } |
890 | 927 |
891 | |
892 void MacroAssembler::EnterFrame(StackFrame::Type type, | 928 void MacroAssembler::EnterFrame(StackFrame::Type type, |
893 bool load_constant_pool_pointer_reg) { | 929 bool load_constant_pool_pointer_reg) { |
894 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) { | 930 // We create a stack frame with: |
895 PushFixedFrame(); | 931 // Return Addr <-- old sp |
896 // This path should not rely on ip containing code entry. | 932 // Old FP <-- new fp |
897 LoadConstantPoolPointerRegister(); | 933 // CP |
898 LoadSmiLiteral(ip, Smi::FromInt(type)); | 934 // type |
899 push(ip); | 935 // CodeObject <-- new sp |
900 } else { | 936 |
901 LoadSmiLiteral(ip, Smi::FromInt(type)); | 937 LoadSmiLiteral(ip, Smi::FromInt(type)); |
902 PushFixedFrame(ip); | 938 PushFixedFrame(ip); |
903 } | |
904 // Adjust FP to point to saved FP. | |
905 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); | |
906 | 939 |
907 mov(r0, Operand(CodeObject())); | 940 mov(r0, Operand(CodeObject())); |
908 push(r0); | 941 push(r0); |
| 942 // Adjust FP to point to saved FP |
| 943 la(fp, MemOperand( |
| 944 sp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); |
909 } | 945 } |
910 | 946 |
911 | |
912 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { | 947 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) { |
913 ConstantPoolUnavailableScope constant_pool_unavailable(this); | |
914 // r3: preserved | |
915 // r4: preserved | |
916 // r5: preserved | |
917 | |
918 // Drop the execution stack down to the frame pointer and restore | 948 // Drop the execution stack down to the frame pointer and restore |
919 // the caller's state. | 949 // the caller frame pointer, return address and constant pool pointer. |
920 int frame_ends; | 950 LoadP(r14, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); |
921 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); | 951 lay(r1, MemOperand( |
922 LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 952 fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment)); |
923 if (FLAG_enable_embedded_constant_pool) { | 953 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
924 const int exitOffset = ExitFrameConstants::kConstantPoolOffset; | 954 LoadRR(sp, r1); |
925 const int standardOffset = StandardFrameConstants::kConstantPoolOffset; | 955 int frame_ends = pc_offset(); |
926 const int offset = | |
927 ((type == StackFrame::EXIT) ? exitOffset : standardOffset); | |
928 LoadP(kConstantPoolRegister, MemOperand(fp, offset)); | |
929 } | |
930 mtlr(r0); | |
931 frame_ends = pc_offset(); | |
932 Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0); | |
933 mr(fp, ip); | |
934 return frame_ends; | 956 return frame_ends; |
935 } | 957 } |
936 | 958 |
937 | |
938 // ExitFrame layout (probably wrongish.. needs updating) | 959 // ExitFrame layout (probably wrongish.. needs updating) |
939 // | 960 // |
940 // SP -> previousSP | 961 // SP -> previousSP |
941 // LK reserved | 962 // LK reserved |
942 // code | 963 // code |
943 // sp_on_exit (for debug?) | 964 // sp_on_exit (for debug?) |
944 // oldSP->prev SP | 965 // oldSP->prev SP |
945 // LK | 966 // LK |
946 // <parameters on stack> | 967 // <parameters on stack> |
947 | 968 |
948 // Prior to calling EnterExitFrame, we've got a bunch of parameters | 969 // Prior to calling EnterExitFrame, we've got a bunch of parameters |
949 // on the stack that we need to wrap a real frame around.. so first | 970 // on the stack that we need to wrap a real frame around.. so first |
950 // we reserve a slot for LK and push the previous SP which is captured | 971 // we reserve a slot for LK and push the previous SP which is captured |
951 // in the fp register (r31) | 972 // in the fp register (r11) |
952 // Then - we buy a new frame | 973 // Then - we buy a new frame |
953 | 974 |
| 975 // r14 |
| 976 // oldFP <- newFP |
| 977 // SP |
| 978 // Code |
| 979 // Floats |
| 980 // gaps |
| 981 // Args |
| 982 // ABIRes <- newSP |
954 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { | 983 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { |
955 // Set up the frame structure on the stack. | 984 // Set up the frame structure on the stack. |
956 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); | 985 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement); |
957 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); | 986 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset); |
958 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); | 987 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset); |
959 DCHECK(stack_space > 0); | 988 DCHECK(stack_space > 0); |
960 | 989 |
961 // This is an opportunity to build a frame to wrap | 990 // This is an opportunity to build a frame to wrap |
962 // all of the pushes that have happened inside of V8 | 991 // all of the pushes that have happened inside of V8 |
963 // since we were called from C code | 992 // since we were called from C code |
964 | 993 CleanseP(r14); |
965 // replicate ARM frame - TODO make this more closely follow PPC ABI | 994 Push(r14, fp); |
966 mflr(r0); | 995 LoadRR(fp, sp); |
967 Push(r0, fp); | |
968 mr(fp, sp); | |
969 // Reserve room for saved entry sp and code object. | 996 // Reserve room for saved entry sp and code object. |
970 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize)); | 997 lay(sp, MemOperand(sp, -ExitFrameConstants::kFrameSize)); |
971 | 998 |
972 if (emit_debug_code()) { | 999 if (emit_debug_code()) { |
973 li(r8, Operand::Zero()); | 1000 StoreP(MemOperand(fp, ExitFrameConstants::kSPOffset), Operand::Zero(), r1); |
974 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); | |
975 } | 1001 } |
976 if (FLAG_enable_embedded_constant_pool) { | 1002 mov(r1, Operand(CodeObject())); |
977 StoreP(kConstantPoolRegister, | 1003 StoreP(r1, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
978 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset)); | |
979 } | |
980 mov(r8, Operand(CodeObject())); | |
981 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | |
982 | 1004 |
983 // Save the frame pointer and the context in top. | 1005 // Save the frame pointer and the context in top. |
984 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 1006 mov(r1, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
985 StoreP(fp, MemOperand(r8)); | 1007 StoreP(fp, MemOperand(r1)); |
986 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 1008 mov(r1, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
987 StoreP(cp, MemOperand(r8)); | 1009 StoreP(cp, MemOperand(r1)); |
988 | 1010 |
989 // Optionally save all volatile double registers. | 1011 // Optionally save all volatile double registers. |
990 if (save_doubles) { | 1012 if (save_doubles) { |
991 MultiPushDoubles(kCallerSavedDoubles); | 1013 MultiPushDoubles(kCallerSavedDoubles); |
992 // Note that d0 will be accessible at | 1014 // Note that d0 will be accessible at |
993 // fp - ExitFrameConstants::kFrameSize - | 1015 // fp - ExitFrameConstants::kFrameSize - |
994 // kNumCallerSavedDoubles * kDoubleSize, | 1016 // kNumCallerSavedDoubles * kDoubleSize, |
995 // since the sp slot and code slot were pushed after the fp. | 1017 // since the sp slot and code slot were pushed after the fp. |
996 } | 1018 } |
997 | 1019 |
998 addi(sp, sp, Operand(-stack_space * kPointerSize)); | 1020 lay(sp, MemOperand(sp, -stack_space * kPointerSize)); |
999 | 1021 |
1000 // Allocate and align the frame preparing for calling the runtime | 1022 // Allocate and align the frame preparing for calling the runtime |
1001 // function. | 1023 // function. |
1002 const int frame_alignment = ActivationFrameAlignment(); | 1024 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
1003 if (frame_alignment > kPointerSize) { | 1025 if (frame_alignment > 0) { |
1004 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 1026 DCHECK(frame_alignment == 8); |
1005 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); | 1027 ClearRightImm(sp, sp, Operand(3)); // equivalent to &= -8 |
1006 } | 1028 } |
1007 li(r0, Operand::Zero()); | |
1008 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); | |
1009 | 1029 |
| 1030 StoreP(MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize), |
| 1031 Operand::Zero(), r0); |
| 1032 lay(sp, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); |
1010 // Set the exit frame sp value to point just before the return address | 1033 // Set the exit frame sp value to point just before the return address |
1011 // location. | 1034 // location. |
1012 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); | 1035 lay(r1, MemOperand(sp, kStackFrameSPSlot * kPointerSize)); |
1013 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 1036 StoreP(r1, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
1014 } | 1037 } |
1015 | 1038 |
1016 | |
1017 void MacroAssembler::InitializeNewString(Register string, Register length, | 1039 void MacroAssembler::InitializeNewString(Register string, Register length, |
1018 Heap::RootListIndex map_index, | 1040 Heap::RootListIndex map_index, |
1019 Register scratch1, Register scratch2) { | 1041 Register scratch1, Register scratch2) { |
1020 SmiTag(scratch1, length); | 1042 SmiTag(scratch1, length); |
1021 LoadRoot(scratch2, map_index); | 1043 LoadRoot(scratch2, map_index); |
1022 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0); | 1044 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset)); |
1023 li(scratch1, Operand(String::kEmptyHashField)); | 1045 StoreP(FieldMemOperand(string, String::kHashFieldSlot), |
1024 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0); | 1046 Operand(String::kEmptyHashField), scratch1); |
1025 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0); | 1047 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); |
1026 } | 1048 } |
1027 | 1049 |
1028 | |
1029 int MacroAssembler::ActivationFrameAlignment() { | 1050 int MacroAssembler::ActivationFrameAlignment() { |
1030 #if !defined(USE_SIMULATOR) | 1051 #if !defined(USE_SIMULATOR) |
1031 // Running on the real platform. Use the alignment as mandated by the local | 1052 // Running on the real platform. Use the alignment as mandated by the local |
1032 // environment. | 1053 // environment. |
1033 // Note: This will break if we ever start generating snapshots on one PPC | 1054 // Note: This will break if we ever start generating snapshots on one S390 |
1034 // platform for another PPC platform with a different alignment. | 1055 // platform for another S390 platform with a different alignment. |
1035 return base::OS::ActivationFrameAlignment(); | 1056 return base::OS::ActivationFrameAlignment(); |
1036 #else // Simulated | 1057 #else // Simulated |
1037 // If we are using the simulator then we should always align to the expected | 1058 // If we are using the simulator then we should always align to the expected |
1038 // alignment. As the simulator is used to generate snapshots we do not know | 1059 // alignment. As the simulator is used to generate snapshots we do not know |
1039 // if the target platform will need alignment, so this is controlled from a | 1060 // if the target platform will need alignment, so this is controlled from a |
1040 // flag. | 1061 // flag. |
1041 return FLAG_sim_stack_alignment; | 1062 return FLAG_sim_stack_alignment; |
1042 #endif | 1063 #endif |
1043 } | 1064 } |
1044 | 1065 |
1045 | |
1046 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, | 1066 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, |
1047 bool restore_context, | 1067 bool restore_context, |
1048 bool argument_count_is_length) { | 1068 bool argument_count_is_length) { |
1049 ConstantPoolUnavailableScope constant_pool_unavailable(this); | |
1050 // Optionally restore all double registers. | 1069 // Optionally restore all double registers. |
1051 if (save_doubles) { | 1070 if (save_doubles) { |
1052 // Calculate the stack location of the saved doubles and restore them. | 1071 // Calculate the stack location of the saved doubles and restore them. |
1053 const int kNumRegs = kNumCallerSavedDoubles; | 1072 const int kNumRegs = kNumCallerSavedDoubles; |
1054 const int offset = | 1073 lay(r5, MemOperand(fp, -(ExitFrameConstants::kFrameSize + |
1055 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize); | 1074 kNumRegs * kDoubleSize))); |
1056 addi(r6, fp, Operand(-offset)); | 1075 MultiPopDoubles(kCallerSavedDoubles, r5); |
1057 MultiPopDoubles(kCallerSavedDoubles, r6); | |
1058 } | 1076 } |
1059 | 1077 |
1060 // Clear top frame. | 1078 // Clear top frame. |
1061 li(r6, Operand::Zero()); | |
1062 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 1079 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
1063 StoreP(r6, MemOperand(ip)); | 1080 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0); |
1064 | 1081 |
1065 // Restore current context from top and clear it in debug mode. | 1082 // Restore current context from top and clear it in debug mode. |
1066 if (restore_context) { | 1083 if (restore_context) { |
1067 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 1084 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1068 LoadP(cp, MemOperand(ip)); | 1085 LoadP(cp, MemOperand(ip)); |
1069 } | 1086 } |
1070 #ifdef DEBUG | 1087 #ifdef DEBUG |
1071 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 1088 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
1072 StoreP(r6, MemOperand(ip)); | 1089 StoreP(MemOperand(ip), Operand(0, kRelocInfo_NONEPTR), r0); |
1073 #endif | 1090 #endif |
1074 | 1091 |
1075 // Tear down the exit frame, pop the arguments, and return. | 1092 // Tear down the exit frame, pop the arguments, and return. |
1076 LeaveFrame(StackFrame::EXIT); | 1093 LeaveFrame(StackFrame::EXIT); |
1077 | 1094 |
1078 if (argument_count.is_valid()) { | 1095 if (argument_count.is_valid()) { |
1079 if (!argument_count_is_length) { | 1096 if (!argument_count_is_length) { |
1080 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2)); | 1097 ShiftLeftP(argument_count, argument_count, Operand(kPointerSizeLog2)); |
1081 } | 1098 } |
1082 add(sp, sp, argument_count); | 1099 la(sp, MemOperand(sp, argument_count)); |
1083 } | 1100 } |
1084 } | 1101 } |
1085 | 1102 |
1086 | |
1087 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { | 1103 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { |
1088 Move(dst, d1); | 1104 Move(dst, d0); |
1089 } | 1105 } |
1090 | 1106 |
1091 | |
1092 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { | 1107 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) { |
1093 Move(dst, d1); | 1108 Move(dst, d0); |
1094 } | 1109 } |
1095 | 1110 |
1096 | |
1097 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 1111 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
1098 const ParameterCount& actual, Label* done, | 1112 const ParameterCount& actual, Label* done, |
1099 bool* definitely_mismatches, | 1113 bool* definitely_mismatches, |
1100 InvokeFlag flag, | 1114 InvokeFlag flag, |
1101 const CallWrapper& call_wrapper) { | 1115 const CallWrapper& call_wrapper) { |
1102 bool definitely_matches = false; | 1116 bool definitely_matches = false; |
1103 *definitely_mismatches = false; | 1117 *definitely_mismatches = false; |
1104 Label regular_invoke; | 1118 Label regular_invoke; |
1105 | 1119 |
1106 // Check whether the expected and actual arguments count match. If not, | 1120 // Check whether the expected and actual arguments count match. If not, |
1107 // setup registers according to contract with ArgumentsAdaptorTrampoline: | 1121 // setup registers according to contract with ArgumentsAdaptorTrampoline: |
1108 // r3: actual arguments count | 1122 // r2: actual arguments count |
1109 // r4: function (passed through to callee) | 1123 // r3: function (passed through to callee) |
1110 // r5: expected arguments count | 1124 // r4: expected arguments count |
1111 | 1125 |
1112 // The code below is made a lot easier because the calling code already sets | 1126 // The code below is made a lot easier because the calling code already sets |
1113 // up actual and expected registers according to the contract if values are | 1127 // up actual and expected registers according to the contract if values are |
1114 // passed in registers. | 1128 // passed in registers. |
1115 | 1129 |
1116 // ARM has some sanity checks as per below, considering add them for PPC | 1130 // ARM has some sanity checks as per below; consider adding them for S390 |
1117 // DCHECK(actual.is_immediate() || actual.reg().is(r3)); | 1131 // DCHECK(actual.is_immediate() || actual.reg().is(r2)); |
1118 // DCHECK(expected.is_immediate() || expected.reg().is(r5)); | 1132 // DCHECK(expected.is_immediate() || expected.reg().is(r4)); |
1119 | 1133 |
1120 if (expected.is_immediate()) { | 1134 if (expected.is_immediate()) { |
1121 DCHECK(actual.is_immediate()); | 1135 DCHECK(actual.is_immediate()); |
1122 mov(r3, Operand(actual.immediate())); | 1136 mov(r2, Operand(actual.immediate())); |
1123 if (expected.immediate() == actual.immediate()) { | 1137 if (expected.immediate() == actual.immediate()) { |
1124 definitely_matches = true; | 1138 definitely_matches = true; |
1125 } else { | 1139 } else { |
1126 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; | 1140 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
1127 if (expected.immediate() == sentinel) { | 1141 if (expected.immediate() == sentinel) { |
1128 // Don't worry about adapting arguments for builtins that | 1142 // Don't worry about adapting arguments for builtins that |
1129 // don't want that done. Skip adaption code by making it look | 1143 // don't want that done. Skip adaptation code by making it look |
1130 // like we have a match between expected and actual number of | 1144 // like we have a match between expected and actual number of |
1131 // arguments. | 1145 // arguments. |
1132 definitely_matches = true; | 1146 definitely_matches = true; |
1133 } else { | 1147 } else { |
1134 *definitely_mismatches = true; | 1148 *definitely_mismatches = true; |
1135 mov(r5, Operand(expected.immediate())); | 1149 mov(r4, Operand(expected.immediate())); |
1136 } | 1150 } |
1137 } | 1151 } |
1138 } else { | 1152 } else { |
1139 if (actual.is_immediate()) { | 1153 if (actual.is_immediate()) { |
1140 mov(r3, Operand(actual.immediate())); | 1154 mov(r2, Operand(actual.immediate())); |
1141 cmpi(expected.reg(), Operand(actual.immediate())); | 1155 CmpPH(expected.reg(), Operand(actual.immediate())); |
1142 beq(®ular_invoke); | 1156 beq(®ular_invoke); |
1143 } else { | 1157 } else { |
1144 cmp(expected.reg(), actual.reg()); | 1158 CmpP(expected.reg(), actual.reg()); |
1145 beq(®ular_invoke); | 1159 beq(®ular_invoke); |
1146 } | 1160 } |
1147 } | 1161 } |
1148 | 1162 |
1149 if (!definitely_matches) { | 1163 if (!definitely_matches) { |
1150 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 1164 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
1151 if (flag == CALL_FUNCTION) { | 1165 if (flag == CALL_FUNCTION) { |
1152 call_wrapper.BeforeCall(CallSize(adaptor)); | 1166 call_wrapper.BeforeCall(CallSize(adaptor)); |
1153 Call(adaptor); | 1167 Call(adaptor); |
1154 call_wrapper.AfterCall(); | 1168 call_wrapper.AfterCall(); |
1155 if (!*definitely_mismatches) { | 1169 if (!*definitely_mismatches) { |
1156 b(done); | 1170 b(done); |
1157 } | 1171 } |
1158 } else { | 1172 } else { |
1159 Jump(adaptor, RelocInfo::CODE_TARGET); | 1173 Jump(adaptor, RelocInfo::CODE_TARGET); |
1160 } | 1174 } |
1161 bind(®ular_invoke); | 1175 bind(®ular_invoke); |
1162 } | 1176 } |
1163 } | 1177 } |
1164 | 1178 |
1165 | |
1166 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, | 1179 void MacroAssembler::FloodFunctionIfStepping(Register fun, Register new_target, |
1167 const ParameterCount& expected, | 1180 const ParameterCount& expected, |
1168 const ParameterCount& actual) { | 1181 const ParameterCount& actual) { |
1169 Label skip_flooding; | 1182 Label skip_flooding; |
1170 ExternalReference step_in_enabled = | 1183 ExternalReference step_in_enabled = |
1171 ExternalReference::debug_step_in_enabled_address(isolate()); | 1184 ExternalReference::debug_step_in_enabled_address(isolate()); |
1172 mov(r7, Operand(step_in_enabled)); | 1185 mov(r6, Operand(step_in_enabled)); |
1173 lbz(r7, MemOperand(r7)); | 1186 LoadlB(r6, MemOperand(r6)); |
1174 cmpi(r7, Operand::Zero()); | 1187 CmpP(r6, Operand::Zero()); |
1175 beq(&skip_flooding); | 1188 beq(&skip_flooding); |
1176 { | 1189 { |
1177 FrameScope frame(this, | 1190 FrameScope frame(this, |
1178 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); | 1191 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); |
1179 if (expected.is_reg()) { | 1192 if (expected.is_reg()) { |
1180 SmiTag(expected.reg()); | 1193 SmiTag(expected.reg()); |
1181 Push(expected.reg()); | 1194 Push(expected.reg()); |
1182 } | 1195 } |
1183 if (actual.is_reg()) { | 1196 if (actual.is_reg()) { |
1184 SmiTag(actual.reg()); | 1197 SmiTag(actual.reg()); |
(...skipping 13 matching lines...) Expand all Loading... |
1198 SmiUntag(actual.reg()); | 1211 SmiUntag(actual.reg()); |
1199 } | 1212 } |
1200 if (expected.is_reg()) { | 1213 if (expected.is_reg()) { |
1201 Pop(expected.reg()); | 1214 Pop(expected.reg()); |
1202 SmiUntag(expected.reg()); | 1215 SmiUntag(expected.reg()); |
1203 } | 1216 } |
1204 } | 1217 } |
1205 bind(&skip_flooding); | 1218 bind(&skip_flooding); |
1206 } | 1219 } |
1207 | 1220 |
1208 | |
1209 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, | 1221 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, |
1210 const ParameterCount& expected, | 1222 const ParameterCount& expected, |
1211 const ParameterCount& actual, | 1223 const ParameterCount& actual, |
1212 InvokeFlag flag, | 1224 InvokeFlag flag, |
1213 const CallWrapper& call_wrapper) { | 1225 const CallWrapper& call_wrapper) { |
1214 // You can't call a function without a valid frame. | 1226 // You can't call a function without a valid frame. |
1215 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 1227 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1216 DCHECK(function.is(r4)); | 1228 |
1217 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r6)); | 1229 DCHECK(function.is(r3)); |
| 1230 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(r5)); |
1218 | 1231 |
1219 if (call_wrapper.NeedsDebugStepCheck()) { | 1232 if (call_wrapper.NeedsDebugStepCheck()) { |
1220 FloodFunctionIfStepping(function, new_target, expected, actual); | 1233 FloodFunctionIfStepping(function, new_target, expected, actual); |
1221 } | 1234 } |
1222 | 1235 |
1223 // Clear the new.target register if not given. | 1236 // Clear the new.target register if not given. |
1224 if (!new_target.is_valid()) { | 1237 if (!new_target.is_valid()) { |
1225 LoadRoot(r6, Heap::kUndefinedValueRootIndex); | 1238 LoadRoot(r5, Heap::kUndefinedValueRootIndex); |
1226 } | 1239 } |
1227 | 1240 |
1228 Label done; | 1241 Label done; |
1229 bool definitely_mismatches = false; | 1242 bool definitely_mismatches = false; |
1230 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, | 1243 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, |
1231 call_wrapper); | 1244 call_wrapper); |
1232 if (!definitely_mismatches) { | 1245 if (!definitely_mismatches) { |
1233 // We call indirectly through the code field in the function to | 1246 // We call indirectly through the code field in the function to |
1234 // allow recompilation to take effect without changing any of the | 1247 // allow recompilation to take effect without changing any of the |
1235 // call sites. | 1248 // call sites. |
1236 Register code = ip; | 1249 Register code = ip; |
1237 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | 1250 LoadP(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
1238 if (flag == CALL_FUNCTION) { | 1251 if (flag == CALL_FUNCTION) { |
1239 call_wrapper.BeforeCall(CallSize(code)); | 1252 call_wrapper.BeforeCall(CallSize(code)); |
1240 CallJSEntry(code); | 1253 CallJSEntry(code); |
1241 call_wrapper.AfterCall(); | 1254 call_wrapper.AfterCall(); |
1242 } else { | 1255 } else { |
1243 DCHECK(flag == JUMP_FUNCTION); | 1256 DCHECK(flag == JUMP_FUNCTION); |
1244 JumpToJSEntry(code); | 1257 JumpToJSEntry(code); |
1245 } | 1258 } |
1246 | 1259 |
1247 // Continue here if InvokePrologue does handle the invocation due to | 1260 // Continue here if InvokePrologue does handle the invocation due to |
1248 // mismatched parameter counts. | 1261 // mismatched parameter counts. |
1249 bind(&done); | 1262 bind(&done); |
1250 } | 1263 } |
1251 } | 1264 } |
1252 | 1265 |
1253 | |
1254 void MacroAssembler::InvokeFunction(Register fun, Register new_target, | 1266 void MacroAssembler::InvokeFunction(Register fun, Register new_target, |
1255 const ParameterCount& actual, | 1267 const ParameterCount& actual, |
1256 InvokeFlag flag, | 1268 InvokeFlag flag, |
1257 const CallWrapper& call_wrapper) { | 1269 const CallWrapper& call_wrapper) { |
1258 // You can't call a function without a valid frame. | 1270 // You can't call a function without a valid frame. |
1259 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 1271 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1260 | 1272 |
1261 // Contract with called JS functions requires that function is passed in r4. | 1273 // Contract with called JS functions requires that function is passed in r3. |
1262 DCHECK(fun.is(r4)); | 1274 DCHECK(fun.is(r3)); |
1263 | 1275 |
1264 Register expected_reg = r5; | 1276 Register expected_reg = r4; |
1265 Register temp_reg = r7; | 1277 Register temp_reg = r6; |
1266 | 1278 LoadP(temp_reg, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); |
1267 LoadP(temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 1279 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset)); |
1268 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); | 1280 LoadW(expected_reg, |
1269 LoadWordArith(expected_reg, | 1281 FieldMemOperand(temp_reg, |
1270 FieldMemOperand( | 1282 SharedFunctionInfo::kFormalParameterCountOffset)); |
1271 temp_reg, SharedFunctionInfo::kFormalParameterCountOffset)); | 1283 #if !defined(V8_TARGET_ARCH_S390X) |
1272 #if !defined(V8_TARGET_ARCH_PPC64) | |
1273 SmiUntag(expected_reg); | 1284 SmiUntag(expected_reg); |
1274 #endif | 1285 #endif |
1275 | 1286 |
1276 ParameterCount expected(expected_reg); | 1287 ParameterCount expected(expected_reg); |
1277 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper); | 1288 InvokeFunctionCode(fun, new_target, expected, actual, flag, call_wrapper); |
1278 } | 1289 } |
1279 | 1290 |
1280 | |
1281 void MacroAssembler::InvokeFunction(Register function, | 1291 void MacroAssembler::InvokeFunction(Register function, |
1282 const ParameterCount& expected, | 1292 const ParameterCount& expected, |
1283 const ParameterCount& actual, | 1293 const ParameterCount& actual, |
1284 InvokeFlag flag, | 1294 InvokeFlag flag, |
1285 const CallWrapper& call_wrapper) { | 1295 const CallWrapper& call_wrapper) { |
1286 // You can't call a function without a valid frame. | 1296 // You can't call a function without a valid frame. |
1287 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 1297 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
1288 | 1298 |
1289 // Contract with called JS functions requires that function is passed in r4. | 1299 // Contract with called JS functions requires that function is passed in r3. |
1290 DCHECK(function.is(r4)); | 1300 DCHECK(function.is(r3)); |
1291 | 1301 |
1292 // Get the function and setup the context. | 1302 // Get the function and setup the context. |
1293 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); | 1303 LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset)); |
1294 | 1304 |
1295 InvokeFunctionCode(r4, no_reg, expected, actual, flag, call_wrapper); | 1305 InvokeFunctionCode(r3, no_reg, expected, actual, flag, call_wrapper); |
1296 } | 1306 } |
1297 | 1307 |
1298 | |
1299 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 1308 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
1300 const ParameterCount& expected, | 1309 const ParameterCount& expected, |
1301 const ParameterCount& actual, | 1310 const ParameterCount& actual, |
1302 InvokeFlag flag, | 1311 InvokeFlag flag, |
1303 const CallWrapper& call_wrapper) { | 1312 const CallWrapper& call_wrapper) { |
1304 Move(r4, function); | 1313 Move(r3, function); |
1305 InvokeFunction(r4, expected, actual, flag, call_wrapper); | 1314 InvokeFunction(r3, expected, actual, flag, call_wrapper); |
1306 } | 1315 } |
1307 | 1316 |
1308 | |
1309 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch, | 1317 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch, |
1310 Label* fail) { | 1318 Label* fail) { |
1311 DCHECK(kNotStringTag != 0); | 1319 DCHECK(kNotStringTag != 0); |
1312 | 1320 |
1313 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1321 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1314 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1322 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1315 andi(r0, scratch, Operand(kIsNotStringMask)); | 1323 mov(r0, Operand(kIsNotStringMask)); |
1316 bne(fail, cr0); | 1324 AndP(r0, scratch); |
| 1325 bne(fail); |
1317 } | 1326 } |
1318 | 1327 |
1319 | |
1320 void MacroAssembler::IsObjectNameType(Register object, Register scratch, | 1328 void MacroAssembler::IsObjectNameType(Register object, Register scratch, |
1321 Label* fail) { | 1329 Label* fail) { |
1322 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 1330 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
1323 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 1331 LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
1324 cmpi(scratch, Operand(LAST_NAME_TYPE)); | 1332 CmpP(scratch, Operand(LAST_NAME_TYPE)); |
1325 bgt(fail); | 1333 bgt(fail); |
1326 } | 1334 } |
1327 | 1335 |
1328 | |
1329 void MacroAssembler::DebugBreak() { | 1336 void MacroAssembler::DebugBreak() { |
1330 li(r3, Operand::Zero()); | 1337 LoadImmP(r2, Operand::Zero()); |
1331 mov(r4, | 1338 mov(r3, |
1332 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate()))); | 1339 Operand(ExternalReference(Runtime::kHandleDebuggerStatement, isolate()))); |
1333 CEntryStub ces(isolate(), 1); | 1340 CEntryStub ces(isolate(), 1); |
1334 DCHECK(AllowThisStubCall(&ces)); | 1341 DCHECK(AllowThisStubCall(&ces)); |
1335 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT); | 1342 Call(ces.GetCode(), RelocInfo::DEBUGGER_STATEMENT); |
1336 } | 1343 } |
1337 | 1344 |
1338 | |
1339 void MacroAssembler::PushStackHandler() { | 1345 void MacroAssembler::PushStackHandler() { |
1340 // Adjust this code if not the case. | 1346 // Adjust this code if not the case. |
1341 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); | 1347 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); |
1342 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 1348 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
1343 | 1349 |
1344 // Link the current handler as the next handler. | 1350 // Link the current handler as the next handler. |
1345 // Preserve r3-r7. | 1351 mov(r7, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1346 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | |
1347 LoadP(r0, MemOperand(r8)); | |
1348 push(r0); | |
1349 | 1352 |
| 1353 // Reserve the stack handler frame (StackHandlerConstants::kSize bytes). |
| 1354 lay(sp, MemOperand(sp, -StackHandlerConstants::kSize)); |
| 1355 |
| 1356 // Copy the old handler into the next handler slot. |
| 1357 mvc(MemOperand(sp, StackHandlerConstants::kNextOffset), MemOperand(r7), |
| 1358 kPointerSize); |
1350 // Set this new handler as the current one. | 1359 // Set this new handler as the current one. |
1351 StoreP(sp, MemOperand(r8)); | 1360 StoreP(sp, MemOperand(r7)); |
1352 } | 1361 } |
1353 | 1362 |
1354 | |
1355 void MacroAssembler::PopStackHandler() { | 1363 void MacroAssembler::PopStackHandler() { |
1356 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); | 1364 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); |
1357 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 1365 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
1358 | 1366 |
1359 pop(r4); | 1367 // Pop the Next Handler into r3 and store it into Handler Address reference. |
| 1368 Pop(r3); |
1360 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 1369 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
1361 StoreP(r4, MemOperand(ip)); | 1370 |
| 1371 StoreP(r3, MemOperand(ip)); |
1362 } | 1372 } |
1363 | 1373 |
1364 | |
1365 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 1374 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
1366 Register scratch, Label* miss) { | 1375 Register scratch, Label* miss) { |
1367 Label same_contexts; | 1376 Label same_contexts; |
1368 | 1377 |
1369 DCHECK(!holder_reg.is(scratch)); | 1378 DCHECK(!holder_reg.is(scratch)); |
1370 DCHECK(!holder_reg.is(ip)); | 1379 DCHECK(!holder_reg.is(ip)); |
1371 DCHECK(!scratch.is(ip)); | 1380 DCHECK(!scratch.is(ip)); |
1372 | 1381 |
1373 // Load current lexical context from the stack frame. | 1382 // Load current lexical context from the stack frame. |
1374 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1383 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
1375 // In debug mode, make sure the lexical context is set. | 1384 // In debug mode, make sure the lexical context is set. |
1376 #ifdef DEBUG | 1385 #ifdef DEBUG |
1377 cmpi(scratch, Operand::Zero()); | 1386 CmpP(scratch, Operand::Zero()); |
1378 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | 1387 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); |
1379 #endif | 1388 #endif |
1380 | 1389 |
1381 // Load the native context of the current context. | 1390 // Load the native context of the current context. |
1382 LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX)); | 1391 LoadP(scratch, ContextMemOperand(scratch, Context::NATIVE_CONTEXT_INDEX)); |
1383 | 1392 |
1384 // Check the context is a native context. | 1393 // Check the context is a native context. |
1385 if (emit_debug_code()) { | 1394 if (emit_debug_code()) { |
1386 // Cannot use ip as a temporary in this verification code. Due to the fact | 1395 // Cannot use ip as a temporary in this verification code. Due to the fact |
1387 // that ip is clobbered as part of cmp with an object Operand. | 1396 // that ip is clobbered as part of cmp with an object Operand. |
1388 push(holder_reg); // Temporarily save holder on the stack. | 1397 push(holder_reg); // Temporarily save holder on the stack. |
1389 // Read the first word and compare to the native_context_map. | 1398 // Read the first word and compare to the native_context_map. |
1390 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 1399 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
1391 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1400 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex); |
1392 cmp(holder_reg, ip); | |
1393 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1401 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1394 pop(holder_reg); // Restore holder. | 1402 pop(holder_reg); // Restore holder. |
1395 } | 1403 } |
1396 | 1404 |
1397 // Check if both contexts are the same. | 1405 // Check if both contexts are the same. |
1398 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1406 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1399 cmp(scratch, ip); | 1407 CmpP(scratch, ip); |
1400 beq(&same_contexts); | 1408 beq(&same_contexts, Label::kNear); |
1401 | 1409 |
1402 // Check the context is a native context. | 1410 // Check the context is a native context. |
1403 if (emit_debug_code()) { | 1411 if (emit_debug_code()) { |
| 1412 // TODO(119): avoid push(holder_reg)/pop(holder_reg) |
1404 // Cannot use ip as a temporary in this verification code. Due to the fact | 1413 // Cannot use ip as a temporary in this verification code. Due to the fact |
1405 // that ip is clobbered as part of cmp with an object Operand. | 1414 // that ip is clobbered as part of cmp with an object Operand. |
1406 push(holder_reg); // Temporarily save holder on the stack. | 1415 push(holder_reg); // Temporarily save holder on the stack. |
1407 mr(holder_reg, ip); // Move ip to its holding place. | 1416 LoadRR(holder_reg, ip); // Move ip to its holding place. |
1408 LoadRoot(ip, Heap::kNullValueRootIndex); | 1417 CompareRoot(holder_reg, Heap::kNullValueRootIndex); |
1409 cmp(holder_reg, ip); | |
1410 Check(ne, kJSGlobalProxyContextShouldNotBeNull); | 1418 Check(ne, kJSGlobalProxyContextShouldNotBeNull); |
1411 | 1419 |
1412 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); | 1420 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
1413 LoadRoot(ip, Heap::kNativeContextMapRootIndex); | 1421 CompareRoot(holder_reg, Heap::kNativeContextMapRootIndex); |
1414 cmp(holder_reg, ip); | |
1415 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); | 1422 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); |
1416 // Restore ip is not needed. ip is reloaded below. | 1423 // Restore ip is not needed. ip is reloaded below. |
1417 pop(holder_reg); // Restore holder. | 1424 pop(holder_reg); // Restore holder. |
1418 // Restore ip to holder's context. | 1425 // Restore ip to holder's context. |
1419 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); | 1426 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); |
1420 } | 1427 } |
1421 | 1428 |
1422 // Check that the security token in the calling global object is | 1429 // Check that the security token in the calling global object is |
1423 // compatible with the security token in the receiving global | 1430 // compatible with the security token in the receiving global |
1424 // object. | 1431 // object. |
1425 int token_offset = | 1432 int token_offset = |
1426 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize; | 1433 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize; |
1427 | 1434 |
1428 LoadP(scratch, FieldMemOperand(scratch, token_offset)); | 1435 LoadP(scratch, FieldMemOperand(scratch, token_offset)); |
1429 LoadP(ip, FieldMemOperand(ip, token_offset)); | 1436 LoadP(ip, FieldMemOperand(ip, token_offset)); |
1430 cmp(scratch, ip); | 1437 CmpP(scratch, ip); |
1431 bne(miss); | 1438 bne(miss); |
1432 | 1439 |
1433 bind(&same_contexts); | 1440 bind(&same_contexts); |
1434 } | 1441 } |
1435 | 1442 |
1436 | |
1437 // Compute the hash code from the untagged key. This must be kept in sync with | 1443 // Compute the hash code from the untagged key. This must be kept in sync with |
1438 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in | 1444 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in |
1439 // code-stub-hydrogen.cc | 1445 // code-stub-hydrogen.cc |
1440 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { | 1446 void MacroAssembler::GetNumberHash(Register t0, Register scratch) { |
1441 // First of all we assign the hash seed to scratch. | 1447 // First of all we assign the hash seed to scratch. |
1442 LoadRoot(scratch, Heap::kHashSeedRootIndex); | 1448 LoadRoot(scratch, Heap::kHashSeedRootIndex); |
1443 SmiUntag(scratch); | 1449 SmiUntag(scratch); |
1444 | 1450 |
1445 // Xor original key with a seed. | 1451 // Xor original key with a seed. |
1446 xor_(t0, t0, scratch); | 1452 XorP(t0, scratch); |
1447 | 1453 |
1448 // Compute the hash code from the untagged key. This must be kept in sync | 1454 // Compute the hash code from the untagged key. This must be kept in sync |
1449 // with ComputeIntegerHash in utils.h. | 1455 // with ComputeIntegerHash in utils.h. |
1450 // | 1456 // |
1451 // hash = ~hash + (hash << 15); | 1457 // hash = ~hash + (hash << 15); |
1452 notx(scratch, t0); | 1458 LoadRR(scratch, t0); |
1453 slwi(t0, t0, Operand(15)); | 1459 NotP(scratch); |
1454 add(t0, scratch, t0); | 1460 sll(t0, Operand(15)); |
| 1461 AddP(t0, scratch, t0); |
1455 // hash = hash ^ (hash >> 12); | 1462 // hash = hash ^ (hash >> 12); |
1456 srwi(scratch, t0, Operand(12)); | 1463 ShiftRight(scratch, t0, Operand(12)); |
1457 xor_(t0, t0, scratch); | 1464 XorP(t0, scratch); |
1458 // hash = hash + (hash << 2); | 1465 // hash = hash + (hash << 2); |
1459 slwi(scratch, t0, Operand(2)); | 1466 ShiftLeft(scratch, t0, Operand(2)); |
1460 add(t0, t0, scratch); | 1467 AddP(t0, t0, scratch); |
1461 // hash = hash ^ (hash >> 4); | 1468 // hash = hash ^ (hash >> 4); |
1462 srwi(scratch, t0, Operand(4)); | 1469 ShiftRight(scratch, t0, Operand(4)); |
1463 xor_(t0, t0, scratch); | 1470 XorP(t0, scratch); |
1464 // hash = hash * 2057; | 1471 // hash = hash * 2057; |
1465 mr(r0, t0); | 1472 LoadRR(r0, t0); |
1466 slwi(scratch, t0, Operand(3)); | 1473 ShiftLeft(scratch, t0, Operand(3)); |
1467 add(t0, t0, scratch); | 1474 AddP(t0, t0, scratch); |
1468 slwi(scratch, r0, Operand(11)); | 1475 ShiftLeft(scratch, r0, Operand(11)); |
1469 add(t0, t0, scratch); | 1476 AddP(t0, t0, scratch); |
1470 // hash = hash ^ (hash >> 16); | 1477 // hash = hash ^ (hash >> 16); |
1471 srwi(scratch, t0, Operand(16)); | 1478 ShiftRight(scratch, t0, Operand(16)); |
1472 xor_(t0, t0, scratch); | 1479 XorP(t0, scratch); |
1473 // hash & 0x3fffffff | 1480 // hash & 0x3fffffff |
1474 ExtractBitRange(t0, t0, 29, 0); | 1481 ExtractBitRange(t0, t0, 29, 0); |
1475 } | 1482 } |
1476 | 1483 |
1477 | |
1478 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements, | 1484 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements, |
1479 Register key, Register result, | 1485 Register key, Register result, |
1480 Register t0, Register t1, | 1486 Register t0, Register t1, |
1481 Register t2) { | 1487 Register t2) { |
1482 // Register use: | 1488 // Register use: |
1483 // | 1489 // |
1484 // elements - holds the slow-case elements of the receiver on entry. | 1490 // elements - holds the slow-case elements of the receiver on entry. |
1485 // Unchanged unless 'result' is the same register. | 1491 // Unchanged unless 'result' is the same register. |
1486 // | 1492 // |
1487 // key - holds the smi key on entry. | 1493 // key - holds the smi key on entry. |
(...skipping 11 matching lines...) Expand all Loading... |
1499 // t1 - used to hold the capacity mask of the dictionary | 1505 // t1 - used to hold the capacity mask of the dictionary |
1500 // | 1506 // |
1501 // t2 - used for the index into the dictionary. | 1507 // t2 - used for the index into the dictionary. |
1502 Label done; | 1508 Label done; |
1503 | 1509 |
1504 GetNumberHash(t0, t1); | 1510 GetNumberHash(t0, t1); |
1505 | 1511 |
1506 // Compute the capacity mask. | 1512 // Compute the capacity mask. |
1507 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); | 1513 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); |
1508 SmiUntag(t1); | 1514 SmiUntag(t1); |
1509 subi(t1, t1, Operand(1)); | 1515 SubP(t1, Operand(1)); |
1510 | 1516 |
1511 // Generate an unrolled loop that performs a few probes before giving up. | 1517 // Generate an unrolled loop that performs a few probes before giving up. |
1512 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 1518 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
1513 // Use t2 for index calculations and keep the hash intact in t0. | 1519 // Use t2 for index calculations and keep the hash intact in t0. |
1514 mr(t2, t0); | 1520 LoadRR(t2, t0); |
1515 // Compute the masked index: (hash + i + i * i) & mask. | 1521 // Compute the masked index: (hash + i + i * i) & mask. |
1516 if (i > 0) { | 1522 if (i > 0) { |
1517 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); | 1523 AddP(t2, Operand(SeededNumberDictionary::GetProbeOffset(i))); |
1518 } | 1524 } |
1519 and_(t2, t2, t1); | 1525 AndP(t2, t1); |
1520 | 1526 |
1521 // Scale the index by multiplying by the element size. | 1527 // Scale the index by multiplying by the element size. |
1522 DCHECK(SeededNumberDictionary::kEntrySize == 3); | 1528 DCHECK(SeededNumberDictionary::kEntrySize == 3); |
1523 slwi(ip, t2, Operand(1)); | 1529 LoadRR(ip, t2); |
1524 add(t2, t2, ip); // t2 = t2 * 3 | 1530 sll(ip, Operand(1)); |
| 1531 AddP(t2, ip); // t2 = t2 * 3 |
1525 | 1532 |
1526 // Check if the key is identical to the name. | 1533 // Check if the key is identical to the name. |
1527 slwi(t2, t2, Operand(kPointerSizeLog2)); | 1534 sll(t2, Operand(kPointerSizeLog2)); |
1528 add(t2, elements, t2); | 1535 AddP(t2, elements); |
1529 LoadP(ip, | 1536 LoadP(ip, |
1530 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); | 1537 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset)); |
1531 cmp(key, ip); | 1538 CmpP(key, ip); |
1532 if (i != kNumberDictionaryProbes - 1) { | 1539 if (i != kNumberDictionaryProbes - 1) { |
1533 beq(&done); | 1540 beq(&done, Label::kNear); |
1534 } else { | 1541 } else { |
1535 bne(miss); | 1542 bne(miss); |
1536 } | 1543 } |
1537 } | 1544 } |
1538 | 1545 |
1539 bind(&done); | 1546 bind(&done); |
1540 // Check that the value is a field property. | 1547 // Check that the value is a field property. |
1541 // t2: elements + (index * kPointerSize) | 1548 // t2: elements + (index * kPointerSize) |
1542 const int kDetailsOffset = | 1549 const int kDetailsOffset = |
1543 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 1550 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
1544 LoadP(t1, FieldMemOperand(t2, kDetailsOffset)); | 1551 LoadP(t1, FieldMemOperand(t2, kDetailsOffset)); |
1545 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask)); | 1552 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
1546 DCHECK_EQ(DATA, 0); | 1553 DCHECK_EQ(DATA, 0); |
1547 and_(r0, t1, ip, SetRC); | 1554 AndP(r0, ip, t1); |
1548 bne(miss, cr0); | 1555 bne(miss); |
1549 | 1556 |
1550 // Get the value at the masked, scaled index and return. | 1557 // Get the value at the masked, scaled index and return. |
1551 const int kValueOffset = | 1558 const int kValueOffset = |
1552 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 1559 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
1553 LoadP(result, FieldMemOperand(t2, kValueOffset)); | 1560 LoadP(result, FieldMemOperand(t2, kValueOffset)); |
1554 } | 1561 } |
1555 | 1562 |
1556 | |
1557 void MacroAssembler::Allocate(int object_size, Register result, | 1563 void MacroAssembler::Allocate(int object_size, Register result, |
1558 Register scratch1, Register scratch2, | 1564 Register scratch1, Register scratch2, |
1559 Label* gc_required, AllocationFlags flags) { | 1565 Label* gc_required, AllocationFlags flags) { |
1560 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); | 1566 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
1561 if (!FLAG_inline_new) { | 1567 if (!FLAG_inline_new) { |
1562 if (emit_debug_code()) { | 1568 if (emit_debug_code()) { |
1563 // Trash the registers to simulate an allocation failure. | 1569 // Trash the registers to simulate an allocation failure. |
1564 li(result, Operand(0x7091)); | 1570 LoadImmP(result, Operand(0x7091)); |
1565 li(scratch1, Operand(0x7191)); | 1571 LoadImmP(scratch1, Operand(0x7191)); |
1566 li(scratch2, Operand(0x7291)); | 1572 LoadImmP(scratch2, Operand(0x7291)); |
1567 } | 1573 } |
1568 b(gc_required); | 1574 b(gc_required); |
1569 return; | 1575 return; |
1570 } | 1576 } |
1571 | 1577 |
1572 DCHECK(!AreAliased(result, scratch1, scratch2, ip)); | 1578 DCHECK(!AreAliased(result, scratch1, scratch2, ip)); |
1573 | 1579 |
1574 // Make object size into bytes. | 1580 // Make object size into bytes. |
1575 if ((flags & SIZE_IN_WORDS) != 0) { | 1581 if ((flags & SIZE_IN_WORDS) != 0) { |
1576 object_size *= kPointerSize; | 1582 object_size *= kPointerSize; |
(...skipping 19 matching lines...) Expand all Loading... |
1596 mov(top_address, Operand(allocation_top)); | 1602 mov(top_address, Operand(allocation_top)); |
1597 | 1603 |
1598 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1604 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1599 // Load allocation top into result and allocation limit into ip. | 1605 // Load allocation top into result and allocation limit into ip. |
1600 LoadP(result, MemOperand(top_address)); | 1606 LoadP(result, MemOperand(top_address)); |
1601 LoadP(alloc_limit, MemOperand(top_address, kPointerSize)); | 1607 LoadP(alloc_limit, MemOperand(top_address, kPointerSize)); |
1602 } else { | 1608 } else { |
1603 if (emit_debug_code()) { | 1609 if (emit_debug_code()) { |
1604 // Assert that result actually contains top on entry. | 1610 // Assert that result actually contains top on entry. |
1605 LoadP(alloc_limit, MemOperand(top_address)); | 1611 LoadP(alloc_limit, MemOperand(top_address)); |
1606 cmp(result, alloc_limit); | 1612 CmpP(result, alloc_limit); |
1607 Check(eq, kUnexpectedAllocationTop); | 1613 Check(eq, kUnexpectedAllocationTop); |
1608 } | 1614 } |
1609 // Load allocation limit. Result already contains allocation top. | 1615 // Load allocation limit. Result already contains allocation top. |
1610 LoadP(alloc_limit, MemOperand(top_address, limit - top)); | 1616 LoadP(alloc_limit, MemOperand(top_address, limit - top)); |
1611 } | 1617 } |
1612 | 1618 |
1613 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1619 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1614 // Align the next allocation. Storing the filler map without checking top is | 1620 // Align the next allocation. Storing the filler map without checking top is |
1615 // safe in new-space because the limit of the heap is aligned there. | 1621 // safe in new-space because the limit of the heap is aligned there. |
1616 #if V8_TARGET_ARCH_PPC64 | 1622 #if V8_TARGET_ARCH_S390X |
1617 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 1623 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
1618 #else | 1624 #else |
1619 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1625 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
1620 andi(result_end, result, Operand(kDoubleAlignmentMask)); | 1626 AndP(result_end, result, Operand(kDoubleAlignmentMask)); |
1621 Label aligned; | 1627 Label aligned; |
1622 beq(&aligned, cr0); | 1628 beq(&aligned); |
1623 if ((flags & PRETENURE) != 0) { | 1629 if ((flags & PRETENURE) != 0) { |
1624 cmpl(result, alloc_limit); | 1630 CmpLogicalP(result, alloc_limit); |
1625 bge(gc_required); | 1631 bge(gc_required); |
1626 } | 1632 } |
1627 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); | 1633 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); |
1628 stw(result_end, MemOperand(result)); | 1634 StoreW(result_end, MemOperand(result)); |
1629 addi(result, result, Operand(kDoubleSize / 2)); | 1635 AddP(result, result, Operand(kDoubleSize / 2)); |
1630 bind(&aligned); | 1636 bind(&aligned); |
1631 #endif | 1637 #endif |
1632 } | 1638 } |
1633 | 1639 |
1634 // Calculate new top and bail out if new space is exhausted. Use result | 1640 // Calculate new top and bail out if new space is exhausted. Use result |
1635 // to calculate the new top. | 1641 // to calculate the new top. |
1636 sub(r0, alloc_limit, result); | 1642 SubP(r0, alloc_limit, result); |
1637 if (is_int16(object_size)) { | 1643 if (is_int16(object_size)) { |
1638 cmpi(r0, Operand(object_size)); | 1644 CmpP(r0, Operand(object_size)); |
1639 blt(gc_required); | 1645 blt(gc_required); |
1640 addi(result_end, result, Operand(object_size)); | 1646 AddP(result_end, result, Operand(object_size)); |
1641 } else { | 1647 } else { |
1642 Cmpi(r0, Operand(object_size), result_end); | 1648 mov(result_end, Operand(object_size)); |
| 1649 CmpP(r0, result_end); |
1643 blt(gc_required); | 1650 blt(gc_required); |
1644 add(result_end, result, result_end); | 1651 AddP(result_end, result, result_end); |
1645 } | 1652 } |
1646 StoreP(result_end, MemOperand(top_address)); | 1653 StoreP(result_end, MemOperand(top_address)); |
1647 | 1654 |
1648 // Tag object if requested. | 1655 // Tag object if requested. |
1649 if ((flags & TAG_OBJECT) != 0) { | 1656 if ((flags & TAG_OBJECT) != 0) { |
1650 addi(result, result, Operand(kHeapObjectTag)); | 1657 AddP(result, result, Operand(kHeapObjectTag)); |
1651 } | 1658 } |
1652 } | 1659 } |
1653 | 1660 |
1654 | |
1655 void MacroAssembler::Allocate(Register object_size, Register result, | 1661 void MacroAssembler::Allocate(Register object_size, Register result, |
1656 Register result_end, Register scratch, | 1662 Register result_end, Register scratch, |
1657 Label* gc_required, AllocationFlags flags) { | 1663 Label* gc_required, AllocationFlags flags) { |
1658 if (!FLAG_inline_new) { | 1664 if (!FLAG_inline_new) { |
1659 if (emit_debug_code()) { | 1665 if (emit_debug_code()) { |
1660 // Trash the registers to simulate an allocation failure. | 1666 // Trash the registers to simulate an allocation failure. |
1661 li(result, Operand(0x7091)); | 1667 LoadImmP(result, Operand(0x7091)); |
1662 li(scratch, Operand(0x7191)); | 1668 LoadImmP(scratch, Operand(0x7191)); |
1663 li(result_end, Operand(0x7291)); | 1669 LoadImmP(result_end, Operand(0x7291)); |
1664 } | 1670 } |
1665 b(gc_required); | 1671 b(gc_required); |
1666 return; | 1672 return; |
1667 } | 1673 } |
1668 | 1674 |
1669 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag | 1675 // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag |
1670 // is not specified. Other registers must not overlap. | 1676 // is not specified. Other registers must not overlap. |
1671 DCHECK(!AreAliased(object_size, result, scratch, ip)); | 1677 DCHECK(!AreAliased(object_size, result, scratch, ip)); |
1672 DCHECK(!AreAliased(result_end, result, scratch, ip)); | 1678 DCHECK(!AreAliased(result_end, result, scratch, ip)); |
1673 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end)); | 1679 DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end)); |
(...skipping 15 matching lines...) Expand all Loading... |
1689 mov(top_address, Operand(allocation_top)); | 1695 mov(top_address, Operand(allocation_top)); |
1690 | 1696 |
1691 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 1697 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
1692 // Load allocation top into result and allocation limit into alloc_limit.. | 1698 // Load allocation top into result and allocation limit into alloc_limit.. |
1693 LoadP(result, MemOperand(top_address)); | 1699 LoadP(result, MemOperand(top_address)); |
1694 LoadP(alloc_limit, MemOperand(top_address, kPointerSize)); | 1700 LoadP(alloc_limit, MemOperand(top_address, kPointerSize)); |
1695 } else { | 1701 } else { |
1696 if (emit_debug_code()) { | 1702 if (emit_debug_code()) { |
1697 // Assert that result actually contains top on entry. | 1703 // Assert that result actually contains top on entry. |
1698 LoadP(alloc_limit, MemOperand(top_address)); | 1704 LoadP(alloc_limit, MemOperand(top_address)); |
1699 cmp(result, alloc_limit); | 1705 CmpP(result, alloc_limit); |
1700 Check(eq, kUnexpectedAllocationTop); | 1706 Check(eq, kUnexpectedAllocationTop); |
1701 } | 1707 } |
1702 // Load allocation limit. Result already contains allocation top. | 1708 // Load allocation limit. Result already contains allocation top. |
1703 LoadP(alloc_limit, MemOperand(top_address, limit - top)); | 1709 LoadP(alloc_limit, MemOperand(top_address, limit - top)); |
1704 } | 1710 } |
1705 | 1711 |
1706 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 1712 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
1707 // Align the next allocation. Storing the filler map without checking top is | 1713 // Align the next allocation. Storing the filler map without checking top is |
1708 // safe in new-space because the limit of the heap is aligned there. | 1714 // safe in new-space because the limit of the heap is aligned there. |
1709 #if V8_TARGET_ARCH_PPC64 | 1715 #if V8_TARGET_ARCH_S390X |
1710 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 1716 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
1711 #else | 1717 #else |
1712 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 1718 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
1713 andi(result_end, result, Operand(kDoubleAlignmentMask)); | 1719 AndP(result_end, result, Operand(kDoubleAlignmentMask)); |
1714 Label aligned; | 1720 Label aligned; |
1715 beq(&aligned, cr0); | 1721 beq(&aligned); |
1716 if ((flags & PRETENURE) != 0) { | 1722 if ((flags & PRETENURE) != 0) { |
1717 cmpl(result, alloc_limit); | 1723 CmpLogicalP(result, alloc_limit); |
1718 bge(gc_required); | 1724 bge(gc_required); |
1719 } | 1725 } |
1720 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); | 1726 mov(result_end, Operand(isolate()->factory()->one_pointer_filler_map())); |
1721 stw(result_end, MemOperand(result)); | 1727 StoreW(result_end, MemOperand(result)); |
1722 addi(result, result, Operand(kDoubleSize / 2)); | 1728 AddP(result, result, Operand(kDoubleSize / 2)); |
1723 bind(&aligned); | 1729 bind(&aligned); |
1724 #endif | 1730 #endif |
1725 } | 1731 } |
1726 | 1732 |
1727 // Calculate new top and bail out if new space is exhausted. Use result | 1733 // Calculate new top and bail out if new space is exhausted. Use result |
1728 // to calculate the new top. Object size may be in words so a shift is | 1734 // to calculate the new top. Object size may be in words so a shift is |
1729 // required to get the number of bytes. | 1735 // required to get the number of bytes. |
1730 sub(r0, alloc_limit, result); | 1736 SubP(r0, alloc_limit, result); |
1731 if ((flags & SIZE_IN_WORDS) != 0) { | 1737 if ((flags & SIZE_IN_WORDS) != 0) { |
1732 ShiftLeftImm(result_end, object_size, Operand(kPointerSizeLog2)); | 1738 ShiftLeftP(result_end, object_size, Operand(kPointerSizeLog2)); |
1733 cmp(r0, result_end); | 1739 CmpP(r0, result_end); |
1734 blt(gc_required); | 1740 blt(gc_required); |
1735 add(result_end, result, result_end); | 1741 AddP(result_end, result, result_end); |
1736 } else { | 1742 } else { |
1737 cmp(r0, object_size); | 1743 CmpP(r0, object_size); |
1738 blt(gc_required); | 1744 blt(gc_required); |
1739 add(result_end, result, object_size); | 1745 AddP(result_end, result, object_size); |
1740 } | 1746 } |
1741 | 1747 |
1742 // Update allocation top. result temporarily holds the new top. | 1748 // Update allocation top. result temporarily holds the new top. |
1743 if (emit_debug_code()) { | 1749 if (emit_debug_code()) { |
1744 andi(r0, result_end, Operand(kObjectAlignmentMask)); | 1750 AndP(r0, result_end, Operand(kObjectAlignmentMask)); |
1745 Check(eq, kUnalignedAllocationInNewSpace, cr0); | 1751 Check(eq, kUnalignedAllocationInNewSpace, cr0); |
1746 } | 1752 } |
1747 StoreP(result_end, MemOperand(top_address)); | 1753 StoreP(result_end, MemOperand(top_address)); |
1748 | 1754 |
1749 // Tag object if requested. | 1755 // Tag object if requested. |
1750 if ((flags & TAG_OBJECT) != 0) { | 1756 if ((flags & TAG_OBJECT) != 0) { |
1751 addi(result, result, Operand(kHeapObjectTag)); | 1757 AddP(result, result, Operand(kHeapObjectTag)); |
1752 } | 1758 } |
1753 } | 1759 } |
1754 | 1760 |
1755 | |
1756 void MacroAssembler::AllocateTwoByteString(Register result, Register length, | 1761 void MacroAssembler::AllocateTwoByteString(Register result, Register length, |
1757 Register scratch1, Register scratch2, | 1762 Register scratch1, Register scratch2, |
1758 Register scratch3, | 1763 Register scratch3, |
1759 Label* gc_required) { | 1764 Label* gc_required) { |
1760 // Calculate the number of bytes needed for the characters in the string while | 1765 // Calculate the number of bytes needed for the characters in the string while |
1761 // observing object alignment. | 1766 // observing object alignment. |
1762 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1767 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1763 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars. | 1768 |
1764 addi(scratch1, scratch1, | 1769 ShiftLeft(scratch1, length, Operand(1)); // Length in bytes, not chars. |
1765 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); | 1770 AddP(scratch1, Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize)); |
1766 mov(r0, Operand(~kObjectAlignmentMask)); | 1771 |
1767 and_(scratch1, scratch1, r0); | 1772 AndP(scratch1, Operand(~kObjectAlignmentMask)); |
1768 | 1773 |
1769 // Allocate two-byte string in new space. | 1774 // Allocate two-byte string in new space. |
1770 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT); | 1775 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT); |
1771 | 1776 |
1772 // Set the map, length and hash field. | 1777 // Set the map, length and hash field. |
1773 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1, | 1778 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1, |
1774 scratch2); | 1779 scratch2); |
1775 } | 1780 } |
1776 | 1781 |
1777 | |
1778 void MacroAssembler::AllocateOneByteString(Register result, Register length, | 1782 void MacroAssembler::AllocateOneByteString(Register result, Register length, |
1779 Register scratch1, Register scratch2, | 1783 Register scratch1, Register scratch2, |
1780 Register scratch3, | 1784 Register scratch3, |
1781 Label* gc_required) { | 1785 Label* gc_required) { |
1782 // Calculate the number of bytes needed for the characters in the string while | 1786 // Calculate the number of bytes needed for the characters in the string while |
1783 // observing object alignment. | 1787 // observing object alignment. |
1784 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 1788 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); |
1785 DCHECK(kCharSize == 1); | 1789 DCHECK(kCharSize == 1); |
1786 addi(scratch1, length, | 1790 AddP(scratch1, length, |
1787 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); | 1791 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); |
1788 li(r0, Operand(~kObjectAlignmentMask)); | 1792 AndP(scratch1, Operand(~kObjectAlignmentMask)); |
1789 and_(scratch1, scratch1, r0); | |
1790 | 1793 |
1791 // Allocate one-byte string in new space. | 1794 // Allocate one-byte string in new space. |
1792 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT); | 1795 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT); |
1793 | 1796 |
1794 // Set the map, length and hash field. | 1797 // Set the map, length and hash field. |
1795 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex, | 1798 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex, |
1796 scratch1, scratch2); | 1799 scratch1, scratch2); |
1797 } | 1800 } |
1798 | 1801 |
1799 | |
1800 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length, | 1802 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length, |
1801 Register scratch1, | 1803 Register scratch1, |
1802 Register scratch2, | 1804 Register scratch2, |
1803 Label* gc_required) { | 1805 Label* gc_required) { |
1804 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, | 1806 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, |
1805 TAG_OBJECT); | 1807 TAG_OBJECT); |
1806 | 1808 |
1807 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1, | 1809 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1, |
1808 scratch2); | 1810 scratch2); |
1809 } | 1811 } |
1810 | 1812 |
1811 | |
1812 void MacroAssembler::AllocateOneByteConsString(Register result, Register length, | 1813 void MacroAssembler::AllocateOneByteConsString(Register result, Register length, |
1813 Register scratch1, | 1814 Register scratch1, |
1814 Register scratch2, | 1815 Register scratch2, |
1815 Label* gc_required) { | 1816 Label* gc_required) { |
1816 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, | 1817 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, |
1817 TAG_OBJECT); | 1818 TAG_OBJECT); |
1818 | 1819 |
1819 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex, | 1820 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex, |
1820 scratch1, scratch2); | 1821 scratch1, scratch2); |
1821 } | 1822 } |
1822 | 1823 |
1823 | |
1824 void MacroAssembler::AllocateTwoByteSlicedString(Register result, | 1824 void MacroAssembler::AllocateTwoByteSlicedString(Register result, |
1825 Register length, | 1825 Register length, |
1826 Register scratch1, | 1826 Register scratch1, |
1827 Register scratch2, | 1827 Register scratch2, |
1828 Label* gc_required) { | 1828 Label* gc_required) { |
1829 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | 1829 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, |
1830 TAG_OBJECT); | 1830 TAG_OBJECT); |
1831 | 1831 |
1832 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1, | 1832 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1, |
1833 scratch2); | 1833 scratch2); |
1834 } | 1834 } |
1835 | 1835 |
1836 | |
1837 void MacroAssembler::AllocateOneByteSlicedString(Register result, | 1836 void MacroAssembler::AllocateOneByteSlicedString(Register result, |
1838 Register length, | 1837 Register length, |
1839 Register scratch1, | 1838 Register scratch1, |
1840 Register scratch2, | 1839 Register scratch2, |
1841 Label* gc_required) { | 1840 Label* gc_required) { |
1842 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | 1841 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, |
1843 TAG_OBJECT); | 1842 TAG_OBJECT); |
1844 | 1843 |
1845 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex, | 1844 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex, |
1846 scratch1, scratch2); | 1845 scratch1, scratch2); |
1847 } | 1846 } |
1848 | 1847 |
1849 | |
1850 void MacroAssembler::CompareObjectType(Register object, Register map, | 1848 void MacroAssembler::CompareObjectType(Register object, Register map, |
1851 Register type_reg, InstanceType type) { | 1849 Register type_reg, InstanceType type) { |
1852 const Register temp = type_reg.is(no_reg) ? r0 : type_reg; | 1850 const Register temp = type_reg.is(no_reg) ? r0 : type_reg; |
1853 | 1851 |
1854 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1852 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
1855 CompareInstanceType(map, temp, type); | 1853 CompareInstanceType(map, temp, type); |
1856 } | 1854 } |
1857 | 1855 |
1858 | |
1859 void MacroAssembler::CompareInstanceType(Register map, Register type_reg, | 1856 void MacroAssembler::CompareInstanceType(Register map, Register type_reg, |
1860 InstanceType type) { | 1857 InstanceType type) { |
1861 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); | 1858 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096); |
1862 STATIC_ASSERT(LAST_TYPE < 256); | 1859 STATIC_ASSERT(LAST_TYPE < 256); |
1863 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 1860 LoadlB(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
1864 cmpi(type_reg, Operand(type)); | 1861 CmpP(type_reg, Operand(type)); |
1865 } | 1862 } |
1866 | 1863 |
1867 | |
1868 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) { | 1864 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) { |
1869 DCHECK(!obj.is(r0)); | 1865 CmpP(obj, MemOperand(kRootRegister, index << kPointerSizeLog2)); |
1870 LoadRoot(r0, index); | |
1871 cmp(obj, r0); | |
1872 } | 1866 } |
1873 | 1867 |
1874 | |
1875 void MacroAssembler::CheckFastElements(Register map, Register scratch, | 1868 void MacroAssembler::CheckFastElements(Register map, Register scratch, |
1876 Label* fail) { | 1869 Label* fail) { |
1877 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1870 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
1878 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1871 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
1879 STATIC_ASSERT(FAST_ELEMENTS == 2); | 1872 STATIC_ASSERT(FAST_ELEMENTS == 2); |
1880 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 1873 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
1881 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
1882 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000); | 1874 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000); |
1883 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 1875 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset), |
| 1876 Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
1884 bgt(fail); | 1877 bgt(fail); |
1885 } | 1878 } |
1886 | 1879 |
1887 | |
1888 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, | 1880 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch, |
1889 Label* fail) { | 1881 Label* fail) { |
1890 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1882 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
1891 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1883 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
1892 STATIC_ASSERT(FAST_ELEMENTS == 2); | 1884 STATIC_ASSERT(FAST_ELEMENTS == 2); |
1893 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 1885 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); |
1894 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1886 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset), |
1895 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 1887 Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
1896 ble(fail); | 1888 ble(fail); |
1897 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue)); | 1889 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset), |
| 1890 Operand(Map::kMaximumBitField2FastHoleyElementValue)); |
1898 bgt(fail); | 1891 bgt(fail); |
1899 } | 1892 } |
1900 | 1893 |
1901 | |
1902 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch, | 1894 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch, |
1903 Label* fail) { | 1895 Label* fail) { |
1904 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 1896 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); |
1905 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 1897 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); |
1906 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | 1898 CmpLogicalByte(FieldMemOperand(map, Map::kBitField2Offset), |
1907 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | 1899 Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); |
1908 bgt(fail); | 1900 bgt(fail); |
1909 } | 1901 } |
1910 | 1902 |
1911 | 1903 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) { |
| 1904 SmiUntag(ip, smi); |
| 1905 ConvertIntToDouble(ip, value); |
| 1906 } |
1912 void MacroAssembler::StoreNumberToDoubleElements( | 1907 void MacroAssembler::StoreNumberToDoubleElements( |
1913 Register value_reg, Register key_reg, Register elements_reg, | 1908 Register value_reg, Register key_reg, Register elements_reg, |
1914 Register scratch1, DoubleRegister double_scratch, Label* fail, | 1909 Register scratch1, DoubleRegister double_scratch, Label* fail, |
1915 int elements_offset) { | 1910 int elements_offset) { |
1916 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | 1911 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); |
1917 Label smi_value, store; | 1912 Label smi_value, store; |
1918 | 1913 |
1919 // Handle smi values specially. | 1914 // Handle smi values specially. |
1920 JumpIfSmi(value_reg, &smi_value); | 1915 JumpIfSmi(value_reg, &smi_value); |
1921 | 1916 |
1922 // Ensure that the object is a heap number | 1917 // Ensure that the object is a heap number |
1923 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail, | 1918 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail, |
1924 DONT_DO_SMI_CHECK); | 1919 DONT_DO_SMI_CHECK); |
1925 | 1920 |
1926 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | 1921 LoadDouble(double_scratch, |
1927 // Double value, turn potential sNaN into qNaN. | 1922 FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
| 1923 // Force a canonical NaN. |
1928 CanonicalizeNaN(double_scratch); | 1924 CanonicalizeNaN(double_scratch); |
1929 b(&store); | 1925 b(&store); |
1930 | 1926 |
1931 bind(&smi_value); | 1927 bind(&smi_value); |
1932 SmiToDouble(double_scratch, value_reg); | 1928 SmiToDouble(double_scratch, value_reg); |
1933 | 1929 |
1934 bind(&store); | 1930 bind(&store); |
1935 SmiToDoubleArrayOffset(scratch1, key_reg); | 1931 SmiToDoubleArrayOffset(scratch1, key_reg); |
1936 add(scratch1, elements_reg, scratch1); | 1932 StoreDouble(double_scratch, |
1937 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - | 1933 FieldMemOperand(elements_reg, scratch1, |
1938 elements_offset)); | 1934 FixedDoubleArray::kHeaderSize - elements_offset)); |
1939 } | 1935 } |
1940 | 1936 |
1941 | |
1942 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, | 1937 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, |
1943 Register right, | 1938 Register right, |
1944 Register overflow_dst, | 1939 Register overflow_dst, |
1945 Register scratch) { | 1940 Register scratch) { |
1946 DCHECK(!dst.is(overflow_dst)); | 1941 DCHECK(!dst.is(overflow_dst)); |
1947 DCHECK(!dst.is(scratch)); | 1942 DCHECK(!dst.is(scratch)); |
1948 DCHECK(!overflow_dst.is(scratch)); | 1943 DCHECK(!overflow_dst.is(scratch)); |
1949 DCHECK(!overflow_dst.is(left)); | 1944 DCHECK(!overflow_dst.is(left)); |
1950 DCHECK(!overflow_dst.is(right)); | 1945 DCHECK(!overflow_dst.is(right)); |
1951 | 1946 |
| 1947 // TODO(joransiu): Optimize paths for left == right. |
1952 bool left_is_right = left.is(right); | 1948 bool left_is_right = left.is(right); |
1953 RCBit xorRC = left_is_right ? SetRC : LeaveRC; | |
1954 | 1949 |
1955 // C = A+B; C overflows if A/B have same sign and C has diff sign than A | 1950 // C = A+B; C overflows if A/B have same sign and C has diff sign than A |
1956 if (dst.is(left)) { | 1951 if (dst.is(left)) { |
1957 mr(scratch, left); // Preserve left. | 1952 LoadRR(scratch, left); // Preserve left. |
1958 add(dst, left, right); // Left is overwritten. | 1953 AddP(dst, left, right); // Left is overwritten. |
1959 xor_(overflow_dst, dst, scratch, xorRC); // Original left. | 1954 XorP(overflow_dst, scratch, dst); // Original left. |
1960 if (!left_is_right) xor_(scratch, dst, right); | 1955 if (!left_is_right) XorP(scratch, dst, right); |
1961 } else if (dst.is(right)) { | 1956 } else if (dst.is(right)) { |
1962 mr(scratch, right); // Preserve right. | 1957 LoadRR(scratch, right); // Preserve right. |
1963 add(dst, left, right); // Right is overwritten. | 1958 AddP(dst, left, right); // Right is overwritten. |
1964 xor_(overflow_dst, dst, left, xorRC); | 1959 XorP(overflow_dst, dst, left); |
1965 if (!left_is_right) xor_(scratch, dst, scratch); // Original right. | 1960 if (!left_is_right) XorP(scratch, dst, scratch); |
1966 } else { | 1961 } else { |
1967 add(dst, left, right); | 1962 AddP(dst, left, right); |
1968 xor_(overflow_dst, dst, left, xorRC); | 1963 XorP(overflow_dst, dst, left); |
1969 if (!left_is_right) xor_(scratch, dst, right); | 1964 if (!left_is_right) XorP(scratch, dst, right); |
1970 } | 1965 } |
1971 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC); | 1966 if (!left_is_right) AndP(overflow_dst, scratch, overflow_dst); |
| 1967 LoadAndTestRR(overflow_dst, overflow_dst); |
1972 } | 1968 } |
1973 | 1969 |
1974 | |
1975 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, | 1970 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, |
1976 intptr_t right, | 1971 intptr_t right, |
1977 Register overflow_dst, | 1972 Register overflow_dst, |
1978 Register scratch) { | 1973 Register scratch) { |
1979 Register original_left = left; | |
1980 DCHECK(!dst.is(overflow_dst)); | 1974 DCHECK(!dst.is(overflow_dst)); |
1981 DCHECK(!dst.is(scratch)); | 1975 DCHECK(!dst.is(scratch)); |
1982 DCHECK(!overflow_dst.is(scratch)); | 1976 DCHECK(!overflow_dst.is(scratch)); |
1983 DCHECK(!overflow_dst.is(left)); | 1977 DCHECK(!overflow_dst.is(left)); |
1984 | 1978 |
1985 // C = A+B; C overflows if A/B have same sign and C has diff sign than A | 1979 mov(r1, Operand(right)); |
1986 if (dst.is(left)) { | 1980 AddAndCheckForOverflow(dst, left, r1, overflow_dst, scratch); |
1987 // Preserve left. | |
1988 original_left = overflow_dst; | |
1989 mr(original_left, left); | |
1990 } | |
1991 Add(dst, left, right, scratch); | |
1992 xor_(overflow_dst, dst, original_left); | |
1993 if (right >= 0) { | |
1994 and_(overflow_dst, overflow_dst, dst, SetRC); | |
1995 } else { | |
1996 andc(overflow_dst, overflow_dst, dst, SetRC); | |
1997 } | |
1998 } | 1981 } |
1999 | 1982 |
2000 | |
2001 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left, | 1983 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left, |
2002 Register right, | 1984 Register right, |
2003 Register overflow_dst, | 1985 Register overflow_dst, |
2004 Register scratch) { | 1986 Register scratch) { |
2005 DCHECK(!dst.is(overflow_dst)); | 1987 DCHECK(!dst.is(overflow_dst)); |
2006 DCHECK(!dst.is(scratch)); | 1988 DCHECK(!dst.is(scratch)); |
2007 DCHECK(!overflow_dst.is(scratch)); | 1989 DCHECK(!overflow_dst.is(scratch)); |
2008 DCHECK(!overflow_dst.is(left)); | 1990 DCHECK(!overflow_dst.is(left)); |
2009 DCHECK(!overflow_dst.is(right)); | 1991 DCHECK(!overflow_dst.is(right)); |
2010 | 1992 |
2011 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A | 1993 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A |
2012 if (dst.is(left)) { | 1994 if (dst.is(left)) { |
2013 mr(scratch, left); // Preserve left. | 1995 LoadRR(scratch, left); // Preserve left. |
2014 sub(dst, left, right); // Left is overwritten. | 1996 SubP(dst, left, right); // Left is overwritten. |
2015 xor_(overflow_dst, dst, scratch); | 1997 XorP(overflow_dst, dst, scratch); |
2016 xor_(scratch, scratch, right); | 1998 XorP(scratch, right); |
2017 and_(overflow_dst, overflow_dst, scratch, SetRC); | 1999 AndP(overflow_dst, scratch /*, SetRC*/); |
| 2000 LoadAndTestRR(overflow_dst, overflow_dst); |
 | 2001 // The explicit LoadAndTestRR above sets the condition code (PPC SetRC equivalent). |
2018 } else if (dst.is(right)) { | 2002 } else if (dst.is(right)) { |
2019 mr(scratch, right); // Preserve right. | 2003 LoadRR(scratch, right); // Preserve right. |
2020 sub(dst, left, right); // Right is overwritten. | 2004 SubP(dst, left, right); // Right is overwritten. |
2021 xor_(overflow_dst, dst, left); | 2005 XorP(overflow_dst, dst, left); |
2022 xor_(scratch, left, scratch); | 2006 XorP(scratch, left); |
2023 and_(overflow_dst, overflow_dst, scratch, SetRC); | 2007 AndP(overflow_dst, scratch /*, SetRC*/); |
| 2008 LoadAndTestRR(overflow_dst, overflow_dst); |
 | 2009 // The explicit LoadAndTestRR above sets the condition code (PPC SetRC equivalent). |
2024 } else { | 2010 } else { |
2025 sub(dst, left, right); | 2011 SubP(dst, left, right); |
2026 xor_(overflow_dst, dst, left); | 2012 XorP(overflow_dst, dst, left); |
2027 xor_(scratch, left, right); | 2013 XorP(scratch, left, right); |
2028 and_(overflow_dst, scratch, overflow_dst, SetRC); | 2014 AndP(overflow_dst, scratch /*, SetRC*/); |
| 2015 LoadAndTestRR(overflow_dst, overflow_dst); |
 | 2016 // The explicit LoadAndTestRR above sets the condition code (PPC SetRC equivalent). |
2029 } | 2017 } |
2030 } | 2018 } |
2031 | 2019 |
2032 | |
2033 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map, | 2020 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map, |
2034 Label* early_success) { | 2021 Label* early_success) { |
2035 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2022 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2036 CompareMap(scratch, map, early_success); | 2023 CompareMap(obj, map, early_success); |
2037 } | 2024 } |
2038 | 2025 |
2039 | |
2040 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map, | 2026 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map, |
2041 Label* early_success) { | 2027 Label* early_success) { |
2042 mov(r0, Operand(map)); | 2028 mov(r0, Operand(map)); |
2043 cmp(obj_map, r0); | 2029 CmpP(r0, FieldMemOperand(obj_map, HeapObject::kMapOffset)); |
2044 } | 2030 } |
2045 | 2031 |
2046 | |
2047 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, | 2032 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map, |
2048 Label* fail, SmiCheckType smi_check_type) { | 2033 Label* fail, SmiCheckType smi_check_type) { |
2049 if (smi_check_type == DO_SMI_CHECK) { | 2034 if (smi_check_type == DO_SMI_CHECK) { |
2050 JumpIfSmi(obj, fail); | 2035 JumpIfSmi(obj, fail); |
2051 } | 2036 } |
2052 | 2037 |
2053 Label success; | 2038 Label success; |
2054 CompareMap(obj, scratch, map, &success); | 2039 CompareMap(obj, scratch, map, &success); |
2055 bne(fail); | 2040 bne(fail); |
2056 bind(&success); | 2041 bind(&success); |
2057 } | 2042 } |
2058 | 2043 |
2059 | |
2060 void MacroAssembler::CheckMap(Register obj, Register scratch, | 2044 void MacroAssembler::CheckMap(Register obj, Register scratch, |
2061 Heap::RootListIndex index, Label* fail, | 2045 Heap::RootListIndex index, Label* fail, |
2062 SmiCheckType smi_check_type) { | 2046 SmiCheckType smi_check_type) { |
2063 if (smi_check_type == DO_SMI_CHECK) { | 2047 if (smi_check_type == DO_SMI_CHECK) { |
2064 JumpIfSmi(obj, fail); | 2048 JumpIfSmi(obj, fail); |
2065 } | 2049 } |
2066 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2050 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2067 LoadRoot(r0, index); | 2051 CompareRoot(scratch, index); |
2068 cmp(scratch, r0); | |
2069 bne(fail); | 2052 bne(fail); |
2070 } | 2053 } |
2071 | 2054 |
2072 | |
2073 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1, | 2055 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1, |
2074 Register scratch2, Handle<WeakCell> cell, | 2056 Register scratch2, Handle<WeakCell> cell, |
2075 Handle<Code> success, | 2057 Handle<Code> success, |
2076 SmiCheckType smi_check_type) { | 2058 SmiCheckType smi_check_type) { |
2077 Label fail; | 2059 Label fail; |
2078 if (smi_check_type == DO_SMI_CHECK) { | 2060 if (smi_check_type == DO_SMI_CHECK) { |
2079 JumpIfSmi(obj, &fail); | 2061 JumpIfSmi(obj, &fail); |
2080 } | 2062 } |
2081 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset)); | 2063 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset)); |
2082 CmpWeakValue(scratch1, cell, scratch2); | 2064 CmpWeakValue(scratch1, cell, scratch2); |
2083 Jump(success, RelocInfo::CODE_TARGET, eq); | 2065 Jump(success, RelocInfo::CODE_TARGET, eq); |
2084 bind(&fail); | 2066 bind(&fail); |
2085 } | 2067 } |
2086 | 2068 |
2087 | |
2088 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell, | 2069 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell, |
2089 Register scratch, CRegister cr) { | 2070 Register scratch, CRegister) { |
2090 mov(scratch, Operand(cell)); | 2071 mov(scratch, Operand(cell)); |
2091 LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset)); | 2072 CmpP(value, FieldMemOperand(scratch, WeakCell::kValueOffset)); |
2092 cmp(value, scratch, cr); | |
2093 } | 2073 } |
2094 | 2074 |
2095 | |
2096 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) { | 2075 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) { |
2097 mov(value, Operand(cell)); | 2076 mov(value, Operand(cell)); |
2098 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset)); | 2077 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset)); |
2099 } | 2078 } |
2100 | 2079 |
2101 | |
2102 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, | 2080 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, |
2103 Label* miss) { | 2081 Label* miss) { |
2104 GetWeakValue(value, cell); | 2082 GetWeakValue(value, cell); |
2105 JumpIfSmi(value, miss); | 2083 JumpIfSmi(value, miss); |
2106 } | 2084 } |
2107 | 2085 |
2108 | |
2109 void MacroAssembler::GetMapConstructor(Register result, Register map, | 2086 void MacroAssembler::GetMapConstructor(Register result, Register map, |
2110 Register temp, Register temp2) { | 2087 Register temp, Register temp2) { |
2111 Label done, loop; | 2088 Label done, loop; |
2112 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); | 2089 LoadP(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); |
2113 bind(&loop); | 2090 bind(&loop); |
2114 JumpIfSmi(result, &done); | 2091 JumpIfSmi(result, &done); |
2115 CompareObjectType(result, temp, temp2, MAP_TYPE); | 2092 CompareObjectType(result, temp, temp2, MAP_TYPE); |
2116 bne(&done); | 2093 bne(&done); |
2117 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); | 2094 LoadP(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); |
2118 b(&loop); | 2095 b(&loop); |
2119 bind(&done); | 2096 bind(&done); |
2120 } | 2097 } |
2121 | 2098 |
2122 | |
2123 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, | 2099 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, |
2124 Register scratch, Label* miss) { | 2100 Register scratch, Label* miss) { |
2125 // Get the prototype or initial map from the function. | 2101 // Get the prototype or initial map from the function. |
2126 LoadP(result, | 2102 LoadP(result, |
2127 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 2103 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
2128 | 2104 |
2129 // If the prototype or initial map is the hole, don't return it and | 2105 // If the prototype or initial map is the hole, don't return it and |
2130 // simply miss the cache instead. This will allow us to allocate a | 2106 // simply miss the cache instead. This will allow us to allocate a |
2131 // prototype object on-demand in the runtime system. | 2107 // prototype object on-demand in the runtime system. |
2132 LoadRoot(r0, Heap::kTheHoleValueRootIndex); | 2108 CompareRoot(result, Heap::kTheHoleValueRootIndex); |
2133 cmp(result, r0); | |
2134 beq(miss); | 2109 beq(miss); |
2135 | 2110 |
2136 // If the function does not have an initial map, we're done. | 2111 // If the function does not have an initial map, we're done. |
2137 Label done; | 2112 Label done; |
2138 CompareObjectType(result, scratch, scratch, MAP_TYPE); | 2113 CompareObjectType(result, scratch, scratch, MAP_TYPE); |
2139 bne(&done); | 2114 bne(&done, Label::kNear); |
2140 | 2115 |
2141 // Get the prototype from the initial map. | 2116 // Get the prototype from the initial map. |
2142 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 2117 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
2143 | 2118 |
2144 // All done. | 2119 // All done. |
2145 bind(&done); | 2120 bind(&done); |
2146 } | 2121 } |
2147 | 2122 |
2148 | |
2149 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id, | 2123 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id, |
2150 Condition cond) { | 2124 Condition cond) { |
2151 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. | 2125 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. |
2152 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); | 2126 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); |
2153 } | 2127 } |
2154 | 2128 |
2155 | |
2156 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { | 2129 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { |
2157 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); | 2130 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); |
2158 } | 2131 } |
2159 | 2132 |
2160 | |
2161 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 2133 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
2162 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 2134 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
2163 } | 2135 } |
2164 | 2136 |
2165 | |
2166 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 2137 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
2167 // If the hash field contains an array index pick it out. The assert checks | 2138 // If the hash field contains an array index pick it out. The assert checks |
2168 // that the constants for the maximum number of digits for an array index | 2139 // that the constants for the maximum number of digits for an array index |
2169 // cached in the hash field and the number of bits reserved for it does not | 2140 // cached in the hash field and the number of bits reserved for it does not |
2170 // conflict. | 2141 // conflict. |
2171 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < | 2142 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
2172 (1 << String::kArrayIndexValueBits)); | 2143 (1 << String::kArrayIndexValueBits)); |
2173 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); | 2144 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash); |
2174 } | 2145 } |
2175 | 2146 |
2176 | |
2177 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) { | |
2178 SmiUntag(ip, smi); | |
2179 ConvertIntToDouble(ip, value); | |
2180 } | |
2181 | |
2182 | |
2183 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input, | 2147 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input, |
2184 Register scratch1, Register scratch2, | 2148 Register scratch1, Register scratch2, |
2185 DoubleRegister double_scratch) { | 2149 DoubleRegister double_scratch) { |
2186 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch); | 2150 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch); |
2187 } | 2151 } |
2188 | 2152 |
2189 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input, | 2153 void MacroAssembler::TestDoubleIsMinusZero(DoubleRegister input, |
2190 Register scratch1, | 2154 Register scratch1, |
2191 Register scratch2) { | 2155 Register scratch2) { |
2192 #if V8_TARGET_ARCH_PPC64 | 2156 lgdr(scratch1, input); |
2193 MovDoubleToInt64(scratch1, input); | 2157 #if V8_TARGET_ARCH_S390X |
2194 rotldi(scratch1, scratch1, 1); | 2158 llihf(scratch2, Operand(0x80000000)); // scratch2 = 0x80000000_00000000 |
2195 cmpi(scratch1, Operand(1)); | 2159 CmpP(scratch1, scratch2); |
2196 #else | 2160 #else |
2197 MovDoubleToInt64(scratch1, scratch2, input); | |
2198 Label done; | 2161 Label done; |
2199 cmpi(scratch2, Operand::Zero()); | 2162 CmpP(scratch1, Operand::Zero()); |
2200 bne(&done); | 2163 bne(&done, Label::kNear); |
2201 lis(scratch2, Operand(SIGN_EXT_IMM16(0x8000))); | 2164 |
2202 cmp(scratch1, scratch2); | 2165 srlg(scratch1, scratch1, Operand(32)); |
| 2166 CmpP(scratch1, Operand(HeapNumber::kSignMask)); |
2203 bind(&done); | 2167 bind(&done); |
2204 #endif | 2168 #endif |
2205 } | 2169 } |
2206 | 2170 |
2207 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) { | 2171 void MacroAssembler::TestDoubleSign(DoubleRegister input, Register scratch) { |
2208 #if V8_TARGET_ARCH_PPC64 | 2172 stdy(input, MemOperand(sp, -kDoubleSize)); |
2209 MovDoubleToInt64(scratch, input); | 2173 LoadlW(scratch, MemOperand(sp, -kDoubleSize + Register::kExponentOffset)); |
2210 #else | 2174 Cmp32(scratch, Operand::Zero()); |
2211 MovDoubleHighToInt(scratch, input); | |
2212 #endif | |
2213 cmpi(scratch, Operand::Zero()); | |
2214 } | 2175 } |
2215 | 2176 |
2216 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) { | 2177 void MacroAssembler::TestHeapNumberSign(Register input, Register scratch) { |
2217 #if V8_TARGET_ARCH_PPC64 | 2178 LoadlW(scratch, FieldMemOperand(input, HeapNumber::kValueOffset + |
2218 LoadP(scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | 2179 Register::kExponentOffset)); |
2219 #else | 2180 Cmp32(scratch, Operand::Zero()); |
2220 lwz(scratch, FieldMemOperand(input, HeapNumber::kExponentOffset)); | |
2221 #endif | |
2222 cmpi(scratch, Operand::Zero()); | |
2223 } | 2181 } |
2224 | 2182 |
2225 void MacroAssembler::TryDoubleToInt32Exact(Register result, | 2183 void MacroAssembler::TryDoubleToInt32Exact(Register result, |
2226 DoubleRegister double_input, | 2184 DoubleRegister double_input, |
2227 Register scratch, | 2185 Register scratch, |
2228 DoubleRegister double_scratch) { | 2186 DoubleRegister double_scratch) { |
2229 Label done; | 2187 Label done; |
2230 DCHECK(!double_input.is(double_scratch)); | 2188 DCHECK(!double_input.is(double_scratch)); |
2231 | 2189 |
2232 ConvertDoubleToInt64(double_input, | 2190 ConvertDoubleToInt64(double_input, |
2233 #if !V8_TARGET_ARCH_PPC64 | 2191 #if !V8_TARGET_ARCH_S390X |
2234 scratch, | 2192 scratch, |
2235 #endif | 2193 #endif |
2236 result, double_scratch); | 2194 result, double_scratch); |
2237 | 2195 |
2238 #if V8_TARGET_ARCH_PPC64 | 2196 #if V8_TARGET_ARCH_S390X |
2239 TestIfInt32(result, r0); | 2197 TestIfInt32(result, r0); |
2240 #else | 2198 #else |
2241 TestIfInt32(scratch, result, r0); | 2199 TestIfInt32(scratch, result, r0); |
2242 #endif | 2200 #endif |
2243 bne(&done); | 2201 bne(&done); |
2244 | 2202 |
2245 // convert back and compare | 2203 // convert back and compare |
2246 fcfid(double_scratch, double_scratch); | 2204 lgdr(scratch, double_scratch); |
2247 fcmpu(double_scratch, double_input); | 2205 cdfbr(double_scratch, scratch); |
| 2206 cdbr(double_scratch, double_input); |
2248 bind(&done); | 2207 bind(&done); |
2249 } | 2208 } |
2250 | 2209 |
2251 | |
2252 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input, | 2210 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input, |
2253 Register input_high, Register scratch, | 2211 Register input_high, Register scratch, |
2254 DoubleRegister double_scratch, Label* done, | 2212 DoubleRegister double_scratch, Label* done, |
2255 Label* exact) { | 2213 Label* exact) { |
2256 DCHECK(!result.is(input_high)); | 2214 DCHECK(!result.is(input_high)); |
2257 DCHECK(!double_input.is(double_scratch)); | 2215 DCHECK(!double_input.is(double_scratch)); |
2258 Label exception; | 2216 Label exception; |
2259 | 2217 |
2260 MovDoubleHighToInt(input_high, double_input); | 2218 // Move high word into input_high |
| 2219 StoreDouble(double_input, MemOperand(sp, -kDoubleSize)); |
| 2220 lay(sp, MemOperand(sp, -kDoubleSize)); |
| 2221 LoadlW(input_high, MemOperand(sp, Register::kExponentOffset)); |
| 2222 la(sp, MemOperand(sp, kDoubleSize)); |
2261 | 2223 |
2262 // Test for NaN/Inf | 2224 // Test for NaN/Inf |
2263 ExtractBitMask(result, input_high, HeapNumber::kExponentMask); | 2225 ExtractBitMask(result, input_high, HeapNumber::kExponentMask); |
2264 cmpli(result, Operand(0x7ff)); | 2226 CmpLogicalP(result, Operand(0x7ff)); |
2265 beq(&exception); | 2227 beq(&exception); |
2266 | 2228 |
2267 // Convert (rounding to -Inf) | 2229 // Convert (rounding to -Inf) |
2268 ConvertDoubleToInt64(double_input, | 2230 ConvertDoubleToInt64(double_input, |
2269 #if !V8_TARGET_ARCH_PPC64 | 2231 #if !V8_TARGET_ARCH_S390X |
2270 scratch, | 2232 scratch, |
2271 #endif | 2233 #endif |
2272 result, double_scratch, kRoundToMinusInf); | 2234 result, double_scratch, kRoundToMinusInf); |
2273 | 2235 |
2274 // Test for overflow | 2236 // Test for overflow |
2275 #if V8_TARGET_ARCH_PPC64 | 2237 #if V8_TARGET_ARCH_S390X |
2276 TestIfInt32(result, r0); | 2238 TestIfInt32(result, r0); |
2277 #else | 2239 #else |
2278 TestIfInt32(scratch, result, r0); | 2240 TestIfInt32(scratch, result, r0); |
2279 #endif | 2241 #endif |
2280 bne(&exception); | 2242 bne(&exception); |
2281 | 2243 |
2282 // Test for exactness | 2244 // Test for exactness |
2283 fcfid(double_scratch, double_scratch); | 2245 lgdr(scratch, double_scratch); |
2284 fcmpu(double_scratch, double_input); | 2246 cdfbr(double_scratch, scratch); |
| 2247 cdbr(double_scratch, double_input); |
2285 beq(exact); | 2248 beq(exact); |
2286 b(done); | 2249 b(done); |
2287 | 2250 |
2288 bind(&exception); | 2251 bind(&exception); |
2289 } | 2252 } |
2290 | 2253 |
| 2254 void MacroAssembler::FloatCeiling32(DoubleRegister double_output, |
| 2255 DoubleRegister double_input, |
| 2256 Register scratch, |
| 2257 DoubleRegister double_scratch) { |
| 2258 Label not_zero, no_nan_inf, done, do_ceil; |
| 2259 Register scratch2 = r0; |
| 2260 |
 | 2261 // Move the float's 32-bit pattern into scratch |
| 2262 MovFloatToInt(scratch, double_input); |
| 2263 |
| 2264 // Test for NaN/Inf which results in NaN/Inf respectively |
| 2265 static const uint32_t float32ExponentMask = 0x7f800000u; |
| 2266 ExtractBitMask(scratch2, scratch, float32ExponentMask); |
| 2267 CmpLogical32(scratch2, Operand(0xff)); |
| 2268 bne(&no_nan_inf, Label::kNear); |
| 2269 Move(double_output, double_input); |
| 2270 b(&done); |
| 2271 bind(&no_nan_inf); |
| 2272 |
| 2273 // Test for double_input in (-1, -0) which results in -0 |
| 2274 LoadFloat32Literal(double_scratch, -1.0, scratch2); |
| 2275 cebr(double_input, double_scratch); |
| 2276 ble(&do_ceil, Label::kNear); |
| 2277 Cmp32(scratch, Operand::Zero()); |
| 2278 bgt(&do_ceil, Label::kNear); |
| 2279 bne(¬_zero, Label::kNear); |
| 2280 |
| 2281 // double_input = +/- 0 which results in +/- 0 respectively |
| 2282 Move(double_output, double_input); |
| 2283 b(&done); |
| 2284 bind(¬_zero); |
| 2285 |
| 2286 // double_output = -0 |
| 2287 llihf(scratch2, Operand(0x80000000)); |
| 2288 ldgr(double_output, scratch2); |
| 2289 b(&done); |
| 2290 bind(&do_ceil); |
| 2291 |
| 2292 // Regular case |
| 2293 // cgdbr(Condition(6), scratch, double_input); |
| 2294 // cdfbr(double_output, scratch); |
| 2295 fiebra(double_output, double_input, FIDBRA_ROUND_TOWARD_POS_INF); |
| 2296 bind(&done); |
| 2297 } |
| 2298 |
| 2299 void MacroAssembler::FloatFloor32(DoubleRegister double_output, |
| 2300 DoubleRegister double_input, |
| 2301 Register scratch) { |
| 2302 Label not_zero, no_nan_inf, done, do_floor; |
| 2303 Register scratch2 = r0; |
| 2304 |
| 2305 // Move high word into scratch |
| 2306 MovFloatToInt(scratch, double_input); |
| 2307 |
| 2308 // Test for NaN/Inf which results in NaN/Inf respectively |
| 2309 static const uint32_t float32ExponentMask = 0x7f800000u; |
| 2310 ExtractBitMask(scratch2, scratch, float32ExponentMask); |
| 2311 CmpLogical32(scratch2, Operand(0xff)); |
| 2312 bne(&no_nan_inf, Label::kNear); |
| 2313 Move(double_output, double_input); |
| 2314 b(&done); |
| 2315 bind(&no_nan_inf); |
| 2316 |
| 2317 // Test for double_input=+/- 0 which results in +/- 0 respectively |
| 2318 ltebr(double_input, double_input); |
| 2319 bne(&do_floor, Label::kNear); |
| 2320 Move(double_output, double_input); |
| 2321 b(&done); |
| 2322 bind(&do_floor); |
| 2323 |
| 2324 // Regular case |
| 2325 // cgdbr(Condition(7), scratch, double_input); |
| 2326 // cdfbr(double_output, scratch); |
| 2327 fiebra(double_output, double_input, FIDBRA_ROUND_TOWARD_NEG_INF); |
| 2328 bind(&done); |
| 2329 } |
| 2330 |
// Computes ceil() of a 64-bit double into double_output.
// NaN/Inf, +/-0, and the (-1, -0) interval (which must produce -0) are
// handled explicitly; all remaining inputs go through the FIDBRA
// round-toward-+infinity instruction.
void MacroAssembler::FloatCeiling64(DoubleRegister double_output,
                                    DoubleRegister double_input,
                                    Register scratch,
                                    DoubleRegister double_scratch) {
  Label not_zero, no_nan_inf, done, do_ceil;
  Register scratch2 = r0;

  // Move high word into scratch via a spill slot below sp.
  StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
  LoadlW(scratch, MemOperand(sp, -kDoubleSize + Register::kExponentOffset));

  // Test for NaN/Inf (exponent field all ones) which results in NaN/Inf
  // respectively.
  ExtractBitMask(scratch2, scratch, HeapNumber::kExponentMask);
  CmpLogicalP(scratch2, Operand(0x7ff));
  bne(&no_nan_inf, Label::kNear);
  Move(double_output, double_input);
  b(&done);
  bind(&no_nan_inf);

  // Test for double_input in (-1, -0) which results in -0
  LoadDoubleLiteral(double_scratch, -1.0, scratch2);
  cdbr(double_input, double_scratch);
  ble(&do_ceil, Label::kNear);   // input <= -1.0: ordinary ceil.
  Cmp32(scratch, Operand::Zero());
  bgt(&do_ceil, Label::kNear);   // high word > 0, i.e. positive: ordinary ceil.
  bne(&not_zero, Label::kNear);  // sign bit set: input is in (-1, -0], yield -0.

  // double_input = +0 (high word exactly zero); pass it through.
  Move(double_output, double_input);
  b(&done);
  bind(&not_zero);

  // double_output = -0
  llihf(scratch2, Operand(0x80000000));
  ldgr(double_output, scratch2);
  b(&done);
  bind(&do_ceil);

  // Regular case
  // cgdbr(Condition(6), scratch, double_input);
  // cdfbr(double_output, scratch);
  fidbra(double_output, double_input, FIDBRA_ROUND_TOWARD_POS_INF);
  bind(&done);
}
| 2375 |
| 2376 void MacroAssembler::FloatFloor64(DoubleRegister double_output, |
| 2377 DoubleRegister double_input, |
| 2378 Register scratch) { |
| 2379 Label not_zero, no_nan_inf, done, do_floor; |
| 2380 Register scratch2 = r0; |
| 2381 |
| 2382 // Move high word into scratch |
| 2383 StoreDouble(double_input, MemOperand(sp, -kDoubleSize)); |
| 2384 LoadlW(scratch, MemOperand(sp, -kDoubleSize + Register::kExponentOffset)); |
| 2385 |
| 2386 // Test for NaN/Inf which results in NaN/Inf respectively |
| 2387 ExtractBitMask(scratch2, scratch, HeapNumber::kExponentMask); |
| 2388 CmpLogicalP(scratch2, Operand(0x7ff)); |
| 2389 bne(&no_nan_inf, Label::kNear); |
| 2390 Move(double_output, double_input); |
| 2391 b(&done); |
| 2392 bind(&no_nan_inf); |
| 2393 |
| 2394 // Test for double_input=+/- 0 which results in +/- 0 respectively |
| 2395 ltdbr(double_input, double_input); |
| 2396 bne(&do_floor, Label::kNear); |
| 2397 Move(double_output, double_input); |
| 2398 b(&done); |
| 2399 bind(&do_floor); |
| 2400 |
| 2401 // Regular case |
| 2402 // cgdbr(Condition(7), scratch, double_input); |
| 2403 // cdfbr(double_output, scratch); |
| 2404 fidbra(double_output, double_input, FIDBRA_ROUND_TOWARD_NEG_INF); |
| 2405 bind(&done); |
| 2406 } |
2291 | 2407 |
// Attempts an inline double -> int32 truncation; branches to `done` on
// success. Falling through means the value did not fit in an int32 and the
// caller must use the slow path (DoubleToIStub).
void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
                                                DoubleRegister double_input,
                                                Label* done) {
  DoubleRegister double_scratch = kScratchDoubleReg;
#if !V8_TARGET_ARCH_S390X
  // On 31/32-bit the 64-bit conversion result needs a second register for
  // the high word.
  Register scratch = ip;
#endif

  ConvertDoubleToInt64(double_input,
#if !V8_TARGET_ARCH_S390X
                       scratch,
#endif
                       result, double_scratch);

  // Test for overflow
#if V8_TARGET_ARCH_S390X
  TestIfInt32(result, r0);
#else
  TestIfInt32(scratch, result, r0);
#endif
  beq(done);  // Conversion fits in int32: success.
}
2314 | 2430 |
2315 | |
// Truncates a double to an int32 (ECMA ToInt32 semantics), trying the fast
// inline conversion first and falling back to DoubleToIStub.
// Clobbers the stack: temporarily pushes r14 (return address register) and
// a kDoubleSize spill slot.
void MacroAssembler::TruncateDoubleToI(Register result,
                                       DoubleRegister double_input) {
  Label done;

  TryInlineTruncateDoubleToI(result, double_input, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(r14);
  // Put input on stack.
  StoreDouble(double_input, MemOperand(sp, -kDoubleSize));
  lay(sp, MemOperand(sp, -kDoubleSize));

  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
  CallStub(&stub);

  // Pop the spill slot and restore r14.
  la(sp, MemOperand(sp, kDoubleSize));
  pop(r14);

  bind(&done);
}
2337 | 2451 |
2338 | |
// Truncates the value of a HeapNumber `object` to an int32 in `result`,
// using the inline fast path first and DoubleToIStub as the fallback.
// `result` must differ from `object` since `object` is still needed by the
// stub on the slow path.
void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
  Label done;
  DoubleRegister double_scratch = kScratchDoubleReg;
  DCHECK(!result.is(object));

  LoadDouble(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
  TryInlineTruncateDoubleToI(result, double_scratch, &done);

  // If we fell through then inline version didn't succeed - call stub instead.
  push(r14);  // Preserve the return address register across the stub call.
  DoubleToIStub stub(isolate(), object, result,
                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
  CallStub(&stub);
  pop(r14);

  bind(&done);
}
2358 | 2469 |
2359 | |
// Truncates a Number (Smi or HeapNumber) in `object` to an int32 in
// `result`. Smis take the untag fast path; HeapNumbers go through
// TruncateHeapNumberToI; anything else branches to `not_number`.
void MacroAssembler::TruncateNumberToI(Register object, Register result,
                                       Register heap_number_map,
                                       Register scratch1, Label* not_number) {
  Label done;
  DCHECK(!result.is(object));

  UntagAndJumpIfSmi(result, object, &done);
  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
  TruncateHeapNumberToI(result, object);

  bind(&done);
}
2372 | 2482 |
2373 | |
// Extracts the low `num_least_bits` bits of the untagged value of Smi `src`
// into `dst`. With the general-instructions-extension facility this is a
// single rotate-and-insert (RISBG); otherwise untag then mask.
void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
                                         int num_least_bits) {
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    // We rotate by kSmiShift amount, and extract the num_least_bits
    risbg(dst, src, Operand(64 - num_least_bits), Operand(63),
          Operand(64 - kSmiShift), true);
  } else {
    SmiUntag(dst, src);
    AndP(dst, Operand((1 << num_least_bits) - 1));
  }
}
2384 | 2494 |
2385 | |
// Extracts the low `num_least_bits` bits of the int32 in `src` into `dst`.
void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
                                           int num_least_bits) {
  AndP(dst, src, Operand((1 << num_least_bits) - 1));
}
2390 | 2499 |
2391 | |
// Calls the C++ runtime function `f` through CEntryStub.
// S390 calling convention: argument count in r2, runtime entry in r3.
void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
                                 SaveFPRegsMode save_doubles) {
  // All parameters are on the stack. r2 has the return value after call.

  // If the expected number of arguments of the runtime function is
  // constant, we check that the actual number of arguments match the
  // expectation.
  CHECK(f->nargs < 0 || f->nargs == num_arguments);

  // TODO(1236192): Most runtime routines don't need the number of
  // arguments passed in because it is constant. At some point we
  // should remove this need and make the runtime routine entry code
  // smarter.
  mov(r2, Operand(num_arguments));
  mov(r3, Operand(ExternalReference(f, isolate())));
  CEntryStub stub(isolate(),
#if V8_TARGET_ARCH_S390X
                  f->result_size,
#else
                  1,
#endif
                  save_doubles);
  CallStub(&stub);
}
2416 | 2524 |
2417 | |
// Calls an external (C) reference through CEntryStub.
// Argument count goes in r2, the target address in r3.
void MacroAssembler::CallExternalReference(const ExternalReference& ext,
                                           int num_arguments) {
  mov(r2, Operand(num_arguments));
  mov(r3, Operand(ext));

  CEntryStub stub(isolate(), 1);
  CallStub(&stub);
}
2426 | 2533 |
2427 | |
// Tail-calls the runtime function `fid`: loads the argument count into r2
// (when it is statically known) and jumps rather than calls.
void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
  const Runtime::Function* function = Runtime::FunctionForId(fid);
  DCHECK_EQ(1, function->result_size);
  if (function->nargs >= 0) {
    mov(r2, Operand(function->nargs));
  }
  JumpToExternalReference(ExternalReference(fid, isolate()));
}
2436 | 2542 |
2437 | |
// Jumps (tail-call) to `builtin` through CEntryStub, with the target
// address in r3.
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
  mov(r3, Operand(builtin));
  CEntryStub stub(isolate(), 1);
  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
2443 | 2548 |
2444 | |
// Stores `value` into the stats counter cell, if native code counters are
// enabled. scratch1 holds the value, scratch2 the counter address.
void MacroAssembler::SetCounter(StatsCounter* counter, int value,
                                Register scratch1, Register scratch2) {
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(value));
    mov(scratch2, Operand(ExternalReference(counter)));
    StoreW(scratch1, MemOperand(scratch2));
  }
}
2453 | 2557 |
2454 | |
// Adds `value` to the stats counter cell via a load/add/store sequence,
// if native code counters are enabled. The is_int8 restriction keeps the
// constant usable by a future asi()-based optimization (see TODO).
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(ExternalReference(counter)));
    // @TODO(john.yan): can be optimized by asi()
    LoadW(scratch2, MemOperand(scratch1));
    AddP(scratch2, Operand(value));
    StoreW(scratch2, MemOperand(scratch1));
  }
}
2465 | 2569 |
2466 | |
// Subtracts `value` from the stats counter cell via load/add(-value)/store,
// if native code counters are enabled. Mirrors IncrementCounter.
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                      Register scratch1, Register scratch2) {
  DCHECK(value > 0 && is_int8(value));
  if (FLAG_native_code_counters && counter->Enabled()) {
    mov(scratch1, Operand(ExternalReference(counter)));
    // @TODO(john.yan): can be optimized by asi()
    LoadW(scratch2, MemOperand(scratch1));
    AddP(scratch2, Operand(-value));
    StoreW(scratch2, MemOperand(scratch1));
  }
}
2477 | 2581 |
2478 | |
// Debug-build-only assertion: emits a Check (abort on failed condition)
// only when emit_debug_code() is on.
void MacroAssembler::Assert(Condition cond, BailoutReason reason,
                            CRegister cr) {
  if (emit_debug_code()) Check(cond, reason, cr);
}
2483 | 2586 |
2484 | |
// Debug check that `elements` holds a fast-elements backing store: its map
// must be FixedArray, FixedDoubleArray, or FixedCOWArray. Aborts otherwise.
// `elements` is preserved via push/pop while its map is inspected in place.
void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    DCHECK(!elements.is(r0));
    Label ok;
    push(elements);
    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
    CompareRoot(elements, Heap::kFixedArrayMapRootIndex);
    beq(&ok, Label::kNear);
    CompareRoot(elements, Heap::kFixedDoubleArrayMapRootIndex);
    beq(&ok, Label::kNear);
    CompareRoot(elements, Heap::kFixedCOWArrayMapRootIndex);
    beq(&ok, Label::kNear);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    bind(&ok);
    pop(elements);
  }
}
2505 | 2604 |
2506 | |
// Aborts with `reason` unless `cond` holds: branch over the Abort when the
// condition is true. The cr parameter is kept for interface compatibility
// but is not consulted here.
void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
  Label L;
  b(cond, &L);
  Abort(reason);
  // will not return here
  bind(&L);
}
2514 | 2612 |
2515 | |
2516 void MacroAssembler::Abort(BailoutReason reason) { | 2613 void MacroAssembler::Abort(BailoutReason reason) { |
2517 Label abort_start; | 2614 Label abort_start; |
2518 bind(&abort_start); | 2615 bind(&abort_start); |
2519 #ifdef DEBUG | 2616 #ifdef DEBUG |
2520 const char* msg = GetBailoutReason(reason); | 2617 const char* msg = GetBailoutReason(reason); |
2521 if (msg != NULL) { | 2618 if (msg != NULL) { |
2522 RecordComment("Abort message: "); | 2619 RecordComment("Abort message: "); |
2523 RecordComment(msg); | 2620 RecordComment(msg); |
2524 } | 2621 } |
2525 | 2622 |
(...skipping 10 matching lines...) Expand all Loading... |
2536 // We don't actually want to generate a pile of code for this, so just | 2633 // We don't actually want to generate a pile of code for this, so just |
2537 // claim there is a stack frame, without generating one. | 2634 // claim there is a stack frame, without generating one. |
2538 FrameScope scope(this, StackFrame::NONE); | 2635 FrameScope scope(this, StackFrame::NONE); |
2539 CallRuntime(Runtime::kAbort); | 2636 CallRuntime(Runtime::kAbort); |
2540 } else { | 2637 } else { |
2541 CallRuntime(Runtime::kAbort); | 2638 CallRuntime(Runtime::kAbort); |
2542 } | 2639 } |
2543 // will not return here | 2640 // will not return here |
2544 } | 2641 } |
2545 | 2642 |
2546 | |
2547 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 2643 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
2548 if (context_chain_length > 0) { | 2644 if (context_chain_length > 0) { |
2549 // Move up the chain of contexts to the context containing the slot. | 2645 // Move up the chain of contexts to the context containing the slot. |
2550 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2646 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2551 for (int i = 1; i < context_chain_length; i++) { | 2647 for (int i = 1; i < context_chain_length; i++) { |
2552 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 2648 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
2553 } | 2649 } |
2554 } else { | 2650 } else { |
2555 // Slot is in the current function context. Move it into the | 2651 // Slot is in the current function context. Move it into the |
2556 // destination register in case we store into it (the write barrier | 2652 // destination register in case we store into it (the write barrier |
2557 // cannot be allowed to destroy the context in esi). | 2653 // cannot be allowed to destroy the context in esi). |
2558 mr(dst, cp); | 2654 LoadRR(dst, cp); |
2559 } | 2655 } |
2560 } | 2656 } |
2561 | 2657 |
2562 | |
// If map_in_out holds the native-context cached array map for
// `expected_kind`, replaces it with the cached map for `transitioned_kind`;
// otherwise branches to `no_map_match`. Clobbers scratch and ip.
void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind, ElementsKind transitioned_kind,
    Register map_in_out, Register scratch, Label* no_map_match) {
  DCHECK(IsFastElementsKind(expected_kind));
  DCHECK(IsFastElementsKind(transitioned_kind));

  // Check that the function's map is the same as the expected cached map.
  LoadP(scratch, NativeContextMemOperand());
  LoadP(ip, ContextMemOperand(scratch, Context::ArrayMapIndex(expected_kind)));
  CmpP(map_in_out, ip);
  bne(no_map_match);

  // Use the transitioned cached map.
  LoadP(map_in_out,
        ContextMemOperand(scratch, Context::ArrayMapIndex(transitioned_kind)));
}
2579 | 2674 |
2580 | |
// Loads slot `index` of the native context into `dst`.
void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
  LoadP(dst, NativeContextMemOperand());
  LoadP(dst, ContextMemOperand(dst, index));
}
2585 | 2679 |
2586 | |
// Loads the initial map of a global `function` into `map`. In debug code,
// verifies the loaded value really is a map (its map is the meta map) and
// aborts otherwise.
void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  LoadP(map,
        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    b(&ok);
    bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    bind(&ok);
  }
}
2602 | 2695 |
2603 | |
// Branches to the label unless `reg` is a (positive) power of two.
// Uses the classic reg & (reg - 1) == 0 test; the blt first filters out
// zero and negative values (reg - 1 < 0).
void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
  SubP(scratch, reg, Operand(1));
  CmpP(scratch, Operand::Zero());
  blt(not_power_of_two_or_zero);
  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
  bne(not_power_of_two_or_zero /*, cr0*/);
}
2612 | 2704 |
2613 | |
// Like JumpIfNotPowerOfTwoOrZero, but routes zero/negative inputs to a
// separate `zero_and_neg` label instead of the not-a-power-of-two label.
void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
                                                     Register scratch,
                                                     Label* zero_and_neg,
                                                     Label* not_power_of_two) {
  SubP(scratch, reg, Operand(1));
  CmpP(scratch, Operand::Zero());
  blt(zero_and_neg);  // reg - 1 < 0: reg was zero or negative.
  AndP(r0, reg, scratch /*, SetRC*/);  // Should be okay to remove rc
  bne(not_power_of_two /*, cr0*/);  // reg & (reg - 1) != 0: not a power of 2.
}
2624 | 2715 |
// 31/32-bit only: Smi-tags a value and leaves an overflow indication in
// `overflow` — negative iff the tagged value overflowed (value ^ 2*value
// has the sign bit set when the shift lost information). The condition
// code is also set via LoadAndTestRR so callers can branch directly.
#if !V8_TARGET_ARCH_S390X
void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
  DCHECK(!reg.is(overflow));
  LoadRR(overflow, reg);  // Save original value.
  SmiTag(reg);
  XorP(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
  LoadAndTestRR(overflow, overflow);
}

// Three-register variant: tags src into dst. Falls back to the in-place
// variant when dst aliases src.
void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
                                         Register overflow) {
  if (dst.is(src)) {
    // Fall back to slower case.
    SmiTagCheckOverflow(dst, overflow);
  } else {
    DCHECK(!dst.is(src));
    DCHECK(!dst.is(overflow));
    DCHECK(!src.is(overflow));
    SmiTag(dst, src);
    XorP(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
    LoadAndTestRR(overflow, overflow);
  }
}
#endif
2648 | 2740 |
// Branches if either register holds a non-Smi. ORing the two values keeps
// the tag bit set if it is set in either operand.
void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
                                      Label* on_not_both_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  OrP(r0, reg1, reg2 /*, LeaveRC*/);  // should be okay to remove LeaveRC
  JumpIfNotSmi(r0, on_not_both_smi);
}
2655 | 2747 |
2656 | |
2657 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src, | 2748 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src, |
2658 Label* smi_case) { | 2749 Label* smi_case) { |
2659 STATIC_ASSERT(kSmiTag == 0); | 2750 STATIC_ASSERT(kSmiTag == 0); |
2660 TestBitRange(src, kSmiTagSize - 1, 0, r0); | 2751 STATIC_ASSERT(kSmiTagSize == 1); |
| 2752 // this won't work if src == dst |
| 2753 DCHECK(src.code() != dst.code()); |
2661 SmiUntag(dst, src); | 2754 SmiUntag(dst, src); |
2662 beq(smi_case, cr0); | 2755 TestIfSmi(src); |
| 2756 beq(smi_case); |
2663 } | 2757 } |
2664 | 2758 |
2665 | |
// Untags src into dst, then branches to non_smi_case if src was NOT a Smi.
// Two code paths depending on aliasing; see the comment below.
void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
                                          Label* non_smi_case) {
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);

  // We can more optimally use TestIfSmi if dst != src
  // otherwise, the UnTag operation will kill the CC and we cannot
  // test the Tag bit.
  if (src.code() != dst.code()) {
    SmiUntag(dst, src);
    TestIfSmi(src);
  } else {
    // Aliased: capture the tag bit into r0 before untagging destroys it.
    TestBit(src, 0, r0);
    SmiUntag(dst, src);
    LoadAndTestRR(r0, r0);
  }
  bne(non_smi_case);
}
2673 | 2777 |
2674 | |
// Branches if at least one of the two registers holds a Smi.
void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
                                     Label* on_either_smi) {
  STATIC_ASSERT(kSmiTag == 0);
  JumpIfSmi(reg1, on_either_smi);
  JumpIfSmi(reg2, on_either_smi);
}
2681 | 2784 |
2682 | |
// Debug check: aborts if `object` is a Smi.
void MacroAssembler::AssertNotSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmi, cr0);
  }
}
2690 | 2792 |
2691 | |
// Debug check: aborts if `object` is not a Smi.
void MacroAssembler::AssertSmi(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(eq, kOperandIsNotSmi, cr0);
  }
}
2699 | 2800 |
2700 | |
// Debug check: aborts unless `object` is a String (non-Smi with instance
// type below FIRST_NONSTRING_TYPE). `object` is preserved via push/pop.
void MacroAssembler::AssertString(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAString, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
    pop(object);
    Check(lt, kOperandIsNotAString);
  }
}
2713 | 2813 |
2714 | |
// Debug check: aborts unless `object` is a Name (instance type at or below
// LAST_NAME_TYPE). `object` is preserved via push/pop.
void MacroAssembler::AssertName(Register object) {
  if (emit_debug_code()) {
    STATIC_ASSERT(kSmiTag == 0);
    TestIfSmi(object);
    Check(ne, kOperandIsASmiAndNotAName, cr0);
    push(object);
    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(object, object, LAST_NAME_TYPE);
    pop(object);
    Check(le, kOperandIsNotAName);
  }
}
2727 | 2826 |
2728 | |
2729 void MacroAssembler::AssertFunction(Register object) { | 2827 void MacroAssembler::AssertFunction(Register object) { |
2730 if (emit_debug_code()) { | 2828 if (emit_debug_code()) { |
2731 STATIC_ASSERT(kSmiTag == 0); | 2829 STATIC_ASSERT(kSmiTag == 0); |
2732 TestIfSmi(object, r0); | 2830 TestIfSmi(object); |
2733 Check(ne, kOperandIsASmiAndNotAFunction, cr0); | 2831 Check(ne, kOperandIsASmiAndNotAFunction, cr0); |
2734 push(object); | 2832 push(object); |
2735 CompareObjectType(object, object, object, JS_FUNCTION_TYPE); | 2833 CompareObjectType(object, object, object, JS_FUNCTION_TYPE); |
2736 pop(object); | 2834 pop(object); |
2737 Check(eq, kOperandIsNotAFunction); | 2835 Check(eq, kOperandIsNotAFunction); |
2738 } | 2836 } |
2739 } | 2837 } |
2740 | 2838 |
2741 | |
2742 void MacroAssembler::AssertBoundFunction(Register object) { | 2839 void MacroAssembler::AssertBoundFunction(Register object) { |
2743 if (emit_debug_code()) { | 2840 if (emit_debug_code()) { |
2744 STATIC_ASSERT(kSmiTag == 0); | 2841 STATIC_ASSERT(kSmiTag == 0); |
2745 TestIfSmi(object, r0); | 2842 TestIfSmi(object); |
2746 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0); | 2843 Check(ne, kOperandIsASmiAndNotABoundFunction, cr0); |
2747 push(object); | 2844 push(object); |
2748 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE); | 2845 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE); |
2749 pop(object); | 2846 pop(object); |
2750 Check(eq, kOperandIsNotABoundFunction); | 2847 Check(eq, kOperandIsNotABoundFunction); |
2751 } | 2848 } |
2752 } | 2849 } |
2753 | 2850 |
2754 void MacroAssembler::AssertReceiver(Register object) { | 2851 void MacroAssembler::AssertReceiver(Register object) { |
2755 if (emit_debug_code()) { | 2852 if (emit_debug_code()) { |
2756 STATIC_ASSERT(kSmiTag == 0); | 2853 STATIC_ASSERT(kSmiTag == 0); |
2757 TestIfSmi(object, r0); | 2854 TestIfSmi(object); |
2758 Check(ne, kOperandIsASmiAndNotAReceiver, cr0); | 2855 Check(ne, kOperandIsASmiAndNotAReceiver, cr0); |
2759 push(object); | 2856 push(object); |
2760 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 2857 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
2761 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE); | 2858 CompareObjectType(object, object, object, FIRST_JS_RECEIVER_TYPE); |
2762 pop(object); | 2859 pop(object); |
2763 Check(ge, kOperandIsNotAReceiver); | 2860 Check(ge, kOperandIsNotAReceiver); |
2764 } | 2861 } |
2765 } | 2862 } |
2766 | 2863 |
2767 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, | 2864 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, |
2768 Register scratch) { | 2865 Register scratch) { |
2769 if (emit_debug_code()) { | 2866 if (emit_debug_code()) { |
2770 Label done_checking; | 2867 Label done_checking; |
2771 AssertNotSmi(object); | 2868 AssertNotSmi(object); |
2772 CompareRoot(object, Heap::kUndefinedValueRootIndex); | 2869 CompareRoot(object, Heap::kUndefinedValueRootIndex); |
2773 beq(&done_checking); | 2870 beq(&done_checking, Label::kNear); |
2774 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 2871 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
2775 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); | 2872 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); |
2776 Assert(eq, kExpectedUndefinedOrCell); | 2873 Assert(eq, kExpectedUndefinedOrCell); |
2777 bind(&done_checking); | 2874 bind(&done_checking); |
2778 } | 2875 } |
2779 } | 2876 } |
2780 | 2877 |
2781 | |
2782 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { | 2878 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { |
2783 if (emit_debug_code()) { | 2879 if (emit_debug_code()) { |
2784 CompareRoot(reg, index); | 2880 CompareRoot(reg, index); |
2785 Check(eq, kHeapNumberMapRegisterClobbered); | 2881 Check(eq, kHeapNumberMapRegisterClobbered); |
2786 } | 2882 } |
2787 } | 2883 } |
2788 | 2884 |
2789 | |
2790 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 2885 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
2791 Register heap_number_map, | 2886 Register heap_number_map, |
2792 Register scratch, | 2887 Register scratch, |
2793 Label* on_not_heap_number) { | 2888 Label* on_not_heap_number) { |
2794 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 2889 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
2795 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 2890 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
2796 cmp(scratch, heap_number_map); | 2891 CmpP(scratch, heap_number_map); |
2797 bne(on_not_heap_number); | 2892 bne(on_not_heap_number); |
2798 } | 2893 } |
2799 | 2894 |
2800 | |
2801 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( | 2895 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( |
2802 Register first, Register second, Register scratch1, Register scratch2, | 2896 Register first, Register second, Register scratch1, Register scratch2, |
2803 Label* failure) { | 2897 Label* failure) { |
2804 // Test that both first and second are sequential one-byte strings. | 2898 // Test that both first and second are sequential one-byte strings. |
2805 // Assume that they are non-smis. | 2899 // Assume that they are non-smis. |
2806 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | 2900 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
2807 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | 2901 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
2808 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 2902 LoadlB(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
2809 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | 2903 LoadlB(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
2810 | 2904 |
2811 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, | 2905 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, |
2812 scratch2, failure); | 2906 scratch2, failure); |
2813 } | 2907 } |
2814 | 2908 |
2815 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, | 2909 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, |
2816 Register second, | 2910 Register second, |
2817 Register scratch1, | 2911 Register scratch1, |
2818 Register scratch2, | 2912 Register scratch2, |
2819 Label* failure) { | 2913 Label* failure) { |
2820 // Check that neither is a smi. | 2914 // Check that neither is a smi. |
2821 and_(scratch1, first, second); | 2915 AndP(scratch1, first, second); |
2822 JumpIfSmi(scratch1, failure); | 2916 JumpIfSmi(scratch1, failure); |
2823 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1, | 2917 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1, |
2824 scratch2, failure); | 2918 scratch2, failure); |
2825 } | 2919 } |
2826 | 2920 |
2827 | |
2828 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg, | 2921 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg, |
2829 Label* not_unique_name) { | 2922 Label* not_unique_name) { |
2830 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 2923 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); |
2831 Label succeed; | 2924 Label succeed; |
2832 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 2925 AndP(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask)); |
2833 beq(&succeed, cr0); | 2926 beq(&succeed, Label::kNear); |
2834 cmpi(reg, Operand(SYMBOL_TYPE)); | 2927 CmpP(reg, Operand(SYMBOL_TYPE)); |
2835 bne(not_unique_name); | 2928 bne(not_unique_name); |
2836 | 2929 |
2837 bind(&succeed); | 2930 bind(&succeed); |
2838 } | 2931 } |
2839 | 2932 |
2840 | |
2841 // Allocates a heap number or jumps to the need_gc label if the young space | 2933 // Allocates a heap number or jumps to the need_gc label if the young space |
2842 // is full and a scavenge is needed. | 2934 // is full and a scavenge is needed. |
2843 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, | 2935 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, |
2844 Register scratch2, | 2936 Register scratch2, |
2845 Register heap_number_map, | 2937 Register heap_number_map, |
2846 Label* gc_required, | 2938 Label* gc_required, |
2847 TaggingMode tagging_mode, | 2939 TaggingMode tagging_mode, |
2848 MutableMode mode) { | 2940 MutableMode mode) { |
2849 // Allocate an object in the heap for the heap number and tag it as a heap | 2941 // Allocate an object in the heap for the heap number and tag it as a heap |
2850 // object. | 2942 // object. |
2851 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, | 2943 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, |
2852 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); | 2944 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); |
2853 | 2945 |
2854 Heap::RootListIndex map_index = mode == MUTABLE | 2946 Heap::RootListIndex map_index = mode == MUTABLE |
2855 ? Heap::kMutableHeapNumberMapRootIndex | 2947 ? Heap::kMutableHeapNumberMapRootIndex |
2856 : Heap::kHeapNumberMapRootIndex; | 2948 : Heap::kHeapNumberMapRootIndex; |
2857 AssertIsRoot(heap_number_map, map_index); | 2949 AssertIsRoot(heap_number_map, map_index); |
2858 | 2950 |
2859 // Store heap number map in the allocated object. | 2951 // Store heap number map in the allocated object. |
2860 if (tagging_mode == TAG_RESULT) { | 2952 if (tagging_mode == TAG_RESULT) { |
2861 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset), | 2953 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
2862 r0); | |
2863 } else { | 2954 } else { |
2864 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); | 2955 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); |
2865 } | 2956 } |
2866 } | 2957 } |
2867 | 2958 |
2868 | |
2869 void MacroAssembler::AllocateHeapNumberWithValue( | 2959 void MacroAssembler::AllocateHeapNumberWithValue( |
2870 Register result, DoubleRegister value, Register scratch1, Register scratch2, | 2960 Register result, DoubleRegister value, Register scratch1, Register scratch2, |
2871 Register heap_number_map, Label* gc_required) { | 2961 Register heap_number_map, Label* gc_required) { |
2872 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); | 2962 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required); |
2873 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | 2963 StoreDouble(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
2874 } | 2964 } |
2875 | 2965 |
2876 | |
2877 void MacroAssembler::AllocateJSValue(Register result, Register constructor, | 2966 void MacroAssembler::AllocateJSValue(Register result, Register constructor, |
2878 Register value, Register scratch1, | 2967 Register value, Register scratch1, |
2879 Register scratch2, Label* gc_required) { | 2968 Register scratch2, Label* gc_required) { |
2880 DCHECK(!result.is(constructor)); | 2969 DCHECK(!result.is(constructor)); |
2881 DCHECK(!result.is(scratch1)); | 2970 DCHECK(!result.is(scratch1)); |
2882 DCHECK(!result.is(scratch2)); | 2971 DCHECK(!result.is(scratch2)); |
2883 DCHECK(!result.is(value)); | 2972 DCHECK(!result.is(value)); |
2884 | 2973 |
2885 // Allocate JSValue in new space. | 2974 // Allocate JSValue in new space. |
2886 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); | 2975 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT); |
2887 | 2976 |
2888 // Initialize the JSValue. | 2977 // Initialize the JSValue. |
2889 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); | 2978 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); |
2890 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0); | 2979 StoreP(scratch1, FieldMemOperand(result, HeapObject::kMapOffset), r0); |
2891 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); | 2980 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); |
2892 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0); | 2981 StoreP(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset), r0); |
2893 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0); | 2982 StoreP(scratch1, FieldMemOperand(result, JSObject::kElementsOffset), r0); |
2894 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0); | 2983 StoreP(value, FieldMemOperand(result, JSValue::kValueOffset), r0); |
2895 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); | 2984 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); |
2896 } | 2985 } |
2897 | 2986 |
2898 | |
2899 void MacroAssembler::CopyBytes(Register src, Register dst, Register length, | 2987 void MacroAssembler::CopyBytes(Register src, Register dst, Register length, |
2900 Register scratch) { | 2988 Register scratch) { |
2901 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done; | 2989 Label big_loop, left_bytes, done, fake_call; |
2902 | 2990 |
2903 DCHECK(!scratch.is(r0)); | 2991 DCHECK(!scratch.is(r0)); |
2904 | 2992 |
2905 cmpi(length, Operand::Zero()); | 2993 // big loop moves 256 bytes at a time |
| 2994 bind(&big_loop); |
| 2995 CmpP(length, Operand(static_cast<intptr_t>(0x100))); |
| 2996 blt(&left_bytes); |
| 2997 |
| 2998 mvc(MemOperand(dst), MemOperand(src), 0x100); |
| 2999 |
| 3000 AddP(src, Operand(static_cast<intptr_t>(0x100))); |
| 3001 AddP(dst, Operand(static_cast<intptr_t>(0x100))); |
| 3002 SubP(length, Operand(static_cast<intptr_t>(0x100))); |
| 3003 b(&big_loop); |
| 3004 |
| 3005 bind(&left_bytes); |
| 3006 CmpP(length, Operand::Zero()); |
2906 beq(&done); | 3007 beq(&done); |
2907 | 3008 |
2908 // Check src alignment and length to see whether word_loop is possible | 3009 // TODO(john.yan): More optimal version is to use MVC |
2909 andi(scratch, src, Operand(kPointerSize - 1)); | 3010 // Sequence below has some undiagnosed issue. |
2910 beq(&aligned, cr0); | 3011 /* |
2911 subfic(scratch, scratch, Operand(kPointerSize * 2)); | 3012 b(scratch, &fake_call); // use brasl to Save mvc addr to scratch |
2912 cmp(length, scratch); | 3013 mvc(MemOperand(dst), MemOperand(src), 1); |
2913 blt(&byte_loop); | 3014 bind(&fake_call); |
| 3015 SubP(length, Operand(static_cast<intptr_t>(-1))); |
| 3016 ex(length, MemOperand(scratch)); // execute mvc instr above |
| 3017 AddP(src, length); |
| 3018 AddP(dst, length); |
| 3019 AddP(src, Operand(static_cast<intptr_t>(0x1))); |
| 3020 AddP(dst, Operand(static_cast<intptr_t>(0x1))); |
| 3021 */ |
2914 | 3022 |
2915 // Align src before copying in word size chunks. | 3023 mvc(MemOperand(dst), MemOperand(src), 1); |
2916 subi(scratch, scratch, Operand(kPointerSize)); | 3024 AddP(src, Operand(static_cast<intptr_t>(0x1))); |
2917 mtctr(scratch); | 3025 AddP(dst, Operand(static_cast<intptr_t>(0x1))); |
2918 bind(&align_loop); | 3026 SubP(length, Operand(static_cast<intptr_t>(0x1))); |
2919 lbz(scratch, MemOperand(src)); | |
2920 addi(src, src, Operand(1)); | |
2921 subi(length, length, Operand(1)); | |
2922 stb(scratch, MemOperand(dst)); | |
2923 addi(dst, dst, Operand(1)); | |
2924 bdnz(&align_loop); | |
2925 | 3027 |
2926 bind(&aligned); | 3028 b(&left_bytes); |
2927 | |
2928 // Copy bytes in word size chunks. | |
2929 if (emit_debug_code()) { | |
2930 andi(r0, src, Operand(kPointerSize - 1)); | |
2931 Assert(eq, kExpectingAlignmentForCopyBytes, cr0); | |
2932 } | |
2933 | |
2934 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2)); | |
2935 cmpi(scratch, Operand::Zero()); | |
2936 beq(&byte_loop); | |
2937 | |
2938 mtctr(scratch); | |
2939 bind(&word_loop); | |
2940 LoadP(scratch, MemOperand(src)); | |
2941 addi(src, src, Operand(kPointerSize)); | |
2942 subi(length, length, Operand(kPointerSize)); | |
2943 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { | |
2944 // currently false for PPC - but possible future opt | |
2945 StoreP(scratch, MemOperand(dst)); | |
2946 addi(dst, dst, Operand(kPointerSize)); | |
2947 } else { | |
2948 #if V8_TARGET_LITTLE_ENDIAN | |
2949 stb(scratch, MemOperand(dst, 0)); | |
2950 ShiftRightImm(scratch, scratch, Operand(8)); | |
2951 stb(scratch, MemOperand(dst, 1)); | |
2952 ShiftRightImm(scratch, scratch, Operand(8)); | |
2953 stb(scratch, MemOperand(dst, 2)); | |
2954 ShiftRightImm(scratch, scratch, Operand(8)); | |
2955 stb(scratch, MemOperand(dst, 3)); | |
2956 #if V8_TARGET_ARCH_PPC64 | |
2957 ShiftRightImm(scratch, scratch, Operand(8)); | |
2958 stb(scratch, MemOperand(dst, 4)); | |
2959 ShiftRightImm(scratch, scratch, Operand(8)); | |
2960 stb(scratch, MemOperand(dst, 5)); | |
2961 ShiftRightImm(scratch, scratch, Operand(8)); | |
2962 stb(scratch, MemOperand(dst, 6)); | |
2963 ShiftRightImm(scratch, scratch, Operand(8)); | |
2964 stb(scratch, MemOperand(dst, 7)); | |
2965 #endif | |
2966 #else | |
2967 #if V8_TARGET_ARCH_PPC64 | |
2968 stb(scratch, MemOperand(dst, 7)); | |
2969 ShiftRightImm(scratch, scratch, Operand(8)); | |
2970 stb(scratch, MemOperand(dst, 6)); | |
2971 ShiftRightImm(scratch, scratch, Operand(8)); | |
2972 stb(scratch, MemOperand(dst, 5)); | |
2973 ShiftRightImm(scratch, scratch, Operand(8)); | |
2974 stb(scratch, MemOperand(dst, 4)); | |
2975 ShiftRightImm(scratch, scratch, Operand(8)); | |
2976 #endif | |
2977 stb(scratch, MemOperand(dst, 3)); | |
2978 ShiftRightImm(scratch, scratch, Operand(8)); | |
2979 stb(scratch, MemOperand(dst, 2)); | |
2980 ShiftRightImm(scratch, scratch, Operand(8)); | |
2981 stb(scratch, MemOperand(dst, 1)); | |
2982 ShiftRightImm(scratch, scratch, Operand(8)); | |
2983 stb(scratch, MemOperand(dst, 0)); | |
2984 #endif | |
2985 addi(dst, dst, Operand(kPointerSize)); | |
2986 } | |
2987 bdnz(&word_loop); | |
2988 | |
2989 // Copy the last bytes if any left. | |
2990 cmpi(length, Operand::Zero()); | |
2991 beq(&done); | |
2992 | |
2993 bind(&byte_loop); | |
2994 mtctr(length); | |
2995 bind(&byte_loop_1); | |
2996 lbz(scratch, MemOperand(src)); | |
2997 addi(src, src, Operand(1)); | |
2998 stb(scratch, MemOperand(dst)); | |
2999 addi(dst, dst, Operand(1)); | |
3000 bdnz(&byte_loop_1); | |
3001 | |
3002 bind(&done); | 3029 bind(&done); |
3003 } | 3030 } |
3004 | 3031 |
3005 | |
3006 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address, | 3032 void MacroAssembler::InitializeNFieldsWithFiller(Register current_address, |
3007 Register count, | 3033 Register count, |
3008 Register filler) { | 3034 Register filler) { |
3009 Label loop; | 3035 Label loop; |
3010 mtctr(count); | |
3011 bind(&loop); | 3036 bind(&loop); |
3012 StoreP(filler, MemOperand(current_address)); | 3037 StoreP(filler, MemOperand(current_address)); |
3013 addi(current_address, current_address, Operand(kPointerSize)); | 3038 AddP(current_address, current_address, Operand(kPointerSize)); |
3014 bdnz(&loop); | 3039 BranchOnCount(r1, &loop); |
3015 } | 3040 } |
3016 | 3041 |
3017 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, | 3042 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, |
3018 Register end_address, | 3043 Register end_address, |
3019 Register filler) { | 3044 Register filler) { |
3020 Label done; | 3045 Label done; |
3021 sub(r0, end_address, current_address, LeaveOE, SetRC); | 3046 DCHECK(!filler.is(r1)); |
3022 beq(&done, cr0); | 3047 DCHECK(!current_address.is(r1)); |
3023 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2)); | 3048 DCHECK(!end_address.is(r1)); |
3024 InitializeNFieldsWithFiller(current_address, r0, filler); | 3049 SubP(r1, end_address, current_address /*, LeaveOE, SetRC*/); |
| 3050 beq(&done, Label::kNear); |
| 3051 ShiftRightP(r1, r1, Operand(kPointerSizeLog2)); |
| 3052 InitializeNFieldsWithFiller(current_address, r1, filler); |
3025 bind(&done); | 3053 bind(&done); |
3026 } | 3054 } |
3027 | 3055 |
3028 | |
3029 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( | 3056 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte( |
3030 Register first, Register second, Register scratch1, Register scratch2, | 3057 Register first, Register second, Register scratch1, Register scratch2, |
3031 Label* failure) { | 3058 Label* failure) { |
3032 const int kFlatOneByteStringMask = | 3059 const int kFlatOneByteStringMask = |
3033 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3060 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3034 const int kFlatOneByteStringTag = | 3061 const int kFlatOneByteStringTag = |
3035 kStringTag | kOneByteStringTag | kSeqStringTag; | 3062 kStringTag | kOneByteStringTag | kSeqStringTag; |
3036 andi(scratch1, first, Operand(kFlatOneByteStringMask)); | 3063 if (!scratch1.is(first)) LoadRR(scratch1, first); |
3037 andi(scratch2, second, Operand(kFlatOneByteStringMask)); | 3064 if (!scratch2.is(second)) LoadRR(scratch2, second); |
3038 cmpi(scratch1, Operand(kFlatOneByteStringTag)); | 3065 nilf(scratch1, Operand(kFlatOneByteStringMask)); |
| 3066 CmpP(scratch1, Operand(kFlatOneByteStringTag)); |
3039 bne(failure); | 3067 bne(failure); |
3040 cmpi(scratch2, Operand(kFlatOneByteStringTag)); | 3068 nilf(scratch2, Operand(kFlatOneByteStringMask)); |
| 3069 CmpP(scratch2, Operand(kFlatOneByteStringTag)); |
3041 bne(failure); | 3070 bne(failure); |
3042 } | 3071 } |
3043 | 3072 |
3044 | |
3045 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type, | 3073 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type, |
3046 Register scratch, | 3074 Register scratch, |
3047 Label* failure) { | 3075 Label* failure) { |
3048 const int kFlatOneByteStringMask = | 3076 const int kFlatOneByteStringMask = |
3049 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | 3077 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; |
3050 const int kFlatOneByteStringTag = | 3078 const int kFlatOneByteStringTag = |
3051 kStringTag | kOneByteStringTag | kSeqStringTag; | 3079 kStringTag | kOneByteStringTag | kSeqStringTag; |
3052 andi(scratch, type, Operand(kFlatOneByteStringMask)); | 3080 |
3053 cmpi(scratch, Operand(kFlatOneByteStringTag)); | 3081 if (!scratch.is(type)) LoadRR(scratch, type); |
| 3082 nilf(scratch, Operand(kFlatOneByteStringMask)); |
| 3083 CmpP(scratch, Operand(kFlatOneByteStringTag)); |
3054 bne(failure); | 3084 bne(failure); |
3055 } | 3085 } |
3056 | 3086 |
3057 static const int kRegisterPassedArguments = 8; | 3087 static const int kRegisterPassedArguments = 5; |
3058 | |
3059 | 3088 |
3060 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, | 3089 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, |
3061 int num_double_arguments) { | 3090 int num_double_arguments) { |
3062 int stack_passed_words = 0; | 3091 int stack_passed_words = 0; |
3063 if (num_double_arguments > DoubleRegister::kNumRegisters) { | 3092 if (num_double_arguments > DoubleRegister::kNumRegisters) { |
3064 stack_passed_words += | 3093 stack_passed_words += |
3065 2 * (num_double_arguments - DoubleRegister::kNumRegisters); | 3094 2 * (num_double_arguments - DoubleRegister::kNumRegisters); |
3066 } | 3095 } |
3067 // Up to 8 simple arguments are passed in registers r3..r10. | 3096 // Up to five simple arguments are passed in registers r2..r6 |
3068 if (num_reg_arguments > kRegisterPassedArguments) { | 3097 if (num_reg_arguments > kRegisterPassedArguments) { |
3069 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; | 3098 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; |
3070 } | 3099 } |
3071 return stack_passed_words; | 3100 return stack_passed_words; |
3072 } | 3101 } |
3073 | 3102 |
3074 | |
3075 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index, | 3103 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index, |
3076 Register value, | 3104 Register value, |
3077 uint32_t encoding_mask) { | 3105 uint32_t encoding_mask) { |
3078 Label is_object; | 3106 Label is_object; |
3079 TestIfSmi(string, r0); | 3107 TestIfSmi(string); |
3080 Check(ne, kNonObject, cr0); | 3108 Check(ne, kNonObject, cr0); |
3081 | 3109 |
3082 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset)); | 3110 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset)); |
3083 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); | 3111 LoadlB(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); |
3084 | 3112 |
3085 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); | 3113 AndP(ip, Operand(kStringRepresentationMask | kStringEncodingMask)); |
3086 cmpi(ip, Operand(encoding_mask)); | 3114 CmpP(ip, Operand(encoding_mask)); |
3087 Check(eq, kUnexpectedStringType); | 3115 Check(eq, kUnexpectedStringType); |
3088 | 3116 |
3089 // The index is assumed to be untagged coming in, tag it to compare with the | 3117 // The index is assumed to be untagged coming in, tag it to compare with the |
3090 // string length without using a temp register, it is restored at the end of | 3118 // string length without using a temp register, it is restored at the end of |
3091 // this function. | 3119 // this function. |
3092 #if !V8_TARGET_ARCH_PPC64 | 3120 #if !V8_TARGET_ARCH_S390X |
3093 Label index_tag_ok, index_tag_bad; | 3121 Label index_tag_ok, index_tag_bad; |
3094 JumpIfNotSmiCandidate(index, r0, &index_tag_bad); | 3122 JumpIfNotSmiCandidate(index, r0, &index_tag_bad); |
3095 #endif | 3123 #endif |
3096 SmiTag(index, index); | 3124 SmiTag(index, index); |
3097 #if !V8_TARGET_ARCH_PPC64 | 3125 #if !V8_TARGET_ARCH_S390X |
3098 b(&index_tag_ok); | 3126 b(&index_tag_ok); |
3099 bind(&index_tag_bad); | 3127 bind(&index_tag_bad); |
3100 Abort(kIndexIsTooLarge); | 3128 Abort(kIndexIsTooLarge); |
3101 bind(&index_tag_ok); | 3129 bind(&index_tag_ok); |
3102 #endif | 3130 #endif |
3103 | 3131 |
3104 LoadP(ip, FieldMemOperand(string, String::kLengthOffset)); | 3132 LoadP(ip, FieldMemOperand(string, String::kLengthOffset)); |
3105 cmp(index, ip); | 3133 CmpP(index, ip); |
3106 Check(lt, kIndexIsTooLarge); | 3134 Check(lt, kIndexIsTooLarge); |
3107 | 3135 |
3108 DCHECK(Smi::FromInt(0) == 0); | 3136 DCHECK(Smi::FromInt(0) == 0); |
3109 cmpi(index, Operand::Zero()); | 3137 CmpP(index, Operand::Zero()); |
3110 Check(ge, kIndexIsNegative); | 3138 Check(ge, kIndexIsNegative); |
3111 | 3139 |
3112 SmiUntag(index, index); | 3140 SmiUntag(index, index); |
3113 } | 3141 } |
3114 | 3142 |
3115 | |
3116 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3143 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3117 int num_double_arguments, | 3144 int num_double_arguments, |
3118 Register scratch) { | 3145 Register scratch) { |
3119 int frame_alignment = ActivationFrameAlignment(); | 3146 int frame_alignment = ActivationFrameAlignment(); |
3120 int stack_passed_arguments = | 3147 int stack_passed_arguments = |
3121 CalculateStackPassedWords(num_reg_arguments, num_double_arguments); | 3148 CalculateStackPassedWords(num_reg_arguments, num_double_arguments); |
3122 int stack_space = kNumRequiredStackFrameSlots; | 3149 int stack_space = kNumRequiredStackFrameSlots; |
3123 | |
3124 if (frame_alignment > kPointerSize) { | 3150 if (frame_alignment > kPointerSize) { |
3125 // Make stack end at alignment and make room for stack arguments | 3151 // Make stack end at alignment and make room for stack arguments |
3126 // -- preserving original value of sp. | 3152 // -- preserving original value of sp. |
3127 mr(scratch, sp); | 3153 LoadRR(scratch, sp); |
3128 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize)); | 3154 lay(sp, MemOperand(sp, -(stack_passed_arguments + 1) * kPointerSize)); |
3129 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 3155 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); |
3130 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); | 3156 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); |
3131 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 3157 StoreP(scratch, MemOperand(sp, (stack_passed_arguments)*kPointerSize)); |
3132 } else { | 3158 } else { |
3133 // Make room for stack arguments | |
3134 stack_space += stack_passed_arguments; | 3159 stack_space += stack_passed_arguments; |
3135 } | 3160 } |
3136 | 3161 lay(sp, MemOperand(sp, -(stack_space)*kPointerSize)); |
3137 // Allocate frame with required slots to make ABI work. | |
3138 li(r0, Operand::Zero()); | |
3139 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize)); | |
3140 } | 3162 } |
3141 | 3163 |
3142 | |
3143 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 3164 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
3144 Register scratch) { | 3165 Register scratch) { |
3145 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 3166 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
3146 } | 3167 } |
3147 | 3168 |
| 3169 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d0, src); } |
3148 | 3170 |
3149 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); } | 3171 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d0, src); } |
3150 | |
3151 | |
3152 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); } | |
3153 | |
3154 | 3172 |
3155 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, | 3173 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, |
3156 DoubleRegister src2) { | 3174 DoubleRegister src2) { |
3157 if (src2.is(d1)) { | 3175 if (src2.is(d0)) { |
3158 DCHECK(!src1.is(d2)); | 3176 DCHECK(!src1.is(d2)); |
3159 Move(d2, src2); | 3177 Move(d2, src2); |
3160 Move(d1, src1); | 3178 Move(d0, src1); |
3161 } else { | 3179 } else { |
3162 Move(d1, src1); | 3180 Move(d0, src1); |
3163 Move(d2, src2); | 3181 Move(d2, src2); |
3164 } | 3182 } |
3165 } | 3183 } |
3166 | 3184 |
3167 | |
3168 void MacroAssembler::CallCFunction(ExternalReference function, | 3185 void MacroAssembler::CallCFunction(ExternalReference function, |
3169 int num_reg_arguments, | 3186 int num_reg_arguments, |
3170 int num_double_arguments) { | 3187 int num_double_arguments) { |
3171 mov(ip, Operand(function)); | 3188 mov(ip, Operand(function)); |
3172 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); | 3189 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments); |
3173 } | 3190 } |
3174 | 3191 |
3175 | |
3176 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, | 3192 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments, |
3177 int num_double_arguments) { | 3193 int num_double_arguments) { |
3178 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); | 3194 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments); |
3179 } | 3195 } |
3180 | 3196 |
3181 | |
3182 void MacroAssembler::CallCFunction(ExternalReference function, | 3197 void MacroAssembler::CallCFunction(ExternalReference function, |
3183 int num_arguments) { | 3198 int num_arguments) { |
3184 CallCFunction(function, num_arguments, 0); | 3199 CallCFunction(function, num_arguments, 0); |
3185 } | 3200 } |
3186 | 3201 |
3187 | |
3188 void MacroAssembler::CallCFunction(Register function, int num_arguments) { | 3202 void MacroAssembler::CallCFunction(Register function, int num_arguments) { |
3189 CallCFunction(function, num_arguments, 0); | 3203 CallCFunction(function, num_arguments, 0); |
3190 } | 3204 } |
3191 | 3205 |
3192 | |
3193 void MacroAssembler::CallCFunctionHelper(Register function, | 3206 void MacroAssembler::CallCFunctionHelper(Register function, |
3194 int num_reg_arguments, | 3207 int num_reg_arguments, |
3195 int num_double_arguments) { | 3208 int num_double_arguments) { |
3196 DCHECK(has_frame()); | 3209 DCHECK(has_frame()); |
3197 | 3210 |
3198 // Just call directly. The function called cannot cause a GC, or | 3211 // Just call directly. The function called cannot cause a GC, or |
3199 // allow preemption, so the return address in the link register | 3212 // allow preemption, so the return address in the link register |
3200 // stays correct. | 3213 // stays correct. |
3201 Register dest = function; | 3214 Register dest = function; |
3202 if (ABI_USES_FUNCTION_DESCRIPTORS) { | 3215 if (ABI_CALL_VIA_IP) { |
3203 // AIX/PPC64BE Linux uses a function descriptor. When calling C code be | |
3204 // aware of this descriptor and pick up values from it | |
3205 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize)); | |
3206 LoadP(ip, MemOperand(function, 0)); | |
3207 dest = ip; | |
3208 } else if (ABI_CALL_VIA_IP) { | |
3209 Move(ip, function); | 3216 Move(ip, function); |
3210 dest = ip; | 3217 dest = ip; |
3211 } | 3218 } |
3212 | 3219 |
3213 Call(dest); | 3220 Call(dest); |
3214 | 3221 |
3215 // Remove frame bought in PrepareCallCFunction | |
3216 int stack_passed_arguments = | 3222 int stack_passed_arguments = |
3217 CalculateStackPassedWords(num_reg_arguments, num_double_arguments); | 3223 CalculateStackPassedWords(num_reg_arguments, num_double_arguments); |
3218 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; | 3224 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments; |
3219 if (ActivationFrameAlignment() > kPointerSize) { | 3225 if (ActivationFrameAlignment() > kPointerSize) { |
| 3226 // Load the original stack pointer (pre-alignment) from the stack |
3220 LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); | 3227 LoadP(sp, MemOperand(sp, stack_space * kPointerSize)); |
3221 } else { | 3228 } else { |
3222 addi(sp, sp, Operand(stack_space * kPointerSize)); | 3229 la(sp, MemOperand(sp, stack_space * kPointerSize)); |
3223 } | 3230 } |
3224 } | 3231 } |
3225 | 3232 |
3226 | |
3227 void MacroAssembler::DecodeConstantPoolOffset(Register result, | |
3228 Register location) { | |
3229 Label overflow_access, done; | |
3230 DCHECK(!AreAliased(result, location, r0)); | |
3231 | |
3232 // Determine constant pool access type | |
3233 // Caller has already placed the instruction word at location in result. | |
3234 ExtractBitRange(r0, result, 31, 26); | |
3235 cmpi(r0, Operand(ADDIS >> 26)); | |
3236 beq(&overflow_access); | |
3237 | |
3238 // Regular constant pool access | |
3239 // extract the load offset | |
3240 andi(result, result, Operand(kImm16Mask)); | |
3241 b(&done); | |
3242 | |
3243 bind(&overflow_access); | |
3244 // Overflow constant pool access | |
3245 // shift addis immediate | |
3246 slwi(r0, result, Operand(16)); | |
3247 // sign-extend and add the load offset | |
3248 lwz(result, MemOperand(location, kInstrSize)); | |
3249 extsh(result, result); | |
3250 add(result, r0, result); | |
3251 | |
3252 bind(&done); | |
3253 } | |
3254 | |
3255 | |
3256 void MacroAssembler::CheckPageFlag( | 3233 void MacroAssembler::CheckPageFlag( |
3257 Register object, | 3234 Register object, |
3258 Register scratch, // scratch may be same register as object | 3235 Register scratch, // scratch may be same register as object |
3259 int mask, Condition cc, Label* condition_met) { | 3236 int mask, Condition cc, Label* condition_met) { |
3260 DCHECK(cc == ne || cc == eq); | 3237 DCHECK(cc == ne || cc == eq); |
3261 ClearRightImm(scratch, object, Operand(kPageSizeBits)); | 3238 ClearRightImm(scratch, object, Operand(kPageSizeBits)); |
3262 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | |
3263 | 3239 |
3264 And(r0, scratch, Operand(mask), SetRC); | 3240 if (base::bits::IsPowerOfTwo32(mask)) { |
| 3241 // If it's a power of two, we can use Test-Under-Mask Memory-Imm form |
| 3242 // which allows testing of a single byte in memory. |
| 3243 int32_t byte_offset = 4; |
| 3244 uint32_t shifted_mask = mask; |
| 3245 // Determine the byte offset to be tested |
| 3246 if (mask <= 0x80) { |
| 3247 byte_offset = kPointerSize - 1; |
| 3248 } else if (mask < 0x8000) { |
| 3249 byte_offset = kPointerSize - 2; |
| 3250 shifted_mask = mask >> 8; |
| 3251 } else if (mask < 0x800000) { |
| 3252 byte_offset = kPointerSize - 3; |
| 3253 shifted_mask = mask >> 16; |
| 3254 } else { |
| 3255 byte_offset = kPointerSize - 4; |
| 3256 shifted_mask = mask >> 24; |
| 3257 } |
| 3258 #if V8_TARGET_LITTLE_ENDIAN |
| 3259 // Reverse the byte_offset if emulating on little endian platform |
| 3260 byte_offset = kPointerSize - byte_offset - 1; |
| 3261 #endif |
| 3262 tm(MemOperand(scratch, MemoryChunk::kFlagsOffset + byte_offset), |
| 3263 Operand(shifted_mask)); |
| 3264 } else { |
| 3265 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 3266 AndP(r0, scratch, Operand(mask)); |
| 3267 } |
| 3268 // Should be okay to remove rc |
3265 | 3269 |
3266 if (cc == ne) { | 3270 if (cc == ne) { |
3267 bne(condition_met, cr0); | 3271 bne(condition_met); |
3268 } | 3272 } |
3269 if (cc == eq) { | 3273 if (cc == eq) { |
3270 beq(condition_met, cr0); | 3274 beq(condition_met); |
3271 } | 3275 } |
3272 } | 3276 } |
3273 | 3277 |
3274 | |
3275 void MacroAssembler::JumpIfBlack(Register object, Register scratch0, | 3278 void MacroAssembler::JumpIfBlack(Register object, Register scratch0, |
3276 Register scratch1, Label* on_black) { | 3279 Register scratch1, Label* on_black) { |
3277 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. | 3280 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. |
3278 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); | 3281 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); |
3279 } | 3282 } |
3280 | 3283 |
3281 | |
3282 void MacroAssembler::HasColor(Register object, Register bitmap_scratch, | 3284 void MacroAssembler::HasColor(Register object, Register bitmap_scratch, |
3283 Register mask_scratch, Label* has_color, | 3285 Register mask_scratch, Label* has_color, |
3284 int first_bit, int second_bit) { | 3286 int first_bit, int second_bit) { |
3285 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); | 3287 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg)); |
3286 | 3288 |
3287 GetMarkBits(object, bitmap_scratch, mask_scratch); | 3289 GetMarkBits(object, bitmap_scratch, mask_scratch); |
3288 | 3290 |
3289 Label other_color, word_boundary; | 3291 Label other_color, word_boundary; |
3290 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3292 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3291 // Test the first bit | 3293 // Test the first bit |
3292 and_(r0, ip, mask_scratch, SetRC); | 3294 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc |
3293 b(first_bit == 1 ? eq : ne, &other_color, cr0); | 3295 b(first_bit == 1 ? eq : ne, &other_color, Label::kNear); |
3294 // Shift left 1 | 3296 // Shift left 1 |
3295 // May need to load the next cell | 3297 // May need to load the next cell |
3296 slwi(mask_scratch, mask_scratch, Operand(1), SetRC); | 3298 sll(mask_scratch, Operand(1) /*, SetRC*/); |
3297 beq(&word_boundary, cr0); | 3299 LoadAndTest32(mask_scratch, mask_scratch); |
| 3300 beq(&word_boundary, Label::kNear); |
3298 // Test the second bit | 3301 // Test the second bit |
3299 and_(r0, ip, mask_scratch, SetRC); | 3302 AndP(r0, ip, mask_scratch /*, SetRC*/); // Should be okay to remove rc |
3300 b(second_bit == 1 ? ne : eq, has_color, cr0); | 3303 b(second_bit == 1 ? ne : eq, has_color); |
3301 b(&other_color); | 3304 b(&other_color, Label::kNear); |
3302 | 3305 |
3303 bind(&word_boundary); | 3306 bind(&word_boundary); |
3304 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize)); | 3307 LoadlW(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize)); |
3305 andi(r0, ip, Operand(1)); | 3308 AndP(r0, ip, Operand(1)); |
3306 b(second_bit == 1 ? ne : eq, has_color, cr0); | 3309 b(second_bit == 1 ? ne : eq, has_color); |
3307 bind(&other_color); | 3310 bind(&other_color); |
3308 } | 3311 } |
3309 | 3312 |
3310 | |
3311 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, | 3313 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg, |
3312 Register mask_reg) { | 3314 Register mask_reg) { |
3313 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); | 3315 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
3314 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0); | 3316 LoadRR(bitmap_reg, addr_reg); |
3315 lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); | 3317 nilf(bitmap_reg, Operand(~Page::kPageAlignmentMask)); |
3316 and_(bitmap_reg, addr_reg, r0); | |
3317 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | 3318 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
3318 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2); | 3319 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2); |
3319 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits); | 3320 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits); |
3320 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2)); | 3321 ShiftLeftP(ip, ip, Operand(Bitmap::kBytesPerCellLog2)); |
3321 add(bitmap_reg, bitmap_reg, ip); | 3322 AddP(bitmap_reg, ip); |
3322 li(ip, Operand(1)); | 3323 LoadRR(ip, mask_reg); // Have to do some funky reg shuffling as |
3323 slw(mask_reg, ip, mask_reg); | 3324 // 31-bit shift left clobbers on s390. |
| 3325 LoadImmP(mask_reg, Operand(1)); |
| 3326 ShiftLeftP(mask_reg, mask_reg, ip); |
3324 } | 3327 } |
3325 | 3328 |
3326 | |
3327 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, | 3329 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch, |
3328 Register mask_scratch, Register load_scratch, | 3330 Register mask_scratch, Register load_scratch, |
3329 Label* value_is_white) { | 3331 Label* value_is_white) { |
3330 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); | 3332 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip)); |
3331 GetMarkBits(value, bitmap_scratch, mask_scratch); | 3333 GetMarkBits(value, bitmap_scratch, mask_scratch); |
3332 | 3334 |
3333 // If the value is black or grey we don't need to do anything. | 3335 // If the value is black or grey we don't need to do anything. |
3334 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 3336 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
3335 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); | 3337 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0); |
3336 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); | 3338 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); |
3337 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 3339 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
3338 | 3340 |
3339 // Since both black and grey have a 1 in the first position and white does | 3341 // Since both black and grey have a 1 in the first position and white does |
3340 // not have a 1 there we only need to check one bit. | 3342 // not have a 1 there we only need to check one bit. |
3341 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 3343 LoadlW(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
3342 and_(r0, mask_scratch, load_scratch, SetRC); | 3344 LoadRR(r0, load_scratch); |
3343 beq(value_is_white, cr0); | 3345 AndP(r0, mask_scratch); |
| 3346 beq(value_is_white); |
3344 } | 3347 } |
3345 | 3348 |
3346 | |
3347 // Saturate a value into 8-bit unsigned integer | 3349 // Saturate a value into 8-bit unsigned integer |
3348 // if input_value < 0, output_value is 0 | 3350 // if input_value < 0, output_value is 0 |
3349 // if input_value > 255, output_value is 255 | 3351 // if input_value > 255, output_value is 255 |
3350 // otherwise output_value is the input_value | 3352 // otherwise output_value is the input_value |
3351 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 3353 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
3352 int satval = (1 << 8) - 1; | 3354 int satval = (1 << 8) - 1; |
3353 | 3355 |
3354 if (CpuFeatures::IsSupported(ISELECT)) { | 3356 Label done, negative_label, overflow_label; |
3355 // set to 0 if negative | 3357 CmpP(input_reg, Operand::Zero()); |
3356 cmpi(input_reg, Operand::Zero()); | 3358 blt(&negative_label); |
3357 isel(lt, output_reg, r0, input_reg); | |
3358 | 3359 |
3359 // set to satval if > satval | 3360 CmpP(input_reg, Operand(satval)); |
3360 li(r0, Operand(satval)); | 3361 bgt(&overflow_label); |
3361 cmpi(output_reg, Operand(satval)); | 3362 if (!output_reg.is(input_reg)) { |
3362 isel(lt, output_reg, output_reg, r0); | 3363 LoadRR(output_reg, input_reg); |
3363 } else { | 3364 } |
3364 Label done, negative_label, overflow_label; | 3365 b(&done); |
3365 cmpi(input_reg, Operand::Zero()); | |
3366 blt(&negative_label); | |
3367 | 3366 |
3368 cmpi(input_reg, Operand(satval)); | 3367 bind(&negative_label); |
3369 bgt(&overflow_label); | 3368 LoadImmP(output_reg, Operand::Zero()); // set to 0 if negative |
3370 if (!output_reg.is(input_reg)) { | 3369 b(&done); |
3371 mr(output_reg, input_reg); | |
3372 } | |
3373 b(&done); | |
3374 | 3370 |
3375 bind(&negative_label); | 3371 bind(&overflow_label); // set to satval if > satval |
3376 li(output_reg, Operand::Zero()); // set to 0 if negative | 3372 LoadImmP(output_reg, Operand(satval)); |
3377 b(&done); | |
3378 | 3373 |
3379 bind(&overflow_label); // set to satval if > satval | 3374 bind(&done); |
3380 li(output_reg, Operand(satval)); | |
3381 | |
3382 bind(&done); | |
3383 } | |
3384 } | 3375 } |
3385 | 3376 |
3386 | |
3387 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } | |
3388 | |
3389 | |
3390 void MacroAssembler::ResetRoundingMode() { | |
3391 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) | |
3392 } | |
3393 | |
3394 | |
3395 void MacroAssembler::ClampDoubleToUint8(Register result_reg, | 3377 void MacroAssembler::ClampDoubleToUint8(Register result_reg, |
3396 DoubleRegister input_reg, | 3378 DoubleRegister input_reg, |
3397 DoubleRegister double_scratch) { | 3379 DoubleRegister double_scratch) { |
3398 Label above_zero; | 3380 Label above_zero; |
3399 Label done; | 3381 Label done; |
3400 Label in_bounds; | 3382 Label in_bounds; |
3401 | 3383 |
3402 LoadDoubleLiteral(double_scratch, 0.0, result_reg); | 3384 LoadDoubleLiteral(double_scratch, 0.0, result_reg); |
3403 fcmpu(input_reg, double_scratch); | 3385 cdbr(input_reg, double_scratch); |
3404 bgt(&above_zero); | 3386 bgt(&above_zero, Label::kNear); |
3405 | 3387 |
3406 // Double value is less than zero, NaN or Inf, return 0. | 3388 // Double value is less than zero, NaN or Inf, return 0. |
3407 LoadIntLiteral(result_reg, 0); | 3389 LoadIntLiteral(result_reg, 0); |
3408 b(&done); | 3390 b(&done, Label::kNear); |
3409 | 3391 |
3410 // Double value is >= 255, return 255. | 3392 // Double value is >= 255, return 255. |
3411 bind(&above_zero); | 3393 bind(&above_zero); |
3412 LoadDoubleLiteral(double_scratch, 255.0, result_reg); | 3394 LoadDoubleLiteral(double_scratch, 255.0, result_reg); |
3413 fcmpu(input_reg, double_scratch); | 3395 cdbr(input_reg, double_scratch); |
3414 ble(&in_bounds); | 3396 ble(&in_bounds, Label::kNear); |
3415 LoadIntLiteral(result_reg, 255); | 3397 LoadIntLiteral(result_reg, 255); |
3416 b(&done); | 3398 b(&done, Label::kNear); |
3417 | 3399 |
3418 // In 0-255 range, round and truncate. | 3400 // In 0-255 range, round and truncate. |
3419 bind(&in_bounds); | 3401 bind(&in_bounds); |
3420 | 3402 |
3421 // round to nearest (default rounding mode) | 3403 // round to nearest (default rounding mode) |
3422 fctiw(double_scratch, input_reg); | 3404 cfdbr(ROUND_TO_NEAREST_WITH_TIES_TO_EVEN, result_reg, input_reg); |
3423 MovDoubleLowToInt(result_reg, double_scratch); | |
3424 bind(&done); | 3405 bind(&done); |
3425 } | 3406 } |
3426 | 3407 |
3427 | |
3428 void MacroAssembler::LoadInstanceDescriptors(Register map, | 3408 void MacroAssembler::LoadInstanceDescriptors(Register map, |
3429 Register descriptors) { | 3409 Register descriptors) { |
3430 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 3410 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
3431 } | 3411 } |
3432 | 3412 |
3433 | |
3434 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | 3413 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
3435 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 3414 LoadlW(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3436 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | 3415 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
3437 } | 3416 } |
3438 | 3417 |
3439 | |
3440 void MacroAssembler::EnumLength(Register dst, Register map) { | 3418 void MacroAssembler::EnumLength(Register dst, Register map) { |
3441 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | 3419 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
3442 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 3420 LoadW(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
3443 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask); | 3421 And(dst, Operand(Map::EnumLengthBits::kMask)); |
3444 SmiTag(dst); | 3422 SmiTag(dst); |
3445 } | 3423 } |
3446 | 3424 |
3447 | |
3448 void MacroAssembler::LoadAccessor(Register dst, Register holder, | 3425 void MacroAssembler::LoadAccessor(Register dst, Register holder, |
3449 int accessor_index, | 3426 int accessor_index, |
3450 AccessorComponent accessor) { | 3427 AccessorComponent accessor) { |
3451 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset)); | 3428 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset)); |
3452 LoadInstanceDescriptors(dst, dst); | 3429 LoadInstanceDescriptors(dst, dst); |
3453 LoadP(dst, | 3430 LoadP(dst, |
3454 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index))); | 3431 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index))); |
3455 const int getterOffset = AccessorPair::kGetterOffset; | 3432 const int getterOffset = AccessorPair::kGetterOffset; |
3456 const int setterOffset = AccessorPair::kSetterOffset; | 3433 const int setterOffset = AccessorPair::kSetterOffset; |
3457 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset); | 3434 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset); |
3458 LoadP(dst, FieldMemOperand(dst, offset)); | 3435 LoadP(dst, FieldMemOperand(dst, offset)); |
3459 } | 3436 } |
3460 | 3437 |
3461 | |
3462 void MacroAssembler::CheckEnumCache(Label* call_runtime) { | 3438 void MacroAssembler::CheckEnumCache(Label* call_runtime) { |
3463 Register null_value = r8; | 3439 Register null_value = r7; |
3464 Register empty_fixed_array_value = r9; | 3440 Register empty_fixed_array_value = r8; |
3465 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | 3441 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
3466 Label next, start; | 3442 Label next, start; |
3467 mr(r5, r3); | 3443 LoadRR(r4, r2); |
3468 | 3444 |
3469 // Check if the enum length field is properly initialized, indicating that | 3445 // Check if the enum length field is properly initialized, indicating that |
3470 // there is an enum cache. | 3446 // there is an enum cache. |
3471 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); | 3447 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); |
3472 | 3448 |
3473 EnumLength(r6, r4); | 3449 EnumLength(r5, r3); |
3474 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0); | 3450 CmpSmiLiteral(r5, Smi::FromInt(kInvalidEnumCacheSentinel), r0); |
3475 beq(call_runtime); | 3451 beq(call_runtime); |
3476 | 3452 |
3477 LoadRoot(null_value, Heap::kNullValueRootIndex); | 3453 LoadRoot(null_value, Heap::kNullValueRootIndex); |
3478 b(&start); | 3454 b(&start, Label::kNear); |
3479 | 3455 |
3480 bind(&next); | 3456 bind(&next); |
3481 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); | 3457 LoadP(r3, FieldMemOperand(r4, HeapObject::kMapOffset)); |
3482 | 3458 |
3483 // For all objects but the receiver, check that the cache is empty. | 3459 // For all objects but the receiver, check that the cache is empty. |
3484 EnumLength(r6, r4); | 3460 EnumLength(r5, r3); |
3485 CmpSmiLiteral(r6, Smi::FromInt(0), r0); | 3461 CmpSmiLiteral(r5, Smi::FromInt(0), r0); |
3486 bne(call_runtime); | 3462 bne(call_runtime); |
3487 | 3463 |
3488 bind(&start); | 3464 bind(&start); |
3489 | 3465 |
3490 // Check that there are no elements. Register r5 contains the current JS | 3466 // Check that there are no elements. Register r4 contains the current JS |
3491 // object we've reached through the prototype chain. | 3467 // object we've reached through the prototype chain. |
3492 Label no_elements; | 3468 Label no_elements; |
3493 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset)); | 3469 LoadP(r4, FieldMemOperand(r4, JSObject::kElementsOffset)); |
3494 cmp(r5, empty_fixed_array_value); | 3470 CmpP(r4, empty_fixed_array_value); |
3495 beq(&no_elements); | 3471 beq(&no_elements, Label::kNear); |
3496 | 3472 |
3497 // Second chance, the object may be using the empty slow element dictionary. | 3473 // Second chance, the object may be using the empty slow element dictionary. |
3498 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex); | 3474 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex); |
3499 bne(call_runtime); | 3475 bne(call_runtime); |
3500 | 3476 |
3501 bind(&no_elements); | 3477 bind(&no_elements); |
3502 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset)); | 3478 LoadP(r4, FieldMemOperand(r3, Map::kPrototypeOffset)); |
3503 cmp(r5, null_value); | 3479 CmpP(r4, null_value); |
3504 bne(&next); | 3480 bne(&next); |
3505 } | 3481 } |
3506 | 3482 |
3507 | |
3508 //////////////////////////////////////////////////////////////////////////////// | 3483 //////////////////////////////////////////////////////////////////////////////// |
3509 // | 3484 // |
3510 // New MacroAssembler Interfaces added for PPC | 3485 // New MacroAssembler Interfaces added for S390 |
3511 // | 3486 // |
3512 //////////////////////////////////////////////////////////////////////////////// | 3487 //////////////////////////////////////////////////////////////////////////////// |
3513 void MacroAssembler::LoadIntLiteral(Register dst, int value) { | 3488 // Primarily used for loading constants |
3514 mov(dst, Operand(value)); | 3489 // This should really move to be in macro-assembler as it |
3515 } | 3490 // is really a pseudo instruction |
3516 | 3491 // Some usages of this intend for a FIXED_SEQUENCE to be used |
3517 | 3492 // @TODO - break this dependency so we can optimize mov() in general |
3518 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) { | 3493 // and only use the generic version when we require a fixed sequence |
3519 mov(dst, Operand(smi)); | |
3520 } | |
3521 | |
3522 | |
3523 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value, | |
3524 Register scratch) { | |
3525 if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() && | |
3526 !(scratch.is(r0) && ConstantPoolAccessIsInOverflow())) { | |
3527 ConstantPoolEntry::Access access = ConstantPoolAddEntry(value); | |
3528 if (access == ConstantPoolEntry::OVERFLOWED) { | |
3529 addis(scratch, kConstantPoolRegister, Operand::Zero()); | |
3530 lfd(result, MemOperand(scratch, 0)); | |
3531 } else { | |
3532 lfd(result, MemOperand(kConstantPoolRegister, 0)); | |
3533 } | |
3534 return; | |
3535 } | |
3536 | |
3537 // avoid gcc strict aliasing error using union cast | |
3538 union { | |
3539 double dval; | |
3540 #if V8_TARGET_ARCH_PPC64 | |
3541 intptr_t ival; | |
3542 #else | |
3543 intptr_t ival[2]; | |
3544 #endif | |
3545 } litVal; | |
3546 | |
3547 litVal.dval = value; | |
3548 | |
3549 #if V8_TARGET_ARCH_PPC64 | |
3550 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3551 mov(scratch, Operand(litVal.ival)); | |
3552 mtfprd(result, scratch); | |
3553 return; | |
3554 } | |
3555 #endif | |
3556 | |
3557 addi(sp, sp, Operand(-kDoubleSize)); | |
3558 #if V8_TARGET_ARCH_PPC64 | |
3559 mov(scratch, Operand(litVal.ival)); | |
3560 std(scratch, MemOperand(sp)); | |
3561 #else | |
3562 LoadIntLiteral(scratch, litVal.ival[0]); | |
3563 stw(scratch, MemOperand(sp, 0)); | |
3564 LoadIntLiteral(scratch, litVal.ival[1]); | |
3565 stw(scratch, MemOperand(sp, 4)); | |
3566 #endif | |
3567 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3568 lfd(result, MemOperand(sp, 0)); | |
3569 addi(sp, sp, Operand(kDoubleSize)); | |
3570 } | |
3571 | |
3572 | |
3573 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src, | |
3574 Register scratch) { | |
3575 // sign-extend src to 64-bit | |
3576 #if V8_TARGET_ARCH_PPC64 | |
3577 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3578 mtfprwa(dst, src); | |
3579 return; | |
3580 } | |
3581 #endif | |
3582 | |
3583 DCHECK(!src.is(scratch)); | |
3584 subi(sp, sp, Operand(kDoubleSize)); | |
3585 #if V8_TARGET_ARCH_PPC64 | |
3586 extsw(scratch, src); | |
3587 std(scratch, MemOperand(sp, 0)); | |
3588 #else | |
3589 srawi(scratch, src, 31); | |
3590 stw(scratch, MemOperand(sp, Register::kExponentOffset)); | |
3591 stw(src, MemOperand(sp, Register::kMantissaOffset)); | |
3592 #endif | |
3593 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3594 lfd(dst, MemOperand(sp, 0)); | |
3595 addi(sp, sp, Operand(kDoubleSize)); | |
3596 } | |
3597 | |
3598 | |
3599 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src, | |
3600 Register scratch) { | |
3601 // zero-extend src to 64-bit | |
3602 #if V8_TARGET_ARCH_PPC64 | |
3603 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3604 mtfprwz(dst, src); | |
3605 return; | |
3606 } | |
3607 #endif | |
3608 | |
3609 DCHECK(!src.is(scratch)); | |
3610 subi(sp, sp, Operand(kDoubleSize)); | |
3611 #if V8_TARGET_ARCH_PPC64 | |
3612 clrldi(scratch, src, Operand(32)); | |
3613 std(scratch, MemOperand(sp, 0)); | |
3614 #else | |
3615 li(scratch, Operand::Zero()); | |
3616 stw(scratch, MemOperand(sp, Register::kExponentOffset)); | |
3617 stw(src, MemOperand(sp, Register::kMantissaOffset)); | |
3618 #endif | |
3619 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3620 lfd(dst, MemOperand(sp, 0)); | |
3621 addi(sp, sp, Operand(kDoubleSize)); | |
3622 } | |
3623 | |
3624 | |
3625 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst, | |
3626 #if !V8_TARGET_ARCH_PPC64 | |
3627 Register src_hi, | |
3628 #endif | |
3629 Register src) { | |
3630 #if V8_TARGET_ARCH_PPC64 | |
3631 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3632 mtfprd(dst, src); | |
3633 return; | |
3634 } | |
3635 #endif | |
3636 | |
3637 subi(sp, sp, Operand(kDoubleSize)); | |
3638 #if V8_TARGET_ARCH_PPC64 | |
3639 std(src, MemOperand(sp, 0)); | |
3640 #else | |
3641 stw(src_hi, MemOperand(sp, Register::kExponentOffset)); | |
3642 stw(src, MemOperand(sp, Register::kMantissaOffset)); | |
3643 #endif | |
3644 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3645 lfd(dst, MemOperand(sp, 0)); | |
3646 addi(sp, sp, Operand(kDoubleSize)); | |
3647 } | |
3648 | |
3649 | |
3650 #if V8_TARGET_ARCH_PPC64 | |
3651 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst, | |
3652 Register src_hi, | |
3653 Register src_lo, | |
3654 Register scratch) { | |
3655 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3656 sldi(scratch, src_hi, Operand(32)); | |
3657 rldimi(scratch, src_lo, 0, 32); | |
3658 mtfprd(dst, scratch); | |
3659 return; | |
3660 } | |
3661 | |
3662 subi(sp, sp, Operand(kDoubleSize)); | |
3663 stw(src_hi, MemOperand(sp, Register::kExponentOffset)); | |
3664 stw(src_lo, MemOperand(sp, Register::kMantissaOffset)); | |
3665 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3666 lfd(dst, MemOperand(sp)); | |
3667 addi(sp, sp, Operand(kDoubleSize)); | |
3668 } | |
3669 #endif | |
3670 | |
3671 | |
3672 void MacroAssembler::InsertDoubleLow(DoubleRegister dst, Register src, | |
3673 Register scratch) { | |
3674 #if V8_TARGET_ARCH_PPC64 | |
3675 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3676 mffprd(scratch, dst); | |
3677 rldimi(scratch, src, 0, 32); | |
3678 mtfprd(dst, scratch); | |
3679 return; | |
3680 } | |
3681 #endif | |
3682 | |
3683 subi(sp, sp, Operand(kDoubleSize)); | |
3684 stfd(dst, MemOperand(sp)); | |
3685 stw(src, MemOperand(sp, Register::kMantissaOffset)); | |
3686 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3687 lfd(dst, MemOperand(sp)); | |
3688 addi(sp, sp, Operand(kDoubleSize)); | |
3689 } | |
3690 | |
3691 | |
3692 void MacroAssembler::InsertDoubleHigh(DoubleRegister dst, Register src, | |
3693 Register scratch) { | |
3694 #if V8_TARGET_ARCH_PPC64 | |
3695 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3696 mffprd(scratch, dst); | |
3697 rldimi(scratch, src, 32, 0); | |
3698 mtfprd(dst, scratch); | |
3699 return; | |
3700 } | |
3701 #endif | |
3702 | |
3703 subi(sp, sp, Operand(kDoubleSize)); | |
3704 stfd(dst, MemOperand(sp)); | |
3705 stw(src, MemOperand(sp, Register::kExponentOffset)); | |
3706 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3707 lfd(dst, MemOperand(sp)); | |
3708 addi(sp, sp, Operand(kDoubleSize)); | |
3709 } | |
3710 | |
3711 | |
3712 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) { | |
3713 #if V8_TARGET_ARCH_PPC64 | |
3714 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3715 mffprwz(dst, src); | |
3716 return; | |
3717 } | |
3718 #endif | |
3719 | |
3720 subi(sp, sp, Operand(kDoubleSize)); | |
3721 stfd(src, MemOperand(sp)); | |
3722 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3723 lwz(dst, MemOperand(sp, Register::kMantissaOffset)); | |
3724 addi(sp, sp, Operand(kDoubleSize)); | |
3725 } | |
3726 | |
3727 | |
3728 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) { | |
3729 #if V8_TARGET_ARCH_PPC64 | |
3730 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3731 mffprd(dst, src); | |
3732 srdi(dst, dst, Operand(32)); | |
3733 return; | |
3734 } | |
3735 #endif | |
3736 | |
3737 subi(sp, sp, Operand(kDoubleSize)); | |
3738 stfd(src, MemOperand(sp)); | |
3739 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3740 lwz(dst, MemOperand(sp, Register::kExponentOffset)); | |
3741 addi(sp, sp, Operand(kDoubleSize)); | |
3742 } | |
3743 | |
3744 | |
3745 void MacroAssembler::MovDoubleToInt64( | |
3746 #if !V8_TARGET_ARCH_PPC64 | |
3747 Register dst_hi, | |
3748 #endif | |
3749 Register dst, DoubleRegister src) { | |
3750 #if V8_TARGET_ARCH_PPC64 | |
3751 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) { | |
3752 mffprd(dst, src); | |
3753 return; | |
3754 } | |
3755 #endif | |
3756 | |
3757 subi(sp, sp, Operand(kDoubleSize)); | |
3758 stfd(src, MemOperand(sp)); | |
3759 nop(GROUP_ENDING_NOP); // LHS/RAW optimization | |
3760 #if V8_TARGET_ARCH_PPC64 | |
3761 ld(dst, MemOperand(sp, 0)); | |
3762 #else | |
3763 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset)); | |
3764 lwz(dst, MemOperand(sp, Register::kMantissaOffset)); | |
3765 #endif | |
3766 addi(sp, sp, Operand(kDoubleSize)); | |
3767 } | |
3768 | |
3769 | |
// Reinterprets the 32 bits of a GPR as a single-precision float in dst,
// bouncing through a stack slot (no direct GPR->FPR bit move used here).
void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
  subi(sp, sp, Operand(kFloatSize));
  stw(src, MemOperand(sp, 0));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lfs(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kFloatSize));
}


// Reinterprets a single-precision float as its 32 raw bits in dst.
// NOTE(review): frsp rounds src to single precision *in place*, so src is
// clobbered — confirm callers do not rely on src afterwards.
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
  subi(sp, sp, Operand(kFloatSize));
  frsp(src, src);
  stfs(src, MemOperand(sp, 0));
  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
  lwz(dst, MemOperand(sp, 0));
  addi(sp, sp, Operand(kFloatSize));
}
3787 | |
3788 | |
// dst = src + value (pointer-sized immediate). Uses the addi short form
// when the immediate fits 16 bits; otherwise materializes it in scratch.
void MacroAssembler::Add(Register dst, Register src, intptr_t value,
                         Register scratch) {
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
  } else {
    mov(scratch, Operand(value));
    add(dst, src, scratch);
  }
}
3798 | |
3799 | |
// Signed pointer-sized compare against an immediate. scratch is only used
// when the immediate does not fit the 16-bit field of cmpi.
void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
                          CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmp(src1, scratch, cr);
  }
}


// Unsigned pointer-sized compare against an immediate (16-bit unsigned
// short form when possible).
void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmpli(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpl(src1, scratch, cr);
  }
}


// Signed 32-bit (word) compare against an immediate.
void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
                           CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_int16(value)) {
    cmpwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmpw(src1, scratch, cr);
  }
}


// Unsigned 32-bit (word) compare against an immediate.
void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
                            Register scratch, CRegister cr) {
  intptr_t value = src2.immediate();
  if (is_uint16(value)) {
    cmplwi(src1, src2, cr);
  } else {
    mov(scratch, src2);
    cmplw(src1, scratch, cr);
  }
}
3846 | |
3847 | |
// ra = rs & rb. The immediate short form andi. always sets CR0, so it is
// only usable when the caller asked for SetRC; otherwise the immediate is
// materialized in r0 first (mov handles any relocation).
void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    and_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
      andi(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));  // r0 is about to be clobbered as scratch
      mov(r0, rb);
      and_(ra, rs, r0, rc);
    }
  }
}


// ra = rs | rb. Unlike andi., ori does NOT set CR0, so the short form is
// only usable when the caller asked for LeaveRC.
void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
  if (rb.is_reg()) {
    orx(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
      ori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));  // r0 is about to be clobbered as scratch
      mov(r0, rb);
      orx(ra, rs, r0, rc);
    }
  }
}


// ra = rs ^ rb. Same short-form constraints as Or: xori leaves CR0 alone.
void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
                         RCBit rc) {
  if (rb.is_reg()) {
    xor_(ra, rs, rb.rm(), rc);
  } else {
    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
      xori(ra, rs, rb);
    } else {
      // mov handles the relocation.
      DCHECK(!rs.is(r0));  // r0 is about to be clobbered as scratch
      mov(r0, rb);
      xor_(ra, rs, r0, rc);
    }
  }
}
3896 | |
3897 | |
// Signed compare of src1 against a Smi constant. On PPC64 the Smi may not
// fit an immediate field, so it is loaded into scratch first; on PPC32 the
// Cmpi helper handles immediate-size selection.
void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                                   CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  cmp(src1, scratch, cr);
#else
  Cmpi(src1, Operand(smi), scratch, cr);
#endif
}


// Unsigned compare of src1 against a Smi constant.
void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                                    CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  cmpl(src1, scratch, cr);
#else
  Cmpli(src1, Operand(smi), scratch, cr);
#endif
}


// dst = src + Smi constant.
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  add(dst, src, scratch);
#else
  Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
#endif
}


// dst = src - Smi constant (implemented as adding the negation on PPC32).
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  sub(dst, src, scratch);
#else
  Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
#endif
}


// dst = src & Smi constant, optionally setting CR0 (rc).
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  LoadSmiLiteral(scratch, smi);
  and_(dst, src, scratch, rc);
#else
  And(dst, src, Operand(smi), rc);
#endif
}
3951 | |
3952 | |
// Load a "pointer" sized value from the memory location.
// scratch is required only when the offset does not fit the 16-bit d-form
// displacement. On PPC64 the ld instruction needs a 4-byte-aligned
// displacement (DS-form), hence the misalignment adjustment below.
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
    ldx(dst, MemOperand(mem.ra(), scratch));
#else
    lwzx(dst, MemOperand(mem.ra(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // Todo: enhance to use scratch if dst is unsuitable
      DCHECK(!dst.is(r0));  // dst doubles as the adjusted base register
      addi(dst, mem.ra(), Operand((offset & 3) - 4));
      ld(dst, MemOperand(dst, (offset & ~3) + 4));
    } else {
      ld(dst, mem);
    }
#else
    lwz(dst, mem);
#endif
  }
}
3984 | |
3985 | |
// Store a "pointer" sized value to the memory location.
// Mirrors LoadP: scratch is needed for out-of-range offsets and (on PPC64)
// for misaligned displacements, since std requires a 4-byte-aligned
// DS-form displacement.
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    /* cannot use d-form */
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
#if V8_TARGET_ARCH_PPC64
    stdx(src, MemOperand(mem.ra(), scratch));
#else
    stwx(src, MemOperand(mem.ra(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // a suitable scratch is required here
      DCHECK(!scratch.is(no_reg));
      if (scratch.is(r0)) {
        // r0 cannot serve as a base register; fall back to indexed form.
        LoadIntLiteral(scratch, offset);
        stdx(src, MemOperand(mem.ra(), scratch));
      } else {
        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
        std(src, MemOperand(scratch, (offset & ~3) + 4));
      }
    } else {
      std(src, mem);
    }
#else
    stw(src, mem);
#endif
  }
}
4022 | |
// Load a 32-bit word with sign extension into a pointer-sized register.
// On PPC64 lwa requires a 4-byte-aligned DS-form displacement, hence the
// misalignment adjustment; on PPC32 a plain lwz suffices.
void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
                                   Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
    lwax(dst, MemOperand(mem.ra(), scratch));
  } else {
#if V8_TARGET_ARCH_PPC64
    int misaligned = (offset & 3);
    if (misaligned) {
      // adjust base to conform to offset alignment requirements
      // Todo: enhance to use scratch if dst is unsuitable
      DCHECK(!dst.is(r0));  // dst doubles as the adjusted base register
      addi(dst, mem.ra(), Operand((offset & 3) - 4));
      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
    } else {
      lwa(dst, mem);
    }
#else
    lwz(dst, mem);
#endif
  }
}
4048 | |
4049 | |
// Variable length depending on whether offset fits into immediate field.
// MemOperand currently only supports d-form.
// Zero-extending 32-bit word load.
void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lwzx(dst, MemOperand(base, scratch));
  } else {
    lwz(dst, mem);
  }
}


// Variable length depending on whether offset fits into immediate field.
// MemOperand current only supports d-form.
// 32-bit word store.
void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stwx(src, MemOperand(base, scratch));
  } else {
    stw(src, mem);
  }
}
4080 | |
4081 | |
// Sign-extending half-word (16-bit) load; indexed form when the offset
// does not fit the 16-bit displacement.
void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
                                       Register scratch) {
  int offset = mem.offset();

  if (!is_int16(offset)) {
    DCHECK(!scratch.is(no_reg));
    mov(scratch, Operand(offset));
    lhax(dst, MemOperand(mem.ra(), scratch));
  } else {
    lha(dst, mem);
  }
}


// Variable length depending on whether offset fits into immediate field.
// MemOperand currently only supports d-form.
// Zero-extending half-word load.
void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
                                  Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lhzx(dst, MemOperand(base, scratch));
  } else {
    lhz(dst, mem);
  }
}


// Variable length depending on whether offset fits into immediate field.
// MemOperand current only supports d-form.
// Half-word store.
void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    sthx(src, MemOperand(base, scratch));
  } else {
    sth(src, mem);
  }
}
4126 | |
4127 | |
// Variable length depending on whether offset fits into immediate field.
// MemOperand currently only supports d-form.
// Zero-extending byte load.
void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
                              Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    lbzx(dst, MemOperand(base, scratch));
  } else {
    lbz(dst, mem);
  }
}


// Variable length depending on whether offset fits into immediate field.
// MemOperand current only supports d-form.
// Byte store.
void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
                               Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    LoadIntLiteral(scratch, offset);
    stbx(src, MemOperand(base, scratch));
  } else {
    stb(src, mem);
  }
}
4158 | |
4159 | |
// Load a value of the given Crankshaft representation from memory,
// selecting the width/extension-correct load helper. Integer8 is loaded
// then sign-extended with extsb; the unsigned forms rely on the
// zero-extending loads.
void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
                                        Representation r, Register scratch) {
  DCHECK(!r.IsDouble());  // doubles go through the FP load path, not here
  if (r.IsInteger8()) {
    LoadByte(dst, mem, scratch);
    extsb(dst, dst);
  } else if (r.IsUInteger8()) {
    LoadByte(dst, mem, scratch);
  } else if (r.IsInteger16()) {
    LoadHalfWordArith(dst, mem, scratch);
  } else if (r.IsUInteger16()) {
    LoadHalfWord(dst, mem, scratch);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    LoadWordArith(dst, mem, scratch);
#endif
  } else {
    LoadP(dst, mem, scratch);
  }
}
4180 | 3515 |
4181 | |
// Store a value of the given Crankshaft representation to memory,
// selecting the width-correct store helper. Pointer-sized stores assert
// the Smi/HeapObject invariant of the representation first.
void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
                                         Representation r, Register scratch) {
  DCHECK(!r.IsDouble());  // doubles go through the FP store path, not here
  if (r.IsInteger8() || r.IsUInteger8()) {
    StoreByte(src, mem, scratch);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    StoreHalfWord(src, mem, scratch);
#if V8_TARGET_ARCH_PPC64
  } else if (r.IsInteger32()) {
    StoreWord(src, mem, scratch);
#endif
  } else {
    if (r.IsHeapObject()) {
      AssertNotSmi(src);
    } else if (r.IsSmi()) {
      AssertSmi(src);
    }
    StoreP(src, mem, scratch);
  }
}
4202 | 3536 |
4203 | |
// Load a double from memory; indexed form when the offset does not fit
// the 16-bit d-form displacement.
void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
                                Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    lfdx(dst, MemOperand(base, scratch));
  } else {
    lfd(dst, mem);
  }
}


// Store a double to memory; indexed form for out-of-range offsets.
void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
                                 Register scratch) {
  Register base = mem.ra();
  int offset = mem.offset();

  if (!is_int16(offset)) {
    mov(scratch, Operand(offset));
    stfdx(src, MemOperand(base, scratch));
  } else {
    stfd(src, mem);
  }
}
4230 | |
4231 | |
// Checks whether an AllocationMemento immediately follows the JSArray in
// new space. Falls through with the condition register reflecting the
// memento-map compare on a hit; branches to no_memento_found when the
// candidate address is outside the new-space window.
void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
                                                     Register scratch_reg,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());
  // scratch = address one past where a trailing memento would end.
  addi(scratch_reg, receiver_reg,
       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
  Cmpi(scratch_reg, Operand(new_space_start), r0);
  blt(no_memento_found);
  mov(ip, Operand(new_space_allocation_top));
  LoadP(ip, MemOperand(ip));
  cmp(scratch_reg, ip);
  bgt(no_memento_found);
  // Load the candidate memento's map and compare against the canonical
  // allocation-memento map; caller inspects the resulting condition.
  LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
  Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
       r0);
}
4251 | |
4252 | 3555 |
4253 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, | 3556 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3, |
4254 Register reg4, Register reg5, | 3557 Register reg4, Register reg5, |
4255 Register reg6) { | 3558 Register reg6) { |
4256 RegList regs = 0; | 3559 RegList regs = 0; |
4257 if (reg1.is_valid()) regs |= reg1.bit(); | 3560 if (reg1.is_valid()) regs |= reg1.bit(); |
4258 if (reg2.is_valid()) regs |= reg2.bit(); | 3561 if (reg2.is_valid()) regs |= reg2.bit(); |
4259 if (reg3.is_valid()) regs |= reg3.bit(); | 3562 if (reg3.is_valid()) regs |= reg3.bit(); |
4260 if (reg4.is_valid()) regs |= reg4.bit(); | 3563 if (reg4.is_valid()) regs |= reg4.bit(); |
4261 if (reg5.is_valid()) regs |= reg5.bit(); | 3564 if (reg5.is_valid()) regs |= reg5.bit(); |
4262 if (reg6.is_valid()) regs |= reg6.bit(); | 3565 if (reg6.is_valid()) regs |= reg6.bit(); |
4263 | 3566 |
4264 const RegisterConfiguration* config = | 3567 const RegisterConfiguration* config = |
4265 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); | 3568 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); |
4266 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { | 3569 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) { |
4267 int code = config->GetAllocatableGeneralCode(i); | 3570 int code = config->GetAllocatableGeneralCode(i); |
4268 Register candidate = Register::from_code(code); | 3571 Register candidate = Register::from_code(code); |
4269 if (regs & candidate.bit()) continue; | 3572 if (regs & candidate.bit()) continue; |
4270 return candidate; | 3573 return candidate; |
4271 } | 3574 } |
4272 UNREACHABLE(); | 3575 UNREACHABLE(); |
4273 return no_reg; | 3576 return no_reg; |
4274 } | 3577 } |
4275 | 3578 |
4276 | |
// Walks object's prototype chain and branches to 'found' if any prototype
// is a special receiver type (below JS_OBJECT_TYPE) or has dictionary
// elements. Falls through when the chain terminates at null.
void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
                                                      Register scratch0,
                                                      Register scratch1,
                                                      Label* found) {
  DCHECK(!scratch1.is(scratch0));
  Register current = scratch0;
  Label loop_again, end;

  // scratch contained elements pointer.
  mr(current, object);
  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  beq(&end);  // empty prototype chain: nothing to inspect

  // Loop based on the map going up the prototype chain.
  bind(&loop_again);
  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));

  // Instance types below JS_OBJECT_TYPE (proxies, values) count as hits.
  STATIC_ASSERT(JS_PROXY_TYPE < JS_OBJECT_TYPE);
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  lbz(scratch1, FieldMemOperand(current, Map::kInstanceTypeOffset));
  cmpi(scratch1, Operand(JS_OBJECT_TYPE));
  blt(found);

  // Dictionary-mode elements also count as a hit.
  lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
  DecodeField<Map::ElementsKindBits>(scratch1);
  cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
  beq(found);
  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
  CompareRoot(current, Heap::kNullValueRootIndex);
  bne(&loop_again);

  bind(&end);
}
4312 | 3614 |
// Materializes a full-width immediate (with relocation if requested) into
// dst, using insert-immediate instructions so the encoding length is
// fixed and patchable.
void MacroAssembler::mov(Register dst, const Operand& src) {
  if (src.rmode_ != kRelocInfo_NONEPTR) {
    // some form of relocation needed
    RecordRelocInfo(src.rmode_, src.imm_);
  }

#if V8_TARGET_ARCH_S390X
  int64_t value = src.immediate();
  int32_t hi_32 = static_cast<int64_t>(value) >> 32;
  int32_t lo_32 = static_cast<int32_t>(value);

  // Insert the two halves independently (iihf = high 32, iilf = low 32).
  iihf(dst, Operand(hi_32));
  iilf(dst, Operand(lo_32));
#else
  int value = src.immediate();
  iilf(dst, Operand(value));
#endif
}
| 3633 |
| 3634 void MacroAssembler::Mul(Register dst, Register src1, Register src2) { |
| 3635 if (dst.is(src2)) { |
| 3636 MulP(dst, src1); |
| 3637 } else if (dst.is(src1)) { |
| 3638 MulP(dst, src2); |
| 3639 } else { |
| 3640 Move(dst, src1); |
| 3641 MulP(dst, src2); |
| 3642 } |
| 3643 } |
| 3644 |
// Divides the register pair anchored at 'dividend' by 'divider' in place.
void MacroAssembler::DivP(Register dividend, Register divider) {
  // have to make sure the src and dst are reg pairs
  DCHECK(dividend.code() % 2 == 0);  // divide ops require an even/odd pair
#if V8_TARGET_ARCH_S390X
  dsgr(dividend, divider);
#else
  dr(dividend, divider);
#endif
}
| 3654 |
// Pointer-sized multiply by immediate: dst = dst * opnd.
void MacroAssembler::MulP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  msgfi(dst, opnd);
#else
  msfi(dst, opnd);
#endif
}

// Pointer-sized multiply by register: dst = dst * src.
void MacroAssembler::MulP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  msgr(dst, src);
#else
  msr(dst, src);
#endif
}
| 3670 |
| 3671 void MacroAssembler::MulP(Register dst, const MemOperand& opnd) { |
| 3672 #if V8_TARGET_ARCH_S390X |
| 3673 if (is_uint16(opnd.offset())) { |
| 3674 ms(dst, opnd); |
| 3675 } else if (is_int20(opnd.offset())) { |
| 3676 msy(dst, opnd); |
| 3677 } else { |
| 3678 UNIMPLEMENTED(); |
| 3679 } |
| 3680 #else |
| 3681 if (is_int20(opnd.offset())) { |
| 3682 msg(dst, opnd); |
| 3683 } else { |
| 3684 UNIMPLEMENTED(); |
| 3685 } |
| 3686 #endif |
| 3687 } |
| 3688 |
//----------------------------------------------------------------------------
// Add Instructions
//----------------------------------------------------------------------------

// Add 32-bit (Register dst = Register dst + Immediate opnd)
void MacroAssembler::Add32(Register dst, const Operand& opnd) {
  // ahi is the 2-byte short form for 16-bit immediates; afi takes full 32.
  if (is_int16(opnd.immediate()))
    ahi(dst, opnd);
  else
    afi(dst, opnd);
}

// Add Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  if (is_int16(opnd.immediate()))
    aghi(dst, opnd);
  else
    agfi(dst, opnd);
#else
  Add32(dst, opnd);
#endif
}
| 3712 |
// Add 32-bit (Register dst = Register src + Immediate opnd)
void MacroAssembler::Add32(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) {
    // ahik fuses the copy and add when distinct-operand facility exists.
    if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
      ahik(dst, src, opnd);
      return;
    }
    lr(dst, src);
  }
  Add32(dst, opnd);
}

// Add Pointer Size (Register dst = Register src + Immediate opnd)
void MacroAssembler::AddP(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) {
    // Fused copy+add when distinct-operand facility exists.
    if (CpuFeatures::IsSupported(DISTINCT_OPS) && is_int16(opnd.immediate())) {
      AddPImm_RRI(dst, src, opnd);
      return;
    }
    LoadRR(dst, src);
  }
  AddP(dst, opnd);
}
| 3736 |
// Add 32-bit (Register dst = Register dst + Register src)
void MacroAssembler::Add32(Register dst, Register src) { ar(dst, src); }

// Add Pointer Size (Register dst = Register dst + Register src)
void MacroAssembler::AddP(Register dst, Register src) { AddRR(dst, src); }

// Add Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) + Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  agfr(dst, src);  // add with sign extension of the 32-bit src
#else
  ar(dst, src);
#endif
}
| 3754 |
// Add 32-bit (Register dst = Register src1 + Register src2)
void MacroAssembler::Add32(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
    // as AR is a smaller instruction
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      ark(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;  // commutative: fold the other operand instead
  }
  ar(dst, src2);
}

// Add Pointer Size (Register dst = Register src1 + Register src2)
void MacroAssembler::AddP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer to generate AR/AGR, over the non clobbering ARK/AGRK
    // as AR is a smaller instruction
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      AddP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    src2 = src1;  // commutative: fold the other operand instead
  }
  AddRR(dst, src2);
}
| 3788 |
// Add Pointer Size with src extension
// (Register dst (ptr) = Register dst (ptr) + Register src1 (ptr) +
//                       Register src2 (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, Register src1,
                                    Register src2) {
#if V8_TARGET_ARCH_S390X
  if (dst.is(src2)) {
    // The source we need to sign extend is the same as result.
    lgfr(dst, src2);
    agr(dst, src1);
  } else {
    if (!dst.is(src1)) LoadRR(dst, src1);
    agfr(dst, src2);  // add src2 with sign extension
  }
#else
  AddP(dst, src1, src2);
#endif
}
| 3809 |
// Add 32-bit (Register-Memory)
void MacroAssembler::Add32(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  // a is the short RX form (12-bit unsigned disp); ay covers 20-bit signed.
  if (is_uint12(opnd.offset()))
    a(dst, opnd);
  else
    ay(dst, opnd);
}

// Add Pointer Size (Register-Memory)
void MacroAssembler::AddP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(opnd.offset()));
  ag(dst, opnd);
#else
  Add32(dst, opnd);
#endif
}

// Add Pointer Size with src extension
// (Register dst (ptr) = Register dst (ptr) + Mem opnd (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::AddP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(opnd.offset()));
  agf(dst, opnd);  // add 32-bit memory operand with sign extension
#else
  Add32(dst, opnd);
#endif
}
| 3841 |
// Add 32-bit (Memory - Immediate)
// asi adds a signed 8-bit immediate directly to storage; requires the
// general-instructions-extension facility.
void MacroAssembler::Add32(const MemOperand& opnd, const Operand& imm) {
  DCHECK(is_int8(imm.immediate()));
  DCHECK(is_int20(opnd.offset()));
  DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
  asi(opnd, imm);
}

// Add Pointer-sized (Memory - Immediate)
void MacroAssembler::AddP(const MemOperand& opnd, const Operand& imm) {
  DCHECK(is_int8(imm.immediate()));
  DCHECK(is_int20(opnd.offset()));
  DCHECK(CpuFeatures::IsSupported(GENERAL_INSTR_EXT));
#if V8_TARGET_ARCH_S390X
  agsi(opnd, imm);
#else
  asi(opnd, imm);
#endif
}
| 3861 |
//----------------------------------------------------------------------------
// Add Logical Instructions
//----------------------------------------------------------------------------

// Add Logical 32-bit (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddLogical(Register dst, const Operand& imm) {
  alfi(dst, imm);
}

// Add Logical Pointer Size (Register dst = Register dst + Immediate opnd)
void MacroAssembler::AddLogicalP(Register dst, const Operand& imm) {
#ifdef V8_TARGET_ARCH_S390X
  algfi(dst, imm);
#else
  AddLogical(dst, imm);
#endif
}

// Add Logical 32-bit (Register-Memory)
void MacroAssembler::AddLogical(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  // al_z is the short RX form (12-bit unsigned disp); aly covers 20-bit.
  if (is_uint12(opnd.offset()))
    al_z(dst, opnd);
  else
    aly(dst, opnd);
}

// Add Logical Pointer Size (Register-Memory)
void MacroAssembler::AddLogicalP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(opnd.offset()));
  alg(dst, opnd);
#else
  AddLogical(dst, opnd);
#endif
}
| 3898 |
| 3899 //---------------------------------------------------------------------------- |
| 3900 // Subtract Instructions |
| 3901 //---------------------------------------------------------------------------- |
| 3902 |
| 3903 // Subtract 32-bit (Register dst = Register dst - Immediate opnd) |
| 3904 void MacroAssembler::Sub32(Register dst, const Operand& imm) { |
| 3905 Add32(dst, Operand(-(imm.imm_))); |
| 3906 } |
| 3907 |
| 3908 // Subtract Pointer Size (Register dst = Register dst - Immediate opnd) |
| 3909 void MacroAssembler::SubP(Register dst, const Operand& imm) { |
| 3910 AddP(dst, Operand(-(imm.imm_))); |
| 3911 } |
| 3912 |
// Subtract 32-bit (Register dst = Register src - Immediate opnd)
// Delegates to the three-operand add with a negated immediate.
void MacroAssembler::Sub32(Register dst, Register src, const Operand& imm) {
  Add32(dst, src, Operand(-(imm.imm_)));
}

// Subtract Pointer Sized (Register dst = Register src - Immediate opnd)
void MacroAssembler::SubP(Register dst, Register src, const Operand& imm) {
  AddP(dst, src, Operand(-(imm.imm_)));
}
| 3922 |
// Subtract 32-bit (Register dst = Register dst - Register src)
void MacroAssembler::Sub32(Register dst, Register src) { sr(dst, src); }

// Subtract Pointer Size (Register dst = Register dst - Register src)
// SubRR expands to SR or SGR depending on target word size.
void MacroAssembler::SubP(Register dst, Register src) { SubRR(dst, src); }
| 3928 |
// Subtract Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary (SGFR); on 31-bit targets this is a plain SR.
void MacroAssembler::SubP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  sgfr(dst, src);
#else
  sr(dst, src);
#endif
}
| 3940 |
// Subtract 32-bit (Register = Register - Register)
void MacroAssembler::Sub32(Register dst, Register src1, Register src2) {
  // Use non-clobbering version if possible
  if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
    srk(dst, src1, src2);
    return;
  }
  if (!dst.is(src1) && !dst.is(src2)) lr(dst, src1);
  // In scenario where we have dst = src - dst, we need to swap and negate
  if (!dst.is(src1) && dst.is(src2)) {
    sr(dst, src1);  // dst = src2 - src1
    lcr(dst, dst);  // dst = -(src2 - src1) = src1 - src2
  } else {
    // Here dst aliases src1 (or was just loaded from it).
    sr(dst, src2);
  }
}
| 3957 |
// Subtract Pointer Sized (Register = Register - Register)
void MacroAssembler::SubP(Register dst, Register src1, Register src2) {
  // Use non-clobbering version if possible
  if (CpuFeatures::IsSupported(DISTINCT_OPS) && !dst.is(src1)) {
    SubP_RRR(dst, src1, src2);
    return;
  }
  if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);
  // In scenario where we have dst = src - dst, we need to swap and negate
  if (!dst.is(src1) && dst.is(src2)) {
    SubP(dst, src1);             // dst = src2 - src1
    LoadComplementRR(dst, dst);  // dst = -(src2 - src1) = src1 - src2
  } else {
    // Here dst aliases src1 (or was just loaded from it).
    SubP(dst, src2);
  }
}
| 3974 |
// Subtract Pointer Size with src extension
// (Register dst(ptr) = Register dst (ptr) - Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.
void MacroAssembler::SubP_ExtendSrc(Register dst, Register src1,
                                    Register src2) {
#if V8_TARGET_ARCH_S390X
  if (!dst.is(src1) && !dst.is(src2)) LoadRR(dst, src1);

  // In scenario where we have dst = src - dst, we need to swap and negate
  if (!dst.is(src1) && dst.is(src2)) {
    lgfr(dst, dst);             // Sign extend this operand first.
    SubP(dst, src1);            // dst = src2 - src1
    LoadComplementRR(dst, dst);  // dst = -(src2 - src1) = src1 - src2
  } else {
    // SGFR sign-extends src2 as it subtracts.
    sgfr(dst, src2);
  }
#else
  // 31-bit: no extension needed.
  SubP(dst, src1, src2);
#endif
}
| 3996 |
// Subtract 32-bit (Register-Memory)
// Uses S (uint12 offset) or SY (int20 offset).
void MacroAssembler::Sub32(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    s(dst, opnd);
  else
    sy(dst, opnd);
}

// Subtract Pointer Sized (Register - Memory)
void MacroAssembler::SubP(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  sg(dst, opnd);
#else
  Sub32(dst, opnd);
#endif
}
| 4014 |
| 4015 void MacroAssembler::MovIntToFloat(DoubleRegister dst, Register src) { |
| 4016 sllg(src, src, Operand(32)); |
| 4017 ldgr(dst, src); |
| 4018 } |
| 4019 |
// Move the float bit pattern in dst's high word out to a GPR as a
// 32-bit integer (reverse of MovIntToFloat).
void MacroAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
  lgdr(dst, src);
  // The 32-bit float occupies the high word of the FPR; shift it down.
  srlg(dst, dst, Operand(32));
}
| 4024 |
// Subtract Pointer Size with src extension (Register - Memory).
// The 32-bit memory operand is sign extended to 64 bits (SGF) before
// subtracting; on 31-bit targets this is a plain 32-bit subtract.
void MacroAssembler::SubP_ExtendSrc(Register dst, const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(opnd.offset()));
  sgf(dst, opnd);
#else
  Sub32(dst, opnd);
#endif
}
| 4033 |
| 4034 //---------------------------------------------------------------------------- |
| 4035 // Subtract Logical Instructions |
| 4036 //---------------------------------------------------------------------------- |
| 4037 |
// Subtract Logical 32-bit (Register - Memory)
// Unsigned subtract; uses SL (uint12 offset) or SLY (int20 offset).
void MacroAssembler::SubLogical(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    sl(dst, opnd);
  else
    sly(dst, opnd);
}

// Subtract Logical Pointer Sized (Register - Memory)
// NOTE(review): SLGF subtracts a 32-bit memory operand zero-extended to
// 64 bits, not a full 64-bit operand (that would be SLG) — confirm all
// callers only pass 32-bit memory values here.
void MacroAssembler::SubLogicalP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  slgf(dst, opnd);
#else
  SubLogical(dst, opnd);
#endif
}
| 4056 |
// Subtract Logical Pointer Size with src extension
// (Register dst (ptr) = Register dst (ptr) - Mem opnd (32 | 32->64))
// The 32-bit memory operand is zero extended to 64-bit if necessary
// (SLGF); identical to SubLogical on 31-bit targets.
void MacroAssembler::SubLogicalP_ExtendSrc(Register dst,
                                           const MemOperand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(opnd.offset()));
  slgf(dst, opnd);
#else
  SubLogical(dst, opnd);
#endif
}
| 4070 |
| 4071 //---------------------------------------------------------------------------- |
| 4072 // Bitwise Operations |
| 4073 //---------------------------------------------------------------------------- |
| 4074 |
// AND 32-bit - dst = dst & src
void MacroAssembler::And(Register dst, Register src) { nr(dst, src); }

// AND Pointer Size - dst = dst & src
// AndRR expands to NR or NGR depending on target word size.
void MacroAssembler::AndP(Register dst, Register src) { AndRR(dst, src); }
| 4080 |
// Non-clobbering AND 32-bit - dst = src1 & src2
void MacroAssembler::And(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand NR over the non-clobbering NRK when dst
    // aliases an input, as NR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      nrk(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    // AND is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  And(dst, src2);
}
| 4097 |
// Non-clobbering AND pointer size - dst = src1 & src2
void MacroAssembler::AndP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand NR/NGR over the non-clobbering NRK/NGRK
    // when dst aliases an input, as the former are smaller instructions.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      AndP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    // AND is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  AndP(dst, src2);
}
| 4114 |
// AND 32-bit (Reg - Mem)
// Uses N (uint12 offset) or NY (int20 offset).
void MacroAssembler::And(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    n(dst, opnd);
  else
    ny(dst, opnd);
}

// AND Pointer Size (Reg - Mem)
void MacroAssembler::AndP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  ng(dst, opnd);
#else
  And(dst, opnd);
#endif
}
| 4133 |
// AND 32-bit - dst = dst & imm
void MacroAssembler::And(Register dst, const Operand& opnd) { nilf(dst, opnd); }

// AND Pointer Size - dst = dst & imm
void MacroAssembler::AndP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  intptr_t value = opnd.imm_;
  // Skip the high-word AND when the upper 32 bits are all ones — ANDing
  // with all ones is a no-op.
  if (value >> 32 != -1) {
    // this may not work b/c condition code won't be set correctly
    nihf(dst, Operand(value >> 32));
  }
  nilf(dst, Operand(value & 0xFFFFFFFF));
#else
  And(dst, opnd);
#endif
}
| 4150 |
// AND 32-bit - dst = src & imm
void MacroAssembler::And(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  nilf(dst, opnd);
}
| 4156 |
// AND Pointer Size - dst = src & imm
// When the mask is a single contiguous run of 1-bits, a single RISBG
// (rotate-then-insert selected bits) does the mask-and-copy in one
// instruction; otherwise fall back to copy + AndP.
void MacroAssembler::AndP(Register dst, Register src, const Operand& opnd) {
  // Try to exploit RISBG first
  intptr_t value = opnd.imm_;
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    intptr_t shifted_value = value;
    int trailing_zeros = 0;

    // We start checking how many trailing zeros are left at the end.
    // (Arithmetic shift: an all-ones-above pattern converges to -1.)
    while ((0 != shifted_value) && (0 == (shifted_value & 1))) {
      trailing_zeros++;
      shifted_value >>= 1;
    }

    // If temp (value with right-most set of zeros shifted out) is 1 less
    // than power of 2, we have consecutive bits of 1.
    // Special case: If shift_value is zero, we cannot use RISBG, as it requires
    // selection of at least 1 bit.
    if ((0 != shifted_value) && base::bits::IsPowerOfTwo64(shifted_value + 1)) {
      int startBit =
          base::bits::CountLeadingZeros64(shifted_value) - trailing_zeros;
      int endBit = 63 - trailing_zeros;
      // Start: startBit, End: endBit, Shift = 0, true = zero unselected bits.
      risbg(dst, src, Operand(startBit), Operand(endBit), Operand::Zero(),
            true);
      return;
    } else if (-1 == shifted_value) {
      // A Special case in which all top bits up to MSB are 1's. In this case,
      // we can set startBit to be 0.
      int endBit = 63 - trailing_zeros;
      risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true);
      return;
    }
  }

  // If we are &'ing zero, we can just whack the dst register and skip copy
  if (!dst.is(src) && (0 != value)) LoadRR(dst, src);
  AndP(dst, opnd);
}
| 4196 |
// OR 32-bit - dst = dst | src
void MacroAssembler::Or(Register dst, Register src) { or_z(dst, src); }

// OR Pointer Size - dst = dst | src
// OrRR expands to OR or OGR depending on target word size.
void MacroAssembler::OrP(Register dst, Register src) { OrRR(dst, src); }
| 4202 |
// Non-clobbering OR 32-bit - dst = src1 | src2
void MacroAssembler::Or(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand OR over the non-clobbering ORK when dst
    // aliases an input, as OR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      ork(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    // OR is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  Or(dst, src2);
}
| 4219 |
// Non-clobbering OR pointer size - dst = src1 | src2
void MacroAssembler::OrP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand OR/OGR over the non-clobbering ORK/OGRK
    // when dst aliases an input, as the former are smaller instructions.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      OrP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    // OR is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  OrP(dst, src2);
}
| 4236 |
// OR 32-bit (Reg - Mem)
// Uses O (uint12 offset) or OY (int20 offset).
void MacroAssembler::Or(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    o(dst, opnd);
  else
    oy(dst, opnd);
}

// OR Pointer Size (Reg - Mem)
void MacroAssembler::OrP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  og(dst, opnd);
#else
  Or(dst, opnd);
#endif
}
| 4255 |
// OR 32-bit - dst = dst | imm
void MacroAssembler::Or(Register dst, const Operand& opnd) { oilf(dst, opnd); }

// OR Pointer Size - dst = dst | imm
void MacroAssembler::OrP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  intptr_t value = opnd.imm_;
  // Skip the high-word OR when the upper 32 bits are zero — ORing with
  // zero is a no-op.
  if (value >> 32 != 0) {
    // this may not work b/c condition code won't be set correctly
    oihf(dst, Operand(value >> 32));
  }
  oilf(dst, Operand(value & 0xFFFFFFFF));
#else
  Or(dst, opnd);
#endif
}
| 4272 |
// OR 32-bit - dst = src | imm
void MacroAssembler::Or(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  oilf(dst, opnd);
}

// OR Pointer Size - dst = src | imm
void MacroAssembler::OrP(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) LoadRR(dst, src);
  OrP(dst, opnd);
}
| 4284 |
// XOR 32-bit - dst = dst ^ src
void MacroAssembler::Xor(Register dst, Register src) { xr(dst, src); }

// XOR Pointer Size - dst = dst ^ src
// XorRR expands to XR or XGR depending on target word size.
void MacroAssembler::XorP(Register dst, Register src) { XorRR(dst, src); }
| 4290 |
// Non-clobbering XOR 32-bit - dst = src1 ^ src2
void MacroAssembler::Xor(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand XR over the non-clobbering XRK when dst
    // aliases an input, as XR is a smaller instruction.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      xrk(dst, src1, src2);
      return;
    } else {
      lr(dst, src1);
    }
  } else if (dst.is(src2)) {
    // XOR is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  Xor(dst, src2);
}
| 4307 |
// Non-clobbering XOR pointer size - dst = src1 ^ src2
void MacroAssembler::XorP(Register dst, Register src1, Register src2) {
  if (!dst.is(src1) && !dst.is(src2)) {
    // We prefer the two-operand XR/XGR over the non-clobbering XRK/XGRK
    // when dst aliases an input, as the former are smaller instructions.
    if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
      XorP_RRR(dst, src1, src2);
      return;
    } else {
      LoadRR(dst, src1);
    }
  } else if (dst.is(src2)) {
    // XOR is commutative, so fold the aliased operand away.
    src2 = src1;
  }
  XorP(dst, src2);
}
| 4324 |
// XOR 32-bit (Reg - Mem)
// Uses X (uint12 offset) or XY (int20 offset).
void MacroAssembler::Xor(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    x(dst, opnd);
  else
    xy(dst, opnd);
}

// XOR Pointer Size (Reg - Mem)
void MacroAssembler::XorP(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  xg(dst, opnd);
#else
  Xor(dst, opnd);
#endif
}
| 4343 |
// XOR 32-bit - dst = dst ^ imm
void MacroAssembler::Xor(Register dst, const Operand& opnd) { xilf(dst, opnd); }

// XOR Pointer Size - dst = dst ^ imm
// Unlike AndP/OrP, the high-word XIHF is emitted unconditionally, even
// when the upper 32 bits of the immediate are zero.
void MacroAssembler::XorP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  intptr_t value = opnd.imm_;
  xihf(dst, Operand(value >> 32));
  xilf(dst, Operand(value & 0xFFFFFFFF));
#else
  Xor(dst, opnd);
#endif
}
| 4357 |
// XOR 32-bit - dst = src ^ imm
void MacroAssembler::Xor(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) lr(dst, src);
  xilf(dst, opnd);
}

// XOR Pointer Size - dst = src ^ imm
void MacroAssembler::XorP(Register dst, Register src, const Operand& opnd) {
  if (!dst.is(src)) LoadRR(dst, src);
  XorP(dst, opnd);
}
| 4369 |
// Bitwise NOT of the full pointer-sized register: dst = ~dst.
// Implemented as XOR with all ones (both halves on 64-bit).
void MacroAssembler::NotP(Register dst) {
#if V8_TARGET_ARCH_S390X
  xihf(dst, Operand(0xFFFFFFFF));
  xilf(dst, Operand(0xFFFFFFFF));
#else
  XorP(dst, Operand(0xFFFFFFFF));
#endif
}
| 4378 |
// Materialize an immediate into dst (works the same as mov).
// Picks the shortest encoding: LHI/LGHI for 16-bit values, otherwise a
// full 32-bit insert (zero-extending LLILF on 64-bit, IILF on 31-bit).
void MacroAssembler::Load(Register dst, const Operand& opnd) {
  intptr_t value = opnd.immediate();
  if (is_int16(value)) {
#if V8_TARGET_ARCH_S390X
    lghi(dst, opnd);
#else
    lhi(dst, opnd);
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    llilf(dst, opnd);
#else
    iilf(dst, opnd);
#endif
  }
}
| 4396 |
// Load a 32-bit memory value into dst, sign-extending to 64 bits on
// 64-bit targets (LGF).
void MacroAssembler::Load(Register dst, const MemOperand& opnd) {
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  lgf(dst, opnd);  // 64<-32
#else
  if (is_uint12(opnd.offset())) {
    l(dst, opnd);
  } else {
    ly(dst, opnd);
  }
#endif
}
| 4409 |
| 4410 //----------------------------------------------------------------------------- |
| 4411 // Compare Helpers |
| 4412 //----------------------------------------------------------------------------- |
| 4413 |
// Compare 32-bit Register vs Register (signed compare, sets CC).
void MacroAssembler::Cmp32(Register src1, Register src2) { cr_z(src1, src2); }

// Compare Pointer Sized Register vs Register
void MacroAssembler::CmpP(Register src1, Register src2) {
#if V8_TARGET_ARCH_S390X
  cgr(src1, src2);
#else
  Cmp32(src1, src2);
#endif
}
| 4425 |
// Compare 32-bit Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::Cmp32(Register dst, const Operand& opnd) {
  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
    // Plain constant: prefer the short CHI when it fits in 16 bits.
    intptr_t value = opnd.immediate();
    if (is_int16(value))
      chi(dst, opnd);
    else
      cfi(dst, opnd);
  } else {
    // Need to generate relocation record here; CFI carries the full
    // 32-bit patchable immediate.
    RecordRelocInfo(opnd.rmode_, opnd.imm_);
    cfi(dst, opnd);
  }
}
| 4441 |
// Compare Pointer Sized Register vs Immediate
// This helper will set up proper relocation entries if required.
void MacroAssembler::CmpP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  if (opnd.rmode_ == kRelocInfo_NONEPTR) {
    cgfi(dst, opnd);
  } else {
    // Relocated constants may need all 64 bits: materialize into the r0
    // scratch register and compare register-register.
    mov(r0, opnd);  // Need to generate 64-bit relocation
    cgr(dst, r0);
  }
#else
  Cmp32(dst, opnd);
#endif
}
| 4456 |
// Compare 32-bit Register vs Memory
// Uses C (uint12 offset) or CY (int20 offset).
void MacroAssembler::Cmp32(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    c(dst, opnd);
  else
    cy(dst, opnd);
}

// Compare Pointer Size Register vs Memory
void MacroAssembler::CmpP(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  cg(dst, opnd);
#else
  Cmp32(dst, opnd);
#endif
}
| 4477 |
| 4478 //----------------------------------------------------------------------------- |
| 4479 // Compare Logical Helpers |
| 4480 //----------------------------------------------------------------------------- |
| 4481 |
// Compare Logical (unsigned) 32-bit Register vs Register
void MacroAssembler::CmpLogical32(Register dst, Register src) { clr(dst, src); }

// Compare Logical Pointer Sized Register vs Register
void MacroAssembler::CmpLogicalP(Register dst, Register src) {
#ifdef V8_TARGET_ARCH_S390X
  clgr(dst, src);
#else
  CmpLogical32(dst, src);
#endif
}
| 4493 |
// Compare Logical (unsigned) 32-bit Register vs Immediate
void MacroAssembler::CmpLogical32(Register dst, const Operand& opnd) {
  clfi(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Immediate
// CLGFI zero-extends a 32-bit immediate, so the constant must fit in
// 32 bits (checked by the DCHECK).
void MacroAssembler::CmpLogicalP(Register dst, const Operand& opnd) {
#if V8_TARGET_ARCH_S390X
  DCHECK(static_cast<uint32_t>(opnd.immediate() >> 32) == 0);
  clgfi(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}
| 4508 |
// Compare Logical (unsigned) 32-bit Register vs Memory
// Uses CL (uint12 offset) or CLY (int20 offset).
void MacroAssembler::CmpLogical32(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
  if (is_uint12(opnd.offset()))
    cl(dst, opnd);
  else
    cly(dst, opnd);
}

// Compare Logical Pointer Sized Register vs Memory
void MacroAssembler::CmpLogicalP(Register dst, const MemOperand& opnd) {
  // make sure offset is within 20 bit range
  DCHECK(is_int20(opnd.offset()));
#if V8_TARGET_ARCH_S390X
  clg(dst, opnd);
#else
  CmpLogical32(dst, opnd);
#endif
}
| 4529 |
| 4530 // Compare Logical Byte (Mem - Imm) |
| 4531 void MacroAssembler::CmpLogicalByte(const MemOperand& mem, const Operand& imm) { |
| 4532 DCHECK(is_uint8(imm.immediate())); |
| 4533 if (is_uint12(mem.offset())) |
| 4534 cli(mem, imm); |
| 4535 else |
| 4536 cliy(mem, imm); |
| 4537 } |
| 4538 |
| 4539 void MacroAssembler::Branch(Condition c, const Operand& opnd) { |
| 4540 intptr_t value = opnd.immediate(); |
| 4541 if (is_int16(value)) |
| 4542 brc(c, opnd); |
| 4543 else |
| 4544 brcl(c, opnd); |
| 4545 } |
| 4546 |
// Branch On Count. Decrement R1, and branch if R1 != 0.
void MacroAssembler::BranchOnCount(Register r1, Label* l) {
  int32_t offset = branch_offset(l);
  positions_recorder()->WriteRecordedPositions();
  if (is_int16(offset)) {
    // Single-instruction decrement-and-branch.
#if V8_TARGET_ARCH_S390X
    brctg(r1, Operand(offset));
#else
    brct(r1, Operand(offset));
#endif
  } else {
    // Fallback: explicit decrement then conditional branch.
    // NOTE(review): offset was computed before the AddP was emitted;
    // confirm branch_offset() remains valid across the extra instruction.
    AddP(r1, Operand(-1));
    Branch(ne, Operand(offset));
  }
}
| 4562 |
| 4563 void MacroAssembler::LoadIntLiteral(Register dst, int value) { |
| 4564 Load(dst, Operand(value)); |
| 4565 } |
| 4566 |
// Materialize a Smi constant (its tagged pointer bit pattern) into dst.
void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
  intptr_t value = reinterpret_cast<intptr_t>(smi);
#if V8_TARGET_ARCH_S390X
  DCHECK((value & 0xffffffff) == 0);
  // The smi value is loaded in upper 32-bits.  Lower 32-bit are zeros.
  llihf(dst, Operand(value >> 32));
#else
  llilf(dst, Operand(value));
#endif
}
| 4577 |
// Materialize a 64-bit double bit pattern into an FPR via a GPR scratch.
// Clobbers scratch.
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, uint64_t value,
                                       Register scratch) {
  uint32_t hi_32 = value >> 32;
  uint32_t lo_32 = static_cast<uint32_t>(value);

  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
  iihf(scratch, Operand(hi_32));
  iilf(scratch, Operand(lo_32));
  ldgr(result, scratch);
}

// Convenience overload taking a double; reinterprets its bits and
// delegates to the uint64_t version above.
void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
                                       Register scratch) {
  uint64_t int_val = bit_cast<uint64_t, double>(value);
  LoadDoubleLiteral(result, int_val, scratch);
}
| 4594 |
// Materialize a 32-bit float constant into an FPR. The float bits go in
// the high word (where a short float lives in an FPR); the low word is
// zeroed. Clobbers scratch.
void MacroAssembler::LoadFloat32Literal(DoubleRegister result, float value,
                                        Register scratch) {
  uint32_t hi_32 = bit_cast<uint32_t>(value);
  uint32_t lo_32 = 0;

  // Load the 64-bit value into a GPR, then transfer it to FPR via LDGR
  iihf(scratch, Operand(hi_32));
  iilf(scratch, Operand(lo_32));
  ldgr(result, scratch);
}
| 4605 |
// Signed compare of src1 against a Smi constant. On 64-bit the Smi does
// not fit a 32-bit immediate, so it is materialized into scratch first.
void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch) {
#if V8_TARGET_ARCH_S390X
  LoadSmiLiteral(scratch, smi);
  cgr(src1, scratch);
#else
  // CFI takes 32-bit immediate.
  cfi(src1, Operand(smi));
#endif
}

// Unsigned (logical) compare of src1 against a Smi constant.
void MacroAssembler::CmpLogicalSmiLiteral(Register src1, Smi* smi,
                                          Register scratch) {
#if V8_TARGET_ARCH_S390X
  LoadSmiLiteral(scratch, smi);
  clgr(src1, scratch);
#else
  // CLFI takes 32-bit immediate
  clfi(src1, Operand(smi));
#endif
}
| 4626 |
// dst = src + smi. On 64-bit the Smi constant is materialized into
// scratch; on 31-bit it fits an immediate add.
void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  LoadSmiLiteral(scratch, smi);
  AddP(dst, src, scratch);
#else
  AddP(dst, src, Operand(reinterpret_cast<intptr_t>(smi)));
#endif
}

// dst = src - smi (31-bit: add of the negated Smi value).
void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
                                   Register scratch) {
#if V8_TARGET_ARCH_S390X
  LoadSmiLiteral(scratch, smi);
  SubP(dst, src, scratch);
#else
  AddP(dst, src, Operand(-(reinterpret_cast<intptr_t>(smi))));
#endif
}
| 4646 |
// dst = src & smi. On 64-bit the Smi lives entirely in the upper 32
// bits (see LoadSmiLiteral), so only the high-word AND is needed.
void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi) {
  if (!dst.is(src)) LoadRR(dst, src);
#if V8_TARGET_ARCH_S390X
  DCHECK((reinterpret_cast<intptr_t>(smi) & 0xffffffff) == 0);
  int value = static_cast<int>(reinterpret_cast<intptr_t>(smi) >> 32);
  nihf(dst, Operand(value));
#else
  nilf(dst, Operand(reinterpret_cast<int>(smi)));
#endif
}
| 4657 |
// Load a "pointer" sized value from the memory location.
// If the offset does not fit the 20-bit displacement and a scratch is
// provided, the offset is materialized and used as an index register.
// NOTE(review): if the offset exceeds 20 bits and scratch is no_reg,
// the fallback path emits an out-of-range displacement — callers are
// presumably expected to pass scratch in that case; confirm.
void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!scratch.is(no_reg) && !is_int20(offset)) {
    /* cannot use d-form */
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lg(dst, MemOperand(mem.rb(), scratch));
#else
    l(dst, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lg(dst, mem);
#else
    if (is_uint12(offset)) {
      l(dst, mem);
    } else {
      ly(dst, mem);
    }
#endif
  }
}
| 4683 |
// Store a "pointer" sized value to the memory location.
// Offsets beyond 20 bits are materialized into scratch (required, and
// must not be r0 since r0 as an index register means "no index").
void MacroAssembler::StoreP(Register src, const MemOperand& mem,
                            Register scratch) {
  if (!is_int20(mem.offset())) {
    DCHECK(!scratch.is(no_reg));
    DCHECK(!scratch.is(r0));
    LoadIntLiteral(scratch, mem.offset());
#if V8_TARGET_ARCH_S390X
    stg(src, MemOperand(mem.rb(), scratch));
#else
    st(src, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    stg(src, mem);
#else
    // StoreW will try to generate ST if offset fits, otherwise
    // it'll generate STY.
    StoreW(src, mem);
#endif
  }
}
| 4706 |
// Store a "pointer" sized constant to the memory location.
// Uses a single move-immediate (MVGHI/MVHI) when available and the
// operand/offset qualify; otherwise materializes into scratch and
// stores. Clobbers scratch on the fallback path.
void MacroAssembler::StoreP(const MemOperand& mem, const Operand& opnd,
                            Register scratch) {
  // Relocations not supported
  DCHECK(opnd.rmode_ == kRelocInfo_NONEPTR);

  // Try to use MVGHI/MVHI: needs GENERAL_INSTR_EXT, a uint12 offset,
  // no index register, and a 16-bit immediate.
  if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_uint12(mem.offset()) &&
      mem.getIndexRegister().is(r0) && is_int16(opnd.imm_)) {
#if V8_TARGET_ARCH_S390X
    mvghi(mem, opnd);
#else
    mvhi(mem, opnd);
#endif
  } else {
    LoadImmP(scratch, opnd);
    StoreP(scratch, mem);
  }
}
| 4726 |
// Load the register range dst1..dst2 from consecutive memory words
// (LMG on 64-bit; LM/LMY on 31-bit depending on offset size).
void MacroAssembler::LoadMultipleP(Register dst1, Register dst2,
                                   const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  lmg(dst1, dst2, mem);
#else
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
#endif
}

// Store the register range src1..src2 to consecutive memory words.
void MacroAssembler::StoreMultipleP(Register src1, Register src2,
                                    const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  DCHECK(is_int20(mem.offset()));
  stmg(src1, src2, mem);
#else
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
#endif
}
| 4756 |
// Load the register range dst1..dst2 from consecutive 32-bit words
// (always 32-bit LM/LMY, regardless of target word size).
void MacroAssembler::LoadMultipleW(Register dst1, Register dst2,
                                   const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    lm(dst1, dst2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    lmy(dst1, dst2, mem);
  }
}

// Store the register range src1..src2 to consecutive 32-bit words.
void MacroAssembler::StoreMultipleW(Register src1, Register src2,
                                    const MemOperand& mem) {
  if (is_uint12(mem.offset())) {
    stm(src1, src2, mem);
  } else {
    DCHECK(is_int20(mem.offset()));
    stmy(src1, src2, mem);
  }
}
| 4776 |
// Load 32-bits and sign extend if necessary (LGF on 64-bit).
// Offsets beyond 20 bits are materialized into scratch (required in
// that case) and used as an index register.
void MacroAssembler::LoadW(Register dst, const MemOperand& mem,
                           Register scratch) {
  int offset = mem.offset();

  if (!is_int20(offset)) {
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgf(dst, MemOperand(mem.rb(), scratch));
#else
    l(dst, MemOperand(mem.rb(), scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    lgf(dst, mem);
#else
    if (is_uint12(offset)) {
      l(dst, mem);
    } else {
      ly(dst, mem);
    }
#endif
  }
}
| 4802 |
// Load Logical 32-bits (zero extend on 64-bit targets).
// Variable length depending on whether offset fits into immediate field:
// MemOperand of RX or RXY format, else index register via scratch.
void MacroAssembler::LoadlW(Register dst, const MemOperand& mem,
                            Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

#if V8_TARGET_ARCH_S390X
  if (is_int20(offset)) {
    llgf(dst, mem);
  } else if (!scratch.is(no_reg)) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
    llgf(dst, MemOperand(base, scratch));
  } else {
    // No encoding can reach the offset without a scratch register.
    DCHECK(false);
  }
#else
  bool use_RXform = false;
  bool use_RXYform = false;
  if (is_uint12(offset)) {
    // RX-format supports unsigned 12-bits offset.
    use_RXform = true;
  } else if (is_int20(offset)) {
    // RXY-format supports signed 20-bits offset.
    use_RXYform = true;
  } else if (!scratch.is(no_reg)) {
    // Materialize offset into scratch register.
    LoadIntLiteral(scratch, offset);
  } else {
    // No encoding can reach the offset without a scratch register.
    DCHECK(false);
  }

  if (use_RXform) {
    l(dst, mem);
  } else if (use_RXYform) {
    ly(dst, mem);
  } else {
    ly(dst, MemOperand(base, scratch));
  }
#endif
}
| 4845 |
// Load a byte from memory, sign-extended to the full register width.
void MacroAssembler::LoadB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  lgb(dst, mem);
#else
  lb(dst, mem);
#endif
}
| 4853 |
// Load a byte from memory, zero-extended (logical load) to the full
// register width.
void MacroAssembler::LoadlB(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  llgc(dst, mem);
#else
  llc(dst, mem);
#endif
}
| 4861 |
// Load And Test (Reg <- Reg)
// Copies src into dst and sets the condition code from the value.
void MacroAssembler::LoadAndTest32(Register dst, Register src) {
  ltr(dst, src);
}
| 4866 |
// Load And Test
// (Register dst(ptr) = Register src (32 | 32->64))
// src is treated as a 32-bit signed integer, which is sign extended to
// 64-bit if necessary.  Sets the condition code from the value.
void MacroAssembler::LoadAndTestP_ExtendSrc(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgfr(dst, src);
#else
  ltr(dst, src);
#endif
}
| 4878 |
// Load And Test Pointer Sized (Reg <- Reg)
// Copies src into dst at pointer width and sets the condition code.
void MacroAssembler::LoadAndTestP(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
  ltgr(dst, src);
#else
  ltr(dst, src);
#endif
}
| 4887 |
// Load And Test 32-bit (Reg <- Mem)
// Loads from memory and sets the condition code from the loaded value.
void MacroAssembler::LoadAndTest32(Register dst, const MemOperand& mem) {
  lt_z(dst, mem);
}
| 4892 |
// Load And Test Pointer Sized (Reg <- Mem)
// Loads a pointer-width value and sets the condition code from it.
void MacroAssembler::LoadAndTestP(Register dst, const MemOperand& mem) {
#if V8_TARGET_ARCH_S390X
  ltg(dst, mem);
#else
  lt_z(dst, mem);
#endif
}
| 4901 |
| 4902 // Load Double Precision (64-bit) Floating Point number from memory |
| 4903 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem) { |
| 4904 // for 32bit and 64bit we all use 64bit floating point regs |
| 4905 if (is_uint12(mem.offset())) { |
| 4906 ld(dst, mem); |
| 4907 } else { |
| 4908 ldy(dst, mem); |
| 4909 } |
| 4910 } |
| 4911 |
| 4912 // Load Single Precision (32-bit) Floating Point number from memory |
| 4913 void MacroAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem) { |
| 4914 if (is_uint12(mem.offset())) { |
| 4915 le_z(dst, mem); |
| 4916 } else { |
| 4917 DCHECK(is_int20(mem.offset())); |
| 4918 ley(dst, mem); |
| 4919 } |
| 4920 } |
| 4921 |
// Load Single Precision (32-bit) Floating Point number from memory,
// and convert to Double Precision (64-bit)
void MacroAssembler::LoadFloat32ConvertToDouble(DoubleRegister dst,
                                                const MemOperand& mem) {
  LoadFloat32(dst, mem);
  // Lengthen the loaded float32 to float64 in place.
  ldebr(dst, dst);
}
| 4929 |
| 4930 // Store Double Precision (64-bit) Floating Point number to memory |
| 4931 void MacroAssembler::StoreDouble(DoubleRegister dst, const MemOperand& mem) { |
| 4932 if (is_uint12(mem.offset())) { |
| 4933 std(dst, mem); |
| 4934 } else { |
| 4935 stdy(dst, mem); |
| 4936 } |
| 4937 } |
| 4938 |
| 4939 // Store Single Precision (32-bit) Floating Point number to memory |
| 4940 void MacroAssembler::StoreFloat32(DoubleRegister src, const MemOperand& mem) { |
| 4941 if (is_uint12(mem.offset())) { |
| 4942 ste(src, mem); |
| 4943 } else { |
| 4944 stey(src, mem); |
| 4945 } |
| 4946 } |
| 4947 |
// Convert Double precision (64-bit) to Single Precision (32-bit)
// and store resulting Float32 to memory
void MacroAssembler::StoreDoubleAsFloat32(DoubleRegister src,
                                          const MemOperand& mem,
                                          DoubleRegister scratch) {
  // Round into scratch so src is left unmodified.
  ledbr(scratch, src);
  StoreFloat32(scratch, mem);
}
| 4956 |
| 4957 // Variable length depending on whether offset fits into immediate field |
| 4958 // MemOperand of RX or RXY format |
| 4959 void MacroAssembler::StoreW(Register src, const MemOperand& mem, |
| 4960 Register scratch) { |
| 4961 Register base = mem.rb(); |
| 4962 int offset = mem.offset(); |
| 4963 |
| 4964 bool use_RXform = false; |
| 4965 bool use_RXYform = false; |
| 4966 |
| 4967 if (is_uint12(offset)) { |
| 4968 // RX-format supports unsigned 12-bits offset. |
| 4969 use_RXform = true; |
| 4970 } else if (is_int20(offset)) { |
| 4971 // RXY-format supports signed 20-bits offset. |
| 4972 use_RXYform = true; |
| 4973 } else if (!scratch.is(no_reg)) { |
| 4974 // Materialize offset into scratch register. |
| 4975 LoadIntLiteral(scratch, offset); |
| 4976 } else { |
| 4977 // scratch is no_reg |
| 4978 DCHECK(false); |
| 4979 } |
| 4980 |
| 4981 if (use_RXform) { |
| 4982 st(src, mem); |
| 4983 } else if (use_RXYform) { |
| 4984 sty(src, mem); |
| 4985 } else { |
| 4986 StoreW(src, MemOperand(base, scratch)); |
| 4987 } |
| 4988 } |
| 4989 |
// Loads 16-bits half-word value from memory and sign extends to pointer
// sized register
void MacroAssembler::LoadHalfWordP(Register dst, const MemOperand& mem,
                                   Register scratch) {
  Register base = mem.rb();
  int offset = mem.offset();

  if (!is_int20(offset)) {
    // Offset fits no displacement form; materialize it and use
    // register-register (index) addressing.
    DCHECK(!scratch.is(no_reg));
    LoadIntLiteral(scratch, offset);
#if V8_TARGET_ARCH_S390X
    lgh(dst, MemOperand(base, scratch));
#else
    lh(dst, MemOperand(base, scratch));
#endif
  } else {
#if V8_TARGET_ARCH_S390X
    // LGH sign-extends the halfword to 64 bits.
    lgh(dst, mem);
#else
    if (is_uint12(offset)) {
      // RX form: unsigned 12-bit displacement.
      lh(dst, mem);
    } else {
      // RXY form: signed 20-bit displacement.
      lhy(dst, mem);
    }
#endif
  }
}
| 5017 |
| 5018 // Variable length depending on whether offset fits into immediate field |
| 5019 // MemOperand current only supports d-form |
| 5020 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem, |
| 5021 Register scratch) { |
| 5022 Register base = mem.rb(); |
| 5023 int offset = mem.offset(); |
| 5024 |
| 5025 if (is_uint12(offset)) { |
| 5026 sth(src, mem); |
| 5027 } else if (is_int20(offset)) { |
| 5028 sthy(src, mem); |
| 5029 } else { |
| 5030 DCHECK(!scratch.is(no_reg)); |
| 5031 LoadIntLiteral(scratch, offset); |
| 5032 sth(src, MemOperand(base, scratch)); |
| 5033 } |
| 5034 } |
| 5035 |
| 5036 // Variable length depending on whether offset fits into immediate field |
| 5037 // MemOperand current only supports d-form |
| 5038 void MacroAssembler::StoreByte(Register src, const MemOperand& mem, |
| 5039 Register scratch) { |
| 5040 Register base = mem.rb(); |
| 5041 int offset = mem.offset(); |
| 5042 |
| 5043 if (is_uint12(offset)) { |
| 5044 stc(src, mem); |
| 5045 } else if (is_int20(offset)) { |
| 5046 stcy(src, mem); |
| 5047 } else { |
| 5048 DCHECK(!scratch.is(no_reg)); |
| 5049 LoadIntLiteral(scratch, offset); |
| 5050 stc(src, MemOperand(base, scratch)); |
| 5051 } |
| 5052 } |
| 5053 |
| 5054 // Shift left logical for 32-bit integer types. |
| 5055 void MacroAssembler::ShiftLeft(Register dst, Register src, const Operand& val) { |
| 5056 if (dst.is(src)) { |
| 5057 sll(dst, val); |
| 5058 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5059 sllk(dst, src, val); |
| 5060 } else { |
| 5061 lr(dst, src); |
| 5062 sll(dst, val); |
| 5063 } |
| 5064 } |
| 5065 |
// Shift left logical for 32-bit integer types; val holds the shift amount.
// Only the lr/sll fallback reads val after writing dst, so the aliasing
// restriction is checked on that path alone.
void MacroAssembler::ShiftLeft(Register dst, Register src, Register val) {
  if (dst.is(src)) {
    sll(dst, val);
  } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
    sllk(dst, src, val);
  } else {
    DCHECK(!dst.is(val));  // The lr/sll path clobbers val.
    lr(dst, src);
    sll(dst, val);
  }
}
| 5078 |
| 5079 // Shift right logical for 32-bit integer types. |
| 5080 void MacroAssembler::ShiftRight(Register dst, Register src, |
| 5081 const Operand& val) { |
| 5082 if (dst.is(src)) { |
| 5083 srl(dst, val); |
| 5084 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5085 srlk(dst, src, val); |
| 5086 } else { |
| 5087 lr(dst, src); |
| 5088 srl(dst, val); |
| 5089 } |
| 5090 } |
| 5091 |
| 5092 // Shift right logical for 32-bit integer types. |
| 5093 void MacroAssembler::ShiftRight(Register dst, Register src, Register val) { |
| 5094 DCHECK(!dst.is(val)); // The lr/srl path clobbers val. |
| 5095 if (dst.is(src)) { |
| 5096 srl(dst, val); |
| 5097 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5098 srlk(dst, src, val); |
| 5099 } else { |
| 5100 lr(dst, src); |
| 5101 srl(dst, val); |
| 5102 } |
| 5103 } |
| 5104 |
| 5105 // Shift left arithmetic for 32-bit integer types. |
| 5106 void MacroAssembler::ShiftLeftArith(Register dst, Register src, |
| 5107 const Operand& val) { |
| 5108 if (dst.is(src)) { |
| 5109 sla(dst, val); |
| 5110 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5111 slak(dst, src, val); |
| 5112 } else { |
| 5113 lr(dst, src); |
| 5114 sla(dst, val); |
| 5115 } |
| 5116 } |
| 5117 |
| 5118 // Shift left arithmetic for 32-bit integer types. |
| 5119 void MacroAssembler::ShiftLeftArith(Register dst, Register src, Register val) { |
| 5120 DCHECK(!dst.is(val)); // The lr/sla path clobbers val. |
| 5121 if (dst.is(src)) { |
| 5122 sla(dst, val); |
| 5123 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5124 slak(dst, src, val); |
| 5125 } else { |
| 5126 lr(dst, src); |
| 5127 sla(dst, val); |
| 5128 } |
| 5129 } |
| 5130 |
| 5131 // Shift right arithmetic for 32-bit integer types. |
| 5132 void MacroAssembler::ShiftRightArith(Register dst, Register src, |
| 5133 const Operand& val) { |
| 5134 if (dst.is(src)) { |
| 5135 sra(dst, val); |
| 5136 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5137 srak(dst, src, val); |
| 5138 } else { |
| 5139 lr(dst, src); |
| 5140 sra(dst, val); |
| 5141 } |
| 5142 } |
| 5143 |
| 5144 // Shift right arithmetic for 32-bit integer types. |
| 5145 void MacroAssembler::ShiftRightArith(Register dst, Register src, Register val) { |
| 5146 DCHECK(!dst.is(val)); // The lr/sra path clobbers val. |
| 5147 if (dst.is(src)) { |
| 5148 sra(dst, val); |
| 5149 } else if (CpuFeatures::IsSupported(DISTINCT_OPS)) { |
| 5150 srak(dst, src, val); |
| 5151 } else { |
| 5152 lr(dst, src); |
| 5153 sra(dst, val); |
| 5154 } |
| 5155 } |
| 5156 |
| 5157 // Clear right most # of bits |
| 5158 void MacroAssembler::ClearRightImm(Register dst, Register src, |
| 5159 const Operand& val) { |
| 5160 int numBitsToClear = val.imm_ % (kPointerSize * 8); |
| 5161 |
| 5162 // Try to use RISBG if possible |
| 5163 if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) { |
| 5164 int endBit = 63 - numBitsToClear; |
| 5165 risbg(dst, src, Operand::Zero(), Operand(endBit), Operand::Zero(), true); |
| 5166 return; |
| 5167 } |
| 5168 |
| 5169 uint64_t hexMask = ~((1L << numBitsToClear) - 1); |
| 5170 |
| 5171 // S390 AND instr clobbers source. Make a copy if necessary |
| 5172 if (!dst.is(src)) LoadRR(dst, src); |
| 5173 |
| 5174 if (numBitsToClear <= 16) { |
| 5175 nill(dst, Operand(static_cast<uint16_t>(hexMask))); |
| 5176 } else if (numBitsToClear <= 32) { |
| 5177 nilf(dst, Operand(static_cast<uint32_t>(hexMask))); |
| 5178 } else if (numBitsToClear <= 64) { |
| 5179 nilf(dst, Operand(static_cast<intptr_t>(0))); |
| 5180 nihf(dst, Operand(hexMask >> 32)); |
| 5181 } |
| 5182 } |
| 5183 |
// Population count of the low 32 bits: dst = number of 1-bits in src.
void MacroAssembler::Popcnt32(Register dst, Register src) {
  // r0 is used as a scratch register below.
  DCHECK(!src.is(r0));
  DCHECK(!dst.is(r0));

  // POPCNT counts bits within each byte; fold the per-byte counts together
  // and extract the low byte as the final result.
  popcnt(dst, src);
  ShiftRight(r0, dst, Operand(16));
  ar(dst, r0);
  ShiftRight(r0, dst, Operand(8));
  ar(dst, r0);
  lbr(dst, dst);
}
| 5195 |
#ifdef V8_TARGET_ARCH_S390X
// Population count of all 64 bits: dst = number of 1-bits in src.
void MacroAssembler::Popcnt64(Register dst, Register src) {
  // r0 is used as a scratch register below.
  DCHECK(!src.is(r0));
  DCHECK(!dst.is(r0));

  // POPCNT counts bits within each byte; fold the eight per-byte counts
  // together and extract the low byte as the final result.
  popcnt(dst, src);
  ShiftRightP(r0, dst, Operand(32));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(16));
  AddP(dst, r0);
  ShiftRightP(r0, dst, Operand(8));
  AddP(dst, r0);
  lbr(dst, dst);
}
#endif
4313 | 5211 |
#ifdef DEBUG
// Returns true if any two of the (valid) arguments denote the same register.
// Compares the number of valid arguments against the number of distinct bits
// in the combined register mask; any alias makes the distinct count smaller.
bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
                Register reg5, Register reg6, Register reg7, Register reg8,
                Register reg9, Register reg10) {
  const Register all_regs[] = {reg1, reg2, reg3, reg4, reg5,
                               reg6, reg7, reg8, reg9, reg10};
  int n_of_valid_regs = 0;
  RegList regs = 0;
  for (const Register& reg : all_regs) {
    if (reg.is_valid()) {
      n_of_valid_regs++;
      regs |= reg.bit();
    }
  }
  int n_of_non_aliasing_regs = NumRegs(regs);

  return n_of_valid_regs != n_of_non_aliasing_regs;
}
#endif
4339 | 5237 |
// Patches `size` bytes of already-generated code at `address`, flushing the
// instruction cache on destruction if requested.
CodePatcher::CodePatcher(Isolate* isolate, byte* address, int size,
                         FlushICache flush_cache)
    : address_(address),
      size_(size),
      masm_(isolate, address, size_ + Assembler::kGap, CodeObjectRequired::kNo),
      flush_cache_(flush_cache) {
  // Create a new macro assembler pointing to the address of the code to patch.
  // The size is adjusted with kGap in order for the assembler to generate size
  // bytes of instructions without failing with buffer size constraints.
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
4352 | 5249 |
4353 | |
// Finalizes the patch: flushes the icache (if requested) and verifies that
// exactly `size_` bytes were emitted.
CodePatcher::~CodePatcher() {
  // Indicate that code has changed.
  if (flush_cache_ == FLUSH) {
    Assembler::FlushICache(masm_.isolate(), address_, size_);
  }

  // Check that the code was patched as expected.
  DCHECK(masm_.pc_ == address_ + size_);
  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
4364 | 5260 |
4365 | |
4366 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); } | |
4367 | |
4368 | |
4369 void CodePatcher::EmitCondition(Condition cond) { | |
4370 Instr instr = Assembler::instr_at(masm_.pc_); | |
4371 switch (cond) { | |
4372 case eq: | |
4373 instr = (instr & ~kCondMask) | BT; | |
4374 break; | |
4375 case ne: | |
4376 instr = (instr & ~kCondMask) | BF; | |
4377 break; | |
4378 default: | |
4379 UNIMPLEMENTED(); | |
4380 } | |
4381 masm_.emit(instr); | |
4382 } | |
4383 | |
4384 | |
// result = dividend / divisor (truncated toward zero) for a constant
// divisor, implemented via multiplication by a precomputed magic number
// instead of a hardware divide.
void MacroAssembler::TruncatingDiv(Register result, Register dividend,
                                   int32_t divisor) {
  // r0 is used as a scratch register below; dividend must survive the
  // multiply so it can be added/subtracted for the sign corrections.
  DCHECK(!dividend.is(result));
  DCHECK(!dividend.is(r0));
  DCHECK(!result.is(r0));
  base::MagicNumbersForDivision<uint32_t> mag =
      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
#ifdef V8_TARGET_ARCH_S390X
  // 64-bit multiply, then keep the high 32 bits of the product.
  LoadRR(result, dividend);
  MulP(result, Operand(mag.multiplier));
  ShiftRightArithP(result, result, Operand(32));

#else
  // 31-bit target: the multiply produces its 64-bit product in the even/odd
  // register pair r0:r1, so r1 must be saved and restored around it.
  lay(sp, MemOperand(sp, -kPointerSize));
  StoreP(r1, MemOperand(sp));

  mov(r1, Operand(mag.multiplier));
  mr_z(r0, dividend);  // r0:r1 = r1 * dividend

  LoadRR(result, r0);  // Keep the high word of the product.
  LoadP(r1, MemOperand(sp));
  la(sp, MemOperand(sp, kPointerSize));
#endif
  // Sign corrections for the magic-number scheme, then the final shift and
  // the +1 adjustment for negative dividends (sign bit of dividend).
  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
  if (divisor > 0 && neg) {
    AddP(result, dividend);
  }
  if (divisor < 0 && !neg && mag.multiplier > 0) {
    SubP(result, dividend);
  }
  if (mag.shift > 0) ShiftRightArith(result, result, Operand(mag.shift));
  ExtractBit(r0, dividend, 31);
  AddP(result, r0);
}
4405 | 5295 |
4406 } // namespace internal | 5296 } // namespace internal |
4407 } // namespace v8 | 5297 } // namespace v8 |
4408 | 5298 |
4409 #endif // V8_TARGET_ARCH_PPC | 5299 #endif // V8_TARGET_ARCH_S390 |
OLD | NEW |