| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
| 6 | 6 |
| 7 #include "src/arm/macro-assembler-arm.h" | 7 #include "src/arm/macro-assembler-arm.h" |
| 8 #include "src/compiler/code-generator-impl.h" | 8 #include "src/compiler/code-generator-impl.h" |
| 9 #include "src/compiler/gap-resolver.h" | 9 #include "src/compiler/gap-resolver.h" |
| 10 #include "src/compiler/node-matchers.h" | 10 #include "src/compiler/node-matchers.h" |
| (...skipping 102 matching lines...) |
| 113 UNREACHABLE(); | 113 UNREACHABLE(); |
| 114 return MemOperand(r0); | 114 return MemOperand(r0); |
| 115 } | 115 } |
| 116 | 116 |
| 117 MemOperand InputOffset() { | 117 MemOperand InputOffset() { |
| 118 int index = 0; | 118 int index = 0; |
| 119 return InputOffset(&index); | 119 return InputOffset(&index); |
| 120 } | 120 } |
| 121 | 121 |
| 122 MemOperand ToMemOperand(InstructionOperand* op) const { | 122 MemOperand ToMemOperand(InstructionOperand* op) const { |
| 123 ASSERT(op != NULL); | 123 DCHECK(op != NULL); |
| 124 ASSERT(!op->IsRegister()); | 124 DCHECK(!op->IsRegister()); |
| 125 ASSERT(!op->IsDoubleRegister()); | 125 DCHECK(!op->IsDoubleRegister()); |
| 126 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); | 126 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); |
| 127 // The linkage computes where all spill slots are located. | 127 // The linkage computes where all spill slots are located. |
| 128 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); | 128 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0); |
| 129 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); | 129 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset()); |
| 130 } | 130 } |
| 131 }; | 131 }; |
| 132 | 132 |
| 133 | 133 |
| 134 // Assembles an instruction after register allocation, producing machine code. | 134 // Assembles an instruction after register allocation, producing machine code. |
| 135 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { | 135 void CodeGenerator::AssembleArchInstruction(Instruction* instr) { |
| 136 ArmOperandConverter i(this, instr); | 136 ArmOperandConverter i(this, instr); |
| 137 | 137 |
| 138 switch (ArchOpcodeField::decode(instr->opcode())) { | 138 switch (ArchOpcodeField::decode(instr->opcode())) { |
| 139 case kArchJmp: | 139 case kArchJmp: |
| 140 __ b(code_->GetLabel(i.InputBlock(0))); | 140 __ b(code_->GetLabel(i.InputBlock(0))); |
| 141 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 141 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 142 break; | 142 break; |
| 143 case kArchNop: | 143 case kArchNop: |
| 144 // don't emit code for nops. | 144 // don't emit code for nops. |
| 145 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 145 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 146 break; | 146 break; |
| 147 case kArchRet: | 147 case kArchRet: |
| 148 AssembleReturn(); | 148 AssembleReturn(); |
| 149 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 149 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 150 break; | 150 break; |
| 151 case kArchDeoptimize: { | 151 case kArchDeoptimize: { |
| 152 int deoptimization_id = MiscField::decode(instr->opcode()); | 152 int deoptimization_id = MiscField::decode(instr->opcode()); |
| 153 BuildTranslation(instr, deoptimization_id); | 153 BuildTranslation(instr, deoptimization_id); |
| 154 | 154 |
| 155 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( | 155 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry( |
| 156 isolate(), deoptimization_id, Deoptimizer::LAZY); | 156 isolate(), deoptimization_id, Deoptimizer::LAZY); |
| 157 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); | 157 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY); |
| 158 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 158 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 159 break; | 159 break; |
| 160 } | 160 } |
| 161 case kArmAdd: | 161 case kArmAdd: |
| 162 __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 162 __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 163 i.OutputSBit()); | 163 i.OutputSBit()); |
| 164 break; | 164 break; |
| 165 case kArmAnd: | 165 case kArmAnd: |
| 166 __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 166 __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 167 i.OutputSBit()); | 167 i.OutputSBit()); |
| 168 break; | 168 break; |
| 169 case kArmBic: | 169 case kArmBic: |
| 170 __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 170 __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 171 i.OutputSBit()); | 171 i.OutputSBit()); |
| 172 break; | 172 break; |
| 173 case kArmMul: | 173 case kArmMul: |
| 174 __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), | 174 __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| 175 i.OutputSBit()); | 175 i.OutputSBit()); |
| 176 break; | 176 break; |
| 177 case kArmMla: | 177 case kArmMla: |
| 178 __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), | 178 __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| 179 i.InputRegister(2), i.OutputSBit()); | 179 i.InputRegister(2), i.OutputSBit()); |
| 180 break; | 180 break; |
| 181 case kArmMls: { | 181 case kArmMls: { |
| 182 CpuFeatureScope scope(masm(), MLS); | 182 CpuFeatureScope scope(masm(), MLS); |
| 183 __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), | 183 __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1), |
| 184 i.InputRegister(2)); | 184 i.InputRegister(2)); |
| 185 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 185 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 186 break; | 186 break; |
| 187 } | 187 } |
| 188 case kArmSdiv: { | 188 case kArmSdiv: { |
| 189 CpuFeatureScope scope(masm(), SUDIV); | 189 CpuFeatureScope scope(masm(), SUDIV); |
| 190 __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); | 190 __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| 191 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 191 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 192 break; | 192 break; |
| 193 } | 193 } |
| 194 case kArmUdiv: { | 194 case kArmUdiv: { |
| 195 CpuFeatureScope scope(masm(), SUDIV); | 195 CpuFeatureScope scope(masm(), SUDIV); |
| 196 __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); | 196 __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1)); |
| 197 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 197 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 198 break; | 198 break; |
| 199 } | 199 } |
| 200 case kArmMov: | 200 case kArmMov: |
| 201 __ Move(i.OutputRegister(), i.InputOperand2(0)); | 201 __ Move(i.OutputRegister(), i.InputOperand2(0)); |
| 202 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 202 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 203 break; | 203 break; |
| 204 case kArmMvn: | 204 case kArmMvn: |
| 205 __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit()); | 205 __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit()); |
| 206 break; | 206 break; |
| 207 case kArmOrr: | 207 case kArmOrr: |
| 208 __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 208 __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 209 i.OutputSBit()); | 209 i.OutputSBit()); |
| 210 break; | 210 break; |
| 211 case kArmEor: | 211 case kArmEor: |
| 212 __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 212 __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 213 i.OutputSBit()); | 213 i.OutputSBit()); |
| 214 break; | 214 break; |
| 215 case kArmSub: | 215 case kArmSub: |
| 216 __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 216 __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 217 i.OutputSBit()); | 217 i.OutputSBit()); |
| 218 break; | 218 break; |
| 219 case kArmRsb: | 219 case kArmRsb: |
| 220 __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), | 220 __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1), |
| 221 i.OutputSBit()); | 221 i.OutputSBit()); |
| 222 break; | 222 break; |
| 223 case kArmBfc: { | 223 case kArmBfc: { |
| 224 CpuFeatureScope scope(masm(), ARMv7); | 224 CpuFeatureScope scope(masm(), ARMv7); |
| 225 __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); | 225 __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2)); |
| 226 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 226 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 227 break; | 227 break; |
| 228 } | 228 } |
| 229 case kArmUbfx: { | 229 case kArmUbfx: { |
| 230 CpuFeatureScope scope(masm(), ARMv7); | 230 CpuFeatureScope scope(masm(), ARMv7); |
| 231 __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), | 231 __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1), |
| 232 i.InputInt8(2)); | 232 i.InputInt8(2)); |
| 233 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 233 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 234 break; | 234 break; |
| 235 } | 235 } |
| 236 case kArmCallCodeObject: { | 236 case kArmCallCodeObject: { |
| 237 if (instr->InputAt(0)->IsImmediate()) { | 237 if (instr->InputAt(0)->IsImmediate()) { |
| 238 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); | 238 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0)); |
| 239 __ Call(code, RelocInfo::CODE_TARGET); | 239 __ Call(code, RelocInfo::CODE_TARGET); |
| 240 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, | 240 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
| 241 Safepoint::kNoLazyDeopt); | 241 Safepoint::kNoLazyDeopt); |
| 242 } else { | 242 } else { |
| 243 Register reg = i.InputRegister(0); | 243 Register reg = i.InputRegister(0); |
| 244 int entry = Code::kHeaderSize - kHeapObjectTag; | 244 int entry = Code::kHeaderSize - kHeapObjectTag; |
| 245 __ ldr(reg, MemOperand(reg, entry)); | 245 __ ldr(reg, MemOperand(reg, entry)); |
| 246 __ Call(reg); | 246 __ Call(reg); |
| 247 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, | 247 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
| 248 Safepoint::kNoLazyDeopt); | 248 Safepoint::kNoLazyDeopt); |
| 249 } | 249 } |
| 250 bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); | 250 bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1); |
| 251 if (lazy_deopt) { | 251 if (lazy_deopt) { |
| 252 RecordLazyDeoptimizationEntry(instr); | 252 RecordLazyDeoptimizationEntry(instr); |
| 253 } | 253 } |
| 254 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 254 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 255 break; | 255 break; |
| 256 } | 256 } |
| 257 case kArmCallJSFunction: { | 257 case kArmCallJSFunction: { |
| 258 Register func = i.InputRegister(0); | 258 Register func = i.InputRegister(0); |
| 259 | 259 |
| 260 // TODO(jarin) The load of the context should be separated from the call. | 260 // TODO(jarin) The load of the context should be separated from the call. |
| 261 __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset)); | 261 __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset)); |
| 262 __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); | 262 __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset)); |
| 263 __ Call(ip); | 263 __ Call(ip); |
| 264 | 264 |
| 265 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, | 265 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0, |
| 266 Safepoint::kNoLazyDeopt); | 266 Safepoint::kNoLazyDeopt); |
| 267 RecordLazyDeoptimizationEntry(instr); | 267 RecordLazyDeoptimizationEntry(instr); |
| 268 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 268 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 269 break; | 269 break; |
| 270 } | 270 } |
| 271 case kArmCallAddress: { | 271 case kArmCallAddress: { |
| 272 DirectCEntryStub stub(isolate()); | 272 DirectCEntryStub stub(isolate()); |
| 273 stub.GenerateCall(masm(), i.InputRegister(0)); | 273 stub.GenerateCall(masm(), i.InputRegister(0)); |
| 274 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 274 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 275 break; | 275 break; |
| 276 } | 276 } |
| 277 case kArmPush: | 277 case kArmPush: |
| 278 __ Push(i.InputRegister(0)); | 278 __ Push(i.InputRegister(0)); |
| 279 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 279 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 280 break; | 280 break; |
| 281 case kArmDrop: { | 281 case kArmDrop: { |
| 282 int words = MiscField::decode(instr->opcode()); | 282 int words = MiscField::decode(instr->opcode()); |
| 283 __ Drop(words); | 283 __ Drop(words); |
| 284 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 284 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 285 break; | 285 break; |
| 286 } | 286 } |
| 287 case kArmCmp: | 287 case kArmCmp: |
| 288 __ cmp(i.InputRegister(0), i.InputOperand2(1)); | 288 __ cmp(i.InputRegister(0), i.InputOperand2(1)); |
| 289 ASSERT_EQ(SetCC, i.OutputSBit()); | 289 DCHECK_EQ(SetCC, i.OutputSBit()); |
| 290 break; | 290 break; |
| 291 case kArmCmn: | 291 case kArmCmn: |
| 292 __ cmn(i.InputRegister(0), i.InputOperand2(1)); | 292 __ cmn(i.InputRegister(0), i.InputOperand2(1)); |
| 293 ASSERT_EQ(SetCC, i.OutputSBit()); | 293 DCHECK_EQ(SetCC, i.OutputSBit()); |
| 294 break; | 294 break; |
| 295 case kArmTst: | 295 case kArmTst: |
| 296 __ tst(i.InputRegister(0), i.InputOperand2(1)); | 296 __ tst(i.InputRegister(0), i.InputOperand2(1)); |
| 297 ASSERT_EQ(SetCC, i.OutputSBit()); | 297 DCHECK_EQ(SetCC, i.OutputSBit()); |
| 298 break; | 298 break; |
| 299 case kArmTeq: | 299 case kArmTeq: |
| 300 __ teq(i.InputRegister(0), i.InputOperand2(1)); | 300 __ teq(i.InputRegister(0), i.InputOperand2(1)); |
| 301 ASSERT_EQ(SetCC, i.OutputSBit()); | 301 DCHECK_EQ(SetCC, i.OutputSBit()); |
| 302 break; | 302 break; |
| 303 case kArmVcmpF64: | 303 case kArmVcmpF64: |
| 304 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), | 304 __ VFPCompareAndSetFlags(i.InputDoubleRegister(0), |
| 305 i.InputDoubleRegister(1)); | 305 i.InputDoubleRegister(1)); |
| 306 ASSERT_EQ(SetCC, i.OutputSBit()); | 306 DCHECK_EQ(SetCC, i.OutputSBit()); |
| 307 break; | 307 break; |
| 308 case kArmVaddF64: | 308 case kArmVaddF64: |
| 309 __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 309 __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 310 i.InputDoubleRegister(1)); | 310 i.InputDoubleRegister(1)); |
| 311 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 311 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 312 break; | 312 break; |
| 313 case kArmVsubF64: | 313 case kArmVsubF64: |
| 314 __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 314 __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 315 i.InputDoubleRegister(1)); | 315 i.InputDoubleRegister(1)); |
| 316 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 316 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 317 break; | 317 break; |
| 318 case kArmVmulF64: | 318 case kArmVmulF64: |
| 319 __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 319 __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 320 i.InputDoubleRegister(1)); | 320 i.InputDoubleRegister(1)); |
| 321 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 321 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 322 break; | 322 break; |
| 323 case kArmVmlaF64: | 323 case kArmVmlaF64: |
| 324 __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1), | 324 __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1), |
| 325 i.InputDoubleRegister(2)); | 325 i.InputDoubleRegister(2)); |
| 326 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 326 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 327 break; | 327 break; |
| 328 case kArmVmlsF64: | 328 case kArmVmlsF64: |
| 329 __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1), | 329 __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1), |
| 330 i.InputDoubleRegister(2)); | 330 i.InputDoubleRegister(2)); |
| 331 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 331 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 332 break; | 332 break; |
| 333 case kArmVdivF64: | 333 case kArmVdivF64: |
| 334 __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0), | 334 __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0), |
| 335 i.InputDoubleRegister(1)); | 335 i.InputDoubleRegister(1)); |
| 336 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 336 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 337 break; | 337 break; |
| 338 case kArmVmodF64: { | 338 case kArmVmodF64: { |
| 339 // TODO(bmeurer): We should really get rid of this special instruction, | 339 // TODO(bmeurer): We should really get rid of this special instruction, |
| 340 // and generate a CallAddress instruction instead. | 340 // and generate a CallAddress instruction instead. |
| 341 FrameScope scope(masm(), StackFrame::MANUAL); | 341 FrameScope scope(masm(), StackFrame::MANUAL); |
| 342 __ PrepareCallCFunction(0, 2, kScratchReg); | 342 __ PrepareCallCFunction(0, 2, kScratchReg); |
| 343 __ MovToFloatParameters(i.InputDoubleRegister(0), | 343 __ MovToFloatParameters(i.InputDoubleRegister(0), |
| 344 i.InputDoubleRegister(1)); | 344 i.InputDoubleRegister(1)); |
| 345 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), | 345 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
| 346 0, 2); | 346 0, 2); |
| 347 // Move the result in the double result register. | 347 // Move the result in the double result register. |
| 348 __ MovFromFloatResult(i.OutputDoubleRegister()); | 348 __ MovFromFloatResult(i.OutputDoubleRegister()); |
| 349 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 349 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 350 break; | 350 break; |
| 351 } | 351 } |
| 352 case kArmVnegF64: | 352 case kArmVnegF64: |
| 353 __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | 353 __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
| 354 break; | 354 break; |
| 355 case kArmVcvtF64S32: { | 355 case kArmVcvtF64S32: { |
| 356 SwVfpRegister scratch = kScratchDoubleReg.low(); | 356 SwVfpRegister scratch = kScratchDoubleReg.low(); |
| 357 __ vmov(scratch, i.InputRegister(0)); | 357 __ vmov(scratch, i.InputRegister(0)); |
| 358 __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); | 358 __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch); |
| 359 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 359 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 360 break; | 360 break; |
| 361 } | 361 } |
| 362 case kArmVcvtF64U32: { | 362 case kArmVcvtF64U32: { |
| 363 SwVfpRegister scratch = kScratchDoubleReg.low(); | 363 SwVfpRegister scratch = kScratchDoubleReg.low(); |
| 364 __ vmov(scratch, i.InputRegister(0)); | 364 __ vmov(scratch, i.InputRegister(0)); |
| 365 __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); | 365 __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch); |
| 366 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 366 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 367 break; | 367 break; |
| 368 } | 368 } |
| 369 case kArmVcvtS32F64: { | 369 case kArmVcvtS32F64: { |
| 370 SwVfpRegister scratch = kScratchDoubleReg.low(); | 370 SwVfpRegister scratch = kScratchDoubleReg.low(); |
| 371 __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); | 371 __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0)); |
| 372 __ vmov(i.OutputRegister(), scratch); | 372 __ vmov(i.OutputRegister(), scratch); |
| 373 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 373 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 374 break; | 374 break; |
| 375 } | 375 } |
| 376 case kArmVcvtU32F64: { | 376 case kArmVcvtU32F64: { |
| 377 SwVfpRegister scratch = kScratchDoubleReg.low(); | 377 SwVfpRegister scratch = kScratchDoubleReg.low(); |
| 378 __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); | 378 __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0)); |
| 379 __ vmov(i.OutputRegister(), scratch); | 379 __ vmov(i.OutputRegister(), scratch); |
| 380 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 380 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 381 break; | 381 break; |
| 382 } | 382 } |
| 383 case kArmLoadWord8: | 383 case kArmLoadWord8: |
| 384 __ ldrb(i.OutputRegister(), i.InputOffset()); | 384 __ ldrb(i.OutputRegister(), i.InputOffset()); |
| 385 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 385 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 386 break; | 386 break; |
| 387 case kArmStoreWord8: { | 387 case kArmStoreWord8: { |
| 388 int index = 0; | 388 int index = 0; |
| 389 MemOperand operand = i.InputOffset(&index); | 389 MemOperand operand = i.InputOffset(&index); |
| 390 __ strb(i.InputRegister(index), operand); | 390 __ strb(i.InputRegister(index), operand); |
| 391 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 391 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 392 break; | 392 break; |
| 393 } | 393 } |
| 394 case kArmLoadWord16: | 394 case kArmLoadWord16: |
| 395 __ ldrh(i.OutputRegister(), i.InputOffset()); | 395 __ ldrh(i.OutputRegister(), i.InputOffset()); |
| 396 break; | 396 break; |
| 397 case kArmStoreWord16: { | 397 case kArmStoreWord16: { |
| 398 int index = 0; | 398 int index = 0; |
| 399 MemOperand operand = i.InputOffset(&index); | 399 MemOperand operand = i.InputOffset(&index); |
| 400 __ strh(i.InputRegister(index), operand); | 400 __ strh(i.InputRegister(index), operand); |
| 401 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 401 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 402 break; | 402 break; |
| 403 } | 403 } |
| 404 case kArmLoadWord32: | 404 case kArmLoadWord32: |
| 405 __ ldr(i.OutputRegister(), i.InputOffset()); | 405 __ ldr(i.OutputRegister(), i.InputOffset()); |
| 406 break; | 406 break; |
| 407 case kArmStoreWord32: { | 407 case kArmStoreWord32: { |
| 408 int index = 0; | 408 int index = 0; |
| 409 MemOperand operand = i.InputOffset(&index); | 409 MemOperand operand = i.InputOffset(&index); |
| 410 __ str(i.InputRegister(index), operand); | 410 __ str(i.InputRegister(index), operand); |
| 411 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 411 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 412 break; | 412 break; |
| 413 } | 413 } |
| 414 case kArmFloat64Load: | 414 case kArmFloat64Load: |
| 415 __ vldr(i.OutputDoubleRegister(), i.InputOffset()); | 415 __ vldr(i.OutputDoubleRegister(), i.InputOffset()); |
| 416 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 416 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 417 break; | 417 break; |
| 418 case kArmFloat64Store: { | 418 case kArmFloat64Store: { |
| 419 int index = 0; | 419 int index = 0; |
| 420 MemOperand operand = i.InputOffset(&index); | 420 MemOperand operand = i.InputOffset(&index); |
| 421 __ vstr(i.InputDoubleRegister(index), operand); | 421 __ vstr(i.InputDoubleRegister(index), operand); |
| 422 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 422 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 423 break; | 423 break; |
| 424 } | 424 } |
| 425 case kArmStoreWriteBarrier: { | 425 case kArmStoreWriteBarrier: { |
| 426 Register object = i.InputRegister(0); | 426 Register object = i.InputRegister(0); |
| 427 Register index = i.InputRegister(1); | 427 Register index = i.InputRegister(1); |
| 428 Register value = i.InputRegister(2); | 428 Register value = i.InputRegister(2); |
| 429 __ add(index, object, index); | 429 __ add(index, object, index); |
| 430 __ str(value, MemOperand(index)); | 430 __ str(value, MemOperand(index)); |
| 431 SaveFPRegsMode mode = | 431 SaveFPRegsMode mode = |
| 432 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; | 432 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; |
| 433 LinkRegisterStatus lr_status = kLRHasNotBeenSaved; | 433 LinkRegisterStatus lr_status = kLRHasNotBeenSaved; |
| 434 __ RecordWrite(object, index, value, lr_status, mode); | 434 __ RecordWrite(object, index, value, lr_status, mode); |
| 435 ASSERT_EQ(LeaveCC, i.OutputSBit()); | 435 DCHECK_EQ(LeaveCC, i.OutputSBit()); |
| 436 break; | 436 break; |
| 437 } | 437 } |
| 438 } | 438 } |
| 439 } | 439 } |
| 440 | 440 |
| 441 | 441 |
| 442 // Assembles branches after an instruction. | 442 // Assembles branches after an instruction. |
| 443 void CodeGenerator::AssembleArchBranch(Instruction* instr, | 443 void CodeGenerator::AssembleArchBranch(Instruction* instr, |
| 444 FlagsCondition condition) { | 444 FlagsCondition condition) { |
| 445 ArmOperandConverter i(this, instr); | 445 ArmOperandConverter i(this, instr); |
| (...skipping 69 matching lines...) |
| 515 | 515 |
| 516 // Assembles boolean materializations after an instruction. | 516 // Assembles boolean materializations after an instruction. |
| 517 void CodeGenerator::AssembleArchBoolean(Instruction* instr, | 517 void CodeGenerator::AssembleArchBoolean(Instruction* instr, |
| 518 FlagsCondition condition) { | 518 FlagsCondition condition) { |
| 519 ArmOperandConverter i(this, instr); | 519 ArmOperandConverter i(this, instr); |
| 520 Label done; | 520 Label done; |
| 521 | 521 |
| 522 // Materialize a full 32-bit 1 or 0 value. The result register is always the | 522 // Materialize a full 32-bit 1 or 0 value. The result register is always the |
| 523 // last output of the instruction. | 523 // last output of the instruction. |
| 524 Label check; | 524 Label check; |
| 525 ASSERT_NE(0, instr->OutputCount()); | 525 DCHECK_NE(0, instr->OutputCount()); |
| 526 Register reg = i.OutputRegister(instr->OutputCount() - 1); | 526 Register reg = i.OutputRegister(instr->OutputCount() - 1); |
| 527 Condition cc = kNoCondition; | 527 Condition cc = kNoCondition; |
| 528 switch (condition) { | 528 switch (condition) { |
| 529 case kUnorderedEqual: | 529 case kUnorderedEqual: |
| 530 __ b(vc, &check); | 530 __ b(vc, &check); |
| 531 __ mov(reg, Operand(0)); | 531 __ mov(reg, Operand(0)); |
| 532 __ b(&done); | 532 __ b(&done); |
| 533 // Fall through. | 533 // Fall through. |
| 534 case kEqual: | 534 case kEqual: |
| 535 cc = eq; | 535 cc = eq; |
| (...skipping 142 matching lines...) |
| 678 } | 678 } |
| 679 } | 679 } |
| 680 | 680 |
| 681 | 681 |
| 682 void CodeGenerator::AssembleMove(InstructionOperand* source, | 682 void CodeGenerator::AssembleMove(InstructionOperand* source, |
| 683 InstructionOperand* destination) { | 683 InstructionOperand* destination) { |
| 684 ArmOperandConverter g(this, NULL); | 684 ArmOperandConverter g(this, NULL); |
| 685 // Dispatch on the source and destination operand kinds. Not all | 685 // Dispatch on the source and destination operand kinds. Not all |
| 686 // combinations are possible. | 686 // combinations are possible. |
| 687 if (source->IsRegister()) { | 687 if (source->IsRegister()) { |
| 688 ASSERT(destination->IsRegister() || destination->IsStackSlot()); | 688 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 689 Register src = g.ToRegister(source); | 689 Register src = g.ToRegister(source); |
| 690 if (destination->IsRegister()) { | 690 if (destination->IsRegister()) { |
| 691 __ mov(g.ToRegister(destination), src); | 691 __ mov(g.ToRegister(destination), src); |
| 692 } else { | 692 } else { |
| 693 __ str(src, g.ToMemOperand(destination)); | 693 __ str(src, g.ToMemOperand(destination)); |
| 694 } | 694 } |
| 695 } else if (source->IsStackSlot()) { | 695 } else if (source->IsStackSlot()) { |
| 696 ASSERT(destination->IsRegister() || destination->IsStackSlot()); | 696 DCHECK(destination->IsRegister() || destination->IsStackSlot()); |
| 697 MemOperand src = g.ToMemOperand(source); | 697 MemOperand src = g.ToMemOperand(source); |
| 698 if (destination->IsRegister()) { | 698 if (destination->IsRegister()) { |
| 699 __ ldr(g.ToRegister(destination), src); | 699 __ ldr(g.ToRegister(destination), src); |
| 700 } else { | 700 } else { |
| 701 Register temp = kScratchReg; | 701 Register temp = kScratchReg; |
| 702 __ ldr(temp, src); | 702 __ ldr(temp, src); |
| 703 __ str(temp, g.ToMemOperand(destination)); | 703 __ str(temp, g.ToMemOperand(destination)); |
| 704 } | 704 } |
| 705 } else if (source->IsConstant()) { | 705 } else if (source->IsConstant()) { |
| 706 if (destination->IsRegister() || destination->IsStackSlot()) { | 706 if (destination->IsRegister() || destination->IsStackSlot()) { |
| (...skipping 16 matching lines...) |
| 723 break; | 723 break; |
| 724 case Constant::kHeapObject: | 724 case Constant::kHeapObject: |
| 725 __ Move(dst, src.ToHeapObject()); | 725 __ Move(dst, src.ToHeapObject()); |
| 726 break; | 726 break; |
| 727 } | 727 } |
| 728 if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination)); | 728 if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination)); |
| 729 } else if (destination->IsDoubleRegister()) { | 729 } else if (destination->IsDoubleRegister()) { |
| 730 DwVfpRegister result = g.ToDoubleRegister(destination); | 730 DwVfpRegister result = g.ToDoubleRegister(destination); |
| 731 __ vmov(result, g.ToDouble(source)); | 731 __ vmov(result, g.ToDouble(source)); |
| 732 } else { | 732 } else { |
| 733 ASSERT(destination->IsDoubleStackSlot()); | 733 DCHECK(destination->IsDoubleStackSlot()); |
| 734 DwVfpRegister temp = kScratchDoubleReg; | 734 DwVfpRegister temp = kScratchDoubleReg; |
| 735 __ vmov(temp, g.ToDouble(source)); | 735 __ vmov(temp, g.ToDouble(source)); |
| 736 __ vstr(temp, g.ToMemOperand(destination)); | 736 __ vstr(temp, g.ToMemOperand(destination)); |
| 737 } | 737 } |
| 738 } else if (source->IsDoubleRegister()) { | 738 } else if (source->IsDoubleRegister()) { |
| 739 DwVfpRegister src = g.ToDoubleRegister(source); | 739 DwVfpRegister src = g.ToDoubleRegister(source); |
| 740 if (destination->IsDoubleRegister()) { | 740 if (destination->IsDoubleRegister()) { |
| 741 DwVfpRegister dst = g.ToDoubleRegister(destination); | 741 DwVfpRegister dst = g.ToDoubleRegister(destination); |
| 742 __ Move(dst, src); | 742 __ Move(dst, src); |
| 743 } else { | 743 } else { |
| 744 ASSERT(destination->IsDoubleStackSlot()); | 744 DCHECK(destination->IsDoubleStackSlot()); |
| 745 __ vstr(src, g.ToMemOperand(destination)); | 745 __ vstr(src, g.ToMemOperand(destination)); |
| 746 } | 746 } |
| 747 } else if (source->IsDoubleStackSlot()) { | 747 } else if (source->IsDoubleStackSlot()) { |
| 748 ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); | 748 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); |
| 749 MemOperand src = g.ToMemOperand(source); | 749 MemOperand src = g.ToMemOperand(source); |
| 750 if (destination->IsDoubleRegister()) { | 750 if (destination->IsDoubleRegister()) { |
| 751 __ vldr(g.ToDoubleRegister(destination), src); | 751 __ vldr(g.ToDoubleRegister(destination), src); |
| 752 } else { | 752 } else { |
| 753 DwVfpRegister temp = kScratchDoubleReg; | 753 DwVfpRegister temp = kScratchDoubleReg; |
| 754 __ vldr(temp, src); | 754 __ vldr(temp, src); |
| 755 __ vstr(temp, g.ToMemOperand(destination)); | 755 __ vstr(temp, g.ToMemOperand(destination)); |
| 756 } | 756 } |
| 757 } else { | 757 } else { |
| 758 UNREACHABLE(); | 758 UNREACHABLE(); |
| 759 } | 759 } |
| 760 } | 760 } |
| 761 | 761 |
| 762 | 762 |
| 763 void CodeGenerator::AssembleSwap(InstructionOperand* source, | 763 void CodeGenerator::AssembleSwap(InstructionOperand* source, |
| 764 InstructionOperand* destination) { | 764 InstructionOperand* destination) { |
| 765 ArmOperandConverter g(this, NULL); | 765 ArmOperandConverter g(this, NULL); |
| 766 // Dispatch on the source and destination operand kinds. Not all | 766 // Dispatch on the source and destination operand kinds. Not all |
| 767 // combinations are possible. | 767 // combinations are possible. |
| 768 if (source->IsRegister()) { | 768 if (source->IsRegister()) { |
| 769 // Register-register. | 769 // Register-register. |
| 770 Register temp = kScratchReg; | 770 Register temp = kScratchReg; |
| 771 Register src = g.ToRegister(source); | 771 Register src = g.ToRegister(source); |
| 772 if (destination->IsRegister()) { | 772 if (destination->IsRegister()) { |
| 773 Register dst = g.ToRegister(destination); | 773 Register dst = g.ToRegister(destination); |
| 774 __ Move(temp, src); | 774 __ Move(temp, src); |
| 775 __ Move(src, dst); | 775 __ Move(src, dst); |
| 776 __ Move(dst, temp); | 776 __ Move(dst, temp); |
| 777 } else { | 777 } else { |
| 778 ASSERT(destination->IsStackSlot()); | 778 DCHECK(destination->IsStackSlot()); |
| 779 MemOperand dst = g.ToMemOperand(destination); | 779 MemOperand dst = g.ToMemOperand(destination); |
| 780 __ mov(temp, src); | 780 __ mov(temp, src); |
| 781 __ ldr(src, dst); | 781 __ ldr(src, dst); |
| 782 __ str(temp, dst); | 782 __ str(temp, dst); |
| 783 } | 783 } |
| 784 } else if (source->IsStackSlot()) { | 784 } else if (source->IsStackSlot()) { |
| 785 ASSERT(destination->IsStackSlot()); | 785 DCHECK(destination->IsStackSlot()); |
| 786 Register temp_0 = kScratchReg; | 786 Register temp_0 = kScratchReg; |
| 787 SwVfpRegister temp_1 = kScratchDoubleReg.low(); | 787 SwVfpRegister temp_1 = kScratchDoubleReg.low(); |
| 788 MemOperand src = g.ToMemOperand(source); | 788 MemOperand src = g.ToMemOperand(source); |
| 789 MemOperand dst = g.ToMemOperand(destination); | 789 MemOperand dst = g.ToMemOperand(destination); |
| 790 __ ldr(temp_0, src); | 790 __ ldr(temp_0, src); |
| 791 __ vldr(temp_1, dst); | 791 __ vldr(temp_1, dst); |
| 792 __ str(temp_0, dst); | 792 __ str(temp_0, dst); |
| 793 __ vstr(temp_1, src); | 793 __ vstr(temp_1, src); |
| 794 } else if (source->IsDoubleRegister()) { | 794 } else if (source->IsDoubleRegister()) { |
| 795 DwVfpRegister temp = kScratchDoubleReg; | 795 DwVfpRegister temp = kScratchDoubleReg; |
| 796 DwVfpRegister src = g.ToDoubleRegister(source); | 796 DwVfpRegister src = g.ToDoubleRegister(source); |
| 797 if (destination->IsDoubleRegister()) { | 797 if (destination->IsDoubleRegister()) { |
| 798 DwVfpRegister dst = g.ToDoubleRegister(destination); | 798 DwVfpRegister dst = g.ToDoubleRegister(destination); |
| 799 __ Move(temp, src); | 799 __ Move(temp, src); |
| 800 __ Move(src, dst); | 800 __ Move(src, dst); |
| 801 __ Move(src, temp); | 801 __ Move(src, temp); |
| 802 } else { | 802 } else { |
| 803 ASSERT(destination->IsDoubleStackSlot()); | 803 DCHECK(destination->IsDoubleStackSlot()); |
| 804 MemOperand dst = g.ToMemOperand(destination); | 804 MemOperand dst = g.ToMemOperand(destination); |
| 805 __ Move(temp, src); | 805 __ Move(temp, src); |
| 806 __ vldr(src, dst); | 806 __ vldr(src, dst); |
| 807 __ vstr(temp, dst); | 807 __ vstr(temp, dst); |
| 808 } | 808 } |
| 809 } else if (source->IsDoubleStackSlot()) { | 809 } else if (source->IsDoubleStackSlot()) { |
| 810 ASSERT(destination->IsDoubleStackSlot()); | 810 DCHECK(destination->IsDoubleStackSlot()); |
| 811 Register temp_0 = kScratchReg; | 811 Register temp_0 = kScratchReg; |
| 812 DwVfpRegister temp_1 = kScratchDoubleReg; | 812 DwVfpRegister temp_1 = kScratchDoubleReg; |
| 813 MemOperand src0 = g.ToMemOperand(source); | 813 MemOperand src0 = g.ToMemOperand(source); |
| 814 MemOperand src1(src0.rn(), src0.offset() + kPointerSize); | 814 MemOperand src1(src0.rn(), src0.offset() + kPointerSize); |
| 815 MemOperand dst0 = g.ToMemOperand(destination); | 815 MemOperand dst0 = g.ToMemOperand(destination); |
| 816 MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize); | 816 MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize); |
| 817 __ vldr(temp_1, dst0); // Save destination in temp_1. | 817 __ vldr(temp_1, dst0); // Save destination in temp_1. |
| 818 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination. | 818 __ ldr(temp_0, src0); // Then use temp_0 to copy source to destination. |
| 819 __ str(temp_0, dst0); | 819 __ str(temp_0, dst0); |
| 820 __ ldr(temp_0, src1); | 820 __ ldr(temp_0, src1); |
| (...skipping 18 matching lines...) |
| 839 int end_pc) { | 839 int end_pc) { |
| 840 return false; | 840 return false; |
| 841 } | 841 } |
| 842 | 842 |
| 843 #endif // DEBUG | 843 #endif // DEBUG |
| 844 | 844 |
| 845 #undef __ | 845 #undef __ |
| 846 } | 846 } |
| 847 } | 847 } |
| 848 } // namespace v8::internal::compiler | 848 } // namespace v8::internal::compiler |
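
Note: every ASSERT/ASSERT_EQ/ASSERT_NE in the OLD column becomes DCHECK/DCHECK_EQ/DCHECK_NE in the NEW column. As a rough sketch of the semantics such debug-only check macros typically have (an illustration under assumed definitions, not V8's actual src/base/logging.h implementation):

```cpp
// Illustrative sketch only: assumed stand-ins for the DCHECK family,
// not the actual definitions from V8's src/base/logging.h.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
// In debug builds the condition is evaluated and a failure aborts.
#define DCHECK(condition)                                            \
  do {                                                               \
    if (!(condition)) {                                              \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition);  \
      std::abort();                                                  \
    }                                                                \
  } while (false)
#else
// In release builds the check compiles away entirely.
#define DCHECK(condition) ((void)0)
#endif

#define DCHECK_EQ(expected, actual) DCHECK((expected) == (actual))
#define DCHECK_NE(expected, actual) DCHECK((expected) != (actual))

int main() {
  int output_count = 1;
  DCHECK_NE(0, output_count);  // Passes; would abort in a DEBUG build if zero.
  return 0;
}
```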