OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1271 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1282 __ JumpIfNotSmi(result, &deoptimize); | 1282 __ JumpIfNotSmi(result, &deoptimize); |
1283 __ SmiUntag(result); | 1283 __ SmiUntag(result); |
1284 __ b(&done); | 1284 __ b(&done); |
1285 | 1285 |
1286 __ bind(&deoptimize); | 1286 __ bind(&deoptimize); |
1287 DeoptimizeIf(al, instr->environment()); | 1287 DeoptimizeIf(al, instr->environment()); |
1288 __ bind(&done); | 1288 __ bind(&done); |
1289 } | 1289 } |
1290 | 1290 |
1291 | 1291 |
1292 void LCodeGen::DoMultiplyAdd(LMultiplyAdd* instr) { | |
1293 DwVfpRegister a = ToDoubleRegister(instr->a()); // FIXME: Not sure this is cor rect.. | |
ulan_google (2012/11/07 09:54:03):
Should we check for support of vmla here?
Otherwise [remainder of review comment truncated in this view].
| |
1294 DwVfpRegister b = ToDoubleRegister(instr->b()); | |
1295 DwVfpRegister c = ToDoubleRegister(instr->c()); | |
1296 | |
1297 __ vmla(c, a, b); | |
1298 } | |
1299 | |
1300 | |
1292 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { | 1301 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { |
1293 const Register result = ToRegister(instr->result()); | 1302 const Register result = ToRegister(instr->result()); |
1294 const Register left = ToRegister(instr->left()); | 1303 const Register left = ToRegister(instr->left()); |
1295 const Register remainder = ToRegister(instr->temp()); | 1304 const Register remainder = ToRegister(instr->temp()); |
1296 const Register scratch = scratch0(); | 1305 const Register scratch = scratch0(); |
1297 | 1306 |
1298 // We only optimize this for division by constants, because the standard | 1307 // We only optimize this for division by constants, because the standard |
1299 // integer division routine is usually slower than transitionning to VFP. | 1308 // integer division routine is usually slower than transitionning to VFP. |
1300 // This could be optimized on processors with SDIV available. | 1309 // This could be optimized on processors with SDIV available. |
1301 ASSERT(instr->right()->IsConstantOperand()); | 1310 ASSERT(instr->right()->IsConstantOperand()); |
(...skipping 4406 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
5708 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); | 5717 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); |
5709 __ ldr(result, FieldMemOperand(scratch, | 5718 __ ldr(result, FieldMemOperand(scratch, |
5710 FixedArray::kHeaderSize - kPointerSize)); | 5719 FixedArray::kHeaderSize - kPointerSize)); |
5711 __ bind(&done); | 5720 __ bind(&done); |
5712 } | 5721 } |
5713 | 5722 |
5714 | 5723 |
5715 #undef __ | 5724 #undef __ |
5716 | 5725 |
5717 } } // namespace v8::internal | 5726 } } // namespace v8::internal |
OLD | NEW |