| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 157 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 168 if (info()->IsOptimizing()) { | 168 if (info()->IsOptimizing()) { |
| 169 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 169 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| 170 | 170 |
| 171 #ifdef DEBUG | 171 #ifdef DEBUG |
| 172 if (strlen(FLAG_stop_at) > 0 && | 172 if (strlen(FLAG_stop_at) > 0 && |
| 173 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { | 173 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
| 174 __ int3(); | 174 __ int3(); |
| 175 } | 175 } |
| 176 #endif | 176 #endif |
| 177 | 177 |
| 178 // Classic mode functions and builtins need to replace the receiver with the | 178 // Sloppy mode functions and builtins need to replace the receiver with the |
| 179 // global proxy when called as functions (without an explicit receiver | 179 // global proxy when called as functions (without an explicit receiver |
| 180 // object). | 180 // object). |
| 181 if (info_->this_has_uses() && | 181 if (info_->this_has_uses() && |
| 182 info_->is_classic_mode() && | 182 info_->strict_mode() == SLOPPY && |
| 183 !info_->is_native()) { | 183 !info_->is_native()) { |
| 184 Label ok; | 184 Label ok; |
| 185 // +1 for return address. | 185 // +1 for return address. |
| 186 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; | 186 int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; |
| 187 __ mov(ecx, Operand(esp, receiver_offset)); | 187 __ mov(ecx, Operand(esp, receiver_offset)); |
| 188 | 188 |
| 189 __ cmp(ecx, isolate()->factory()->undefined_value()); | 189 __ cmp(ecx, isolate()->factory()->undefined_value()); |
| 190 __ j(not_equal, &ok, Label::kNear); | 190 __ j(not_equal, &ok, Label::kNear); |
| 191 | 191 |
| 192 __ mov(ecx, GlobalObjectOperand()); | 192 __ mov(ecx, GlobalObjectOperand()); |
| (...skipping 190 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 383 | 383 |
| 384 // Adjust the frame size, subsuming the unoptimized frame into the | 384 // Adjust the frame size, subsuming the unoptimized frame into the |
| 385 // optimized frame. | 385 // optimized frame. |
| 386 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); | 386 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
| 387 ASSERT(slots >= 1); | 387 ASSERT(slots >= 1); |
| 388 __ sub(esp, Immediate((slots - 1) * kPointerSize)); | 388 __ sub(esp, Immediate((slots - 1) * kPointerSize)); |
| 389 } | 389 } |
| 390 | 390 |
| 391 | 391 |
| 392 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { | 392 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
| 393 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
| 394 safepoints_.BumpLastLazySafepointIndex(); |
| 395 } |
| 393 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); | 396 if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr); |
| 394 } | 397 } |
| 395 | 398 |
| 396 | 399 |
| 397 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { | 400 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { |
| 398 if (!CpuFeatures::IsSupported(SSE2)) { | 401 if (!CpuFeatures::IsSupported(SSE2)) { |
| 399 if (instr->IsGoto()) { | 402 if (instr->IsGoto()) { |
| 400 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); | 403 x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr)); |
| 401 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && | 404 } else if (FLAG_debug_code && FLAG_enable_slow_asserts && |
| 402 !instr->IsGap() && !instr->IsReturn()) { | 405 !instr->IsGap() && !instr->IsReturn()) { |
| (...skipping 542 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 945 if (op->IsStackSlot()) { | 948 if (op->IsStackSlot()) { |
| 946 if (is_tagged) { | 949 if (is_tagged) { |
| 947 translation->StoreStackSlot(op->index()); | 950 translation->StoreStackSlot(op->index()); |
| 948 } else if (is_uint32) { | 951 } else if (is_uint32) { |
| 949 translation->StoreUint32StackSlot(op->index()); | 952 translation->StoreUint32StackSlot(op->index()); |
| 950 } else { | 953 } else { |
| 951 translation->StoreInt32StackSlot(op->index()); | 954 translation->StoreInt32StackSlot(op->index()); |
| 952 } | 955 } |
| 953 } else if (op->IsDoubleStackSlot()) { | 956 } else if (op->IsDoubleStackSlot()) { |
| 954 translation->StoreDoubleStackSlot(op->index()); | 957 translation->StoreDoubleStackSlot(op->index()); |
| 955 } else if (op->IsArgument()) { | |
| 956 ASSERT(is_tagged); | |
| 957 int src_index = GetStackSlotCount() + op->index(); | |
| 958 translation->StoreStackSlot(src_index); | |
| 959 } else if (op->IsRegister()) { | 958 } else if (op->IsRegister()) { |
| 960 Register reg = ToRegister(op); | 959 Register reg = ToRegister(op); |
| 961 if (is_tagged) { | 960 if (is_tagged) { |
| 962 translation->StoreRegister(reg); | 961 translation->StoreRegister(reg); |
| 963 } else if (is_uint32) { | 962 } else if (is_uint32) { |
| 964 translation->StoreUint32Register(reg); | 963 translation->StoreUint32Register(reg); |
| 965 } else { | 964 } else { |
| 966 translation->StoreInt32Register(reg); | 965 translation->StoreInt32Register(reg); |
| 967 } | 966 } |
| 968 } else if (op->IsDoubleRegister()) { | 967 } else if (op->IsDoubleRegister()) { |
| (...skipping 396 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1365 UNREACHABLE(); | 1364 UNREACHABLE(); |
| 1366 } | 1365 } |
| 1367 } | 1366 } |
| 1368 | 1367 |
| 1369 | 1368 |
| 1370 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { | 1369 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) { |
| 1371 GenerateOsrPrologue(); | 1370 GenerateOsrPrologue(); |
| 1372 } | 1371 } |
| 1373 | 1372 |
| 1374 | 1373 |
| 1374 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { |
| 1375 Register dividend = ToRegister(instr->dividend()); |
| 1376 int32_t divisor = instr->divisor(); |
| 1377 ASSERT(dividend.is(ToRegister(instr->result()))); |
| 1378 |
| 1379 // Theoretically, a variation of the branch-free code for integer division by |
| 1380 // a power of 2 (calculating the remainder via an additional multiplication |
| 1381 // (which gets simplified to an 'and') and subtraction) should be faster, and |
| 1382 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to |
| 1383 // indicate that positive dividends are heavily favored, so the branching |
| 1384 // version performs better. |
| 1385 HMod* hmod = instr->hydrogen(); |
| 1386 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1387 Label dividend_is_not_negative, done; |
| 1388 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1389 __ test(dividend, dividend); |
| 1390 __ j(not_sign, &dividend_is_not_negative, Label::kNear); |
| 1391 // Note that this is correct even for kMinInt operands. |
| 1392 __ neg(dividend); |
| 1393 __ and_(dividend, mask); |
| 1394 __ neg(dividend); |
| 1395 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1396 DeoptimizeIf(zero, instr->environment()); |
| 1397 } |
| 1398 __ jmp(&done, Label::kNear); |
| 1399 } |
| 1400 |
| 1401 __ bind(&dividend_is_not_negative); |
| 1402 __ and_(dividend, mask); |
| 1403 __ bind(&done); |
| 1404 } |
| 1405 |
| 1406 |
| 1407 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1408 Register dividend = ToRegister(instr->dividend()); |
| 1409 int32_t divisor = instr->divisor(); |
| 1410 ASSERT(ToRegister(instr->result()).is(eax)); |
| 1411 |
| 1412 if (divisor == 0) { |
| 1413 DeoptimizeIf(no_condition, instr->environment()); |
| 1414 return; |
| 1415 } |
| 1416 |
| 1417 __ FlooringDiv(dividend, Abs(divisor)); |
| 1418 __ mov(eax, dividend); |
| 1419 __ shr(eax, 31); |
| 1420 __ add(edx, eax); |
| 1421 __ imul(edx, edx, Abs(divisor)); |
| 1422 __ mov(eax, dividend); |
| 1423 __ sub(eax, edx); |
| 1424 |
| 1425 // Check for negative zero. |
| 1426 HMod* hmod = instr->hydrogen(); |
| 1427 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1428 Label remainder_not_zero; |
| 1429 __ j(not_zero, &remainder_not_zero, Label::kNear); |
| 1430 __ cmp(dividend, Immediate(0)); |
| 1431 DeoptimizeIf(less, instr->environment()); |
| 1432 __ bind(&remainder_not_zero); |
| 1433 } |
| 1434 } |
| 1435 |
| 1436 |
| 1375 void LCodeGen::DoModI(LModI* instr) { | 1437 void LCodeGen::DoModI(LModI* instr) { |
| 1376 HMod* hmod = instr->hydrogen(); | 1438 HMod* hmod = instr->hydrogen(); |
| 1377 HValue* left = hmod->left(); | 1439 |
| 1378 HValue* right = hmod->right(); | 1440 Register left_reg = ToRegister(instr->left()); |
| 1379 if (hmod->RightIsPowerOf2()) { | 1441 ASSERT(left_reg.is(eax)); |
| 1380 // TODO(svenpanne) We should really do the strength reduction on the | 1442 Register right_reg = ToRegister(instr->right()); |
| 1381 // Hydrogen level. | 1443 ASSERT(!right_reg.is(eax)); |
| 1382 Register left_reg = ToRegister(instr->left()); | 1444 ASSERT(!right_reg.is(edx)); |
| 1383 ASSERT(left_reg.is(ToRegister(instr->result()))); | 1445 Register result_reg = ToRegister(instr->result()); |
| 1384 | 1446 ASSERT(result_reg.is(edx)); |
| 1385 // Note: The code below even works when right contains kMinInt. | 1447 |
| 1386 int32_t divisor = Abs(right->GetInteger32Constant()); | 1448 Label done; |
| 1387 | 1449 // Check for x % 0, idiv would signal a divide error. We have to |
| 1388 Label left_is_not_negative, done; | 1450 // deopt in this case because we can't return a NaN. |
| 1389 if (left->CanBeNegative()) { | 1451 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1390 __ test(left_reg, Operand(left_reg)); | 1452 __ test(right_reg, Operand(right_reg)); |
| 1391 __ j(not_sign, &left_is_not_negative, Label::kNear); | 1453 DeoptimizeIf(zero, instr->environment()); |
| 1392 __ neg(left_reg); | 1454 } |
| 1393 __ and_(left_reg, divisor - 1); | 1455 |
| 1394 __ neg(left_reg); | 1456 // Check for kMinInt % -1, idiv would signal a divide error. We |
| 1395 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1457 // have to deopt if we care about -0, because we can't return that. |
| 1396 DeoptimizeIf(zero, instr->environment()); | 1458 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1397 } | 1459 Label no_overflow_possible; |
| 1460 __ cmp(left_reg, kMinInt); |
| 1461 __ j(not_equal, &no_overflow_possible, Label::kNear); |
| 1462 __ cmp(right_reg, -1); |
| 1463 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1464 DeoptimizeIf(equal, instr->environment()); |
| 1465 } else { |
| 1466 __ j(not_equal, &no_overflow_possible, Label::kNear); |
| 1467 __ Set(result_reg, Immediate(0)); |
| 1398 __ jmp(&done, Label::kNear); | 1468 __ jmp(&done, Label::kNear); |
| 1399 } | 1469 } |
| 1400 | 1470 __ bind(&no_overflow_possible); |
| 1401 __ bind(&left_is_not_negative); | 1471 } |
| 1402 __ and_(left_reg, divisor - 1); | 1472 |
| 1403 __ bind(&done); | 1473 // Sign extend dividend in eax into edx:eax. |
| 1404 } else { | 1474 __ cdq(); |
| 1405 Register left_reg = ToRegister(instr->left()); | 1475 |
| 1406 ASSERT(left_reg.is(eax)); | 1476 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1407 Register right_reg = ToRegister(instr->right()); | 1477 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1408 ASSERT(!right_reg.is(eax)); | 1478 Label positive_left; |
| 1409 ASSERT(!right_reg.is(edx)); | 1479 __ test(left_reg, Operand(left_reg)); |
| 1410 Register result_reg = ToRegister(instr->result()); | 1480 __ j(not_sign, &positive_left, Label::kNear); |
| 1411 ASSERT(result_reg.is(edx)); | |
| 1412 | |
| 1413 Label done; | |
| 1414 // Check for x % 0, idiv would signal a divide error. We have to | |
| 1415 // deopt in this case because we can't return a NaN. | |
| 1416 if (right->CanBeZero()) { | |
| 1417 __ test(right_reg, Operand(right_reg)); | |
| 1418 DeoptimizeIf(zero, instr->environment()); | |
| 1419 } | |
| 1420 | |
| 1421 // Check for kMinInt % -1, idiv would signal a divide error. We | |
| 1422 // have to deopt if we care about -0, because we can't return that. | |
| 1423 if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) { | |
| 1424 Label no_overflow_possible; | |
| 1425 __ cmp(left_reg, kMinInt); | |
| 1426 __ j(not_equal, &no_overflow_possible, Label::kNear); | |
| 1427 __ cmp(right_reg, -1); | |
| 1428 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
| 1429 DeoptimizeIf(equal, instr->environment()); | |
| 1430 } else { | |
| 1431 __ j(not_equal, &no_overflow_possible, Label::kNear); | |
| 1432 __ Set(result_reg, Immediate(0)); | |
| 1433 __ jmp(&done, Label::kNear); | |
| 1434 } | |
| 1435 __ bind(&no_overflow_possible); | |
| 1436 } | |
| 1437 | |
| 1438 // Sign extend dividend in eax into edx:eax. | |
| 1439 __ cdq(); | |
| 1440 | |
| 1441 // If we care about -0, test if the dividend is <0 and the result is 0. | |
| 1442 if (left->CanBeNegative() && | |
| 1443 hmod->CanBeZero() && | |
| 1444 hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
| 1445 Label positive_left; | |
| 1446 __ test(left_reg, Operand(left_reg)); | |
| 1447 __ j(not_sign, &positive_left, Label::kNear); | |
| 1448 __ idiv(right_reg); | |
| 1449 __ test(result_reg, Operand(result_reg)); | |
| 1450 DeoptimizeIf(zero, instr->environment()); | |
| 1451 __ jmp(&done, Label::kNear); | |
| 1452 __ bind(&positive_left); | |
| 1453 } | |
| 1454 __ idiv(right_reg); | 1481 __ idiv(right_reg); |
| 1455 __ bind(&done); | 1482 __ test(result_reg, Operand(result_reg)); |
| 1456 } | 1483 DeoptimizeIf(zero, instr->environment()); |
| 1457 } | 1484 __ jmp(&done, Label::kNear); |
| 1458 | 1485 __ bind(&positive_left); |
| 1459 | 1486 } |
| 1460 void LCodeGen::DoDivI(LDivI* instr) { | 1487 __ idiv(right_reg); |
| 1461 if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) { | 1488 __ bind(&done); |
| 1462 Register dividend = ToRegister(instr->left()); | 1489 } |
| 1463 HDiv* hdiv = instr->hydrogen(); | 1490 |
| 1464 int32_t divisor = hdiv->right()->GetInteger32Constant(); | 1491 |
| 1465 Register result = ToRegister(instr->result()); | 1492 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1466 ASSERT(!result.is(dividend)); | 1493 Register dividend = ToRegister(instr->dividend()); |
| 1467 | 1494 int32_t divisor = instr->divisor(); |
| 1468 // Check for (0 / -x) that will produce negative zero. | 1495 Register result = ToRegister(instr->result()); |
| 1469 if (hdiv->left()->RangeCanInclude(0) && divisor < 0 && | 1496 ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); |
| 1470 hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1497 ASSERT(!result.is(dividend)); |
| 1471 __ test(dividend, Operand(dividend)); | |
| 1472 DeoptimizeIf(zero, instr->environment()); | |
| 1473 } | |
| 1474 // Check for (kMinInt / -1). | |
| 1475 if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 && | |
| 1476 hdiv->CheckFlag(HValue::kCanOverflow)) { | |
| 1477 __ cmp(dividend, kMinInt); | |
| 1478 DeoptimizeIf(zero, instr->environment()); | |
| 1479 } | |
| 1480 // Deoptimize if remainder will not be 0. | |
| 1481 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | |
| 1482 Abs(divisor) != 1) { | |
| 1483 __ test(dividend, Immediate(Abs(divisor) - 1)); | |
| 1484 DeoptimizeIf(not_zero, instr->environment()); | |
| 1485 } | |
| 1486 __ Move(result, dividend); | |
| 1487 int32_t shift = WhichPowerOf2(Abs(divisor)); | |
| 1488 if (shift > 0) { | |
| 1489 // The arithmetic shift is always OK, the 'if' is an optimization only. | |
| 1490 if (shift > 1) __ sar(result, 31); | |
| 1491 __ shr(result, 32 - shift); | |
| 1492 __ add(result, dividend); | |
| 1493 __ sar(result, shift); | |
| 1494 } | |
| 1495 if (divisor < 0) __ neg(result); | |
| 1496 return; | |
| 1497 } | |
| 1498 | |
| 1499 LOperand* right = instr->right(); | |
| 1500 ASSERT(ToRegister(instr->result()).is(eax)); | |
| 1501 ASSERT(ToRegister(instr->left()).is(eax)); | |
| 1502 ASSERT(!ToRegister(instr->right()).is(eax)); | |
| 1503 ASSERT(!ToRegister(instr->right()).is(edx)); | |
| 1504 | |
| 1505 Register left_reg = eax; | |
| 1506 | |
| 1507 // Check for x / 0. | |
| 1508 Register right_reg = ToRegister(right); | |
| 1509 if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | |
| 1510 __ test(right_reg, ToOperand(right)); | |
| 1511 DeoptimizeIf(zero, instr->environment()); | |
| 1512 } | |
| 1513 | 1498 |
| 1514 // Check for (0 / -x) that will produce negative zero. | 1499 // Check for (0 / -x) that will produce negative zero. |
| 1515 if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1500 HDiv* hdiv = instr->hydrogen(); |
| 1516 Label left_not_zero; | 1501 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1517 __ test(left_reg, Operand(left_reg)); | 1502 __ test(dividend, dividend); |
| 1518 __ j(not_zero, &left_not_zero, Label::kNear); | 1503 DeoptimizeIf(zero, instr->environment()); |
| 1519 __ test(right_reg, ToOperand(right)); | 1504 } |
| 1520 DeoptimizeIf(sign, instr->environment()); | |
| 1521 __ bind(&left_not_zero); | |
| 1522 } | |
| 1523 | |
| 1524 // Check for (kMinInt / -1). | 1505 // Check for (kMinInt / -1). |
| 1525 if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) { | 1506 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1526 Label left_not_min_int; | 1507 __ cmp(dividend, kMinInt); |
| 1527 __ cmp(left_reg, kMinInt); | 1508 DeoptimizeIf(zero, instr->environment()); |
| 1528 __ j(not_zero, &left_not_min_int, Label::kNear); | 1509 } |
| 1529 __ cmp(right_reg, -1); | 1510 // Deoptimize if remainder will not be 0. |
| 1530 DeoptimizeIf(zero, instr->environment()); | 1511 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1531 __ bind(&left_not_min_int); | 1512 divisor != 1 && divisor != -1) { |
| 1532 } | 1513 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1533 | 1514 __ test(dividend, Immediate(mask)); |
| 1534 // Sign extend to edx. | |
| 1535 __ cdq(); | |
| 1536 __ idiv(right_reg); | |
| 1537 | |
| 1538 if (instr->is_flooring()) { | |
| 1539 Label done; | |
| 1540 __ test(edx, edx); | |
| 1541 __ j(zero, &done, Label::kNear); | |
| 1542 __ xor_(edx, right_reg); | |
| 1543 __ sar(edx, 31); | |
| 1544 __ add(eax, edx); | |
| 1545 __ bind(&done); | |
| 1546 } else if (!instr->hydrogen()->CheckFlag( | |
| 1547 HInstruction::kAllUsesTruncatingToInt32)) { | |
| 1548 // Deoptimize if remainder is not 0. | |
| 1549 __ test(edx, Operand(edx)); | |
| 1550 DeoptimizeIf(not_zero, instr->environment()); | 1515 DeoptimizeIf(not_zero, instr->environment()); |
| 1551 } | 1516 } |
| 1552 } | 1517 __ Move(result, dividend); |
| 1553 | 1518 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1554 | 1519 if (shift > 0) { |
| 1555 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { | 1520 // The arithmetic shift is always OK, the 'if' is an optimization only. |
| 1556 ASSERT(instr->right()->IsConstantOperand()); | 1521 if (shift > 1) __ sar(result, 31); |
| 1557 | 1522 __ shr(result, 32 - shift); |
| 1558 Register dividend = ToRegister(instr->left()); | 1523 __ add(result, dividend); |
| 1559 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); | 1524 __ sar(result, shift); |
| 1560 Register result = ToRegister(instr->result()); | 1525 } |
| 1561 | 1526 if (divisor < 0) __ neg(result); |
| 1562 switch (divisor) { | 1527 } |
| 1563 case 0: | 1528 |
| 1529 |
| 1530 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1531 Register dividend = ToRegister(instr->dividend()); |
| 1532 int32_t divisor = instr->divisor(); |
| 1533 ASSERT(ToRegister(instr->result()).is(edx)); |
| 1534 |
| 1535 if (divisor == 0) { |
| 1564 DeoptimizeIf(no_condition, instr->environment()); | 1536 DeoptimizeIf(no_condition, instr->environment()); |
| 1565 return; | 1537 return; |
| 1566 | 1538 } |
| 1567 case 1: | 1539 |
| 1568 __ Move(result, dividend); | 1540 // Check for (0 / -x) that will produce negative zero. |
| 1541 HDiv* hdiv = instr->hydrogen(); |
| 1542 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1543 __ test(dividend, dividend); |
| 1544 DeoptimizeIf(zero, instr->environment()); |
| 1545 } |
| 1546 |
| 1547 __ FlooringDiv(dividend, Abs(divisor)); |
| 1548 __ mov(eax, dividend); |
| 1549 __ shr(eax, 31); |
| 1550 __ add(edx, eax); |
| 1551 if (divisor < 0) __ neg(edx); |
| 1552 |
| 1553 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1554 __ mov(eax, edx); |
| 1555 __ imul(eax, eax, divisor); |
| 1556 __ sub(eax, dividend); |
| 1557 DeoptimizeIf(not_equal, instr->environment()); |
| 1558 } |
| 1559 } |
| 1560 |
| 1561 |
| 1562 void LCodeGen::DoDivI(LDivI* instr) { |
| 1563 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1564 Register dividend = ToRegister(instr->left()); |
| 1565 Register divisor = ToRegister(instr->right()); |
| 1566 Register remainder = ToRegister(instr->temp()); |
| 1567 Register result = ToRegister(instr->result()); |
| 1568 ASSERT(dividend.is(eax)); |
| 1569 ASSERT(remainder.is(edx)); |
| 1570 ASSERT(result.is(eax)); |
| 1571 ASSERT(!divisor.is(eax)); |
| 1572 ASSERT(!divisor.is(edx)); |
| 1573 |
| 1574 // Check for x / 0. |
| 1575 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1576 __ test(divisor, divisor); |
| 1577 DeoptimizeIf(zero, instr->environment()); |
| 1578 } |
| 1579 |
| 1580 // Check for (0 / -x) that will produce negative zero. |
| 1581 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1582 Label dividend_not_zero; |
| 1583 __ test(dividend, dividend); |
| 1584 __ j(not_zero, &dividend_not_zero, Label::kNear); |
| 1585 __ test(divisor, divisor); |
| 1586 DeoptimizeIf(sign, instr->environment()); |
| 1587 __ bind(&dividend_not_zero); |
| 1588 } |
| 1589 |
| 1590 // Check for (kMinInt / -1). |
| 1591 if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
| 1592 Label dividend_not_min_int; |
| 1593 __ cmp(dividend, kMinInt); |
| 1594 __ j(not_zero, &dividend_not_min_int, Label::kNear); |
| 1595 __ cmp(divisor, -1); |
| 1596 DeoptimizeIf(zero, instr->environment()); |
| 1597 __ bind(&dividend_not_min_int); |
| 1598 } |
| 1599 |
| 1600 // Sign extend to edx (= remainder). |
| 1601 __ cdq(); |
| 1602 __ idiv(divisor); |
| 1603 |
| 1604 if (hdiv->IsMathFloorOfDiv()) { |
| 1605 Label done; |
| 1606 __ test(remainder, remainder); |
| 1607 __ j(zero, &done, Label::kNear); |
| 1608 __ xor_(remainder, divisor); |
| 1609 __ sar(remainder, 31); |
| 1610 __ add(result, remainder); |
| 1611 __ bind(&done); |
| 1612 } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1613 // Deoptimize if remainder is not 0. |
| 1614 __ test(remainder, remainder); |
| 1615 DeoptimizeIf(not_zero, instr->environment()); |
| 1616 } |
| 1617 } |
| 1618 |
| 1619 |
| 1620 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 1621 Register dividend = ToRegister(instr->dividend()); |
| 1622 int32_t divisor = instr->divisor(); |
| 1623 ASSERT(dividend.is(ToRegister(instr->result()))); |
| 1624 |
| 1625 // If the divisor is positive, things are easy: There can be no deopts and we |
| 1626 // can simply do an arithmetic right shift. |
| 1627 if (divisor == 1) return; |
| 1628 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1629 if (divisor > 1) { |
| 1630 __ sar(dividend, shift); |
| 1569 return; | 1631 return; |
| 1570 | 1632 } |
| 1571 case -1: | 1633 |
| 1572 __ Move(result, dividend); | 1634 // If the divisor is negative, we have to negate and handle edge cases. |
| 1573 __ neg(result); | 1635 Label not_kmin_int, done; |
| 1574 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1636 __ neg(dividend); |
| 1575 DeoptimizeIf(zero, instr->environment()); | 1637 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1638 DeoptimizeIf(zero, instr->environment()); |
| 1639 } |
| 1640 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1641 // Note that we could emit branch-free code, but that would need one more |
| 1642 // register. |
| 1643 if (divisor == -1) { |
| 1644 DeoptimizeIf(overflow, instr->environment()); |
| 1645 } else { |
| 1646 __ j(no_overflow, &not_kmin_int, Label::kNear); |
| 1647 __ mov(dividend, Immediate(kMinInt / divisor)); |
| 1648 __ jmp(&done, Label::kNear); |
| 1576 } | 1649 } |
| 1577 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1650 } |
| 1578 DeoptimizeIf(overflow, instr->environment()); | 1651 __ bind(&not_kmin_int); |
| 1579 } | 1652 __ sar(dividend, shift); |
| 1653 __ bind(&done); |
| 1654 } |
| 1655 |
| 1656 |
| 1657 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1658 Register dividend = ToRegister(instr->dividend()); |
| 1659 int32_t divisor = instr->divisor(); |
| 1660 ASSERT(ToRegister(instr->result()).is(edx)); |
| 1661 |
| 1662 if (divisor == 0) { |
| 1663 DeoptimizeIf(no_condition, instr->environment()); |
| 1580 return; | 1664 return; |
| 1581 } | 1665 } |
| 1582 | 1666 |
| 1583 uint32_t divisor_abs = abs(divisor); | 1667 // Check for (0 / -x) that will produce negative zero. |
| 1584 if (IsPowerOf2(divisor_abs)) { | 1668 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1585 int32_t power = WhichPowerOf2(divisor_abs); | 1669 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1586 if (divisor < 0) { | 1670 __ test(dividend, dividend); |
| 1587 // Input[dividend] is clobbered. | 1671 DeoptimizeIf(zero, instr->environment()); |
| 1588 // The sequence is tedious because neg(dividend) might overflow. | 1672 } |
| 1589 __ mov(result, dividend); | 1673 |
| 1590 __ sar(dividend, 31); | 1674 __ FlooringDiv(dividend, divisor); |
| 1591 __ neg(result); | 1675 } |
| 1592 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1676 |
| 1593 DeoptimizeIf(zero, instr->environment()); | 1677 |
| 1594 } | |
| 1595 __ shl(dividend, 32 - power); | |
| 1596 __ sar(result, power); | |
| 1597 __ not_(dividend); | |
| 1598 // Clear result.sign if dividend.sign is set. | |
| 1599 __ and_(result, dividend); | |
| 1600 } else { | |
| 1601 __ Move(result, dividend); | |
| 1602 __ sar(result, power); | |
| 1603 } | |
| 1604 } else { | |
| 1605 ASSERT(ToRegister(instr->left()).is(eax)); | |
| 1606 ASSERT(ToRegister(instr->result()).is(edx)); | |
| 1607 Register scratch = ToRegister(instr->temp()); | |
| 1608 | |
| 1609 // Find b which: 2^b < divisor_abs < 2^(b+1). | |
| 1610 unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs); | |
| 1611 unsigned shift = 32 + b; // Precision +1bit (effectively). | |
| 1612 double multiplier_f = | |
| 1613 static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs; | |
| 1614 int64_t multiplier; | |
| 1615 if (multiplier_f - std::floor(multiplier_f) < 0.5) { | |
| 1616 multiplier = static_cast<int64_t>(std::floor(multiplier_f)); | |
| 1617 } else { | |
| 1618 multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1; | |
| 1619 } | |
| 1620 // The multiplier is a uint32. | |
| 1621 ASSERT(multiplier > 0 && | |
| 1622 multiplier < (static_cast<int64_t>(1) << 32)); | |
| 1623 __ mov(scratch, dividend); | |
| 1624 if (divisor < 0 && | |
| 1625 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
| 1626 __ test(dividend, dividend); | |
| 1627 DeoptimizeIf(zero, instr->environment()); | |
| 1628 } | |
| 1629 __ mov(edx, static_cast<int32_t>(multiplier)); | |
| 1630 __ imul(edx); | |
| 1631 if (static_cast<int32_t>(multiplier) < 0) { | |
| 1632 __ add(edx, scratch); | |
| 1633 } | |
| 1634 Register reg_lo = eax; | |
| 1635 Register reg_byte_scratch = scratch; | |
| 1636 if (!reg_byte_scratch.is_byte_register()) { | |
| 1637 __ xchg(reg_lo, reg_byte_scratch); | |
| 1638 reg_lo = scratch; | |
| 1639 reg_byte_scratch = eax; | |
| 1640 } | |
| 1641 if (divisor < 0) { | |
| 1642 __ xor_(reg_byte_scratch, reg_byte_scratch); | |
| 1643 __ cmp(reg_lo, 0x40000000); | |
| 1644 __ setcc(above, reg_byte_scratch); | |
| 1645 __ neg(edx); | |
| 1646 __ sub(edx, reg_byte_scratch); | |
| 1647 } else { | |
| 1648 __ xor_(reg_byte_scratch, reg_byte_scratch); | |
| 1649 __ cmp(reg_lo, 0xC0000000); | |
| 1650 __ setcc(above_equal, reg_byte_scratch); | |
| 1651 __ add(edx, reg_byte_scratch); | |
| 1652 } | |
| 1653 __ sar(edx, shift - 32); | |
| 1654 } | |
| 1655 } | |
| 1656 | |
| 1657 | |
| 1658 void LCodeGen::DoMulI(LMulI* instr) { | 1678 void LCodeGen::DoMulI(LMulI* instr) { |
| 1659 Register left = ToRegister(instr->left()); | 1679 Register left = ToRegister(instr->left()); |
| 1660 LOperand* right = instr->right(); | 1680 LOperand* right = instr->right(); |
| 1661 | 1681 |
| 1662 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1682 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1663 __ mov(ToRegister(instr->temp()), left); | 1683 __ mov(ToRegister(instr->temp()), left); |
| 1664 } | 1684 } |
| 1665 | 1685 |
| 1666 if (right->IsConstantOperand()) { | 1686 if (right->IsConstantOperand()) { |
| 1667 // Try strength reductions on the multiplication. | 1687 // Try strength reductions on the multiplication. |
| (...skipping 593 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2261 | 2281 |
| 2262 | 2282 |
| 2263 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 2283 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| 2264 ASSERT(ToRegister(instr->context()).is(esi)); | 2284 ASSERT(ToRegister(instr->context()).is(esi)); |
| 2265 ASSERT(ToRegister(instr->left()).is(edx)); | 2285 ASSERT(ToRegister(instr->left()).is(edx)); |
| 2266 ASSERT(ToRegister(instr->right()).is(eax)); | 2286 ASSERT(ToRegister(instr->right()).is(eax)); |
| 2267 ASSERT(ToRegister(instr->result()).is(eax)); | 2287 ASSERT(ToRegister(instr->result()).is(eax)); |
| 2268 | 2288 |
| 2269 BinaryOpICStub stub(instr->op(), NO_OVERWRITE); | 2289 BinaryOpICStub stub(instr->op(), NO_OVERWRITE); |
| 2270 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 2290 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2271 __ nop(); // Signals no inlined code. | |
| 2272 } | 2291 } |
| 2273 | 2292 |
| 2274 | 2293 |
| 2275 template<class InstrType> | 2294 template<class InstrType> |
| 2276 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { | 2295 void LCodeGen::EmitBranch(InstrType instr, Condition cc) { |
| 2277 int left_block = instr->TrueDestination(chunk_); | 2296 int left_block = instr->TrueDestination(chunk_); |
| 2278 int right_block = instr->FalseDestination(chunk_); | 2297 int right_block = instr->FalseDestination(chunk_); |
| 2279 | 2298 |
| 2280 int next_block = GetNextEmittedBlock(); | 2299 int next_block = GetNextEmittedBlock(); |
| 2281 | 2300 |
| (...skipping 1112 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3394 case EXTERNAL_FLOAT64_ELEMENTS: | 3413 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3395 case FLOAT32_ELEMENTS: | 3414 case FLOAT32_ELEMENTS: |
| 3396 case FLOAT64_ELEMENTS: | 3415 case FLOAT64_ELEMENTS: |
| 3397 case FAST_SMI_ELEMENTS: | 3416 case FAST_SMI_ELEMENTS: |
| 3398 case FAST_ELEMENTS: | 3417 case FAST_ELEMENTS: |
| 3399 case FAST_DOUBLE_ELEMENTS: | 3418 case FAST_DOUBLE_ELEMENTS: |
| 3400 case FAST_HOLEY_SMI_ELEMENTS: | 3419 case FAST_HOLEY_SMI_ELEMENTS: |
| 3401 case FAST_HOLEY_ELEMENTS: | 3420 case FAST_HOLEY_ELEMENTS: |
| 3402 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3421 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3403 case DICTIONARY_ELEMENTS: | 3422 case DICTIONARY_ELEMENTS: |
| 3404 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3423 case SLOPPY_ARGUMENTS_ELEMENTS: |
| 3405 UNREACHABLE(); | 3424 UNREACHABLE(); |
| 3406 break; | 3425 break; |
| 3407 } | 3426 } |
| 3408 } | 3427 } |
| 3409 } | 3428 } |
| 3410 | 3429 |
| 3411 | 3430 |
| 3412 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { | 3431 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
| 3413 if (instr->hydrogen()->RequiresHoleCheck()) { | 3432 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3414 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + | 3433 int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + |
| (...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4189 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 4208 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 4190 } | 4209 } |
| 4191 | 4210 |
| 4192 | 4211 |
| 4193 void LCodeGen::DoCallNew(LCallNew* instr) { | 4212 void LCodeGen::DoCallNew(LCallNew* instr) { |
| 4194 ASSERT(ToRegister(instr->context()).is(esi)); | 4213 ASSERT(ToRegister(instr->context()).is(esi)); |
| 4195 ASSERT(ToRegister(instr->constructor()).is(edi)); | 4214 ASSERT(ToRegister(instr->constructor()).is(edi)); |
| 4196 ASSERT(ToRegister(instr->result()).is(eax)); | 4215 ASSERT(ToRegister(instr->result()).is(eax)); |
| 4197 | 4216 |
| 4198 // No cell in ebx for construct type feedback in optimized code | 4217 // No cell in ebx for construct type feedback in optimized code |
| 4199 Handle<Object> undefined_value(isolate()->factory()->undefined_value()); | 4218 Handle<Object> megamorphic_symbol = |
| 4200 __ mov(ebx, Immediate(undefined_value)); | 4219 TypeFeedbackInfo::MegamorphicSentinel(isolate()); |
| 4220 __ mov(ebx, Immediate(megamorphic_symbol)); |
| 4201 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); | 4221 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); |
| 4202 __ Set(eax, Immediate(instr->arity())); | 4222 __ Set(eax, Immediate(instr->arity())); |
| 4203 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); | 4223 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 4204 } | 4224 } |
| 4205 | 4225 |
| 4206 | 4226 |
| 4207 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | 4227 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
| 4208 ASSERT(ToRegister(instr->context()).is(esi)); | 4228 ASSERT(ToRegister(instr->context()).is(esi)); |
| 4209 ASSERT(ToRegister(instr->constructor()).is(edi)); | 4229 ASSERT(ToRegister(instr->constructor()).is(edi)); |
| 4210 ASSERT(ToRegister(instr->result()).is(eax)); | 4230 ASSERT(ToRegister(instr->result()).is(eax)); |
| 4211 | 4231 |
| 4212 __ Set(eax, Immediate(instr->arity())); | 4232 __ Set(eax, Immediate(instr->arity())); |
| 4213 __ mov(ebx, factory()->undefined_value()); | 4233 __ mov(ebx, TypeFeedbackInfo::MegamorphicSentinel(isolate())); |
| 4214 ElementsKind kind = instr->hydrogen()->elements_kind(); | 4234 ElementsKind kind = instr->hydrogen()->elements_kind(); |
| 4215 AllocationSiteOverrideMode override_mode = | 4235 AllocationSiteOverrideMode override_mode = |
| 4216 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | 4236 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
| 4217 ? DISABLE_ALLOCATION_SITES | 4237 ? DISABLE_ALLOCATION_SITES |
| 4218 : DONT_OVERRIDE; | 4238 : DONT_OVERRIDE; |
| 4219 | 4239 |
| 4220 if (instr->arity() == 0) { | 4240 if (instr->arity() == 0) { |
| 4221 ArrayNoArgumentConstructorStub stub(kind, override_mode); | 4241 ArrayNoArgumentConstructorStub stub(kind, override_mode); |
| 4222 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); | 4242 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 4223 } else if (instr->arity() == 1) { | 4243 } else if (instr->arity() == 1) { |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4295 } | 4315 } |
| 4296 return; | 4316 return; |
| 4297 } | 4317 } |
| 4298 | 4318 |
| 4299 Register object = ToRegister(instr->object()); | 4319 Register object = ToRegister(instr->object()); |
| 4300 Handle<Map> transition = instr->transition(); | 4320 Handle<Map> transition = instr->transition(); |
| 4301 SmiCheck check_needed = | 4321 SmiCheck check_needed = |
| 4302 instr->hydrogen()->value()->IsHeapObject() | 4322 instr->hydrogen()->value()->IsHeapObject() |
| 4303 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 4323 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 4304 | 4324 |
| 4305 if (FLAG_track_fields && representation.IsSmi()) { | 4325 if (representation.IsSmi()) { |
| 4306 if (instr->value()->IsConstantOperand()) { | 4326 if (instr->value()->IsConstantOperand()) { |
| 4307 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); | 4327 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| 4308 if (!IsSmi(operand_value)) { | 4328 if (!IsSmi(operand_value)) { |
| 4309 DeoptimizeIf(no_condition, instr->environment()); | 4329 DeoptimizeIf(no_condition, instr->environment()); |
| 4310 } | 4330 } |
| 4311 } | 4331 } |
| 4312 } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | 4332 } else if (representation.IsHeapObject()) { |
| 4313 if (instr->value()->IsConstantOperand()) { | 4333 if (instr->value()->IsConstantOperand()) { |
| 4314 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); | 4334 LConstantOperand* operand_value = LConstantOperand::cast(instr->value()); |
| 4315 if (IsInteger32(operand_value)) { | 4335 if (IsInteger32(operand_value)) { |
| 4316 DeoptimizeIf(no_condition, instr->environment()); | 4336 DeoptimizeIf(no_condition, instr->environment()); |
| 4317 } | 4337 } |
| 4318 } else { | 4338 } else { |
| 4319 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 4339 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 4320 Register value = ToRegister(instr->value()); | 4340 Register value = ToRegister(instr->value()); |
| 4321 __ test(value, Immediate(kSmiTagMask)); | 4341 __ test(value, Immediate(kSmiTagMask)); |
| 4322 DeoptimizeIf(zero, instr->environment()); | 4342 DeoptimizeIf(zero, instr->environment()); |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4400 } | 4420 } |
| 4401 } | 4421 } |
| 4402 | 4422 |
| 4403 | 4423 |
| 4404 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 4424 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| 4405 ASSERT(ToRegister(instr->context()).is(esi)); | 4425 ASSERT(ToRegister(instr->context()).is(esi)); |
| 4406 ASSERT(ToRegister(instr->object()).is(edx)); | 4426 ASSERT(ToRegister(instr->object()).is(edx)); |
| 4407 ASSERT(ToRegister(instr->value()).is(eax)); | 4427 ASSERT(ToRegister(instr->value()).is(eax)); |
| 4408 | 4428 |
| 4409 __ mov(ecx, instr->name()); | 4429 __ mov(ecx, instr->name()); |
| 4410 Handle<Code> ic = StoreIC::initialize_stub(isolate(), | 4430 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); |
| 4411 instr->strict_mode_flag()); | |
| 4412 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4431 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 4413 } | 4432 } |
| 4414 | 4433 |
| 4415 | 4434 |
| 4416 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { | 4435 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) { |
| 4417 if (FLAG_debug_code && check->hydrogen()->skip_check()) { | 4436 if (FLAG_debug_code && check->hydrogen()->skip_check()) { |
| 4418 Label done; | 4437 Label done; |
| 4419 __ j(NegateCondition(cc), &done, Label::kNear); | 4438 __ j(NegateCondition(cc), &done, Label::kNear); |
| 4420 __ int3(); | 4439 __ int3(); |
| 4421 __ bind(&done); | 4440 __ bind(&done); |
| (...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4506 case EXTERNAL_FLOAT64_ELEMENTS: | 4525 case EXTERNAL_FLOAT64_ELEMENTS: |
| 4507 case FLOAT32_ELEMENTS: | 4526 case FLOAT32_ELEMENTS: |
| 4508 case FLOAT64_ELEMENTS: | 4527 case FLOAT64_ELEMENTS: |
| 4509 case FAST_SMI_ELEMENTS: | 4528 case FAST_SMI_ELEMENTS: |
| 4510 case FAST_ELEMENTS: | 4529 case FAST_ELEMENTS: |
| 4511 case FAST_DOUBLE_ELEMENTS: | 4530 case FAST_DOUBLE_ELEMENTS: |
| 4512 case FAST_HOLEY_SMI_ELEMENTS: | 4531 case FAST_HOLEY_SMI_ELEMENTS: |
| 4513 case FAST_HOLEY_ELEMENTS: | 4532 case FAST_HOLEY_ELEMENTS: |
| 4514 case FAST_HOLEY_DOUBLE_ELEMENTS: | 4533 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 4515 case DICTIONARY_ELEMENTS: | 4534 case DICTIONARY_ELEMENTS: |
| 4516 case NON_STRICT_ARGUMENTS_ELEMENTS: | 4535 case SLOPPY_ARGUMENTS_ELEMENTS: |
| 4517 UNREACHABLE(); | 4536 UNREACHABLE(); |
| 4518 break; | 4537 break; |
| 4519 } | 4538 } |
| 4520 } | 4539 } |
| 4521 } | 4540 } |
| 4522 | 4541 |
| 4523 | 4542 |
| 4524 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { | 4543 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| 4525 ExternalReference canonical_nan_reference = | 4544 ExternalReference canonical_nan_reference = |
| 4526 ExternalReference::address_of_canonical_non_hole_nan(); | 4545 ExternalReference::address_of_canonical_non_hole_nan(); |
| (...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4652 } | 4671 } |
| 4653 } | 4672 } |
| 4654 | 4673 |
| 4655 | 4674 |
| 4656 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { | 4675 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| 4657 ASSERT(ToRegister(instr->context()).is(esi)); | 4676 ASSERT(ToRegister(instr->context()).is(esi)); |
| 4658 ASSERT(ToRegister(instr->object()).is(edx)); | 4677 ASSERT(ToRegister(instr->object()).is(edx)); |
| 4659 ASSERT(ToRegister(instr->key()).is(ecx)); | 4678 ASSERT(ToRegister(instr->key()).is(ecx)); |
| 4660 ASSERT(ToRegister(instr->value()).is(eax)); | 4679 ASSERT(ToRegister(instr->value()).is(eax)); |
| 4661 | 4680 |
| 4662 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) | 4681 Handle<Code> ic = instr->strict_mode() == STRICT |
| 4663 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 4682 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
| 4664 : isolate()->builtins()->KeyedStoreIC_Initialize(); | 4683 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
| 4665 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 4684 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 4666 } | 4685 } |
| 4667 | 4686 |
| 4668 | 4687 |
| 4669 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4688 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4670 Register object = ToRegister(instr->object()); | 4689 Register object = ToRegister(instr->object()); |
| 4671 Register temp = ToRegister(instr->temp()); | 4690 Register temp = ToRegister(instr->temp()); |
| 4672 Label no_memento_found; | 4691 Label no_memento_found; |
| (...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4849 Register input_reg = ToRegister(input); | 4868 Register input_reg = ToRegister(input); |
| 4850 __ push(input_reg); | 4869 __ push(input_reg); |
| 4851 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); | 4870 X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand); |
| 4852 __ pop(input_reg); | 4871 __ pop(input_reg); |
| 4853 } else { | 4872 } else { |
| 4854 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); | 4873 X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand); |
| 4855 } | 4874 } |
| 4856 } | 4875 } |
| 4857 | 4876 |
| 4858 | 4877 |
| 4859 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) { | |
| 4860 Register input = ToRegister(instr->value()); | |
| 4861 __ SmiTag(input); | |
| 4862 if (!instr->hydrogen()->value()->HasRange() || | |
| 4863 !instr->hydrogen()->value()->range()->IsInSmiRange()) { | |
| 4864 DeoptimizeIf(overflow, instr->environment()); | |
| 4865 } | |
| 4866 } | |
| 4867 | |
| 4868 | |
| 4869 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 4878 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| 4870 LOperand* input = instr->value(); | 4879 LOperand* input = instr->value(); |
| 4871 LOperand* output = instr->result(); | 4880 LOperand* output = instr->result(); |
| 4872 if (CpuFeatures::IsSupported(SSE2)) { | 4881 if (CpuFeatures::IsSupported(SSE2)) { |
| 4873 CpuFeatureScope scope(masm(), SSE2); | 4882 CpuFeatureScope scope(masm(), SSE2); |
| 4874 LOperand* temp = instr->temp(); | 4883 LOperand* temp = instr->temp(); |
| 4875 | 4884 |
| 4876 __ LoadUint32(ToDoubleRegister(output), | 4885 __ LoadUint32(ToDoubleRegister(output), |
| 4877 ToRegister(input), | 4886 ToRegister(input), |
| 4878 ToDoubleRegister(temp)); | 4887 ToDoubleRegister(temp)); |
| 4879 } else { | 4888 } else { |
| 4880 X87Register res = ToX87Register(output); | 4889 X87Register res = ToX87Register(output); |
| 4881 X87PrepareToWrite(res); | 4890 X87PrepareToWrite(res); |
| 4882 __ LoadUint32NoSSE2(ToRegister(input)); | 4891 __ LoadUint32NoSSE2(ToRegister(input)); |
| 4883 X87CommitWrite(res); | 4892 X87CommitWrite(res); |
| 4884 } | 4893 } |
| 4885 } | 4894 } |
| 4886 | 4895 |
| 4887 | 4896 |
| 4888 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { | |
| 4889 Register input = ToRegister(instr->value()); | |
| 4890 if (!instr->hydrogen()->value()->HasRange() || | |
| 4891 !instr->hydrogen()->value()->range()->IsInSmiRange()) { | |
| 4892 __ test(input, Immediate(0xc0000000)); | |
| 4893 DeoptimizeIf(not_zero, instr->environment()); | |
| 4894 } | |
| 4895 __ SmiTag(input); | |
| 4896 } | |
| 4897 | |
| 4898 | |
| 4899 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { | 4897 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
| 4900 class DeferredNumberTagI V8_FINAL : public LDeferredCode { | 4898 class DeferredNumberTagI V8_FINAL : public LDeferredCode { |
| 4901 public: | 4899 public: |
| 4902 DeferredNumberTagI(LCodeGen* codegen, | 4900 DeferredNumberTagI(LCodeGen* codegen, |
| 4903 LNumberTagI* instr, | 4901 LNumberTagI* instr, |
| 4904 const X87Stack& x87_stack) | 4902 const X87Stack& x87_stack) |
| 4905 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | 4903 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
| 4906 virtual void Generate() V8_OVERRIDE { | 4904 virtual void Generate() V8_OVERRIDE { |
| 4907 codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32); | 4905 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(), |
| 4906 NULL, SIGNED_INT32); |
| 4908 } | 4907 } |
| 4909 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4908 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| 4910 private: | 4909 private: |
| 4911 LNumberTagI* instr_; | 4910 LNumberTagI* instr_; |
| 4912 }; | 4911 }; |
| 4913 | 4912 |
| 4914 LOperand* input = instr->value(); | 4913 LOperand* input = instr->value(); |
| 4915 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4914 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 4916 Register reg = ToRegister(input); | 4915 Register reg = ToRegister(input); |
| 4917 | 4916 |
| 4918 DeferredNumberTagI* deferred = | 4917 DeferredNumberTagI* deferred = |
| 4919 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); | 4918 new(zone()) DeferredNumberTagI(this, instr, x87_stack_); |
| 4920 __ SmiTag(reg); | 4919 __ SmiTag(reg); |
| 4921 __ j(overflow, deferred->entry()); | 4920 __ j(overflow, deferred->entry()); |
| 4922 __ bind(deferred->exit()); | 4921 __ bind(deferred->exit()); |
| 4923 } | 4922 } |
| 4924 | 4923 |
| 4925 | 4924 |
| 4926 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { | 4925 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
| 4927 class DeferredNumberTagU V8_FINAL : public LDeferredCode { | 4926 class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
| 4928 public: | 4927 public: |
| 4929 DeferredNumberTagU(LCodeGen* codegen, | 4928 DeferredNumberTagU(LCodeGen* codegen, |
| 4930 LNumberTagU* instr, | 4929 LNumberTagU* instr, |
| 4931 const X87Stack& x87_stack) | 4930 const X87Stack& x87_stack) |
| 4932 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | 4931 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
| 4933 virtual void Generate() V8_OVERRIDE { | 4932 virtual void Generate() V8_OVERRIDE { |
| 4934 codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32); | 4933 codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), |
| 4934 instr_->temp2(), UNSIGNED_INT32); |
| 4935 } | 4935 } |
| 4936 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } | 4936 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
| 4937 private: | 4937 private: |
| 4938 LNumberTagU* instr_; | 4938 LNumberTagU* instr_; |
| 4939 }; | 4939 }; |
| 4940 | 4940 |
| 4941 LOperand* input = instr->value(); | 4941 LOperand* input = instr->value(); |
| 4942 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 4942 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 4943 Register reg = ToRegister(input); | 4943 Register reg = ToRegister(input); |
| 4944 | 4944 |
| 4945 DeferredNumberTagU* deferred = | 4945 DeferredNumberTagU* deferred = |
| 4946 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); | 4946 new(zone()) DeferredNumberTagU(this, instr, x87_stack_); |
| 4947 __ cmp(reg, Immediate(Smi::kMaxValue)); | 4947 __ cmp(reg, Immediate(Smi::kMaxValue)); |
| 4948 __ j(above, deferred->entry()); | 4948 __ j(above, deferred->entry()); |
| 4949 __ SmiTag(reg); | 4949 __ SmiTag(reg); |
| 4950 __ bind(deferred->exit()); | 4950 __ bind(deferred->exit()); |
| 4951 } | 4951 } |
| 4952 | 4952 |
| 4953 | 4953 |
| 4954 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, | 4954 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
| 4955 LOperand* value, | 4955 LOperand* value, |
| 4956 IntegerSignedness signedness) { | 4956 LOperand* temp1, |
| 4957 Label slow; | 4957 LOperand* temp2, |
| 4958 IntegerSignedness signedness) { |
| 4959 Label done, slow; |
| 4958 Register reg = ToRegister(value); | 4960 Register reg = ToRegister(value); |
| 4959 Register tmp = reg.is(eax) ? ecx : eax; | 4961 Register tmp = ToRegister(temp1); |
| 4960 XMMRegister xmm_scratch = double_scratch0(); | 4962 XMMRegister xmm_scratch = double_scratch0(); |
| 4961 | 4963 |
| 4962 // Preserve the value of all registers. | |
| 4963 PushSafepointRegistersScope scope(this); | |
| 4964 | |
| 4965 Label done; | |
| 4966 | |
| 4967 if (signedness == SIGNED_INT32) { | 4964 if (signedness == SIGNED_INT32) { |
| 4968 // There was overflow, so bits 30 and 31 of the original integer | 4965 // There was overflow, so bits 30 and 31 of the original integer |
| 4969 // disagree. Try to allocate a heap number in new space and store | 4966 // disagree. Try to allocate a heap number in new space and store |
| 4970 // the value in there. If that fails, call the runtime system. | 4967 // the value in there. If that fails, call the runtime system. |
| 4971 __ SmiUntag(reg); | 4968 __ SmiUntag(reg); |
| 4972 __ xor_(reg, 0x80000000); | 4969 __ xor_(reg, 0x80000000); |
| 4973 if (CpuFeatures::IsSupported(SSE2)) { | 4970 if (CpuFeatures::IsSupported(SSE2)) { |
| 4974 CpuFeatureScope feature_scope(masm(), SSE2); | 4971 CpuFeatureScope feature_scope(masm(), SSE2); |
| 4975 __ Cvtsi2sd(xmm_scratch, Operand(reg)); | 4972 __ Cvtsi2sd(xmm_scratch, Operand(reg)); |
| 4976 } else { | 4973 } else { |
| 4977 __ push(reg); | 4974 __ push(reg); |
| 4978 __ fild_s(Operand(esp, 0)); | 4975 __ fild_s(Operand(esp, 0)); |
| 4979 __ pop(reg); | 4976 __ pop(reg); |
| 4980 } | 4977 } |
| 4981 } else { | 4978 } else { |
| 4982 if (CpuFeatures::IsSupported(SSE2)) { | 4979 if (CpuFeatures::IsSupported(SSE2)) { |
| 4983 CpuFeatureScope feature_scope(masm(), SSE2); | 4980 CpuFeatureScope feature_scope(masm(), SSE2); |
| 4984 __ LoadUint32(xmm_scratch, reg, | 4981 __ LoadUint32(xmm_scratch, reg, ToDoubleRegister(temp2)); |
| 4985 ToDoubleRegister(LNumberTagU::cast(instr)->temp())); | |
| 4986 } else { | 4982 } else { |
| 4987 // There's no fild variant for unsigned values, so zero-extend to a 64-bit | 4983 // There's no fild variant for unsigned values, so zero-extend to a 64-bit |
| 4988 // int manually. | 4984 // int manually. |
| 4989 __ push(Immediate(0)); | 4985 __ push(Immediate(0)); |
| 4990 __ push(reg); | 4986 __ push(reg); |
| 4991 __ fild_d(Operand(esp, 0)); | 4987 __ fild_d(Operand(esp, 0)); |
| 4992 __ pop(reg); | 4988 __ pop(reg); |
| 4993 __ pop(reg); | 4989 __ pop(reg); |
| 4994 } | 4990 } |
| 4995 } | 4991 } |
| 4996 | 4992 |
| 4997 if (FLAG_inline_new) { | 4993 if (FLAG_inline_new) { |
| 4998 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); | 4994 __ AllocateHeapNumber(reg, tmp, no_reg, &slow); |
| 4999 __ jmp(&done, Label::kNear); | 4995 __ jmp(&done, Label::kNear); |
| 5000 } | 4996 } |
| 5001 | 4997 |
| 5002 // Slow case: Call the runtime system to do the number allocation. | 4998 // Slow case: Call the runtime system to do the number allocation. |
| 5003 __ bind(&slow); | 4999 __ bind(&slow); |
| 5000 { |
| 5001 // TODO(3095996): Put a valid pointer value in the stack slot where the |
| 5002 // result register is stored, as this register is in the pointer map, but |
| 5003 // contains an integer value. |
| 5004 __ Set(reg, Immediate(0)); |
| 5004 | 5005 |
| 5005 // TODO(3095996): Put a valid pointer value in the stack slot where the result | 5006 // Preserve the value of all registers. |
| 5006 // register is stored, as this register is in the pointer map, but contains an | 5007 PushSafepointRegistersScope scope(this); |
| 5007 // integer value. | 5008 |
| 5008 __ StoreToSafepointRegisterSlot(reg, Immediate(0)); | 5009 // NumberTagI and NumberTagD use the context from the frame, rather than |
| 5009 // NumberTagI and NumberTagD use the context from the frame, rather than | 5010 // the environment's HContext or HInlinedContext value. |
| 5010 // the environment's HContext or HInlinedContext value. | 5011 // They only call Runtime::kAllocateHeapNumber. |
| 5011 // They only call Runtime::kAllocateHeapNumber. | 5012 // The corresponding HChange instructions are added in a phase that does |
| 5012 // The corresponding HChange instructions are added in a phase that does | 5013 // not have easy access to the local context. |
| 5013 // not have easy access to the local context. | 5014 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 5014 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 5015 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 5015 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 5016 RecordSafepointWithRegisters( |
| 5016 RecordSafepointWithRegisters( | 5017 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 5017 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 5018 __ StoreToSafepointRegisterSlot(reg, eax); |
| 5018 if (!reg.is(eax)) __ mov(reg, eax); | 5019 } |
| 5019 | 5020 |
| 5020 // Done. Put the value in xmm_scratch into the value of the allocated heap | 5021 // Done. Put the value in xmm_scratch into the value of the allocated heap |
| 5021 // number. | 5022 // number. |
| 5022 __ bind(&done); | 5023 __ bind(&done); |
| 5023 if (CpuFeatures::IsSupported(SSE2)) { | 5024 if (CpuFeatures::IsSupported(SSE2)) { |
| 5024 CpuFeatureScope feature_scope(masm(), SSE2); | 5025 CpuFeatureScope feature_scope(masm(), SSE2); |
| 5025 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); | 5026 __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch); |
| 5026 } else { | 5027 } else { |
| 5027 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); | 5028 __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); |
| 5028 } | 5029 } |
| 5029 __ StoreToSafepointRegisterSlot(reg, reg); | |
| 5030 } | 5030 } |
| 5031 | 5031 |
| 5032 | 5032 |
| 5033 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { | 5033 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
| 5034 class DeferredNumberTagD V8_FINAL : public LDeferredCode { | 5034 class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
| 5035 public: | 5035 public: |
| 5036 DeferredNumberTagD(LCodeGen* codegen, | 5036 DeferredNumberTagD(LCodeGen* codegen, |
| 5037 LNumberTagD* instr, | 5037 LNumberTagD* instr, |
| 5038 const X87Stack& x87_stack) | 5038 const X87Stack& x87_stack) |
| 5039 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | 5039 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5088 // not have easy access to the local context. | 5088 // not have easy access to the local context. |
| 5089 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); | 5089 __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); |
| 5090 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 5090 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
| 5091 RecordSafepointWithRegisters( | 5091 RecordSafepointWithRegisters( |
| 5092 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); | 5092 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
| 5093 __ StoreToSafepointRegisterSlot(reg, eax); | 5093 __ StoreToSafepointRegisterSlot(reg, eax); |
| 5094 } | 5094 } |
| 5095 | 5095 |
| 5096 | 5096 |
| 5097 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 5097 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 5098 LOperand* input = instr->value(); | 5098 HChange* hchange = instr->hydrogen(); |
| 5099 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 5099 Register input = ToRegister(instr->value()); |
| 5100 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); | 5100 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 5101 __ SmiTag(ToRegister(input)); | 5101 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 5102 __ test(input, Immediate(0xc0000000)); |
| 5103 DeoptimizeIf(not_zero, instr->environment()); |
| 5104 } |
| 5105 __ SmiTag(input); |
| 5106 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 5107 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 5108 DeoptimizeIf(overflow, instr->environment()); |
| 5109 } |
| 5102 } | 5110 } |
| 5103 | 5111 |
| 5104 | 5112 |
| 5105 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 5113 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 5106 LOperand* input = instr->value(); | 5114 LOperand* input = instr->value(); |
| 5107 Register result = ToRegister(input); | 5115 Register result = ToRegister(input); |
| 5108 ASSERT(input->IsRegister() && input->Equals(instr->result())); | 5116 ASSERT(input->IsRegister() && input->Equals(instr->result())); |
| 5109 if (instr->needs_check()) { | 5117 if (instr->needs_check()) { |
| 5110 __ test(result, Immediate(kSmiTagMask)); | 5118 __ test(result, Immediate(kSmiTagMask)); |
| 5111 DeoptimizeIf(not_zero, instr->environment()); | 5119 DeoptimizeIf(not_zero, instr->environment()); |
| (...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5244 __ mov(temp_reg, input_reg); | 5252 __ mov(temp_reg, input_reg); |
| 5245 __ SmiUntag(temp_reg); // Untag smi before converting to float. | 5253 __ SmiUntag(temp_reg); // Untag smi before converting to float. |
| 5246 __ Cvtsi2sd(result_reg, Operand(temp_reg)); | 5254 __ Cvtsi2sd(result_reg, Operand(temp_reg)); |
| 5247 __ bind(&done); | 5255 __ bind(&done); |
| 5248 } | 5256 } |
| 5249 | 5257 |
| 5250 | 5258 |
| 5251 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { | 5259 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) { |
| 5252 Register input_reg = ToRegister(instr->value()); | 5260 Register input_reg = ToRegister(instr->value()); |
| 5253 | 5261 |
| 5262 // The input was optimistically untagged; revert it. |
| 5263 STATIC_ASSERT(kSmiTagSize == 1); |
| 5264 __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag)); |
| 5265 |
| 5254 if (instr->truncating()) { | 5266 if (instr->truncating()) { |
| 5255 Label no_heap_number, check_bools, check_false; | 5267 Label no_heap_number, check_bools, check_false; |
| 5256 | 5268 |
| 5257 // Heap number map check. | 5269 // Heap number map check. |
| 5258 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), | 5270 __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 5259 factory()->heap_number_map()); | 5271 factory()->heap_number_map()); |
| 5260 __ j(not_equal, &no_heap_number, Label::kNear); | 5272 __ j(not_equal, &no_heap_number, Label::kNear); |
| 5261 __ TruncateHeapNumberToI(input_reg, input_reg); | 5273 __ TruncateHeapNumberToI(input_reg, input_reg); |
| 5262 __ jmp(done); | 5274 __ jmp(done); |
| 5263 | 5275 |
| 5264 __ bind(&no_heap_number); | 5276 __ bind(&no_heap_number); |
| 5265 // Check for Oddballs. Undefined/False is converted to zero and True to one | 5277 // Check for Oddballs. Undefined/False is converted to zero and True to one |
| 5266 // for truncating conversions. | 5278 // for truncating conversions. |
| 5267 __ cmp(input_reg, factory()->undefined_value()); | 5279 __ cmp(input_reg, factory()->undefined_value()); |
| 5268 __ j(not_equal, &check_bools, Label::kNear); | 5280 __ j(not_equal, &check_bools, Label::kNear); |
| 5269 __ Set(input_reg, Immediate(0)); | 5281 __ Set(input_reg, Immediate(0)); |
| 5270 __ jmp(done); | 5282 __ jmp(done); |
| 5271 | 5283 |
| 5272 __ bind(&check_bools); | 5284 __ bind(&check_bools); |
| 5273 __ cmp(input_reg, factory()->true_value()); | 5285 __ cmp(input_reg, factory()->true_value()); |
| 5274 __ j(not_equal, &check_false, Label::kNear); | 5286 __ j(not_equal, &check_false, Label::kNear); |
| 5275 __ Set(input_reg, Immediate(1)); | 5287 __ Set(input_reg, Immediate(1)); |
| 5276 __ jmp(done); | 5288 __ jmp(done); |
| 5277 | 5289 |
| 5278 __ bind(&check_false); | 5290 __ bind(&check_false); |
| 5279 __ cmp(input_reg, factory()->false_value()); | 5291 __ cmp(input_reg, factory()->false_value()); |
| 5280 __ RecordComment("Deferred TaggedToI: cannot truncate"); | 5292 __ RecordComment("Deferred TaggedToI: cannot truncate"); |
| 5281 DeoptimizeIf(not_equal, instr->environment()); | 5293 DeoptimizeIf(not_equal, instr->environment()); |
| 5282 __ Set(input_reg, Immediate(0)); | 5294 __ Set(input_reg, Immediate(0)); |
| 5283 __ jmp(done); | |
| 5284 } else { | 5295 } else { |
| 5285 Label bailout; | 5296 Label bailout; |
| 5286 XMMRegister scratch = (instr->temp() != NULL) | 5297 XMMRegister scratch = (instr->temp() != NULL) |
| 5287 ? ToDoubleRegister(instr->temp()) | 5298 ? ToDoubleRegister(instr->temp()) |
| 5288 : no_xmm_reg; | 5299 : no_xmm_reg; |
| 5289 __ TaggedToI(input_reg, input_reg, scratch, | 5300 __ TaggedToI(input_reg, input_reg, scratch, |
| 5290 instr->hydrogen()->GetMinusZeroMode(), &bailout); | 5301 instr->hydrogen()->GetMinusZeroMode(), &bailout); |
| 5291 __ jmp(done); | 5302 __ jmp(done); |
| 5292 __ bind(&bailout); | 5303 __ bind(&bailout); |
| 5293 DeoptimizeIf(no_condition, instr->environment()); | 5304 DeoptimizeIf(no_condition, instr->environment()); |
| (...skipping 19 matching lines...) Expand all Loading... |
| 5313 LOperand* input = instr->value(); | 5324 LOperand* input = instr->value(); |
| 5314 ASSERT(input->IsRegister()); | 5325 ASSERT(input->IsRegister()); |
| 5315 Register input_reg = ToRegister(input); | 5326 Register input_reg = ToRegister(input); |
| 5316 ASSERT(input_reg.is(ToRegister(instr->result()))); | 5327 ASSERT(input_reg.is(ToRegister(instr->result()))); |
| 5317 | 5328 |
| 5318 if (instr->hydrogen()->value()->representation().IsSmi()) { | 5329 if (instr->hydrogen()->value()->representation().IsSmi()) { |
| 5319 __ SmiUntag(input_reg); | 5330 __ SmiUntag(input_reg); |
| 5320 } else { | 5331 } else { |
| 5321 DeferredTaggedToI* deferred = | 5332 DeferredTaggedToI* deferred = |
| 5322 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); | 5333 new(zone()) DeferredTaggedToI(this, instr, x87_stack_); |
| 5323 | 5334 // Optimistically untag the input. |
| 5324 __ JumpIfNotSmi(input_reg, deferred->entry()); | 5335 // If the input is a HeapObject, SmiUntag will set the carry flag. |
| 5336 STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
| 5325 __ SmiUntag(input_reg); | 5337 __ SmiUntag(input_reg); |
| 5338 // Branch to deferred code if the input was tagged. |
| 5339 // The deferred code will take care of restoring the tag. |
| 5340 __ j(carry, deferred->entry()); |
| 5326 __ bind(deferred->exit()); | 5341 __ bind(deferred->exit()); |
| 5327 } | 5342 } |
| 5328 } | 5343 } |
| 5329 | 5344 |
| 5330 | 5345 |
| 5331 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { | 5346 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
| 5332 LOperand* input = instr->value(); | 5347 LOperand* input = instr->value(); |
| 5333 ASSERT(input->IsRegister()); | 5348 ASSERT(input->IsRegister()); |
| 5334 LOperand* temp = instr->temp(); | 5349 LOperand* temp = instr->temp(); |
| 5335 ASSERT(temp->IsRegister()); | 5350 ASSERT(temp->IsRegister()); |
| (...skipping 411 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5747 __ bind(&is_smi); | 5762 __ bind(&is_smi); |
| 5748 if (!input_reg.is(result_reg)) { | 5763 if (!input_reg.is(result_reg)) { |
| 5749 __ mov(result_reg, input_reg); | 5764 __ mov(result_reg, input_reg); |
| 5750 } | 5765 } |
| 5751 __ SmiUntag(result_reg); | 5766 __ SmiUntag(result_reg); |
| 5752 __ ClampUint8(result_reg); | 5767 __ ClampUint8(result_reg); |
| 5753 __ bind(&done); | 5768 __ bind(&done); |
| 5754 } | 5769 } |
| 5755 | 5770 |
| 5756 | 5771 |
| 5772 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
| 5773 CpuFeatureScope scope(masm(), SSE2); |
| 5774 XMMRegister value_reg = ToDoubleRegister(instr->value()); |
| 5775 Register result_reg = ToRegister(instr->result()); |
| 5776 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { |
| 5777 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 5778 CpuFeatureScope scope2(masm(), SSE4_1); |
| 5779 __ pextrd(result_reg, value_reg, 1); |
| 5780 } else { |
| 5781 XMMRegister xmm_scratch = double_scratch0(); |
| 5782 __ pshufd(xmm_scratch, value_reg, 1); |
| 5783 __ movd(result_reg, xmm_scratch); |
| 5784 } |
| 5785 } else { |
| 5786 __ movd(result_reg, value_reg); |
| 5787 } |
| 5788 } |
| 5789 |
| 5790 |
| 5791 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
| 5792 Register hi_reg = ToRegister(instr->hi()); |
| 5793 Register lo_reg = ToRegister(instr->lo()); |
| 5794 XMMRegister result_reg = ToDoubleRegister(instr->result()); |
| 5795 CpuFeatureScope scope(masm(), SSE2); |
| 5796 |
| 5797 if (CpuFeatures::IsSupported(SSE4_1)) { |
| 5798 CpuFeatureScope scope2(masm(), SSE4_1); |
| 5799 __ movd(result_reg, lo_reg); |
| 5800 __ pinsrd(result_reg, hi_reg, 1); |
| 5801 } else { |
| 5802 XMMRegister xmm_scratch = double_scratch0(); |
| 5803 __ movd(result_reg, hi_reg); |
| 5804 __ psllq(result_reg, 32); |
| 5805 __ movd(xmm_scratch, lo_reg); |
| 5806 __ orps(result_reg, xmm_scratch); |
| 5807 } |
| 5808 } |
| 5809 |
| 5810 |
| 5757 void LCodeGen::DoAllocate(LAllocate* instr) { | 5811 void LCodeGen::DoAllocate(LAllocate* instr) { |
| 5758 class DeferredAllocate V8_FINAL : public LDeferredCode { | 5812 class DeferredAllocate V8_FINAL : public LDeferredCode { |
| 5759 public: | 5813 public: |
| 5760 DeferredAllocate(LCodeGen* codegen, | 5814 DeferredAllocate(LCodeGen* codegen, |
| 5761 LAllocate* instr, | 5815 LAllocate* instr, |
| 5762 const X87Stack& x87_stack) | 5816 const X87Stack& x87_stack) |
| 5763 : LDeferredCode(codegen, x87_stack), instr_(instr) { } | 5817 : LDeferredCode(codegen, x87_stack), instr_(instr) { } |
| 5764 virtual void Generate() V8_OVERRIDE { | 5818 virtual void Generate() V8_OVERRIDE { |
| 5765 codegen()->DoDeferredAllocate(instr_); | 5819 codegen()->DoDeferredAllocate(instr_); |
| 5766 } | 5820 } |
| (...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5919 } | 5973 } |
| 5920 } | 5974 } |
| 5921 | 5975 |
| 5922 | 5976 |
| 5923 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 5977 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 5924 ASSERT(ToRegister(instr->context()).is(esi)); | 5978 ASSERT(ToRegister(instr->context()).is(esi)); |
| 5925 // Use the fast case closure allocation code that allocates in new | 5979 // Use the fast case closure allocation code that allocates in new |
| 5926 // space for nested functions that don't need literals cloning. | 5980 // space for nested functions that don't need literals cloning. |
| 5927 bool pretenure = instr->hydrogen()->pretenure(); | 5981 bool pretenure = instr->hydrogen()->pretenure(); |
| 5928 if (!pretenure && instr->hydrogen()->has_no_literals()) { | 5982 if (!pretenure && instr->hydrogen()->has_no_literals()) { |
| 5929 FastNewClosureStub stub(instr->hydrogen()->language_mode(), | 5983 FastNewClosureStub stub(instr->hydrogen()->strict_mode(), |
| 5930 instr->hydrogen()->is_generator()); | 5984 instr->hydrogen()->is_generator()); |
| 5931 __ mov(ebx, Immediate(instr->hydrogen()->shared_info())); | 5985 __ mov(ebx, Immediate(instr->hydrogen()->shared_info())); |
| 5932 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 5986 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 5933 } else { | 5987 } else { |
| 5934 __ push(esi); | 5988 __ push(esi); |
| 5935 __ push(Immediate(instr->hydrogen()->shared_info())); | 5989 __ push(Immediate(instr->hydrogen()->shared_info())); |
| 5936 __ push(Immediate(pretenure ? factory()->true_value() | 5990 __ push(Immediate(pretenure ? factory()->true_value() |
| 5937 : factory()->false_value())); | 5991 : factory()->false_value())); |
| 5938 CallRuntime(Runtime::kNewClosure, 3, instr); | 5992 CallRuntime(Runtime::kNewClosure, 3, instr); |
| 5939 } | 5993 } |
| (...skipping 341 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6281 FixedArray::kHeaderSize - kPointerSize)); | 6335 FixedArray::kHeaderSize - kPointerSize)); |
| 6282 __ bind(&done); | 6336 __ bind(&done); |
| 6283 } | 6337 } |
| 6284 | 6338 |
| 6285 | 6339 |
| 6286 #undef __ | 6340 #undef __ |
| 6287 | 6341 |
| 6288 } } // namespace v8::internal | 6342 } } // namespace v8::internal |
| 6289 | 6343 |
| 6290 #endif // V8_TARGET_ARCH_IA32 | 6344 #endif // V8_TARGET_ARCH_IA32 |
| OLD | NEW |