Chromium Code Reviews

Unified Diff: src/mips/lithium-codegen-mips.cc

Issue 131363008: A64: Synchronize with r15922. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 11 months ago
Index: src/mips/lithium-codegen-mips.cc
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 2a9ca324fc69561d7f4dafa2d76d7eb82fa93163..6bd45fe2f2353633032609927c205d0e483fa763 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -407,15 +407,14 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
if (r.IsInteger32()) {
ASSERT(literal->IsNumber());
__ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
} else if (r.IsDouble()) {
Abort("EmitLoadRegister: Unsupported double immediate.");
} else {
ASSERT(r.IsTagged());
- if (literal->IsSmi()) {
- __ li(scratch, Operand(literal));
- } else {
- __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
- }
+ __ LoadObject(scratch, literal);
}
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
@@ -481,9 +480,18 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
}
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+ return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+ const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
- return constant->Integer32Value();
+ int32_t value = constant->Integer32Value();
+ if (r.IsInteger32()) return value;
+ ASSERT(r.IsSmiOrTagged());
+ return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
@@ -505,7 +513,10 @@ Operand LCodeGen::ToOperand(LOperand* op) {
LConstantOperand* const_op = LConstantOperand::cast(op);
HConstant* constant = chunk()->LookupConstant(const_op);
Representation r = chunk_->LookupLiteralRepresentation(const_op);
- if (r.IsInteger32()) {
+ if (r.IsSmi()) {
+ ASSERT(constant->HasSmiValue());
+ return Operand(Smi::FromInt(constant->Integer32Value()));
+ } else if (r.IsInteger32()) {
ASSERT(constant->HasInteger32Value());
return Operand(constant->Integer32Value());
} else if (r.IsDouble()) {
@@ -790,14 +801,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
-void LCodeGen::SoftDeoptimize(LEnvironment* environment,
- Register src1,
- const Operand& src2) {
- ASSERT(!info()->IsStub());
- DeoptimizeIf(al, environment, Deoptimizer::SOFT, src1, src2);
-}
-
-
void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
ZoneList<Handle<Map> > maps(1, zone());
int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1379,7 +1382,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (right_op->IsConstantOperand() && !can_overflow) {
// Use optimized code for specific constants.
- int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+ int32_t constant = ToRepresentation(
+ LConstantOperand::cast(right_op),
+ instr->hydrogen()->right()->representation());
if (bailout_on_minus_zero && (constant < 0)) {
// The case of a null constant will be handled separately.
@@ -1446,13 +1451,25 @@ void LCodeGen::DoMulI(LMulI* instr) {
if (can_overflow) {
// hi:lo = left * right.
- __ mult(left, right);
- __ mfhi(scratch);
- __ mflo(result);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ mult(result, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ } else {
+ __ mult(left, right);
+ __ mfhi(scratch);
+ __ mflo(result);
+ }
__ sra(at, result, 31);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
} else {
- __ Mul(result, left, right);
+ if (instr->hydrogen()->representation().IsSmi()) {
+ __ SmiUntag(result, left);
+ __ Mul(result, result, right);
+ } else {
+ __ Mul(result, left, right);
+ }
}
if (bailout_on_minus_zero) {
@@ -1507,6 +1524,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
LOperand* right_op = instr->right();
Register left = ToRegister(instr->left());
Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
if (right_op->IsRegister()) {
// No need to mask the right operand on MIPS, it is built into the variable
@@ -1563,7 +1581,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
break;
case Token::SHL:
if (shift_count != 0) {
- __ sll(result, left, shift_count);
+ if (instr->hydrogen_value()->representation().IsSmi() &&
+ instr->can_deopt()) {
+ __ sll(result, left, shift_count - 1);
+ __ SmiTagCheckOverflow(result, result, scratch);
+ DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ sll(result, left, shift_count);
+ }
} else {
__ Move(result, left);
}
@@ -1636,12 +1661,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> value = instr->value();
AllowDeferredHandleDereference smi_check;
- if (value->IsSmi()) {
- __ li(ToRegister(instr->result()), Operand(value));
- } else {
- __ LoadHeapObject(ToRegister(instr->result()),
- Handle<HeapObject>::cast(value));
- }
+ __ LoadObject(ToRegister(instr->result()), value);
}
@@ -1820,7 +1840,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* right = instr->right();
HMathMinMax::Operation operation = instr->hydrogen()->operation();
Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
- if (instr->hydrogen()->representation().IsInteger32()) {
+ if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Register left_reg = ToRegister(left);
Operand right_op = (right->IsRegister() || right->IsConstantOperand())
? ToOperand(right)
@@ -2240,13 +2260,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
}
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
- Register left = ToRegister(instr->left());
-
- EmitBranch(instr, eq, left, Operand(instr->hydrogen()->right()));
-}
-
-
Condition LCodeGen::EmitIsObject(Register input,
Register temp1,
Register temp2,
@@ -2901,9 +2914,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
__ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
__ lw(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
}
- } else if (lookup.IsConstantFunction()) {
- Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- __ LoadHeapObject(result, function);
+ } else if (lookup.IsConstant()) {
+ Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+ __ LoadObject(result, constant);
} else {
// Negative lookup.
// Check prototypes.
@@ -4187,9 +4200,25 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::ApplyCheckIf(Condition cc,
+ LBoundsCheck* check,
+ Register src1,
+ const Operand& src2) {
+ if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+ Label done;
+ __ Branch(&done, NegateCondition(cc), src1, src2);
+ __ stop("eliminated bounds check failed");
+ __ bind(&done);
+ } else {
+ DeoptimizeIf(cc, check->environment(), src1, src2);
+ }
+}
+
+
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
if (instr->hydrogen()->skip_check()) return;
+ Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
if (instr->index()->IsConstantOperand()) {
int constant_index =
ToInteger32(LConstantOperand::cast(instr->index()));
@@ -4198,13 +4227,13 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
} else {
__ li(at, Operand(constant_index));
}
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
at,
Operand(ToRegister(instr->length())));
} else {
- DeoptimizeIf(hs,
- instr->environment(),
+ ApplyCheckIf(condition,
+ instr,
ToRegister(instr->index()),
Operand(ToRegister(instr->length())));
}
@@ -4426,7 +4455,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, GetRAState(), kDontSaveFPRegs);
- } else if (FLAG_compiled_transitions) {
+ } else {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ mov(a0, object_reg);
__ li(a1, Operand(to_map));
@@ -4434,28 +4463,6 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ CallStub(&stub);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
- } else if (IsFastSmiElementsKind(from_kind) &&
- IsFastDoubleElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(a3));
- __ li(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
- RelocInfo::CODE_TARGET, instr);
- } else if (IsFastDoubleElementsKind(from_kind) &&
- IsFastObjectElementsKind(to_kind)) {
- Register fixed_object_reg = ToRegister(instr->temp());
- ASSERT(fixed_object_reg.is(a2));
- Register new_map_reg = ToRegister(instr->new_map_temp());
- ASSERT(new_map_reg.is(a3));
- __ li(new_map_reg, Operand(to_map));
- __ mov(fixed_object_reg, object_reg);
- CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
- RelocInfo::CODE_TARGET, instr);
- } else {
- UNREACHABLE();
}
__ bind(&not_applicable);
}
@@ -5195,6 +5202,7 @@ void LCodeGen::DoCheckMapCommon(Register map_reg,
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+ if (instr->hydrogen()->CanOmitMapChecks()) return;
Register map_reg = scratch0();
LOperand* input = instr->value();
ASSERT(input->IsRegister());
@@ -5263,6 +5271,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+ if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@@ -5271,12 +5281,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
- for (int i = 0; i < prototypes->length(); i++) {
- __ LoadHeapObject(prototype_reg, prototypes->at(i));
- __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
- DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
- }
+ for (int i = 0; i < prototypes->length(); i++) {
+ __ LoadHeapObject(prototype_reg, prototypes->at(i));
+ __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+ DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
}
}
@@ -5649,11 +5657,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
- if (instr->hydrogen_value()->IsSoftDeoptimize()) {
- SoftDeoptimize(instr->environment(), zero_reg, Operand(zero_reg));
- } else {
- DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
- }
+ Deoptimizer::BailoutType type = instr->hydrogen()->type();
+ // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+ // needed return address), even though the implementation of LAZY and EAGER is
+ // now identical. When LAZY is eventually completely folded into EAGER, remove
+ // the special case below.
+ if (info()->IsStub() && type == Deoptimizer::EAGER) {
+ type = Deoptimizer::LAZY;
+ }
+ DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
}
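
A brief aside on the smi arithmetic that the DoMulI and DoShiftI hunks above rely on: on 32-bit targets a smi is the 31-bit integer value shifted left by one tag bit (tag value 0), so untagging one multiplicand keeps the product tagged, and sll(result, left, shift_count - 1) followed by SmiTagCheckOverflow yields a tagged result with the overflow sign left in the scratch register. The sketch below is illustrative only and is not part of the patch; SmiTag/SmiUntag here are plain C++ stand-ins for the macro-assembler operations, under the assumption of 32-bit tagging.

// Illustrative sketch only (not part of the patch, assumes 32-bit smi tagging):
// a smi is the integer value shifted left by one tag bit, with tag bit == 0.
#include <cassert>
#include <cstdint>

static const int kSmiTagSize = 1;                 // one tag bit on 32-bit targets
static const int32_t kSmiMinValue = -(1 << 30);   // smis hold 31-bit signed values
static const int32_t kSmiMaxValue = (1 << 30) - 1;

// Mirrors ToRepresentation() returning Smi::FromInt(value) reinterpreted as int32_t.
int32_t SmiTag(int32_t value) {
  assert(value >= kSmiMinValue && value <= kSmiMaxValue);  // must fit in 31 bits
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
}

// Mirrors __ SmiUntag(result, left): arithmetic shift right by the tag bit.
// Untagging one operand before a 32x32 multiply keeps the product tagged,
// which is what the smi path in DoMulI does.
int32_t SmiUntag(int32_t smi) {
  return smi >> kSmiTagSize;
}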
