Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 6903124: Removed dead code: GenericUnaryOpStub is not used anymore; as a consequence, NegativeZeroHandling... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 8 months ago
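
The deleted GenericUnaryOpStub::Generate below implemented the ARM fast paths for unary minus and bitwise NOT. Its Token::SUB path refuses to negate two tagged smi values in place: 0 (because JavaScript negation must produce -0, which is not representable as a smi) and the minimum smi (whose negation overflows the smi range). A minimal stand-alone C++ sketch of that bail-out check, assuming the 32-bit smi tagging of this era; the helper names and constants below are illustrative, not V8 declarations:

// Hypothetical stand-alone illustration (not V8 code) of the Token::SUB
// bail-out "__ bic(ip, r0, Operand(0x80000000), SetCC); __ b(eq, &slow);".
#include <cstdint>
#include <cstdio>

// Assumption: a smi is the 31-bit integer shifted left by one, tag bit 0.
constexpr int kSmiTagSize = 1;

int32_t TagSmi(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
}

// Clearing the sign bit leaves zero for exactly two tagged values:
// 0x00000000 (the smi 0, whose negation is -0 and needs a heap number) and
// 0x80000000 (the tagged minimum smi, -2^30, whose negation overflows).
bool NeedsSlowCase(int32_t tagged_smi) {
  return (static_cast<uint32_t>(tagged_smi) & 0x7FFFFFFFu) == 0;
}

int main() {
  printf("%d\n", NeedsSlowCase(TagSmi(0)));           // 1: -0 is not a smi
  printf("%d\n", NeedsSlowCase(TagSmi(-(1 << 30))));  // 1: negation overflows
  printf("%d\n", NeedsSlowCase(TagSmi(42)));          // 0: fast in-place rsb
  return 0;
}

The single bic-and-test covers both cases at once because 0x00000000 and 0x80000000 are the only tagged values that become zero when the sign bit is cleared.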
Index: src/arm/code-stubs-arm.cc
===================================================================
--- src/arm/code-stubs-arm.cc (revision 7711)
+++ src/arm/code-stubs-arm.cc (working copy)
@@ -3202,141 +3202,6 @@
}
-void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
- Label slow, done;
-
- Register heap_number_map = r6;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- if (op_ == Token::SUB) {
- if (include_smi_code_) {
- // Check whether the value is a smi.
- Label try_float;
- __ tst(r0, Operand(kSmiTagMask));
- __ b(ne, &try_float);
-
- // Go slow case if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- if (negative_zero_ == kStrictNegativeZero) {
- // If we have to check for zero, then we can check for the max negative
- // smi while we are at it.
- __ bic(ip, r0, Operand(0x80000000), SetCC);
- __ b(eq, &slow);
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
- __ Ret();
- } else {
- // The value of the expression is a smi and 0 is OK for -0. Try
- // optimistic subtraction '0 - value'.
- __ rsb(r0, r0, Operand(0, RelocInfo::NONE), SetCC);
- __ Ret(vc);
- // We don't have to reverse the optimistic neg since the only case
- // where we fall through is the minimum negative Smi, which is the case
- // where the neg leaves the register unchanged.
- __ jmp(&slow); // Go slow on max negative Smi.
- }
- __ bind(&try_float);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
- // r0 is a heap number. Get a new heap number in r1.
- if (overwrite_ == UNARY_OVERWRITE) {
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- } else {
- __ AllocateHeapNumber(r1, r2, r3, r6, &slow);
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
- __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
- __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
- __ mov(r0, Operand(r1));
- }
- } else if (op_ == Token::BIT_NOT) {
- if (include_smi_code_) {
- Label non_smi;
- __ JumpIfNotSmi(r0, &non_smi);
- __ mvn(r0, Operand(r0));
- // Bit-clear inverted smi-tag.
- __ bic(r0, r0, Operand(kSmiTagMask));
- __ Ret();
- __ bind(&non_smi);
- } else if (FLAG_debug_code) {
- __ tst(r0, Operand(kSmiTagMask));
- __ Assert(ne, "Unexpected smi operand.");
- }
-
- // Check if the operand is a heap number.
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmp(r1, heap_number_map);
- __ b(ne, &slow);
-
- // Convert the heap number in r0 to an untagged integer in r1.
- __ ConvertToInt32(r0, r1, r2, r3, d0, &slow);
-
- // Do the bitwise operation (move negated) and check if the result
- // fits in a smi.
- Label try_float;
- __ mvn(r1, Operand(r1));
- __ add(r2, r1, Operand(0x40000000), SetCC);
- __ b(mi, &try_float);
- __ mov(r0, Operand(r1, LSL, kSmiTagSize));
- __ b(&done);
-
- __ bind(&try_float);
- if (!overwrite_ == UNARY_OVERWRITE) {
- // Allocate a fresh heap number, but don't overwrite r0 until
- // we're sure we can do it without going through the slow case
- // that needs the value in r0.
- __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
- __ mov(r0, Operand(r2));
- }
-
- if (CpuFeatures::IsSupported(VFP3)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatures::Scope scope(VFP3);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r2, HeapNumber::kValueOffset);
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ push(lr);
- __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
- __ pop(lr);
- }
- } else {
- UNIMPLEMENTED();
- }
-
- __ bind(&done);
- __ Ret();
-
- // Handle the slow case by jumping to the JavaScript builtin.
- __ bind(&slow);
- __ push(r0);
- switch (op_) {
- case Token::SUB:
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
- break;
- case Token::BIT_NOT:
- __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
void MathPowStub::Generate(MacroAssembler* masm) {
Label call_runtime;
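
For reference, the deleted Token::BIT_NOT fast path relied on two bit tricks: inverting a tagged smi and then clearing the (now inverted) tag bit yields the tagged result of ~value directly, and adding 0x40000000 to an untagged int32 sets the sign flag exactly when the value falls outside the 31-bit smi range. A stand-alone C++ sketch of both, again with illustrative names rather than V8's own declarations:

// Hypothetical stand-alone illustration (not V8 code) of the two bit tricks
// in the deleted Token::BIT_NOT fast path.
#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;    // assumption: smi == value << 1, tag bit 0
constexpr int32_t kSmiTagMask = 1;

// Mirrors "__ mvn(r0, Operand(r0)); __ bic(r0, r0, Operand(kSmiTagMask));":
// inverting a tagged smi also inverts the tag bit (0 -> 1), so clearing it
// again produces the correctly tagged smi for ~value without untagging.
int32_t SmiBitNot(int32_t tagged_smi) {
  return ~tagged_smi & ~kSmiTagMask;
}

// Mirrors "__ add(r2, r1, Operand(0x40000000), SetCC); __ b(mi, &try_float);":
// adding 2^30 makes the sign bit of the 32-bit result negative exactly when
// the untagged value lies outside the smi range [-2^30, 2^30 - 1].
bool FitsInSmi(int32_t value) {
  return static_cast<int32_t>(static_cast<uint32_t>(value) + 0x40000000u) >= 0;
}

int main() {
  int32_t tagged_five = 5 << kSmiTagSize;
  printf("%d\n", SmiBitNot(tagged_five) >> kSmiTagSize);  // -6, i.e. ~5
  printf("%d\n", FitsInSmi((1 << 30) - 1));               // 1: maximum smi
  printf("%d\n", FitsInSmi(1 << 30));                     // 0: needs a heap number
  return 0;
}

Adding 2^30 is a common single-instruction way to range-check a 31-bit signed value: the result's sign bit is set precisely for inputs below -2^30 or at or above 2^30, which is why the stub branched on the mi condition.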