Chromium Code Reviews

Index: src/arm/code-stubs-arm.cc
===================================================================
--- src/arm/code-stubs-arm.cc  (revision 7674)
+++ src/arm/code-stubs-arm.cc  (working copy)
@@ -1768,6 +1768,301 @@
 }
+Handle<Code> GetTypeRecordingUnaryOpStub(int key,
+                                         TRUnaryOpIC::TypeInfo type_info) {
+  TypeRecordingUnaryOpStub stub(key, type_info);
+  return stub.GetCode();
+}
+
+
+const char* TypeRecordingUnaryOpStub::GetName() {
+  if (name_ != NULL) return name_;
+  const int kMaxNameLength = 100;
+  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
+      kMaxNameLength);
+  if (name_ == NULL) return "OOM";
+  const char* op_name = Token::Name(op_);
+  const char* overwrite_name;
+  switch (mode_) {
+    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
+    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
+  }
+
+  OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
+               "TypeRecordingUnaryOpStub_%s_%s_%s",
+               op_name,
+               overwrite_name,
+               TRUnaryOpIC::GetName(operand_type_));
+  return name_;
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::Generate(MacroAssembler* masm) {
+  switch (operand_type_) {
+    case TRUnaryOpIC::UNINITIALIZED:
+      GenerateTypeTransition(masm);
+      break;
+    case TRUnaryOpIC::SMI:
+      GenerateSmiStub(masm);
+      break;
+    case TRUnaryOpIC::HEAP_NUMBER:
+      GenerateHeapNumberStub(masm);
+      break;
+    case TRUnaryOpIC::GENERIC:
+      GenerateGenericStub(masm);
+      break;
+  }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  // Prepare to push argument.
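+  // r0 holds the operand; move it aside, since r0-r2 are loaded with
+  // the key, the operation and the type info below.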
+  __ mov(r3, Operand(r0));
+
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+  __ mov(r1, Operand(Smi::FromInt(op_)));
+  __ mov(r0, Operand(Smi::FromInt(operand_type_)));
+
+  __ Push(r3, r2, r1, r0);
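+  // The four values just pushed become the arguments of the patch
+  // routine, with the operand (pushed first) as argument 0.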
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kTypeRecordingUnaryOp_Patch),
+                        masm->isolate()),
+      4,
+      1);
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateSmiStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateSmiStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
+  Label non_smi;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
+                                                  Label* non_smi,
+                                                  Label* slow) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // The result of negating zero or the smallest negative smi is not a smi.
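+  // Clearing the sign bit yields zero exactly for those two inputs,
+  // the tagged values 0x00000000 and 0x80000000.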
+  __ bic(ip, r0, Operand(0x80000000), SetCC);
+  __ b(eq, slow);
+
+  // Return '0 - value'.
+  __ rsb(r0, r0, Operand(0, RelocInfo::NONE));
+  __ Ret();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
+                                                     Label* non_smi) {
+  __ JumpIfNotSmi(r0, non_smi);
+
+  // Flip bits and revert inverted smi-tag.
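+  // For a smi 2*x, mvn yields ~(2*x) = -2*x - 1; clearing the low bit
+  // (the inverted tag) gives -2*x - 2 = 2*(~x), the tagged value of ~x.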
+  __ mvn(r0, Operand(r0));
+  __ bic(r0, r0, Operand(kSmiTagMask));
+  __ Ret();
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateHeapNumberStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateHeapNumberStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberStubBitNot(
+    MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateTypeTransition(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
+                                                         Label* slow) {
+  Register heap_number_map = r6;
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ cmp(r1, heap_number_map);
+  __ b(ne, slow);
+
+  // r0 is a heap number. Flip its sign in place, or negate it into a
+  // fresh heap number in r1.
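+  // Negating only toggles the IEEE-754 sign bit, which sits in the
+  // upper (exponent) word of the heap number.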
+  if (mode_ == UNARY_OVERWRITE) {
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+  } else {
+    __ AllocateHeapNumber(r1, r2, r3, r6, slow);
+    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
+    __ eor(r2, r2, Operand(HeapNumber::kSignMask));  // Flip sign.
+    __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
+    __ mov(r0, Operand(r1));
+  }
+  __ Ret();
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateHeapNumberCodeBitNot(
+    MacroAssembler* masm, Label* slow) {
+  Register heap_number_map = r6;
+  // Check if the operand is a heap number.
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ cmp(r1, heap_number_map);
+  __ b(ne, slow);
+
+  // Convert the heap number in r0 to an untagged integer in r1.
+  __ ConvertToInt32(r0, r1, r2, r3, d0, slow);
+
+  // Do the bitwise operation and check if the result fits in a smi.
+  Label try_float;
+  __ mvn(r1, Operand(r1));
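+  // Adding 0x40000000 sets the N flag exactly when the result lies
+  // outside the 31-bit smi range [-0x40000000, 0x3fffffff].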
+  __ add(r2, r1, Operand(0x40000000), SetCC);
+  __ b(mi, &try_float);
+
+  // Tag the result as a smi and we're done.
+  __ mov(r0, Operand(r1, LSL, kSmiTagSize));
+  __ Ret();
+
+  // Try to store the result in a heap number.
+  __ bind(&try_float);
+  if (mode_ == UNARY_NO_OVERWRITE) {
+    Label slow_allocate_heapnumber, heapnumber_allocated;
+    __ AllocateHeapNumber(r0, r2, r3, r6, &slow_allocate_heapnumber);
+    __ jmp(&heapnumber_allocated);
+
+    __ bind(&slow_allocate_heapnumber);

    Sven Panne, 2011/04/21 16:19:13:
    Hmmm, this seems to crash mjsunit/bit-not.js with

+    __ push(r1);
+    __ CallRuntime(Runtime::kNumberAlloc, 0);
+    __ pop(r1);
+
+    __ bind(&heapnumber_allocated);
+
+  }
+
+  if (CpuFeatures::IsSupported(VFP3)) {
+    // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, r1);
+    __ vcvt_f64_s32(d0, s0);
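+    // vstr needs an untagged base address, so strip the heap object tag
+    // before storing to the heap number's value field.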
+    __ sub(r2, r0, Operand(kHeapObjectTag));
+    __ vstr(d0, r2, HeapNumber::kValueOffset);
+    __ Ret();
+  } else {
+    // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
+    // have to set up a frame.
+    WriteInt32ToHeapNumberStub stub(r1, r0, r2);
+    __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+  }
+}
+
+
+// TODO(svenpanne): Use virtual functions instead of switch.
+void TypeRecordingUnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
+  switch (op_) {
+    case Token::SUB:
+      GenerateGenericStubSub(masm);
+      break;
+    case Token::BIT_NOT:
+      GenerateGenericStubBitNot(masm);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeSub(masm, &non_smi, &slow);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeSub(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
+  Label non_smi, slow;
+  GenerateSmiCodeBitNot(masm, &non_smi);
+  __ bind(&non_smi);
+  GenerateHeapNumberCodeBitNot(masm, &slow);
+  __ bind(&slow);
+  GenerateGenericCodeFallback(masm);
+}
+
+
+void TypeRecordingUnaryOpStub::GenerateGenericCodeFallback(
+    MacroAssembler* masm) {
+  // Handle the slow case by jumping to the JavaScript builtin.
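+  // The pushed operand becomes the builtin's receiver; JUMP_JS makes
+  // this a tail call, so the builtin's result is returned directly.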
+  __ push(r0);
+  switch (op_) {
+    case Token::SUB:
+      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS);
+      break;
+    case Token::BIT_NOT:
+      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
     TRBinaryOpIC::TypeInfo type_info,
     TRBinaryOpIC::TypeInfo result_type_info) {