| Index: src/arm/code-stubs-arm.h
|
| ===================================================================
|
| --- src/arm/code-stubs-arm.h (revision 6800)
|
| +++ src/arm/code-stubs-arm.h (working copy)
|
| @@ -218,6 +218,120 @@
|
| };
|
|
|
|
|
| +class TypeRecordingBinaryOpStub: public CodeStub {
|
| + public:
|
| + TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
|
| + : op_(op),
|
| + mode_(mode),
|
| + operands_type_(TRBinaryOpIC::UNINITIALIZED),
|
| + result_type_(TRBinaryOpIC::UNINITIALIZED),
|
| + name_(NULL) {
|
| + use_vfp3_ = CpuFeatures::IsSupported(VFP3);
|
| + ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
|
| + }
|
| +
|
| + TypeRecordingBinaryOpStub(
|
| + int key,
|
| + TRBinaryOpIC::TypeInfo operands_type,
|
| + TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
|
| + : op_(OpBits::decode(key)),
|
| + mode_(ModeBits::decode(key)),
|
| + use_vfp3_(VFP3Bits::decode(key)),
|
| + operands_type_(operands_type),
|
| + result_type_(result_type),
|
| + name_(NULL) { }
|
| +
|
| + private:
|
| + enum SmiCodeGenerateHeapNumberResults {
|
| + ALLOW_HEAPNUMBER_RESULTS,
|
| + NO_HEAPNUMBER_RESULTS
|
| + };
|
| +
|
| + Token::Value op_;
|
| + OverwriteMode mode_;
|
| + bool use_vfp3_;
|
| +
|
| + // Operand type information determined at runtime.
|
| + TRBinaryOpIC::TypeInfo operands_type_;
|
| + TRBinaryOpIC::TypeInfo result_type_;
|
| +
|
| + char* name_;
|
| +
|
| + const char* GetName();
|
| +
|
| +#ifdef DEBUG
|
| + void Print() {
|
| + PrintF("TypeRecordingBinaryOpStub %d (op %s), "
|
| + "(mode %d, runtime_type_info %s)\n",
|
| + MinorKey(),
|
| + Token::String(op_),
|
| + static_cast<int>(mode_),
|
| + TRBinaryOpIC::GetName(operands_type_));
|
| + }
|
| +#endif
|
| +
|
| + // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
|
| + class ModeBits: public BitField<OverwriteMode, 0, 2> {};
|
| + class OpBits: public BitField<Token::Value, 2, 7> {};
|
| + class VFP3Bits: public BitField<bool, 9, 1> {};
|
| + class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
|
| + class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
|
| +
|
| + Major MajorKey() { return TypeRecordingBinaryOp; }
|
| + int MinorKey() {
|
| + return OpBits::encode(op_)
|
| + | ModeBits::encode(mode_)
|
| + | VFP3Bits::encode(use_vfp3_)
|
| + | OperandTypeInfoBits::encode(operands_type_)
|
| + | ResultTypeInfoBits::encode(result_type_);
|
| + }
|
| +
|
| + void Generate(MacroAssembler* masm);
|
| + void GenerateGeneric(MacroAssembler* masm);
|
| + void GenerateSmiSmiOperation(MacroAssembler* masm);
|
| + void GenerateFPOperation(MacroAssembler* masm,
|
| + bool smi_operands,
|
| + Label* not_numbers,
|
| + Label* gc_required);
|
| + void GenerateSmiCode(MacroAssembler* masm,
|
| + Label* gc_required,
|
| + SmiCodeGenerateHeapNumberResults heapnumber_results);
|
| + void GenerateLoadArguments(MacroAssembler* masm);
|
| + void GenerateReturn(MacroAssembler* masm);
|
| + void GenerateUninitializedStub(MacroAssembler* masm);
|
| + void GenerateSmiStub(MacroAssembler* masm);
|
| + void GenerateInt32Stub(MacroAssembler* masm);
|
| + void GenerateHeapNumberStub(MacroAssembler* masm);
|
| + void GenerateStringStub(MacroAssembler* masm);
|
| + void GenerateGenericStub(MacroAssembler* masm);
|
| + void GenerateAddStrings(MacroAssembler* masm);
|
| + void GenerateCallRuntime(MacroAssembler* masm);
|
| +
|
| + void GenerateHeapResultAllocation(MacroAssembler* masm,
|
| + Register result,
|
| + Register heap_number_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* gc_required);
|
| + void GenerateRegisterArgsPush(MacroAssembler* masm);
|
| + void GenerateTypeTransition(MacroAssembler* masm);
|
| + void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
|
| +
|
| + virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
|
| +
|
| + virtual InlineCacheState GetICState() {
|
| + return TRBinaryOpIC::ToState(operands_type_);
|
| + }
|
| +
|
| + virtual void FinishCode(Code* code) {
|
| + code->set_type_recording_binary_op_type(operands_type_);
|
| + code->set_type_recording_binary_op_result_type(result_type_);
|
| + }
|
| +
|
| + friend class CodeGenerator;
|
| +};
|
| +
|
| +
|
| // Flag that indicates how to generate code for the stub StringAddStub.
|
| enum StringAddFlags {
|
| NO_STRING_ADD_FLAGS = 0,
|
| @@ -457,6 +571,75 @@
|
| };
|
|
|
|
|
| +// Trampoline stub to call into native code. To call safely into native code
|
| +// in the presence of compacting GC (which can move code objects) we need to
|
| +// keep the code which called into native pinned in memory. Currently the
|
| +// simplest approach is to generate such stub early enough so it can never be
|
| +// moved by GC.
|
| +class DirectCEntryStub: public CodeStub {
|
| + public:
|
| + DirectCEntryStub() {}
|
| + void Generate(MacroAssembler* masm);
|
| + void GenerateCall(MacroAssembler* masm, ApiFunction *function);
|
| + void GenerateCall(MacroAssembler* masm, Register target);
|
| +
|
| + private:
|
| + Major MajorKey() { return DirectCEntry; }
|
| + int MinorKey() { return 0; }
|
| + const char* GetName() { return "DirectCEntryStub"; }
|
| +};
|
| +
|
| +
|
| +// Generate code to load an element from a pixel array. The receiver is assumed
|
| +// not to be a smi and to have elements; the caller must guarantee this
|
| +// precondition. If key is not a smi, then the generated code branches to
|
| +// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
|
| +// check has already been performed on key so that the smi check is not
|
| +// generated. If key is not a valid index within the bounds of the pixel array,
|
| +// the generated code jumps to out_of_range. receiver, key and elements are
|
| +// unchanged throughout the generated code sequence.
|
| +void GenerateFastPixelArrayLoad(MacroAssembler* masm,
|
| + Register receiver,
|
| + Register key,
|
| + Register elements_map,
|
| + Register elements,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Register result,
|
| + Label* not_pixel_array,
|
| + Label* key_not_smi,
|
| + Label* out_of_range);
|
| +
|
| +// Generate code to store an element into a pixel array, clamping values between
|
| +// [0..255]. The receiver is assumed not to be a smi and to have elements; the
|
| +// caller must guarantee this precondition. If key is not a smi, then the
|
| +// generated code branches to key_not_smi. Callers can specify NULL for
|
| +// key_not_smi to signal that a smi check has already been performed on key so
|
| +// that the smi check is not generated. If value is not a smi, the generated
|
| +// code will branch to value_not_smi. If the receiver doesn't have pixel array
|
| +// elements, the generated code will branch to not_pixel_array, unless
|
| +// not_pixel_array is NULL, in which case the caller must ensure that the
|
| +// receiver has pixel array elements. If key is not a valid index within the
|
| +// bounds of the pixel array, the generated code jumps to out_of_range. If
|
| +// load_elements_from_receiver is true, then the elements of the receiver are loaded
|
| +// into elements, otherwise elements is assumed to already be the receiver's
|
| +// elements. If load_elements_map_from_elements is true, elements_map is loaded
|
| +// from elements, otherwise it is assumed to already contain the element map.
|
| +void GenerateFastPixelArrayStore(MacroAssembler* masm,
|
| + Register receiver,
|
| + Register key,
|
| + Register value,
|
| + Register elements,
|
| + Register elements_map,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + bool load_elements_from_receiver,
|
| + bool load_elements_map_from_elements,
|
| + Label* key_not_smi,
|
| + Label* value_not_smi,
|
| + Label* not_pixel_array,
|
| + Label* out_of_range);
|
| +
|
| } } // namespace v8::internal
|
|
|
| #endif // V8_ARM_CODE_STUBS_ARM_H_
|
|
|