| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 200 matching lines...) |
| 211 } else { | 211 } else { |
| 212 PrintF("GenericBinaryOpStub (%s by %d)\n", | 212 PrintF("GenericBinaryOpStub (%s by %d)\n", |
| 213 Token::String(op_), | 213 Token::String(op_), |
| 214 constant_rhs_); | 214 constant_rhs_); |
| 215 } | 215 } |
| 216 } | 216 } |
| 217 #endif | 217 #endif |
| 218 }; | 218 }; |
| 219 | 219 |
| 220 | 220 |
| 221 class TypeRecordingBinaryOpStub: public CodeStub { |
| 222 public: |
| 223 TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode) |
| 224 : op_(op), |
| 225 mode_(mode), |
| 226 operands_type_(TRBinaryOpIC::UNINITIALIZED), |
| 227 result_type_(TRBinaryOpIC::UNINITIALIZED), |
| 228 name_(NULL) { |
| 229 use_vfp3_ = CpuFeatures::IsSupported(VFP3); |
| 230 ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); |
| 231 } |
| 232 |
| 233 TypeRecordingBinaryOpStub( |
| 234 int key, |
| 235 TRBinaryOpIC::TypeInfo operands_type, |
| 236 TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED) |
| 237 : op_(OpBits::decode(key)), |
| 238 mode_(ModeBits::decode(key)), |
| 239 use_vfp3_(VFP3Bits::decode(key)), |
| 240 operands_type_(operands_type), |
| 241 result_type_(result_type), |
| 242 name_(NULL) { } |
| 243 |
| 244 private: |
| 245 enum SmiCodeGenerateHeapNumberResults { |
| 246 ALLOW_HEAPNUMBER_RESULTS, |
| 247 NO_HEAPNUMBER_RESULTS |
| 248 }; |
| 249 |
| 250 Token::Value op_; |
| 251 OverwriteMode mode_; |
| 252 bool use_vfp3_; |
| 253 |
| 254 // Operand type information determined at runtime. |
| 255 TRBinaryOpIC::TypeInfo operands_type_; |
| 256 TRBinaryOpIC::TypeInfo result_type_; |
| 257 |
| 258 char* name_; |
| 259 |
| 260 const char* GetName(); |
| 261 |
| 262 #ifdef DEBUG |
| 263 void Print() { |
| 264 PrintF("TypeRecordingBinaryOpStub %d (op %s), " |
| 265 "(mode %d, runtime_type_info %s)\n", |
| 266 MinorKey(), |
| 267 Token::String(op_), |
| 268 static_cast<int>(mode_), |
| 269 TRBinaryOpIC::GetName(operands_type_)); |
| 270 } |
| 271 #endif |
| 272 |
| 273 // Minor key encoding in 16 bits: RRRTTTVOOOOOOOMM. |
| 274 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; |
| 275 class OpBits: public BitField<Token::Value, 2, 7> {}; |
| 276 class VFP3Bits: public BitField<bool, 9, 1> {}; |
| 277 class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {}; |
| 278 class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {}; |
| 279 |
| 280 Major MajorKey() { return TypeRecordingBinaryOp; } |
| 281 int MinorKey() { |
| 282 return OpBits::encode(op_) |
| 283 | ModeBits::encode(mode_) |
| 284 | VFP3Bits::encode(use_vfp3_) |
| 285 | OperandTypeInfoBits::encode(operands_type_) |
| 286 | ResultTypeInfoBits::encode(result_type_); |
| 287 } |
| 288 |
| 289 void Generate(MacroAssembler* masm); |
| 290 void GenerateGeneric(MacroAssembler* masm); |
| 291 void GenerateSmiSmiOperation(MacroAssembler* masm); |
| 292 void GenerateFPOperation(MacroAssembler* masm, |
| 293 bool smi_operands, |
| 294 Label* not_numbers, |
| 295 Label* gc_required); |
| 296 void GenerateSmiCode(MacroAssembler* masm, |
| 297 Label* gc_required, |
| 298 SmiCodeGenerateHeapNumberResults heapnumber_results); |
| 299 void GenerateLoadArguments(MacroAssembler* masm); |
| 300 void GenerateReturn(MacroAssembler* masm); |
| 301 void GenerateUninitializedStub(MacroAssembler* masm); |
| 302 void GenerateSmiStub(MacroAssembler* masm); |
| 303 void GenerateInt32Stub(MacroAssembler* masm); |
| 304 void GenerateHeapNumberStub(MacroAssembler* masm); |
| 305 void GenerateStringStub(MacroAssembler* masm); |
| 306 void GenerateGenericStub(MacroAssembler* masm); |
| 307 void GenerateAddStrings(MacroAssembler* masm); |
| 308 void GenerateCallRuntime(MacroAssembler* masm); |
| 309 |
| 310 void GenerateHeapResultAllocation(MacroAssembler* masm, |
| 311 Register result, |
| 312 Register heap_number_map, |
| 313 Register scratch1, |
| 314 Register scratch2, |
| 315 Label* gc_required); |
| 316 void GenerateRegisterArgsPush(MacroAssembler* masm); |
| 317 void GenerateTypeTransition(MacroAssembler* masm); |
| 318 void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); |
| 319 |
| 320 virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; } |
| 321 |
| 322 virtual InlineCacheState GetICState() { |
| 323 return TRBinaryOpIC::ToState(operands_type_); |
| 324 } |
| 325 |
| 326 virtual void FinishCode(Code* code) { |
| 327 code->set_type_recording_binary_op_type(operands_type_); |
| 328 code->set_type_recording_binary_op_result_type(result_type_); |
| 329 } |
| 330 |
| 331 friend class CodeGenerator; |
| 332 }; |
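Note on the MinorKey() encoding above: mode occupies bits 0-1, op bits 2-8, the VFP3 flag bit 9, and the operand/result type infos bits 10-12 and 13-15, which is the RRRTTTVOOOOOOOMM diagram read from bit 15 down to bit 0; the ASSERT in the first constructor checks that every Token::Value fits in the 7-bit op field. A minimal standalone sketch of the packing, using a simplified stand-in for V8's BitField template and invented field values (illustrative only, not code from this patch):

    #include <cstdio>

    // Simplified stand-in for V8's BitField<T, shift, size> template.
    template <class T, int shift, int size>
    struct BitField {
      static int encode(T value) { return static_cast<int>(value) << shift; }
      static T decode(int key) {
        return static_cast<T>((key >> shift) & ((1 << size) - 1));
      }
    };

    // Invented stand-in for the real enum, for illustration only.
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    int main() {
      // Same layout as TypeRecordingBinaryOpStub::MinorKey():
      // RRRTTTVOOOOOOOMM, read from bit 15 down to bit 0.
      typedef BitField<OverwriteMode, 0, 2> ModeBits;
      typedef BitField<int, 2, 7> OpBits;  // Token::Value in the real code
      typedef BitField<bool, 9, 1> VFP3Bits;
      typedef BitField<int, 10, 3> OperandTypeInfoBits;
      typedef BitField<int, 13, 3> ResultTypeInfoBits;

      int key = OpBits::encode(42)              // hypothetical token value
              | ModeBits::encode(OVERWRITE_LEFT)
              | VFP3Bits::encode(true)
              | OperandTypeInfoBits::encode(3)  // hypothetical type info
              | ResultTypeInfoBits::encode(3);

      // The key-based constructor reverses this: op, mode and the VFP3 flag
      // are decoded from the key; the type infos are passed in separately.
      printf("key=0x%04x op=%d mode=%d vfp3=%d\n",
             key,
             OpBits::decode(key),
             static_cast<int>(ModeBits::decode(key)),
             static_cast<int>(VFP3Bits::decode(key)));
      return 0;
    }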
| 333 |
| 334 |
| 221 // Flag that indicates how to generate code for the stub StringAddStub. | 335 // Flag that indicates how to generate code for the stub StringAddStub. |
| 222 enum StringAddFlags { | 336 enum StringAddFlags { |
| 223 NO_STRING_ADD_FLAGS = 0, | 337 NO_STRING_ADD_FLAGS = 0, |
| 224 NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. | 338 NO_STRING_CHECK_IN_STUB = 1 << 0 // Omit string check in stub. |
| 225 }; | 339 }; |
| 226 | 340 |
| 227 | 341 |
| 228 class StringAddStub: public CodeStub { | 342 class StringAddStub: public CodeStub { |
| 229 public: | 343 public: |
| 230 explicit StringAddStub(StringAddFlags flags) { | 344 explicit StringAddStub(StringAddFlags flags) { |
| (...skipping 219 matching lines...) |
| 450 virtual ~RegExpCEntryStub() {} | 564 virtual ~RegExpCEntryStub() {} |
| 451 void Generate(MacroAssembler* masm); | 565 void Generate(MacroAssembler* masm); |
| 452 | 566 |
| 453 private: | 567 private: |
| 454 Major MajorKey() { return RegExpCEntry; } | 568 Major MajorKey() { return RegExpCEntry; } |
| 455 int MinorKey() { return 0; } | 569 int MinorKey() { return 0; } |
| 456 const char* GetName() { return "RegExpCEntryStub"; } | 570 const char* GetName() { return "RegExpCEntryStub"; } |
| 457 }; | 571 }; |
| 458 | 572 |
| 459 | 573 |
| 574 // Trampoline stub to call into native code. To call safely into native code |
| 575 // in the presence of a compacting GC (which can move code objects), we need |
| 576 // to keep the code that called into native code pinned in memory. Currently |
| 577 // the simplest approach is to generate such a stub early enough that it can |
| 578 // never be moved by the GC. |
| 579 class DirectCEntryStub: public CodeStub { |
| 580 public: |
| 581 DirectCEntryStub() {} |
| 582 void Generate(MacroAssembler* masm); |
| 583 void GenerateCall(MacroAssembler* masm, ApiFunction *function); |
| 584 void GenerateCall(MacroAssembler* masm, Register target); |
| 585 |
| 586 private: |
| 587 Major MajorKey() { return DirectCEntry; } |
| 588 int MinorKey() { return 0; } |
| 589 const char* GetName() { return "DirectCEntryStub"; } |
| 590 }; |
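A hedged usage sketch based only on the interface above (the register choice is an assumption for illustration, not part of this change):

    // Hypothetical call site in generated ARM code, where r2 is assumed to
    // already hold the address of the C function to call. The stub supplies
    // a GC-immovable trampoline around the call.
    DirectCEntryStub stub;
    stub.GenerateCall(masm, r2);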
| 591 |
| 592 |
| 593 // Generate code to load an element from a pixel array. The receiver is assumed |
| 594 // not to be a smi and to have elements; the caller must guarantee this |
| 595 // precondition. If key is not a smi, the generated code branches to key_not_smi. |
| 596 // Callers can pass NULL for key_not_smi to signal that a smi check has already |
| 597 // been performed on key, so that no smi check is generated. If the elements are |
| 598 // not a pixel array, the code jumps to not_pixel_array; if key is not a valid |
| 599 // index within the bounds of the array, it jumps to out_of_range. receiver, key |
| 600 // and elements are unchanged throughout the generated code sequence. |
| 601 void GenerateFastPixelArrayLoad(MacroAssembler* masm, |
| 602 Register receiver, |
| 603 Register key, |
| 604 Register elements_map, |
| 605 Register elements, |
| 606 Register scratch1, |
| 607 Register scratch2, |
| 608 Register result, |
| 609 Label* not_pixel_array, |
| 610 Label* key_not_smi, |
| 611 Label* out_of_range); |
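A hedged sketch of wiring the bailout labels from a hypothetical keyed-load caller (the register assignments are invented for illustration; passing NULL for key_not_smi uses the convention documented above):

    // Hypothetical caller: receiver in r1, key in r0, key already smi-checked.
    Label not_pixel_array, out_of_range;
    GenerateFastPixelArrayLoad(masm,
                               r1,   // receiver
                               r0,   // key
                               r2,   // elements_map
                               r3,   // elements
                               r4,   // scratch1
                               r5,   // scratch2
                               r0,   // result
                               &not_pixel_array,  // bail to the generic path
                               NULL,              // smi check already done
                               &out_of_range);
    __ Ret();  // fast path: the result register holds the loaded pixel value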
| 612 |
| 613 // Generate code to store an element into a pixel array, clamping values to the |
| 614 // range [0..255]. The receiver is assumed not to be a smi and to have elements; |
| 615 // the caller must guarantee this precondition. If key is not a smi, the |
| 616 // generated code branches to key_not_smi. Callers can pass NULL for |
| 617 // key_not_smi to signal that a smi check has already been performed on key, so |
| 618 // that no smi check is generated. If value is not a smi, the generated |
| 619 // code branches to value_not_smi. If the receiver doesn't have pixel array |
| 620 // elements, the generated code branches to not_pixel_array, unless |
| 621 // not_pixel_array is NULL, in which case the caller must ensure that the |
| 622 // receiver has pixel array elements. If key is not a valid index within the |
| 623 // bounds of the pixel array, the generated code jumps to out_of_range. If |
| 624 // load_elements_from_receiver is true, the elements of the receiver are loaded |
| 625 // into elements; otherwise elements is assumed to already hold the receiver's |
| 626 // elements. If load_elements_map_from_elements is true, elements_map is loaded |
| 627 // from elements; otherwise it is assumed to already contain the elements map. |
| 628 void GenerateFastPixelArrayStore(MacroAssembler* masm, |
| 629 Register receiver, |
| 630 Register key, |
| 631 Register value, |
| 632 Register elements, |
| 633 Register elements_map, |
| 634 Register scratch1, |
| 635 Register scratch2, |
| 636 bool load_elements_from_receiver, |
| 637 bool load_elements_map_from_elements, |
| 638 Label* key_not_smi, |
| 639 Label* value_not_smi, |
| 640 Label* not_pixel_array, |
| 641 Label* out_of_range); |
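For reference, the [0..255] clamping named above saturates at the bounds rather than wrapping modulo 256, matching canvas pixel-array store semantics. A standalone illustration of that behavior (not code from this patch):

    // What "clamping values to [0..255]" means for an untagged integer
    // value being stored into a pixel array.
    static inline int ClampToByte(int value) {
      if (value < 0) return 0;      // saturate below
      if (value > 255) return 255;  // saturate above
      return value;
    }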
| 642 |
| 460 } } // namespace v8::internal | 643 } } // namespace v8::internal |
| 461 | 644 |
| 462 #endif // V8_ARM_CODE_STUBS_ARM_H_ | 645 #endif // V8_ARM_CODE_STUBS_ARM_H_ |