Chromium Code Reviews

Side by Side Diff: src/sh4/code-stubs-sh4.h

Issue 11275184: First draft of the sh4 port
Base URL: http://github.com/v8/v8.git@master
Patch Set: Use GYP and fix some typos (created 8 years, 1 month ago)
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2011-2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its 12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived 13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission. 14 // from this software without specific prior written permission.
15 // 15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #ifndef V8_ARM_CODE_STUBS_ARM_H_ 28 #ifndef V8_SH4_CODE_STUBS_SH4_H_
29 #define V8_ARM_CODE_STUBS_ARM_H_ 29 #define V8_SH4_CODE_STUBS_SH4_H_
30 30
31 #include "ic-inl.h" 31 #include "ic-inl.h"
32 32
33 namespace v8 { 33 namespace v8 {
34 namespace internal { 34 namespace internal {
35 35
36 36
37 // Compute a transcendental math function natively, or call the 37 // Compute a transcendental math function natively, or call the
38 // TranscendentalCache runtime function. 38 // TranscendentalCache runtime function.
39 class TranscendentalCacheStub: public CodeStub { 39 class TranscendentalCacheStub: public CodeStub {
(...skipping 102 matching lines...)
142 }; 142 };
143 143
144 144
145 class BinaryOpStub: public CodeStub { 145 class BinaryOpStub: public CodeStub {
146 public: 146 public:
147 BinaryOpStub(Token::Value op, OverwriteMode mode) 147 BinaryOpStub(Token::Value op, OverwriteMode mode)
148 : op_(op), 148 : op_(op),
149 mode_(mode), 149 mode_(mode),
150 operands_type_(BinaryOpIC::UNINITIALIZED), 150 operands_type_(BinaryOpIC::UNINITIALIZED),
151 result_type_(BinaryOpIC::UNINITIALIZED) { 151 result_type_(BinaryOpIC::UNINITIALIZED) {
152 use_vfp2_ = CpuFeatures::IsSupported(VFP2); 152 use_fpu_ = CpuFeatures::IsSupported(FPU);
153 ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); 153 ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
154 } 154 }
155 155
156 BinaryOpStub( 156 BinaryOpStub(
157 int key, 157 int key,
158 BinaryOpIC::TypeInfo operands_type, 158 BinaryOpIC::TypeInfo operands_type,
159 BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) 159 BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
160 : op_(OpBits::decode(key)), 160 : op_(OpBits::decode(key)),
161 mode_(ModeBits::decode(key)), 161 mode_(ModeBits::decode(key)),
162 use_vfp2_(VFP2Bits::decode(key)), 162 use_fpu_(FPUBits::decode(key)),
163 operands_type_(operands_type), 163 operands_type_(operands_type),
164 result_type_(result_type) { } 164 result_type_(result_type) { }
165 165
166 private: 166 private:
167 enum SmiCodeGenerateHeapNumberResults { 167 enum SmiCodeGenerateHeapNumberResults {
168 ALLOW_HEAPNUMBER_RESULTS, 168 ALLOW_HEAPNUMBER_RESULTS,
169 NO_HEAPNUMBER_RESULTS 169 NO_HEAPNUMBER_RESULTS
170 }; 170 };
171 171
172 Token::Value op_; 172 Token::Value op_;
173 OverwriteMode mode_; 173 OverwriteMode mode_;
174 bool use_vfp2_; 174 bool use_fpu_;
175 175
176 // Operand type information determined at runtime. 176 // Operand type information determined at runtime.
177 BinaryOpIC::TypeInfo operands_type_; 177 BinaryOpIC::TypeInfo operands_type_;
178 BinaryOpIC::TypeInfo result_type_; 178 BinaryOpIC::TypeInfo result_type_;
179 179
180 virtual void PrintName(StringStream* stream); 180 virtual void PrintName(StringStream* stream);
181 181
182 // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. 182 // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
183 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; 183 class ModeBits: public BitField<OverwriteMode, 0, 2> {};
184 class OpBits: public BitField<Token::Value, 2, 7> {}; 184 class OpBits: public BitField<Token::Value, 2, 7> {};
185 class VFP2Bits: public BitField<bool, 9, 1> {}; 185 class FPUBits: public BitField<bool, 9, 1> {};
186 class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; 186 class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
187 class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; 187 class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
188 188
189 Major MajorKey() { return BinaryOp; } 189 Major MajorKey() { return BinaryOp; }
190 int MinorKey() { 190 int MinorKey() {
191 return OpBits::encode(op_) 191 return OpBits::encode(op_)
192 | ModeBits::encode(mode_) 192 | ModeBits::encode(mode_)
193 | VFP2Bits::encode(use_vfp2_) 193 | FPUBits::encode(use_fpu_)
194 | OperandTypeInfoBits::encode(operands_type_) 194 | OperandTypeInfoBits::encode(operands_type_)
195 | ResultTypeInfoBits::encode(result_type_); 195 | ResultTypeInfoBits::encode(result_type_);
196 } 196 }
197 197
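Note: the minor key above packs five fields into 16 bits, as the RRRTTTVOOOOOOOMM comment describes (low bits first: MM = mode, OOOOOOO = op, V = use_fpu_, TTT = operand type, RRR = result type). A minimal stand-alone sketch of that packing, using a simplified stand-in for v8::internal::BitField rather than the real template:

    // Simplified stand-in for v8::internal::BitField<T, shift, size>: each
    // field occupies bits [shift, shift + size) of the integer stub key.
    template <class T, int shift, int size>
    struct BitFieldSketch {
      static int encode(T value) { return static_cast<int>(value) << shift; }
      static T decode(int key) {
        return static_cast<T>((key >> shift) & ((1 << size) - 1));
      }
    };

    // Layout mirrors the comment above.
    typedef BitFieldSketch<int, 0, 2>  ModeBits;
    typedef BitFieldSketch<int, 2, 7>  OpBits;
    typedef BitFieldSketch<bool, 9, 1> FPUBits;
    typedef BitFieldSketch<int, 10, 3> OperandTypeInfoBits;
    typedef BitFieldSketch<int, 13, 3> ResultTypeInfoBits;

    int main() {
      // Illustrative values only; real keys use Token::Value and BinaryOpIC enums.
      int key = OpBits::encode(5) | ModeBits::encode(1) | FPUBits::encode(true) |
                OperandTypeInfoBits::encode(2) | ResultTypeInfoBits::encode(3);
      return (OpBits::decode(key) == 5 && FPUBits::decode(key)) ? 0 : 1;
    }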
198 void Generate(MacroAssembler* masm); 198 void Generate(MacroAssembler* masm);
199 void GenerateGeneric(MacroAssembler* masm); 199 void GenerateGeneric(MacroAssembler* masm);
200 void GenerateSmiSmiOperation(MacroAssembler* masm); 200 void GenerateSmiSmiOperation(MacroAssembler* masm);
201 void GenerateFPOperation(MacroAssembler* masm, 201 void GenerateFPOperation(MacroAssembler* masm,
202 bool smi_operands, 202 bool smi_operands,
203 Label* not_numbers, 203 Label* not_numbers,
(...skipping 83 matching lines...)
287 Register scratch1, 287 Register scratch1,
288 Register scratch2, 288 Register scratch2,
289 Register scratch3, 289 Register scratch3,
290 Register scratch4, 290 Register scratch4,
291 Register scratch5, 291 Register scratch5,
292 Label* not_found); 292 Label* not_found);
293 293
294 // Generate string hash. 294 // Generate string hash.
295 static void GenerateHashInit(MacroAssembler* masm, 295 static void GenerateHashInit(MacroAssembler* masm,
296 Register hash, 296 Register hash,
297 Register character); 297 Register character,
298 Register scratch);
298 299
299 static void GenerateHashAddCharacter(MacroAssembler* masm, 300 static void GenerateHashAddCharacter(MacroAssembler* masm,
300 Register hash, 301 Register hash,
301 Register character); 302 Register character,
303 Register scratch);
302 304
303 static void GenerateHashGetHash(MacroAssembler* masm, 305 static void GenerateHashGetHash(MacroAssembler* masm,
304 Register hash); 306 Register hash,
307 Register scratch);
305 308
306 private: 309 private:
307 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper); 310 DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
308 }; 311 };
309 312
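Note: the three hash helpers gain an extra scratch register on SH4, presumably because the ARM versions fold the shifts directly into the operands of add/eor, which SH4 cannot do, so the shifted intermediate needs its own register. For reference, a plain C++ sketch of the one-at-a-time-style string hash the ARM stubs compute; seed loading and the zero-hash fixup are elided, and the exact constants should be checked against the ARM implementation:

    #include <stdint.h>

    static uint32_t HashInit(uint32_t seed, uint32_t character) {
      uint32_t hash = seed + character;  // real stub loads the seed from the roots
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    static uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
      hash += character;
      hash += hash << 10;
      hash ^= hash >> 6;
      return hash;
    }

    static uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;  // final avalanche steps
      hash ^= hash >> 11;
      hash += hash << 15;
      return hash;
    }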
310 313
311 // Flag that indicates how to generate code for the stub StringAddStub. 314 // Flag that indicates how to generate code for the stub StringAddStub.
312 enum StringAddFlags { 315 enum StringAddFlags {
313 NO_STRING_ADD_FLAGS = 0, 316 NO_STRING_ADD_FLAGS = 0,
314 // Omit left string check in stub (left is definitely a string). 317 // Omit left string check in stub (left is definitely a string).
(...skipping 163 matching lines...)
478 STORE_BUFFER_ONLY, 481 STORE_BUFFER_ONLY,
479 INCREMENTAL, 482 INCREMENTAL,
480 INCREMENTAL_COMPACTION 483 INCREMENTAL_COMPACTION
481 }; 484 };
482 485
483 virtual bool IsPregenerated(); 486 virtual bool IsPregenerated();
484 static void GenerateFixedRegStubsAheadOfTime(); 487 static void GenerateFixedRegStubsAheadOfTime();
485 virtual bool SometimesSetsUpAFrame() { return false; } 488 virtual bool SometimesSetsUpAFrame() { return false; }
486 489
487 static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { 490 static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
488 masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20)); 491 UNIMPLEMENTED();
489 ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
490 } 492 }
491 493
492 static void PatchNopIntoBranch(MacroAssembler* masm, int pos) { 494 static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
493 masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27); 495 UNIMPLEMENTED();
494 ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
495 } 496 }
496 497
497 static Mode GetMode(Code* stub) { 498 static Mode GetMode(Code* stub) {
498 Instr first_instruction = Assembler::instr_at(stub->instruction_start()); 499 // TODO(STM): UNIMPLEMENTED
499 Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
500 Assembler::kInstrSize);
501
502 if (Assembler::IsBranch(first_instruction)) {
503 return INCREMENTAL;
504 }
505
506 ASSERT(Assembler::IsTstImmediate(first_instruction));
507
508 if (Assembler::IsBranch(second_instruction)) {
509 return INCREMENTAL_COMPACTION;
510 }
511
512 ASSERT(Assembler::IsTstImmediate(second_instruction));
513
514 return STORE_BUFFER_ONLY; 500 return STORE_BUFFER_ONLY;
515 } 501 }
516 502
517 static void Patch(Code* stub, Mode mode) { 503 static void Patch(Code* stub, Mode mode) {
518 MacroAssembler masm(NULL, 504 UNIMPLEMENTED();
519 stub->instruction_start(),
520 stub->instruction_size());
521 switch (mode) {
522 case STORE_BUFFER_ONLY:
523 ASSERT(GetMode(stub) == INCREMENTAL ||
524 GetMode(stub) == INCREMENTAL_COMPACTION);
525 PatchBranchIntoNop(&masm, 0);
526 PatchBranchIntoNop(&masm, Assembler::kInstrSize);
527 break;
528 case INCREMENTAL:
529 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
530 PatchNopIntoBranch(&masm, 0);
531 break;
532 case INCREMENTAL_COMPACTION:
533 ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
534 PatchNopIntoBranch(&masm, Assembler::kInstrSize);
535 break;
536 }
537 ASSERT(GetMode(stub) == mode);
538 CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
539 } 505 }
540 506
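Note: on ARM, GetMode decodes the stub's current mode from whether its first two instructions have been patched from a tst (nop-like) instruction into branches, and Patch rewrites those instructions and flushes the icache. The SH4 port leaves all of this UNIMPLEMENTED for now and always reports STORE_BUFFER_ONLY. The decision logic being ported is only this, sketched here over two booleans rather than decoded SH4 instructions:

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    // first/second say whether the corresponding instruction at the start of
    // the stub has been patched into a branch.
    static Mode InferMode(bool first_is_branch, bool second_is_branch) {
      if (first_is_branch) return INCREMENTAL;
      if (second_is_branch) return INCREMENTAL_COMPACTION;
      return STORE_BUFFER_ONLY;  // both still nop-like
    }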
541 private: 507 private:
542 // This is a helper class for freeing up 3 scratch registers. The input is 508 // This is a helper class for freeing up 3 scratch registers. The input is
543 // two registers that must be preserved and one scratch register provided by 509 // two registers that must be preserved and one scratch register provided by
544 // the caller. 510 // the caller.
545 class RegisterAllocation { 511 class RegisterAllocation {
546 public: 512 public:
547 RegisterAllocation(Register object, 513 RegisterAllocation(Register object,
548 Register address, 514 Register address,
(...skipping 13 matching lines...)
562 } 528 }
563 529
564 void Restore(MacroAssembler* masm) { 530 void Restore(MacroAssembler* masm) {
565 masm->pop(scratch1_); 531 masm->pop(scratch1_);
566 } 532 }
567 533
568 // If we have to call into C then we need to save and restore all caller- 534 // If we have to call into C then we need to save and restore all caller-
569 // saved registers that were not already preserved. The scratch registers 535 // saved registers that were not already preserved. The scratch registers
570 // will be restored by other means so we don't bother pushing them here. 536 // will be restored by other means so we don't bother pushing them here.
571 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { 537 void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
572 masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); 538 masm->pushm((kJSCallerSaved | pr.bit()) & ~scratch1_.bit());
573 if (mode == kSaveFPRegs) { 539 if (mode == kSaveFPRegs) {
574 CpuFeatures::Scope scope(VFP2); 540 UNIMPLEMENTED();
575 masm->sub(sp,
576 sp,
577 Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
578 // Save all VFP registers except d0.
579 for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
580 DwVfpRegister reg = DwVfpRegister::from_code(i);
581 masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
582 }
583 } 541 }
584 } 542 }
585 543
586 inline void RestoreCallerSaveRegisters(MacroAssembler*masm, 544 inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
587 SaveFPRegsMode mode) { 545 SaveFPRegsMode mode) {
588 if (mode == kSaveFPRegs) { 546 if (mode == kSaveFPRegs) {
589 CpuFeatures::Scope scope(VFP2); 547 UNIMPLEMENTED();
590 // Restore all VFP registers except d0.
591 for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) {
592 DwVfpRegister reg = DwVfpRegister::from_code(i);
593 masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize));
594 }
595 masm->add(sp,
596 sp,
597 Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1)));
598 } 548 }
599 masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); 549 masm->popm((kJSCallerSaved | pr.bit()) & ~scratch1_.bit());
600 } 550 }
601 551
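Note: the pushm/popm calls above take a RegList bit mask in which every register contributes one bit, so "all JS caller-saved registers plus pr, except the scratch register" is plain mask arithmetic. A sketch with made-up register codes, not real SH4 encodings:

    #include <stdint.h>

    typedef uint32_t RegList;
    static RegList Bit(int code) { return 1u << code; }

    int main() {
      RegList kJSCallerSaved = Bit(0) | Bit(1) | Bit(2) | Bit(3);  // hypothetical codes
      RegList pr_bit = Bit(18);                                    // hypothetical code for pr
      RegList scratch1_bit = Bit(2);
      RegList to_save = (kJSCallerSaved | pr_bit) & ~scratch1_bit;
      return to_save == (Bit(0) | Bit(1) | Bit(3) | Bit(18)) ? 0 : 1;
    }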
602 inline Register object() { return object_; } 552 inline Register object() { return object_; }
603 inline Register address() { return address_; } 553 inline Register address() { return address_; }
604 inline Register scratch0() { return scratch0_; } 554 inline Register scratch0() { return scratch0_; }
605 inline Register scratch1() { return scratch1_; } 555 inline Register scratch1() { return scratch1_; }
606 556
607 private: 557 private:
608 Register object_; 558 Register object_;
609 Register address_; 559 Register address_;
(...skipping 76 matching lines...)
686 }; 636 };
687 637
688 638
689 // Trampoline stub to call into native code. To call safely into native code 639 // Trampoline stub to call into native code. To call safely into native code
690 // in the presence of compacting GC (which can move code objects) we need to 640 // in the presence of compacting GC (which can move code objects) we need to
691 // keep the code which called into native pinned in the memory. Currently the 641 // keep the code which called into native pinned in the memory. Currently the
692 // simplest approach is to generate such stub early enough so it can never be 642 // simplest approach is to generate such stub early enough so it can never be
693 // moved by GC 643 // moved by GC
694 class DirectCEntryStub: public CodeStub { 644 class DirectCEntryStub: public CodeStub {
695 public: 645 public:
696 DirectCEntryStub() {} 646 explicit DirectCEntryStub(Register scratch) : scratch_(scratch) {}
697 void Generate(MacroAssembler* masm); 647 void Generate(MacroAssembler* masm);
698 void GenerateCall(MacroAssembler* masm, ExternalReference function); 648 void GenerateCall(MacroAssembler* masm, ExternalReference function,
699 void GenerateCall(MacroAssembler* masm, Register target); 649 Register scratch1, Register scratch2);
650 void GenerateCall(MacroAssembler* masm, Register target, Register scratch1);
700 651
701 private: 652 private:
702 Major MajorKey() { return DirectCEntry; } 653 Major MajorKey() { return DirectCEntry; }
703 int MinorKey() { return 0; } 654 int MinorKey() { return 0; }
655 Register scratch_;
704 656
705 bool NeedsImmovableCode() { return true; } 657 bool NeedsImmovableCode() { return true; }
706 }; 658 };
707 659
708 660
709 class FloatingPointHelper : public AllStatic { 661 class FloatingPointHelper : public AllStatic {
710 public: 662 public:
711 enum Destination { 663 enum Destination {
712 kVFPRegisters, 664 kVFPRegisters,
713 kCoreRegisters 665 kCoreRegisters
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
766 718
767 // Load the number from object into double_dst in the double format. 719 // Load the number from object into double_dst in the double format.
768 // Control will jump to not_int32 if the value cannot be exactly represented 720 // Control will jump to not_int32 if the value cannot be exactly represented
769 // by a 32-bit integer. 721 // by a 32-bit integer.
 770 // Floating point values in the 32-bit integer range that are not exact integers 722
771 // won't be loaded. 723 // won't be loaded.
772 static void LoadNumberAsInt32Double(MacroAssembler* masm, 724 static void LoadNumberAsInt32Double(MacroAssembler* masm,
773 Register object, 725 Register object,
774 Destination destination, 726 Destination destination,
775 DwVfpRegister double_dst, 727 DwVfpRegister double_dst,
776 DwVfpRegister double_scratch,
777 Register dst1, 728 Register dst1,
778 Register dst2, 729 Register dst2,
779 Register heap_number_map, 730 Register heap_number_map,
780 Register scratch1, 731 Register scratch1,
781 Register scratch2, 732 Register scratch2,
782 SwVfpRegister single_scratch, 733 SwVfpRegister single_scratch,
783 Label* not_int32); 734 Label* not_int32);
784 735
785 // Loads the number from object into dst as a 32-bit integer. 736 // Loads the number from object into dst as a 32-bit integer.
786 // Control will jump to not_int32 if the object cannot be exactly represented 737 // Control will jump to not_int32 if the object cannot be exactly represented
787 // by a 32-bit integer. 738 // by a 32-bit integer.
 788 // Floating point values in the 32-bit integer range that are not exact integers 739
789 // won't be converted. 740 // won't be converted.
790 // scratch3 is not used when VFP3 is supported. 741 // scratch3 is not used when VFP3 is supported.
791 static void LoadNumberAsInt32(MacroAssembler* masm, 742 static void LoadNumberAsInt32(MacroAssembler* masm,
792 Register object, 743 Register object,
793 Register dst, 744 Register dst,
794 Register heap_number_map, 745 Register heap_number_map,
795 Register scratch1, 746 Register scratch1,
796 Register scratch2, 747 Register scratch2,
797 Register scratch3, 748 Register scratch3,
798 DwVfpRegister double_scratch0, 749 DwVfpRegister double_scratch,
799 DwVfpRegister double_scratch1,
800 Label* not_int32); 750 Label* not_int32);
801 751
802 // Generate non VFP3 code to check if a double can be exactly represented by a 752 // Generate non VFP3 code to check if a double can be exactly represented by a
803 // 32-bit integer. This does not check for 0 or -0, which need 753 // 32-bit integer. This does not check for 0 or -0, which need
804 // to be checked for separately. 754 // to be checked for separately.
805 // Control jumps to not_int32 if the value is not a 32-bit integer, and falls 755 // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
806 // through otherwise. 756 // through otherwise.
 807 // src1 and src2 will be clobbered. 757
808 // 758 //
809 // Expected input: 759 // Expected input:
(...skipping 85 matching lines...)
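Note: the real non-VFP3 path inspects the exponent and mantissa bits of the double directly; as a portable statement of the condition being checked (a range test plus a round-trip comparison, with 0 and -0 left to the caller as the comment above says), something like:

    #include <stdint.h>

    static bool IsExactInt32(double value, int32_t* out) {
      // Range test also rejects NaN; these bounds are exact as doubles.
      if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
      int32_t as_int = static_cast<int32_t>(value);
      if (static_cast<double>(as_int) != value) return false;  // has a fractional part
      *out = as_int;
      return true;
    }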
895 } 845 }
896 846
897 class LookupModeBits: public BitField<LookupMode, 0, 1> {}; 847 class LookupModeBits: public BitField<LookupMode, 0, 1> {};
898 848
899 LookupMode mode_; 849 LookupMode mode_;
900 }; 850 };
901 851
902 852
903 } } // namespace v8::internal 853 } } // namespace v8::internal
904 854
905 #endif // V8_ARM_CODE_STUBS_ARM_H_ 855 #endif // V8_SH4_CODE_STUBS_SH4_H_