Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(167)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 6272019: ARM: Change BranchOnSmi/BranchOnNotSmi to JumpIfSmi/JumpIfNotSmi (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 1066 matching lines...) Expand 10 before | Expand all | Expand 10 after
1077 // Call the generic stub. The numbers in r0 and r1 have 1077 // Call the generic stub. The numbers in r0 and r1 have
1078 // to be tagged to Smis. If that is not possible, deoptimize. 1078 // to be tagged to Smis. If that is not possible, deoptimize.
1079 DeferredModI* deferred = new DeferredModI(this, instr); 1079 DeferredModI* deferred = new DeferredModI(this, instr);
1080 __ TrySmiTag(left, &deoptimize, scratch); 1080 __ TrySmiTag(left, &deoptimize, scratch);
1081 __ TrySmiTag(right, &deoptimize, scratch); 1081 __ TrySmiTag(right, &deoptimize, scratch);
1082 1082
1083 __ b(al, deferred->entry()); 1083 __ b(al, deferred->entry());
1084 __ bind(deferred->exit()); 1084 __ bind(deferred->exit());
1085 1085
1086 // If the result in r0 is a Smi, untag it, else deoptimize. 1086 // If the result in r0 is a Smi, untag it, else deoptimize.
1087 __ BranchOnNotSmi(result, &deoptimize); 1087 __ JumpIfNotSmi(result, &deoptimize);
1088 __ SmiUntag(result); 1088 __ SmiUntag(result);
1089 1089
1090 __ b(al, &done); 1090 __ b(al, &done);
1091 __ bind(&deoptimize); 1091 __ bind(&deoptimize);
1092 DeoptimizeIf(al, instr->environment()); 1092 DeoptimizeIf(al, instr->environment());
1093 __ bind(&done); 1093 __ bind(&done);
1094 } 1094 }
1095 1095
1096 1096
1097 void LCodeGen::DoDivI(LDivI* instr) { 1097 void LCodeGen::DoDivI(LDivI* instr) {
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1157 // to be tagged to Smis. If that is not possible, deoptimize. 1157 // to be tagged to Smis. If that is not possible, deoptimize.
1158 DeferredDivI* deferred = new DeferredDivI(this, instr); 1158 DeferredDivI* deferred = new DeferredDivI(this, instr);
1159 1159
1160 __ TrySmiTag(left, &deoptimize, scratch); 1160 __ TrySmiTag(left, &deoptimize, scratch);
1161 __ TrySmiTag(right, &deoptimize, scratch); 1161 __ TrySmiTag(right, &deoptimize, scratch);
1162 1162
1163 __ b(al, deferred->entry()); 1163 __ b(al, deferred->entry());
1164 __ bind(deferred->exit()); 1164 __ bind(deferred->exit());
1165 1165
1166 // If the result in r0 is a Smi, untag it, else deoptimize. 1166 // If the result in r0 is a Smi, untag it, else deoptimize.
1167 __ BranchOnNotSmi(result, &deoptimize); 1167 __ JumpIfNotSmi(result, &deoptimize);
1168 __ SmiUntag(result); 1168 __ SmiUntag(result);
1169 __ b(&done); 1169 __ b(&done);
1170 1170
1171 __ bind(&deoptimize); 1171 __ bind(&deoptimize);
1172 DeoptimizeIf(al, instr->environment()); 1172 DeoptimizeIf(al, instr->environment());
1173 __ bind(&done); 1173 __ bind(&done);
1174 } 1174 }
1175 1175
1176 1176
1177 template<int T> 1177 template<int T>
(...skipping 549 matching lines...) Expand 10 before | Expand all | Expand 10 after
1727 EmitBranch(true_block, false_block, ne); 1727 EmitBranch(true_block, false_block, ne);
1728 } 1728 }
1729 } 1729 }
1730 1730
1731 1731
1732 Condition LCodeGen::EmitIsObject(Register input, 1732 Condition LCodeGen::EmitIsObject(Register input,
1733 Register temp1, 1733 Register temp1,
1734 Register temp2, 1734 Register temp2,
1735 Label* is_not_object, 1735 Label* is_not_object,
1736 Label* is_object) { 1736 Label* is_object) {
1737 __ BranchOnSmi(input, is_not_object); 1737 __ JumpIfSmi(input, is_not_object);
1738 1738
1739 __ LoadRoot(temp1, Heap::kNullValueRootIndex); 1739 __ LoadRoot(temp1, Heap::kNullValueRootIndex);
1740 __ cmp(input, temp1); 1740 __ cmp(input, temp1);
1741 __ b(eq, is_object); 1741 __ b(eq, is_object);
1742 1742
1743 // Load map. 1743 // Load map.
1744 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 1744 __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
1745 // Undetectable objects behave like undefined. 1745 // Undetectable objects behave like undefined.
1746 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 1746 __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
1747 __ tst(temp2, Operand(1 << Map::kIsUndetectable)); 1747 __ tst(temp2, Operand(1 << Map::kIsUndetectable));
(...skipping 282 matching lines...) Expand 10 before | Expand all | Expand 10 after
2030 2030
2031 Label done, false_result; 2031 Label done, false_result;
2032 Register object = ToRegister(instr->InputAt(0)); 2032 Register object = ToRegister(instr->InputAt(0));
2033 Register temp = ToRegister(instr->TempAt(0)); 2033 Register temp = ToRegister(instr->TempAt(0));
2034 Register result = ToRegister(instr->result()); 2034 Register result = ToRegister(instr->result());
2035 2035
2036 ASSERT(object.is(r0)); 2036 ASSERT(object.is(r0));
2037 ASSERT(result.is(r0)); 2037 ASSERT(result.is(r0));
2038 2038
2039 // A Smi is not instance of anything. 2039 // A Smi is not instance of anything.
2040 __ BranchOnSmi(object, &false_result); 2040 __ JumpIfSmi(object, &false_result);
2041 2041
2042 // This is the inlined call site instanceof cache. The two occurrences of the 2042 // This is the inlined call site instanceof cache. The two occurrences of the
2043 // hole value will be patched to the last map/result pair generated by the 2043 // hole value will be patched to the last map/result pair generated by the
2044 // instanceof stub. 2044 // instanceof stub.
2045 Label cache_miss; 2045 Label cache_miss;
2046 Register map = temp; 2046 Register map = temp;
2047 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2047 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2048 __ bind(deferred->map_check()); // Label for calculating code patching. 2048 __ bind(deferred->map_check()); // Label for calculating code patching.
2049 // We use Factory::the_hole_value() on purpose instead of loading from the 2049 // We use Factory::the_hole_value() on purpose instead of loading from the
2050 // root array to force relocation to be able to later patch with 2050 // root array to force relocation to be able to later patch with
(...skipping 555 matching lines...) Expand 10 before | Expand all | Expand 10 after
2606 // __ vabs(input, input); 2606 // __ vabs(input, input);
2607 Abort("Double DoMathAbs unimplemented"); 2607 Abort("Double DoMathAbs unimplemented");
2608 } else if (r.IsInteger32()) { 2608 } else if (r.IsInteger32()) {
2609 EmitIntegerMathAbs(instr); 2609 EmitIntegerMathAbs(instr);
2610 } else { 2610 } else {
2611 // Representation is tagged. 2611 // Representation is tagged.
2612 DeferredMathAbsTaggedHeapNumber* deferred = 2612 DeferredMathAbsTaggedHeapNumber* deferred =
2613 new DeferredMathAbsTaggedHeapNumber(this, instr); 2613 new DeferredMathAbsTaggedHeapNumber(this, instr);
2614 Register input = ToRegister(instr->InputAt(0)); 2614 Register input = ToRegister(instr->InputAt(0));
2615 // Smi check. 2615 // Smi check.
2616 __ BranchOnNotSmi(input, deferred->entry()); 2616 __ JumpIfNotSmi(input, deferred->entry());
2617 // If smi, handle it directly. 2617 // If smi, handle it directly.
2618 EmitIntegerMathAbs(instr); 2618 EmitIntegerMathAbs(instr);
2619 __ bind(deferred->exit()); 2619 __ bind(deferred->exit());
2620 } 2620 }
2621 } 2621 }
2622 2622
2623 2623
2624 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 2624 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
2625 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); 2625 DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
2626 Register result = ToRegister(instr->result()); 2626 Register result = ToRegister(instr->result());
(...skipping 1047 matching lines...) Expand 10 before | Expand all | Expand 10 after
3674 3674
3675 3675
3676 void LCodeGen::DoOsrEntry(LOsrEntry* instr) { 3676 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
3677 Abort("DoOsrEntry unimplemented."); 3677 Abort("DoOsrEntry unimplemented.");
3678 } 3678 }
3679 3679
3680 3680
3681 #undef __ 3681 #undef __
3682 3682
3683 } } // namespace v8::internal 3683 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/ic-arm.cc ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698