Chromium Code Reviews

Side by Side Diff: src/ppc/lithium-codegen-ppc.cc

Issue 901083004: Contribution of PowerPC port (continuation of 422063005) - PPC dir update (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Contribution of PowerPC port (continuation of 422063005) - PPC dir update -comments and rebase Created 5 years, 10 months ago
OLD | NEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/base/bits.h" 7 #include "src/base/bits.h"
8 #include "src/code-factory.h" 8 #include "src/code-factory.h"
9 #include "src/code-stubs.h" 9 #include "src/code-stubs.h"
10 #include "src/hydrogen-osr.h" 10 #include "src/hydrogen-osr.h"
(...skipping 40 matching lines...)
51 51
52 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && 52 return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
53 GenerateJumpTable() && GenerateSafepointTable(); 53 GenerateJumpTable() && GenerateSafepointTable();
54 } 54 }
55 55
56 56
57 void LCodeGen::FinishCode(Handle<Code> code) { 57 void LCodeGen::FinishCode(Handle<Code> code) {
58 DCHECK(is_done()); 58 DCHECK(is_done());
59 code->set_stack_slots(GetStackSlotCount()); 59 code->set_stack_slots(GetStackSlotCount());
60 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); 60 code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
61 if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
62 PopulateDeoptimizationData(code); 61 PopulateDeoptimizationData(code);
63 } 62 }
64 63
65 64
66 void LCodeGen::SaveCallerDoubles() { 65 void LCodeGen::SaveCallerDoubles() {
67 DCHECK(info()->saves_caller_doubles()); 66 DCHECK(info()->saves_caller_doubles());
68 DCHECK(NeedsEagerFrame()); 67 DCHECK(NeedsEagerFrame());
69 Comment(";;; Save clobbered callee double registers"); 68 Comment(";;; Save clobbered callee double registers");
70 int count = 0; 69 int count = 0;
71 BitVector* doubles = chunk()->allocated_double_registers(); 70 BitVector* doubles = chunk()->allocated_double_registers();
(...skipping 39 matching lines...)
111 // r4: Callee's JS function. 110 // r4: Callee's JS function.
112 // cp: Callee's context. 111 // cp: Callee's context.
113 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) 112 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
114 // fp: Caller's frame pointer. 113 // fp: Caller's frame pointer.
115 // lr: Caller's pc. 114 // lr: Caller's pc.
116 // ip: Our own function entry (required by the prologue) 115 // ip: Our own function entry (required by the prologue)
117 116
118 // Sloppy mode functions and builtins need to replace the receiver with the 117 // Sloppy mode functions and builtins need to replace the receiver with the
119 // global proxy when called as functions (without an explicit receiver 118 // global proxy when called as functions (without an explicit receiver
120 // object). 119 // object).
121 if (info_->this_has_uses() && info_->strict_mode() == SLOPPY && 120 if (info_->this_has_uses() && is_sloppy(info_->language_mode()) &&
122 !info_->is_native()) { 121 !info_->is_native()) {
123 Label ok; 122 Label ok;
124 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 123 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
125 __ LoadP(r5, MemOperand(sp, receiver_offset)); 124 __ LoadP(r5, MemOperand(sp, receiver_offset));
126 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex); 125 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
127 __ bne(&ok); 126 __ bne(&ok);
128 127
129 __ LoadP(r5, GlobalObjectOperand()); 128 __ LoadP(r5, GlobalObjectOperand());
130 __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset)); 129 __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
131 130
(...skipping 633 matching lines...)
765 int deoptimization_index = deoptimizations_.length(); 764 int deoptimization_index = deoptimizations_.length();
766 int pc_offset = masm()->pc_offset(); 765 int pc_offset = masm()->pc_offset();
767 environment->Register(deoptimization_index, translation.index(), 766 environment->Register(deoptimization_index, translation.index(),
768 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 767 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
769 deoptimizations_.Add(environment, zone()); 768 deoptimizations_.Add(environment, zone());
770 } 769 }
771 } 770 }
772 771
773 772
774 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr, 773 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
775 const char* detail, 774 Deoptimizer::DeoptReason deopt_reason,
776 Deoptimizer::BailoutType bailout_type, 775 Deoptimizer::BailoutType bailout_type,
777 CRegister cr) { 776 CRegister cr) {
778 LEnvironment* environment = instr->environment(); 777 LEnvironment* environment = instr->environment();
779 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 778 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
780 DCHECK(environment->HasBeenRegistered()); 779 DCHECK(environment->HasBeenRegistered());
781 int id = environment->deoptimization_index(); 780 int id = environment->deoptimization_index();
782 DCHECK(info()->IsOptimizing() || info()->IsStub()); 781 DCHECK(info()->IsOptimizing() || info()->IsStub());
783 Address entry = 782 Address entry =
784 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 783 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
785 if (entry == NULL) { 784 if (entry == NULL) {
(...skipping 21 matching lines...)
807 __ bind(&no_deopt); 806 __ bind(&no_deopt);
808 __ stw(r4, MemOperand(scratch)); 807 __ stw(r4, MemOperand(scratch));
809 __ Pop(r4, scratch); 808 __ Pop(r4, scratch);
810 } 809 }
811 810
812 if (info()->ShouldTrapOnDeopt()) { 811 if (info()->ShouldTrapOnDeopt()) {
813 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr); 812 __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
814 } 813 }
815 814
816 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), 815 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
817 instr->Mnemonic(), detail); 816 instr->Mnemonic(), deopt_reason);
818 DCHECK(info()->IsStub() || frame_is_built_); 817 DCHECK(info()->IsStub() || frame_is_built_);
819 // Go through jump table if we need to handle condition, build frame, or 818 // Go through jump table if we need to handle condition, build frame, or
820 // restore caller doubles. 819 // restore caller doubles.
821 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) { 820 if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
822 DeoptComment(reason); 821 DeoptComment(reason);
823 __ Call(entry, RelocInfo::RUNTIME_ENTRY); 822 __ Call(entry, RelocInfo::RUNTIME_ENTRY);
824 } else { 823 } else {
825 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, 824 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
826 !frame_is_built_); 825 !frame_is_built_);
827 // We often have several deopts to the same entry, reuse the last 826 // We often have several deopts to the same entry, reuse the last
828 // jump entry if this is the case. 827 // jump entry if this is the case.
829 if (jump_table_.is_empty() || 828 if (jump_table_.is_empty() ||
830 !table_entry.IsEquivalentTo(jump_table_.last())) { 829 !table_entry.IsEquivalentTo(jump_table_.last())) {
831 jump_table_.Add(table_entry, zone()); 830 jump_table_.Add(table_entry, zone());
832 } 831 }
833 __ b(cond, &jump_table_.last().label, cr); 832 __ b(cond, &jump_table_.last().label, cr);
834 } 833 }
835 } 834 }
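
Note on the DeoptimizeIf signature change above: the port moves from a free-form `const char* detail` to the shared `Deoptimizer::DeoptReason` enum. A minimal sketch of why an enum works better here (illustrative names only, not the exact V8 definitions): a small scalar reason can be compared cheaply when checking whether a jump-table entry can be reused, and the printable text lives in one table instead of scattered string literals.

    // Illustrative sketch, not V8's actual declarations.
    enum class DeoptReason { kDivisionByZero, kMinusZero, kOverflow, kHole };

    // One table of printable names; call sites pass only the enum value.
    const char* DeoptReasonToString(DeoptReason reason) {
      switch (reason) {
        case DeoptReason::kDivisionByZero: return "division by zero";
        case DeoptReason::kMinusZero:      return "minus zero";
        case DeoptReason::kOverflow:       return "overflow";
        case DeoptReason::kHole:           return "hole";
      }
      return "";
    }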
836 835
837 836
838 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 837 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
839 const char* detail, CRegister cr) { 838 Deoptimizer::DeoptReason deopt_reason,
839 CRegister cr) {
840 Deoptimizer::BailoutType bailout_type = 840 Deoptimizer::BailoutType bailout_type =
841 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; 841 info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
842 DeoptimizeIf(condition, instr, detail, bailout_type, cr); 842 DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
843 } 843 }
844 844
845 845
846 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 846 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
847 int length = deoptimizations_.length(); 847 int length = deoptimizations_.length();
848 if (length == 0) return; 848 if (length == 0) return;
849 Handle<DeoptimizationInputData> data = 849 Handle<DeoptimizationInputData> data =
850 DeoptimizationInputData::New(isolate(), length, TENURED); 850 DeoptimizationInputData::New(isolate(), length, TENURED);
851 851
852 Handle<ByteArray> translations = 852 Handle<ByteArray> translations =
853 translations_.CreateByteArray(isolate()->factory()); 853 translations_.CreateByteArray(isolate()->factory());
854 data->SetTranslationByteArray(*translations); 854 data->SetTranslationByteArray(*translations);
855 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); 855 data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
856 data->SetOptimizationId(Smi::FromInt(info_->optimization_id())); 856 data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
857 if (info_->IsOptimizing()) { 857 if (info_->IsOptimizing()) {
858 // Reference to shared function info does not change between phases. 858 // Reference to shared function info does not change between phases.
859 AllowDeferredHandleDereference allow_handle_dereference; 859 AllowDeferredHandleDereference allow_handle_dereference;
860 data->SetSharedFunctionInfo(*info_->shared_info()); 860 data->SetSharedFunctionInfo(*info_->shared_info());
861 } else { 861 } else {
862 data->SetSharedFunctionInfo(Smi::FromInt(0)); 862 data->SetSharedFunctionInfo(Smi::FromInt(0));
863 } 863 }
864 data->SetWeakCellCache(Smi::FromInt(0));
864 865
865 Handle<FixedArray> literals = 866 Handle<FixedArray> literals =
866 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); 867 factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
867 { 868 {
868 AllowDeferredHandleDereference copy_handles; 869 AllowDeferredHandleDereference copy_handles;
869 for (int i = 0; i < deoptimization_literals_.length(); i++) { 870 for (int i = 0; i < deoptimization_literals_.length(); i++) {
870 literals->set(i, *deoptimization_literals_[i]); 871 literals->set(i, *deoptimization_literals_[i]);
871 } 872 }
872 data->SetLiteralArray(*literals); 873 data->SetLiteralArray(*literals);
873 } 874 }
(...skipping 238 matching lines...)
1112 } 1113 }
1113 } 1114 }
1114 1115
1115 1116
1116 void LCodeGen::DoModI(LModI* instr) { 1117 void LCodeGen::DoModI(LModI* instr) {
1117 HMod* hmod = instr->hydrogen(); 1118 HMod* hmod = instr->hydrogen();
1118 Register left_reg = ToRegister(instr->left()); 1119 Register left_reg = ToRegister(instr->left());
1119 Register right_reg = ToRegister(instr->right()); 1120 Register right_reg = ToRegister(instr->right());
1120 Register result_reg = ToRegister(instr->result()); 1121 Register result_reg = ToRegister(instr->result());
1121 Register scratch = scratch0(); 1122 Register scratch = scratch0();
1123 bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
1122 Label done; 1124 Label done;
1123 1125
1124 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1126 if (can_overflow) {
1125 __ li(r0, Operand::Zero()); // clear xer 1127 __ li(r0, Operand::Zero()); // clear xer
1126 __ mtxer(r0); 1128 __ mtxer(r0);
1127 } 1129 }
1128 1130
1129 __ divw(scratch, left_reg, right_reg, SetOE, SetRC); 1131 __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
1130 1132
1131 // Check for x % 0. 1133 // Check for x % 0.
1132 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1134 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1133 __ cmpwi(right_reg, Operand::Zero()); 1135 __ cmpwi(right_reg, Operand::Zero());
1134 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1136 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1135 } 1137 }
1136 1138
1137 // Check for kMinInt % -1, divw will return undefined, which is not what we 1139 // Check for kMinInt % -1, divw will return undefined, which is not what we
1138 // want. We have to deopt if we care about -0, because we can't return that. 1140 // want. We have to deopt if we care about -0, because we can't return that.
1139 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1141 if (can_overflow) {
1140 Label no_overflow_possible;
1141 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1142 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1142 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0); 1143 DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
1143 } else { 1144 } else {
1144 __ bnooverflow(&no_overflow_possible, cr0); 1145 if (CpuFeatures::IsSupported(ISELECT)) {
1145 __ li(result_reg, Operand::Zero()); 1146 __ isel(overflow, result_reg, r0, result_reg, cr0);
1146 __ b(&done); 1147 __ boverflow(&done, cr0);
1148 } else {
1149 Label no_overflow_possible;
1150 __ bnooverflow(&no_overflow_possible, cr0);
1151 __ li(result_reg, Operand::Zero());
1152 __ b(&done);
1153 __ bind(&no_overflow_possible);
1154 }
1147 } 1155 }
1148 __ bind(&no_overflow_possible);
1149 } 1156 }
1150 1157
1151 __ mullw(scratch, right_reg, scratch); 1158 __ mullw(scratch, right_reg, scratch);
1152 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); 1159 __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
1153 1160
1154 // If we care about -0, test if the dividend is <0 and the result is 0. 1161 // If we care about -0, test if the dividend is <0 and the result is 0.
1155 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1162 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1156 __ bne(&done, cr0); 1163 __ bne(&done, cr0);
1157 __ cmpwi(left_reg, Operand::Zero()); 1164 __ cmpwi(left_reg, Operand::Zero());
1158 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1165 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
(...skipping 81 matching lines...)
1240 } 1247 }
1241 } 1248 }
1242 1249
1243 1250
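
Several chunks in this patch set guard a new branchless sequence with `CpuFeatures::IsSupported(ISELECT)`. The Power ISA `isel` instruction copies one of two source registers into the destination based on a condition-register bit, so the kMinInt % -1 case in DoModI above can produce its zero result without a taken branch. A rough C++ model of the selection (the operand roles mirror the `isel(overflow, result_reg, r0, result_reg, cr0)` call, where r0 still holds the zero used to clear XER):

    // Branchless model of: result = overflow_bit ? zero : result.
    // Pre-ISELECT cores fall back to branching around an explicit li.
    int32_t SelectOnOverflow(bool overflow_bit, int32_t zero, int32_t result) {
      return overflow_bit ? zero : result;  // one isel, no branch to mispredict
    }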
1244 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1251 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1245 void LCodeGen::DoDivI(LDivI* instr) { 1252 void LCodeGen::DoDivI(LDivI* instr) {
1246 HBinaryOperation* hdiv = instr->hydrogen(); 1253 HBinaryOperation* hdiv = instr->hydrogen();
1247 const Register dividend = ToRegister(instr->dividend()); 1254 const Register dividend = ToRegister(instr->dividend());
1248 const Register divisor = ToRegister(instr->divisor()); 1255 const Register divisor = ToRegister(instr->divisor());
1249 Register result = ToRegister(instr->result()); 1256 Register result = ToRegister(instr->result());
1257 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1250 1258
1251 DCHECK(!dividend.is(result)); 1259 DCHECK(!dividend.is(result));
1252 DCHECK(!divisor.is(result)); 1260 DCHECK(!divisor.is(result));
1253 1261
1254 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1262 if (can_overflow) {
1255 __ li(r0, Operand::Zero()); // clear xer 1263 __ li(r0, Operand::Zero()); // clear xer
1256 __ mtxer(r0); 1264 __ mtxer(r0);
1257 } 1265 }
1258 1266
1259 __ divw(result, dividend, divisor, SetOE, SetRC); 1267 __ divw(result, dividend, divisor, SetOE, SetRC);
1260 1268
1261 // Check for x / 0. 1269 // Check for x / 0.
1262 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1270 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1263 __ cmpwi(divisor, Operand::Zero()); 1271 __ cmpwi(divisor, Operand::Zero());
1264 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1272 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1265 } 1273 }
1266 1274
1267 // Check for (0 / -x) that will produce negative zero. 1275 // Check for (0 / -x) that will produce negative zero.
1268 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1276 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1269 Label dividend_not_zero; 1277 Label dividend_not_zero;
1270 __ cmpwi(dividend, Operand::Zero()); 1278 __ cmpwi(dividend, Operand::Zero());
1271 __ bne(&dividend_not_zero); 1279 __ bne(&dividend_not_zero);
1272 __ cmpwi(divisor, Operand::Zero()); 1280 __ cmpwi(divisor, Operand::Zero());
1273 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1281 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1274 __ bind(&dividend_not_zero); 1282 __ bind(&dividend_not_zero);
1275 } 1283 }
1276 1284
1277 // Check for (kMinInt / -1). 1285 // Check for (kMinInt / -1).
1278 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1286 if (can_overflow) {
1279 Label no_overflow_possible;
1280 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1287 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1281 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1288 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1282 } else { 1289 } else {
1283 // When truncating, we want kMinInt / -1 = kMinInt. 1290 // When truncating, we want kMinInt / -1 = kMinInt.
1284 __ bnooverflow(&no_overflow_possible, cr0); 1291 if (CpuFeatures::IsSupported(ISELECT)) {
1285 __ mr(result, dividend); 1292 __ isel(overflow, result, dividend, result, cr0);
1293 } else {
1294 Label no_overflow_possible;
1295 __ bnooverflow(&no_overflow_possible, cr0);
1296 __ mr(result, dividend);
1297 __ bind(&no_overflow_possible);
1298 }
1286 } 1299 }
1287 __ bind(&no_overflow_possible);
1288 } 1300 }
1289 1301
1290 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1302 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1291 // Deoptimize if remainder is not 0. 1303 // Deoptimize if remainder is not 0.
1292 Register scratch = scratch0(); 1304 Register scratch = scratch0();
1293 __ mullw(scratch, divisor, result); 1305 __ mullw(scratch, divisor, result);
1294 __ cmpw(dividend, scratch); 1306 __ cmpw(dividend, scratch);
1295 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); 1307 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
1296 } 1308 }
1297 } 1309 }
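
The kMinInt / -1 guard in DoDivI exists because negating INT32_MIN does not fit in 32 bits; `divw` flags the overflow instead of producing a usable quotient. When every use truncates to int32, JavaScript's `(a / b) | 0` semantics expect kMinInt back, which is why the truncating path selects `dividend` as the result. A small reference model in plain C++ (the explicit guard avoids undefined behavior that the hardware path handles via the overflow bit):

    #include <cstdint>

    // Truncating division with JS (a / b) | 0 semantics.
    int32_t TruncatingDiv(int32_t a, int32_t b) {
      if (a == INT32_MIN && b == -1) return INT32_MIN;  // 2^31 wraps back to kMinInt
      return a / b;
    }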
1298 1310
1299 1311
1300 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1312 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1301 HBinaryOperation* hdiv = instr->hydrogen(); 1313 HBinaryOperation* hdiv = instr->hydrogen();
1302 Register dividend = ToRegister(instr->dividend()); 1314 Register dividend = ToRegister(instr->dividend());
1303 Register result = ToRegister(instr->result()); 1315 Register result = ToRegister(instr->result());
1304 int32_t divisor = instr->divisor(); 1316 int32_t divisor = instr->divisor();
1317 bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
1305 1318
1306 // If the divisor is positive, things are easy: There can be no deopts and we 1319 // If the divisor is positive, things are easy: There can be no deopts and we
1307 // can simply do an arithmetic right shift. 1320 // can simply do an arithmetic right shift.
1308 int32_t shift = WhichPowerOf2Abs(divisor); 1321 int32_t shift = WhichPowerOf2Abs(divisor);
1309 if (divisor > 0) { 1322 if (divisor > 0) {
1310 if (shift || !result.is(dividend)) { 1323 if (shift || !result.is(dividend)) {
1311 __ srawi(result, dividend, shift); 1324 __ srawi(result, dividend, shift);
1312 } 1325 }
1313 return; 1326 return;
1314 } 1327 }
1315 1328
1316 // If the divisor is negative, we have to negate and handle edge cases. 1329 // If the divisor is negative, we have to negate and handle edge cases.
1317 OEBit oe = LeaveOE; 1330 OEBit oe = LeaveOE;
1318 #if V8_TARGET_ARCH_PPC64 1331 #if V8_TARGET_ARCH_PPC64
1319 if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { 1332 if (divisor == -1 && can_overflow) {
1320 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 1333 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
1321 __ cmpw(dividend, r0); 1334 __ cmpw(dividend, r0);
1322 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); 1335 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
1323 } 1336 }
1324 #else 1337 #else
1325 if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { 1338 if (can_overflow) {
1326 __ li(r0, Operand::Zero()); // clear xer 1339 __ li(r0, Operand::Zero()); // clear xer
1327 __ mtxer(r0); 1340 __ mtxer(r0);
1328 oe = SetOE; 1341 oe = SetOE;
1329 } 1342 }
1330 #endif 1343 #endif
1331 1344
1332 __ neg(result, dividend, oe, SetRC); 1345 __ neg(result, dividend, oe, SetRC);
1333 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1346 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1334 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0); 1347 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
1335 } 1348 }
1336 1349
1337 // If the negation could not overflow, simply shifting is OK. 1350 // If the negation could not overflow, simply shifting is OK.
1338 #if !V8_TARGET_ARCH_PPC64 1351 #if !V8_TARGET_ARCH_PPC64
1339 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1352 if (!can_overflow) {
1340 #endif 1353 #endif
1341 if (shift) { 1354 if (shift) {
1342 __ ShiftRightArithImm(result, result, shift); 1355 __ ShiftRightArithImm(result, result, shift);
1343 } 1356 }
1344 return; 1357 return;
1345 #if !V8_TARGET_ARCH_PPC64 1358 #if !V8_TARGET_ARCH_PPC64
1346 } 1359 }
1347 1360
1348 // Dividing by -1 is basically negation, unless we overflow. 1361 // Dividing by -1 is basically negation, unless we overflow.
1349 if (divisor == -1) { 1362 if (divisor == -1) {
(...skipping 57 matching lines...)
1407 __ bind(&done); 1420 __ bind(&done);
1408 } 1421 }
1409 1422
1410 1423
1411 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1424 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1412 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1425 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1413 HBinaryOperation* hdiv = instr->hydrogen(); 1426 HBinaryOperation* hdiv = instr->hydrogen();
1414 const Register dividend = ToRegister(instr->dividend()); 1427 const Register dividend = ToRegister(instr->dividend());
1415 const Register divisor = ToRegister(instr->divisor()); 1428 const Register divisor = ToRegister(instr->divisor());
1416 Register result = ToRegister(instr->result()); 1429 Register result = ToRegister(instr->result());
1430 bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
1417 1431
1418 DCHECK(!dividend.is(result)); 1432 DCHECK(!dividend.is(result));
1419 DCHECK(!divisor.is(result)); 1433 DCHECK(!divisor.is(result));
1420 1434
1421 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1435 if (can_overflow) {
1422 __ li(r0, Operand::Zero()); // clear xer 1436 __ li(r0, Operand::Zero()); // clear xer
1423 __ mtxer(r0); 1437 __ mtxer(r0);
1424 } 1438 }
1425 1439
1426 __ divw(result, dividend, divisor, SetOE, SetRC); 1440 __ divw(result, dividend, divisor, SetOE, SetRC);
1427 1441
1428 // Check for x / 0. 1442 // Check for x / 0.
1429 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1443 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1430 __ cmpwi(divisor, Operand::Zero()); 1444 __ cmpwi(divisor, Operand::Zero());
1431 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); 1445 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
1432 } 1446 }
1433 1447
1434 // Check for (0 / -x) that will produce negative zero. 1448 // Check for (0 / -x) that will produce negative zero.
1435 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1449 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1436 Label dividend_not_zero; 1450 Label dividend_not_zero;
1437 __ cmpwi(dividend, Operand::Zero()); 1451 __ cmpwi(dividend, Operand::Zero());
1438 __ bne(&dividend_not_zero); 1452 __ bne(&dividend_not_zero);
1439 __ cmpwi(divisor, Operand::Zero()); 1453 __ cmpwi(divisor, Operand::Zero());
1440 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 1454 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
1441 __ bind(&dividend_not_zero); 1455 __ bind(&dividend_not_zero);
1442 } 1456 }
1443 1457
1444 // Check for (kMinInt / -1). 1458 // Check for (kMinInt / -1).
1445 if (hdiv->CheckFlag(HValue::kCanOverflow)) { 1459 if (can_overflow) {
1446 Label no_overflow_possible;
1447 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1460 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1448 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1461 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1449 } else { 1462 } else {
1450 // When truncating, we want kMinInt / -1 = kMinInt. 1463 // When truncating, we want kMinInt / -1 = kMinInt.
1451 __ bnooverflow(&no_overflow_possible, cr0); 1464 if (CpuFeatures::IsSupported(ISELECT)) {
1452 __ mr(result, dividend); 1465 __ isel(overflow, result, dividend, result, cr0);
1466 } else {
1467 Label no_overflow_possible;
1468 __ bnooverflow(&no_overflow_possible, cr0);
1469 __ mr(result, dividend);
1470 __ bind(&no_overflow_possible);
1471 }
1453 } 1472 }
1454 __ bind(&no_overflow_possible);
1455 } 1473 }
1456 1474
1457 Label done; 1475 Label done;
1458 Register scratch = scratch0(); 1476 Register scratch = scratch0();
1459 // If both operands have the same sign then we are done. 1477 // If both operands have the same sign then we are done.
1460 #if V8_TARGET_ARCH_PPC64 1478 #if V8_TARGET_ARCH_PPC64
1461 __ xor_(scratch, dividend, divisor); 1479 __ xor_(scratch, dividend, divisor);
1462 __ cmpwi(scratch, Operand::Zero()); 1480 __ cmpwi(scratch, Operand::Zero());
1463 __ bge(&done); 1481 __ bge(&done);
1464 #else 1482 #else
(...skipping 59 matching lines...)
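
DoFlooringDivI implements Math.floor-style division, which differs from `divw`'s truncation only when the operand signs differ and the remainder is nonzero; the `xor_`/sign test at the end of the chunk above is what detects that case before adjusting the quotient. A compact reference model:

    #include <cstdint>

    // Truncation rounds toward zero, flooring toward minus infinity:
    //   -7 / 2 == -3 (divw), but Math.floor(-7 / 2) == -4.
    int32_t FlooringDiv(int32_t a, int32_t b) {
      int32_t q = a / b;                        // truncating quotient, as divw computes
      if ((a % b) != 0 && ((a ^ b) < 0)) --q;   // signs differ and remainder nonzero
      return q;
    }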
1524 #if V8_TARGET_ARCH_PPC64 1542 #if V8_TARGET_ARCH_PPC64
1525 if (instr->hydrogen()->representation().IsSmi()) { 1543 if (instr->hydrogen()->representation().IsSmi()) {
1526 #endif 1544 #endif
1527 __ li(r0, Operand::Zero()); // clear xer 1545 __ li(r0, Operand::Zero()); // clear xer
1528 __ mtxer(r0); 1546 __ mtxer(r0);
1529 __ neg(result, left, SetOE, SetRC); 1547 __ neg(result, left, SetOE, SetRC);
1530 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0); 1548 DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
1531 #if V8_TARGET_ARCH_PPC64 1549 #if V8_TARGET_ARCH_PPC64
1532 } else { 1550 } else {
1533 __ neg(result, left); 1551 __ neg(result, left);
1534 __ TestIfInt32(result, scratch, r0); 1552 __ TestIfInt32(result, r0);
1535 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1553 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1536 } 1554 }
1537 #endif 1555 #endif
1538 } else { 1556 } else {
1539 __ neg(result, left); 1557 __ neg(result, left);
1540 } 1558 }
1541 break; 1559 break;
1542 case 0: 1560 case 0:
1543 if (bailout_on_minus_zero) { 1561 if (bailout_on_minus_zero) {
1544 // If left is strictly negative and the constant is null, the 1562 // If left is strictly negative and the constant is null, the
(...skipping 52 matching lines...)
1597 if (can_overflow) { 1615 if (can_overflow) {
1598 #if V8_TARGET_ARCH_PPC64 1616 #if V8_TARGET_ARCH_PPC64
1599 // result = left * right. 1617 // result = left * right.
1600 if (instr->hydrogen()->representation().IsSmi()) { 1618 if (instr->hydrogen()->representation().IsSmi()) {
1601 __ SmiUntag(result, left); 1619 __ SmiUntag(result, left);
1602 __ SmiUntag(scratch, right); 1620 __ SmiUntag(scratch, right);
1603 __ Mul(result, result, scratch); 1621 __ Mul(result, result, scratch);
1604 } else { 1622 } else {
1605 __ Mul(result, left, right); 1623 __ Mul(result, left, right);
1606 } 1624 }
1607 __ TestIfInt32(result, scratch, r0); 1625 __ TestIfInt32(result, r0);
1608 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); 1626 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1609 if (instr->hydrogen()->representation().IsSmi()) { 1627 if (instr->hydrogen()->representation().IsSmi()) {
1610 __ SmiTag(result); 1628 __ SmiTag(result);
1611 } 1629 }
1612 #else 1630 #else
1613 // scratch:result = left * right. 1631 // scratch:result = left * right.
1614 if (instr->hydrogen()->representation().IsSmi()) { 1632 if (instr->hydrogen()->representation().IsSmi()) {
1615 __ SmiUntag(result, left); 1633 __ SmiUntag(result, left);
1616 __ mulhw(scratch, result, right); 1634 __ mulhw(scratch, result, right);
1617 __ mullw(result, result, right); 1635 __ mullw(result, result, right);
(...skipping 192 matching lines...)
1810 } 1828 }
1811 } 1829 }
1812 } 1830 }
1813 1831
1814 1832
1815 void LCodeGen::DoSubI(LSubI* instr) { 1833 void LCodeGen::DoSubI(LSubI* instr) {
1816 LOperand* right = instr->right(); 1834 LOperand* right = instr->right();
1817 Register left = ToRegister(instr->left()); 1835 Register left = ToRegister(instr->left());
1818 Register result = ToRegister(instr->result()); 1836 Register result = ToRegister(instr->result());
1819 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1837 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1820 if (!can_overflow) { 1838 #if V8_TARGET_ARCH_PPC64
1839 const bool isInteger = !instr->hydrogen()->representation().IsSmi();
1840 #else
1841 const bool isInteger = false;
1842 #endif
1843 if (!can_overflow || isInteger) {
1821 if (right->IsConstantOperand()) { 1844 if (right->IsConstantOperand()) {
1822 __ Add(result, left, -(ToOperand(right).immediate()), r0); 1845 __ Add(result, left, -(ToOperand(right).immediate()), r0);
1823 } else { 1846 } else {
1824 __ sub(result, left, EmitLoadRegister(right, ip)); 1847 __ sub(result, left, EmitLoadRegister(right, ip));
1825 } 1848 }
1849 #if V8_TARGET_ARCH_PPC64
1850 if (can_overflow) {
1851 __ TestIfInt32(result, r0);
1852 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
1853 }
1854 #endif
1826 } else { 1855 } else {
1827 if (right->IsConstantOperand()) { 1856 if (right->IsConstantOperand()) {
1828 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()), 1857 __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
1829 scratch0(), r0); 1858 scratch0(), r0);
1830 } else { 1859 } else {
1831 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), 1860 __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
1832 scratch0(), r0); 1861 scratch0(), r0);
1833 } 1862 }
1834 // Doptimize on overflow
1835 #if V8_TARGET_ARCH_PPC64
1836 if (!instr->hydrogen()->representation().IsSmi()) {
1837 __ extsw(scratch0(), scratch0(), SetRC);
1838 }
1839 #endif
1840 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 1863 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
1841 } 1864 }
1842
1843 #if V8_TARGET_ARCH_PPC64
1844 if (!instr->hydrogen()->representation().IsSmi()) {
1845 __ extsw(result, result);
1846 }
1847 #endif
1848 } 1865 }
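
On PPC64 the rewritten DoSubI skips SubAndCheckForOverflow entirely: with int32 operands held sign-extended in 64-bit registers, a plain 64-bit `sub` can never wrap, so 32-bit overflow reduces to "the result no longer sign-extends from bit 31", which is what `TestIfInt32` checks before the deopt. A sketch of the predicate (hedged; the real macro computes the same answer with a sign-extension compare and leaves it in a condition register for DeoptimizeIf):

    #include <cstdint>

    // True iff a 64-bit result is representable as int32, i.e. the
    // operation did not overflow in the 32-bit sense.
    bool FitsInt32(int64_t result) {
      return result == static_cast<int32_t>(result);
    }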
1849 1866
1850 1867
1851 void LCodeGen::DoRSubI(LRSubI* instr) { 1868 void LCodeGen::DoRSubI(LRSubI* instr) {
1852 LOperand* left = instr->left(); 1869 LOperand* left = instr->left();
1853 LOperand* right = instr->right(); 1870 LOperand* right = instr->right();
1854 LOperand* result = instr->result(); 1871 LOperand* result = instr->result();
1855 1872
1856 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && 1873 DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
1857 right->IsConstantOperand()); 1874 right->IsConstantOperand());
(...skipping 11 matching lines...)
1869 void LCodeGen::DoConstantI(LConstantI* instr) { 1886 void LCodeGen::DoConstantI(LConstantI* instr) {
1870 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1887 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1871 } 1888 }
1872 1889
1873 1890
1874 void LCodeGen::DoConstantS(LConstantS* instr) { 1891 void LCodeGen::DoConstantS(LConstantS* instr) {
1875 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); 1892 __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
1876 } 1893 }
1877 1894
1878 1895
1879 // TODO(penguin): put const to constant pool instead
1880 // of storing double to stack
1881 void LCodeGen::DoConstantD(LConstantD* instr) { 1896 void LCodeGen::DoConstantD(LConstantD* instr) {
1882 DCHECK(instr->result()->IsDoubleRegister()); 1897 DCHECK(instr->result()->IsDoubleRegister());
1883 DoubleRegister result = ToDoubleRegister(instr->result()); 1898 DoubleRegister result = ToDoubleRegister(instr->result());
1899 #if V8_HOST_ARCH_IA32
1900 // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
1901 // builds.
1902 uint64_t bits = instr->bits();
1903 if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
1904 V8_UINT64_C(0x7FF0000000000000)) {
1905 uint32_t lo = static_cast<uint32_t>(bits);
1906 uint32_t hi = static_cast<uint32_t>(bits >> 32);
1907 __ mov(ip, Operand(lo));
1908 __ mov(scratch0(), Operand(hi));
1909 __ MovInt64ToDouble(result, scratch0(), ip);
1910 return;
1911 }
1912 #endif
1884 double v = instr->value(); 1913 double v = instr->value();
1885 __ LoadDoubleLiteral(result, v, scratch0()); 1914 __ LoadDoubleLiteral(result, v, scratch0());
1886 } 1915 }
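
The new V8_HOST_ARCH_IA32 block in DoConstantD sidesteps an x87 quirk in simulator builds: moving a signalling NaN through the x87 FPU sets the quiet bit, silently turning sNaN into qNaN. The mask test recognizes "exponent all ones, quiet bit clear" and routes such bit patterns through integer registers instead (the test also matches infinities, whose mantissa is zero, but they survive either path). Classification sketch, standard IEEE 754 double layout only:

    #include <cstdint>

    // Exponent bits 62..52 all set and the mantissa MSB (quiet bit) clear.
    // A true sNaN additionally needs a nonzero mantissa; matching infinity
    // too just sends it down the (equally correct) integer-register path.
    bool MightBeSignallingNaN(uint64_t bits) {
      return (bits & 0x7FF8000000000000ULL) == 0x7FF0000000000000ULL;
    }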
1887 1916
1888 1917
1889 void LCodeGen::DoConstantE(LConstantE* instr) { 1918 void LCodeGen::DoConstantE(LConstantE* instr) {
1890 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1919 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1891 } 1920 }
1892 1921
1893 1922
(...skipping 128 matching lines...)
2022 } 2051 }
2023 } 2052 }
2024 2053
2025 2054
2026 void LCodeGen::DoAddI(LAddI* instr) { 2055 void LCodeGen::DoAddI(LAddI* instr) {
2027 LOperand* right = instr->right(); 2056 LOperand* right = instr->right();
2028 Register left = ToRegister(instr->left()); 2057 Register left = ToRegister(instr->left());
2029 Register result = ToRegister(instr->result()); 2058 Register result = ToRegister(instr->result());
2030 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 2059 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
2031 #if V8_TARGET_ARCH_PPC64 2060 #if V8_TARGET_ARCH_PPC64
2032 bool isInteger = !(instr->hydrogen()->representation().IsSmi() || 2061 const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
2033 instr->hydrogen()->representation().IsExternal()); 2062 instr->hydrogen()->representation().IsExternal());
2063 #else
2064 const bool isInteger = false;
2034 #endif 2065 #endif
2035 2066
2036 if (!can_overflow) { 2067 if (!can_overflow || isInteger) {
2037 if (right->IsConstantOperand()) { 2068 if (right->IsConstantOperand()) {
2038 __ Add(result, left, ToOperand(right).immediate(), r0); 2069 __ Add(result, left, ToOperand(right).immediate(), r0);
2039 } else { 2070 } else {
2040 __ add(result, left, EmitLoadRegister(right, ip)); 2071 __ add(result, left, EmitLoadRegister(right, ip));
2041 } 2072 }
2073 #if V8_TARGET_ARCH_PPC64
2074 if (can_overflow) {
2075 __ TestIfInt32(result, r0);
2076 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
2077 }
2078 #endif
2042 } else { 2079 } else {
2043 if (right->IsConstantOperand()) { 2080 if (right->IsConstantOperand()) {
2044 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(), 2081 __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
2045 scratch0(), r0); 2082 scratch0(), r0);
2046 } else { 2083 } else {
2047 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip), 2084 __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
2048 scratch0(), r0); 2085 scratch0(), r0);
2049 } 2086 }
2050 // Doptimize on overflow
2051 #if V8_TARGET_ARCH_PPC64
2052 if (isInteger) {
2053 __ extsw(scratch0(), scratch0(), SetRC);
2054 }
2055 #endif
2056 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0); 2087 DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
2057 } 2088 }
2058
2059 #if V8_TARGET_ARCH_PPC64
2060 if (isInteger) {
2061 __ extsw(result, result);
2062 }
2063 #endif
2064 } 2089 }
2065 2090
2066 2091
2067 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 2092 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
2068 LOperand* left = instr->left(); 2093 LOperand* left = instr->left();
2069 LOperand* right = instr->right(); 2094 LOperand* right = instr->right();
2070 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 2095 HMathMinMax::Operation operation = instr->hydrogen()->operation();
2071 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; 2096 Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
2072 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 2097 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
2073 Register left_reg = ToRegister(left); 2098 Register left_reg = ToRegister(left);
2074 Register right_reg = EmitLoadRegister(right, ip); 2099 Register right_reg = EmitLoadRegister(right, ip);
2075 Register result_reg = ToRegister(instr->result()); 2100 Register result_reg = ToRegister(instr->result());
2076 Label return_left, done; 2101 Label return_left, done;
2077 #if V8_TARGET_ARCH_PPC64 2102 #if V8_TARGET_ARCH_PPC64
2078 if (instr->hydrogen_value()->representation().IsSmi()) { 2103 if (instr->hydrogen_value()->representation().IsSmi()) {
2079 #endif 2104 #endif
2080 __ cmp(left_reg, right_reg); 2105 __ cmp(left_reg, right_reg);
2081 #if V8_TARGET_ARCH_PPC64 2106 #if V8_TARGET_ARCH_PPC64
2082 } else { 2107 } else {
2083 __ cmpw(left_reg, right_reg); 2108 __ cmpw(left_reg, right_reg);
2084 } 2109 }
2085 #endif 2110 #endif
2086 __ b(cond, &return_left); 2111 if (CpuFeatures::IsSupported(ISELECT)) {
2087 __ Move(result_reg, right_reg); 2112 __ isel(cond, result_reg, left_reg, right_reg);
2088 __ b(&done); 2113 } else {
2089 __ bind(&return_left); 2114 __ b(cond, &return_left);
2090 __ Move(result_reg, left_reg); 2115 __ Move(result_reg, right_reg);
2091 __ bind(&done); 2116 __ b(&done);
2117 __ bind(&return_left);
2118 __ Move(result_reg, left_reg);
2119 __ bind(&done);
2120 }
2092 } else { 2121 } else {
2093 DCHECK(instr->hydrogen()->representation().IsDouble()); 2122 DCHECK(instr->hydrogen()->representation().IsDouble());
2094 DoubleRegister left_reg = ToDoubleRegister(left); 2123 DoubleRegister left_reg = ToDoubleRegister(left);
2095 DoubleRegister right_reg = ToDoubleRegister(right); 2124 DoubleRegister right_reg = ToDoubleRegister(right);
2096 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 2125 DoubleRegister result_reg = ToDoubleRegister(instr->result());
2097 Label check_nan_left, check_zero, return_left, return_right, done; 2126 Label check_nan_left, check_zero, return_left, return_right, done;
2098 __ fcmpu(left_reg, right_reg); 2127 __ fcmpu(left_reg, right_reg);
2099 __ bunordered(&check_nan_left); 2128 __ bunordered(&check_nan_left);
2100 __ beq(&check_zero); 2129 __ beq(&check_zero);
2101 __ b(cond, &return_left); 2130 __ b(cond, &return_left);
(...skipping 67 matching lines...)
2169 } 2198 }
2170 } 2199 }
2171 2200
2172 2201
2173 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { 2202 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
2174 DCHECK(ToRegister(instr->context()).is(cp)); 2203 DCHECK(ToRegister(instr->context()).is(cp));
2175 DCHECK(ToRegister(instr->left()).is(r4)); 2204 DCHECK(ToRegister(instr->left()).is(r4));
2176 DCHECK(ToRegister(instr->right()).is(r3)); 2205 DCHECK(ToRegister(instr->right()).is(r3));
2177 DCHECK(ToRegister(instr->result()).is(r3)); 2206 DCHECK(ToRegister(instr->result()).is(r3));
2178 2207
2179 Handle<Code> code = 2208 Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
2180 CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
2181 CallCode(code, RelocInfo::CODE_TARGET, instr); 2209 CallCode(code, RelocInfo::CODE_TARGET, instr);
2182 } 2210 }
2183 2211
2184 2212
2185 template <class InstrType> 2213 template <class InstrType>
2186 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) { 2214 void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
2187 int left_block = instr->TrueDestination(chunk_); 2215 int left_block = instr->TrueDestination(chunk_);
2188 int right_block = instr->FalseDestination(chunk_); 2216 int right_block = instr->FalseDestination(chunk_);
2189 2217
2190 int next_block = GetNextEmittedBlock(); 2218 int next_block = GetNextEmittedBlock();
(...skipping 617 matching lines...)
2808 2836
2809 2837
2810 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2838 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2811 DCHECK(ToRegister(instr->context()).is(cp)); 2839 DCHECK(ToRegister(instr->context()).is(cp));
2812 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3. 2840 DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3.
2813 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4. 2841 DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4.
2814 2842
2815 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); 2843 InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
2816 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 2844 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
2817 2845
2818 Label equal, done; 2846 if (CpuFeatures::IsSupported(ISELECT)) {
2819 __ cmpi(r3, Operand::Zero()); 2847 __ mov(r4, Operand(factory()->true_value()));
2820 __ beq(&equal); 2848 __ mov(r5, Operand(factory()->false_value()));
2821 __ mov(r3, Operand(factory()->false_value())); 2849 __ cmpi(r3, Operand::Zero());
2822 __ b(&done); 2850 __ isel(eq, r3, r4, r5);
2851 } else {
2852 Label equal, done;
2853 __ cmpi(r3, Operand::Zero());
2854 __ beq(&equal);
2855 __ mov(r3, Operand(factory()->false_value()));
2856 __ b(&done);
2823 2857
2824 __ bind(&equal); 2858 __ bind(&equal);
2825 __ mov(r3, Operand(factory()->true_value())); 2859 __ mov(r3, Operand(factory()->true_value()));
2826 __ bind(&done); 2860 __ bind(&done);
2861 }
2827 } 2862 }
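
DoInstanceOf uses the same ISELECT idiom to materialize a boolean object: both candidate values are loaded before the compare and `isel` picks one afterwards, producing the result with no branch at the cost of one extra mov. Pseudo-C++ of the selection (register roles as in the chunk above):

    #include <cstdint>

    // r3 = (stub_result == 0) ? true_value : false_value, branch-free.
    uintptr_t SelectBoolean(intptr_t stub_result,
                            uintptr_t true_value, uintptr_t false_value) {
      return stub_result == 0 ? true_value : false_value;  // cmpi + isel
    }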
2828 2863
2829 2864
2830 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { 2865 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
2831 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode { 2866 class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
2832 public: 2867 public:
2833 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, 2868 DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
2834 LInstanceOfKnownGlobal* instr) 2869 LInstanceOfKnownGlobal* instr)
2835 : LDeferredCode(codegen), instr_(instr) {} 2870 : LDeferredCode(codegen), instr_(instr) {}
2836 void Generate() OVERRIDE { 2871 void Generate() OVERRIDE {
(...skipping 78 matching lines...)
2915 flags | InstanceofStub::kCallSiteInlineCheck); 2950 flags | InstanceofStub::kCallSiteInlineCheck);
2916 flags = static_cast<InstanceofStub::Flags>( 2951 flags = static_cast<InstanceofStub::Flags>(
2917 flags | InstanceofStub::kReturnTrueFalseObject); 2952 flags | InstanceofStub::kReturnTrueFalseObject);
2918 InstanceofStub stub(isolate(), flags); 2953 InstanceofStub stub(isolate(), flags);
2919 2954
2920 PushSafepointRegistersScope scope(this); 2955 PushSafepointRegistersScope scope(this);
2921 LoadContextFromDeferred(instr->context()); 2956 LoadContextFromDeferred(instr->context());
2922 2957
2923 __ Move(InstanceofStub::right(), instr->function()); 2958 __ Move(InstanceofStub::right(), instr->function());
2924 // Include instructions below in delta: mov + call = mov + (mov + 2) 2959 // Include instructions below in delta: mov + call = mov + (mov + 2)
2925 static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2; 2960 static const int kAdditionalDelta = 2 * Assembler::kMovInstructions + 2;
2926 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; 2961 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2927 { 2962 {
2928 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 2963 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2964 if (Assembler::kMovInstructions != 1 &&
2965 is_int16(delta * Instruction::kInstrSize)) {
2966 // The following mov will be an li rather than a multi-instruction form
2967 delta -= Assembler::kMovInstructions - 1;
2968 }
2929 // r8 is used to communicate the offset to the location of the map check. 2969 // r8 is used to communicate the offset to the location of the map check.
2930 __ mov(r8, Operand(delta * Instruction::kInstrSize)); 2970 __ mov(r8, Operand(delta * Instruction::kInstrSize));
2931 } 2971 }
2932 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, 2972 CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
2933 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2973 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2934 DCHECK(delta == masm_->InstructionsGeneratedSince(map_check)); 2974 DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
2935 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2975 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2936 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2976 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2937 // Put the result value (r3) into the result register slot and 2977 // Put the result value (r3) into the result register slot and
2938 // restore all registers. 2978 // restore all registers.
2939 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result())); 2979 __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
2940 } 2980 }
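
The new BlockTrampolinePoolScope logic corrects `delta` for a self-referential subtlety: the `mov(r8, ...)` that reports the offset is itself counted in that offset. kAdditionalDelta assumes the mov takes kMovInstructions instructions, but when the final value fits a signed 16-bit immediate the assembler emits a single `li`, shortening the sequence by kMovInstructions - 1. A worked sketch of the adjustment (constants are hypothetical, only the arithmetic matters):

    // Suppose kMovInstructions == 2 and kInstrSize == 4, so kAdditionalDelta
    // pre-counts a 2-instruction mov. If the scaled delta fits in int16 the
    // mov will actually be one li, so drop the over-count of 1 instruction.
    int AdjustDelta(int delta, int kMovInstructions, int kInstrSize) {
      bool fits_li = delta * kInstrSize >= -32768 && delta * kInstrSize <= 32767;
      if (kMovInstructions != 1 && fits_li) delta -= kMovInstructions - 1;
      return delta;
    }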
2941 2981
2942 2982
2943 void LCodeGen::DoCmpT(LCmpT* instr) { 2983 void LCodeGen::DoCmpT(LCmpT* instr) {
2944 DCHECK(ToRegister(instr->context()).is(cp)); 2984 DCHECK(ToRegister(instr->context()).is(cp));
2945 Token::Value op = instr->op(); 2985 Token::Value op = instr->op();
2946 2986
2947 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code(); 2987 Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
2948 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2988 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2949 // This instruction also signals no smi code inlined 2989 // This instruction also signals no smi code inlined
2950 __ cmpi(r3, Operand::Zero()); 2990 __ cmpi(r3, Operand::Zero());
2951 2991
2952 Condition condition = ComputeCompareCondition(op); 2992 Condition condition = ComputeCompareCondition(op);
2953 Label true_value, done; 2993 if (CpuFeatures::IsSupported(ISELECT)) {
2994 __ LoadRoot(r4, Heap::kTrueValueRootIndex);
2995 __ LoadRoot(r5, Heap::kFalseValueRootIndex);
2996 __ isel(condition, ToRegister(instr->result()), r4, r5);
2997 } else {
2998 Label true_value, done;
2954 2999
2955 __ b(condition, &true_value); 3000 __ b(condition, &true_value);
2956 3001
2957 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); 3002 __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
2958 __ b(&done); 3003 __ b(&done);
2959 3004
2960 __ bind(&true_value); 3005 __ bind(&true_value);
2961 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); 3006 __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
2962 3007
2963 __ bind(&done); 3008 __ bind(&done);
3009 }
2964 } 3010 }
2965 3011
2966 3012
2967 void LCodeGen::DoReturn(LReturn* instr) { 3013 void LCodeGen::DoReturn(LReturn* instr) {
2968 if (FLAG_trace && info()->IsOptimizing()) { 3014 if (FLAG_trace && info()->IsOptimizing()) {
2969 // Push the return value on the stack as the parameter. 3015 // Push the return value on the stack as the parameter.
2970 // Runtime::TraceExit returns its parameter in r3. We're leaving the code 3016 // Runtime::TraceExit returns its parameter in r3. We're leaving the code
2971 // managed by the register allocator and tearing down the frame, it's 3017 // managed by the register allocator and tearing down the frame, it's
2972 // safe to write to the context register. 3018 // safe to write to the context register.
2973 __ push(r3); 3019 __ push(r3);
2974 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3020 __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2975 __ CallRuntime(Runtime::kTraceExit, 1); 3021 __ CallRuntime(Runtime::kTraceExit, 1);
2976 } 3022 }
2977 if (info()->saves_caller_doubles()) { 3023 if (info()->saves_caller_doubles()) {
2978 RestoreCallerDoubles(); 3024 RestoreCallerDoubles();
2979 } 3025 }
2980 int no_frame_start = -1; 3026 int no_frame_start = -1;
2981 if (instr->has_constant_parameter_count()) { 3027 if (instr->has_constant_parameter_count()) {
2982 int parameter_count = ToInteger32(instr->constant_parameter_count()); 3028 int parameter_count = ToInteger32(instr->constant_parameter_count());
2983 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 3029 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2984 if (NeedsEagerFrame()) { 3030 if (NeedsEagerFrame()) {
2985 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta); 3031 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
2986 } else if (sp_delta != 0) { 3032 } else if (sp_delta != 0) {
2987 __ addi(sp, sp, Operand(sp_delta)); 3033 __ addi(sp, sp, Operand(sp_delta));
2988 } 3034 }
2989 } else { 3035 } else {
3036 DCHECK(info()->IsStub()); // Functions would need to drop one more value.
2990 Register reg = ToRegister(instr->parameter_count()); 3037 Register reg = ToRegister(instr->parameter_count());
2991 // The argument count parameter is a smi 3038 // The argument count parameter is a smi
2992 if (NeedsEagerFrame()) { 3039 if (NeedsEagerFrame()) {
2993 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT); 3040 no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
2994 } 3041 }
2995 __ SmiToPtrArrayOffset(r0, reg); 3042 __ SmiToPtrArrayOffset(r0, reg);
2996 __ add(sp, sp, r0); 3043 __ add(sp, sp, r0);
2997 } 3044 }
2998 3045
2999 __ blr(); 3046 __ blr();
(...skipping 12 matching lines...)
3012 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3059 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3013 __ cmp(result, ip); 3060 __ cmp(result, ip);
3014 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3061 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3015 } 3062 }
3016 } 3063 }
3017 3064
3018 3065
3019 template <class T> 3066 template <class T>
3020 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 3067 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
3021 DCHECK(FLAG_vector_ics); 3068 DCHECK(FLAG_vector_ics);
3022 Register vector = ToRegister(instr->temp_vector()); 3069 Register vector_register = ToRegister(instr->temp_vector());
3023 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); 3070 Register slot_register = VectorLoadICDescriptor::SlotRegister();
3024 __ Move(vector, instr->hydrogen()->feedback_vector()); 3071 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
3072 DCHECK(slot_register.is(r3));
3073
3074 AllowDeferredHandleDereference vector_structure_check;
3075 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
3076 __ Move(vector_register, vector);
3025 // No need to allocate this register. 3077 // No need to allocate this register.
3026 DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3)); 3078 FeedbackVectorICSlot slot = instr->hydrogen()->slot();
3027 __ mov(VectorLoadICDescriptor::SlotRegister(), 3079 int index = vector->GetIndex(slot);
3028 Operand(Smi::FromInt(instr->hydrogen()->slot()))); 3080 __ mov(slot_register, Operand(Smi::FromInt(index)));
3029 } 3081 }
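
The rewritten EmitVectorLoadICRegisters no longer smis the raw slot id; it first resolves the FeedbackVectorICSlot to its element index inside the vector via GetIndex, then boxes that index. In outline (a sketch against the types used in this file; the helper name is illustrative, and the register contract is taken from the chunk above):

    // VectorRegister <- the function's TypeFeedbackVector
    // SlotRegister   <- Smi::FromInt(index), where index is:
    int SlotToVectorIndex(const TypeFeedbackVector* vector,
                          FeedbackVectorICSlot slot) {
      return vector->GetIndex(slot);  // slot id -> array element index
    }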
3030 3082
3031 3083
3032 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 3084 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
3033 DCHECK(ToRegister(instr->context()).is(cp)); 3085 DCHECK(ToRegister(instr->context()).is(cp));
3034 DCHECK(ToRegister(instr->global_object()) 3086 DCHECK(ToRegister(instr->global_object())
3035 .is(LoadDescriptor::ReceiverRegister())); 3087 .is(LoadDescriptor::ReceiverRegister()));
3036 DCHECK(ToRegister(instr->result()).is(r3)); 3088 DCHECK(ToRegister(instr->result()).is(r3));
3037 3089
3038 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name())); 3090 __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
(...skipping 30 matching lines...)
3069 // Cells are always rescanned, so no write barrier here. 3121 // Cells are always rescanned, so no write barrier here.
3070 } 3122 }
3071 3123
3072 3124
3073 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 3125 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
3074 Register context = ToRegister(instr->context()); 3126 Register context = ToRegister(instr->context());
3075 Register result = ToRegister(instr->result()); 3127 Register result = ToRegister(instr->result());
3076 __ LoadP(result, ContextOperand(context, instr->slot_index())); 3128 __ LoadP(result, ContextOperand(context, instr->slot_index()));
3077 if (instr->hydrogen()->RequiresHoleCheck()) { 3129 if (instr->hydrogen()->RequiresHoleCheck()) {
3078 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3130 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3079 __ cmp(result, ip);
3080 if (instr->hydrogen()->DeoptimizesOnHole()) { 3131 if (instr->hydrogen()->DeoptimizesOnHole()) {
3132 __ cmp(result, ip);
3081 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3133 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3082 } else { 3134 } else {
3083 Label skip; 3135 if (CpuFeatures::IsSupported(ISELECT)) {
3084 __ bne(&skip); 3136 Register scratch = scratch0();
3085 __ mov(result, Operand(factory()->undefined_value())); 3137 __ mov(scratch, Operand(factory()->undefined_value()));
3086 __ bind(&skip); 3138 __ cmp(result, ip);
3139 __ isel(eq, result, scratch, result);
3140 } else {
3141 Label skip;
3142 __ cmp(result, ip);
3143 __ bne(&skip);
3144 __ mov(result, Operand(factory()->undefined_value()));
3145 __ bind(&skip);
3146 }
3087 } 3147 }
3088 } 3148 }
3089 } 3149 }
3090 3150
3091 3151
3092 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 3152 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
3093 Register context = ToRegister(instr->context()); 3153 Register context = ToRegister(instr->context());
3094 Register value = ToRegister(instr->value()); 3154 Register value = ToRegister(instr->value());
3095 Register scratch = scratch0(); 3155 Register scratch = scratch0();
3096 MemOperand target = ContextOperand(context, instr->slot_index()); 3156 MemOperand target = ContextOperand(context, instr->slot_index());
(...skipping 31 matching lines...)
3128 Register object = ToRegister(instr->object()); 3188 Register object = ToRegister(instr->object());
3129 3189
3130 if (access.IsExternalMemory()) { 3190 if (access.IsExternalMemory()) {
3131 Register result = ToRegister(instr->result()); 3191 Register result = ToRegister(instr->result());
3132 MemOperand operand = MemOperand(object, offset); 3192 MemOperand operand = MemOperand(object, offset);
3133 __ LoadRepresentation(result, operand, access.representation(), r0); 3193 __ LoadRepresentation(result, operand, access.representation(), r0);
3134 return; 3194 return;
3135 } 3195 }
3136 3196
3137 if (instr->hydrogen()->representation().IsDouble()) { 3197 if (instr->hydrogen()->representation().IsDouble()) {
3198 DCHECK(access.IsInobject());
3138 DoubleRegister result = ToDoubleRegister(instr->result()); 3199 DoubleRegister result = ToDoubleRegister(instr->result());
3139 __ lfd(result, FieldMemOperand(object, offset)); 3200 __ lfd(result, FieldMemOperand(object, offset));
3140 return; 3201 return;
3141 } 3202 }
3142 3203
3143 Register result = ToRegister(instr->result()); 3204 Register result = ToRegister(instr->result());
3144 if (!access.IsInobject()) { 3205 if (!access.IsInobject()) {
3145 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 3206 __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
3146 object = result; 3207 object = result;
3147 } 3208 }
3148 3209
3149 Representation representation = access.representation(); 3210 Representation representation = access.representation();
3150 3211
3151 #if V8_TARGET_ARCH_PPC64 3212 #if V8_TARGET_ARCH_PPC64
3152 // 64-bit Smi optimization 3213 // 64-bit Smi optimization
3153 if (representation.IsSmi() && 3214 if (representation.IsSmi() &&
3154 instr->hydrogen()->representation().IsInteger32()) { 3215 instr->hydrogen()->representation().IsInteger32()) {
3155 // Read int value directly from upper half of the smi. 3216 // Read int value directly from upper half of the smi.
3156 STATIC_ASSERT(kSmiTag == 0); 3217 offset = SmiWordOffset(offset);
3157 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3158 #if V8_TARGET_LITTLE_ENDIAN
3159 offset += kPointerSize / 2;
3160 #endif
3161 representation = Representation::Integer32(); 3218 representation = Representation::Integer32();
3162 } 3219 }
3163 #endif 3220 #endif
3164 3221
3165 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, 3222 __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
3166 r0); 3223 r0);
3167 } 3224 }
3168 3225
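The repeated STATIC_ASSERT/endianness blocks are now folded into a SmiWordOffset helper. Reconstructing its likely shape from the code it replaces (a sketch, not the verbatim V8 definition): on 64-bit targets the 32-bit smi payload occupies the upper word, which sits kPointerSize/2 bytes further along on little-endian and at the base offset on big-endian.

#include <cassert>

const int kPointerSize = 8;  // 64-bit target assumed for this sketch

// Assumed shape of SmiWordOffset, reconstructed from the replaced code:
// adjust a field offset so a 32-bit load reads the smi's payload half.
inline int SmiWordOffset(int offset) {
#if defined(V8_TARGET_LITTLE_ENDIAN)
  return offset + kPointerSize / 2;  // upper word is at the higher address
#else
  return offset;                     // upper word is at the lower address
#endif
}

int main() {
  // On little-endian the payload half is 4 bytes past the field offset.
  int adjusted = SmiWordOffset(16);
  assert(adjusted == 16 || adjusted == 20);
  return 0;
}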
3169 3226
3170 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3227 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
(...skipping 19 matching lines...)
3190 // Get the prototype or initial map from the function. 3247 // Get the prototype or initial map from the function.
3191 __ LoadP(result, 3248 __ LoadP(result,
3192 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3249 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3193 3250
3194 // Check that the function has a prototype or an initial map. 3251 // Check that the function has a prototype or an initial map.
3195 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 3252 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
3196 __ cmp(result, ip); 3253 __ cmp(result, ip);
3197 DeoptimizeIf(eq, instr, Deoptimizer::kHole); 3254 DeoptimizeIf(eq, instr, Deoptimizer::kHole);
3198 3255
3199 // If the function does not have an initial map, we're done. 3256 // If the function does not have an initial map, we're done.
3200 Label done; 3257 if (CpuFeatures::IsSupported(ISELECT)) {
3201 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); 3258 // Get the prototype from the initial map (optimistic).
3202 __ bne(&done); 3259 __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
3260 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3261 __ isel(eq, result, ip, result);
3262 } else {
3263 Label done;
3264 __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
3265 __ bne(&done);
3203 3266
3204 // Get the prototype from the initial map. 3267 // Get the prototype from the initial map.
3205 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3268 __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
3206 3269
3207 // All done. 3270 // All done.
3208 __ bind(&done); 3271 __ bind(&done);
3272 }
3209 } 3273 }
3210 3274
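The ISELECT variant here loads Map::kPrototypeOffset optimistically, before the CompareObjectType result is known. That is safe because `result` is a valid heap object either way, so the speculative load cannot fault; isel then discards the wrong candidate. A sketch of the pattern (illustrative, not V8 API):

#include <cassert>

// Both candidates are computed up front so a branchless select can pick
// one; the "optimistic" load is safe because it never faults.
int PrototypeOrSelf(bool is_initial_map, int loaded_prototype, int self) {
  return is_initial_map ? loaded_prototype  // isel eq, result, ip, result
                        : self;
}

int main() {
  assert(PrototypeOrSelf(true, 1, 2) == 1);
  assert(PrototypeOrSelf(false, 1, 2) == 2);
  return 0;
}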
3211 3275
3212 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3276 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3213 Register result = ToRegister(instr->result()); 3277 Register result = ToRegister(instr->result());
3214 __ LoadRoot(result, instr->index()); 3278 __ LoadRoot(result, instr->index());
3215 } 3279 }
3216 3280
3217 3281
3218 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3282 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
(...skipping 93 matching lines...)
3312 case UINT8_CLAMPED_ELEMENTS: 3376 case UINT8_CLAMPED_ELEMENTS:
3313 if (key_is_constant) { 3377 if (key_is_constant) {
3314 __ LoadByte(result, mem_operand, r0); 3378 __ LoadByte(result, mem_operand, r0);
3315 } else { 3379 } else {
3316 __ lbzx(result, mem_operand); 3380 __ lbzx(result, mem_operand);
3317 } 3381 }
3318 break; 3382 break;
3319 case EXTERNAL_INT16_ELEMENTS: 3383 case EXTERNAL_INT16_ELEMENTS:
3320 case INT16_ELEMENTS: 3384 case INT16_ELEMENTS:
3321 if (key_is_constant) { 3385 if (key_is_constant) {
3322 __ LoadHalfWord(result, mem_operand, r0); 3386 __ LoadHalfWordArith(result, mem_operand, r0);
3323 } else { 3387 } else {
3324 __ lhzx(result, mem_operand); 3388 __ lhax(result, mem_operand);
3325 } 3389 }
3326 __ extsh(result, result);
3327 break; 3390 break;
3328 case EXTERNAL_UINT16_ELEMENTS: 3391 case EXTERNAL_UINT16_ELEMENTS:
3329 case UINT16_ELEMENTS: 3392 case UINT16_ELEMENTS:
3330 if (key_is_constant) { 3393 if (key_is_constant) {
3331 __ LoadHalfWord(result, mem_operand, r0); 3394 __ LoadHalfWord(result, mem_operand, r0);
3332 } else { 3395 } else {
3333 __ lhzx(result, mem_operand); 3396 __ lhzx(result, mem_operand);
3334 } 3397 }
3335 break; 3398 break;
3336 case EXTERNAL_INT32_ELEMENTS: 3399 case EXTERNAL_INT32_ELEMENTS:
3337 case INT32_ELEMENTS: 3400 case INT32_ELEMENTS:
3338 if (key_is_constant) { 3401 if (key_is_constant) {
3339 __ LoadWord(result, mem_operand, r0); 3402 __ LoadWordArith(result, mem_operand, r0);
3340 } else { 3403 } else {
3341 __ lwzx(result, mem_operand); 3404 __ lwax(result, mem_operand);
3342 } 3405 }
3343 #if V8_TARGET_ARCH_PPC64
3344 __ extsw(result, result);
3345 #endif
3346 break; 3406 break;
3347 case EXTERNAL_UINT32_ELEMENTS: 3407 case EXTERNAL_UINT32_ELEMENTS:
3348 case UINT32_ELEMENTS: 3408 case UINT32_ELEMENTS:
3349 if (key_is_constant) { 3409 if (key_is_constant) {
3350 __ LoadWord(result, mem_operand, r0); 3410 __ LoadWord(result, mem_operand, r0);
3351 } else { 3411 } else {
3352 __ lwzx(result, mem_operand); 3412 __ lwzx(result, mem_operand);
3353 } 3413 }
3354 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3414 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3355 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); 3415 __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
(...skipping 94 matching lines...)
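
The INT16/INT32 cases above switch from a zero-extending load plus an explicit sign-extend (lhzx + extsh, and lwzx + extsw on PPC64) to the single arithmetic loads lhax/lwax (LoadHalfWordArith/LoadWordArith for constant keys), saving an instruction per element load. The equivalent semantics in portable C++, as a sanity check; the int32 case is analogous with int32_t:

#include <cassert>
#include <cstdint>
#include <cstring>

// Old sequence: zero-extending load (lhzx), then sign-extend (extsh).
int64_t LoadInt16TwoStep(const unsigned char* p) {
  uint16_t raw;
  std::memcpy(&raw, p, sizeof raw);  // lhzx: zero-extends
  return static_cast<int16_t>(raw);  // extsh: sign-extends
}

// New sequence: one arithmetic (sign-extending) load, lhax.
int64_t LoadInt16OneStep(const unsigned char* p) {
  int16_t raw;
  std::memcpy(&raw, p, sizeof raw);  // lhax: sign-extends directly
  return raw;
}

int main() {
  int16_t v = -42;
  unsigned char buf[2];
  std::memcpy(buf, &v, sizeof v);
  assert(LoadInt16TwoStep(buf) == LoadInt16OneStep(buf));
  return 0;
}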
3450 3510
3451 bool requires_hole_check = hinstr->RequiresHoleCheck(); 3511 bool requires_hole_check = hinstr->RequiresHoleCheck();
3452 Representation representation = hinstr->representation(); 3512 Representation representation = hinstr->representation();
3453 3513
3454 #if V8_TARGET_ARCH_PPC64 3514 #if V8_TARGET_ARCH_PPC64
3455 // 64-bit Smi optimization 3515 // 64-bit Smi optimization
3456 if (representation.IsInteger32() && 3516 if (representation.IsInteger32() &&
3457 hinstr->elements_kind() == FAST_SMI_ELEMENTS) { 3517 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3458 DCHECK(!requires_hole_check); 3518 DCHECK(!requires_hole_check);
3459 // Read int value directly from upper half of the smi. 3519 // Read int value directly from upper half of the smi.
3460 STATIC_ASSERT(kSmiTag == 0); 3520 offset = SmiWordOffset(offset);
3461 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3462 #if V8_TARGET_LITTLE_ENDIAN
3463 offset += kPointerSize / 2;
3464 #endif
3465 } 3521 }
3466 #endif 3522 #endif
3467 3523
3468 __ LoadRepresentation(result, MemOperand(store_base, offset), representation, 3524 __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
3469 r0); 3525 r0);
3470 3526
3471 // Check for the hole value. 3527 // Check for the hole value.
3472 if (requires_hole_check) { 3528 if (requires_hole_check) {
3473 if (IsFastSmiElementsKind(hinstr->elements_kind())) { 3529 if (IsFastSmiElementsKind(hinstr->elements_kind())) {
3474 __ TestIfSmi(result, r0); 3530 __ TestIfSmi(result, r0);
(...skipping 64 matching lines...)
3539 3595
3540 3596
3541 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3597 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3542 Register scratch = scratch0(); 3598 Register scratch = scratch0();
3543 Register result = ToRegister(instr->result()); 3599 Register result = ToRegister(instr->result());
3544 3600
3545 if (instr->hydrogen()->from_inlined()) { 3601 if (instr->hydrogen()->from_inlined()) {
3546 __ subi(result, sp, Operand(2 * kPointerSize)); 3602 __ subi(result, sp, Operand(2 * kPointerSize));
3547 } else { 3603 } else {
3548 // Check if the calling frame is an arguments adaptor frame. 3604 // Check if the calling frame is an arguments adaptor frame.
3549 Label done, adapted;
3550 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3605 __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3551 __ LoadP(result, 3606 __ LoadP(result,
3552 MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3607 MemOperand(scratch, StandardFrameConstants::kContextOffset));
3553 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); 3608 __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
3554 3609
3555 // Result is the frame pointer for the frame if not adapted and for the real 3610 // Result is the frame pointer for the frame if not adapted and for the real
3556 // frame below the adaptor frame if adapted. 3611 // frame below the adaptor frame if adapted.
3557 __ beq(&adapted); 3612 if (CpuFeatures::IsSupported(ISELECT)) {
3558 __ mr(result, fp); 3613 __ isel(eq, result, scratch, fp);
3559 __ b(&done); 3614 } else {
3615 Label done, adapted;
3616 __ beq(&adapted);
3617 __ mr(result, fp);
3618 __ b(&done);
3560 3619
3561 __ bind(&adapted); 3620 __ bind(&adapted);
3562 __ mr(result, scratch); 3621 __ mr(result, scratch);
3563 __ bind(&done); 3622 __ bind(&done);
3623 }
3564 } 3624 }
3565 } 3625 }
3566 3626
3567 3627
3568 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3628 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3569 Register elem = ToRegister(instr->elements()); 3629 Register elem = ToRegister(instr->elements());
3570 Register result = ToRegister(instr->result()); 3630 Register result = ToRegister(instr->result());
3571 3631
3572 Label done; 3632 Label done;
3573 3633
(...skipping 54 matching lines...)
3628 // Normal function. Replace undefined or null with global receiver. 3688 // Normal function. Replace undefined or null with global receiver.
3629 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3689 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3630 __ cmp(receiver, scratch); 3690 __ cmp(receiver, scratch);
3631 __ beq(&global_object); 3691 __ beq(&global_object);
3632 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3692 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3633 __ cmp(receiver, scratch); 3693 __ cmp(receiver, scratch);
3634 __ beq(&global_object); 3694 __ beq(&global_object);
3635 3695
3636 // Deoptimize if the receiver is not a JS object. 3696 // Deoptimize if the receiver is not a JS object.
3637 __ TestIfSmi(receiver, r0); 3697 __ TestIfSmi(receiver, r0);
3638 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); 3698 DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
3639 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); 3699 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
3640 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); 3700 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
3641 3701
3642 __ b(&result_in_receiver); 3702 __ b(&result_in_receiver);
3643 __ bind(&global_object); 3703 __ bind(&global_object);
3644 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3704 __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
3645 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3705 __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3646 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 3706 __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3647 if (result.is(receiver)) { 3707 if (result.is(receiver)) {
3648 __ bind(&result_in_receiver); 3708 __ bind(&result_in_receiver);
(...skipping 93 matching lines...)
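
The DeoptimizeIf for the smi check above now names cr0 explicitly: TestIfSmi uses a record-form instruction (an andi.-style mask of the low tag bit), which sets condition register field cr0 rather than the field a plain cmp would use. The tag test itself is a one-bit mask; a standalone model, assuming V8's usual encoding (kSmiTag == 0, heap objects have the low bit set):

#include <cassert>
#include <cstdint>

const intptr_t kSmiTagMask = 1;  // low tag bit: 0 = smi, 1 = heap object

// What "TestIfSmi reg, r0" computes: mask the tag bit; the eq condition
// (result == 0) means the value is a smi.
bool IsSmi(intptr_t tagged_value) {
  return (tagged_value & kSmiTagMask) == 0;
}

int main() {
  assert(IsSmi(static_cast<intptr_t>(42) << 1));         // smi payload
  assert(!IsSmi((static_cast<intptr_t>(42) << 1) | 1));  // tagged pointer
  return 0;
}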
3742 __ Move(scratch0(), instr->hydrogen()->pairs()); 3802 __ Move(scratch0(), instr->hydrogen()->pairs());
3743 __ push(scratch0()); 3803 __ push(scratch0());
3744 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); 3804 __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
3745 __ push(scratch0()); 3805 __ push(scratch0());
3746 CallRuntime(Runtime::kDeclareGlobals, 3, instr); 3806 CallRuntime(Runtime::kDeclareGlobals, 3, instr);
3747 } 3807 }
3748 3808
3749 3809
3750 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, 3810 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
3751 int formal_parameter_count, int arity, 3811 int formal_parameter_count, int arity,
3752 LInstruction* instr, R4State r4_state) { 3812 LInstruction* instr) {
3753 bool dont_adapt_arguments = 3813 bool dont_adapt_arguments =
3754 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; 3814 formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
3755 bool can_invoke_directly = 3815 bool can_invoke_directly =
3756 dont_adapt_arguments || formal_parameter_count == arity; 3816 dont_adapt_arguments || formal_parameter_count == arity;
3757 3817
3818 Register function_reg = r4;
3819
3758 LPointerMap* pointers = instr->pointer_map(); 3820 LPointerMap* pointers = instr->pointer_map();
3759 3821
3760 if (can_invoke_directly) { 3822 if (can_invoke_directly) {
3761 if (r4_state == R4_UNINITIALIZED) {
3762 __ Move(r4, function);
3763 }
3764
3765 // Change context. 3823 // Change context.
3766 __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); 3824 __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
3767 3825
3768 // Set r3 to arguments count if adaption is not needed. Assumes that r3 3826 // Set r3 to arguments count if adaption is not needed. Assumes that r3
3769 // is available to write to at this point. 3827 // is available to write to at this point.
3770 if (dont_adapt_arguments) { 3828 if (dont_adapt_arguments) {
3771 __ mov(r3, Operand(arity)); 3829 __ mov(r3, Operand(arity));
3772 } 3830 }
3773 3831
3774 bool is_self_call = function.is_identical_to(info()->closure()); 3832 bool is_self_call = function.is_identical_to(info()->closure());
3775 3833
3776 // Invoke function. 3834 // Invoke function.
3777 if (is_self_call) { 3835 if (is_self_call) {
3778 __ CallSelf(); 3836 __ CallSelf();
3779 } else { 3837 } else {
3780 __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); 3838 __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
3781 __ CallJSEntry(ip); 3839 __ CallJSEntry(ip);
3782 } 3840 }
3783 3841
3784 // Set up deoptimization. 3842 // Set up deoptimization.
3785 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3843 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3786 } else { 3844 } else {
3787 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3845 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3788 ParameterCount count(arity); 3846 ParameterCount count(arity);
3789 ParameterCount expected(formal_parameter_count); 3847 ParameterCount expected(formal_parameter_count);
3790 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3848 __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
3791 } 3849 }
3792 } 3850 }
3793 3851
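CallKnownFunction loses its R4State parameter: every caller is now expected to have the target JSFunction in r4 (named function_reg above) before the call, instead of the helper materializing it on demand. The direct-invoke decision itself is unchanged; sketched in plain C++ for reference, with an invented sentinel value standing in for SharedFunctionInfo::kDontAdaptArgumentsSentinel:

#include <cassert>

// Direct invocation is safe only when no arguments adaption is needed:
// either the callee opted out of adaption, or the arity matches exactly.
bool CanInvokeDirectly(int formal_parameter_count, int arity,
                       int dont_adapt_sentinel) {
  bool dont_adapt_arguments = formal_parameter_count == dont_adapt_sentinel;
  return dont_adapt_arguments || formal_parameter_count == arity;
}

int main() {
  const int kSentinel = -1;  // stand-in for the real sentinel value
  assert(CanInvokeDirectly(2, 2, kSentinel));          // arity matches
  assert(CanInvokeDirectly(kSentinel, 5, kSentinel));  // adaption disabled
  assert(!CanInvokeDirectly(2, 3, kSentinel));         // needs the adaptor
  return 0;
}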
3794 3852
3795 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3853 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3796 DCHECK(instr->context() != NULL); 3854 DCHECK(instr->context() != NULL);
3797 DCHECK(ToRegister(instr->context()).is(cp)); 3855 DCHECK(ToRegister(instr->context()).is(cp));
3798 Register input = ToRegister(instr->value()); 3856 Register input = ToRegister(instr->value());
3799 Register result = ToRegister(instr->result()); 3857 Register result = ToRegister(instr->result());
3800 Register scratch = scratch0(); 3858 Register scratch = scratch0();
(...skipping 183 matching lines...)
3984 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4042 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3985 #if V8_TARGET_ARCH_PPC64 4043 #if V8_TARGET_ARCH_PPC64
3986 __ MovDoubleToInt64(scratch1, input); 4044 __ MovDoubleToInt64(scratch1, input);
3987 #else 4045 #else
3988 __ MovDoubleHighToInt(scratch1, input); 4046 __ MovDoubleHighToInt(scratch1, input);
3989 #endif 4047 #endif
3990 __ cmpi(scratch1, Operand::Zero()); 4048 __ cmpi(scratch1, Operand::Zero());
3991 // [-0.5, -0]. 4049 // [-0.5, -0].
3992 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); 4050 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
3993 } 4051 }
3994 Label return_zero;
3995 __ fcmpu(input, dot_five); 4052 __ fcmpu(input, dot_five);
3996 __ bne(&return_zero); 4053 if (CpuFeatures::IsSupported(ISELECT)) {
3997 __ li(result, Operand(1)); // +0.5. 4054 __ li(result, Operand(1));
3998 __ b(&done); 4055 __ isel(lt, result, r0, result);
3999 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on 4056 __ b(&done);
4000 // flag kBailoutOnMinusZero. 4057 } else {
4001 __ bind(&return_zero); 4058 Label return_zero;
4002 __ li(result, Operand::Zero()); 4059 __ bne(&return_zero);
4003 __ b(&done); 4060 __ li(result, Operand(1)); // +0.5.
4061 __ b(&done);
4062 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
4063 // flag kBailoutOnMinusZero.
4064 __ bind(&return_zero);
4065 __ li(result, Operand::Zero());
4066 __ b(&done);
4067 }
4004 4068
4005 __ bind(&convert); 4069 __ bind(&convert);
4006 __ fadd(input_plus_dot_five, input, dot_five); 4070 __ fadd(input_plus_dot_five, input, dot_five);
4007 // Reuse dot_five (double_scratch0) as we no longer need this value. 4071 // Reuse dot_five (double_scratch0) as we no longer need this value.
4008 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, 4072 __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
4009 double_scratch0(), &done, &done); 4073 double_scratch0(), &done, &done);
4010 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); 4074 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
4011 __ bind(&done); 4075 __ bind(&done);
4012 } 4076 }
4013 4077
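For context on the ISELECT rewrite above: this code implements JS Math.round semantics (round half toward +infinity) for the fast path around ±0.5 before falling back to floor(input + 0.5) via TryInt32Floor. A scalar model of those semantics, as a sketch only; the deopt-on-minus-zero behavior under kBailoutOnMinusZero is elided:

#include <cassert>
#include <cmath>

// Scalar model of JS Math.round: round half toward +infinity, with a
// signed zero preserved for inputs in [-0.5, +0).
double JsMathRound(double x) {
  if (std::isnan(x)) return x;
  if (x >= -0.5 && x < 0.5) {
    return std::signbit(x) ? -0.0 : 0.0;  // the fast path above
  }
  return std::floor(x + 0.5);             // TryInt32Floor fallback
}

int main() {
  assert(JsMathRound(2.5) == 3.0);
  assert(JsMathRound(-2.5) == -2.0);         // half rounds up
  assert(std::signbit(JsMathRound(-0.25)));  // -0 preserved
  return 0;
}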
(...skipping 33 matching lines...)
4047 __ fadd(result, input, kDoubleRegZero); 4111 __ fadd(result, input, kDoubleRegZero);
4048 __ fsqrt(result, result); 4112 __ fsqrt(result, result);
4049 __ bind(&done); 4113 __ bind(&done);
4050 } 4114 }
4051 4115
4052 4116
4053 void LCodeGen::DoPower(LPower* instr) { 4117 void LCodeGen::DoPower(LPower* instr) {
4054 Representation exponent_type = instr->hydrogen()->right()->representation(); 4118 Representation exponent_type = instr->hydrogen()->right()->representation();
4055 // Having marked this as a call, we can use any registers. 4119 // Having marked this as a call, we can use any registers.
4056 // Just make sure that the input/output registers are the expected ones. 4120 // Just make sure that the input/output registers are the expected ones.
4057 #ifdef DEBUG
4058 Register tagged_exponent = MathPowTaggedDescriptor::exponent(); 4121 Register tagged_exponent = MathPowTaggedDescriptor::exponent();
4059 #endif
4060 DCHECK(!instr->right()->IsDoubleRegister() || 4122 DCHECK(!instr->right()->IsDoubleRegister() ||
4061 ToDoubleRegister(instr->right()).is(d2)); 4123 ToDoubleRegister(instr->right()).is(d2));
4062 DCHECK(!instr->right()->IsRegister() || 4124 DCHECK(!instr->right()->IsRegister() ||
4063 ToRegister(instr->right()).is(tagged_exponent)); 4125 ToRegister(instr->right()).is(tagged_exponent));
4064 DCHECK(ToDoubleRegister(instr->left()).is(d1)); 4126 DCHECK(ToDoubleRegister(instr->left()).is(d1));
4065 DCHECK(ToDoubleRegister(instr->result()).is(d3)); 4127 DCHECK(ToDoubleRegister(instr->result()).is(d3));
4066 4128
4067 if (exponent_type.IsSmi()) { 4129 if (exponent_type.IsSmi()) {
4068 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4130 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4069 __ CallStub(&stub); 4131 __ CallStub(&stub);
4070 } else if (exponent_type.IsTagged()) { 4132 } else if (exponent_type.IsTagged()) {
4071 Label no_deopt; 4133 Label no_deopt;
4072 __ JumpIfSmi(r5, &no_deopt); 4134 __ JumpIfSmi(tagged_exponent, &no_deopt);
4073 __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset)); 4135 DCHECK(!r10.is(tagged_exponent));
4136 __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
4074 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4137 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4075 __ cmp(r10, ip); 4138 __ cmp(r10, ip);
4076 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); 4139 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
4077 __ bind(&no_deopt); 4140 __ bind(&no_deopt);
4078 MathPowStub stub(isolate(), MathPowStub::TAGGED); 4141 MathPowStub stub(isolate(), MathPowStub::TAGGED);
4079 __ CallStub(&stub); 4142 __ CallStub(&stub);
4080 } else if (exponent_type.IsInteger32()) { 4143 } else if (exponent_type.IsInteger32()) {
4081 MathPowStub stub(isolate(), MathPowStub::INTEGER); 4144 MathPowStub stub(isolate(), MathPowStub::INTEGER);
4082 __ CallStub(&stub); 4145 __ CallStub(&stub);
4083 } else { 4146 } else {
(...skipping 40 matching lines...)
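
Two fixes above in DoPower: the exponent register now comes from MathPowTaggedDescriptor::exponent() instead of a hardcoded r5 (the #ifdef DEBUG guard goes away because the register is used outside DCHECKs), and r10 is asserted not to alias it. The tagged-path dispatch being generated reduces to: smi exponents skip the map check, heap numbers pass, anything else deopts. Modeled illustratively (not V8 API):

#include <cassert>
#include <cstdint>

enum class PowDispatch { kSmi, kHeapNumber, kDeoptimize };

// Illustrative model of the tagged-exponent check: smis take the fast
// path; heap numbers are accepted; anything else bails out.
PowDispatch ClassifyTaggedExponent(intptr_t tagged, bool map_is_heap_number) {
  const intptr_t kSmiTagMask = 1;
  if ((tagged & kSmiTagMask) == 0) return PowDispatch::kSmi;
  return map_is_heap_number ? PowDispatch::kHeapNumber
                            : PowDispatch::kDeoptimize;
}

int main() {
  assert(ClassifyTaggedExponent(4 << 1, false) == PowDispatch::kSmi);
  assert(ClassifyTaggedExponent(1, true) == PowDispatch::kHeapNumber);
  assert(ClassifyTaggedExponent(1, false) == PowDispatch::kDeoptimize);
  return 0;
}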
4124 4187
4125 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); 4188 Handle<JSFunction> known_function = instr->hydrogen()->known_function();
4126 if (known_function.is_null()) { 4189 if (known_function.is_null()) {
4127 LPointerMap* pointers = instr->pointer_map(); 4190 LPointerMap* pointers = instr->pointer_map();
4128 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4191 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4129 ParameterCount count(instr->arity()); 4192 ParameterCount count(instr->arity());
4130 __ InvokeFunction(r4, count, CALL_FUNCTION, generator); 4193 __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
4131 } else { 4194 } else {
4132 CallKnownFunction(known_function, 4195 CallKnownFunction(known_function,
4133 instr->hydrogen()->formal_parameter_count(), 4196 instr->hydrogen()->formal_parameter_count(),
4134 instr->arity(), instr, R4_CONTAINS_TARGET); 4197 instr->arity(), instr);
4135 } 4198 }
4136 } 4199 }
4137 4200
4138 4201
4139 void LCodeGen::DoTailCallThroughMegamorphicCache( 4202 void LCodeGen::DoTailCallThroughMegamorphicCache(
4140 LTailCallThroughMegamorphicCache* instr) { 4203 LTailCallThroughMegamorphicCache* instr) {
4141 Register receiver = ToRegister(instr->receiver()); 4204 Register receiver = ToRegister(instr->receiver());
4142 Register name = ToRegister(instr->name()); 4205 Register name = ToRegister(instr->name());
4143 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister())); 4206 DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
4144 DCHECK(name.is(LoadDescriptor::NameRegister())); 4207 DCHECK(name.is(LoadDescriptor::NameRegister()));
4145 DCHECK(receiver.is(r4)); 4208 DCHECK(receiver.is(r4));
4146 DCHECK(name.is(r5)); 4209 DCHECK(name.is(r5));
4210 Register scratch = r7;
4211 Register extra = r8;
4212 Register extra2 = r9;
4213 Register extra3 = r10;
4147 4214
4148 Register scratch = r6; 4215 #ifdef DEBUG
4149 Register extra = r7; 4216 Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
4150 Register extra2 = r8; 4217 Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
4151 Register extra3 = r9; 4218 DCHECK(!FLAG_vector_ics ||
4219 !AreAliased(slot, vector, scratch, extra, extra2, extra3));
4220 #endif
4152 4221
4153 // Important for the tail-call. 4222 // Important for the tail-call.
4154 bool must_teardown_frame = NeedsEagerFrame(); 4223 bool must_teardown_frame = NeedsEagerFrame();
4155 4224
4156 // The probe will tail call to a handler if found. 4225 if (!instr->hydrogen()->is_just_miss()) {
4157 isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(), 4226 DCHECK(!instr->hydrogen()->is_keyed_load());
4158 must_teardown_frame, receiver, name, 4227
4159 scratch, extra, extra2, extra3); 4228 // The probe will tail call to a handler if found.
4229 isolate()->stub_cache()->GenerateProbe(
4230 masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
4231 receiver, name, scratch, extra, extra2, extra3);
4232 }
4160 4233
4161 // Tail call to miss if we ended up here. 4234 // Tail call to miss if we ended up here.
4162 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL); 4235 if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
4163 LoadIC::GenerateMiss(masm()); 4236 if (instr->hydrogen()->is_keyed_load()) {
4237 KeyedLoadIC::GenerateMiss(masm());
4238 } else {
4239 LoadIC::GenerateMiss(masm());
4240 }
4164 } 4241 }
4165 4242
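The register bumps above (r6-r9 to r7-r10) free r6 for the vector-IC slot, and the stub-cache probe is skipped entirely when the hydrogen instruction is a "just miss", falling through to the (possibly keyed) miss handler. A control-flow sketch of the rewritten path, with stub functions standing in for the assembler calls (illustrative only):

#include <iostream>

static void ProbeStubCache() { std::cout << "probe\n"; }
static void LeaveFrame()     { std::cout << "leave frame\n"; }
static void KeyedLoadMiss()  { std::cout << "KeyedLoadIC miss\n"; }
static void LoadMiss()       { std::cout << "LoadIC miss\n"; }

// Mirrors the new DoTailCallThroughMegamorphicCache control flow.
void TailCallThroughMegamorphicCache(bool is_just_miss, bool is_keyed_load,
                                     bool must_teardown_frame) {
  if (!is_just_miss) {
    ProbeStubCache();  // tail-calls a handler on a hit, falls through on miss
  }
  if (must_teardown_frame) LeaveFrame();
  if (is_keyed_load) KeyedLoadMiss(); else LoadMiss();
}

int main() {
  TailCallThroughMegamorphicCache(false, true, true);
  return 0;
}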
4166 4243
4167 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { 4244 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
4168 DCHECK(ToRegister(instr->result()).is(r3)); 4245 DCHECK(ToRegister(instr->result()).is(r3));
4169 4246
4170 LPointerMap* pointers = instr->pointer_map(); 4247 if (instr->hydrogen()->IsTailCall()) {
4171 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 4248 if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
4172 4249
4173 if (instr->target()->IsConstantOperand()) { 4250 if (instr->target()->IsConstantOperand()) {
4174 LConstantOperand* target = LConstantOperand::cast(instr->target()); 4251 LConstantOperand* target = LConstantOperand::cast(instr->target());
4175 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 4252 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4176 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 4253 __ Jump(code, RelocInfo::CODE_TARGET);
4177 __ Call(code, RelocInfo::CODE_TARGET); 4254 } else {
4255 DCHECK(instr->target()->IsRegister());
4256 Register target = ToRegister(instr->target());
4257 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4258 __ JumpToJSEntry(ip);
4259 }
4178 } else { 4260 } else {
4179 DCHECK(instr->target()->IsRegister()); 4261 LPointerMap* pointers = instr->pointer_map();
4180 Register target = ToRegister(instr->target()); 4262 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
4181 generator.BeforeCall(__ CallSize(target)); 4263
4182 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 4264 if (instr->target()->IsConstantOperand()) {
4183 __ CallJSEntry(ip); 4265 LConstantOperand* target = LConstantOperand::cast(instr->target());
4266 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
4267 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
4268 __ Call(code, RelocInfo::CODE_TARGET);
4269 } else {
4270 DCHECK(instr->target()->IsRegister());
4271 Register target = ToRegister(instr->target());
4272 generator.BeforeCall(__ CallSize(target));
4273 __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
4274 __ CallJSEntry(ip);
4275 }
4276 generator.AfterCall();
4184 } 4277 }
4185 generator.AfterCall();
4186 } 4278 }
4187 4279
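The new IsTailCall branch above bypasses the safepoint machinery: the frame is torn down and control transfers with Jump/JumpToJSEntry rather than Call, so no return address or lazy-deopt safepoint is recorded; only the non-tail path keeps the SafepointGenerator bracketing. A sketch of that split, with printouts standing in for the emitted instructions (illustrative only):

#include <iostream>

static void RecordSafepoint() { std::cout << "safepoint recorded\n"; }

// Tail call: leave the frame and jump; the callee returns directly to our
// caller. Regular call: call and record a lazy-deopt safepoint after it.
void CallWithDescriptor(bool is_tail_call, bool needs_eager_frame) {
  if (is_tail_call) {
    if (needs_eager_frame) std::cout << "LeaveFrame\n";
    std::cout << "Jump to target\n";  // no generator.AfterCall()
  } else {
    std::cout << "Call target\n";
    RecordSafepoint();                // generator.AfterCall()
  }
}

int main() {
  CallWithDescriptor(true, true);
  return 0;
}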
4188 4280
4189 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 4281 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
4190 DCHECK(ToRegister(instr->function()).is(r4)); 4282 DCHECK(ToRegister(instr->function()).is(r4));
4191 DCHECK(ToRegister(instr->result()).is(r3)); 4283 DCHECK(ToRegister(instr->result()).is(r3));
4192 4284
4193 if (instr->hydrogen()->pass_argument_count()) { 4285 if (instr->hydrogen()->pass_argument_count()) {
4194 __ mov(r3, Operand(instr->arity())); 4286 __ mov(r3, Operand(instr->arity()));
4195 } 4287 }
(...skipping 19 matching lines...)
4215 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 4307 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
4216 } 4308 }
4217 4309
4218 4310
4219 void LCodeGen::DoCallFunction(LCallFunction* instr) { 4311 void LCodeGen::DoCallFunction(LCallFunction* instr) {
4220 DCHECK(ToRegister(instr->context()).is(cp)); 4312 DCHECK(ToRegister(instr->context()).is(cp));
4221 DCHECK(ToRegister(instr->function()).is(r4)); 4313 DCHECK(ToRegister(instr->function()).is(r4));
4222 DCHECK(ToRegister(instr->result()).is(r3)); 4314 DCHECK(ToRegister(instr->result()).is(r3));
4223 4315
4224 int arity = instr->arity(); 4316 int arity = instr->arity();
4225 CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); 4317 CallFunctionFlags flags = instr->hydrogen()->function_flags();
4226 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 4318 if (instr->hydrogen()->HasVectorAndSlot()) {
4319 Register slot_register = ToRegister(instr->temp_slot());
4320 Register vector_register = ToRegister(instr->temp_vector());
4321 DCHECK(slot_register.is(r6));
4322 DCHECK(vector_register.is(r5));
4323
4324 AllowDeferredHandleDereference vector_structure_check;
4325 Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
4326 int index = vector->GetIndex(instr->hydrogen()->slot());
4327
4328 __ Move(vector_register, vector);
4329 __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
4330
4331 CallICState::CallType call_type =
4332 (flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
4333
4334 Handle<Code> ic =
4335 CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
4336 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4337 } else {
4338 CallFunctionStub stub(isolate(), arity, flags);
4339 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
4340 }
4227 } 4341 }
4228 4342
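When the call site carries a feedback vector and slot, the rewritten DoCallFunction routes through CallICInOptimizedCode with the vector in r5 and the slot index in r6 as a smi, instead of the plain CallFunctionStub. A small model of the smi encoding handed to the slot register, using a 32-bit shift-by-one layout for brevity (the PPC64 layout, per the asserts elsewhere in this diff, shifts the payload into the upper 32 bits):

#include <cassert>
#include <cstdint>

// 32-bit-style smi: payload shifted left past the tag bit (tag == 0).
intptr_t SmiFromInt(int value) { return static_cast<intptr_t>(value) << 1; }

int main() {
  int index = 7;  // vector->GetIndex(instr->hydrogen()->slot())
  intptr_t smi = SmiFromInt(index);
  assert((smi & 1) == 0);       // smi tag bit stays clear
  assert((smi >> 1) == index);  // payload round-trips
  return 0;
}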
4229 4343
4230 void LCodeGen::DoCallNew(LCallNew* instr) { 4344 void LCodeGen::DoCallNew(LCallNew* instr) {
4231 DCHECK(ToRegister(instr->context()).is(cp)); 4345 DCHECK(ToRegister(instr->context()).is(cp));
4232 DCHECK(ToRegister(instr->constructor()).is(r4)); 4346 DCHECK(ToRegister(instr->constructor()).is(r4));
4233 DCHECK(ToRegister(instr->result()).is(r3)); 4347 DCHECK(ToRegister(instr->result()).is(r3));
4234 4348
4235 __ mov(r3, Operand(instr->arity())); 4349 __ mov(r3, Operand(instr->arity()));
4236 // No cell in r5 for construct type feedback in optimized code 4350 // No cell in r5 for construct type feedback in optimized code
(...skipping 93 matching lines...)
4330 4444
4331 __ AssertNotSmi(object); 4445 __ AssertNotSmi(object);
4332 4446
4333 #if V8_TARGET_ARCH_PPC64 4447 #if V8_TARGET_ARCH_PPC64
4334 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || 4448 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4335 IsInteger32(LConstantOperand::cast(instr->value()))); 4449 IsInteger32(LConstantOperand::cast(instr->value())));
4336 #else 4450 #else
4337 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || 4451 DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
4338 IsSmi(LConstantOperand::cast(instr->value()))); 4452 IsSmi(LConstantOperand::cast(instr->value())));
4339 #endif 4453 #endif
4340 if (representation.IsDouble()) { 4454 if (!FLAG_unbox_double_fields && representation.IsDouble()) {
4341 DCHECK(access.IsInobject()); 4455 DCHECK(access.IsInobject());
4342 DCHECK(!hinstr->has_transition()); 4456 DCHECK(!hinstr->has_transition());
4343 DCHECK(!hinstr->NeedsWriteBarrier()); 4457 DCHECK(!hinstr->NeedsWriteBarrier());
4344 DoubleRegister value = ToDoubleRegister(instr->value()); 4458 DoubleRegister value = ToDoubleRegister(instr->value());
4345 __ stfd(value, FieldMemOperand(object, offset)); 4459 __ stfd(value, FieldMemOperand(object, offset));
4346 return; 4460 return;
4347 } 4461 }
4348 4462
4349 if (hinstr->has_transition()) { 4463 if (hinstr->has_transition()) {
4350 Handle<Map> transition = hinstr->transition_map(); 4464 Handle<Map> transition = hinstr->transition_map();
4351 AddDeprecationDependency(transition); 4465 AddDeprecationDependency(transition);
4352 __ mov(scratch, Operand(transition)); 4466 __ mov(scratch, Operand(transition));
4353 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); 4467 __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
4354 if (hinstr->NeedsWriteBarrierForMap()) { 4468 if (hinstr->NeedsWriteBarrierForMap()) {
4355 Register temp = ToRegister(instr->temp()); 4469 Register temp = ToRegister(instr->temp());
4356 // Update the write barrier for the map field. 4470 // Update the write barrier for the map field.
4357 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), 4471 __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
4358 kSaveFPRegs); 4472 kSaveFPRegs);
4359 } 4473 }
4360 } 4474 }
4361 4475
4362 // Do the store. 4476 // Do the store.
4363 Register value = ToRegister(instr->value()); 4477 Register record_dest = object;
4364 4478 Register record_value = no_reg;
4479 Register record_scratch = scratch;
4365 #if V8_TARGET_ARCH_PPC64 4480 #if V8_TARGET_ARCH_PPC64
4366 // 64-bit Smi optimization 4481 if (FLAG_unbox_double_fields && representation.IsDouble()) {
4367 if (representation.IsSmi() && 4482 DCHECK(access.IsInobject());
4368 hinstr->value()->representation().IsInteger32()) { 4483 DoubleRegister value = ToDoubleRegister(instr->value());
4369 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 4484 __ stfd(value, FieldMemOperand(object, offset));
4370 // Store int value directly to upper half of the smi. 4485 if (hinstr->NeedsWriteBarrier()) {
4371 STATIC_ASSERT(kSmiTag == 0); 4486 record_value = ToRegister(instr->value());
4372 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); 4487 }
4373 #if V8_TARGET_LITTLE_ENDIAN 4488 } else {
4374 offset += kPointerSize / 2; 4489 if (representation.IsSmi() &&
4490 hinstr->value()->representation().IsInteger32()) {
4491 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4492 // 64-bit Smi optimization
4493 // Store int value directly to upper half of the smi.
4494 offset = SmiWordOffset(offset);
4495 representation = Representation::Integer32();
4496 }
4375 #endif 4497 #endif
4376 representation = Representation::Integer32(); 4498 if (access.IsInobject()) {
4499 Register value = ToRegister(instr->value());
4500 MemOperand operand = FieldMemOperand(object, offset);
4501 __ StoreRepresentation(value, operand, representation, r0);
4502 record_value = value;
4503 } else {
4504 Register value = ToRegister(instr->value());
4505 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4506 MemOperand operand = FieldMemOperand(scratch, offset);
4507 __ StoreRepresentation(value, operand, representation, r0);
4508 record_dest = scratch;
4509 record_value = value;
4510 record_scratch = object;
4511 }
4512 #if V8_TARGET_ARCH_PPC64
4377 } 4513 }
4378 #endif 4514 #endif
4379 4515
4380 if (access.IsInobject()) { 4516 if (hinstr->NeedsWriteBarrier()) {
4381 MemOperand operand = FieldMemOperand(object, offset); 4517 __ RecordWriteField(record_dest, offset, record_value, record_scratch,
4382 __ StoreRepresentation(value, operand, representation, r0); 4518 GetLinkRegisterState(), kSaveFPRegs,
4383 if (hinstr->NeedsWriteBarrier()) { 4519 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4384 // Update the write barrier for the object for in-object properties. 4520 hinstr->PointersToHereCheckForValue());
4385 __ RecordWriteField(
4386 object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
4387 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4388 hinstr->PointersToHereCheckForValue());
4389 }
4390 } else {
4391 __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
4392 MemOperand operand = FieldMemOperand(scratch, offset);
4393 __ StoreRepresentation(value, operand, representation, r0);
4394 if (hinstr->NeedsWriteBarrier()) {
4395 // Update the write barrier for the properties array.
4396 // object is used as a scratch register.
4397 __ RecordWriteField(
4398 scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
4399 EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
4400 hinstr->PointersToHereCheckForValue());
4401 }
4402 } 4521 }
4403 } 4522 }
4404 4523
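The store path above is restructured around record_dest / record_value / record_scratch so that a single RecordWriteField call at the end covers every case: in-object stores barrier through the object, out-of-object stores barrier through the properties array with the object register reused as scratch, and (with FLAG_unbox_double_fields on PPC64) unboxed double stores barrier the tagged value register. A compact model of that register-role selection, purely illustrative:

#include <cassert>
#include <string>

struct BarrierPlan {
  std::string dest;     // base the write barrier is recorded against
  std::string scratch;  // register reused as barrier scratch
};

// Mirrors the record_dest/record_scratch selection in the new code.
BarrierPlan PlanWriteBarrier(bool is_inobject) {
  if (is_inobject) return {"object", "scratch0"};
  return {"properties(scratch0)", "object"};  // registers swap roles
}

int main() {
  assert(PlanWriteBarrier(true).dest == "object");
  assert(PlanWriteBarrier(false).scratch == "object");
  return 0;
}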
4405 4524
4406 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4525 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4407 DCHECK(ToRegister(instr->context()).is(cp)); 4526 DCHECK(ToRegister(instr->context()).is(cp));
4408 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4527 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4409 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4528 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4410 4529
4411 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name())); 4530 __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
4412 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); 4531 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->language_mode());
4413 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4532 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4414 } 4533 }
4415 4534
4416 4535
4417 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { 4536 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
4418 Representation representation = instr->hydrogen()->length()->representation(); 4537 Representation representation = instr->hydrogen()->length()->representation();
4419 DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); 4538 DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
4420 DCHECK(representation.IsSmiOrInteger32()); 4539 DCHECK(representation.IsSmiOrInteger32());
4421 4540
4422 Condition cc = instr->hydrogen()->allow_equality() ? lt : le; 4541 Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
(...skipping 160 matching lines...)
4583 __ add(scratch, elements, scratch); 4702 __ add(scratch, elements, scratch);
4584 elements = scratch; 4703 elements = scratch;
4585 } 4704 }
4586 if (!is_int16(base_offset)) { 4705 if (!is_int16(base_offset)) {
4587 __ Add(scratch, elements, base_offset, r0); 4706 __ Add(scratch, elements, base_offset, r0);
4588 base_offset = 0; 4707 base_offset = 0;
4589 elements = scratch; 4708 elements = scratch;
4590 } 4709 }
4591 4710
4592 if (instr->NeedsCanonicalization()) { 4711 if (instr->NeedsCanonicalization()) {
4593 // Force a canonical NaN. 4712 // Turn potential sNaN value into qNaN.
4594 __ CanonicalizeNaN(double_scratch, value); 4713 __ CanonicalizeNaN(double_scratch, value);
4595 __ stfd(double_scratch, MemOperand(elements, base_offset)); 4714 __ stfd(double_scratch, MemOperand(elements, base_offset));
4596 } else { 4715 } else {
4597 __ stfd(value, MemOperand(elements, base_offset)); 4716 __ stfd(value, MemOperand(elements, base_offset));
4598 } 4717 }
4599 } 4718 }
4600 4719
4601 4720
4602 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { 4721 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
4603 HStoreKeyed* hinstr = instr->hydrogen(); 4722 HStoreKeyed* hinstr = instr->hydrogen();
(...skipping 24 matching lines...)
4628 } 4747 }
4629 4748
4630 Representation representation = hinstr->value()->representation(); 4749 Representation representation = hinstr->value()->representation();
4631 4750
4632 #if V8_TARGET_ARCH_PPC64 4751 #if V8_TARGET_ARCH_PPC64
4633 // 64-bit Smi optimization 4752 // 64-bit Smi optimization
4634 if (representation.IsInteger32()) { 4753 if (representation.IsInteger32()) {
4635 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); 4754 DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
4636 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); 4755 DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
4637 // Store int value directly to upper half of the smi. 4756 // Store int value directly to upper half of the smi.
4638 STATIC_ASSERT(kSmiTag == 0); 4757 offset = SmiWordOffset(offset);
4639 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
4640 #if V8_TARGET_LITTLE_ENDIAN
4641 offset += kPointerSize / 2;
4642 #endif
4643 } 4758 }
4644 #endif 4759 #endif
4645 4760
4646 __ StoreRepresentation(value, MemOperand(store_base, offset), representation, 4761 __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
4647 r0); 4762 r0);
4648 4763
4649 if (hinstr->NeedsWriteBarrier()) { 4764 if (hinstr->NeedsWriteBarrier()) {
4650 SmiCheck check_needed = hinstr->value()->type().IsHeapObject() 4765 SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
4651 ? OMIT_SMI_CHECK 4766 ? OMIT_SMI_CHECK
4652 : INLINE_SMI_CHECK; 4767 : INLINE_SMI_CHECK;
(...skipping 18 matching lines...)
4671 } 4786 }
4672 4787
4673 4788
4674 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4789 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4675 DCHECK(ToRegister(instr->context()).is(cp)); 4790 DCHECK(ToRegister(instr->context()).is(cp));
4676 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister())); 4791 DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
4677 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister())); 4792 DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
4678 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister())); 4793 DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
4679 4794
4680 Handle<Code> ic = 4795 Handle<Code> ic =
4681 CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code(); 4796 CodeFactory::KeyedStoreIC(isolate(), instr->language_mode()).code();
4682 CallCode(ic, RelocInfo::CODE_TARGET, instr); 4797 CallCode(ic, RelocInfo::CODE_TARGET, instr);
4683 } 4798 }
4684 4799
4685 4800
4686 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4801 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4687 Register object_reg = ToRegister(instr->object()); 4802 Register object_reg = ToRegister(instr->object());
4688 Register scratch = scratch0(); 4803 Register scratch = scratch0();
4689 4804
4690 Handle<Map> from_map = instr->original_map(); 4805 Handle<Map> from_map = instr->original_map();
4691 Handle<Map> to_map = instr->transitioned_map(); 4806 Handle<Map> to_map = instr->transitioned_map();
(...skipping 362 matching lines...)
5054 } 5169 }
5055 #endif 5170 #endif
5056 } 5171 }
5057 5172
5058 5173
5059 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 5174 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
5060 Register scratch = scratch0(); 5175 Register scratch = scratch0();
5061 Register input = ToRegister(instr->value()); 5176 Register input = ToRegister(instr->value());
5062 Register result = ToRegister(instr->result()); 5177 Register result = ToRegister(instr->result());
5063 if (instr->needs_check()) { 5178 if (instr->needs_check()) {
5064 STATIC_ASSERT(kHeapObjectTag == 1);
5065 // If the input is a HeapObject, value of scratch won't be zero. 5179 // If the input is a HeapObject, value of scratch won't be zero.
5066 __ andi(scratch, input, Operand(kHeapObjectTag)); 5180 __ andi(scratch, input, Operand(kHeapObjectTag));
5067 __ SmiUntag(result, input); 5181 __ SmiUntag(result, input);
5068 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0); 5182 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
5069 } else { 5183 } else {
5070 __ SmiUntag(result, input); 5184 __ SmiUntag(result, input);
5071 } 5185 }
5072 } 5186 }
5073 5187
5074 5188
(...skipping 99 matching lines...)
5174 __ bind(&check_bools); 5288 __ bind(&check_bools);
5175 __ LoadRoot(ip, Heap::kTrueValueRootIndex); 5289 __ LoadRoot(ip, Heap::kTrueValueRootIndex);
5176 __ cmp(input_reg, ip); 5290 __ cmp(input_reg, ip);
5177 __ bne(&check_false); 5291 __ bne(&check_false);
5178 __ li(input_reg, Operand(1)); 5292 __ li(input_reg, Operand(1));
5179 __ b(&done); 5293 __ b(&done);
5180 5294
5181 __ bind(&check_false); 5295 __ bind(&check_false);
5182 __ LoadRoot(ip, Heap::kFalseValueRootIndex); 5296 __ LoadRoot(ip, Heap::kFalseValueRootIndex);
5183 __ cmp(input_reg, ip); 5297 __ cmp(input_reg, ip);
5184 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse, 5298 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
5185 cr7);
5186 __ li(input_reg, Operand::Zero()); 5299 __ li(input_reg, Operand::Zero());
5187 } else { 5300 } else {
5188 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7); 5301 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
5189 5302
5190 __ lfd(double_scratch2, 5303 __ lfd(double_scratch2,
5191 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 5304 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
5192 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5305 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5193 // preserve heap number pointer in scratch2 for minus zero check below 5306 // preserve heap number pointer in scratch2 for minus zero check below
5194 __ mr(scratch2, input_reg); 5307 __ mr(scratch2, input_reg);
5195 } 5308 }
5196 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, 5309 __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
5197 double_scratch); 5310 double_scratch);
5198 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7); 5311 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
5199 5312
5200 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5313 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5201 __ cmpi(input_reg, Operand::Zero()); 5314 __ cmpi(input_reg, Operand::Zero());
5202 __ bne(&done); 5315 __ bne(&done);
5203 __ lwz(scratch1, 5316 __ lwz(scratch1,
5204 FieldMemOperand(scratch2, HeapNumber::kValueOffset + 5317 FieldMemOperand(scratch2, HeapNumber::kValueOffset +
5205 Register::kExponentOffset)); 5318 Register::kExponentOffset));
5206 __ cmpwi(scratch1, Operand::Zero()); 5319 __ cmpwi(scratch1, Operand::Zero());
5207 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7); 5320 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
5208 } 5321 }
5209 } 5322 }
5210 __ bind(&done); 5323 __ bind(&done);
5211 } 5324 }
5212 5325
5213 5326
5214 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 5327 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
5215 class DeferredTaggedToI FINAL : public LDeferredCode { 5328 class DeferredTaggedToI FINAL : public LDeferredCode {
5216 public: 5329 public:
5217 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 5330 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 376 matching lines...)
5594 __ b(deferred->entry()); 5707 __ b(deferred->entry());
5595 } 5708 }
5596 } else { 5709 } else {
5597 Register size = ToRegister(instr->size()); 5710 Register size = ToRegister(instr->size());
5598 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); 5711 __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
5599 } 5712 }
5600 5713
5601 __ bind(deferred->exit()); 5714 __ bind(deferred->exit());
5602 5715
5603 if (instr->hydrogen()->MustPrefillWithFiller()) { 5716 if (instr->hydrogen()->MustPrefillWithFiller()) {
5604 STATIC_ASSERT(kHeapObjectTag == 1);
5605 if (instr->size()->IsConstantOperand()) { 5717 if (instr->size()->IsConstantOperand()) {
5606 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); 5718 int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
5607 __ LoadIntLiteral(scratch, size - kHeapObjectTag); 5719 __ LoadIntLiteral(scratch, size - kHeapObjectTag);
5608 } else { 5720 } else {
5609 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); 5721 __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
5610 } 5722 }
5611 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); 5723 __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
5612 Label loop; 5724 Label loop;
5613 __ bind(&loop); 5725 __ bind(&loop);
5614 __ subi(scratch, scratch, Operand(kPointerSize)); 5726 __ subi(scratch, scratch, Operand(kPointerSize));
(...skipping 103 matching lines...)
5718 __ CopyFields(r3, r4, r5.bit(), size / kPointerSize); 5830 __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
5719 } 5831 }
5720 5832
5721 5833
5722 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5834 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5723 DCHECK(ToRegister(instr->context()).is(cp)); 5835 DCHECK(ToRegister(instr->context()).is(cp));
5724 // Use the fast case closure allocation code that allocates in new 5836 // Use the fast case closure allocation code that allocates in new
5725 // space for nested functions that don't need literals cloning. 5837 // space for nested functions that don't need literals cloning.
5726 bool pretenure = instr->hydrogen()->pretenure(); 5838 bool pretenure = instr->hydrogen()->pretenure();
5727 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5839 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5728 FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(), 5840 FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
5729 instr->hydrogen()->kind()); 5841 instr->hydrogen()->kind());
5730 __ mov(r5, Operand(instr->hydrogen()->shared_info())); 5842 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5731 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); 5843 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
5732 } else { 5844 } else {
5733 __ mov(r5, Operand(instr->hydrogen()->shared_info())); 5845 __ mov(r5, Operand(instr->hydrogen()->shared_info()));
5734 __ mov(r4, Operand(pretenure ? factory()->true_value() 5846 __ mov(r4, Operand(pretenure ? factory()->true_value()
5735 : factory()->false_value())); 5847 : factory()->false_value()));
5736 __ Push(cp, r5, r4); 5848 __ Push(cp, r5, r4);
5737 CallRuntime(Runtime::kNewClosure, 3, instr); 5849 CallRuntime(Runtime::kNewClosure, 3, instr);
5738 } 5850 }
(...skipping 390 matching lines...)
6129 __ Push(scope_info); 6241 __ Push(scope_info);
6130 __ push(ToRegister(instr->function())); 6242 __ push(ToRegister(instr->function()));
6131 CallRuntime(Runtime::kPushBlockContext, 2, instr); 6243 CallRuntime(Runtime::kPushBlockContext, 2, instr);
6132 RecordSafepoint(Safepoint::kNoLazyDeopt); 6244 RecordSafepoint(Safepoint::kNoLazyDeopt);
6133 } 6245 }
6134 6246
6135 6247
6136 #undef __ 6248 #undef __
6137 } 6249 }
6138 } // namespace v8::internal 6250 } // namespace v8::internal