Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(811)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 196133017: Experimental parser: merge r19949 (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: Created 6 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after
140 __ stop("stop_at"); 140 __ stop("stop_at");
141 } 141 }
142 #endif 142 #endif
143 143
144 // r1: Callee's JS function. 144 // r1: Callee's JS function.
145 // cp: Callee's context. 145 // cp: Callee's context.
146 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) 146 // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
147 // fp: Caller's frame pointer. 147 // fp: Caller's frame pointer.
148 // lr: Caller's pc. 148 // lr: Caller's pc.
149 149
150 // Classic mode functions and builtins need to replace the receiver with the 150 // Sloppy mode functions and builtins need to replace the receiver with the
151 // global proxy when called as functions (without an explicit receiver 151 // global proxy when called as functions (without an explicit receiver
152 // object). 152 // object).
153 if (info_->this_has_uses() && 153 if (info_->this_has_uses() &&
154 info_->is_classic_mode() && 154 info_->strict_mode() == SLOPPY &&
155 !info_->is_native()) { 155 !info_->is_native()) {
156 Label ok; 156 Label ok;
157 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 157 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
158 __ ldr(r2, MemOperand(sp, receiver_offset)); 158 __ ldr(r2, MemOperand(sp, receiver_offset));
159 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); 159 __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
160 __ b(ne, &ok); 160 __ b(ne, &ok);
161 161
162 __ ldr(r2, GlobalObjectOperand()); 162 __ ldr(r2, GlobalObjectOperand());
163 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); 163 __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
164 164
165 __ str(r2, MemOperand(sp, receiver_offset)); 165 __ str(r2, MemOperand(sp, receiver_offset));
166 166
167 __ bind(&ok); 167 __ bind(&ok);
168 } 168 }
169 } 169 }
170 170
171 info()->set_prologue_offset(masm_->pc_offset()); 171 info()->set_prologue_offset(masm_->pc_offset());
172 if (NeedsEagerFrame()) { 172 if (NeedsEagerFrame()) {
173 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME); 173 __ Prologue(info()->IsStub() ? BUILD_STUB_FRAME : BUILD_FUNCTION_FRAME);
174 frame_is_built_ = true; 174 frame_is_built_ = true;
175 info_->AddNoFrameRange(0, masm_->pc_offset()); 175 info_->AddNoFrameRange(0, masm_->pc_offset());
176 __ LoadConstantPoolPointerRegister();
177 } 176 }
178 177
179 // Reserve space for the stack slots needed by the code. 178 // Reserve space for the stack slots needed by the code.
180 int slots = GetStackSlotCount(); 179 int slots = GetStackSlotCount();
181 if (slots > 0) { 180 if (slots > 0) {
182 if (FLAG_debug_code) { 181 if (FLAG_debug_code) {
183 __ sub(sp, sp, Operand(slots * kPointerSize)); 182 __ sub(sp, sp, Operand(slots * kPointerSize));
184 __ push(r0); 183 __ push(r0);
185 __ push(r1); 184 __ push(r1);
186 __ add(r0, sp, Operand(slots * kPointerSize)); 185 __ add(r0, sp, Operand(slots * kPointerSize));
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
262 osr_pc_offset_ = masm()->pc_offset(); 261 osr_pc_offset_ = masm()->pc_offset();
263 262
264 // Adjust the frame size, subsuming the unoptimized frame into the 263 // Adjust the frame size, subsuming the unoptimized frame into the
265 // optimized frame. 264 // optimized frame.
266 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 265 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
267 ASSERT(slots >= 0); 266 ASSERT(slots >= 0);
268 __ sub(sp, sp, Operand(slots * kPointerSize)); 267 __ sub(sp, sp, Operand(slots * kPointerSize));
269 } 268 }
270 269
271 270
271 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
272 if (!instr->IsLazyBailout() && !instr->IsGap()) {
273 safepoints_.BumpLastLazySafepointIndex();
274 }
275 }
276
277
272 bool LCodeGen::GenerateDeferredCode() { 278 bool LCodeGen::GenerateDeferredCode() {
273 ASSERT(is_generating()); 279 ASSERT(is_generating());
274 if (deferred_.length() > 0) { 280 if (deferred_.length() > 0) {
275 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 281 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
276 LDeferredCode* code = deferred_[i]; 282 LDeferredCode* code = deferred_[i];
277 283
278 HValue* value = 284 HValue* value =
279 instructions_->at(code->instruction_index())->hydrogen_value(); 285 instructions_->at(code->instruction_index())->hydrogen_value();
280 RecordAndWritePosition( 286 RecordAndWritePosition(
281 chunk()->graph()->SourcePositionToScriptPosition(value->position())); 287 chunk()->graph()->SourcePositionToScriptPosition(value->position()));
(...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after
420 if (r.IsInteger32()) { 426 if (r.IsInteger32()) {
421 ASSERT(literal->IsNumber()); 427 ASSERT(literal->IsNumber());
422 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); 428 __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
423 } else if (r.IsDouble()) { 429 } else if (r.IsDouble()) {
424 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); 430 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
425 } else { 431 } else {
426 ASSERT(r.IsSmiOrTagged()); 432 ASSERT(r.IsSmiOrTagged());
427 __ Move(scratch, literal); 433 __ Move(scratch, literal);
428 } 434 }
429 return scratch; 435 return scratch;
430 } else if (op->IsStackSlot() || op->IsArgument()) { 436 } else if (op->IsStackSlot()) {
431 __ ldr(scratch, ToMemOperand(op)); 437 __ ldr(scratch, ToMemOperand(op));
432 return scratch; 438 return scratch;
433 } 439 }
434 UNREACHABLE(); 440 UNREACHABLE();
435 return scratch; 441 return scratch;
436 } 442 }
437 443
438 444
439 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 445 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
440 ASSERT(op->IsDoubleRegister()); 446 ASSERT(op->IsDoubleRegister());
(...skipping 15 matching lines...) Expand all
456 ASSERT(literal->IsNumber()); 462 ASSERT(literal->IsNumber());
457 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); 463 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
458 __ vmov(flt_scratch, ip); 464 __ vmov(flt_scratch, ip);
459 __ vcvt_f64_s32(dbl_scratch, flt_scratch); 465 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
460 return dbl_scratch; 466 return dbl_scratch;
461 } else if (r.IsDouble()) { 467 } else if (r.IsDouble()) {
462 Abort(kUnsupportedDoubleImmediate); 468 Abort(kUnsupportedDoubleImmediate);
463 } else if (r.IsTagged()) { 469 } else if (r.IsTagged()) {
464 Abort(kUnsupportedTaggedImmediate); 470 Abort(kUnsupportedTaggedImmediate);
465 } 471 }
466 } else if (op->IsStackSlot() || op->IsArgument()) { 472 } else if (op->IsStackSlot()) {
467 // TODO(regis): Why is vldr not taking a MemOperand? 473 // TODO(regis): Why is vldr not taking a MemOperand?
468 // __ vldr(dbl_scratch, ToMemOperand(op)); 474 // __ vldr(dbl_scratch, ToMemOperand(op));
469 MemOperand mem_op = ToMemOperand(op); 475 MemOperand mem_op = ToMemOperand(op);
470 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); 476 __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
471 return dbl_scratch; 477 return dbl_scratch;
472 } 478 }
473 UNREACHABLE(); 479 UNREACHABLE();
474 return dbl_scratch; 480 return dbl_scratch;
475 } 481 }
476 482
(...skipping 199 matching lines...) Expand 10 before | Expand all | Expand 10 after
676 if (op->IsStackSlot()) { 682 if (op->IsStackSlot()) {
677 if (is_tagged) { 683 if (is_tagged) {
678 translation->StoreStackSlot(op->index()); 684 translation->StoreStackSlot(op->index());
679 } else if (is_uint32) { 685 } else if (is_uint32) {
680 translation->StoreUint32StackSlot(op->index()); 686 translation->StoreUint32StackSlot(op->index());
681 } else { 687 } else {
682 translation->StoreInt32StackSlot(op->index()); 688 translation->StoreInt32StackSlot(op->index());
683 } 689 }
684 } else if (op->IsDoubleStackSlot()) { 690 } else if (op->IsDoubleStackSlot()) {
685 translation->StoreDoubleStackSlot(op->index()); 691 translation->StoreDoubleStackSlot(op->index());
686 } else if (op->IsArgument()) {
687 ASSERT(is_tagged);
688 int src_index = GetStackSlotCount() + op->index();
689 translation->StoreStackSlot(src_index);
690 } else if (op->IsRegister()) { 692 } else if (op->IsRegister()) {
691 Register reg = ToRegister(op); 693 Register reg = ToRegister(op);
692 if (is_tagged) { 694 if (is_tagged) {
693 translation->StoreRegister(reg); 695 translation->StoreRegister(reg);
694 } else if (is_uint32) { 696 } else if (is_uint32) {
695 translation->StoreUint32Register(reg); 697 translation->StoreUint32Register(reg);
696 } else { 698 } else {
697 translation->StoreInt32Register(reg); 699 translation->StoreInt32Register(reg);
698 } 700 }
699 } else if (op->IsDoubleRegister()) { 701 } else if (op->IsDoubleRegister()) {
(...skipping 408 matching lines...) Expand 10 before | Expand all | Expand 10 after
1108 UNREACHABLE(); 1110 UNREACHABLE();
1109 } 1111 }
1110 } 1112 }
1111 1113
1112 1114
// An unknown OSR value needs no code of its own; this only ensures the OSR
// prologue has been emitted (GenerateOsrPrologue is defined elsewhere in
// this file — it appears to guard against double emission; confirm there).
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
1116 1118
1117 1119
// Emits code for x % d where d is a compile-time power-of-2 divisor and the
// operation is performed in place (result register == dividend register).
// Negative dividends are handled on a separate branchy path: negate, mask,
// negate back — see the rationale comment below.
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister(instr->result())));  // In-place operation.

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  // mask == |divisor| - 1, written so it does not overflow for kMinInt.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmp(dividend, Operand::Zero());
    __ b(pl, &dividend_is_not_negative);
    // Negative path: |x| & mask, then negate the remainder back.
    // Note that this is correct even for kMinInt operands.
    __ rsb(dividend, dividend, Operand::Zero());
    __ and_(dividend, dividend, Operand(mask));
    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // A zero remainder with a negative dividend would be -0; deopt.
      DeoptimizeIf(eq, instr->environment());
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ and_(dividend, dividend, Operand(mask));
  __ bind(&done);
}
1151
1152
1153 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1154 Register dividend = ToRegister(instr->dividend());
1155 int32_t divisor = instr->divisor();
1156 Register result = ToRegister(instr->result());
1157 ASSERT(!dividend.is(result));
1158
1159 if (divisor == 0) {
1160 DeoptimizeIf(al, instr->environment());
1161 return;
1162 }
1163
1164 __ FlooringDiv(result, dividend, Abs(divisor));
1165 __ add(result, result, Operand(dividend, LSR, 31));
1166 __ mov(ip, Operand(Abs(divisor)));
1167 __ smull(result, ip, result, ip);
1168 __ sub(result, dividend, result, SetCC);
1169
1170 // Check for negative zero.
1171 HMod* hmod = instr->hydrogen();
1172 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1173 Label remainder_not_zero;
1174 __ b(ne, &remainder_not_zero);
1175 __ cmp(dividend, Operand::Zero());
1176 DeoptimizeIf(lt, instr->environment());
1177 __ bind(&remainder_not_zero);
1178 }
1179 }
1180
1181
// Emits code for x % y with both operands in registers. Uses the hardware
// SDIV/MLS pair when integer division is supported, otherwise falls back to
// VFP double-precision division.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  if (CpuFeatures::IsSupported(SUDIV)) {
    CpuFeatureScope scope(masm(), SUDIV);

    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());

    Label done;
    // Check for x % 0, sdiv might signal an exception. We have to deopt in this
    // case because we can't return a NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
    // want. We have to deopt if we care about -0, because we can't return that.
    if (hmod->CheckFlag(HValue::kCanOverflow)) {
      Label no_overflow_possible;
      __ cmp(left_reg, Operand(kMinInt));
      __ b(ne, &no_overflow_possible);
      __ cmp(right_reg, Operand(-1));
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        // kMinInt % -1 would be -0; deopt.
        DeoptimizeIf(eq, instr->environment());
      } else {
        // Otherwise the mathematically correct result is 0.
        __ b(ne, &no_overflow_possible);
        __ mov(result_reg, Operand::Zero());
        __ jmp(&done);
      }
      __ bind(&no_overflow_possible);
    }

    // For 'r3 = r1 % r2' we can have the following ARM code:
    //   sdiv r3, r1, r2
    //   mls r3, r3, r2, r1
    __ sdiv(result_reg, left_reg, right_reg);
    __ mls(result_reg, result_reg, right_reg, left_reg);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ cmp(result_reg, Operand::Zero());
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(lt, instr->environment());
    }
    __ bind(&done);

  } else {
    // General case, without any SDIV support.
    Register left_reg = ToRegister(instr->left());
    Register right_reg = ToRegister(instr->right());
    Register result_reg = ToRegister(instr->result());
    Register scratch = scratch0();
    ASSERT(!scratch.is(left_reg));
    ASSERT(!scratch.is(right_reg));
    ASSERT(!scratch.is(result_reg));
    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
    ASSERT(!divisor.is(dividend));
    LowDwVfpRegister quotient = double_scratch0();
    ASSERT(!quotient.is(dividend));
    ASSERT(!quotient.is(divisor));

    Label done;
    // Check for x % 0, we have to deopt in this case because we can't return a
    // NaN.
    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
      __ cmp(right_reg, Operand::Zero());
      DeoptimizeIf(eq, instr->environment());
    }

    __ Move(result_reg, left_reg);
    // Load the arguments in VFP registers. The divisor value is preloaded
    // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comments seems to be wrong nowadays.
    __ vmov(double_scratch0().low(), left_reg);
    __ vcvt_f64_s32(dividend, double_scratch0().low());
    __ vmov(double_scratch0().low(), right_reg);
    __ vcvt_f64_s32(divisor, double_scratch0().low());

    // We do not care about the sign of the divisor. Note that we still handle
    // the kMinInt % -1 case correctly, though.
    __ vabs(divisor, divisor);
    // Compute the quotient and round it to a 32bit integer.
    __ vdiv(quotient, dividend, divisor);
    __ vcvt_s32_f64(quotient.low(), quotient);
    __ vcvt_f64_s32(quotient, quotient.low());

    // Compute the remainder in result: result = left - divisor * quotient.
    __ vmul(double_scratch0(), divisor, quotient);
    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
    __ vmov(scratch, double_scratch0().low());
    __ sub(result_reg, left_reg, scratch, SetCC);

    // If we care about -0, test if the dividend is <0 and the result is 0.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ b(ne, &done);
      __ cmp(left_reg, Operand::Zero());
      DeoptimizeIf(mi, instr->environment());
    }
    __ bind(&done);
  }
}
1255 1288
1256 1289
// Emits code for x / d where d is a compile-time power-of-2 divisor
// (kMinInt included). Truncating division via arithmetic shift, with a
// bias added for negative dividends so the shift rounds toward zero.
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor))));
  ASSERT(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr->environment());
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    // mask == |divisor| - 1, written so it does not overflow for kMinInt.
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ tst(dividend, Operand(mask));
    DeoptimizeIf(ne, instr->environment());
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ rsb(result, dividend, Operand(0));
    return;
  }
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift == 0) {
    __ mov(result, dividend);
  } else if (shift == 1) {
    // Bias by the sign bit so ASR rounds toward zero.
    __ add(result, dividend, Operand(dividend, LSR, 31));
  } else {
    // Bias by (2^shift - 1) for negative dividends: the sign replicated by
    // ASR 31 is shifted right logically to produce the bias.
    __ mov(result, Operand(dividend, ASR, 31));
    __ add(result, dividend, Operand(result, LSR, 32 - shift));
  }
  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
  if (divisor < 0) __ rsb(result, result, Operand(0));
}
1271 1332
1272 switch (divisor_abs) {
1273 case 0:
1274 DeoptimizeIf(al, environment);
1275 return;
1276 1333
// Emits code for x / d with a compile-time constant divisor that is not a
// power of two, using a multiply-based division (FlooringDiv). When not all
// uses truncate, deoptimizes unless the division is exact.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  ASSERT(!dividend.is(result));

  // x / 0 always deoptimizes.
  if (divisor == 0) {
    DeoptimizeIf(al, instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmp(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr->environment());
  }

  // Quotient of x / |d| via the multiply trick, corrected by the dividend's
  // sign bit (LSR 31), then negated when the divisor is negative.
  __ FlooringDiv(result, dividend, Abs(divisor));
  __ add(result, result, Operand(dividend, LSR, 31));
  if (divisor < 0) __ rsb(result, result, Operand::Zero());

  // Back-multiply and compare against the dividend; a non-zero difference
  // means the division was inexact, which requires a deopt here.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(ip, Operand(divisor));
    __ smull(scratch0(), ip, result, ip);
    __ sub(scratch0(), scratch0(), dividend, SetCC);
    DeoptimizeIf(ne, instr->environment());
  }
}
1350 1364
1351 void LCodeGen::DoDivI(LDivI* instr) { 1365 void LCodeGen::DoDivI(LDivI* instr) {
1352 if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) { 1366 HBinaryOperation* hdiv = instr->hydrogen();
1353 Register dividend = ToRegister(instr->left()); 1367 Register left = ToRegister(instr->left());
1354 HDiv* hdiv = instr->hydrogen(); 1368 Register right = ToRegister(instr->right());
1355 int32_t divisor = hdiv->right()->GetInteger32Constant(); 1369 Register result = ToRegister(instr->result());
1356 Register result = ToRegister(instr->result());
1357 ASSERT(!result.is(dividend));
1358
1359 // Check for (0 / -x) that will produce negative zero.
1360 if (hdiv->left()->RangeCanInclude(0) && divisor < 0 &&
1361 hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1362 __ cmp(dividend, Operand::Zero());
1363 DeoptimizeIf(eq, instr->environment());
1364 }
1365 // Check for (kMinInt / -1).
1366 if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 &&
1367 hdiv->CheckFlag(HValue::kCanOverflow)) {
1368 __ cmp(dividend, Operand(kMinInt));
1369 DeoptimizeIf(eq, instr->environment());
1370 }
1371 // Deoptimize if remainder will not be 0.
1372 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1373 Abs(divisor) != 1) {
1374 __ tst(dividend, Operand(Abs(divisor) - 1));
1375 DeoptimizeIf(ne, instr->environment());
1376 }
1377 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1378 __ rsb(result, dividend, Operand(0));
1379 return;
1380 }
1381 int32_t shift = WhichPowerOf2(Abs(divisor));
1382 if (shift == 0) {
1383 __ mov(result, dividend);
1384 } else if (shift == 1) {
1385 __ add(result, dividend, Operand(dividend, LSR, 31));
1386 } else {
1387 __ mov(result, Operand(dividend, ASR, 31));
1388 __ add(result, dividend, Operand(result, LSR, 32 - shift));
1389 }
1390 if (shift > 0) __ mov(result, Operand(result, ASR, shift));
1391 if (divisor < 0) __ rsb(result, result, Operand(0));
1392 return;
1393 }
1394
1395 const Register left = ToRegister(instr->left());
1396 const Register right = ToRegister(instr->right());
1397 const Register result = ToRegister(instr->result());
1398 1370
1399 // Check for x / 0. 1371 // Check for x / 0.
1400 if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1372 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1401 __ cmp(right, Operand::Zero()); 1373 __ cmp(right, Operand::Zero());
1402 DeoptimizeIf(eq, instr->environment()); 1374 DeoptimizeIf(eq, instr->environment());
1403 } 1375 }
1404 1376
1405 // Check for (0 / -x) that will produce negative zero. 1377 // Check for (0 / -x) that will produce negative zero.
1406 if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1378 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1407 Label positive; 1379 Label positive;
1408 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { 1380 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
1409 // Do the test only if it hadn't be done above. 1381 // Do the test only if it hadn't be done above.
1410 __ cmp(right, Operand::Zero()); 1382 __ cmp(right, Operand::Zero());
1411 } 1383 }
1412 __ b(pl, &positive); 1384 __ b(pl, &positive);
1413 __ cmp(left, Operand::Zero()); 1385 __ cmp(left, Operand::Zero());
1414 DeoptimizeIf(eq, instr->environment()); 1386 DeoptimizeIf(eq, instr->environment());
1415 __ bind(&positive); 1387 __ bind(&positive);
1416 } 1388 }
1417 1389
1418 // Check for (kMinInt / -1). 1390 // Check for (kMinInt / -1).
1419 if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow) && 1391 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1420 (!CpuFeatures::IsSupported(SUDIV) || 1392 (!CpuFeatures::IsSupported(SUDIV) ||
1421 !instr->hydrogen_value()->CheckFlag( 1393 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
1422 HValue::kAllUsesTruncatingToInt32))) {
1423 // We don't need to check for overflow when truncating with sdiv 1394 // We don't need to check for overflow when truncating with sdiv
1424 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. 1395 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
1425 __ cmp(left, Operand(kMinInt)); 1396 __ cmp(left, Operand(kMinInt));
1426 __ cmp(right, Operand(-1), eq); 1397 __ cmp(right, Operand(-1), eq);
1427 DeoptimizeIf(eq, instr->environment()); 1398 DeoptimizeIf(eq, instr->environment());
1428 } 1399 }
1429 1400
1430 if (CpuFeatures::IsSupported(SUDIV)) { 1401 if (CpuFeatures::IsSupported(SUDIV)) {
1431 CpuFeatureScope scope(masm(), SUDIV); 1402 CpuFeatureScope scope(masm(), SUDIV);
1432 __ sdiv(result, left, right); 1403 __ sdiv(result, left, right);
1433
1434 if (!instr->hydrogen_value()->CheckFlag(
1435 HInstruction::kAllUsesTruncatingToInt32)) {
1436 // Compute remainder and deopt if it's not zero.
1437 const Register remainder = scratch0();
1438 __ mls(remainder, result, right, left);
1439 __ cmp(remainder, Operand::Zero());
1440 DeoptimizeIf(ne, instr->environment());
1441 }
1442 } else { 1404 } else {
1443 const DoubleRegister vleft = ToDoubleRegister(instr->temp()); 1405 DoubleRegister vleft = ToDoubleRegister(instr->temp());
1444 const DoubleRegister vright = double_scratch0(); 1406 DoubleRegister vright = double_scratch0();
1445 __ vmov(double_scratch0().low(), left); 1407 __ vmov(double_scratch0().low(), left);
1446 __ vcvt_f64_s32(vleft, double_scratch0().low()); 1408 __ vcvt_f64_s32(vleft, double_scratch0().low());
1447 __ vmov(double_scratch0().low(), right); 1409 __ vmov(double_scratch0().low(), right);
1448 __ vcvt_f64_s32(vright, double_scratch0().low()); 1410 __ vcvt_f64_s32(vright, double_scratch0().low());
1449 __ vdiv(vleft, vleft, vright); // vleft now contains the result. 1411 __ vdiv(vleft, vleft, vright); // vleft now contains the result.
1450 __ vcvt_s32_f64(double_scratch0().low(), vleft); 1412 __ vcvt_s32_f64(double_scratch0().low(), vleft);
1451 __ vmov(result, double_scratch0().low()); 1413 __ vmov(result, double_scratch0().low());
1414 }
1452 1415
1453 if (!instr->hydrogen_value()->CheckFlag( 1416 if (hdiv->IsMathFloorOfDiv()) {
1454 HInstruction::kAllUsesTruncatingToInt32)) { 1417 Label done;
1455 // Deopt if exact conversion to integer was not possible. 1418 Register remainder = scratch0();
1456 // Use vright as scratch register. 1419 __ mls(remainder, result, right, left);
1457 __ vcvt_f64_s32(double_scratch0(), double_scratch0().low()); 1420 __ cmp(remainder, Operand::Zero());
1458 __ VFPCompareAndSetFlags(vleft, double_scratch0()); 1421 __ b(eq, &done);
1459 DeoptimizeIf(ne, instr->environment()); 1422 __ eor(remainder, remainder, Operand(right));
1460 } 1423 __ add(result, result, Operand(remainder, ASR, 31));
1424 __ bind(&done);
1425 } else if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1426 // Compute remainder and deopt if it's not zero.
1427 Register remainder = scratch0();
1428 __ mls(remainder, result, right, left);
1429 __ cmp(remainder, Operand::Zero());
1430 DeoptimizeIf(ne, instr->environment());
1461 } 1431 }
1462 } 1432 }
1463 1433
1464 1434
1465 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { 1435 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1466 DwVfpRegister addend = ToDoubleRegister(instr->addend()); 1436 DwVfpRegister addend = ToDoubleRegister(instr->addend());
1467 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); 1437 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1468 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1438 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1469 1439
1470 // This is computed in-place. 1440 // This is computed in-place.
1471 ASSERT(addend.is(ToDoubleRegister(instr->result()))); 1441 ASSERT(addend.is(ToDoubleRegister(instr->result())));
1472 1442
1473 __ vmla(addend, multiplier, multiplicand); 1443 __ vmla(addend, multiplier, multiplicand);
1474 } 1444 }
1475 1445
1476 1446
1477 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { 1447 void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
1478 DwVfpRegister minuend = ToDoubleRegister(instr->minuend()); 1448 DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
1479 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); 1449 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
1480 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1450 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1481 1451
1482 // This is computed in-place. 1452 // This is computed in-place.
1483 ASSERT(minuend.is(ToDoubleRegister(instr->result()))); 1453 ASSERT(minuend.is(ToDoubleRegister(instr->result())));
1484 1454
1485 __ vmls(minuend, multiplier, multiplicand); 1455 __ vmls(minuend, multiplier, multiplicand);
1486 } 1456 }
1487 1457
1488 1458
1489 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { 1459 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1490 const Register result = ToRegister(instr->result()); 1460 Register dividend = ToRegister(instr->dividend());
1491 const Register left = ToRegister(instr->left()); 1461 Register result = ToRegister(instr->result());
1492 const Register remainder = ToRegister(instr->temp()); 1462 int32_t divisor = instr->divisor();
1493 const Register scratch = scratch0();
1494 1463
1495 if (!CpuFeatures::IsSupported(SUDIV)) { 1464 // If the divisor is positive, things are easy: There can be no deopts and we
1496 // If the CPU doesn't support sdiv instruction, we only optimize when we 1465 // can simply do an arithmetic right shift.
1497 // have magic numbers for the divisor. The standard integer division routine 1466 if (divisor == 1) return;
1498 // is usually slower than transitionning to VFP. 1467 int32_t shift = WhichPowerOf2Abs(divisor);
1499 ASSERT(instr->right()->IsConstantOperand()); 1468 if (divisor > 1) {
1500 int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); 1469 __ mov(result, Operand(dividend, ASR, shift));
1501 ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); 1470 return;
1502 if (divisor < 0) { 1471 }
1503 __ cmp(left, Operand::Zero()); 1472
1504 DeoptimizeIf(eq, instr->environment()); 1473 // If the divisor is negative, we have to negate and handle edge cases.
1474 __ rsb(result, dividend, Operand::Zero(), SetCC);
1475 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1476 DeoptimizeIf(eq, instr->environment());
1477 }
1478 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1479 // Note that we could emit branch-free code, but that would need one more
1480 // register.
1481 if (divisor == -1) {
1482 DeoptimizeIf(vs, instr->environment());
1483 __ mov(result, Operand(dividend, ASR, shift));
1484 } else {
1485 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
1486 __ mov(result, Operand(dividend, ASR, shift), LeaveCC, vc);
1505 } 1487 }
1506 EmitSignedIntegerDivisionByConstant(result,
1507 left,
1508 divisor,
1509 remainder,
1510 scratch,
1511 instr->environment());
1512 // We performed a truncating division. Correct the result if necessary.
1513 __ cmp(remainder, Operand::Zero());
1514 __ teq(remainder, Operand(divisor), ne);
1515 __ sub(result, result, Operand(1), LeaveCC, mi);
1516 } else { 1488 } else {
1517 CpuFeatureScope scope(masm(), SUDIV); 1489 __ mov(result, Operand(dividend, ASR, shift));
1518 const Register right = ToRegister(instr->right());
1519
1520 // Check for x / 0.
1521 __ cmp(right, Operand::Zero());
1522 DeoptimizeIf(eq, instr->environment());
1523
1524 // Check for (kMinInt / -1).
1525 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
1526 __ cmp(left, Operand(kMinInt));
1527 __ cmp(right, Operand(-1), eq);
1528 DeoptimizeIf(eq, instr->environment());
1529 }
1530
1531 // Check for (0 / -x) that will produce negative zero.
1532 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1533 __ cmp(right, Operand::Zero());
1534 __ cmp(left, Operand::Zero(), mi);
1535 // "right" can't be null because the code would have already been
1536 // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
1537 // In this case we need to deoptimize to produce a -0.
1538 DeoptimizeIf(eq, instr->environment());
1539 }
1540
1541 Label done;
1542 __ sdiv(result, left, right);
1543 // If both operands have the same sign then we are done.
1544 __ eor(remainder, left, Operand(right), SetCC);
1545 __ b(pl, &done);
1546
1547 // Check if the result needs to be corrected.
1548 __ mls(remainder, result, right, left);
1549 __ cmp(remainder, Operand::Zero());
1550 __ sub(result, result, Operand(1), LeaveCC, ne);
1551
1552 __ bind(&done);
1553 } 1490 }
1554 } 1491 }
1555 1492
1556 1493
1494 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1495 Register dividend = ToRegister(instr->dividend());
1496 int32_t divisor = instr->divisor();
1497 Register result = ToRegister(instr->result());
1498 ASSERT(!dividend.is(result));
1499
1500 if (divisor == 0) {
1501 DeoptimizeIf(al, instr->environment());
1502 return;
1503 }
1504
1505 // Check for (0 / -x) that will produce negative zero.
1506 HMathFloorOfDiv* hdiv = instr->hydrogen();
1507 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1508 __ cmp(dividend, Operand::Zero());
1509 DeoptimizeIf(eq, instr->environment());
1510 }
1511
1512 __ FlooringDiv(result, dividend, divisor);
1513 }
1514
1515
1557 void LCodeGen::DoMulI(LMulI* instr) { 1516 void LCodeGen::DoMulI(LMulI* instr) {
1558 Register result = ToRegister(instr->result()); 1517 Register result = ToRegister(instr->result());
1559 // Note that result may alias left. 1518 // Note that result may alias left.
1560 Register left = ToRegister(instr->left()); 1519 Register left = ToRegister(instr->left());
1561 LOperand* right_op = instr->right(); 1520 LOperand* right_op = instr->right();
1562 1521
1563 bool bailout_on_minus_zero = 1522 bool bailout_on_minus_zero =
1564 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1523 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1565 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1524 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1566 1525
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
1662 1621
1663 1622
1664 void LCodeGen::DoBitI(LBitI* instr) { 1623 void LCodeGen::DoBitI(LBitI* instr) {
1665 LOperand* left_op = instr->left(); 1624 LOperand* left_op = instr->left();
1666 LOperand* right_op = instr->right(); 1625 LOperand* right_op = instr->right();
1667 ASSERT(left_op->IsRegister()); 1626 ASSERT(left_op->IsRegister());
1668 Register left = ToRegister(left_op); 1627 Register left = ToRegister(left_op);
1669 Register result = ToRegister(instr->result()); 1628 Register result = ToRegister(instr->result());
1670 Operand right(no_reg); 1629 Operand right(no_reg);
1671 1630
1672 if (right_op->IsStackSlot() || right_op->IsArgument()) { 1631 if (right_op->IsStackSlot()) {
1673 right = Operand(EmitLoadRegister(right_op, ip)); 1632 right = Operand(EmitLoadRegister(right_op, ip));
1674 } else { 1633 } else {
1675 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand()); 1634 ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
1676 right = ToOperand(right_op); 1635 right = ToOperand(right_op);
1677 } 1636 }
1678 1637
1679 switch (instr->op()) { 1638 switch (instr->op()) {
1680 case Token::BIT_AND: 1639 case Token::BIT_AND:
1681 __ and_(result, left, right); 1640 __ and_(result, left, right);
1682 break; 1641 break;
(...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after
1785 } 1744 }
1786 1745
1787 1746
1788 void LCodeGen::DoSubI(LSubI* instr) { 1747 void LCodeGen::DoSubI(LSubI* instr) {
1789 LOperand* left = instr->left(); 1748 LOperand* left = instr->left();
1790 LOperand* right = instr->right(); 1749 LOperand* right = instr->right();
1791 LOperand* result = instr->result(); 1750 LOperand* result = instr->result();
1792 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1751 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1793 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1752 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1794 1753
1795 if (right->IsStackSlot() || right->IsArgument()) { 1754 if (right->IsStackSlot()) {
1796 Register right_reg = EmitLoadRegister(right, ip); 1755 Register right_reg = EmitLoadRegister(right, ip);
1797 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1756 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1798 } else { 1757 } else {
1799 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1758 ASSERT(right->IsRegister() || right->IsConstantOperand());
1800 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1759 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1801 } 1760 }
1802 1761
1803 if (can_overflow) { 1762 if (can_overflow) {
1804 DeoptimizeIf(vs, instr->environment()); 1763 DeoptimizeIf(vs, instr->environment());
1805 } 1764 }
1806 } 1765 }
1807 1766
1808 1767
1809 void LCodeGen::DoRSubI(LRSubI* instr) { 1768 void LCodeGen::DoRSubI(LRSubI* instr) {
1810 LOperand* left = instr->left(); 1769 LOperand* left = instr->left();
1811 LOperand* right = instr->right(); 1770 LOperand* right = instr->right();
1812 LOperand* result = instr->result(); 1771 LOperand* result = instr->result();
1813 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1772 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1814 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1773 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1815 1774
1816 if (right->IsStackSlot() || right->IsArgument()) { 1775 if (right->IsStackSlot()) {
1817 Register right_reg = EmitLoadRegister(right, ip); 1776 Register right_reg = EmitLoadRegister(right, ip);
1818 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1777 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1819 } else { 1778 } else {
1820 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1779 ASSERT(right->IsRegister() || right->IsConstantOperand());
1821 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1780 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1822 } 1781 }
1823 1782
1824 if (can_overflow) { 1783 if (can_overflow) {
1825 DeoptimizeIf(vs, instr->environment()); 1784 DeoptimizeIf(vs, instr->environment());
1826 } 1785 }
(...skipping 152 matching lines...) Expand 10 before | Expand all | Expand 10 after
1979 } 1938 }
1980 1939
1981 1940
1982 void LCodeGen::DoAddI(LAddI* instr) { 1941 void LCodeGen::DoAddI(LAddI* instr) {
1983 LOperand* left = instr->left(); 1942 LOperand* left = instr->left();
1984 LOperand* right = instr->right(); 1943 LOperand* right = instr->right();
1985 LOperand* result = instr->result(); 1944 LOperand* result = instr->result();
1986 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1945 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1987 SBit set_cond = can_overflow ? SetCC : LeaveCC; 1946 SBit set_cond = can_overflow ? SetCC : LeaveCC;
1988 1947
1989 if (right->IsStackSlot() || right->IsArgument()) { 1948 if (right->IsStackSlot()) {
1990 Register right_reg = EmitLoadRegister(right, ip); 1949 Register right_reg = EmitLoadRegister(right, ip);
1991 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); 1950 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
1992 } else { 1951 } else {
1993 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1952 ASSERT(right->IsRegister() || right->IsConstantOperand());
1994 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); 1953 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
1995 } 1954 }
1996 1955
1997 if (can_overflow) { 1956 if (can_overflow) {
1998 DeoptimizeIf(vs, instr->environment()); 1957 DeoptimizeIf(vs, instr->environment());
1999 } 1958 }
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after
2105 ASSERT(ToRegister(instr->context()).is(cp)); 2064 ASSERT(ToRegister(instr->context()).is(cp));
2106 ASSERT(ToRegister(instr->left()).is(r1)); 2065 ASSERT(ToRegister(instr->left()).is(r1));
2107 ASSERT(ToRegister(instr->right()).is(r0)); 2066 ASSERT(ToRegister(instr->right()).is(r0));
2108 ASSERT(ToRegister(instr->result()).is(r0)); 2067 ASSERT(ToRegister(instr->result()).is(r0));
2109 2068
2110 BinaryOpICStub stub(instr->op(), NO_OVERWRITE); 2069 BinaryOpICStub stub(instr->op(), NO_OVERWRITE);
2111 // Block literal pool emission to ensure nop indicating no inlined smi code 2070 // Block literal pool emission to ensure nop indicating no inlined smi code
2112 // is in the correct position. 2071 // is in the correct position.
2113 Assembler::BlockConstPoolScope block_const_pool(masm()); 2072 Assembler::BlockConstPoolScope block_const_pool(masm());
2114 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 2073 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
2115 __ nop(); // Signals no inlined code.
2116 } 2074 }
2117 2075
2118 2076
2119 template<class InstrType> 2077 template<class InstrType>
2120 void LCodeGen::EmitBranch(InstrType instr, Condition condition) { 2078 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
2121 int left_block = instr->TrueDestination(chunk_); 2079 int left_block = instr->TrueDestination(chunk_);
2122 int right_block = instr->FalseDestination(chunk_); 2080 int right_block = instr->FalseDestination(chunk_);
2123 2081
2124 int next_block = GetNextEmittedBlock(); 2082 int next_block = GetNextEmittedBlock();
2125 2083
(...skipping 603 matching lines...) Expand 10 before | Expand all | Expand 10 after
2729 }; 2687 };
2730 2688
2731 DeferredInstanceOfKnownGlobal* deferred; 2689 DeferredInstanceOfKnownGlobal* deferred;
2732 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); 2690 deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
2733 2691
2734 Label done, false_result; 2692 Label done, false_result;
2735 Register object = ToRegister(instr->value()); 2693 Register object = ToRegister(instr->value());
2736 Register temp = ToRegister(instr->temp()); 2694 Register temp = ToRegister(instr->temp());
2737 Register result = ToRegister(instr->result()); 2695 Register result = ToRegister(instr->result());
2738 2696
2739 ASSERT(object.is(r0));
2740 ASSERT(result.is(r0));
2741
2742 // A Smi is not instance of anything. 2697 // A Smi is not instance of anything.
2743 __ JumpIfSmi(object, &false_result); 2698 __ JumpIfSmi(object, &false_result);
2744 2699
2745 // This is the inlined call site instanceof cache. The two occurences of the 2700 // This is the inlined call site instanceof cache. The two occurences of the
2746 // hole value will be patched to the last map/result pair generated by the 2701 // hole value will be patched to the last map/result pair generated by the
2747 // instanceof stub. 2702 // instanceof stub.
2748 Label cache_miss; 2703 Label cache_miss;
2749 Register map = temp; 2704 Register map = temp;
2750 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2705 __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
2751 { 2706 {
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
2789 2744
2790 // Here result has either true or false. Deferred code also produces true or 2745 // Here result has either true or false. Deferred code also produces true or
2791 // false object. 2746 // false object.
2792 __ bind(deferred->exit()); 2747 __ bind(deferred->exit());
2793 __ bind(&done); 2748 __ bind(&done);
2794 } 2749 }
2795 2750
2796 2751
2797 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, 2752 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
2798 Label* map_check) { 2753 Label* map_check) {
2799 Register result = ToRegister(instr->result());
2800 ASSERT(result.is(r0));
2801
2802 InstanceofStub::Flags flags = InstanceofStub::kNoFlags; 2754 InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
2803 flags = static_cast<InstanceofStub::Flags>( 2755 flags = static_cast<InstanceofStub::Flags>(
2804 flags | InstanceofStub::kArgsInRegisters); 2756 flags | InstanceofStub::kArgsInRegisters);
2805 flags = static_cast<InstanceofStub::Flags>( 2757 flags = static_cast<InstanceofStub::Flags>(
2806 flags | InstanceofStub::kCallSiteInlineCheck); 2758 flags | InstanceofStub::kCallSiteInlineCheck);
2807 flags = static_cast<InstanceofStub::Flags>( 2759 flags = static_cast<InstanceofStub::Flags>(
2808 flags | InstanceofStub::kReturnTrueFalseObject); 2760 flags | InstanceofStub::kReturnTrueFalseObject);
2809 InstanceofStub stub(flags); 2761 InstanceofStub stub(flags);
2810 2762
2811 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 2763 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2812 LoadContextFromDeferred(instr->context()); 2764 LoadContextFromDeferred(instr->context());
2813 2765
2814 // Get the temp register reserved by the instruction. This needs to be r4 as
2815 // its slot of the pushing of safepoint registers is used to communicate the
2816 // offset to the location of the map check.
2817 Register temp = ToRegister(instr->temp());
2818 ASSERT(temp.is(r4));
2819 __ Move(InstanceofStub::right(), instr->function()); 2766 __ Move(InstanceofStub::right(), instr->function());
2820 static const int kAdditionalDelta = 5; 2767 static const int kAdditionalDelta = 4;
2821 // Make sure that code size is predicable, since we use specific constants 2768 // Make sure that code size is predicable, since we use specific constants
2822 // offsets in the code to find embedded values.. 2769 // offsets in the code to find embedded values..
2823 PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize); 2770 PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
2824 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; 2771 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2825 Label before_push_delta; 2772 Label before_push_delta;
2826 __ bind(&before_push_delta); 2773 __ bind(&before_push_delta);
2827 __ BlockConstPoolFor(kAdditionalDelta); 2774 __ BlockConstPoolFor(kAdditionalDelta);
2828 __ mov(temp, Operand(delta * kPointerSize)); 2775 // r5 is used to communicate the offset to the location of the map check.
2776 __ mov(r5, Operand(delta * kPointerSize));
2829 // The mov above can generate one or two instructions. The delta was computed 2777 // The mov above can generate one or two instructions. The delta was computed
2830 // for two instructions, so we need to pad here in case of one instruction. 2778 // for two instructions, so we need to pad here in case of one instruction.
2831 if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) { 2779 if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
2832 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta)); 2780 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
2833 __ nop(); 2781 __ nop();
2834 } 2782 }
2835 __ StoreToSafepointRegisterSlot(temp, temp);
2836 CallCodeGeneric(stub.GetCode(isolate()), 2783 CallCodeGeneric(stub.GetCode(isolate()),
2837 RelocInfo::CODE_TARGET, 2784 RelocInfo::CODE_TARGET,
2838 instr, 2785 instr,
2839 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2786 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2840 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2787 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2841 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2788 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2842 // Put the result value into the result register slot and 2789 // Put the result value (r0) into the result register slot and
2843 // restore all registers. 2790 // restore all registers.
2844 __ StoreToSafepointRegisterSlot(result, result); 2791 __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
2845 } 2792 }
2846 2793
2847 2794
2848 void LCodeGen::DoCmpT(LCmpT* instr) { 2795 void LCodeGen::DoCmpT(LCmpT* instr) {
2849 ASSERT(ToRegister(instr->context()).is(cp)); 2796 ASSERT(ToRegister(instr->context()).is(cp));
2850 Token::Value op = instr->op(); 2797 Token::Value op = instr->op();
2851 2798
2852 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); 2799 Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
2853 CallCode(ic, RelocInfo::CODE_TARGET, instr); 2800 CallCode(ic, RelocInfo::CODE_TARGET, instr);
2854 // This instruction also signals no smi code inlined. 2801 // This instruction also signals no smi code inlined.
(...skipping 357 matching lines...) Expand 10 before | Expand all | Expand 10 after
3212 case FLOAT64_ELEMENTS: 3159 case FLOAT64_ELEMENTS:
3213 case EXTERNAL_FLOAT32_ELEMENTS: 3160 case EXTERNAL_FLOAT32_ELEMENTS:
3214 case EXTERNAL_FLOAT64_ELEMENTS: 3161 case EXTERNAL_FLOAT64_ELEMENTS:
3215 case FAST_HOLEY_DOUBLE_ELEMENTS: 3162 case FAST_HOLEY_DOUBLE_ELEMENTS:
3216 case FAST_HOLEY_ELEMENTS: 3163 case FAST_HOLEY_ELEMENTS:
3217 case FAST_HOLEY_SMI_ELEMENTS: 3164 case FAST_HOLEY_SMI_ELEMENTS:
3218 case FAST_DOUBLE_ELEMENTS: 3165 case FAST_DOUBLE_ELEMENTS:
3219 case FAST_ELEMENTS: 3166 case FAST_ELEMENTS:
3220 case FAST_SMI_ELEMENTS: 3167 case FAST_SMI_ELEMENTS:
3221 case DICTIONARY_ELEMENTS: 3168 case DICTIONARY_ELEMENTS:
3222 case NON_STRICT_ARGUMENTS_ELEMENTS: 3169 case SLOPPY_ARGUMENTS_ELEMENTS:
3223 UNREACHABLE(); 3170 UNREACHABLE();
3224 break; 3171 break;
3225 } 3172 }
3226 } 3173 }
3227 } 3174 }
3228 3175
3229 3176
3230 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { 3177 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
3231 Register elements = ToRegister(instr->elements()); 3178 Register elements = ToRegister(instr->elements());
3232 bool key_is_constant = instr->key()->IsConstantOperand(); 3179 bool key_is_constant = instr->key()->IsConstantOperand();
(...skipping 725 matching lines...) Expand 10 before | Expand all | Expand 10 after
3958 } 3905 }
3959 3906
3960 3907
3961 void LCodeGen::DoCallNew(LCallNew* instr) { 3908 void LCodeGen::DoCallNew(LCallNew* instr) {
3962 ASSERT(ToRegister(instr->context()).is(cp)); 3909 ASSERT(ToRegister(instr->context()).is(cp));
3963 ASSERT(ToRegister(instr->constructor()).is(r1)); 3910 ASSERT(ToRegister(instr->constructor()).is(r1));
3964 ASSERT(ToRegister(instr->result()).is(r0)); 3911 ASSERT(ToRegister(instr->result()).is(r0));
3965 3912
3966 __ mov(r0, Operand(instr->arity())); 3913 __ mov(r0, Operand(instr->arity()));
3967 // No cell in r2 for construct type feedback in optimized code 3914 // No cell in r2 for construct type feedback in optimized code
3968 Handle<Object> undefined_value(isolate()->factory()->undefined_value()); 3915 Handle<Object> megamorphic_symbol =
3969 __ mov(r2, Operand(undefined_value)); 3916 TypeFeedbackInfo::MegamorphicSentinel(isolate());
3917 __ mov(r2, Operand(megamorphic_symbol));
3970 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); 3918 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
3971 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 3919 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3972 } 3920 }
3973 3921
3974 3922
3975 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { 3923 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
3976 ASSERT(ToRegister(instr->context()).is(cp)); 3924 ASSERT(ToRegister(instr->context()).is(cp));
3977 ASSERT(ToRegister(instr->constructor()).is(r1)); 3925 ASSERT(ToRegister(instr->constructor()).is(r1));
3978 ASSERT(ToRegister(instr->result()).is(r0)); 3926 ASSERT(ToRegister(instr->result()).is(r0));
3979 3927
3980 __ mov(r0, Operand(instr->arity())); 3928 __ mov(r0, Operand(instr->arity()));
3981 __ mov(r2, Operand(factory()->undefined_value())); 3929 __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
3982 ElementsKind kind = instr->hydrogen()->elements_kind(); 3930 ElementsKind kind = instr->hydrogen()->elements_kind();
3983 AllocationSiteOverrideMode override_mode = 3931 AllocationSiteOverrideMode override_mode =
3984 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) 3932 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
3985 ? DISABLE_ALLOCATION_SITES 3933 ? DISABLE_ALLOCATION_SITES
3986 : DONT_OVERRIDE; 3934 : DONT_OVERRIDE;
3987 3935
3988 if (instr->arity() == 0) { 3936 if (instr->arity() == 0) {
3989 ArrayNoArgumentConstructorStub stub(kind, override_mode); 3937 ArrayNoArgumentConstructorStub stub(kind, override_mode);
3990 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); 3938 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
3991 } else if (instr->arity() == 1) { 3939 } else if (instr->arity() == 1) {
(...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after
4055 MemOperand operand = MemOperand(object, offset); 4003 MemOperand operand = MemOperand(object, offset);
4056 __ Store(value, operand, representation); 4004 __ Store(value, operand, representation);
4057 return; 4005 return;
4058 } 4006 }
4059 4007
4060 Handle<Map> transition = instr->transition(); 4008 Handle<Map> transition = instr->transition();
4061 SmiCheck check_needed = 4009 SmiCheck check_needed =
4062 instr->hydrogen()->value()->IsHeapObject() 4010 instr->hydrogen()->value()->IsHeapObject()
4063 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 4011 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
4064 4012
4065 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { 4013 if (representation.IsHeapObject()) {
4066 Register value = ToRegister(instr->value()); 4014 Register value = ToRegister(instr->value());
4067 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4015 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4068 __ SmiTst(value); 4016 __ SmiTst(value);
4069 DeoptimizeIf(eq, instr->environment()); 4017 DeoptimizeIf(eq, instr->environment());
4070 4018
4071 // We know that value is a smi now, so we can omit the check below. 4019 // We know that value is a smi now, so we can omit the check below.
4072 check_needed = OMIT_SMI_CHECK; 4020 check_needed = OMIT_SMI_CHECK;
4073 } 4021 }
4074 } else if (representation.IsDouble()) { 4022 } else if (representation.IsDouble()) {
4075 ASSERT(transition.is_null()); 4023 ASSERT(transition.is_null());
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
4133 } 4081 }
4134 4082
4135 4083
4136 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { 4084 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
4137 ASSERT(ToRegister(instr->context()).is(cp)); 4085 ASSERT(ToRegister(instr->context()).is(cp));
4138 ASSERT(ToRegister(instr->object()).is(r1)); 4086 ASSERT(ToRegister(instr->object()).is(r1));
4139 ASSERT(ToRegister(instr->value()).is(r0)); 4087 ASSERT(ToRegister(instr->value()).is(r0));
4140 4088
4141 // Name is always in r2. 4089 // Name is always in r2.
4142 __ mov(r2, Operand(instr->name())); 4090 __ mov(r2, Operand(instr->name()));
4143 Handle<Code> ic = StoreIC::initialize_stub(isolate(), 4091 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
4144 instr->strict_mode_flag());
4145 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4092 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4146 } 4093 }
4147 4094
4148 4095
4149 void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) { 4096 void LCodeGen::ApplyCheckIf(Condition condition, LBoundsCheck* check) {
4150 if (FLAG_debug_code && check->hydrogen()->skip_check()) { 4097 if (FLAG_debug_code && check->hydrogen()->skip_check()) {
4151 Label done; 4098 Label done;
4152 __ b(NegateCondition(condition), &done); 4099 __ b(NegateCondition(condition), &done);
4153 __ stop("eliminated bounds check failed"); 4100 __ stop("eliminated bounds check failed");
4154 __ bind(&done); 4101 __ bind(&done);
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
4255 case FLOAT64_ELEMENTS: 4202 case FLOAT64_ELEMENTS:
4256 case EXTERNAL_FLOAT32_ELEMENTS: 4203 case EXTERNAL_FLOAT32_ELEMENTS:
4257 case EXTERNAL_FLOAT64_ELEMENTS: 4204 case EXTERNAL_FLOAT64_ELEMENTS:
4258 case FAST_DOUBLE_ELEMENTS: 4205 case FAST_DOUBLE_ELEMENTS:
4259 case FAST_ELEMENTS: 4206 case FAST_ELEMENTS:
4260 case FAST_SMI_ELEMENTS: 4207 case FAST_SMI_ELEMENTS:
4261 case FAST_HOLEY_DOUBLE_ELEMENTS: 4208 case FAST_HOLEY_DOUBLE_ELEMENTS:
4262 case FAST_HOLEY_ELEMENTS: 4209 case FAST_HOLEY_ELEMENTS:
4263 case FAST_HOLEY_SMI_ELEMENTS: 4210 case FAST_HOLEY_SMI_ELEMENTS:
4264 case DICTIONARY_ELEMENTS: 4211 case DICTIONARY_ELEMENTS:
4265 case NON_STRICT_ARGUMENTS_ELEMENTS: 4212 case SLOPPY_ARGUMENTS_ELEMENTS:
4266 UNREACHABLE(); 4213 UNREACHABLE();
4267 break; 4214 break;
4268 } 4215 }
4269 } 4216 }
4270 } 4217 }
4271 4218
4272 4219
4273 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4220 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4274 DwVfpRegister value = ToDoubleRegister(instr->value()); 4221 DwVfpRegister value = ToDoubleRegister(instr->value());
4275 Register elements = ToRegister(instr->elements()); 4222 Register elements = ToRegister(instr->elements());
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after
4371 } 4318 }
4372 } 4319 }
4373 4320
4374 4321
4375 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { 4322 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
4376 ASSERT(ToRegister(instr->context()).is(cp)); 4323 ASSERT(ToRegister(instr->context()).is(cp));
4377 ASSERT(ToRegister(instr->object()).is(r2)); 4324 ASSERT(ToRegister(instr->object()).is(r2));
4378 ASSERT(ToRegister(instr->key()).is(r1)); 4325 ASSERT(ToRegister(instr->key()).is(r1));
4379 ASSERT(ToRegister(instr->value()).is(r0)); 4326 ASSERT(ToRegister(instr->value()).is(r0));
4380 4327
4381 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) 4328 Handle<Code> ic = instr->strict_mode() == STRICT
4382 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() 4329 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
4383 : isolate()->builtins()->KeyedStoreIC_Initialize(); 4330 : isolate()->builtins()->KeyedStoreIC_Initialize();
4384 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); 4331 CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
4385 } 4332 }
4386 4333
4387 4334
4388 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { 4335 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
4389 Register object_reg = ToRegister(instr->object()); 4336 Register object_reg = ToRegister(instr->object());
4390 Register scratch = scratch0(); 4337 Register scratch = scratch0();
4391 4338
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
4557 Register scratch = scratch0(); 4504 Register scratch = scratch0();
4558 __ ldr(scratch, ToMemOperand(input)); 4505 __ ldr(scratch, ToMemOperand(input));
4559 __ vmov(single_scratch, scratch); 4506 __ vmov(single_scratch, scratch);
4560 } else { 4507 } else {
4561 __ vmov(single_scratch, ToRegister(input)); 4508 __ vmov(single_scratch, ToRegister(input));
4562 } 4509 }
4563 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); 4510 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4564 } 4511 }
4565 4512
4566 4513
4567 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
4568 LOperand* input = instr->value();
4569 LOperand* output = instr->result();
4570 ASSERT(output->IsRegister());
4571 if (!instr->hydrogen()->value()->HasRange() ||
4572 !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4573 __ SmiTag(ToRegister(output), ToRegister(input), SetCC);
4574 DeoptimizeIf(vs, instr->environment());
4575 } else {
4576 __ SmiTag(ToRegister(output), ToRegister(input));
4577 }
4578 }
4579
4580
4581 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4514 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4582 LOperand* input = instr->value(); 4515 LOperand* input = instr->value();
4583 LOperand* output = instr->result(); 4516 LOperand* output = instr->result();
4584 4517
4585 SwVfpRegister flt_scratch = double_scratch0().low(); 4518 SwVfpRegister flt_scratch = double_scratch0().low();
4586 __ vmov(flt_scratch, ToRegister(input)); 4519 __ vmov(flt_scratch, ToRegister(input));
4587 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); 4520 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4588 } 4521 }
4589 4522
4590 4523
4591 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
4592 LOperand* input = instr->value();
4593 LOperand* output = instr->result();
4594 if (!instr->hydrogen()->value()->HasRange() ||
4595 !instr->hydrogen()->value()->range()->IsInSmiRange()) {
4596 __ tst(ToRegister(input), Operand(0xc0000000));
4597 DeoptimizeIf(ne, instr->environment());
4598 }
4599 __ SmiTag(ToRegister(output), ToRegister(input));
4600 }
4601
4602
4603 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4524 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
4604 class DeferredNumberTagI V8_FINAL : public LDeferredCode { 4525 class DeferredNumberTagI V8_FINAL : public LDeferredCode {
4605 public: 4526 public:
4606 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) 4527 DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
4607 : LDeferredCode(codegen), instr_(instr) { } 4528 : LDeferredCode(codegen), instr_(instr) { }
4608 virtual void Generate() V8_OVERRIDE { 4529 virtual void Generate() V8_OVERRIDE {
4609 codegen()->DoDeferredNumberTagI(instr_, 4530 codegen()->DoDeferredNumberTagIU(instr_,
4610 instr_->value(), 4531 instr_->value(),
4611 SIGNED_INT32); 4532 instr_->temp1(),
4533 instr_->temp2(),
4534 SIGNED_INT32);
4612 } 4535 }
4613 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4536 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4614 private: 4537 private:
4615 LNumberTagI* instr_; 4538 LNumberTagI* instr_;
4616 }; 4539 };
4617 4540
4618 Register src = ToRegister(instr->value()); 4541 Register src = ToRegister(instr->value());
4619 Register dst = ToRegister(instr->result()); 4542 Register dst = ToRegister(instr->result());
4620 4543
4621 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); 4544 DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
4622 __ SmiTag(dst, src, SetCC); 4545 __ SmiTag(dst, src, SetCC);
4623 __ b(vs, deferred->entry()); 4546 __ b(vs, deferred->entry());
4624 __ bind(deferred->exit()); 4547 __ bind(deferred->exit());
4625 } 4548 }
4626 4549
4627 4550
4628 void LCodeGen::DoNumberTagU(LNumberTagU* instr) { 4551 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
4629 class DeferredNumberTagU V8_FINAL : public LDeferredCode { 4552 class DeferredNumberTagU V8_FINAL : public LDeferredCode {
4630 public: 4553 public:
4631 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) 4554 DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
4632 : LDeferredCode(codegen), instr_(instr) { } 4555 : LDeferredCode(codegen), instr_(instr) { }
4633 virtual void Generate() V8_OVERRIDE { 4556 virtual void Generate() V8_OVERRIDE {
4634 codegen()->DoDeferredNumberTagI(instr_, 4557 codegen()->DoDeferredNumberTagIU(instr_,
4635 instr_->value(), 4558 instr_->value(),
4636 UNSIGNED_INT32); 4559 instr_->temp1(),
4560 instr_->temp2(),
4561 UNSIGNED_INT32);
4637 } 4562 }
4638 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 4563 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
4639 private: 4564 private:
4640 LNumberTagU* instr_; 4565 LNumberTagU* instr_;
4641 }; 4566 };
4642 4567
4643 Register input = ToRegister(instr->value()); 4568 Register input = ToRegister(instr->value());
4644 Register result = ToRegister(instr->result()); 4569 Register result = ToRegister(instr->result());
4645 4570
4646 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4571 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4647 __ cmp(input, Operand(Smi::kMaxValue)); 4572 __ cmp(input, Operand(Smi::kMaxValue));
4648 __ b(hi, deferred->entry()); 4573 __ b(hi, deferred->entry());
4649 __ SmiTag(result, input); 4574 __ SmiTag(result, input);
4650 __ bind(deferred->exit()); 4575 __ bind(deferred->exit());
4651 } 4576 }
4652 4577
4653 4578
4654 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, 4579 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
4655 LOperand* value, 4580 LOperand* value,
4656 IntegerSignedness signedness) { 4581 LOperand* temp1,
4657 Label slow; 4582 LOperand* temp2,
4583 IntegerSignedness signedness) {
4584 Label done, slow;
4658 Register src = ToRegister(value); 4585 Register src = ToRegister(value);
4659 Register dst = ToRegister(instr->result()); 4586 Register dst = ToRegister(instr->result());
4587 Register tmp1 = scratch0();
4588 Register tmp2 = ToRegister(temp1);
4589 Register tmp3 = ToRegister(temp2);
4660 LowDwVfpRegister dbl_scratch = double_scratch0(); 4590 LowDwVfpRegister dbl_scratch = double_scratch0();
4661 4591
4662 // Preserve the value of all registers.
4663 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4664
4665 Label done;
4666 if (signedness == SIGNED_INT32) { 4592 if (signedness == SIGNED_INT32) {
4667 // There was overflow, so bits 30 and 31 of the original integer 4593 // There was overflow, so bits 30 and 31 of the original integer
4668 // disagree. Try to allocate a heap number in new space and store 4594 // disagree. Try to allocate a heap number in new space and store
4669 // the value in there. If that fails, call the runtime system. 4595 // the value in there. If that fails, call the runtime system.
4670 if (dst.is(src)) { 4596 if (dst.is(src)) {
4671 __ SmiUntag(src, dst); 4597 __ SmiUntag(src, dst);
4672 __ eor(src, src, Operand(0x80000000)); 4598 __ eor(src, src, Operand(0x80000000));
4673 } 4599 }
4674 __ vmov(dbl_scratch.low(), src); 4600 __ vmov(dbl_scratch.low(), src);
4675 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low()); 4601 __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
4676 } else { 4602 } else {
4677 __ vmov(dbl_scratch.low(), src); 4603 __ vmov(dbl_scratch.low(), src);
4678 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low()); 4604 __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
4679 } 4605 }
4680 4606
4681 if (FLAG_inline_new) { 4607 if (FLAG_inline_new) {
4682 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); 4608 __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
4683 __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT); 4609 __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
4684 __ Move(dst, r5);
4685 __ b(&done); 4610 __ b(&done);
4686 } 4611 }
4687 4612
4688 // Slow case: Call the runtime system to do the number allocation. 4613 // Slow case: Call the runtime system to do the number allocation.
4689 __ bind(&slow); 4614 __ bind(&slow);
4615 {
4616 // TODO(3095996): Put a valid pointer value in the stack slot where the
4617 // result register is stored, as this register is in the pointer map, but
4618 // contains an integer value.
4619 __ mov(dst, Operand::Zero());
4690 4620
4691 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4621 // Preserve the value of all registers.
4692 // register is stored, as this register is in the pointer map, but contains an 4622 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4693 // integer value. 4623
4694 __ mov(ip, Operand::Zero()); 4624 // NumberTagI and NumberTagD use the context from the frame, rather than
4695 __ StoreToSafepointRegisterSlot(ip, dst); 4625 // the environment's HContext or HInlinedContext value.
4696 // NumberTagI and NumberTagD use the context from the frame, rather than 4626 // They only call Runtime::kAllocateHeapNumber.
4697 // the environment's HContext or HInlinedContext value. 4627 // The corresponding HChange instructions are added in a phase that does
4698 // They only call Runtime::kAllocateHeapNumber. 4628 // not have easy access to the local context.
4699 // The corresponding HChange instructions are added in a phase that does 4629 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4700 // not have easy access to the local context. 4630 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4701 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4631 RecordSafepointWithRegisters(
4702 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4632 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4703 RecordSafepointWithRegisters( 4633 __ sub(r0, r0, Operand(kHeapObjectTag));
4704 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4634 __ StoreToSafepointRegisterSlot(r0, dst);
4705 __ Move(dst, r0); 4635 }
4706 __ sub(dst, dst, Operand(kHeapObjectTag));
4707 4636
4708 // Done. Put the value in dbl_scratch into the value of the allocated heap 4637 // Done. Put the value in dbl_scratch into the value of the allocated heap
4709 // number. 4638 // number.
4710 __ bind(&done); 4639 __ bind(&done);
4711 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4640 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4712 __ add(dst, dst, Operand(kHeapObjectTag)); 4641 __ add(dst, dst, Operand(kHeapObjectTag));
4713 __ StoreToSafepointRegisterSlot(dst, dst);
4714 } 4642 }
4715 4643
4716 4644
4717 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4645 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4718 class DeferredNumberTagD V8_FINAL : public LDeferredCode { 4646 class DeferredNumberTagD V8_FINAL : public LDeferredCode {
4719 public: 4647 public:
4720 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4648 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4721 : LDeferredCode(codegen), instr_(instr) { } 4649 : LDeferredCode(codegen), instr_(instr) { }
4722 virtual void Generate() V8_OVERRIDE { 4650 virtual void Generate() V8_OVERRIDE {
4723 codegen()->DoDeferredNumberTagD(instr_); 4651 codegen()->DoDeferredNumberTagD(instr_);
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
4765 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4693 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
4766 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); 4694 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
4767 RecordSafepointWithRegisters( 4695 RecordSafepointWithRegisters(
4768 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); 4696 instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
4769 __ sub(r0, r0, Operand(kHeapObjectTag)); 4697 __ sub(r0, r0, Operand(kHeapObjectTag));
4770 __ StoreToSafepointRegisterSlot(r0, reg); 4698 __ StoreToSafepointRegisterSlot(r0, reg);
4771 } 4699 }
4772 4700
4773 4701
4774 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4702 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4775 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); 4703 HChange* hchange = instr->hydrogen();
4776 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value())); 4704 Register input = ToRegister(instr->value());
4705 Register output = ToRegister(instr->result());
4706 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4707 hchange->value()->CheckFlag(HValue::kUint32)) {
4708 __ tst(input, Operand(0xc0000000));
4709 DeoptimizeIf(ne, instr->environment());
4710 }
4711 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4712 !hchange->value()->CheckFlag(HValue::kUint32)) {
4713 __ SmiTag(output, input, SetCC);
4714 DeoptimizeIf(vs, instr->environment());
4715 } else {
4716 __ SmiTag(output, input);
4717 }
4777 } 4718 }
4778 4719
4779 4720
4780 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4721 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4781 Register input = ToRegister(instr->value()); 4722 Register input = ToRegister(instr->value());
4782 Register result = ToRegister(instr->result()); 4723 Register result = ToRegister(instr->result());
4783 if (instr->needs_check()) { 4724 if (instr->needs_check()) {
4784 STATIC_ASSERT(kHeapObjectTag == 1); 4725 STATIC_ASSERT(kHeapObjectTag == 1);
4785 // If the input is a HeapObject, SmiUntag will set the carry flag. 4726 // If the input is a HeapObject, SmiUntag will set the carry flag.
4786 __ SmiUntag(result, input, SetCC); 4727 __ SmiUntag(result, input, SetCC);
(...skipping 429 matching lines...) Expand 10 before | Expand all | Expand 10 after
5216 __ jmp(&done); 5157 __ jmp(&done);
5217 5158
5218 // smi 5159 // smi
5219 __ bind(&is_smi); 5160 __ bind(&is_smi);
5220 __ ClampUint8(result_reg, result_reg); 5161 __ ClampUint8(result_reg, result_reg);
5221 5162
5222 __ bind(&done); 5163 __ bind(&done);
5223 } 5164 }
5224 5165
5225 5166
5167 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
5168 DwVfpRegister value_reg = ToDoubleRegister(instr->value());
5169 Register result_reg = ToRegister(instr->result());
5170 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
5171 __ VmovHigh(result_reg, value_reg);
5172 } else {
5173 __ VmovLow(result_reg, value_reg);
5174 }
5175 }
5176
5177
5178 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
5179 Register hi_reg = ToRegister(instr->hi());
5180 Register lo_reg = ToRegister(instr->lo());
5181 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
5182 __ VmovHigh(result_reg, hi_reg);
5183 __ VmovLow(result_reg, lo_reg);
5184 }
5185
5186
5226 void LCodeGen::DoAllocate(LAllocate* instr) { 5187 void LCodeGen::DoAllocate(LAllocate* instr) {
5227 class DeferredAllocate V8_FINAL : public LDeferredCode { 5188 class DeferredAllocate V8_FINAL : public LDeferredCode {
5228 public: 5189 public:
5229 DeferredAllocate(LCodeGen* codegen, LAllocate* instr) 5190 DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
5230 : LDeferredCode(codegen), instr_(instr) { } 5191 : LDeferredCode(codegen), instr_(instr) { }
5231 virtual void Generate() V8_OVERRIDE { 5192 virtual void Generate() V8_OVERRIDE {
5232 codegen()->DoDeferredAllocate(instr_); 5193 codegen()->DoDeferredAllocate(instr_);
5233 } 5194 }
5234 virtual LInstruction* instr() V8_OVERRIDE { return instr_; } 5195 virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
5235 private: 5196 private:
(...skipping 150 matching lines...) Expand 10 before | Expand all | Expand 10 after
5386 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); 5347 __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
5387 } 5348 }
5388 5349
5389 5350
5390 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5351 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5391 ASSERT(ToRegister(instr->context()).is(cp)); 5352 ASSERT(ToRegister(instr->context()).is(cp));
5392 // Use the fast case closure allocation code that allocates in new 5353 // Use the fast case closure allocation code that allocates in new
5393 // space for nested functions that don't need literals cloning. 5354 // space for nested functions that don't need literals cloning.
5394 bool pretenure = instr->hydrogen()->pretenure(); 5355 bool pretenure = instr->hydrogen()->pretenure();
5395 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5356 if (!pretenure && instr->hydrogen()->has_no_literals()) {
5396 FastNewClosureStub stub(instr->hydrogen()->language_mode(), 5357 FastNewClosureStub stub(instr->hydrogen()->strict_mode(),
5397 instr->hydrogen()->is_generator()); 5358 instr->hydrogen()->is_generator());
5398 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5359 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5399 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); 5360 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
5400 } else { 5361 } else {
5401 __ mov(r2, Operand(instr->hydrogen()->shared_info())); 5362 __ mov(r2, Operand(instr->hydrogen()->shared_info()));
5402 __ mov(r1, Operand(pretenure ? factory()->true_value() 5363 __ mov(r1, Operand(pretenure ? factory()->true_value()
5403 : factory()->false_value())); 5364 : factory()->false_value()));
5404 __ Push(cp, r2, r1); 5365 __ Push(cp, r2, r1);
5405 CallRuntime(Runtime::kNewClosure, 3, instr); 5366 CallRuntime(Runtime::kNewClosure, 3, instr);
5406 } 5367 }
(...skipping 346 matching lines...) Expand 10 before | Expand all | Expand 10 after
5753 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); 5714 __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
5754 __ ldr(result, FieldMemOperand(scratch, 5715 __ ldr(result, FieldMemOperand(scratch,
5755 FixedArray::kHeaderSize - kPointerSize)); 5716 FixedArray::kHeaderSize - kPointerSize));
5756 __ bind(&done); 5717 __ bind(&done);
5757 } 5718 }
5758 5719
5759 5720
5760 #undef __ 5721 #undef __
5761 5722
5762 } } // namespace v8::internal 5723 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/macro-assembler-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698