Chromium Code Reviews

Side by Side Diff: src/mips64/lithium-codegen-mips64.cc

Issue 371923006: Add mips64 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase (created 6 years, 5 months ago)
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Use of this source code is governed by a BSD-style license that can be
3 // modification, are permitted provided that the following conditions are 3 // found in the LICENSE file.
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 4
28 #include "src/v8.h" 5 #include "src/v8.h"
29 6
30 #include "src/code-stubs.h" 7 #include "src/code-stubs.h"
31 #include "src/hydrogen-osr.h" 8 #include "src/hydrogen-osr.h"
32 #include "src/mips/lithium-codegen-mips.h" 9 #include "src/mips64/lithium-codegen-mips64.h"
33 #include "src/mips/lithium-gap-resolver-mips.h" 10 #include "src/mips64/lithium-gap-resolver-mips64.h"
34 #include "src/stub-cache.h" 11 #include "src/stub-cache.h"
35 12
36 namespace v8 { 13 namespace v8 {
37 namespace internal { 14 namespace internal {
38 15
39 16
40 class SafepointGenerator V8_FINAL : public CallWrapper { 17 class SafepointGenerator V8_FINAL : public CallWrapper {
41 public: 18 public:
42 SafepointGenerator(LCodeGen* codegen, 19 SafepointGenerator(LCodeGen* codegen,
43 LPointerMap* pointers, 20 LPointerMap* pointers,
(...skipping 97 matching lines...)
141 118
142 // Sloppy mode functions and builtins need to replace the receiver with the 119 // Sloppy mode functions and builtins need to replace the receiver with the
143 // global proxy when called as functions (without an explicit receiver 120 // global proxy when called as functions (without an explicit receiver
144 // object). 121 // object).
145 if (info_->this_has_uses() && 122 if (info_->this_has_uses() &&
146 info_->strict_mode() == SLOPPY && 123 info_->strict_mode() == SLOPPY &&
147 !info_->is_native()) { 124 !info_->is_native()) {
148 Label ok; 125 Label ok;
149 int receiver_offset = info_->scope()->num_parameters() * kPointerSize; 126 int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
150 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 127 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
151 __ lw(a2, MemOperand(sp, receiver_offset)); 128 __ ld(a2, MemOperand(sp, receiver_offset));
152 __ Branch(&ok, ne, a2, Operand(at)); 129 __ Branch(&ok, ne, a2, Operand(at));
153 130
154 __ lw(a2, GlobalObjectOperand()); 131 __ ld(a2, GlobalObjectOperand());
155 __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); 132 __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
156 133
157 __ sw(a2, MemOperand(sp, receiver_offset)); 134 __ sd(a2, MemOperand(sp, receiver_offset));
158 135
159 __ bind(&ok); 136 __ bind(&ok);
160 } 137 }
161 } 138 }
162 139
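The pattern in this chunk recurs through the whole file: the port keeps the mips32 code shape and only widens the accesses, so lw/sw become the 64-bit ld/sd and pointer arithmetic moves to the D-prefixed forms, since a tagged value occupies 8 bytes on mips64. The patched sequence itself is easy to state in scalar form; a minimal sketch in plain C++ (illustrative names, not V8 code) of the sloppy-receiver fixup:

  #include <cstdio>

  // Stand-ins for V8's heap roots; purely illustrative.
  enum Value { kUndefined, kGlobalProxy, kSomeObject };

  // Sloppy-mode calls without an explicit receiver get the global proxy,
  // which is what the LoadRoot/ld/Branch/sd sequence above implements.
  Value PatchReceiver(Value receiver) {
    return receiver == kUndefined ? kGlobalProxy : receiver;
  }

  int main() {
    std::printf("%d\n", PatchReceiver(kUndefined));   // 1: global proxy
    std::printf("%d\n", PatchReceiver(kSomeObject));  // 2: left alone
  }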
163 info()->set_prologue_offset(masm_->pc_offset()); 140 info()->set_prologue_offset(masm_->pc_offset());
164 if (NeedsEagerFrame()) { 141 if (NeedsEagerFrame()) {
165 if (info()->IsStub()) { 142 if (info()->IsStub()) {
166 __ StubPrologue(); 143 __ StubPrologue();
167 } else { 144 } else {
168 __ Prologue(info()->IsCodePreAgingActive()); 145 __ Prologue(info()->IsCodePreAgingActive());
169 } 146 }
170 frame_is_built_ = true; 147 frame_is_built_ = true;
171 info_->AddNoFrameRange(0, masm_->pc_offset()); 148 info_->AddNoFrameRange(0, masm_->pc_offset());
172 } 149 }
173 150
174 // Reserve space for the stack slots needed by the code. 151 // Reserve space for the stack slots needed by the code.
175 int slots = GetStackSlotCount(); 152 int slots = GetStackSlotCount();
176 if (slots > 0) { 153 if (slots > 0) {
177 if (FLAG_debug_code) { 154 if (FLAG_debug_code) {
178 __ Subu(sp, sp, Operand(slots * kPointerSize)); 155 __ Dsubu(sp, sp, Operand(slots * kPointerSize));
179 __ Push(a0, a1); 156 __ Push(a0, a1);
180 __ Addu(a0, sp, Operand(slots * kPointerSize)); 157 __ Daddu(a0, sp, Operand(slots * kPointerSize));
181 __ li(a1, Operand(kSlotsZapValue)); 158 __ li(a1, Operand(kSlotsZapValue));
182 Label loop; 159 Label loop;
183 __ bind(&loop); 160 __ bind(&loop);
184 __ Subu(a0, a0, Operand(kPointerSize)); 161 __ Dsubu(a0, a0, Operand(kPointerSize));
185 __ sw(a1, MemOperand(a0, 2 * kPointerSize)); 162 __ sd(a1, MemOperand(a0, 2 * kPointerSize));
186 __ Branch(&loop, ne, a0, Operand(sp)); 163 __ Branch(&loop, ne, a0, Operand(sp));
187 __ Pop(a0, a1); 164 __ Pop(a0, a1);
188 } else { 165 } else {
189 __ Subu(sp, sp, Operand(slots * kPointerSize)); 166 __ Dsubu(sp, sp, Operand(slots * kPointerSize));
190 } 167 }
191 } 168 }
192 169
193 if (info()->saves_caller_doubles()) { 170 if (info()->saves_caller_doubles()) {
194 SaveCallerDoubles(); 171 SaveCallerDoubles();
195 } 172 }
196 173
197 // Possibly allocate a local context. 174 // Possibly allocate a local context.
198 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 175 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
199 if (heap_slots > 0) { 176 if (heap_slots > 0) {
200 Comment(";;; Allocate local context"); 177 Comment(";;; Allocate local context");
201 bool need_write_barrier = true; 178 bool need_write_barrier = true;
202 // Argument to NewContext is the function, which is in a1. 179 // Argument to NewContext is the function, which is in a1.
203 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 180 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
204 FastNewContextStub stub(isolate(), heap_slots); 181 FastNewContextStub stub(isolate(), heap_slots);
205 __ CallStub(&stub); 182 __ CallStub(&stub);
206 // Result of FastNewContextStub is always in new space. 183 // Result of FastNewContextStub is always in new space.
207 need_write_barrier = false; 184 need_write_barrier = false;
208 } else { 185 } else {
209 __ push(a1); 186 __ push(a1);
210 __ CallRuntime(Runtime::kNewFunctionContext, 1); 187 __ CallRuntime(Runtime::kNewFunctionContext, 1);
211 } 188 }
212 RecordSafepoint(Safepoint::kNoLazyDeopt); 189 RecordSafepoint(Safepoint::kNoLazyDeopt);
213 // Context is returned in v0. It replaces the context passed to us. 190 // Context is returned in v0. It replaces the context passed to us.
214 // It's saved in the stack and kept live in cp. 191 // It's saved in the stack and kept live in cp.
215 __ mov(cp, v0); 192 __ mov(cp, v0);
216 __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); 193 __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
217 // Copy any necessary parameters into the context. 194 // Copy any necessary parameters into the context.
218 int num_parameters = scope()->num_parameters(); 195 int num_parameters = scope()->num_parameters();
219 for (int i = 0; i < num_parameters; i++) { 196 for (int i = 0; i < num_parameters; i++) {
220 Variable* var = scope()->parameter(i); 197 Variable* var = scope()->parameter(i);
221 if (var->IsContextSlot()) { 198 if (var->IsContextSlot()) {
222 int parameter_offset = StandardFrameConstants::kCallerSPOffset + 199 int parameter_offset = StandardFrameConstants::kCallerSPOffset +
223 (num_parameters - 1 - i) * kPointerSize; 200 (num_parameters - 1 - i) * kPointerSize;
224 // Load parameter from stack. 201 // Load parameter from stack.
225 __ lw(a0, MemOperand(fp, parameter_offset)); 202 __ ld(a0, MemOperand(fp, parameter_offset));
226 // Store it in the context. 203 // Store it in the context.
227 MemOperand target = ContextOperand(cp, var->index()); 204 MemOperand target = ContextOperand(cp, var->index());
228 __ sw(a0, target); 205 __ sd(a0, target);
229 // Update the write barrier. This clobbers a3 and a0. 206 // Update the write barrier. This clobbers a3 and a0.
230 if (need_write_barrier) { 207 if (need_write_barrier) {
231 __ RecordWriteContextSlot( 208 __ RecordWriteContextSlot(
232 cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs); 209 cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
233 } else if (FLAG_debug_code) { 210 } else if (FLAG_debug_code) {
234 Label done; 211 Label done;
235 __ JumpIfInNewSpace(cp, a0, &done); 212 __ JumpIfInNewSpace(cp, a0, &done);
236 __ Abort(kExpectedNewSpaceObject); 213 __ Abort(kExpectedNewSpaceObject);
237 __ bind(&done); 214 __ bind(&done);
238 } 215 }
(...skipping 16 matching lines...)
255 // Generate the OSR entry prologue at the first unknown OSR value, or if there 232 // Generate the OSR entry prologue at the first unknown OSR value, or if there
256 // are none, at the OSR entrypoint instruction. 233 // are none, at the OSR entrypoint instruction.
257 if (osr_pc_offset_ >= 0) return; 234 if (osr_pc_offset_ >= 0) return;
258 235
259 osr_pc_offset_ = masm()->pc_offset(); 236 osr_pc_offset_ = masm()->pc_offset();
260 237
261 // Adjust the frame size, subsuming the unoptimized frame into the 238 // Adjust the frame size, subsuming the unoptimized frame into the
262 // optimized frame. 239 // optimized frame.
263 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); 240 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
264 ASSERT(slots >= 0); 241 ASSERT(slots >= 0);
265 __ Subu(sp, sp, Operand(slots * kPointerSize)); 242 __ Dsubu(sp, sp, Operand(slots * kPointerSize));
266 } 243 }
267 244
268 245
269 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { 246 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
270 if (instr->IsCall()) { 247 if (instr->IsCall()) {
271 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); 248 EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
272 } 249 }
273 if (!instr->IsLazyBailout() && !instr->IsGap()) { 250 if (!instr->IsLazyBailout() && !instr->IsGap()) {
274 safepoints_.BumpLastLazySafepointIndex(); 251 safepoints_.BumpLastLazySafepointIndex();
275 } 252 }
(...skipping 18 matching lines...)
294 code->instr()->Mnemonic()); 271 code->instr()->Mnemonic());
295 __ bind(code->entry()); 272 __ bind(code->entry());
296 if (NeedsDeferredFrame()) { 273 if (NeedsDeferredFrame()) {
297 Comment(";;; Build frame"); 274 Comment(";;; Build frame");
298 ASSERT(!frame_is_built_); 275 ASSERT(!frame_is_built_);
299 ASSERT(info()->IsStub()); 276 ASSERT(info()->IsStub());
300 frame_is_built_ = true; 277 frame_is_built_ = true;
301 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); 278 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
302 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 279 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
303 __ push(scratch0()); 280 __ push(scratch0());
304 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 281 __ Daddu(fp, sp,
282 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
305 Comment(";;; Deferred code"); 283 Comment(";;; Deferred code");
306 } 284 }
307 code->Generate(); 285 code->Generate();
308 if (NeedsDeferredFrame()) { 286 if (NeedsDeferredFrame()) {
309 Comment(";;; Destroy frame"); 287 Comment(";;; Destroy frame");
310 ASSERT(frame_is_built_); 288 ASSERT(frame_is_built_);
311 __ pop(at); 289 __ pop(at);
312 __ MultiPop(cp.bit() | fp.bit() | ra.bit()); 290 __ MultiPop(cp.bit() | fp.bit() | ra.bit());
313 frame_is_built_ = false; 291 frame_is_built_ = false;
314 } 292 }
(...skipping 32 matching lines...)
347 __ Branch(&needs_frame); 325 __ Branch(&needs_frame);
348 } else { 326 } else {
349 __ bind(&needs_frame); 327 __ bind(&needs_frame);
350 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); 328 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
351 // This variant of deopt can only be used with stubs. Since we don't 329 // This variant of deopt can only be used with stubs. Since we don't
352 // have a function pointer to install in the stack frame that we're 330 // have a function pointer to install in the stack frame that we're
353 // building, install a special marker there instead. 331 // building, install a special marker there instead.
354 ASSERT(info()->IsStub()); 332 ASSERT(info()->IsStub());
355 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 333 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
356 __ push(scratch0()); 334 __ push(scratch0());
357 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 335 __ Daddu(fp, sp,
336 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
358 __ Call(t9); 337 __ Call(t9);
359 } 338 }
360 } else { 339 } else {
361 if (info()->saves_caller_doubles()) { 340 if (info()->saves_caller_doubles()) {
362 ASSERT(info()->IsStub()); 341 ASSERT(info()->IsStub());
363 RestoreCallerDoubles(); 342 RestoreCallerDoubles();
364 } 343 }
365 __ Call(t9); 344 __ Call(t9);
366 } 345 }
367 } 346 }
(...skipping 44 matching lines...)
412 ASSERT(constant->HasSmiValue()); 391 ASSERT(constant->HasSmiValue());
413 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value()))); 392 __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
414 } else if (r.IsDouble()) { 393 } else if (r.IsDouble()) {
415 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); 394 Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
416 } else { 395 } else {
417 ASSERT(r.IsSmiOrTagged()); 396 ASSERT(r.IsSmiOrTagged());
418 __ li(scratch, literal); 397 __ li(scratch, literal);
419 } 398 }
420 return scratch; 399 return scratch;
421 } else if (op->IsStackSlot()) { 400 } else if (op->IsStackSlot()) {
422 __ lw(scratch, ToMemOperand(op)); 401 __ ld(scratch, ToMemOperand(op));
423 return scratch; 402 return scratch;
424 } 403 }
425 UNREACHABLE(); 404 UNREACHABLE();
426 return scratch; 405 return scratch;
427 } 406 }
428 407
429 408
430 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 409 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
431 ASSERT(op->IsDoubleRegister()); 410 ASSERT(op->IsDoubleRegister());
432 return ToDoubleRegister(op->index()); 411 return ToDoubleRegister(op->index());
(...skipping 42 matching lines...)
475 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32(); 454 return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
476 } 455 }
477 456
478 457
479 bool LCodeGen::IsSmi(LConstantOperand* op) const { 458 bool LCodeGen::IsSmi(LConstantOperand* op) const {
480 return chunk_->LookupLiteralRepresentation(op).IsSmi(); 459 return chunk_->LookupLiteralRepresentation(op).IsSmi();
481 } 460 }
482 461
483 462
484 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { 463 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
485 return ToRepresentation(op, Representation::Integer32()); 464 // return ToRepresentation(op, Representation::Integer32());
465 HConstant* constant = chunk_->LookupConstant(op);
466 return constant->Integer32Value();
486 } 467 }
487 468
488 469
489 int32_t LCodeGen::ToRepresentation(LConstantOperand* op, 470 int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
490 const Representation& r) const { 471 const Representation& r) const {
491 HConstant* constant = chunk_->LookupConstant(op); 472 HConstant* constant = chunk_->LookupConstant(op);
492 int32_t value = constant->Integer32Value(); 473 int32_t value = constant->Integer32Value();
493 if (r.IsInteger32()) return value; 474 if (r.IsInteger32()) return value;
494 ASSERT(r.IsSmiOrTagged()); 475 ASSERT(r.IsSmiOrTagged());
495 return reinterpret_cast<int32_t>(Smi::FromInt(value)); 476 return reinterpret_cast<int64_t>(Smi::FromInt(value));
496 } 477 }
497 478
498 479
499 Smi* LCodeGen::ToSmi(LConstantOperand* op) const { 480 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
500 HConstant* constant = chunk_->LookupConstant(op); 481 HConstant* constant = chunk_->LookupConstant(op);
501 return Smi::FromInt(constant->Integer32Value()); 482 return Smi::FromInt(constant->Integer32Value());
502 } 483 }
503 484
504 485
505 double LCodeGen::ToDouble(LConstantOperand* op) const { 486 double LCodeGen::ToDouble(LConstantOperand* op) const {
(...skipping 16 matching lines...)
522 return Operand(constant->Integer32Value()); 503 return Operand(constant->Integer32Value());
523 } else if (r.IsDouble()) { 504 } else if (r.IsDouble()) {
524 Abort(kToOperandUnsupportedDoubleImmediate); 505 Abort(kToOperandUnsupportedDoubleImmediate);
525 } 506 }
526 ASSERT(r.IsTagged()); 507 ASSERT(r.IsTagged());
527 return Operand(constant->handle(isolate())); 508 return Operand(constant->handle(isolate()));
528 } else if (op->IsRegister()) { 509 } else if (op->IsRegister()) {
529 return Operand(ToRegister(op)); 510 return Operand(ToRegister(op));
530 } else if (op->IsDoubleRegister()) { 511 } else if (op->IsDoubleRegister()) {
531 Abort(kToOperandIsDoubleRegisterUnimplemented); 512 Abort(kToOperandIsDoubleRegisterUnimplemented);
532 return Operand(0); 513 return Operand((int64_t)0);
533 } 514 }
534 // Stack slots not implemented, use ToMemOperand instead. 515 // Stack slots not implemented, use ToMemOperand instead.
535 UNREACHABLE(); 516 UNREACHABLE();
536 return Operand(0); 517 return Operand((int64_t)0);
537 } 518 }
538 519
539 520
540 static int ArgumentsOffsetWithoutFrame(int index) { 521 static int ArgumentsOffsetWithoutFrame(int index) {
541 ASSERT(index < 0); 522 ASSERT(index < 0);
542 return -(index + 1) * kPointerSize; 523 return -(index + 1) * kPointerSize;
543 } 524 }
544 525
545 526
546 MemOperand LCodeGen::ToMemOperand(LOperand* op) const { 527 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
547 ASSERT(!op->IsRegister()); 528 ASSERT(!op->IsRegister());
548 ASSERT(!op->IsDoubleRegister()); 529 ASSERT(!op->IsDoubleRegister());
549 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); 530 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
550 if (NeedsEagerFrame()) { 531 if (NeedsEagerFrame()) {
551 return MemOperand(fp, StackSlotOffset(op->index())); 532 return MemOperand(fp, StackSlotOffset(op->index()));
552 } else { 533 } else {
553 // Retrieve parameter without an eager stack frame, relative to the 534 // Retrieve parameter without an eager stack frame, relative to the
554 // stack pointer. 535 // stack pointer.
555 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index())); 536 return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
556 } 537 }
557 } 538 }
558 539
559 540
560 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { 541 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
561 ASSERT(op->IsDoubleStackSlot()); 542 ASSERT(op->IsDoubleStackSlot());
562 if (NeedsEagerFrame()) { 543 if (NeedsEagerFrame()) {
563 return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); 544 // return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
545 return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
564 } else { 546 } else {
565 // Retrieve parameter without an eager stack frame, relative to the 547 // Retrieve parameter without an eager stack frame, relative to the
566 // stack pointer. 548 // stack pointer.
549 // return MemOperand(
550 // sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
567 return MemOperand( 551 return MemOperand(
568 sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); 552 sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
569 } 553 }
570 } 554 }
571 555
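The kPointerSize to kIntSize switch above is one of the few places where the port is not a mechanical widening: a double still fits in a single 8-byte stack slot on mips64, so its upper 32-bit half lives 4 bytes (kIntSize) into that slot rather than in a second pointer-sized slot. A standalone sketch (plain C++; assumes a little-endian host, as on mips64el):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    double d = 1.0;                     // one 64-bit slot
    uint32_t halves[2];
    std::memcpy(halves, &d, sizeof d);  // view it as two 32-bit halves
    // On a little-endian host the "high" half sits at byte offset 4.
    std::printf("0x%08x\n", halves[1]);  // 0x3ff00000 for 1.0
  }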
572 556
573 void LCodeGen::WriteTranslation(LEnvironment* environment, 557 void LCodeGen::WriteTranslation(LEnvironment* environment,
574 Translation* translation) { 558 Translation* translation) {
575 if (environment == NULL) return; 559 if (environment == NULL) return;
576 560
577 // The translation includes one command per value in the environment. 561 // The translation includes one command per value in the environment.
578 int translation_size = environment->translation_size(); 562 int translation_size = environment->translation_size();
(...skipping 141 matching lines...)
720 __ CallRuntime(function, num_arguments, save_doubles); 704 __ CallRuntime(function, num_arguments, save_doubles);
721 705
722 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 706 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
723 } 707 }
724 708
725 709
726 void LCodeGen::LoadContextFromDeferred(LOperand* context) { 710 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
727 if (context->IsRegister()) { 711 if (context->IsRegister()) {
728 __ Move(cp, ToRegister(context)); 712 __ Move(cp, ToRegister(context));
729 } else if (context->IsStackSlot()) { 713 } else if (context->IsStackSlot()) {
730 __ lw(cp, ToMemOperand(context)); 714 __ ld(cp, ToMemOperand(context));
731 } else if (context->IsConstantOperand()) { 715 } else if (context->IsConstantOperand()) {
732 HConstant* constant = 716 HConstant* constant =
733 chunk_->LookupConstant(LConstantOperand::cast(context)); 717 chunk_->LookupConstant(LConstantOperand::cast(context));
734 __ li(cp, Handle<Object>::cast(constant->handle(isolate()))); 718 __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
735 } else { 719 } else {
736 UNREACHABLE(); 720 UNREACHABLE();
737 } 721 }
738 } 722 }
739 723
740 724
(...skipping 348 matching lines...)
1089 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to 1073 // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
1090 // indicate that positive dividends are heavily favored, so the branching 1074 // indicate that positive dividends are heavily favored, so the branching
1091 // version performs better. 1075 // version performs better.
1092 HMod* hmod = instr->hydrogen(); 1076 HMod* hmod = instr->hydrogen();
1093 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1077 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1094 Label dividend_is_not_negative, done; 1078 Label dividend_is_not_negative, done;
1095 1079
1096 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1080 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1097 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); 1081 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
1098 // Note: The code below even works when right contains kMinInt. 1082 // Note: The code below even works when right contains kMinInt.
1099 __ subu(dividend, zero_reg, dividend); 1083 __ dsubu(dividend, zero_reg, dividend);
1100 __ And(dividend, dividend, Operand(mask)); 1084 __ And(dividend, dividend, Operand(mask));
1101 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1085 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1102 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); 1086 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1103 } 1087 }
1104 __ Branch(USE_DELAY_SLOT, &done); 1088 __ Branch(USE_DELAY_SLOT, &done);
1105 __ subu(dividend, zero_reg, dividend); 1089 __ dsubu(dividend, zero_reg, dividend);
1106 } 1090 }
1107 1091
1108 __ bind(&dividend_is_not_negative); 1092 __ bind(&dividend_is_not_negative);
1109 __ And(dividend, dividend, Operand(mask)); 1093 __ And(dividend, dividend, Operand(mask));
1110 __ bind(&done); 1094 __ bind(&done);
1111 } 1095 }
1112 1096
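In scalar form, the mask trick above: for divisor = ±2^k the remainder is the low k bits of |dividend|, negated back when the dividend was negative, so the result keeps the dividend's sign as JS % requires. A standalone sketch (assumes dividend != kMinInt; the assembly leans on wrapping dsubu for that edge, plus the deopt on a zero result when -0 matters):

  #include <cstdint>
  #include <cstdio>

  int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
    int32_t mask = divisor < 0 ? -(divisor + 1) : divisor - 1;  // |divisor| - 1
    if (dividend < 0) return -(-dividend & mask);
    return dividend & mask;
  }

  int main() {
    std::printf("%d\n", ModByPowerOf2(-7, 4));  // -3: sign follows dividend
    std::printf("%d\n", ModByPowerOf2(7, -4));  // 3
  }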
1113 1097
1114 void LCodeGen::DoModByConstI(LModByConstI* instr) { 1098 void LCodeGen::DoModByConstI(LModByConstI* instr) {
1115 Register dividend = ToRegister(instr->dividend()); 1099 Register dividend = ToRegister(instr->dividend());
1116 int32_t divisor = instr->divisor(); 1100 int32_t divisor = instr->divisor();
1117 Register result = ToRegister(instr->result()); 1101 Register result = ToRegister(instr->result());
1118 ASSERT(!dividend.is(result)); 1102 ASSERT(!dividend.is(result));
1119 1103
1120 if (divisor == 0) { 1104 if (divisor == 0) {
1121 DeoptimizeIf(al, instr->environment()); 1105 DeoptimizeIf(al, instr->environment());
1122 return; 1106 return;
1123 } 1107 }
1124 1108
1125 __ TruncatingDiv(result, dividend, Abs(divisor)); 1109 __ TruncatingDiv(result, dividend, Abs(divisor));
1126 __ Mul(result, result, Operand(Abs(divisor))); 1110 __ Dmul(result, result, Operand(Abs(divisor)));
1127 __ Subu(result, dividend, Operand(result)); 1111 __ Dsubu(result, dividend, Operand(result));
1128 1112
1129 // Check for negative zero. 1113 // Check for negative zero.
1130 HMod* hmod = instr->hydrogen(); 1114 HMod* hmod = instr->hydrogen();
1131 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1132 Label remainder_not_zero; 1116 Label remainder_not_zero;
1133 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); 1117 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
1134 DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg)); 1118 DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
1135 __ bind(&remainder_not_zero); 1119 __ bind(&remainder_not_zero);
1136 } 1120 }
1137 } 1121 }
1138 1122
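DoModByConstI computes n mod d as n - TruncatingDiv(n, |d|) * |d|, which is the Dmul/Dsubu pair above. V8's TruncatingDiv is a magic-number multiply; since C++ integer division already truncates toward zero, plain '/' can stand in for a sketch (assumes divisor != 0 and no kMinInt edge):

  #include <cstdint>
  #include <cstdio>

  int32_t ModByConst(int32_t n, int32_t d) {
    int32_t abs_d = d < 0 ? -d : d;
    int32_t q = n / abs_d;   // TruncatingDiv(result, dividend, Abs(divisor))
    return n - q * abs_d;    // Dmul, then Dsubu
  }

  int main() {
    std::printf("%d\n", ModByConst(-7, 3));  // -1
    std::printf("%d\n", ModByConst(-6, 3));  // 0: deopts when -0 matters
  }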
1139 1123
1140 void LCodeGen::DoModI(LModI* instr) { 1124 void LCodeGen::DoModI(LModI* instr) {
1141 HMod* hmod = instr->hydrogen(); 1125 HMod* hmod = instr->hydrogen();
1142 const Register left_reg = ToRegister(instr->left()); 1126 const Register left_reg = ToRegister(instr->left());
1143 const Register right_reg = ToRegister(instr->right()); 1127 const Register right_reg = ToRegister(instr->right());
1144 const Register result_reg = ToRegister(instr->result()); 1128 const Register result_reg = ToRegister(instr->result());
1145 1129
1146 // div runs in the background while we check for special cases. 1130 // div runs in the background while we check for special cases.
1147 __ div(left_reg, right_reg); 1131 __ ddiv(left_reg, right_reg);
1148 1132
1149 Label done; 1133 Label done;
1150 // Check for x % 0, we have to deopt in this case because we can't return a 1134 // Check for x % 0, we have to deopt in this case because we can't return a
1151 // NaN. 1135 // NaN.
1152 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { 1136 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
1153 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg)); 1137 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
1154 } 1138 }
1155 1139
1156 // Check for kMinInt % -1, div will return kMinInt, which is not what we 1140 // Check for kMinInt % -1, div will return kMinInt, which is not what we
1157 // want. We have to deopt if we care about -0, because we can't return that. 1141 // want. We have to deopt if we care about -0, because we can't return that.
1158 if (hmod->CheckFlag(HValue::kCanOverflow)) { 1142 if (hmod->CheckFlag(HValue::kCanOverflow)) {
1159 Label no_overflow_possible; 1143 Label no_overflow_possible;
1160 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt)); 1144 __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
1161 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1145 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1162 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1)); 1146 DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
1163 } else { 1147 } else {
1164 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1)); 1148 __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
1165 __ Branch(USE_DELAY_SLOT, &done); 1149 __ Branch(USE_DELAY_SLOT, &done);
1166 __ mov(result_reg, zero_reg); 1150 __ mov(result_reg, zero_reg);
1167 } 1151 }
1168 __ bind(&no_overflow_possible); 1152 __ bind(&no_overflow_possible);
1169 } 1153 }
1170 1154
1171 // If we care about -0, test if the dividend is <0 and the result is 0. 1155 // If we care about -0, test if the dividend is <0 and the result is 0.
1172 __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg)); 1156 __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
1173 __ mfhi(result_reg); 1157 __ mfhi(result_reg);
1158
1174 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1159 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1175 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); 1160 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
1176 } 1161 }
1177 __ bind(&done); 1162 __ bind(&done);
1178 } 1163 }
1179 1164
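The guards around the ddiv above cover exactly the cases where a machine remainder disagrees with JS: x % 0 (NaN in JS, so deopt), kMinInt % -1 (the quotient overflows even though the remainder is 0), and a zero remainder with a negative dividend (JS -0, unrepresentable in an integer register). A sketch with deopt modeled as a false return:

  #include <cstdint>
  #include <cstdio>

  bool CheckedMod(int32_t left, int32_t right, bool minus_zero_matters,
                  int32_t* out) {
    if (right == 0) return false;              // x % 0 -> deopt
    if (left == INT32_MIN && right == -1) {    // overflow case
      if (minus_zero_matters) return false;    // result would be -0
      *out = 0;
      return true;
    }
    *out = left % right;
    if (minus_zero_matters && *out == 0 && left < 0) return false;  // -0
    return true;
  }

  int main() {
    int32_t r;
    std::printf("%d\n", CheckedMod(-8, 4, true, &r));  // 0: would deopt
  }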
1180 1165
1181 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1166 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1182 Register dividend = ToRegister(instr->dividend()); 1167 Register dividend = ToRegister(instr->dividend());
1183 int32_t divisor = instr->divisor(); 1168 int32_t divisor = instr->divisor();
(...skipping 12 matching lines...)
1196 } 1181 }
1197 // Deoptimize if remainder will not be 0. 1182 // Deoptimize if remainder will not be 0.
1198 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1183 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1199 divisor != 1 && divisor != -1) { 1184 divisor != 1 && divisor != -1) {
1200 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1185 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1201 __ And(at, dividend, Operand(mask)); 1186 __ And(at, dividend, Operand(mask));
1202 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); 1187 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1203 } 1188 }
1204 1189
1205 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1190 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1206 __ Subu(result, zero_reg, dividend); 1191 __ Dsubu(result, zero_reg, dividend);
1207 return; 1192 return;
1208 } 1193 }
1209 uint16_t shift = WhichPowerOf2Abs(divisor); 1194 uint16_t shift = WhichPowerOf2Abs(divisor);
1210 if (shift == 0) { 1195 if (shift == 0) {
1211 __ Move(result, dividend); 1196 __ Move(result, dividend);
1212 } else if (shift == 1) { 1197 } else if (shift == 1) {
1213 __ srl(result, dividend, 31); 1198 __ dsrl32(result, dividend, 31);
1214 __ Addu(result, dividend, Operand(result)); 1199 __ Daddu(result, dividend, Operand(result));
1215 } else { 1200 } else {
1216 __ sra(result, dividend, 31); 1201 __ dsra32(result, dividend, 31);
1217 __ srl(result, result, 32 - shift); 1202 __ dsrl32(result, result, 32 - shift);
1218 __ Addu(result, dividend, Operand(result)); 1203 __ Daddu(result, dividend, Operand(result));
1219 } 1204 }
1220 if (shift > 0) __ sra(result, result, shift); 1205 if (shift > 0) __ dsra(result, result, shift);
1221 if (divisor < 0) __ Subu(result, zero_reg, result); 1206 if (divisor < 0) __ Dsubu(result, zero_reg, result);
1222 } 1207 }
1223 1208
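The shift cascade above implements round-toward-zero division by ±2^shift: a bare arithmetic shift rounds toward minus infinity, so 2^shift - 1 is first added to negative dividends, which is what the sra/srl pair (dsra32/dsrl32 in the 64-bit column) extracts. Scalar sketch (assumes 1 <= shift <= 31 and arithmetic right shift of signed values, which the relevant compilers provide):

  #include <cstdint>
  #include <cstdio>

  int32_t DivByPowerOf2(int32_t dividend, int shift) {
    uint32_t sign = static_cast<uint32_t>(dividend >> 31);      // all ones if < 0
    int32_t bias = static_cast<int32_t>(sign >> (32 - shift));  // 2^shift - 1 or 0
    return (dividend + bias) >> shift;
  }

  int main() {
    std::printf("%d\n", DivByPowerOf2(-7, 1));  // -3; a bare shift gives -4
  }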
1224 1209
1225 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { 1210 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
1226 Register dividend = ToRegister(instr->dividend()); 1211 Register dividend = ToRegister(instr->dividend());
1227 int32_t divisor = instr->divisor(); 1212 int32_t divisor = instr->divisor();
1228 Register result = ToRegister(instr->result()); 1213 Register result = ToRegister(instr->result());
1229 ASSERT(!dividend.is(result)); 1214 ASSERT(!dividend.is(result));
1230 1215
1231 if (divisor == 0) { 1216 if (divisor == 0) {
1232 DeoptimizeIf(al, instr->environment()); 1217 DeoptimizeIf(al, instr->environment());
1233 return; 1218 return;
1234 } 1219 }
1235 1220
1236 // Check for (0 / -x) that will produce negative zero. 1221 // Check for (0 / -x) that will produce negative zero.
1237 HDiv* hdiv = instr->hydrogen(); 1222 HDiv* hdiv = instr->hydrogen();
1238 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1223 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1239 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); 1224 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1240 } 1225 }
1241 1226
1242 __ TruncatingDiv(result, dividend, Abs(divisor)); 1227 __ TruncatingDiv(result, dividend, Abs(divisor));
1243 if (divisor < 0) __ Subu(result, zero_reg, result); 1228 if (divisor < 0) __ Subu(result, zero_reg, result);
1244 1229
1245 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1230 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1246 __ Mul(scratch0(), result, Operand(divisor)); 1231 __ Dmul(scratch0(), result, Operand(divisor));
1247 __ Subu(scratch0(), scratch0(), dividend); 1232 __ Dsubu(scratch0(), scratch0(), dividend);
1248 DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg)); 1233 DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
1249 } 1234 }
1250 } 1235 }
1251 1236
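When some use of the quotient is not truncating, the division must be exact; the Dmul/Dsubu pair above multiplies the quotient back and deopts on any difference. Sketch:

  #include <cstdint>
  #include <cstdio>

  bool ExactDiv(int32_t dividend, int32_t divisor, int32_t* out) {
    int32_t q = dividend / divisor;             // TruncatingDiv stand-in
    if (q * divisor != dividend) return false;  // nonzero remainder -> deopt
    *out = q;
    return true;
  }

  int main() {
    int32_t q;
    std::printf("%d\n", ExactDiv(9, 3, &q));  // 1, q == 3
    std::printf("%d\n", ExactDiv(7, 3, &q));  // 0: would deoptimize
  }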
1252 1237
1253 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. 1238 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
1254 void LCodeGen::DoDivI(LDivI* instr) { 1239 void LCodeGen::DoDivI(LDivI* instr) {
1255 HBinaryOperation* hdiv = instr->hydrogen(); 1240 HBinaryOperation* hdiv = instr->hydrogen();
1256 Register dividend = ToRegister(instr->dividend()); 1241 Register dividend = ToRegister(instr->dividend());
1257 Register divisor = ToRegister(instr->divisor()); 1242 Register divisor = ToRegister(instr->divisor());
1258 const Register result = ToRegister(instr->result()); 1243 const Register result = ToRegister(instr->result());
1259 1244
1260 // On MIPS div is asynchronous - it will run in the background while we 1245 // On MIPS div is asynchronous - it will run in the background while we
1261 // check for special cases. 1246 // check for special cases.
1262 __ div(dividend, divisor); 1247 __ ddiv(dividend, divisor);
1263 1248
1264 // Check for x / 0. 1249 // Check for x / 0.
1265 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1250 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1266 DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); 1251 DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
1267 } 1252 }
1268 1253
1269 // Check for (0 / -x) that will produce negative zero. 1254 // Check for (0 / -x) that will produce negative zero.
1270 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1255 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1271 Label left_not_zero; 1256 Label left_not_zero;
1272 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); 1257 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
(...skipping 21 matching lines...)
1294 1279
1295 1280
1296 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { 1281 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1297 DoubleRegister addend = ToDoubleRegister(instr->addend()); 1282 DoubleRegister addend = ToDoubleRegister(instr->addend());
1298 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); 1283 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1299 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1284 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1300 1285
1301 // This is computed in-place. 1286 // This is computed in-place.
1302 ASSERT(addend.is(ToDoubleRegister(instr->result()))); 1287 ASSERT(addend.is(ToDoubleRegister(instr->result())));
1303 1288
1304 __ madd_d(addend, addend, multiplier, multiplicand); 1289 __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
1305 } 1290 }
1306 1291
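The port replaces the bare madd.d with a Madd_d macro taking a scratch register, presumably so it can fall back to a separate multiply and add where a fused instruction is unavailable. The operation itself is addend + multiplier * multiplicand; std::fma is the closest scalar analogue (a single rounding, like a true fused madd):

  #include <cmath>
  #include <cstdio>

  int main() {
    double addend = 1.0, multiplier = 2.0, multiplicand = 3.0;
    std::printf("%f\n", std::fma(multiplier, multiplicand, addend));  // 7.0
  }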
1307 1292
1308 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { 1293 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
1309 Register dividend = ToRegister(instr->dividend()); 1294 Register dividend = ToRegister(instr->dividend());
1310 Register result = ToRegister(instr->result()); 1295 Register result = ToRegister(instr->result());
1311 int32_t divisor = instr->divisor(); 1296 int32_t divisor = instr->divisor();
1312 Register scratch = result.is(dividend) ? scratch0() : dividend; 1297 Register scratch = result.is(dividend) ? scratch0() : dividend;
1313 ASSERT(!result.is(dividend) || !scratch.is(dividend)); 1298 ASSERT(!result.is(dividend) || !scratch.is(dividend));
1314 1299
1315 // If the divisor is 1, return the dividend. 1300 // If the divisor is 1, return the dividend.
1316 if (divisor == 1) { 1301 if (divisor == 1) {
1317 __ Move(result, dividend); 1302 __ Move(result, dividend);
1318 return; 1303 return;
1319 } 1304 }
1320 1305
1321 // If the divisor is positive, things are easy: There can be no deopts and we 1306 // If the divisor is positive, things are easy: There can be no deopts and we
1322 // can simply do an arithmetic right shift. 1307 // can simply do an arithmetic right shift.
1323 uint16_t shift = WhichPowerOf2Abs(divisor); 1308 uint16_t shift = WhichPowerOf2Abs(divisor);
1324 if (divisor > 1) { 1309 if (divisor > 1) {
1325 __ sra(result, dividend, shift); 1310 __ dsra(result, dividend, shift);
1326 return; 1311 return;
1327 } 1312 }
1328 1313
1329 // If the divisor is negative, we have to negate and handle edge cases. 1314 // If the divisor is negative, we have to negate and handle edge cases.
1330 1315 // Dividend can be the same register as result so save the value of it
1331 // dividend can be the same register as result so save the value of it
1332 // for checking overflow. 1316 // for checking overflow.
1333 __ Move(scratch, dividend); 1317 __ Move(scratch, dividend);
1334 1318
1335 __ Subu(result, zero_reg, dividend); 1319 __ Dsubu(result, zero_reg, dividend);
1336 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1320 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1337 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); 1321 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
1338 } 1322 }
1339 1323
1324 __ Xor(scratch, scratch, result);
1340 // Dividing by -1 is basically negation, unless we overflow. 1325 // Dividing by -1 is basically negation, unless we overflow.
1341 __ Xor(scratch, scratch, result);
1342 if (divisor == -1) { 1326 if (divisor == -1) {
1343 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1327 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1344 DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg)); 1328 DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
1345 } 1329 }
1346 return; 1330 return;
1347 } 1331 }
1348 1332
1349 // If the negation could not overflow, simply shifting is OK. 1333 // If the negation could not overflow, simply shifting is OK.
1350 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1334 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1351 __ sra(result, result, shift); 1335 __ dsra(result, result, shift);
1352 return; 1336 return;
1353 } 1337 }
1354 1338
1355 Label no_overflow, done; 1339 Label no_overflow, done;
1356 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg)); 1340 __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
1357 __ li(result, Operand(kMinInt / divisor)); 1341 __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
1358 __ Branch(&done); 1342 __ Branch(&done);
1359 __ bind(&no_overflow); 1343 __ bind(&no_overflow);
1360 __ sra(result, result, shift); 1344 __ dsra(result, result, shift);
1361 __ bind(&done); 1345 __ bind(&done);
1362 } 1346 }
1363 1347
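A detail worth noting in this chunk: on mips32, overflow of the negation (dividend == kMinInt) was detected with the Xor sign trick, deopting when scratch ^ result stayed non-negative; in the 64-bit port Dsubu genuinely computes -kMinInt = 2^31 in a 64-bit register, so the divisor == -1 case can simply range-check the result against kMaxInt. Sketch of the new check:

  #include <cstdint>
  #include <cstdio>

  int main() {
    int64_t dividend = INT32_MIN;  // value lives in a 64-bit register
    int64_t negated = -dividend;   // 2147483648: no wrap-around on mips64
    std::printf("deopt: %d\n", negated > INT32_MAX ? 1 : 0);  // 1
  }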
1364 1348
1365 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { 1349 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
1366 Register dividend = ToRegister(instr->dividend()); 1350 Register dividend = ToRegister(instr->dividend());
1367 int32_t divisor = instr->divisor(); 1351 int32_t divisor = instr->divisor();
1368 Register result = ToRegister(instr->result()); 1352 Register result = ToRegister(instr->result());
1369 ASSERT(!dividend.is(result)); 1353 ASSERT(!dividend.is(result));
1370 1354
1371 if (divisor == 0) { 1355 if (divisor == 0) {
1372 DeoptimizeIf(al, instr->environment()); 1356 DeoptimizeIf(al, instr->environment());
1373 return; 1357 return;
1374 } 1358 }
1375 1359
1376 // Check for (0 / -x) that will produce negative zero. 1360 // Check for (0 / -x) that will produce negative zero.
1377 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1361 HMathFloorOfDiv* hdiv = instr->hydrogen();
1378 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1362 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1379 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg)); 1363 DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
1380 } 1364 }
1381 1365
1382 // Easy case: We need no dynamic check for the dividend and the flooring 1366 // Easy case: We need no dynamic check for the dividend and the flooring
1383 // division is the same as the truncating division. 1367 // division is the same as the truncating division.
1384 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1368 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1385 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1369 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1386 __ TruncatingDiv(result, dividend, Abs(divisor)); 1370 __ TruncatingDiv(result, dividend, Abs(divisor));
1387 if (divisor < 0) __ Subu(result, zero_reg, result); 1371 if (divisor < 0) __ Dsubu(result, zero_reg, result);
1388 return; 1372 return;
1389 } 1373 }
1390 1374
1391 // In the general case we may need to adjust before and after the truncating 1375 // In the general case we may need to adjust before and after the truncating
1392 // division to get a flooring division. 1376 // division to get a flooring division.
1393 Register temp = ToRegister(instr->temp()); 1377 Register temp = ToRegister(instr->temp());
1394 ASSERT(!temp.is(dividend) && !temp.is(result)); 1378 ASSERT(!temp.is(dividend) && !temp.is(result));
1395 Label needs_adjustment, done; 1379 Label needs_adjustment, done;
1396 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt, 1380 __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
1397 dividend, Operand(zero_reg)); 1381 dividend, Operand(zero_reg));
1398 __ TruncatingDiv(result, dividend, Abs(divisor)); 1382 __ TruncatingDiv(result, dividend, Abs(divisor));
1399 if (divisor < 0) __ Subu(result, zero_reg, result); 1383 if (divisor < 0) __ Dsubu(result, zero_reg, result);
1400 __ jmp(&done); 1384 __ jmp(&done);
1401 __ bind(&needs_adjustment); 1385 __ bind(&needs_adjustment);
1402 __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1)); 1386 __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
1403 __ TruncatingDiv(result, temp, Abs(divisor)); 1387 __ TruncatingDiv(result, temp, Abs(divisor));
1404 if (divisor < 0) __ Subu(result, zero_reg, result); 1388 if (divisor < 0) __ Dsubu(result, zero_reg, result);
1405 __ Subu(result, result, Operand(1)); 1389 __ Dsubu(result, result, Operand(1));
1406 __ bind(&done); 1390 __ bind(&done);
1407 } 1391 }
1408 1392
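The adjust-before/after dance above, in scalar form: truncating and flooring division agree unless the operand signs differ; nudging such a dividend one step toward zero and subtracting 1 from the truncated quotient yields the floor whether or not the division is exact. Sketch (assumes divisor != 0 and no kMinInt edge):

  #include <cstdint>
  #include <cstdio>

  int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
    bool needs_adjustment = divisor > 0 ? dividend < 0 : dividend > 0;
    if (!needs_adjustment) return dividend / divisor;  // easy case above
    int32_t temp = dividend + (divisor > 0 ? 1 : -1);  // Daddu
    return temp / divisor - 1;                         // TruncatingDiv, Dsubu
  }

  int main() {
    std::printf("%d\n", FlooringDiv(-7, 2));  // -4 (truncation would give -3)
    std::printf("%d\n", FlooringDiv(-8, 2));  // -4 (exact case still correct)
  }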
1409 1393
1410 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. 1394 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
1411 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { 1395 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
1412 HBinaryOperation* hdiv = instr->hydrogen(); 1396 HBinaryOperation* hdiv = instr->hydrogen();
1413 Register dividend = ToRegister(instr->dividend()); 1397 Register dividend = ToRegister(instr->dividend());
1414 Register divisor = ToRegister(instr->divisor()); 1398 Register divisor = ToRegister(instr->divisor());
1415 const Register result = ToRegister(instr->result()); 1399 const Register result = ToRegister(instr->result());
1416 1400
1417 // On MIPS div is asynchronous - it will run in the background while we 1401 // On MIPS div is asynchronous - it will run in the background while we
1418 // check for special cases. 1402 // check for special cases.
1419 __ div(dividend, divisor); 1403 __ ddiv(dividend, divisor);
1420 1404
1421 // Check for x / 0. 1405 // Check for x / 0.
1422 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1406 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1423 DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg)); 1407 DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
1424 } 1408 }
1425 1409
1426 // Check for (0 / -x) that will produce negative zero. 1410 // Check for (0 / -x) that will produce negative zero.
1427 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1411 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1428 Label left_not_zero; 1412 Label left_not_zero;
1429 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); 1413 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
(...skipping 11 matching lines...)
1441 } 1425 }
1442 1426
1443 // We performed a truncating division. Correct the result if necessary. 1427 // We performed a truncating division. Correct the result if necessary.
1444 Label done; 1428 Label done;
1445 Register remainder = scratch0(); 1429 Register remainder = scratch0();
1446 __ mfhi(remainder); 1430 __ mfhi(remainder);
1447 __ mflo(result); 1431 __ mflo(result);
1448 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); 1432 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1449 __ Xor(remainder, remainder, Operand(divisor)); 1433 __ Xor(remainder, remainder, Operand(divisor));
1450 __ Branch(&done, ge, remainder, Operand(zero_reg)); 1434 __ Branch(&done, ge, remainder, Operand(zero_reg));
1451 __ Subu(result, result, Operand(1)); 1435 __ Dsubu(result, result, Operand(1));
1452 __ bind(&done); 1436 __ bind(&done);
1453 } 1437 }
1454 1438
1455 1439
1456 void LCodeGen::DoMulI(LMulI* instr) { 1440 void LCodeGen::DoMulI(LMulI* instr) {
1457 Register scratch = scratch0(); 1441 Register scratch = scratch0();
1458 Register result = ToRegister(instr->result()); 1442 Register result = ToRegister(instr->result());
1459 // Note that result may alias left. 1443 // Note that result may alias left.
1460 Register left = ToRegister(instr->left()); 1444 Register left = ToRegister(instr->left());
1461 LOperand* right_op = instr->right(); 1445 LOperand* right_op = instr->right();
1462 1446
1463 bool bailout_on_minus_zero = 1447 bool bailout_on_minus_zero =
1464 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1448 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1465 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1449 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1466 1450
1467 if (right_op->IsConstantOperand()) { 1451 if (right_op->IsConstantOperand()) {
1468 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1452 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1469 1453
1470 if (bailout_on_minus_zero && (constant < 0)) { 1454 if (bailout_on_minus_zero && (constant < 0)) {
1471 // The case of a null constant will be handled separately. 1455 // The case of a null constant will be handled separately.
1472 // If constant is negative and left is null, the result should be -0. 1456 // If constant is negative and left is null, the result should be -0.
1473 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg)); 1457 DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
1474 } 1458 }
1475 1459
1476 switch (constant) { 1460 switch (constant) {
1477 case -1: 1461 case -1:
1478 if (overflow) { 1462 if (overflow) {
1479 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); 1463 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1480 DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg)); 1464 DeoptimizeIf(gt, instr->environment(), scratch, Operand(kMaxInt));
1481 } else { 1465 } else {
1482 __ Subu(result, zero_reg, left); 1466 __ Dsubu(result, zero_reg, left);
1483 } 1467 }
1484 break; 1468 break;
1485 case 0: 1469 case 0:
1486 if (bailout_on_minus_zero) { 1470 if (bailout_on_minus_zero) {
1487 // If left is strictly negative and the constant is null, the 1471 // If left is strictly negative and the constant is null, the
1488 // result is -0. Deoptimize if required, otherwise return 0. 1472 // result is -0. Deoptimize if required, otherwise return 0.
1489 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg)); 1473 DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
1490 } 1474 }
1491 __ mov(result, zero_reg); 1475 __ mov(result, zero_reg);
1492 break; 1476 break;
1493 case 1: 1477 case 1:
1494 // Nothing to do. 1478 // Nothing to do.
1495 __ Move(result, left); 1479 __ Move(result, left);
1496 break; 1480 break;
1497 default: 1481 default:
1498 // Multiplying by powers of two and powers of two plus or minus 1482 // Multiplying by powers of two and powers of two plus or minus
1499 // one can be done faster with shifted operands. 1483 // one can be done faster with shifted operands.
1500 // For other constants we emit standard code. 1484 // For other constants we emit standard code.
1501 int32_t mask = constant >> 31; 1485 int32_t mask = constant >> 31;
1502 uint32_t constant_abs = (constant + mask) ^ mask; 1486 uint32_t constant_abs = (constant + mask) ^ mask;
1503 1487
1504 if (IsPowerOf2(constant_abs)) { 1488 if (IsPowerOf2(constant_abs)) {
1505 int32_t shift = WhichPowerOf2(constant_abs); 1489 int32_t shift = WhichPowerOf2(constant_abs);
1506 __ sll(result, left, shift); 1490 __ dsll(result, left, shift);
1507 // Correct the sign of the result if the constant is negative. 1491 // Correct the sign of the result if the constant is negative.
1508 if (constant < 0) __ Subu(result, zero_reg, result); 1492 if (constant < 0) __ Dsubu(result, zero_reg, result);
1509 } else if (IsPowerOf2(constant_abs - 1)) { 1493 } else if (IsPowerOf2(constant_abs - 1)) {
1510 int32_t shift = WhichPowerOf2(constant_abs - 1); 1494 int32_t shift = WhichPowerOf2(constant_abs - 1);
1511 __ sll(scratch, left, shift); 1495 __ dsll(scratch, left, shift);
1512 __ Addu(result, scratch, left); 1496 __ Daddu(result, scratch, left);
1513 // Correct the sign of the result if the constant is negative. 1497 // Correct the sign of the result if the constant is negative.
1514 if (constant < 0) __ Subu(result, zero_reg, result); 1498 if (constant < 0) __ Dsubu(result, zero_reg, result);
1515 } else if (IsPowerOf2(constant_abs + 1)) { 1499 } else if (IsPowerOf2(constant_abs + 1)) {
1516 int32_t shift = WhichPowerOf2(constant_abs + 1); 1500 int32_t shift = WhichPowerOf2(constant_abs + 1);
1517 __ sll(scratch, left, shift); 1501 __ dsll(scratch, left, shift);
1518 __ Subu(result, scratch, left); 1502 __ Dsubu(result, scratch, left);
1519 // Correct the sign of the result if the constant is negative. 1503 // Correct the sign of the result if the constant is negative.
1520 if (constant < 0) __ Subu(result, zero_reg, result); 1504 if (constant < 0) __ Dsubu(result, zero_reg, result);
1521 } else { 1505 } else {
1522 // Generate standard code. 1506 // Generate standard code.
1523 __ li(at, constant); 1507 __ li(at, constant);
1524 __ Mul(result, left, at); 1508 __ Dmul(result, left, at);
1525 } 1509 }
1526 } 1510 }
1527 1511
1528 } else { 1512 } else {
1529 ASSERT(right_op->IsRegister()); 1513 ASSERT(right_op->IsRegister());
1530 Register right = ToRegister(right_op); 1514 Register right = ToRegister(right_op);
1531 1515
1532 if (overflow) { 1516 if (overflow) {
1533 // hi:lo = left * right. 1517 // hi:lo = left * right.
1534 if (instr->hydrogen()->representation().IsSmi()) { 1518 if (instr->hydrogen()->representation().IsSmi()) {
1535 __ SmiUntag(result, left); 1519 __ SmiUntag(result, left);
1536 __ mult(result, right); 1520 __ dmult(result, right);
1537 __ mfhi(scratch); 1521 __ mfhi(scratch);
1538 __ mflo(result); 1522 __ mflo(result);
1539 } else { 1523 } else {
1540 __ mult(left, right); 1524 __ dmult(left, right);
1541 __ mfhi(scratch); 1525 __ mfhi(scratch);
1542 __ mflo(result); 1526 __ mflo(result);
1543 } 1527 }
1544 __ sra(at, result, 31); 1528 __ dsra32(at, result, 31);
1545 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); 1529 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
1530 if (!instr->hydrogen()->representation().IsSmi()) {
1531 DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
1532 DeoptimizeIf(lt, instr->environment(), result, Operand(kMinInt));
1533 }
1546 } else { 1534 } else {
1547 if (instr->hydrogen()->representation().IsSmi()) { 1535 if (instr->hydrogen()->representation().IsSmi()) {
1548 __ SmiUntag(result, left); 1536 __ SmiUntag(result, left);
1549 __ Mul(result, result, right); 1537 __ Dmul(result, result, right);
1550 } else { 1538 } else {
1551 __ Mul(result, left, right); 1539 __ Dmul(result, left, right);
1552 } 1540 }
1553 } 1541 }
1554 1542
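The dmult/mfhi/mflo plus shift-compare sequence above is the classic widening-multiply overflow test: the product fits in 32 bits iff the high word equals the sign extension of the low word. The extra kMaxInt/kMinInt deopts added for the non-Smi case catch 64-bit products that pass that test but still overflow int32. Scalar sketch of the core test:

  #include <cstdint>
  #include <cstdio>

  bool MulOverflows(int32_t a, int32_t b, int32_t* out) {
    int64_t product = static_cast<int64_t>(a) * b;  // dmult: hi:lo
    int32_t lo = static_cast<int32_t>(product);
    int32_t hi = static_cast<int32_t>(product >> 32);
    *out = lo;
    return hi != (lo >> 31);  // mismatch -> deoptimize
  }

  int main() {
    int32_t r;
    std::printf("%d\n", MulOverflows(0x10000, 0x10000, &r));  // 1: overflows
    std::printf("%d\n", MulOverflows(-4, 5, &r));             // 0, r == -20
  }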
1555 if (bailout_on_minus_zero) { 1543 if (bailout_on_minus_zero) {
1556 Label done; 1544 Label done;
1557 __ Xor(at, left, right); 1545 __ Xor(at, left, right);
1558 __ Branch(&done, ge, at, Operand(zero_reg)); 1546 __ Branch(&done, ge, at, Operand(zero_reg));
1559 // Bail out if the result is minus zero. 1547 // Bail out if the result is minus zero.
1560 DeoptimizeIf(eq, 1548 DeoptimizeIf(eq,
1561 instr->environment(), 1549 instr->environment(),
(...skipping 40 matching lines...)
1602 } 1590 }
1603 } 1591 }
1604 1592
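The constant cases in DoMulI above avoid the multiplier whenever |c| is 2^k, 2^k + 1 or 2^k - 1, using one shift plus at most one add or subtract, then fixing the sign. A standalone sketch (assumes c is none of -1, 0, 1, which the switch handles separately, GCC/Clang's __builtin_ctz, and two's-complement shifts):

  #include <cstdint>
  #include <cstdio>

  int32_t MulByConst(int32_t left, int32_t c) {
    int32_t mask = c >> 31;
    uint32_t abs_c = (c + mask) ^ mask;             // branch-free |c|, as above
    int32_t result;
    if ((abs_c & (abs_c - 1)) == 0) {               // |c| == 2^k: dsll
      result = left << __builtin_ctz(abs_c);
    } else if (((abs_c - 1) & (abs_c - 2)) == 0) {  // |c| == 2^k + 1: dsll, Daddu
      result = (left << __builtin_ctz(abs_c - 1)) + left;
    } else if (((abs_c + 1) & abs_c) == 0) {        // |c| == 2^k - 1: dsll, Dsubu
      result = (left << __builtin_ctz(abs_c + 1)) - left;
    } else {
      return left * c;                              // general case: li, Dmul
    }
    return c < 0 ? -result : result;                // Dsubu from zero_reg
  }

  int main() {
    std::printf("%d\n", MulByConst(7, -15));  // -105 via (7 << 4) - 7
  }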
1605 1593
1606 void LCodeGen::DoShiftI(LShiftI* instr) { 1594 void LCodeGen::DoShiftI(LShiftI* instr) {
1607 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so 1595 // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
1608 // result may alias either of them. 1596 // result may alias either of them.
1609 LOperand* right_op = instr->right(); 1597 LOperand* right_op = instr->right();
1610 Register left = ToRegister(instr->left()); 1598 Register left = ToRegister(instr->left());
1611 Register result = ToRegister(instr->result()); 1599 Register result = ToRegister(instr->result());
1612 Register scratch = scratch0();
1613 1600
1614 if (right_op->IsRegister()) { 1601 if (right_op->IsRegister()) {
1615 // No need to mask the right operand on MIPS, it is built into the variable 1602 // No need to mask the right operand on MIPS, it is built into the variable
1616 // shift instructions. 1603 // shift instructions.
1617 switch (instr->op()) { 1604 switch (instr->op()) {
1618 case Token::ROR: 1605 case Token::ROR:
1619 __ Ror(result, left, Operand(ToRegister(right_op))); 1606 __ Ror(result, left, Operand(ToRegister(right_op)));
1620 break; 1607 break;
1621 case Token::SAR: 1608 case Token::SAR:
1622 __ srav(result, left, ToRegister(right_op)); 1609 __ srav(result, left, ToRegister(right_op));
1623 break; 1610 break;
1624 case Token::SHR: 1611 case Token::SHR:
1625 __ srlv(result, left, ToRegister(right_op)); 1612 __ srlv(result, left, ToRegister(right_op));
1626 if (instr->can_deopt()) { 1613 if (instr->can_deopt()) {
1627 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); 1614 // TODO(yy): (-1) >>> 0. anything else?
1615 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
1616 DeoptimizeIf(gt, instr->environment(), result, Operand(kMaxInt));
1628 } 1617 }
1629 break; 1618 break;
1630 case Token::SHL: 1619 case Token::SHL:
1631 __ sllv(result, left, ToRegister(right_op)); 1620 __ sllv(result, left, ToRegister(right_op));
1632 break; 1621 break;
1633 default: 1622 default:
1634 UNREACHABLE(); 1623 UNREACHABLE();
1635 break; 1624 break;
1636 } 1625 }
1637 } else { 1626 } else {
(...skipping 21 matching lines...)
1659 } else { 1648 } else {
1660 if (instr->can_deopt()) { 1649 if (instr->can_deopt()) {
1661 __ And(at, left, Operand(0x80000000)); 1650 __ And(at, left, Operand(0x80000000));
1662 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); 1651 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
1663 } 1652 }
1664 __ Move(result, left); 1653 __ Move(result, left);
1665 } 1654 }
1666 break; 1655 break;
1667 case Token::SHL: 1656 case Token::SHL:
1668 if (shift_count != 0) { 1657 if (shift_count != 0) {
1669 if (instr->hydrogen_value()->representation().IsSmi() && 1658 if (instr->hydrogen_value()->representation().IsSmi()) {
1670 instr->can_deopt()) { 1659 __ dsll(result, left, shift_count);
1671 if (shift_count != 1) {
1672 __ sll(result, left, shift_count - 1);
1673 __ SmiTagCheckOverflow(result, result, scratch);
1674 } else {
1675 __ SmiTagCheckOverflow(result, left, scratch);
1676 }
1677 DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
1678 } else { 1660 } else {
1679 __ sll(result, left, shift_count); 1661 __ sll(result, left, shift_count);
1680 } 1662 }
1681 } else { 1663 } else {
1682 __ Move(result, left); 1664 __ Move(result, left);
1683 } 1665 }
1684 break; 1666 break;
1685 default: 1667 default:
1686 UNREACHABLE(); 1668 UNREACHABLE();
1687 break; 1669 break;
1688 } 1670 }
1689 } 1671 }
1690 } 1672 }
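Reviewer note, not part of the patch: the two SHR deopts above exist because JS '>>>' yields an unsigned 32-bit value while untagged Lithium integers are signed 32-bit. A hedged sketch of the condition (names illustrative):

#include <cassert>
#include <cstdint>

// srlv computes the correct unsigned bits; a deopt is needed exactly when
// they do not fit in int32, e.g. (-1) >>> 0 == 0xFFFFFFFF > kMaxInt.
bool ShrNeedsDeopt(int32_t left, uint32_t shift_amount) {
  uint32_t result = static_cast<uint32_t>(left) >> (shift_amount & 31);
  return result > 0x7FFFFFFFu;  // kMaxInt
}

int main() {
  assert(ShrNeedsDeopt(-1, 0));   // the TODO case in the patch
  assert(!ShrNeedsDeopt(-1, 1));  // 0x7FFFFFFF still fits
  assert(!ShrNeedsDeopt(42, 2));
}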
1691 1673
1692 1674
1693 void LCodeGen::DoSubI(LSubI* instr) { 1675 void LCodeGen::DoSubI(LSubI* instr) {
1694 LOperand* left = instr->left(); 1676 LOperand* left = instr->left();
1695 LOperand* right = instr->right(); 1677 LOperand* right = instr->right();
1696 LOperand* result = instr->result(); 1678 LOperand* result = instr->result();
1697 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1679 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1698 1680
1699 if (!can_overflow) { 1681 if (!can_overflow) {
1700 if (right->IsStackSlot()) { 1682 if (right->IsStackSlot()) {
1701 Register right_reg = EmitLoadRegister(right, at); 1683 Register right_reg = EmitLoadRegister(right, at);
1702 __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg)); 1684 __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
1703 } else { 1685 } else {
1704 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1686 ASSERT(right->IsRegister() || right->IsConstantOperand());
1705 __ Subu(ToRegister(result), ToRegister(left), ToOperand(right)); 1687 __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
1706 } 1688 }
1707 } else { // can_overflow. 1689 } else { // can_overflow.
1708 Register overflow = scratch0(); 1690 Register overflow = scratch0();
1709 Register scratch = scratch1(); 1691 Register scratch = scratch1();
1710 if (right->IsStackSlot() || right->IsConstantOperand()) { 1692 if (right->IsStackSlot() || right->IsConstantOperand()) {
1711 Register right_reg = EmitLoadRegister(right, scratch); 1693 Register right_reg = EmitLoadRegister(right, scratch);
1712 __ SubuAndCheckForOverflow(ToRegister(result), 1694 __ SubuAndCheckForOverflow(ToRegister(result),
1713 ToRegister(left), 1695 ToRegister(left),
1714 right_reg, 1696 right_reg,
1715 overflow); // Reg at also used as scratch. 1697 overflow); // Reg at also used as scratch.
1716 } else { 1698 } else {
1717 ASSERT(right->IsRegister()); 1699 ASSERT(right->IsRegister());
 1718 // The overflow check macros do not support constant operands, so the 1700 // The overflow check macros do not support constant operands, so the
 1719 // IsConstantOperand case is handled in the previous clause. 1701 // IsConstantOperand case is handled in the previous clause.
1720 __ SubuAndCheckForOverflow(ToRegister(result), 1702 __ SubuAndCheckForOverflow(ToRegister(result),
1721 ToRegister(left), 1703 ToRegister(left),
1722 ToRegister(right), 1704 ToRegister(right),
1723 overflow); // Reg at also used as scratch. 1705 overflow); // Reg at also used as scratch.
1724 } 1706 }
1725 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); 1707 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
1708 if (!instr->hydrogen()->representation().IsSmi()) {
1709 DeoptimizeIf(gt, instr->environment(),
1710 ToRegister(result), Operand(kMaxInt));
1711 DeoptimizeIf(lt, instr->environment(),
1712 ToRegister(result), Operand(kMinInt));
1713 }
1726 } 1714 }
1727 } 1715 }
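Reviewer note, not part of the patch: on mips64 Dsubu works on full 64-bit registers, so an int32 (non-smi) result can leave the 32-bit range without tripping the 32-bit overflow flag; that is what the added kMaxInt/kMinInt deopts catch. A minimal sketch, assuming plain two's-complement arithmetic:

#include <cassert>
#include <cstdint>

// The 64-bit difference must be re-checked against the int32 range.
bool SubResultOutOfInt32Range(int32_t left, int32_t right) {
  int64_t result = static_cast<int64_t>(left) - static_cast<int64_t>(right);
  return result > INT32_MAX || result < INT32_MIN;
}

int main() {
  assert(SubResultOutOfInt32Range(INT32_MIN, 1));    // would deopt
  assert(!SubResultOutOfInt32Range(-1, INT32_MAX));  // exactly kMinInt, fits
}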
1728 1716
1729 1717
1730 void LCodeGen::DoConstantI(LConstantI* instr) { 1718 void LCodeGen::DoConstantI(LConstantI* instr) {
1731 __ li(ToRegister(instr->result()), Operand(instr->value())); 1719 __ li(ToRegister(instr->result()), Operand(instr->value()));
1732 } 1720 }
1733 1721
1734 1722
1735 void LCodeGen::DoConstantS(LConstantS* instr) { 1723 void LCodeGen::DoConstantS(LConstantS* instr) {
(...skipping 38 matching lines...)
1774 ASSERT(result.is(v0)); 1762 ASSERT(result.is(v0));
1775 ASSERT(!scratch.is(scratch0())); 1763 ASSERT(!scratch.is(scratch0()));
1776 ASSERT(!scratch.is(object)); 1764 ASSERT(!scratch.is(object));
1777 1765
1778 __ SmiTst(object, at); 1766 __ SmiTst(object, at);
1779 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 1767 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1780 __ GetObjectType(object, scratch, scratch); 1768 __ GetObjectType(object, scratch, scratch);
1781 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE)); 1769 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1782 1770
1783 if (index->value() == 0) { 1771 if (index->value() == 0) {
1784 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); 1772 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
1785 } else { 1773 } else {
1786 if (index->value() < JSDate::kFirstUncachedField) { 1774 if (index->value() < JSDate::kFirstUncachedField) {
1787 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1775 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1788 __ li(scratch, Operand(stamp)); 1776 __ li(scratch, Operand(stamp));
1789 __ lw(scratch, MemOperand(scratch)); 1777 __ ld(scratch, MemOperand(scratch));
1790 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1778 __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1791 __ Branch(&runtime, ne, scratch, Operand(scratch0())); 1779 __ Branch(&runtime, ne, scratch, Operand(scratch0()));
1792 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset + 1780 __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
1793 kPointerSize * index->value())); 1781 kPointerSize * index->value()));
1794 __ jmp(&done); 1782 __ jmp(&done);
1795 } 1783 }
1796 __ bind(&runtime); 1784 __ bind(&runtime);
1797 __ PrepareCallCFunction(2, scratch); 1785 __ PrepareCallCFunction(2, scratch);
1798 __ li(a1, Operand(index)); 1786 __ li(a1, Operand(index));
1799 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); 1787 __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
1800 __ bind(&done); 1788 __ bind(&done);
1801 } 1789 }
1802 } 1790 }
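Reviewer note, not part of the patch: a sketch of the stamp-based caching protocol that the lw-to-ld changes above preserve. Structure layouts and names below are illustrative, not V8's actual object layout.

#include <cassert>
#include <cstdint>

struct DateCache { int64_t stamp; };  // global cache version
struct DateObject {
  int64_t value;                      // JSDate::kValueOffset
  int64_t cache_stamp;                // JSDate::kCacheStampOffset
  int64_t cached_fields[8];           // year, month, day, ...
};

int64_t SlowGetField(DateObject*, int) { return 0; }  // stands in for the C call

int64_t GetDateField(DateObject* d, const DateCache* cache, int index) {
  if (index == 0) return d->value;       // the time value itself, never cached
  if (d->cache_stamp == cache->stamp) {
    return d->cached_fields[index - 1];  // fast path: stamps match
  }
  return SlowGetField(d, index);         // the &runtime path above
}

int main() {
  DateCache cache{7};
  DateObject d{123, 7, {2014}};
  assert(GetDateField(&d, &cache, 1) == 2014);  // cache hit
  cache.stamp = 8;                              // cache invalidated
  assert(GetDateField(&d, &cache, 1) == 0);     // falls back to the slow path
}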
1803 1791
1804 1792
1805 MemOperand LCodeGen::BuildSeqStringOperand(Register string, 1793 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
1806 LOperand* index, 1794 LOperand* index,
1807 String::Encoding encoding) { 1795 String::Encoding encoding) {
1808 if (index->IsConstantOperand()) { 1796 if (index->IsConstantOperand()) {
1809 int offset = ToInteger32(LConstantOperand::cast(index)); 1797 int offset = ToInteger32(LConstantOperand::cast(index));
1810 if (encoding == String::TWO_BYTE_ENCODING) { 1798 if (encoding == String::TWO_BYTE_ENCODING) {
1811 offset *= kUC16Size; 1799 offset *= kUC16Size;
1812 } 1800 }
1813 STATIC_ASSERT(kCharSize == 1); 1801 STATIC_ASSERT(kCharSize == 1);
1814 return FieldMemOperand(string, SeqString::kHeaderSize + offset); 1802 return FieldMemOperand(string, SeqString::kHeaderSize + offset);
1815 } 1803 }
1816 Register scratch = scratch0(); 1804 Register scratch = scratch0();
1817 ASSERT(!scratch.is(string)); 1805 ASSERT(!scratch.is(string));
1818 ASSERT(!scratch.is(ToRegister(index))); 1806 ASSERT(!scratch.is(ToRegister(index)));
1819 if (encoding == String::ONE_BYTE_ENCODING) { 1807 if (encoding == String::ONE_BYTE_ENCODING) {
1820 __ Addu(scratch, string, ToRegister(index)); 1808 __ Daddu(scratch, string, ToRegister(index));
1821 } else { 1809 } else {
1822 STATIC_ASSERT(kUC16Size == 2); 1810 STATIC_ASSERT(kUC16Size == 2);
1823 __ sll(scratch, ToRegister(index), 1); 1811 __ dsll(scratch, ToRegister(index), 1);
1824 __ Addu(scratch, string, scratch); 1812 __ Daddu(scratch, string, scratch);
1825 } 1813 }
1826 return FieldMemOperand(scratch, SeqString::kHeaderSize); 1814 return FieldMemOperand(scratch, SeqString::kHeaderSize);
1827 } 1815 }
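Reviewer note, not part of the patch: the operand built above is just base + header + index * char_size, with the index scaled by dsll for two-byte strings. A sketch with an assumed header size:

#include <cassert>
#include <cstdint>

const uintptr_t kHeaderSize = 16;  // stands in for SeqString::kHeaderSize

// kCharSize == 1 for one-byte strings, kUC16Size == 2 for two-byte strings.
uintptr_t SeqStringCharAddress(uintptr_t string, uintptr_t index,
                               bool two_byte) {
  uintptr_t char_size = two_byte ? 2 : 1;
  return string + kHeaderSize + index * char_size;
}

int main() {
  assert(SeqStringCharAddress(1000, 3, false) == 1019);  // byte element
  assert(SeqStringCharAddress(1000, 3, true) == 1022);   // halfword element
}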
1828 1816
1829 1817
1830 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { 1818 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
1831 String::Encoding encoding = instr->hydrogen()->encoding(); 1819 String::Encoding encoding = instr->hydrogen()->encoding();
1832 Register string = ToRegister(instr->string()); 1820 Register string = ToRegister(instr->string());
1833 Register result = ToRegister(instr->result()); 1821 Register result = ToRegister(instr->result());
1834 1822
1835 if (FLAG_debug_code) { 1823 if (FLAG_debug_code) {
1836 Register scratch = scratch0(); 1824 Register scratch = scratch0();
1837 __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 1825 __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
1838 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 1826 __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1839 1827
1840 __ And(scratch, scratch, 1828 __ And(scratch, scratch,
1841 Operand(kStringRepresentationMask | kStringEncodingMask)); 1829 Operand(kStringRepresentationMask | kStringEncodingMask));
1842 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; 1830 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
1843 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; 1831 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
1844 __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING 1832 __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
1845 ? one_byte_seq_type : two_byte_seq_type)); 1833 ? one_byte_seq_type : two_byte_seq_type));
1846 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg)); 1834 __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
1847 } 1835 }
1848 1836
1849 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); 1837 MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
1850 if (encoding == String::ONE_BYTE_ENCODING) { 1838 if (encoding == String::ONE_BYTE_ENCODING) {
1851 __ lbu(result, operand); 1839 __ lbu(result, operand);
1852 } else { 1840 } else {
1853 __ lhu(result, operand); 1841 __ lhu(result, operand);
1854 } 1842 }
(...skipping 27 matching lines...)
1882 1870
1883 void LCodeGen::DoAddI(LAddI* instr) { 1871 void LCodeGen::DoAddI(LAddI* instr) {
1884 LOperand* left = instr->left(); 1872 LOperand* left = instr->left();
1885 LOperand* right = instr->right(); 1873 LOperand* right = instr->right();
1886 LOperand* result = instr->result(); 1874 LOperand* result = instr->result();
1887 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1875 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1888 1876
1889 if (!can_overflow) { 1877 if (!can_overflow) {
1890 if (right->IsStackSlot()) { 1878 if (right->IsStackSlot()) {
1891 Register right_reg = EmitLoadRegister(right, at); 1879 Register right_reg = EmitLoadRegister(right, at);
1892 __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg)); 1880 __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
1893 } else { 1881 } else {
1894 ASSERT(right->IsRegister() || right->IsConstantOperand()); 1882 ASSERT(right->IsRegister() || right->IsConstantOperand());
1895 __ Addu(ToRegister(result), ToRegister(left), ToOperand(right)); 1883 __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
1896 } 1884 }
1897 } else { // can_overflow. 1885 } else { // can_overflow.
1898 Register overflow = scratch0(); 1886 Register overflow = scratch0();
1899 Register scratch = scratch1(); 1887 Register scratch = scratch1();
1900 if (right->IsStackSlot() || 1888 if (right->IsStackSlot() ||
1901 right->IsConstantOperand()) { 1889 right->IsConstantOperand()) {
1902 Register right_reg = EmitLoadRegister(right, scratch); 1890 Register right_reg = EmitLoadRegister(right, scratch);
1903 __ AdduAndCheckForOverflow(ToRegister(result), 1891 __ AdduAndCheckForOverflow(ToRegister(result),
1904 ToRegister(left), 1892 ToRegister(left),
1905 right_reg, 1893 right_reg,
1906 overflow); // Reg at also used as scratch. 1894 overflow); // Reg at also used as scratch.
1907 } else { 1895 } else {
1908 ASSERT(right->IsRegister()); 1896 ASSERT(right->IsRegister());
 1909 // The overflow check macros do not support constant operands, so the 1897 // The overflow check macros do not support constant operands, so the
 1910 // IsConstantOperand case is handled in the previous clause. 1898 // IsConstantOperand case is handled in the previous clause.
1911 __ AdduAndCheckForOverflow(ToRegister(result), 1899 __ AdduAndCheckForOverflow(ToRegister(result),
1912 ToRegister(left), 1900 ToRegister(left),
1913 ToRegister(right), 1901 ToRegister(right),
1914 overflow); // Reg at also used as scratch. 1902 overflow); // Reg at also used as scratch.
1915 } 1903 }
1916 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg)); 1904 DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
 1905 // If not a smi, it must be an int32.
1906 if (!instr->hydrogen()->representation().IsSmi()) {
1907 DeoptimizeIf(gt, instr->environment(),
1908 ToRegister(result), Operand(kMaxInt));
1909 DeoptimizeIf(lt, instr->environment(),
1910 ToRegister(result), Operand(kMinInt));
1911 }
1917 } 1912 }
1918 } 1913 }
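Reviewer note, not part of the patch: a hedged sketch of the sign-based overflow test that AdduAndCheckForOverflow expands to (the textbook check, not the macro's exact register scheduling):

#include <cassert>
#include <cstdint>

// A signed add overflows iff both operands share a sign and the result's
// sign differs; the expression below is negative exactly in that case,
// matching the DeoptimizeIf(lt, ..., overflow, zero_reg) above.
bool AddOverflows(int32_t left, int32_t right) {
  int32_t result = static_cast<int32_t>(
      static_cast<uint32_t>(left) + static_cast<uint32_t>(right));
  return ((left ^ result) & (right ^ result)) < 0;
}

int main() {
  assert(AddOverflows(INT32_MAX, 1));
  assert(AddOverflows(INT32_MIN, -1));
  assert(!AddOverflows(INT32_MAX, -1));
}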
1919 1914
1920 1915
1921 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1916 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1922 LOperand* left = instr->left(); 1917 LOperand* left = instr->left();
1923 LOperand* right = instr->right(); 1918 LOperand* right = instr->right();
1924 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1919 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1925 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; 1920 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1926 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1921 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
(...skipping 207 matching lines...)
2134 ASSERT(!info()->IsStub()); 2129 ASSERT(!info()->IsStub());
2135 EmitBranch(instr, al, zero_reg, Operand(zero_reg)); 2130 EmitBranch(instr, al, zero_reg, Operand(zero_reg));
2136 } else if (type.IsHeapNumber()) { 2131 } else if (type.IsHeapNumber()) {
2137 ASSERT(!info()->IsStub()); 2132 ASSERT(!info()->IsStub());
2138 DoubleRegister dbl_scratch = double_scratch0(); 2133 DoubleRegister dbl_scratch = double_scratch0();
2139 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2134 __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2140 // Test the double value. Zero and NaN are false. 2135 // Test the double value. Zero and NaN are false.
2141 EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero); 2136 EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
2142 } else if (type.IsString()) { 2137 } else if (type.IsString()) {
2143 ASSERT(!info()->IsStub()); 2138 ASSERT(!info()->IsStub());
2144 __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); 2139 __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2145 EmitBranch(instr, ne, at, Operand(zero_reg)); 2140 EmitBranch(instr, ne, at, Operand(zero_reg));
2146 } else { 2141 } else {
2147 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); 2142 ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
2148 // Avoid deopts in the case where we've never executed this path before. 2143 // Avoid deopts in the case where we've never executed this path before.
2149 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic(); 2144 if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
2150 2145
2151 if (expected.Contains(ToBooleanStub::UNDEFINED)) { 2146 if (expected.Contains(ToBooleanStub::UNDEFINED)) {
2152 // undefined -> false. 2147 // undefined -> false.
2153 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 2148 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
2154 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); 2149 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
(...skipping 16 matching lines...)
2171 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); 2166 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2172 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2167 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2173 } else if (expected.NeedsMap()) { 2168 } else if (expected.NeedsMap()) {
2174 // If we need a map later and have a Smi -> deopt. 2169 // If we need a map later and have a Smi -> deopt.
2175 __ SmiTst(reg, at); 2170 __ SmiTst(reg, at);
2176 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 2171 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
2177 } 2172 }
2178 2173
2179 const Register map = scratch0(); 2174 const Register map = scratch0();
2180 if (expected.NeedsMap()) { 2175 if (expected.NeedsMap()) {
2181 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2176 __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2182 if (expected.CanBeUndetectable()) { 2177 if (expected.CanBeUndetectable()) {
2183 // Undetectable -> false. 2178 // Undetectable -> false.
2184 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); 2179 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2185 __ And(at, at, Operand(1 << Map::kIsUndetectable)); 2180 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2186 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); 2181 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
2187 } 2182 }
2188 } 2183 }
2189 2184
2190 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { 2185 if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
2191 // spec object -> true. 2186 // spec object -> true.
2192 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); 2187 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
2193 __ Branch(instr->TrueLabel(chunk_), 2188 __ Branch(instr->TrueLabel(chunk_),
2194 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE)); 2189 ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
2195 } 2190 }
2196 2191
2197 if (expected.Contains(ToBooleanStub::STRING)) { 2192 if (expected.Contains(ToBooleanStub::STRING)) {
2198 // String value -> false iff empty. 2193 // String value -> false iff empty.
2199 Label not_string; 2194 Label not_string;
2200 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset)); 2195 __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
 2201 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE)); 2196 __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
2202 __ lw(at, FieldMemOperand(reg, String::kLengthOffset)); 2197 __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
2203 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg)); 2198 __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
2204 __ Branch(instr->FalseLabel(chunk_)); 2199 __ Branch(instr->FalseLabel(chunk_));
2205 __ bind(&not_string); 2200 __ bind(&not_string);
2206 } 2201 }
2207 2202
2208 if (expected.Contains(ToBooleanStub::SYMBOL)) { 2203 if (expected.Contains(ToBooleanStub::SYMBOL)) {
2209 // Symbol value -> true. 2204 // Symbol value -> true.
2210 const Register scratch = scratch1(); 2205 const Register scratch = scratch1();
2211 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 2206 __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
2212 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE)); 2207 __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
(...skipping 90 matching lines...)
2303 FPURegister right_reg = ToDoubleRegister(right); 2298 FPURegister right_reg = ToDoubleRegister(right);
2304 2299
2305 // If a NaN is involved, i.e. the result is unordered, 2300 // If a NaN is involved, i.e. the result is unordered,
2306 // jump to false block label. 2301 // jump to false block label.
2307 __ BranchF(NULL, instr->FalseLabel(chunk_), eq, 2302 __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
2308 left_reg, right_reg); 2303 left_reg, right_reg);
2309 2304
2310 EmitBranchF(instr, cond, left_reg, right_reg); 2305 EmitBranchF(instr, cond, left_reg, right_reg);
2311 } else { 2306 } else {
2312 Register cmp_left; 2307 Register cmp_left;
2313 Operand cmp_right = Operand(0); 2308 Operand cmp_right = Operand((int64_t)0);
2314
2315 if (right->IsConstantOperand()) { 2309 if (right->IsConstantOperand()) {
2316 int32_t value = ToInteger32(LConstantOperand::cast(right)); 2310 int32_t value = ToInteger32(LConstantOperand::cast(right));
2317 if (instr->hydrogen_value()->representation().IsSmi()) { 2311 if (instr->hydrogen_value()->representation().IsSmi()) {
2318 cmp_left = ToRegister(left); 2312 cmp_left = ToRegister(left);
2319 cmp_right = Operand(Smi::FromInt(value)); 2313 cmp_right = Operand(Smi::FromInt(value));
2320 } else { 2314 } else {
2321 cmp_left = ToRegister(left); 2315 cmp_left = ToRegister(left);
2322 cmp_right = Operand(value); 2316 cmp_right = Operand(value);
2323 } 2317 }
2324 } else if (left->IsConstantOperand()) { 2318 } else if (left->IsConstantOperand()) {
2325 int32_t value = ToInteger32(LConstantOperand::cast(left)); 2319 int32_t value = ToInteger32(LConstantOperand::cast(left));
2326 if (instr->hydrogen_value()->representation().IsSmi()) { 2320 if (instr->hydrogen_value()->representation().IsSmi()) {
2327 cmp_left = ToRegister(right); 2321 cmp_left = ToRegister(right);
2328 cmp_right = Operand(Smi::FromInt(value)); 2322 cmp_right = Operand(Smi::FromInt(value));
2329 } else { 2323 } else {
2330 cmp_left = ToRegister(right); 2324 cmp_left = ToRegister(right);
2331 cmp_right = Operand(value); 2325 cmp_right = Operand(value);
2332 } 2326 }
2333 // We commuted the operands, so commute the condition. 2327 // We commuted the operands, so commute the condition.
2334 cond = CommuteCondition(cond); 2328 cond = CommuteCondition(cond);
2335 } else { 2329 } else {
2336 cmp_left = ToRegister(left); 2330 cmp_left = ToRegister(left);
2337 cmp_right = Operand(ToRegister(right)); 2331 cmp_right = Operand(ToRegister(right));
2338 } 2332 }
(...skipping 31 matching lines...)
2370 2364
2371 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { 2365 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
2372 Representation rep = instr->hydrogen()->value()->representation(); 2366 Representation rep = instr->hydrogen()->value()->representation();
2373 ASSERT(!rep.IsInteger32()); 2367 ASSERT(!rep.IsInteger32());
2374 Register scratch = ToRegister(instr->temp()); 2368 Register scratch = ToRegister(instr->temp());
2375 2369
2376 if (rep.IsDouble()) { 2370 if (rep.IsDouble()) {
2377 DoubleRegister value = ToDoubleRegister(instr->value()); 2371 DoubleRegister value = ToDoubleRegister(instr->value());
2378 EmitFalseBranchF(instr, ne, value, kDoubleRegZero); 2372 EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
2379 __ FmoveHigh(scratch, value); 2373 __ FmoveHigh(scratch, value);
2374 // Only use low 32-bits of value.
2375 __ dsll32(scratch, scratch, 0);
2376 __ dsrl32(scratch, scratch, 0);
2380 __ li(at, 0x80000000); 2377 __ li(at, 0x80000000);
2381 } else { 2378 } else {
2382 Register value = ToRegister(instr->value()); 2379 Register value = ToRegister(instr->value());
2383 __ CheckMap(value, 2380 __ CheckMap(value,
2384 scratch, 2381 scratch,
2385 Heap::kHeapNumberMapRootIndex, 2382 Heap::kHeapNumberMapRootIndex,
2386 instr->FalseLabel(chunk()), 2383 instr->FalseLabel(chunk()),
2387 DO_SMI_CHECK); 2384 DO_SMI_CHECK);
2388 __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); 2385 __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
2389 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000)); 2386 EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
2390 __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset)); 2387 __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
2391 __ mov(at, zero_reg); 2388 __ mov(at, zero_reg);
2392 } 2389 }
2393 EmitBranch(instr, eq, scratch, Operand(at)); 2390 EmitBranch(instr, eq, scratch, Operand(at));
2394 } 2391 }
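Reviewer note, not part of the patch: -0.0 compares equal to +0.0, which is why the code above inspects raw bits (sign-bit-only high word, zero mantissa word) rather than comparing values. A standalone sketch; memcpy stands in for FmoveHigh/lwu:

#include <cassert>
#include <cstdint>
#include <cstring>

bool IsMinusZero(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));           // type-pun safely
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // sign + exponent word
  uint32_t low = static_cast<uint32_t>(bits);         // mantissa word
  return high == 0x80000000u && low == 0;
}

int main() {
  assert(IsMinusZero(-0.0));
  assert(!IsMinusZero(0.0));
  assert(0.0 == -0.0);  // an ordinary compare cannot tell them apart
}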
2395 2392
2396 2393
2397 Condition LCodeGen::EmitIsObject(Register input, 2394 Condition LCodeGen::EmitIsObject(Register input,
2398 Register temp1, 2395 Register temp1,
2399 Register temp2, 2396 Register temp2,
2400 Label* is_not_object, 2397 Label* is_not_object,
2401 Label* is_object) { 2398 Label* is_object) {
2402 __ JumpIfSmi(input, is_not_object); 2399 __ JumpIfSmi(input, is_not_object);
2403 2400
2404 __ LoadRoot(temp2, Heap::kNullValueRootIndex); 2401 __ LoadRoot(temp2, Heap::kNullValueRootIndex);
2405 __ Branch(is_object, eq, input, Operand(temp2)); 2402 __ Branch(is_object, eq, input, Operand(temp2));
2406 2403
2407 // Load map. 2404 // Load map.
2408 __ lw(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); 2405 __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
2409 // Undetectable objects behave like undefined. 2406 // Undetectable objects behave like undefined.
2410 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); 2407 __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
2411 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable)); 2408 __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
2412 __ Branch(is_not_object, ne, temp2, Operand(zero_reg)); 2409 __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
2413 2410
2414 // Load instance type and check that it is in object type range. 2411 // Load instance type and check that it is in object type range.
2415 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); 2412 __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
2416 __ Branch(is_not_object, 2413 __ Branch(is_not_object,
2417 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2414 lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2418 2415
(...skipping 50 matching lines...)
2469 } 2466 }
2470 2467
2471 2468
2472 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { 2469 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
2473 Register input = ToRegister(instr->value()); 2470 Register input = ToRegister(instr->value());
2474 Register temp = ToRegister(instr->temp()); 2471 Register temp = ToRegister(instr->temp());
2475 2472
2476 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 2473 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
2477 __ JumpIfSmi(input, instr->FalseLabel(chunk_)); 2474 __ JumpIfSmi(input, instr->FalseLabel(chunk_));
2478 } 2475 }
2479 __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset)); 2476 __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
2480 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); 2477 __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
2481 __ And(at, temp, Operand(1 << Map::kIsUndetectable)); 2478 __ And(at, temp, Operand(1 << Map::kIsUndetectable));
2482 EmitBranch(instr, ne, at, Operand(zero_reg)); 2479 EmitBranch(instr, ne, at, Operand(zero_reg));
2483 } 2480 }
2484 2481
2485 2482
2486 static Condition ComputeCompareCondition(Token::Value op) { 2483 static Condition ComputeCompareCondition(Token::Value op) {
2487 switch (op) { 2484 switch (op) {
2488 case Token::EQ_STRICT: 2485 case Token::EQ_STRICT:
2489 case Token::EQ: 2486 case Token::EQ:
(...skipping 61 matching lines...)
2551 Operand(TestType(instr->hydrogen()))); 2548 Operand(TestType(instr->hydrogen())));
2552 } 2549 }
2553 2550
2554 2551
2555 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { 2552 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
2556 Register input = ToRegister(instr->value()); 2553 Register input = ToRegister(instr->value());
2557 Register result = ToRegister(instr->result()); 2554 Register result = ToRegister(instr->result());
2558 2555
2559 __ AssertString(input); 2556 __ AssertString(input);
2560 2557
2561 __ lw(result, FieldMemOperand(input, String::kHashFieldOffset)); 2558 __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
2562 __ IndexFromHash(result, result); 2559 __ IndexFromHash(result, result);
2563 } 2560 }
2564 2561
2565 2562
2566 void LCodeGen::DoHasCachedArrayIndexAndBranch( 2563 void LCodeGen::DoHasCachedArrayIndexAndBranch(
2567 LHasCachedArrayIndexAndBranch* instr) { 2564 LHasCachedArrayIndexAndBranch* instr) {
2568 Register input = ToRegister(instr->value()); 2565 Register input = ToRegister(instr->value());
2569 Register scratch = scratch0(); 2566 Register scratch = scratch0();
2570 2567
2571 __ lw(scratch, 2568 __ lwu(scratch,
2572 FieldMemOperand(input, String::kHashFieldOffset)); 2569 FieldMemOperand(input, String::kHashFieldOffset));
2573 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask)); 2570 __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
2574 EmitBranch(instr, eq, at, Operand(zero_reg)); 2571 EmitBranch(instr, eq, at, Operand(zero_reg));
2575 } 2572 }
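Reviewer note, not part of the patch: the lwu above reads the string's 32-bit hash field, where a flag records whether the string caches the numeric array index it spells. A sketch under an assumed bit layout (the real mask is String::kContainsCachedArrayIndexMask):

#include <cstdint>

const uint32_t kContainsCachedArrayIndexMask = 1u << 0;  // assumed layout

// Mirrors the And plus branch-on-eq-zero above: the cached index is present
// exactly when the masked bits are clear.
bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}

int main() { return HasCachedArrayIndex(0) ? 0 : 1; }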
2576 2573
2577 2574
2578 // Branches to a label or falls through with the answer in flags. Trashes 2575 // Branches to a label or falls through with the answer in flags. Trashes
2579 // the temp registers, but not the input. 2576 // the temp registers, but not the input.
2580 void LCodeGen::EmitClassOfTest(Label* is_true, 2577 void LCodeGen::EmitClassOfTest(Label* is_true,
2581 Label* is_false, 2578 Label* is_false,
(...skipping 18 matching lines...)
2600 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); 2597 STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
2601 2598
2602 __ GetObjectType(input, temp, temp2); 2599 __ GetObjectType(input, temp, temp2);
2603 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); 2600 __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2604 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE)); 2601 __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
2605 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE)); 2602 __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
2606 } else { 2603 } else {
2607 // Faster code path to avoid two compares: subtract lower bound from the 2604 // Faster code path to avoid two compares: subtract lower bound from the
2608 // actual type and do a signed compare with the width of the type range. 2605 // actual type and do a signed compare with the width of the type range.
2609 __ GetObjectType(input, temp, temp2); 2606 __ GetObjectType(input, temp, temp2);
2610 __ Subu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2607 __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2611 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - 2608 __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
2612 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 2609 FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
2613 } 2610 }
2614 2611
2615 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. 2612 // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
2616 // Check if the constructor in the map is a function. 2613 // Check if the constructor in the map is a function.
2617 __ lw(temp, FieldMemOperand(temp, Map::kConstructorOffset)); 2614 __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
2618 2615
2619 // Objects with a non-function constructor have class 'Object'. 2616 // Objects with a non-function constructor have class 'Object'.
2620 __ GetObjectType(temp, temp2, temp2); 2617 __ GetObjectType(temp, temp2, temp2);
2621 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { 2618 if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
2622 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE)); 2619 __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
2623 } else { 2620 } else {
2624 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE)); 2621 __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
2625 } 2622 }
2626 2623
2627 // temp now contains the constructor function. Grab the 2624 // temp now contains the constructor function. Grab the
2628 // instance class name from there. 2625 // instance class name from there.
2629 __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); 2626 __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
2630 __ lw(temp, FieldMemOperand(temp, 2627 __ ld(temp, FieldMemOperand(temp,
2631 SharedFunctionInfo::kInstanceClassNameOffset)); 2628 SharedFunctionInfo::kInstanceClassNameOffset));
2632 // The class name we are testing against is internalized since it's a literal. 2629 // The class name we are testing against is internalized since it's a literal.
2633 // The name in the constructor is internalized because of the way the context 2630 // The name in the constructor is internalized because of the way the context
2634 // is booted. This routine isn't expected to work for random API-created 2631 // is booted. This routine isn't expected to work for random API-created
2635 // classes and it doesn't have to because you can't access it with natives 2632 // classes and it doesn't have to because you can't access it with natives
2636 // syntax. Since both sides are internalized it is sufficient to use an 2633 // syntax. Since both sides are internalized it is sufficient to use an
2637 // identity comparison. 2634 // identity comparison.
2638 2635
2639 // End with the address of this class_name instance in temp register. 2636 // End with the address of this class_name instance in temp register.
 2640 // On MIPS, the caller must do the comparison with Handle<String> class_name. 2637 // On MIPS, the caller must do the comparison with Handle<String> class_name.
(...skipping 10 matching lines...)
2651 class_name, input, temp, temp2); 2648 class_name, input, temp, temp2);
2652 2649
2653 EmitBranch(instr, eq, temp, Operand(class_name)); 2650 EmitBranch(instr, eq, temp, Operand(class_name));
2654 } 2651 }
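Reviewer note, not part of the patch: a simplified sketch of the classification EmitClassOfTest performs. Types and fields are illustrative; the real code also special-cases the callable type range for 'Function' and the hole/null checks.

struct SharedFunctionInfo { const char* instance_class_name; };
struct Constructor { bool is_function; SharedFunctionInfo* shared; };
struct Map { Constructor* constructor; };

// The class name of an object is the instance class name recorded on its
// constructor's SharedFunctionInfo; a non-function constructor means 'Object'.
const char* ClassOf(const Map* map) {
  if (!map->constructor->is_function) return "Object";
  return map->constructor->shared->instance_class_name;
}

int main() {
  SharedFunctionInfo info{"Array"};
  Constructor ctor{true, &info};
  Map map{&ctor};
  return ClassOf(&map)[0] == 'A' ? 0 : 1;
}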
2655 2652
2656 2653
2657 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { 2654 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
2658 Register reg = ToRegister(instr->value()); 2655 Register reg = ToRegister(instr->value());
2659 Register temp = ToRegister(instr->temp()); 2656 Register temp = ToRegister(instr->temp());
2660 2657
2661 __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); 2658 __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
2662 EmitBranch(instr, eq, temp, Operand(instr->map())); 2659 EmitBranch(instr, eq, temp, Operand(instr->map()));
2663 } 2660 }
2664 2661
2665 2662
2666 void LCodeGen::DoInstanceOf(LInstanceOf* instr) { 2663 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
2667 ASSERT(ToRegister(instr->context()).is(cp)); 2664 ASSERT(ToRegister(instr->context()).is(cp));
2668 Label true_label, done; 2665 Label true_label, done;
2669 ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0. 2666 ASSERT(ToRegister(instr->left()).is(a0)); // Object is in a0.
2670 ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1. 2667 ASSERT(ToRegister(instr->right()).is(a1)); // Function is in a1.
2671 Register result = ToRegister(instr->result()); 2668 Register result = ToRegister(instr->result());
(...skipping 40 matching lines...)
2712 ASSERT(result.is(v0)); 2709 ASSERT(result.is(v0));
2713 2710
2714 // A Smi is not instance of anything. 2711 // A Smi is not instance of anything.
2715 __ JumpIfSmi(object, &false_result); 2712 __ JumpIfSmi(object, &false_result);
2716 2713
 2717 // This is the inlined call site instanceof cache. The two occurrences of the 2714 // This is the inlined call site instanceof cache. The two occurrences of the
2718 // hole value will be patched to the last map/result pair generated by the 2715 // hole value will be patched to the last map/result pair generated by the
2719 // instanceof stub. 2716 // instanceof stub.
2720 Label cache_miss; 2717 Label cache_miss;
2721 Register map = temp; 2718 Register map = temp;
2722 __ lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); 2719 __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
2723 2720
2724 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 2721 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2725 __ bind(deferred->map_check()); // Label for calculating code patching. 2722 __ bind(deferred->map_check()); // Label for calculating code patching.
2726 // We use Factory::the_hole_value() on purpose instead of loading from the 2723 // We use Factory::the_hole_value() on purpose instead of loading from the
2727 // root array to force relocation to be able to later patch with 2724 // root array to force relocation to be able to later patch with
2728 // the cached map. 2725 // the cached map.
2729 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); 2726 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
2730 __ li(at, Operand(Handle<Object>(cell))); 2727 __ li(at, Operand(Handle<Object>(cell)));
2731 __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset)); 2728 __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset));
2732 __ BranchShort(&cache_miss, ne, map, Operand(at)); 2729 __ BranchShort(&cache_miss, ne, map, Operand(at));
2733 // We use Factory::the_hole_value() on purpose instead of loading from the 2730 // We use Factory::the_hole_value() on purpose instead of loading from the
2734 // root array to force relocation to be able to later patch 2731 // root array to force relocation to be able to later patch
2735 // with true or false. The distance from map check has to be constant. 2732 // with true or false. The distance from map check has to be constant.
2736 __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE); 2733 __ li(result, Operand(factory()->the_hole_value()));
2737 __ Branch(&done); 2734 __ Branch(&done);
2738 2735
2739 // The inlined call site cache did not match. Check null and string before 2736 // The inlined call site cache did not match. Check null and string before
2740 // calling the deferred code. 2737 // calling the deferred code.
2741 __ bind(&cache_miss); 2738 __ bind(&cache_miss);
2742 // Null is not instance of anything. 2739 // Null is not instance of anything.
2743 __ LoadRoot(temp, Heap::kNullValueRootIndex); 2740 __ LoadRoot(temp, Heap::kNullValueRootIndex);
2744 __ Branch(&false_result, eq, object, Operand(temp)); 2741 __ Branch(&false_result, eq, object, Operand(temp));
2745 2742
 2746 // String values are not instances of anything. 2743 // String values are not instances of anything.
(...skipping 23 matching lines...)
2770 flags | InstanceofStub::kArgsInRegisters); 2767 flags | InstanceofStub::kArgsInRegisters);
2771 flags = static_cast<InstanceofStub::Flags>( 2768 flags = static_cast<InstanceofStub::Flags>(
2772 flags | InstanceofStub::kCallSiteInlineCheck); 2769 flags | InstanceofStub::kCallSiteInlineCheck);
2773 flags = static_cast<InstanceofStub::Flags>( 2770 flags = static_cast<InstanceofStub::Flags>(
2774 flags | InstanceofStub::kReturnTrueFalseObject); 2771 flags | InstanceofStub::kReturnTrueFalseObject);
2775 InstanceofStub stub(isolate(), flags); 2772 InstanceofStub stub(isolate(), flags);
2776 2773
2777 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 2774 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
2778 LoadContextFromDeferred(instr->context()); 2775 LoadContextFromDeferred(instr->context());
2779 2776
2780 // Get the temp register reserved by the instruction. This needs to be t0 as 2777 // Get the temp register reserved by the instruction. This needs to be a4 as
2781 // its slot of the pushing of safepoint registers is used to communicate the 2778 // its slot of the pushing of safepoint registers is used to communicate the
2782 // offset to the location of the map check. 2779 // offset to the location of the map check.
2783 Register temp = ToRegister(instr->temp()); 2780 Register temp = ToRegister(instr->temp());
2784 ASSERT(temp.is(t0)); 2781 ASSERT(temp.is(a4));
2785 __ li(InstanceofStub::right(), instr->function()); 2782 __ li(InstanceofStub::right(), instr->function());
2786 static const int kAdditionalDelta = 7; 2783 static const int kAdditionalDelta = 13;
2787 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; 2784 int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
2788 Label before_push_delta; 2785 Label before_push_delta;
2789 __ bind(&before_push_delta); 2786 __ bind(&before_push_delta);
2790 { 2787 {
2791 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 2788 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
2792 __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE); 2789 __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
2793 __ StoreToSafepointRegisterSlot(temp, temp); 2790 __ StoreToSafepointRegisterSlot(temp, temp);
2794 } 2791 }
2795 CallCodeGeneric(stub.GetCode(), 2792 CallCodeGeneric(stub.GetCode(),
2796 RelocInfo::CODE_TARGET, 2793 RelocInfo::CODE_TARGET,
2797 instr, 2794 instr,
2798 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); 2795 RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
2799 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); 2796 LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
2800 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); 2797 safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
2801 // Put the result value into the result register slot and 2798 // Put the result value into the result register slot and
2802 // restore all registers. 2799 // restore all registers.
(...skipping 23 matching lines...)
2826 } 2823 }
2827 2824
2828 2825
2829 void LCodeGen::DoReturn(LReturn* instr) { 2826 void LCodeGen::DoReturn(LReturn* instr) {
2830 if (FLAG_trace && info()->IsOptimizing()) { 2827 if (FLAG_trace && info()->IsOptimizing()) {
2831 // Push the return value on the stack as the parameter. 2828 // Push the return value on the stack as the parameter.
2832 // Runtime::TraceExit returns its parameter in v0. We're leaving the code 2829 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2833 // managed by the register allocator and tearing down the frame, it's 2830 // managed by the register allocator and tearing down the frame, it's
2834 // safe to write to the context register. 2831 // safe to write to the context register.
2835 __ push(v0); 2832 __ push(v0);
2836 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2833 __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2837 __ CallRuntime(Runtime::kTraceExit, 1); 2834 __ CallRuntime(Runtime::kTraceExit, 1);
2838 } 2835 }
2839 if (info()->saves_caller_doubles()) { 2836 if (info()->saves_caller_doubles()) {
2840 RestoreCallerDoubles(); 2837 RestoreCallerDoubles();
2841 } 2838 }
2842 int no_frame_start = -1; 2839 int no_frame_start = -1;
2843 if (NeedsEagerFrame()) { 2840 if (NeedsEagerFrame()) {
2844 __ mov(sp, fp); 2841 __ mov(sp, fp);
2845 no_frame_start = masm_->pc_offset(); 2842 no_frame_start = masm_->pc_offset();
2846 __ Pop(ra, fp); 2843 __ Pop(ra, fp);
2847 } 2844 }
2848 if (instr->has_constant_parameter_count()) { 2845 if (instr->has_constant_parameter_count()) {
2849 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2846 int parameter_count = ToInteger32(instr->constant_parameter_count());
2850 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2847 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
2851 if (sp_delta != 0) { 2848 if (sp_delta != 0) {
2852 __ Addu(sp, sp, Operand(sp_delta)); 2849 __ Daddu(sp, sp, Operand(sp_delta));
2853 } 2850 }
2854 } else { 2851 } else {
2855 Register reg = ToRegister(instr->parameter_count()); 2852 Register reg = ToRegister(instr->parameter_count());
 2856 // The argument count parameter is a smi. 2853 // The argument count parameter is a smi.
2857 __ SmiUntag(reg); 2854 __ SmiUntag(reg);
2858 __ sll(at, reg, kPointerSizeLog2); 2855 __ dsll(at, reg, kPointerSizeLog2);
2859 __ Addu(sp, sp, at); 2856 __ Daddu(sp, sp, at);
2860 } 2857 }
2861 2858
2862 __ Jump(ra); 2859 __ Jump(ra);
2863 2860
2864 if (no_frame_start != -1) { 2861 if (no_frame_start != -1) {
2865 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); 2862 info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
2866 } 2863 }
2867 } 2864 }
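Reviewer note, not part of the patch: the return sequence pops the parameters plus the receiver. A sketch of the two delta computations above, assuming mips64's 8-byte pointers and 32-bit smis:

#include <cassert>
#include <cstdint>

const int kPointerSize = 8;  // mips64
const int kPointerSizeLog2 = 3;

// Constant count: (count + 1) slots, the +1 being the receiver.
int64_t SpDeltaForConstantCount(int64_t parameter_count) {
  return (parameter_count + 1) * kPointerSize;
}

// A dynamic count arrives as a smi; with 32-bit smis, untagging is an
// arithmetic shift right by 32, then the count is scaled to bytes (dsll).
int64_t SpDeltaForSmiCount(int64_t smi_count) {
  int64_t untagged = smi_count >> 32;
  return untagged << kPointerSizeLog2;
}

int main() {
  assert(SpDeltaForConstantCount(2) == 24);            // 2 args + receiver
  assert(SpDeltaForSmiCount(int64_t{3} << 32) == 24);  // smi-tagged 3
}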
2868 2865
2869 2866
2870 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2867 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2871 Register result = ToRegister(instr->result()); 2868 Register result = ToRegister(instr->result());
2872 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 2869 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2873 __ lw(result, FieldMemOperand(at, Cell::kValueOffset)); 2870 __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
2874 if (instr->hydrogen()->RequiresHoleCheck()) { 2871 if (instr->hydrogen()->RequiresHoleCheck()) {
2875 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2872 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2876 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); 2873 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2877 } 2874 }
2878 } 2875 }
2879 2876
2880 2877
2881 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { 2878 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
2882 ASSERT(ToRegister(instr->context()).is(cp)); 2879 ASSERT(ToRegister(instr->context()).is(cp));
2883 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); 2880 ASSERT(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister()));
(...skipping 13 matching lines...)
2897 // Load the cell. 2894 // Load the cell.
2898 __ li(cell, Operand(instr->hydrogen()->cell().handle())); 2895 __ li(cell, Operand(instr->hydrogen()->cell().handle()));
2899 2896
2900 // If the cell we are storing to contains the hole it could have 2897 // If the cell we are storing to contains the hole it could have
2901 // been deleted from the property dictionary. In that case, we need 2898 // been deleted from the property dictionary. In that case, we need
2902 // to update the property details in the property dictionary to mark 2899 // to update the property details in the property dictionary to mark
2903 // it as no longer deleted. 2900 // it as no longer deleted.
2904 if (instr->hydrogen()->RequiresHoleCheck()) { 2901 if (instr->hydrogen()->RequiresHoleCheck()) {
2905 // We use a temp to check the payload. 2902 // We use a temp to check the payload.
2906 Register payload = ToRegister(instr->temp()); 2903 Register payload = ToRegister(instr->temp());
2907 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset)); 2904 __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
2908 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2905 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2909 DeoptimizeIf(eq, instr->environment(), payload, Operand(at)); 2906 DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
2910 } 2907 }
2911 2908
2912 // Store the value. 2909 // Store the value.
2913 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset)); 2910 __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
2914 // Cells are always rescanned, so no write barrier here. 2911 // Cells are always rescanned, so no write barrier here.
2915 } 2912 }
2916 2913
2917 2914
2918
2919 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2915 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2920 Register context = ToRegister(instr->context()); 2916 Register context = ToRegister(instr->context());
2921 Register result = ToRegister(instr->result()); 2917 Register result = ToRegister(instr->result());
2922 2918
2923 __ lw(result, ContextOperand(context, instr->slot_index())); 2919 __ ld(result, ContextOperand(context, instr->slot_index()));
2924 if (instr->hydrogen()->RequiresHoleCheck()) { 2920 if (instr->hydrogen()->RequiresHoleCheck()) {
2925 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2921 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2926 2922
2927 if (instr->hydrogen()->DeoptimizesOnHole()) { 2923 if (instr->hydrogen()->DeoptimizesOnHole()) {
2928 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); 2924 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
2929 } else { 2925 } else {
2930 Label is_not_hole; 2926 Label is_not_hole;
2931 __ Branch(&is_not_hole, ne, result, Operand(at)); 2927 __ Branch(&is_not_hole, ne, result, Operand(at));
2932 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 2928 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2933 __ bind(&is_not_hole); 2929 __ bind(&is_not_hole);
2934 } 2930 }
2935 } 2931 }
2936 } 2932 }
2937 2933
2938 2934
2939 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2935 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2940 Register context = ToRegister(instr->context()); 2936 Register context = ToRegister(instr->context());
2941 Register value = ToRegister(instr->value()); 2937 Register value = ToRegister(instr->value());
2942 Register scratch = scratch0(); 2938 Register scratch = scratch0();
2943 MemOperand target = ContextOperand(context, instr->slot_index()); 2939 MemOperand target = ContextOperand(context, instr->slot_index());
2944 2940
2945 Label skip_assignment; 2941 Label skip_assignment;
2946 2942
2947 if (instr->hydrogen()->RequiresHoleCheck()) { 2943 if (instr->hydrogen()->RequiresHoleCheck()) {
2948 __ lw(scratch, target); 2944 __ ld(scratch, target);
2949 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2945 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2950 2946
2951 if (instr->hydrogen()->DeoptimizesOnHole()) { 2947 if (instr->hydrogen()->DeoptimizesOnHole()) {
2952 DeoptimizeIf(eq, instr->environment(), scratch, Operand(at)); 2948 DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
2953 } else { 2949 } else {
2954 __ Branch(&skip_assignment, ne, scratch, Operand(at)); 2950 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2955 } 2951 }
2956 } 2952 }
2957 2953
2958 __ sw(value, target); 2954 __ sd(value, target);
2959 if (instr->hydrogen()->NeedsWriteBarrier()) { 2955 if (instr->hydrogen()->NeedsWriteBarrier()) {
2960 SmiCheck check_needed = 2956 SmiCheck check_needed =
2961 instr->hydrogen()->value()->type().IsHeapObject() 2957 instr->hydrogen()->value()->type().IsHeapObject()
2962 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2958 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
2963 __ RecordWriteContextSlot(context, 2959 __ RecordWriteContextSlot(context,
2964 target.offset(), 2960 target.offset(),
2965 value, 2961 value,
2966 scratch0(), 2962 scratch0(),
2967 GetRAState(), 2963 GetRAState(),
2968 kSaveFPRegs, 2964 kSaveFPRegs,
2969 EMIT_REMEMBERED_SET, 2965 EMIT_REMEMBERED_SET,
2970 check_needed); 2966 check_needed);
2971 } 2967 }
2972 2968
2973 __ bind(&skip_assignment); 2969 __ bind(&skip_assignment);
2974 } 2970 }
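Reviewer note, not part of the patch: a sketch of the hole-check protocol shared by the context-slot load and store above. Representation and names are illustrative; the real code also emits a write barrier when the stored value may be a heap object.

#include <cstdint>

using Object = intptr_t;
const Object kTheHole = -1;  // stands in for the the-hole root value

enum class HoleMode { kDeoptOnHole, kSkipOnHole };

// Returns false where the generated code would deoptimize.
bool StoreContextSlot(Object* slot, Object value, bool requires_hole_check,
                      HoleMode mode) {
  if (requires_hole_check && *slot == kTheHole) {
    if (mode == HoleMode::kDeoptOnHole) return false;  // DeoptimizeIf(eq, ...)
    return true;                                       // &skip_assignment
  }
  *slot = value;
  return true;
}

int main() {
  Object slot = kTheHole;
  // A hole-checked store to a deleted binding skips the assignment.
  return StoreContextSlot(&slot, 42, true, HoleMode::kSkipOnHole) ? 0 : 1;
}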
2975 2971
2976 2972
2977 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { 2973 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
2978 HObjectAccess access = instr->hydrogen()->access(); 2974 HObjectAccess access = instr->hydrogen()->access();
2979 int offset = access.offset(); 2975 int offset = access.offset();
2980 Register object = ToRegister(instr->object()); 2976 Register object = ToRegister(instr->object());
2981
2982 if (access.IsExternalMemory()) { 2977 if (access.IsExternalMemory()) {
2983 Register result = ToRegister(instr->result()); 2978 Register result = ToRegister(instr->result());
2984 MemOperand operand = MemOperand(object, offset); 2979 MemOperand operand = MemOperand(object, offset);
2985 __ Load(result, operand, access.representation()); 2980 __ Load(result, operand, access.representation());
2986 return; 2981 return;
2987 } 2982 }
2988 2983
2989 if (instr->hydrogen()->representation().IsDouble()) { 2984 if (instr->hydrogen()->representation().IsDouble()) {
2990 DoubleRegister result = ToDoubleRegister(instr->result()); 2985 DoubleRegister result = ToDoubleRegister(instr->result());
2991 __ ldc1(result, FieldMemOperand(object, offset)); 2986 __ ldc1(result, FieldMemOperand(object, offset));
2992 return; 2987 return;
2993 } 2988 }
2994 2989
2995 Register result = ToRegister(instr->result()); 2990 Register result = ToRegister(instr->result());
2996 if (!access.IsInobject()) { 2991 if (!access.IsInobject()) {
2997 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 2992 __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
2998 object = result; 2993 object = result;
2999 } 2994 }
3000 MemOperand operand = FieldMemOperand(object, offset); 2995
3001 __ Load(result, operand, access.representation()); 2996 Representation representation = access.representation();
2997 if (representation.IsSmi() && SmiValuesAre32Bits() &&
2998 instr->hydrogen()->representation().IsInteger32()) {
2999 if (FLAG_debug_code) {
 3000 // Verify that this really is a Smi.
3001 Register scratch = scratch0();
3002 __ Load(scratch, FieldMemOperand(object, offset), representation);
3003 __ AssertSmi(scratch);
3004 }
3005
3006 // Read int value directly from upper half of the smi.
3007 STATIC_ASSERT(kSmiTag == 0);
3008 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3009 offset += kPointerSize / 2;
3010 representation = Representation::Integer32();
3011 }
3012 __ Load(result, FieldMemOperand(object, offset), representation);
3002 } 3013 }
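Reviewer note, not part of the patch: the new fast path reads an int32 straight out of a tagged field. With 32-bit smis a tagged smi is the value shifted left by 32, so on little-endian mips64 the untagged value is the word at offset + kPointerSize / 2. A sketch:

#include <cassert>
#include <cstdint>

int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }

// One 32-bit load replaces load-plus-SmiUntag; the shift here models reading
// the upper half of the in-object field.
int32_t LoadSmiFieldAsInt32(const int64_t* field) {
  return static_cast<int32_t>(*field >> 32);
}

int main() {
  int64_t field = SmiTag(-42);
  assert(LoadSmiFieldAsInt32(&field) == -42);
}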
3003 3014
3004 3015
3005 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { 3016 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
3006 ASSERT(ToRegister(instr->context()).is(cp)); 3017 ASSERT(ToRegister(instr->context()).is(cp));
3007 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3018 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3008 ASSERT(ToRegister(instr->result()).is(v0)); 3019 ASSERT(ToRegister(instr->result()).is(v0));
3009 3020
3010 // Name is always in a2. 3021 // Name is always in a2.
3011 __ li(LoadIC::NameRegister(), Operand(instr->name())); 3022 __ li(LoadIC::NameRegister(), Operand(instr->name()));
(...skipping 12 matching lines...)
3024 __ GetObjectType(function, result, scratch); 3035 __ GetObjectType(function, result, scratch);
3025 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE)); 3036 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
3026 3037
3027 // Make sure that the function has an instance prototype. 3038 // Make sure that the function has an instance prototype.
3028 Label non_instance; 3039 Label non_instance;
3029 __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); 3040 __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3030 __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); 3041 __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3031 __ Branch(&non_instance, ne, scratch, Operand(zero_reg)); 3042 __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
3032 3043
3033 // Get the prototype or initial map from the function. 3044 // Get the prototype or initial map from the function.
3034 __ lw(result, 3045 __ ld(result,
3035 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3046 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3036 3047
3037 // Check that the function has a prototype or an initial map. 3048 // Check that the function has a prototype or an initial map.
3038 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 3049 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3039 DeoptimizeIf(eq, instr->environment(), result, Operand(at)); 3050 DeoptimizeIf(eq, instr->environment(), result, Operand(at));
3040 3051
3041 // If the function does not have an initial map, we're done. 3052 // If the function does not have an initial map, we're done.
3042 Label done; 3053 Label done;
3043 __ GetObjectType(result, scratch, scratch); 3054 __ GetObjectType(result, scratch, scratch);
3044 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); 3055 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3045 3056
3046 // Get the prototype from the initial map. 3057 // Get the prototype from the initial map.
3047 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3058 __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
3048 __ Branch(&done); 3059 __ Branch(&done);
3049 3060
3050 // Non-instance prototype: Fetch prototype from constructor field 3061 // Non-instance prototype: Fetch prototype from constructor field
3051 // in initial map. 3062 // in initial map.
3052 __ bind(&non_instance); 3063 __ bind(&non_instance);
3053 __ lw(result, FieldMemOperand(result, Map::kConstructorOffset)); 3064 __ ld(result, FieldMemOperand(result, Map::kConstructorOffset));
3054 3065
3055 // All done. 3066 // All done.
3056 __ bind(&done); 3067 __ bind(&done);
3057 } 3068 }
3058 3069
3059 3070
3060 void LCodeGen::DoLoadRoot(LLoadRoot* instr) { 3071 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
3061 Register result = ToRegister(instr->result()); 3072 Register result = ToRegister(instr->result());
3062 __ LoadRoot(result, instr->index()); 3073 __ LoadRoot(result, instr->index());
3063 } 3074 }
3064 3075
3065 3076
3066 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3077 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3067 Register arguments = ToRegister(instr->arguments()); 3078 Register arguments = ToRegister(instr->arguments());
3068 Register result = ToRegister(instr->result()); 3079 Register result = ToRegister(instr->result());
3069 // There are two words between the frame pointer and the last argument. 3080 // There are two words between the frame pointer and the last argument.
3070 // Subtracting from length accounts for one of them; add one more. 3081 // Subtracting from length accounts for one of them; add one more.
3071 if (instr->length()->IsConstantOperand()) { 3082 if (instr->length()->IsConstantOperand()) {
3072 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 3083 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3073 if (instr->index()->IsConstantOperand()) { 3084 if (instr->index()->IsConstantOperand()) {
3074 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3085 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3075 int index = (const_length - const_index) + 1; 3086 int index = (const_length - const_index) + 1;
3076 __ lw(result, MemOperand(arguments, index * kPointerSize)); 3087 __ ld(result, MemOperand(arguments, index * kPointerSize));
3077 } else { 3088 } else {
3078 Register index = ToRegister(instr->index()); 3089 Register index = ToRegister(instr->index());
3079 __ li(at, Operand(const_length + 1)); 3090 __ li(at, Operand(const_length + 1));
3080 __ Subu(result, at, index); 3091 __ Dsubu(result, at, index);
3081 __ sll(at, result, kPointerSizeLog2); 3092 __ dsll(at, result, kPointerSizeLog2);
3082 __ Addu(at, arguments, at); 3093 __ Daddu(at, arguments, at);
3083 __ lw(result, MemOperand(at)); 3094 __ ld(result, MemOperand(at));
3084 } 3095 }
3085 } else if (instr->index()->IsConstantOperand()) { 3096 } else if (instr->index()->IsConstantOperand()) {
3086 Register length = ToRegister(instr->length()); 3097 Register length = ToRegister(instr->length());
3087 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3098 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3088 int loc = const_index - 1; 3099 int loc = const_index - 1;
3089 if (loc != 0) { 3100 if (loc != 0) {
3090 __ Subu(result, length, Operand(loc)); 3101 __ Dsubu(result, length, Operand(loc));
3091 __ sll(at, result, kPointerSizeLog2); 3102 __ dsll(at, result, kPointerSizeLog2);
3092 __ Addu(at, arguments, at); 3103 __ Daddu(at, arguments, at);
3093 __ lw(result, MemOperand(at)); 3104 __ ld(result, MemOperand(at));
3094 } else { 3105 } else {
3095 __ sll(at, length, kPointerSizeLog2); 3106 __ dsll(at, length, kPointerSizeLog2);
3096 __ Addu(at, arguments, at); 3107 __ Daddu(at, arguments, at);
3097 __ lw(result, MemOperand(at)); 3108 __ ld(result, MemOperand(at));
3098 } 3109 }
3099 } else { 3110 } else {
3100 Register length = ToRegister(instr->length()); 3111 Register length = ToRegister(instr->length());
3101 Register index = ToRegister(instr->index()); 3112 Register index = ToRegister(instr->index());
3102 __ Subu(result, length, index); 3113 __ Dsubu(result, length, index);
3103 __ Addu(result, result, 1); 3114 __ Daddu(result, result, 1);
3104 __ sll(at, result, kPointerSizeLog2); 3115 __ dsll(at, result, kPointerSizeLog2);
3105 __ Addu(at, arguments, at); 3116 __ Daddu(at, arguments, at);
3106 __ lw(result, MemOperand(at)); 3117 __ ld(result, MemOperand(at));
3107 } 3118 }
3108 } 3119 }
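The index arithmetic in DoAccessArgumentsAt folds in the two words that sit between the frame pointer and the last argument: subtracting the index from the length covers one of them, and the explicit + 1 covers the other. A small sketch of the resulting byte offset, assuming 8-byte pointers:

    #include <cassert>

    // Byte offset of argument `index`, mirroring (length - index) + 1.
    int ArgumentOffset(int length, int index) {
      const int kPointerSize = 8;
      return ((length - index) + 1) * kPointerSize;
    }

    int main() {
      // With 3 arguments, argument 0 sits 4 slots from the arguments
      // pointer and argument 2 (the last one) sits 2 slots away.
      assert(ArgumentOffset(3, 0) == 4 * 8);
      assert(ArgumentOffset(3, 2) == 2 * 8);
      return 0;
    }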
3109 3120
3110 3121
3111 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 3122 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3112 Register external_pointer = ToRegister(instr->elements()); 3123 Register external_pointer = ToRegister(instr->elements());
3113 Register key = no_reg; 3124 Register key = no_reg;
3114 ElementsKind elements_kind = instr->elements_kind(); 3125 ElementsKind elements_kind = instr->elements_kind();
3115 bool key_is_constant = instr->key()->IsConstantOperand(); 3126 bool key_is_constant = instr->key()->IsConstantOperand();
3116 int constant_key = 0; 3127 int constant_key = 0;
3117 if (key_is_constant) { 3128 if (key_is_constant) {
3118 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3129 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3119 if (constant_key & 0xF0000000) { 3130 if (constant_key & 0xF0000000) {
3120 Abort(kArrayIndexConstantValueTooBig); 3131 Abort(kArrayIndexConstantValueTooBig);
3121 } 3132 }
3122 } else { 3133 } else {
3123 key = ToRegister(instr->key()); 3134 key = ToRegister(instr->key());
3124 } 3135 }
3125 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3136 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3126 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3137 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3127 ? (element_size_shift - kSmiTagSize) : element_size_shift; 3138 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3139 : element_size_shift;
3128 int base_offset = instr->base_offset(); 3140 int base_offset = instr->base_offset();
3129 3141
3130 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3142 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3131 elements_kind == FLOAT32_ELEMENTS || 3143 elements_kind == FLOAT32_ELEMENTS ||
3132 elements_kind == EXTERNAL_FLOAT64_ELEMENTS || 3144 elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
3133 elements_kind == FLOAT64_ELEMENTS) { 3145 elements_kind == FLOAT64_ELEMENTS) {
3134 int base_offset = instr->base_offset(); 3146 int base_offset = instr->base_offset();
3135 FPURegister result = ToDoubleRegister(instr->result()); 3147 FPURegister result = ToDoubleRegister(instr->result());
3136 if (key_is_constant) { 3148 if (key_is_constant) {
3137 __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); 3149 __ Daddu(scratch0(), external_pointer,
3150 constant_key << element_size_shift);
3138 } else { 3151 } else {
3139 __ sll(scratch0(), key, shift_size); 3152 if (shift_size < 0) {
3140 __ Addu(scratch0(), scratch0(), external_pointer); 3153 if (shift_size == -32) {
3154 __ dsra32(scratch0(), key, 0);
3155 } else {
3156 __ dsra(scratch0(), key, -shift_size);
3157 }
3158 } else {
3159 __ dsll(scratch0(), key, shift_size);
3160 }
3161 __ Daddu(scratch0(), scratch0(), external_pointer);
3141 } 3162 }
3142 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || 3163 if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
3143 elements_kind == FLOAT32_ELEMENTS) { 3164 elements_kind == FLOAT32_ELEMENTS) {
3144 __ lwc1(result, MemOperand(scratch0(), base_offset)); 3165 __ lwc1(result, MemOperand(scratch0(), base_offset));
3145 __ cvt_d_s(result, result); 3166 __ cvt_d_s(result, result);
3146 } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS 3167 } else { // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
3147 __ ldc1(result, MemOperand(scratch0(), base_offset)); 3168 __ ldc1(result, MemOperand(scratch0(), base_offset));
3148 } 3169 }
3149 } else { 3170 } else {
3150 Register result = ToRegister(instr->result()); 3171 Register result = ToRegister(instr->result());
(...skipping 60 matching lines...)
3211 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); 3232 int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
3212 3233
3213 int base_offset = instr->base_offset(); 3234 int base_offset = instr->base_offset();
3214 if (key_is_constant) { 3235 if (key_is_constant) {
3215 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3236 int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3216 if (constant_key & 0xF0000000) { 3237 if (constant_key & 0xF0000000) {
3217 Abort(kArrayIndexConstantValueTooBig); 3238 Abort(kArrayIndexConstantValueTooBig);
3218 } 3239 }
3219 base_offset += constant_key * kDoubleSize; 3240 base_offset += constant_key * kDoubleSize;
3220 } 3241 }
3221 __ Addu(scratch, elements, Operand(base_offset)); 3242 __ Daddu(scratch, elements, Operand(base_offset));
3222 3243
3223 if (!key_is_constant) { 3244 if (!key_is_constant) {
3224 key = ToRegister(instr->key()); 3245 key = ToRegister(instr->key());
3225 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3246 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3226 ? (element_size_shift - kSmiTagSize) : element_size_shift; 3247 ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
3227 __ sll(at, key, shift_size); 3248 : element_size_shift;
3228 __ Addu(scratch, scratch, at); 3249 if (shift_size > 0) {
3250 __ dsll(at, key, shift_size);
3251 } else if (shift_size == -32) {
3252 __ dsra32(at, key, 0);
3253 } else {
3254 __ dsra(at, key, -shift_size);
3255 }
3256 __ Daddu(scratch, scratch, at);
3229 } 3257 }
3230 3258
3231 __ ldc1(result, MemOperand(scratch)); 3259 __ ldc1(result, MemOperand(scratch));
3232 3260
3233 if (instr->hydrogen()->RequiresHoleCheck()) { 3261 if (instr->hydrogen()->RequiresHoleCheck()) {
3234 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); 3262 __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
3235 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); 3263 DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
3236 } 3264 }
3237 } 3265 }
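When the key is a tagged Smi with a 32-bit payload, shift_size goes negative: element_size_shift is 3 for doubles while the tag occupies 32 bits, so the scaled offset is produced by an arithmetic right shift (dsra, or dsra32 when the shift is exactly 32) rather than a left shift. A scalar sketch of why shifting right by 29 yields key * 8:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kElementSizeShift = 3;                 // log2(sizeof(double))
      const int kSmiShift = 32;                        // kSmiTagSize + kSmiShiftSize
      int shift_size = kElementSizeShift - kSmiShift;  // -29

      // A tagged key holds key_value << 32; dsra by 29 turns that into
      // key_value << 3, i.e. the element's byte offset.
      int64_t key_value = 5;
      int64_t tagged_key = key_value << kSmiShift;
      int64_t byte_offset = tagged_key >> -shift_size;
      assert(byte_offset == (key_value << kElementSizeShift));
      return 0;
    }

The hole check right above follows the same 32-bit split: only the upper word of the double is loaded (hence the sizeof(kHoleNanLower32) offset) and compared against kHoleNanUpper32.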
3238 3266
3239 3267
3240 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3268 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3269 HLoadKeyed* hinstr = instr->hydrogen();
3241 Register elements = ToRegister(instr->elements()); 3270 Register elements = ToRegister(instr->elements());
3242 Register result = ToRegister(instr->result()); 3271 Register result = ToRegister(instr->result());
3243 Register scratch = scratch0(); 3272 Register scratch = scratch0();
3244 Register store_base = scratch; 3273 Register store_base = scratch;
3245 int offset = instr->base_offset(); 3274 int offset = instr->base_offset();
3246 3275
3247 if (instr->key()->IsConstantOperand()) { 3276 if (instr->key()->IsConstantOperand()) {
3248 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); 3277 LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
3249 offset += ToInteger32(const_operand) * kPointerSize; 3278 offset += ToInteger32(const_operand) * kPointerSize;
3250 store_base = elements; 3279 store_base = elements;
3251 } else { 3280 } else {
3252 Register key = ToRegister(instr->key()); 3281 Register key = ToRegister(instr->key());
3253 // Even though the HLoadKeyed instruction forces the input 3282 // Even though the HLoadKeyed instruction forces the input
3254 // representation for the key to be an integer, the input gets replaced 3283 // representation for the key to be an integer, the input gets replaced
3255 // during bound check elimination with the index argument to the bounds 3284 // during bound check elimination with the index argument to the bounds
3256 // check, which can be tagged, so that case must be handled here, too. 3285 // check, which can be tagged, so that case must be handled here, too.
3257 if (instr->hydrogen()->key()->representation().IsSmi()) { 3286 if (instr->hydrogen()->key()->representation().IsSmi()) {
3258 __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize); 3287 __ SmiScale(scratch, key, kPointerSizeLog2);
3259 __ addu(scratch, elements, scratch); 3288 __ daddu(scratch, elements, scratch);
3260 } else { 3289 } else {
3261 __ sll(scratch, key, kPointerSizeLog2); 3290 __ dsll(scratch, key, kPointerSizeLog2);
3262 __ addu(scratch, elements, scratch); 3291 __ daddu(scratch, elements, scratch);
3263 } 3292 }
3264 } 3293 }
3265 __ lw(result, MemOperand(store_base, offset)); 3294
3295 Representation representation = hinstr->representation();
3296 if (representation.IsInteger32() && SmiValuesAre32Bits() &&
3297 hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
3298 ASSERT(!hinstr->RequiresHoleCheck());
3299 if (FLAG_debug_code) {
3300 Register temp = scratch1();
3301 __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
3302 __ AssertSmi(temp);
3303 }
3304
3305 // Read int value directly from upper half of the smi.
3306 STATIC_ASSERT(kSmiTag == 0);
3307 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
3308 offset += kPointerSize / 2;
3309 }
3310
3311 __ Load(result, MemOperand(store_base, offset), representation);
3266 3312
3267 // Check for the hole value. 3313 // Check for the hole value.
3268 if (instr->hydrogen()->RequiresHoleCheck()) { 3314 if (hinstr->RequiresHoleCheck()) {
3269 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3315 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3270 __ SmiTst(result, scratch); 3316 __ SmiTst(result, scratch);
3271 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); 3317 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3272 } else { 3318 } else {
3273 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3319 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3274 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch)); 3320 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
3275 } 3321 }
3276 } 3322 }
3277 } 3323 }
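The FAST_SMI_ELEMENTS hole check relies only on the low tag bit: Smis carry tag 0 while heap object pointers, the hole included, carry tag 1, so SmiTst plus DeoptimizeIf(ne) rejects any non-Smi. A sketch; the hole's address here is a made-up stand-in:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int64_t kSmiTagMask = 1;           // (1 << kSmiTagSize) - 1
      int64_t smi = int64_t{42} << 32;         // tagged Smi, low bit clear
      int64_t the_hole = 0xdead0001;           // heap pointer, low bit set
      assert((smi & kSmiTagMask) == 0);        // passes the check
      assert((the_hole & kSmiTagMask) != 0);   // would deoptimize
      return 0;
    }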
3278 3324
(...skipping 15 matching lines...)
3294 int constant_key, 3340 int constant_key,
3295 int element_size, 3341 int element_size,
3296 int shift_size, 3342 int shift_size,
3297 int base_offset) { 3343 int base_offset) {
3298 if (key_is_constant) { 3344 if (key_is_constant) {
3299 return MemOperand(base, (constant_key << element_size) + base_offset); 3345 return MemOperand(base, (constant_key << element_size) + base_offset);
3300 } 3346 }
3301 3347
3302 if (base_offset == 0) { 3348 if (base_offset == 0) {
3303 if (shift_size >= 0) { 3349 if (shift_size >= 0) {
3304 __ sll(scratch0(), key, shift_size); 3350 __ dsll(scratch0(), key, shift_size);
3305 __ Addu(scratch0(), base, scratch0()); 3351 __ Daddu(scratch0(), base, scratch0());
3306 return MemOperand(scratch0()); 3352 return MemOperand(scratch0());
3307 } else { 3353 } else {
3308 ASSERT_EQ(-1, shift_size); 3354 if (shift_size == -32) {
3309 __ srl(scratch0(), key, 1); 3355 __ dsra32(scratch0(), key, 0);
3310 __ Addu(scratch0(), base, scratch0()); 3356 } else {
3357 __ dsra(scratch0(), key, -shift_size);
3358 }
3359 __ Daddu(scratch0(), base, scratch0());
3311 return MemOperand(scratch0()); 3360 return MemOperand(scratch0());
3312 } 3361 }
3313 } 3362 }
3314 3363
3315 if (shift_size >= 0) { 3364 if (shift_size >= 0) {
3316 __ sll(scratch0(), key, shift_size); 3365 __ dsll(scratch0(), key, shift_size);
3317 __ Addu(scratch0(), base, scratch0()); 3366 __ Daddu(scratch0(), base, scratch0());
3318 return MemOperand(scratch0(), base_offset); 3367 return MemOperand(scratch0(), base_offset);
3319 } else { 3368 } else {
3320 ASSERT_EQ(-1, shift_size); 3369 if (shift_size == -32) {
3321 __ sra(scratch0(), key, 1); 3370 __ dsra32(scratch0(), key, 0);
3322 __ Addu(scratch0(), base, scratch0()); 3371 } else {
3372 __ dsra(scratch0(), key, -shift_size);
3373 }
3374 __ Daddu(scratch0(), base, scratch0());
3323 return MemOperand(scratch0(), base_offset); 3375 return MemOperand(scratch0(), base_offset);
3324 } 3376 }
3325 } 3377 }
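PrepareKeyedOperand reduces to base + scaled_key + base_offset, with the scaling emitted as dsll for a non-negative shift_size and dsra/dsra32 for a negative one. A scalar model of the computation (a sketch, not the V8 API):

    #include <cassert>
    #include <cstdint>

    int64_t KeyedAddress(int64_t base, int64_t key, bool key_is_constant,
                         int constant_key, int element_size, int shift_size,
                         int base_offset) {
      if (key_is_constant) {
        return base + (constant_key << element_size) + base_offset;
      }
      // Negative shift_size means the key is a tagged Smi and must be
      // shifted right (dsra/dsra32) rather than left (dsll).
      int64_t scaled = (shift_size >= 0) ? (key << shift_size)
                                         : (key >> -shift_size);
      return base + scaled + base_offset;
    }

    int main() {
      // Constant key 7 into an 8-byte element array at 0x1000.
      assert(KeyedAddress(0x1000, 0, true, 7, 3, 0, 0) == 0x1000 + 7 * 8);
      // Tagged Smi key 7 (payload in the upper 32 bits), double elements.
      assert(KeyedAddress(0x1000, int64_t{7} << 32, false, 0, 3, 3 - 32, 0) ==
             0x1000 + 7 * 8);
      return 0;
    }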
3326 3378
3327 3379
3328 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { 3380 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
3329 ASSERT(ToRegister(instr->context()).is(cp)); 3381 ASSERT(ToRegister(instr->context()).is(cp));
3330 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); 3382 ASSERT(ToRegister(instr->object()).is(LoadIC::ReceiverRegister()));
3331 ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister())); 3383 ASSERT(ToRegister(instr->key()).is(LoadIC::NameRegister()));
3332 3384
3333 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); 3385 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
3334 CallCode(ic, RelocInfo::CODE_TARGET, instr); 3386 CallCode(ic, RelocInfo::CODE_TARGET, instr);
3335 } 3387 }
3336 3388
3337 3389
3338 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { 3390 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
3339 Register scratch = scratch0(); 3391 Register scratch = scratch0();
3340 Register temp = scratch1(); 3392 Register temp = scratch1();
3341 Register result = ToRegister(instr->result()); 3393 Register result = ToRegister(instr->result());
3342 3394
3343 if (instr->hydrogen()->from_inlined()) { 3395 if (instr->hydrogen()->from_inlined()) {
3344 __ Subu(result, sp, 2 * kPointerSize); 3396 __ Dsubu(result, sp, 2 * kPointerSize);
3345 } else { 3397 } else {
3346 // Check if the calling frame is an arguments adaptor frame. 3398 // Check if the calling frame is an arguments adaptor frame.
3347 Label done, adapted; 3399 Label done, adapted;
3348 __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3400 __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3349 __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); 3401 __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
3350 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3402 __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3351 3403
3352 // Result is the frame pointer for the frame if not adapted and for the real 3404 // Result is the frame pointer for the frame if not adapted and for the real
3353 // frame below the adaptor frame if adapted. 3405 // frame below the adaptor frame if adapted.
3354 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne). 3406 __ Movn(result, fp, temp); // Move only if temp is not equal to zero (ne).
3355 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq). 3407 __ Movz(result, scratch, temp); // Move only if temp is equal to zero (eq).
3356 } 3408 }
3357 } 3409 }
3358 3410
3359 3411
3360 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { 3412 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
3361 Register elem = ToRegister(instr->elements()); 3413 Register elem = ToRegister(instr->elements());
3362 Register result = ToRegister(instr->result()); 3414 Register result = ToRegister(instr->result());
3363 3415
3364 Label done; 3416 Label done;
3365 3417
3366 // If there is no arguments adaptor frame, the number of arguments is fixed. 3418 // If there is no arguments adaptor frame, the number of arguments is fixed.
3367 __ Addu(result, zero_reg, Operand(scope()->num_parameters())); 3419 __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
3368 __ Branch(&done, eq, fp, Operand(elem)); 3420 __ Branch(&done, eq, fp, Operand(elem));
3369 3421
3370 // Arguments adaptor frame present. Get argument length from there. 3422 // Arguments adaptor frame present. Get argument length from there.
3371 __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3423 __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3372 __ lw(result, 3424 __ ld(result,
3373 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); 3425 MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
3374 __ SmiUntag(result); 3426 __ SmiUntag(result);
3375 3427
3376 // Argument length is in result register. 3428 // Argument length is in result register.
3377 __ bind(&done); 3429 __ bind(&done);
3378 } 3430 }
3379 3431
3380 3432
3381 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { 3433 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
3382 Register receiver = ToRegister(instr->receiver()); 3434 Register receiver = ToRegister(instr->receiver());
3383 Register function = ToRegister(instr->function()); 3435 Register function = ToRegister(instr->function());
3384 Register result = ToRegister(instr->result()); 3436 Register result = ToRegister(instr->result());
3385 Register scratch = scratch0(); 3437 Register scratch = scratch0();
3386 3438
3387 // If the receiver is null or undefined, we have to pass the global 3439 // If the receiver is null or undefined, we have to pass the global
3388 // object as a receiver to normal functions. Values have to be 3440 // object as a receiver to normal functions. Values have to be
3389 // passed unchanged to builtins and strict-mode functions. 3441 // passed unchanged to builtins and strict-mode functions.
3390 Label global_object, result_in_receiver; 3442 Label global_object, result_in_receiver;
3391 3443
3392 if (!instr->hydrogen()->known_function()) { 3444 if (!instr->hydrogen()->known_function()) {
3393 // Do not transform the receiver to object for strict mode 3445 // Do not transform the receiver to object for strict mode functions.
3394 // functions. 3446 __ ld(scratch,
3395 __ lw(scratch,
3396 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3447 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3397 __ lw(scratch,
3398 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3399 3448
3400 // Do not transform the receiver to object for builtins. 3449 // Do not transform the receiver to object for builtins.
3401 int32_t strict_mode_function_mask = 3450 int32_t strict_mode_function_mask =
3402 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); 3451 1 << SharedFunctionInfo::kStrictModeBitWithinByte;
3403 int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize); 3452 int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
3404 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask)); 3453
3405 __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg)); 3454 __ lbu(at,
3455 FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
3456 __ And(at, at, Operand(strict_mode_function_mask));
3457 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3458 __ lbu(at,
3459 FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
3460 __ And(at, at, Operand(native_mask));
3461 __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
3406 } 3462 }
3407 3463
3408 // Normal function. Replace undefined or null with global receiver. 3464 // Normal function. Replace undefined or null with global receiver.
3409 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3465 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3410 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3466 __ Branch(&global_object, eq, receiver, Operand(scratch));
3411 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3467 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3412 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3468 __ Branch(&global_object, eq, receiver, Operand(scratch));
3413 3469
3414 // Deoptimize if the receiver is not a JS object. 3470 // Deoptimize if the receiver is not a JS object.
3415 __ SmiTst(receiver, scratch); 3471 __ SmiTst(receiver, scratch);
3416 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); 3472 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3417 3473
3418 __ GetObjectType(receiver, scratch, scratch); 3474 __ GetObjectType(receiver, scratch, scratch);
3419 DeoptimizeIf(lt, instr->environment(), 3475 DeoptimizeIf(lt, instr->environment(),
3420 scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); 3476 scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
3477 __ Branch(&result_in_receiver);
3421 3478
3422 __ Branch(&result_in_receiver);
3423 __ bind(&global_object); 3479 __ bind(&global_object);
3424 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3480 __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
3425 __ lw(result, 3481 __ ld(result,
3426 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3482 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3427 __ lw(result, 3483 __ ld(result,
3428 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 3484 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3429 3485
3430 if (result.is(receiver)) { 3486 if (result.is(receiver)) {
3431 __ bind(&result_in_receiver); 3487 __ bind(&result_in_receiver);
3432 } else { 3488 } else {
3433 Label result_ok; 3489 Label result_ok;
3434 __ Branch(&result_ok); 3490 __ Branch(&result_ok);
3435 __ bind(&result_in_receiver); 3491 __ bind(&result_in_receiver);
3436 __ mov(result, receiver); 3492 __ mov(result, receiver);
3437 __ bind(&result_ok); 3493 __ bind(&result_ok);
(...skipping 14 matching lines...)
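The receiver check in DoWrapReceiver now tests the strict-mode and native flags with single-byte loads (lbu from kStrictModeByteOffset and kNativeByteOffset) rather than loading the whole compiler-hints word. A sketch of how a bit index in a 32-bit flags word maps to a byte offset plus a bit within that byte on a little-endian target; the concrete bit position below is an illustrative assumption, not V8's actual value:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kFlagBit = 10;              // hypothetical flag position
      uint32_t compiler_hints = 1u << kFlagBit;

      int byte_offset = kFlagBit / 8;       // -> the k...ByteOffset constant
      int bit_in_byte = kFlagBit % 8;       // -> the k...BitWithinByte constant

      // The lbu + And + Branch pattern above, modeled with a byte read.
      uint8_t byte = reinterpret_cast<uint8_t*>(&compiler_hints)[byte_offset];
      assert((byte & (1 << bit_in_byte)) != 0);
      return 0;
    }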
3452 // Copy the arguments to this function possibly from the 3508 // Copy the arguments to this function possibly from the
3453 // adaptor frame below it. 3509 // adaptor frame below it.
3454 const uint32_t kArgumentsLimit = 1 * KB; 3510 const uint32_t kArgumentsLimit = 1 * KB;
3455 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit)); 3511 DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
3456 3512
3457 // Push the receiver and use the register to keep the original 3513 // Push the receiver and use the register to keep the original
3458 // number of arguments. 3514 // number of arguments.
3459 __ push(receiver); 3515 __ push(receiver);
3460 __ Move(receiver, length); 3516 __ Move(receiver, length);
3461 // The arguments are at an offset of one pointer size from elements. 3517 // The arguments are at an offset of one pointer size from elements.
3462 __ Addu(elements, elements, Operand(1 * kPointerSize)); 3518 __ Daddu(elements, elements, Operand(1 * kPointerSize));
3463 3519
3464 // Loop through the arguments pushing them onto the execution 3520 // Loop through the arguments pushing them onto the execution
3465 // stack. 3521 // stack.
3466 Label invoke, loop; 3522 Label invoke, loop;
3467 // length is a small non-negative integer, due to the test above. 3523 // length is a small non-negative integer, due to the test above.
3468 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg)); 3524 __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
3469 __ sll(scratch, length, 2); 3525 __ dsll(scratch, length, kPointerSizeLog2);
3470 __ bind(&loop); 3526 __ bind(&loop);
3471 __ Addu(scratch, elements, scratch); 3527 __ Daddu(scratch, elements, scratch);
3472 __ lw(scratch, MemOperand(scratch)); 3528 __ ld(scratch, MemOperand(scratch));
3473 __ push(scratch); 3529 __ push(scratch);
3474 __ Subu(length, length, Operand(1)); 3530 __ Dsubu(length, length, Operand(1));
3475 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg)); 3531 __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
3476 __ sll(scratch, length, 2); 3532 __ dsll(scratch, length, kPointerSizeLog2);
3477 3533
3478 __ bind(&invoke); 3534 __ bind(&invoke);
3479 ASSERT(instr->HasPointerMap()); 3535 ASSERT(instr->HasPointerMap());
3480 LPointerMap* pointers = instr->pointer_map(); 3536 LPointerMap* pointers = instr->pointer_map();
3481 SafepointGenerator safepoint_generator( 3537 SafepointGenerator safepoint_generator(
3482 this, pointers, Safepoint::kLazyDeopt); 3538 this, pointers, Safepoint::kLazyDeopt);
3483 // The number of arguments is stored in receiver which is a0, as expected 3539 // The number of arguments is stored in receiver which is a0, as expected
3484 // by InvokeFunction. 3540 // by InvokeFunction.
3485 ParameterCount actual(receiver); 3541 ParameterCount actual(receiver);
3486 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); 3542 __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
(...skipping 11 matching lines...)
3498 } 3554 }
3499 3555
3500 3556
3501 void LCodeGen::DoDrop(LDrop* instr) { 3557 void LCodeGen::DoDrop(LDrop* instr) {
3502 __ Drop(instr->count()); 3558 __ Drop(instr->count());
3503 } 3559 }
3504 3560
3505 3561
3506 void LCodeGen::DoThisFunction(LThisFunction* instr) { 3562 void LCodeGen::DoThisFunction(LThisFunction* instr) {
3507 Register result = ToRegister(instr->result()); 3563 Register result = ToRegister(instr->result());
3508 __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); 3564 __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
3509 } 3565 }
3510 3566
3511 3567
3512 void LCodeGen::DoContext(LContext* instr) { 3568 void LCodeGen::DoContext(LContext* instr) {
3513 // If there is a non-return use, the context must be moved to a register. 3569 // If there is a non-return use, the context must be moved to a register.
3514 Register result = ToRegister(instr->result()); 3570 Register result = ToRegister(instr->result());
3515 if (info()->IsOptimizing()) { 3571 if (info()->IsOptimizing()) {
3516 __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); 3572 __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
3517 } else { 3573 } else {
3518 // If there is no frame, the context must be in cp. 3574 // If there is no frame, the context must be in cp.
3519 ASSERT(result.is(cp)); 3575 ASSERT(result.is(cp));
3520 } 3576 }
3521 } 3577 }
3522 3578
3523 3579
3524 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { 3580 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
3525 ASSERT(ToRegister(instr->context()).is(cp)); 3581 ASSERT(ToRegister(instr->context()).is(cp));
3526 __ li(scratch0(), instr->hydrogen()->pairs()); 3582 __ li(scratch0(), instr->hydrogen()->pairs());
(...skipping 15 matching lines...)
3542 dont_adapt_arguments || formal_parameter_count == arity; 3598 dont_adapt_arguments || formal_parameter_count == arity;
3543 3599
3544 LPointerMap* pointers = instr->pointer_map(); 3600 LPointerMap* pointers = instr->pointer_map();
3545 3601
3546 if (can_invoke_directly) { 3602 if (can_invoke_directly) {
3547 if (a1_state == A1_UNINITIALIZED) { 3603 if (a1_state == A1_UNINITIALIZED) {
3548 __ li(a1, function); 3604 __ li(a1, function);
3549 } 3605 }
3550 3606
3551 // Change context. 3607 // Change context.
3552 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3608 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3553 3609
3554 // Set a0 to arguments count if adaptation is not needed. Assumes that a0 3610 // Set a0 to arguments count if adaptation is not needed. Assumes that a0
3555 // is available to write to at this point. 3611 // is available to write to at this point.
3556 if (dont_adapt_arguments) { 3612 if (dont_adapt_arguments) {
3557 __ li(a0, Operand(arity)); 3613 __ li(a0, Operand(arity));
3558 } 3614 }
3559 3615
3560 // Invoke function. 3616 // Invoke function.
3561 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 3617 __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3562 __ Call(at); 3618 __ Call(at);
3563 3619
3564 // Set up deoptimization. 3620 // Set up deoptimization.
3565 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 3621 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3566 } else { 3622 } else {
3567 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); 3623 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
3568 ParameterCount count(arity); 3624 ParameterCount count(arity);
3569 ParameterCount expected(formal_parameter_count); 3625 ParameterCount expected(formal_parameter_count);
3570 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator); 3626 __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
3571 } 3627 }
3572 } 3628 }
3573 3629
3574 3630
3575 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3631 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3576 ASSERT(instr->context() != NULL); 3632 ASSERT(instr->context() != NULL);
3577 ASSERT(ToRegister(instr->context()).is(cp)); 3633 ASSERT(ToRegister(instr->context()).is(cp));
3578 Register input = ToRegister(instr->value()); 3634 Register input = ToRegister(instr->value());
3579 Register result = ToRegister(instr->result()); 3635 Register result = ToRegister(instr->result());
3580 Register scratch = scratch0(); 3636 Register scratch = scratch0();
3581 3637
3582 // Deoptimize if not a heap number. 3638 // Deoptimize if not a heap number.
3583 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3639 __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3584 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3640 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3585 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at)); 3641 DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
3586 3642
3587 Label done; 3643 Label done;
3588 Register exponent = scratch0(); 3644 Register exponent = scratch0();
3589 scratch = no_reg; 3645 scratch = no_reg;
3590 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3646 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3591 // Check the sign of the argument. If the argument is positive, just 3647 // Check the sign of the argument. If the argument is positive, just
3592 // return it. 3648 // return it.
3593 __ Move(result, input); 3649 __ Move(result, input);
3594 __ And(at, exponent, Operand(HeapNumber::kSignMask)); 3650 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3595 __ Branch(&done, eq, at, Operand(zero_reg)); 3651 __ Branch(&done, eq, at, Operand(zero_reg));
3596 3652
3597 // Input is negative. Reverse its sign. 3653 // Input is negative. Reverse its sign.
3598 // Preserve the value of all registers. 3654 // Preserve the value of all registers.
3599 { 3655 {
3600 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 3656 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
3601 3657
3602 // Registers were saved at the safepoint, so we can use 3658 // Registers were saved at the safepoint, so we can use
3603 // many scratch registers. 3659 // many scratch registers.
3604 Register tmp1 = input.is(a1) ? a0 : a1; 3660 Register tmp1 = input.is(a1) ? a0 : a1;
3605 Register tmp2 = input.is(a2) ? a0 : a2; 3661 Register tmp2 = input.is(a2) ? a0 : a2;
3606 Register tmp3 = input.is(a3) ? a0 : a3; 3662 Register tmp3 = input.is(a3) ? a0 : a3;
3607 Register tmp4 = input.is(t0) ? a0 : t0; 3663 Register tmp4 = input.is(a4) ? a0 : a4;
3608 3664
3609 // exponent: floating point exponent value. 3665 // exponent: floating point exponent value.
3610 3666
3611 Label allocated, slow; 3667 Label allocated, slow;
3612 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex); 3668 __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
3613 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow); 3669 __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
3614 __ Branch(&allocated); 3670 __ Branch(&allocated);
3615 3671
3616 // Slow case: Call the runtime system to do the number allocation. 3672 // Slow case: Call the runtime system to do the number allocation.
3617 __ bind(&slow); 3673 __ bind(&slow);
3618 3674
3619 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, 3675 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
3620 instr->context()); 3676 instr->context());
3621 // Set the pointer to the new heap number in tmp. 3677 // Set the pointer to the new heap number in tmp.
3622 if (!tmp1.is(v0)) 3678 if (!tmp1.is(v0))
3623 __ mov(tmp1, v0); 3679 __ mov(tmp1, v0);
3624 // Restore input_reg after call to runtime. 3680 // Restore input_reg after call to runtime.
3625 __ LoadFromSafepointRegisterSlot(input, input); 3681 __ LoadFromSafepointRegisterSlot(input, input);
3626 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3682 __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3627 3683
3628 __ bind(&allocated); 3684 __ bind(&allocated);
3629 // exponent: floating point exponent value. 3685 // exponent: floating point exponent value.
3630 // tmp1: allocated heap number. 3686 // tmp1: allocated heap number.
3631 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask)); 3687 __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
3632 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); 3688 __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
3633 __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); 3689 __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
3634 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); 3690 __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
3635 3691
3636 __ StoreToSafepointRegisterSlot(tmp1, result); 3692 __ StoreToSafepointRegisterSlot(tmp1, result);
3637 } 3693 }
3638 3694
3639 __ bind(&done); 3695 __ bind(&done);
3640 } 3696 }
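The deferred Math.abs path touches only the two 32-bit halves of the heap number: it clears kSignMask in the exponent word and copies the mantissa word through unchanged. A scalar sketch of that split:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint32_t kSignMask = 0x80000000u;
      double input = -3.5;
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));

      uint32_t exponent = static_cast<uint32_t>(bits >> 32);  // upper word
      uint32_t mantissa = static_cast<uint32_t>(bits);        // lower word
      exponent &= ~kSignMask;                                 // drop the sign

      uint64_t abs_bits = (static_cast<uint64_t>(exponent) << 32) | mantissa;
      double result;
      std::memcpy(&result, &abs_bits, sizeof(result));
      assert(result == 3.5);
      return 0;
    }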
3641 3697
3642 3698
3643 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3699 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3644 Register input = ToRegister(instr->value()); 3700 Register input = ToRegister(instr->value());
3645 Register result = ToRegister(instr->result()); 3701 Register result = ToRegister(instr->result());
3646 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 3702 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3647 Label done; 3703 Label done;
3648 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); 3704 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3649 __ mov(result, input); 3705 __ mov(result, input);
3650 __ subu(result, zero_reg, input); 3706 __ dsubu(result, zero_reg, input);
3651 // Overflow if result is still negative, i.e. 0x80000000. 3707 // Overflow if result is still negative, i.e. the minimal 64-bit value.
3652 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); 3708 DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
3653 __ bind(&done); 3709 __ bind(&done);
3654 } 3710 }
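EmitIntegerMathAbs negates in place and then leans on one invariant: the only input whose negation is still negative is the minimal integer, which is exactly what the DeoptimizeIf(lt, ...) catches. A sketch, using unsigned arithmetic to model the wrapping dsubu:

    #include <cassert>
    #include <cstdint>

    bool IntegerAbs(int64_t input, int64_t* result) {
      if (input >= 0) {
        *result = input;
      } else {
        // Two's-complement negation without signed overflow.
        *result = static_cast<int64_t>(0u - static_cast<uint64_t>(input));
      }
      return *result >= 0;  // false is the deoptimization case
    }

    int main() {
      int64_t r;
      assert(IntegerAbs(-7, &r) && r == 7);
      // INT64_MIN has no positive counterpart; negation wraps back to it.
      assert(!IntegerAbs(INT64_MIN, &r));
      return 0;
    }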
3655 3711
3656 3712
3657 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3713 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3658 // Class for deferred case. 3714 // Class for deferred case.
3659 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { 3715 class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
3660 public: 3716 public:
(...skipping 41 matching lines...)
3702 double_scratch0(), 3758 double_scratch0(),
3703 except_flag); 3759 except_flag);
3704 3760
3705 // Deopt if the operation did not succeed. 3761 // Deopt if the operation did not succeed.
3706 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); 3762 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3707 3763
3708 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3764 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3709 // Test for -0. 3765 // Test for -0.
3710 Label done; 3766 Label done;
3711 __ Branch(&done, ne, result, Operand(zero_reg)); 3767 __ Branch(&done, ne, result, Operand(zero_reg));
3712 __ mfc1(scratch1, input.high()); 3768 __ mfhc1(scratch1, input); // Get exponent/sign bits.
3713 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 3769 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3714 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg)); 3770 DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
3715 __ bind(&done); 3771 __ bind(&done);
3716 } 3772 }
3717 } 3773 }
3718 3774
3719 3775
3720 void LCodeGen::DoMathRound(LMathRound* instr) { 3776 void LCodeGen::DoMathRound(LMathRound* instr) {
3721 DoubleRegister input = ToDoubleRegister(instr->value()); 3777 DoubleRegister input = ToDoubleRegister(instr->value());
3722 Register result = ToRegister(instr->result()); 3778 Register result = ToRegister(instr->result());
3723 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3779 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3724 Register scratch = scratch0(); 3780 Register scratch = scratch0();
3725 Label done, check_sign_on_zero; 3781 Label done, check_sign_on_zero;
3726 3782
3727 // Extract exponent bits. 3783 // Extract exponent bits.
3728 __ mfc1(result, input.high()); 3784 __ mfhc1(result, input);
3729 __ Ext(scratch, 3785 __ Ext(scratch,
3730 result, 3786 result,
3731 HeapNumber::kExponentShift, 3787 HeapNumber::kExponentShift,
3732 HeapNumber::kExponentBits); 3788 HeapNumber::kExponentBits);
3733 3789
3734 // If the number is in ]-0.5, +0.5[, the result is +/- 0. 3790 // If the number is in ]-0.5, +0.5[, the result is +/- 0.
3735 Label skip1; 3791 Label skip1;
3736 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2)); 3792 __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
3737 __ mov(result, zero_reg); 3793 __ mov(result, zero_reg);
3738 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3794 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3739 __ Branch(&check_sign_on_zero); 3795 __ Branch(&check_sign_on_zero);
3740 } else { 3796 } else {
3741 __ Branch(&done); 3797 __ Branch(&done);
3742 } 3798 }
3743 __ bind(&skip1); 3799 __ bind(&skip1);
3744 3800
3745 // The following conversion will not work with numbers 3801 // The following conversion will not work with numbers
3746 // outside of ]-2^32, 2^32[. 3802 // outside of ]-2^32, 2^32[.
3747 DeoptimizeIf(ge, instr->environment(), scratch, 3803 DeoptimizeIf(ge, instr->environment(), scratch,
3748 Operand(HeapNumber::kExponentBias + 32)); 3804 Operand(HeapNumber::kExponentBias + 32));
3749 3805
3750 // Save the original sign for later comparison. 3806 // Save the original sign for later comparison.
3751 __ And(scratch, result, Operand(HeapNumber::kSignMask)); 3807 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3752 3808
3753 __ Move(double_scratch0(), 0.5); 3809 __ Move(double_scratch0(), 0.5);
3754 __ add_d(double_scratch0(), input, double_scratch0()); 3810 __ add_d(double_scratch0(), input, double_scratch0());
3755 3811
3756 // Check sign of the result: if the sign changed, the input 3812 // Check sign of the result: if the sign changed, the input
3757 // value was in ]-0.5, 0[ and the result should be -0. 3813 // value was in ]-0.5, 0[ and the result should be -0.
3758 __ mfc1(result, double_scratch0().high()); 3814 __ mfhc1(result, double_scratch0());
3815 // mfhc1 sign-extends, clear the upper bits.
3816 __ dsll32(result, result, 0);
3817 __ dsrl32(result, result, 0);
3759 __ Xor(result, result, Operand(scratch)); 3818 __ Xor(result, result, Operand(scratch));
3760 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3819 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3761 // ARM uses 'mi' here, which is 'lt' 3820 // ARM uses 'mi' here, which is 'lt'
3762 DeoptimizeIf(lt, instr->environment(), result, 3821 DeoptimizeIf(lt, instr->environment(), result,
3763 Operand(zero_reg)); 3822 Operand(zero_reg));
3764 } else { 3823 } else {
3765 Label skip2; 3824 Label skip2;
3766 // ARM uses 'mi' here, which is 'lt' 3825 // ARM uses 'mi' here, which is 'lt'
3767 // Negating it results in 'ge' 3826 // Negating it results in 'ge'
3768 __ Branch(&skip2, ge, result, Operand(zero_reg)); 3827 __ Branch(&skip2, ge, result, Operand(zero_reg));
3769 __ mov(result, zero_reg); 3828 __ mov(result, zero_reg);
3770 __ Branch(&done); 3829 __ Branch(&done);
3771 __ bind(&skip2); 3830 __ bind(&skip2);
3772 } 3831 }
3773 3832
3774 Register except_flag = scratch; 3833 Register except_flag = scratch;
3775 __ EmitFPUTruncate(kRoundToMinusInf, 3834 __ EmitFPUTruncate(kRoundToMinusInf,
3776 result, 3835 result,
3777 double_scratch0(), 3836 double_scratch0(),
3778 at, 3837 at,
3779 double_scratch1, 3838 double_scratch1,
3780 except_flag); 3839 except_flag);
3781 3840
3782 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); 3841 DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
3783 3842
3784 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3843 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3785 // Test for -0. 3844 // Test for -0.
3786 __ Branch(&done, ne, result, Operand(zero_reg)); 3845 __ Branch(&done, ne, result, Operand(zero_reg));
3787 __ bind(&check_sign_on_zero); 3846 __ bind(&check_sign_on_zero);
3788 __ mfc1(scratch, input.high()); 3847 __ mfhc1(scratch, input); // Get exponent/sign bits.
3789 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); 3848 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3790 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); 3849 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3791 } 3850 }
3792 __ bind(&done); 3851 __ bind(&done);
3793 } 3852 }
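mfhc1 delivers the upper word of the double sign-extended to 64 bits, so the dsll32/dsrl32 pair added above zero-extends it before the Xor against the saved sign bits; otherwise the comparison would see 32 extra copies of the sign. Sketch:

    #include <cassert>
    #include <cstdint>

    int main() {
      // mfhc1 of a negative double's upper word arrives sign-extended.
      int64_t sign_extended = static_cast<int32_t>(0x80000000u);

      // dsll32 reg, reg, 0 followed by dsrl32 reg, reg, 0 clears the
      // upper 32 bits, leaving a zero-extended value.
      uint64_t cleared = static_cast<uint64_t>(sign_extended);
      cleared = (cleared << 32) >> 32;
      assert(cleared == 0x80000000u);
      return 0;
    }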
3794 3853
3795 3854
3796 void LCodeGen::DoMathSqrt(LMathSqrt* instr) { 3855 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
3797 DoubleRegister input = ToDoubleRegister(instr->value()); 3856 DoubleRegister input = ToDoubleRegister(instr->value());
3798 DoubleRegister result = ToDoubleRegister(instr->result()); 3857 DoubleRegister result = ToDoubleRegister(instr->result());
(...skipping 35 matching lines...)
3834 ToRegister(instr->right()).is(a2)); 3893 ToRegister(instr->right()).is(a2));
3835 ASSERT(ToDoubleRegister(instr->left()).is(f2)); 3894 ASSERT(ToDoubleRegister(instr->left()).is(f2));
3836 ASSERT(ToDoubleRegister(instr->result()).is(f0)); 3895 ASSERT(ToDoubleRegister(instr->result()).is(f0));
3837 3896
3838 if (exponent_type.IsSmi()) { 3897 if (exponent_type.IsSmi()) {
3839 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3898 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3840 __ CallStub(&stub); 3899 __ CallStub(&stub);
3841 } else if (exponent_type.IsTagged()) { 3900 } else if (exponent_type.IsTagged()) {
3842 Label no_deopt; 3901 Label no_deopt;
3843 __ JumpIfSmi(a2, &no_deopt); 3902 __ JumpIfSmi(a2, &no_deopt);
3844 __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset)); 3903 __ ld(a7, FieldMemOperand(a2, HeapObject::kMapOffset));
3845 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3904 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3846 DeoptimizeIf(ne, instr->environment(), t3, Operand(at)); 3905 DeoptimizeIf(ne, instr->environment(), a7, Operand(at));
3847 __ bind(&no_deopt); 3906 __ bind(&no_deopt);
3848 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3907 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3849 __ CallStub(&stub); 3908 __ CallStub(&stub);
3850 } else if (exponent_type.IsInteger32()) { 3909 } else if (exponent_type.IsInteger32()) {
3851 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3910 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3852 __ CallStub(&stub); 3911 __ CallStub(&stub);
3853 } else { 3912 } else {
3854 ASSERT(exponent_type.IsDouble()); 3913 ASSERT(exponent_type.IsDouble());
3855 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3914 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3856 __ CallStub(&stub); 3915 __ CallStub(&stub);
(...skipping 60 matching lines...)
3917 3976
3918 if (instr->target()->IsConstantOperand()) { 3977 if (instr->target()->IsConstantOperand()) {
3919 LConstantOperand* target = LConstantOperand::cast(instr->target()); 3978 LConstantOperand* target = LConstantOperand::cast(instr->target());
3920 Handle<Code> code = Handle<Code>::cast(ToHandle(target)); 3979 Handle<Code> code = Handle<Code>::cast(ToHandle(target));
3921 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); 3980 generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
3922 __ Call(code, RelocInfo::CODE_TARGET); 3981 __ Call(code, RelocInfo::CODE_TARGET);
3923 } else { 3982 } else {
3924 ASSERT(instr->target()->IsRegister()); 3983 ASSERT(instr->target()->IsRegister());
3925 Register target = ToRegister(instr->target()); 3984 Register target = ToRegister(instr->target());
3926 generator.BeforeCall(__ CallSize(target)); 3985 generator.BeforeCall(__ CallSize(target));
3927 __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); 3986 __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
3928 __ Call(target); 3987 __ Call(target);
3929 } 3988 }
3930 generator.AfterCall(); 3989 generator.AfterCall();
3931 } 3990 }
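The Daddu on the register target folds untagging and header skipping into a single constant: a tagged Code pointer is the object's address plus kHeapObjectTag, and the first instruction sits Code::kHeaderSize bytes into the object. A sketch; the header size below is illustrative, not V8's actual value:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kHeapObjectTag = 1;        // V8's low pointer tag
      const int kHeaderSize = 128;         // illustrative stand-in
      int64_t object_address = 0x10000;    // hypothetical Code object
      int64_t tagged = object_address + kHeapObjectTag;
      int64_t entry = tagged + (kHeaderSize - kHeapObjectTag);
      assert(entry == object_address + kHeaderSize);
      return 0;
    }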
3932 3991
3933 3992
3934 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { 3993 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
3935 ASSERT(ToRegister(instr->function()).is(a1)); 3994 ASSERT(ToRegister(instr->function()).is(a1));
3936 ASSERT(ToRegister(instr->result()).is(v0)); 3995 ASSERT(ToRegister(instr->result()).is(v0));
3937 3996
3938 if (instr->hydrogen()->pass_argument_count()) { 3997 if (instr->hydrogen()->pass_argument_count()) {
3939 __ li(a0, Operand(instr->arity())); 3998 __ li(a0, Operand(instr->arity()));
3940 } 3999 }
3941 4000
3942 // Change context. 4001 // Change context.
3943 __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 4002 __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3944 4003
3945 // Load the code entry address 4004 // Load the code entry address
3946 __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4005 __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3947 __ Call(at); 4006 __ Call(at);
3948 4007
3949 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); 4008 RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
3950 } 4009 }
3951 4010
3952 4011
3953 void LCodeGen::DoCallFunction(LCallFunction* instr) { 4012 void LCodeGen::DoCallFunction(LCallFunction* instr) {
3954 ASSERT(ToRegister(instr->context()).is(cp)); 4013 ASSERT(ToRegister(instr->context()).is(cp));
3955 ASSERT(ToRegister(instr->function()).is(a1)); 4014 ASSERT(ToRegister(instr->function()).is(a1));
3956 ASSERT(ToRegister(instr->result()).is(v0)); 4015 ASSERT(ToRegister(instr->result()).is(v0));
(...skipping 32 matching lines...)
3989 4048
3990 if (instr->arity() == 0) { 4049 if (instr->arity() == 0) {
3991 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode); 4050 ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
3992 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4051 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
3993 } else if (instr->arity() == 1) { 4052 } else if (instr->arity() == 1) {
3994 Label done; 4053 Label done;
3995 if (IsFastPackedElementsKind(kind)) { 4054 if (IsFastPackedElementsKind(kind)) {
3996 Label packed_case; 4055 Label packed_case;
3997 // We might need a change here; 4056 // We might need a change here;
3998 // look at the first argument. 4057 // look at the first argument.
3999 __ lw(t1, MemOperand(sp, 0)); 4058 __ ld(a5, MemOperand(sp, 0));
4000 __ Branch(&packed_case, eq, t1, Operand(zero_reg)); 4059 __ Branch(&packed_case, eq, a5, Operand(zero_reg));
4001 4060
4002 ElementsKind holey_kind = GetHoleyElementsKind(kind); 4061 ElementsKind holey_kind = GetHoleyElementsKind(kind);
4003 ArraySingleArgumentConstructorStub stub(isolate(), 4062 ArraySingleArgumentConstructorStub stub(isolate(),
4004 holey_kind, 4063 holey_kind,
4005 override_mode); 4064 override_mode);
4006 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4065 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4007 __ jmp(&done); 4066 __ jmp(&done);
4008 __ bind(&packed_case); 4067 __ bind(&packed_case);
4009 } 4068 }
4010 4069
4011 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode); 4070 ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
4012 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4071 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4013 __ bind(&done); 4072 __ bind(&done);
4014 } else { 4073 } else {
4015 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode); 4074 ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
4016 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); 4075 CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
4017 } 4076 }
4018 } 4077 }
4019 4078
4020 4079
4021 void LCodeGen::DoCallRuntime(LCallRuntime* instr) { 4080 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
4022 CallRuntime(instr->function(), instr->arity(), instr); 4081 CallRuntime(instr->function(), instr->arity(), instr);
4023 } 4082 }
4024 4083
4025 4084
4026 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { 4085 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
4027 Register function = ToRegister(instr->function()); 4086 Register function = ToRegister(instr->function());
4028 Register code_object = ToRegister(instr->code_object()); 4087 Register code_object = ToRegister(instr->code_object());
4029 __ Addu(code_object, code_object, 4088 __ Daddu(code_object, code_object,
4030 Operand(Code::kHeaderSize - kHeapObjectTag)); 4089 Operand(Code::kHeaderSize - kHeapObjectTag));
4031 __ sw(code_object, 4090 __ sd(code_object,
4032 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 4091 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
4033 } 4092 }


void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
  Register result = ToRegister(instr->result());
  Register base = ToRegister(instr->base_object());
  if (instr->offset()->IsConstantOperand()) {
    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
    __ Daddu(result, base, Operand(ToInteger32(offset)));
  } else {
    Register offset = ToRegister(instr->offset());
    __ Daddu(result, base, offset);
  }
}


void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->representation();

  Register object = ToRegister(instr->object());
  Register scratch2 = scratch1();
  Register scratch1 = scratch0();
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  if (access.IsExternalMemory()) {
    Register value = ToRegister(instr->value());
    MemOperand operand = MemOperand(object, offset);
    __ Store(value, operand, representation);
    return;
  }

  __ AssertNotSmi(object);

  ASSERT(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    ASSERT(access.IsInobject());
    ASSERT(!instr->hydrogen()->has_transition());
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ sdc1(value, FieldMemOperand(object, offset));
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ li(scratch1, Operand(transition));
    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object,
                           scratch1,
                           temp,
                           GetRAState(),
                           kSaveFPRegs);
    }
  }

  // Do the store.
  Register destination = object;
  if (!access.IsInobject()) {
    destination = scratch1;
    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
  }
  Register value = ToRegister(instr->value());
  if (representation.IsSmi() && SmiValuesAre32Bits() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    if (FLAG_debug_code) {
      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
      __ AssertSmi(scratch2);
    }

    // Store int value directly to upper half of the smi.
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  MemOperand operand = FieldMemOperand(destination, offset);
  __ Store(value, operand, representation);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(destination,
                        offset,
                        value,
                        scratch2,
                        GetRAState(),
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}
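// Note (illustrative): the "upper half" store above relies on the 64-bit smi
// layout, where a 32-bit payload sits above 32 tag/shift bits. A host-side
// sketch of the invariant, assuming little-endian MIPS64 and
// kSmiTagSize + kSmiShiftSize == 32:
//
//   #include <cstdint>
//   #include <cstring>
//
//   int64_t SmiFromInt32(int32_t v) { return static_cast<int64_t>(v) << 32; }
//
//   // Writing the raw int32 at byte offset kPointerSize / 2 (== 4) of an
//   // already-initialized smi slot yields exactly SmiFromInt32(v), so the
//   // codegen can skip tagging the value first.
//   int64_t StoreToUpperHalf(int64_t slot, int32_t v) {
//     std::memcpy(reinterpret_cast<char*>(&slot) + 4, &v, sizeof(v));
//     return slot;  // the low word (tag bits) is already zero for a smi
//   }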


void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  ASSERT(ToRegister(instr->context()).is(cp));
  ASSERT(ToRegister(instr->object()).is(a1));
  ASSERT(ToRegister(instr->value()).is(a0));

  // Name is always in a2.
  __ li(a2, Operand(instr->name()));
  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
  Operand operand((int64_t)0);
  Register reg;
  if (instr->index()->IsConstantOperand()) {
    operand = ToOperand(instr->index());
    reg = ToRegister(instr->length());
    cc = CommuteCondition(cc);
  } else {
    reg = ToRegister(instr->index());
    operand = ToOperand(instr->length());
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
(...skipping 16 matching lines...)
  if (key_is_constant) {
    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
  } else {
    key = ToRegister(instr->key());
  }
  int element_size_shift = ElementsKindToShiftSize(elements_kind);
  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
      : element_size_shift;
  int base_offset = instr->base_offset();

  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
      elements_kind == FLOAT32_ELEMENTS ||
      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
      elements_kind == FLOAT64_ELEMENTS) {
    Register address = scratch0();
    FPURegister value(ToDoubleRegister(instr->value()));
    if (key_is_constant) {
      if (constant_key != 0) {
        __ Daddu(address, external_pointer,
                Operand(constant_key << element_size_shift));
      } else {
        address = external_pointer;
      }
    } else {
      if (shift_size < 0) {
        if (shift_size == -32) {
          __ dsra32(address, key, 0);
        } else {
          __ dsra(address, key, -shift_size);
        }
      } else {
        __ dsll(address, key, shift_size);
      }
      __ Daddu(address, external_pointer, address);
    }

    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
        elements_kind == FLOAT32_ELEMENTS) {
      __ cvt_s_d(double_scratch0(), value);
      __ swc1(double_scratch0(), MemOperand(address, base_offset));
    } else {  // Storing doubles, not floats.
      __ sdc1(value, MemOperand(address, base_offset));
    }
  } else {
(...skipping 52 matching lines...)
  Label not_nan, done;

  // Calculate the effective address of the slot in the array to store the
  // double value.
  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
  if (key_is_constant) {
    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
    if (constant_key & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    __ Daddu(scratch, elements,
             Operand((constant_key << element_size_shift) + base_offset));
  } else {
    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
        : element_size_shift;
    __ Daddu(scratch, elements, Operand(base_offset));
    ASSERT((shift_size == 3) || (shift_size == -29));
    if (shift_size == 3) {
      __ dsll(at, ToRegister(instr->key()), 3);
    } else if (shift_size == -29) {
      __ dsra(at, ToRegister(instr->key()), 29);
    }
    __ Daddu(scratch, scratch, at);
  }

  if (instr->NeedsCanonicalization()) {
    Label is_nan;
    // Check for NaN. All NaNs must be canonicalized.
    __ BranchF(NULL, &is_nan, eq, value, value);
    __ Branch(&not_nan);

    // Only load canonical NaN if the comparison above set the overflow.
    __ bind(&is_nan);
(...skipping 23 matching lines...)
    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
    offset += ToInteger32(const_operand) * kPointerSize;
    store_base = elements;
  } else {
    // Even though the HLoadKeyed instruction forces the input
    // representation for the key to be an integer, the input gets replaced
    // during bound check elimination with the index argument to the bounds
    // check, which can be tagged, so that case must be handled here, too.
    if (instr->hydrogen()->key()->representation().IsSmi()) {
      __ SmiScale(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    } else {
      __ dsll(scratch, key, kPointerSizeLog2);
      __ daddu(store_base, elements, scratch);
    }
  }

  Representation representation = instr->hydrogen()->value()->representation();
  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
    ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
    if (FLAG_debug_code) {
      Register temp = scratch1();
      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
      __ AssertSmi(temp);
    }

    // Store int value directly to upper half of the smi.
    STATIC_ASSERT(kSmiTag == 0);
    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
    offset += kPointerSize / 2;
    representation = Representation::Integer32();
  }

  __ Store(value, MemOperand(store_base, offset), representation);

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ Daddu(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
                   GetRAState(),
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}
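// Note (illustrative): with 32-bit smis, scaling a smi key to a byte offset
// is a single shift, because the payload already sits 32 bits up. For
// kPointerSizeLog2 == 3 (and the dsra-by-29 used for doubles above):
//
//   // key == v << 32, element size 8 => offset == v << 3 == key >> 29.
//   int64_t ElementOffsetFromSmiKey(int64_t smi_key) {
//     return smi_key >> 29;  // arithmetic shift keeps the sign of v
//   }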
(...skipping 27 matching lines...)
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object_reg = ToRegister(instr->object());
  Register scratch = scratch0();

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
  __ Branch(&not_applicable, ne, scratch, Operand(from_map));

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ li(new_map_reg, Operand(to_map));
    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object_reg,
                         new_map_reg,
                         scratch,
                         GetRAState(),
                         kDontSaveFPRegs);
  } else {
    ASSERT(object_reg.is(a0));
    ASSERT(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(
(...skipping 64 matching lines...)
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  if (instr->index()->IsConstantOperand()) {
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
    __ push(scratch);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                          instr->context());
  __ AssertSmi(v0);
  __ SmiUntag(v0);
(...skipping 19 matching lines...)

  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  ASSERT(!char_code.is(result));

  __ Branch(deferred->entry(), hi,
            char_code, Operand(String::kMaxOneByteCharCode));
  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
  __ dsll(scratch, char_code, kPointerSizeLog2);
  __ Daddu(result, result, scratch);
  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
  __ Branch(deferred->entry(), eq, result, Operand(scratch));
  __ bind(deferred->exit());
}
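// Note (illustrative, hypothetical helper): the cache probe above is just an
// indexed FixedArray load; in C++ terms, roughly:
//
//   Object* LookupSingleCharCache(FixedArray* cache, uint32_t code) {
//     Object* entry = cache->get(code);            // dsll + Daddu + ld above
//     return entry->IsUndefined() ? NULL : entry;  // NULL => deferred path
//   }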


void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

(...skipping 11 matching lines...)


void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
  LOperand* input = instr->value();
  ASSERT(input->IsRegister() || input->IsStackSlot());
  LOperand* output = instr->result();
  ASSERT(output->IsDoubleRegister());
  FPURegister single_scratch = double_scratch0().low();
  if (input->IsStackSlot()) {
    Register scratch = scratch0();
    __ ld(scratch, ToMemOperand(input));
    __ mtc1(scratch, single_scratch);
  } else {
    __ mtc1(ToRegister(input), single_scratch);
  }
  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  LOperand* input = instr->value();
  LOperand* output = instr->result();

  FPURegister dbl_scratch = double_scratch0();
  __ mtc1(ToRegister(input), dbl_scratch);
  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);  // TODO(plind): f22?
}
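// Note (illustrative): Cvt_d_uw exists because cvt_d_w interprets its source
// as a signed 32-bit integer. The difference, in plain C++:
//
//   double Unsigned(uint32_t v) { return static_cast<double>(v); }
//   double SignedMisread(uint32_t v) {
//     return static_cast<double>(static_cast<int32_t>(v));  // bare cvt_d_w
//   }
//
// For v == 0x80000000u these give 2147483648.0 and -2147483648.0 respectively.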


void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagIU(instr_,
                                       instr_->value(),
                                       instr_->temp1(),
                                       instr_->temp2(),
(...skipping 37 matching lines...)
    }
    __ mtc1(src, dbl_scratch);
    __ cvt_d_w(dbl_scratch, dbl_scratch);
  } else {
    __ mtc1(src, dbl_scratch);
    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
  }

  if (FLAG_inline_new) {
    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
    __ Branch(&done);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ mov(dst, zero_reg);
    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(v0, dst);
  }

  // Done. Put the value in dbl_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}


void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() V8_OVERRIDE {
      codegen()->DoDeferredNumberTagD(instr_);
(...skipping 14 matching lines...)
    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
    // We want the untagged address first for performance
    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
                          DONT_TAG_RESULT);
  } else {
    __ Branch(deferred->entry());
  }
  __ bind(deferred->exit());
  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
  // Now that we have finished with the object's real address tag it
  __ Daddu(reg, reg, kHeapObjectTag);
}


void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ mov(reg, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ Dsubu(v0, v0, kHeapObjectTag);
  __ StoreToSafepointRegisterSlot(v0, reg);
}


void LCodeGen::DoSmiTag(LSmiTag* instr) {
  HChange* hchange = instr->hydrogen();
  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      hchange->value()->CheckFlag(HValue::kUint32)) {
    __ And(at, input, Operand(0x80000000));
    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
  }
  if (hchange->CheckFlag(HValue::kCanOverflow) &&
      !hchange->value()->CheckFlag(HValue::kUint32)) {
    __ SmiTagCheckOverflow(output, input, at);
    DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
  } else {
    __ SmiTag(output, input);
  }
}
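// Note (illustrative): with a 32-bit smi payload, the uint32 check only has
// to reject values with bit 31 set (they would read back negative), whereas
// a 31-bit payload must mask 0xc0000000:
//
//   bool Uint32FitsInSmi64(uint32_t v) { return (v & 0x80000000u) == 0; }
//   bool Uint32FitsInSmi32(uint32_t v) { return (v & 0xc0000000u) == 0; }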
(...skipping 20 matching lines...)
                                bool can_convert_undefined_to_nan,
                                bool deoptimize_on_minus_zero,
                                LEnvironment* env,
                                NumberUntagDMode mode) {
  Register scratch = scratch0();
  Label convert, load_smi, done;
  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
    // Heap number map check.
    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
    if (can_convert_undefined_to_nan) {
      __ Branch(&convert, ne, scratch, Operand(at));
    } else {
      DeoptimizeIf(ne, env, scratch, Operand(at));
    }
    // Load heap number.
    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
    if (deoptimize_on_minus_zero) {
      __ mfc1(at, result_reg);
      __ Branch(&done, ne, at, Operand(zero_reg));
      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
    }
    __ Branch(&done);
    if (can_convert_undefined_to_nan) {
      __ bind(&convert);
      // Convert undefined (and hole) to NaN.
      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
      DeoptimizeIf(ne, env, input_reg, Operand(at));
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
(...skipping 19 matching lines...)
  DoubleRegister double_scratch = double_scratch0();
  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());

  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));

  Label done;

  // The input is a tagged HeapObject.
  // Heap number map check.
  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  // This 'at' value and scratch1 map value are used for tests in both clauses
  // of the if.

  if (instr->truncating()) {
    // Performs a truncating conversion of a floating point number as used by
    // the JS bitwise operations.
    Label no_heap_number, check_bools, check_false;
    // Check HeapNumber map.
    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
(...skipping 37 matching lines...)
                       double_scratch2,
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed.
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Branch(&done, ne, input_reg, Operand(zero_reg));

      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
    }
  }
  __ bind(&done);
}
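// Note (illustrative): the -0.0 deopt test above fires only when the
// truncated integer is zero and the double's high word is exactly the sign
// bit. A host-side sketch of the same predicate:
//
//   #include <cstdint>
//   #include <cstring>
//
//   bool NeedsMinusZeroDeopt(int32_t truncated, double d) {
//     if (truncated != 0) return false;
//     uint64_t bits;
//     std::memcpy(&bits, &d, sizeof bits);
//     uint32_t hi = static_cast<uint32_t>(bits >> 32);  // mfhc1
//     return hi == 0x80000000u;  // HeapNumber::kSignMask in the high word
//   }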


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
(...skipping 67 matching lines...)
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
}


void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  Register result_reg = ToRegister(instr->result());
(...skipping 12 matching lines...)
                       double_scratch0(),
                       except_flag,
                       kCheckForInexactConversion);

    // Deopt if the operation did not succeed (except_flag != 0).
    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      Label done;
      __ Branch(&done, ne, result_reg, Operand(zero_reg));
      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
      __ bind(&done);
    }
  }
  __ SmiTag(result_reg, result_reg);
}
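// Note (illustrative): unlike a 31-bit-smi port, the final SmiTag here needs
// no overflow check, because every int32 fits in a 32-bit smi payload; the
// tag is a single doubleword shift:
//
//   int64_t SmiTag64(int32_t v) { return static_cast<int64_t>(v) << 32; }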


void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ SmiTst(ToRegister(input), at);
  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
}


(...skipping 46 matching lines...)


void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Register reg = ToRegister(instr->value());
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  AllowDeferredHandleDereference smi_check;
  if (isolate()->heap()->InNewSpace(*object)) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ li(at, Operand(Handle<Object>(cell)));
    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(at));
  } else {
    DeoptimizeIf(ne, instr->environment(), reg,
                 Operand(object));
  }
}


void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
(...skipping 34 matching lines...)
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  Register map_reg = scratch0();
  LOperand* input = instr->value();
  ASSERT(input->IsRegister());
  Register reg = ToRegister(input);
  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
(...skipping 31 matching lines...)
  Register scratch = scratch0();
  Register input_reg = ToRegister(instr->unclamped());
  Register result_reg = ToRegister(instr->result());
  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
  Label is_smi, done, heap_number;

  // Both smi and heap number cases are handled.
  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);

  // Check for heap number
  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  DeoptimizeIf(ne, instr->environment(), input_reg,
               Operand(factory()->undefined_value()));
  __ mov(result_reg, zero_reg);
  __ jmp(&done);

  // Heap number
(...skipping 75 matching lines...)
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    STATIC_ASSERT(kHeapObjectTag == 1);
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ li(scratch, Operand(size - kHeapObjectTag));
    } else {
      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
    }
    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
    Label loop;
    __ bind(&loop);
    __ Dsubu(scratch, scratch, Operand(kPointerSize));
    __ Daddu(at, result, Operand(scratch));
    __ sd(scratch2, MemOperand(at));
    __ Branch(&loop, ge, scratch, Operand(zero_reg));
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ mov(result, zero_reg);

  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    ASSERT(!size.is(result));
    __ SmiTag(size);
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ li(v0, Operand(Smi::FromInt(size)));
      __ Push(v0);
    } else {
      // We should never get here at runtime => abort
      __ stop("invalid allocation size");
      return;
    }
  }

  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ li(v0, Operand(Smi::FromInt(flags)));
  __ Push(v0);

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(v0, result);
}
5361 5421
5362 5422
5363 void LCodeGen::DoToFastProperties(LToFastProperties* instr) { 5423 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
5364 ASSERT(ToRegister(instr->value()).is(a0)); 5424 ASSERT(ToRegister(instr->value()).is(a0));
5365 ASSERT(ToRegister(instr->result()).is(v0)); 5425 ASSERT(ToRegister(instr->result()).is(v0));
5366 __ push(a0); 5426 __ push(a0);
5367 CallRuntime(Runtime::kToFastProperties, 1, instr); 5427 CallRuntime(Runtime::kToFastProperties, 1, instr);
5368 } 5428 }
5369 5429
5370 5430
5371 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { 5431 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
5372 ASSERT(ToRegister(instr->context()).is(cp)); 5432 ASSERT(ToRegister(instr->context()).is(cp));
5373 Label materialized; 5433 Label materialized;
5374 // Registers will be used as follows: 5434 // Registers will be used as follows:
5375 // t3 = literals array. 5435 // a7 = literals array.
5376 // a1 = regexp literal. 5436 // a1 = regexp literal.
5377 // a0 = regexp literal clone. 5437 // a0 = regexp literal clone.
5378 // a2 and t0-t2 are used as temporaries. 5438 // a2 and a4-a6 are used as temporaries.
5379 int literal_offset = 5439 int literal_offset =
5380 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); 5440 FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
5381 __ li(t3, instr->hydrogen()->literals()); 5441 __ li(a7, instr->hydrogen()->literals());
5382 __ lw(a1, FieldMemOperand(t3, literal_offset)); 5442 __ ld(a1, FieldMemOperand(a7, literal_offset));
5383 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 5443 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5384 __ Branch(&materialized, ne, a1, Operand(at)); 5444 __ Branch(&materialized, ne, a1, Operand(at));
5385 5445
5386 // Create regexp literal using runtime function 5446 // Create regexp literal using runtime function
5387 // Result will be in v0. 5447 // Result will be in v0.
5388 __ li(t2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); 5448 __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
5389 __ li(t1, Operand(instr->hydrogen()->pattern())); 5449 __ li(a5, Operand(instr->hydrogen()->pattern()));
5390 __ li(t0, Operand(instr->hydrogen()->flags())); 5450 __ li(a4, Operand(instr->hydrogen()->flags()));
5391 __ Push(t3, t2, t1, t0); 5451 __ Push(a7, a6, a5, a4);
5392 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); 5452 CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
5393 __ mov(a1, v0); 5453 __ mov(a1, v0);
5394 5454
5395 __ bind(&materialized); 5455 __ bind(&materialized);
5396 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; 5456 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
5397 Label allocated, runtime_allocate; 5457 Label allocated, runtime_allocate;
5398 5458
5399 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); 5459 __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
5400 __ jmp(&allocated); 5460 __ jmp(&allocated);
5401 5461
5402 __ bind(&runtime_allocate); 5462 __ bind(&runtime_allocate);
5403 __ li(a0, Operand(Smi::FromInt(size))); 5463 __ li(a0, Operand(Smi::FromInt(size)));
5404 __ Push(a1, a0); 5464 __ Push(a1, a0);
5405 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); 5465 CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
5406 __ pop(a1); 5466 __ pop(a1);
5407 5467
5408 __ bind(&allocated); 5468 __ bind(&allocated);
5409 // Copy the content into the newly allocated memory. 5469 // Copy the content into the newly allocated memory.
5410 // (Unroll copy loop once for better throughput). 5470 // (Unroll copy loop once for better throughput).
5411 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) { 5471 for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
5412 __ lw(a3, FieldMemOperand(a1, i)); 5472 __ ld(a3, FieldMemOperand(a1, i));
5413 __ lw(a2, FieldMemOperand(a1, i + kPointerSize)); 5473 __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
5414 __ sw(a3, FieldMemOperand(v0, i)); 5474 __ sd(a3, FieldMemOperand(v0, i));
5415 __ sw(a2, FieldMemOperand(v0, i + kPointerSize)); 5475 __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
5416 } 5476 }
5417 if ((size % (2 * kPointerSize)) != 0) { 5477 if ((size % (2 * kPointerSize)) != 0) {
5418 __ lw(a3, FieldMemOperand(a1, size - kPointerSize)); 5478 __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
5419 __ sw(a3, FieldMemOperand(v0, size - kPointerSize)); 5479 __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
5420 } 5480 }
5421 } 5481 }
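The copy above is unrolled two words per iteration; with kPointerSize now 8 on mips64, each pass moves 16 bytes via ld/sd where the mips code moved 8 via lw/sw. The same pattern in plain C++, as a sketch (the helper below is illustrative, not V8 code):

    #include <cstdint>
    constexpr int kPointerSize = sizeof(intptr_t);  // 8 on mips64

    // Copy 'size' bytes (a whole number of words), two words per iteration,
    // plus one trailing word when the word count is odd.
    void UnrolledWordCopy(intptr_t* dst, const intptr_t* src, int size) {
      int i = 0;
      for (; i < size - kPointerSize; i += 2 * kPointerSize) {
        dst[i / kPointerSize] = src[i / kPointerSize];          // ld/sd pair 1
        dst[i / kPointerSize + 1] = src[i / kPointerSize + 1];  // ld/sd pair 2
      }
      if ((size % (2 * kPointerSize)) != 0) {
        dst[i / kPointerSize] = src[i / kPointerSize];          // odd tail word
      }
    }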
5422 5482
5423 5483
5424 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { 5484 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
5425 ASSERT(ToRegister(instr->context()).is(cp)); 5485 ASSERT(ToRegister(instr->context()).is(cp));
5426 // Use the fast case closure allocation code that allocates in new 5486 // Use the fast case closure allocation code that allocates in new
5427 // space for nested functions that don't need literals cloning. 5487 // space for nested functions that don't need literals cloning.
5428 bool pretenure = instr->hydrogen()->pretenure(); 5488 bool pretenure = instr->hydrogen()->pretenure();
5429 if (!pretenure && instr->hydrogen()->has_no_literals()) { 5489 if (!pretenure && instr->hydrogen()->has_no_literals()) {
(...skipping 49 matching lines...)
5479 Register* cmp1, 5539 Register* cmp1,
5480 Operand* cmp2) { 5540 Operand* cmp2) {
5481 // This function utilizes the delay slot heavily. This is used to load 5541 // This function utilizes the delay slot heavily. This is used to load
5482 // values that are always usable without depending on the type of the input 5542 // values that are always usable without depending on the type of the input
5483 // register. 5543 // register.
5484 Condition final_branch_condition = kNoCondition; 5544 Condition final_branch_condition = kNoCondition;
5485 Register scratch = scratch0(); 5545 Register scratch = scratch0();
5486 Factory* factory = isolate()->factory(); 5546 Factory* factory = isolate()->factory();
5487 if (String::Equals(type_name, factory->number_string())) { 5547 if (String::Equals(type_name, factory->number_string())) {
5488 __ JumpIfSmi(input, true_label); 5548 __ JumpIfSmi(input, true_label);
5489 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); 5549 __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5490 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 5550 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
5491 *cmp1 = input; 5551 *cmp1 = input;
5492 *cmp2 = Operand(at); 5552 *cmp2 = Operand(at);
5493 final_branch_condition = eq; 5553 final_branch_condition = eq;
5494 5554
5495 } else if (String::Equals(type_name, factory->string_string())) { 5555 } else if (String::Equals(type_name, factory->string_string())) {
5496 __ JumpIfSmi(input, false_label); 5556 __ JumpIfSmi(input, false_label);
5497 __ GetObjectType(input, input, scratch); 5557 __ GetObjectType(input, input, scratch);
5498 __ Branch(USE_DELAY_SLOT, false_label, 5558 __ Branch(USE_DELAY_SLOT, false_label,
5499 ge, scratch, Operand(FIRST_NONSTRING_TYPE)); 5559 ge, scratch, Operand(FIRST_NONSTRING_TYPE));
(...skipping 27 matching lines...)
5527 *cmp2 = Operand(input); 5587 *cmp2 = Operand(input);
5528 final_branch_condition = eq; 5588 final_branch_condition = eq;
5529 5589
5530 } else if (String::Equals(type_name, factory->undefined_string())) { 5590 } else if (String::Equals(type_name, factory->undefined_string())) {
5531 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 5591 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5532 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); 5592 __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
5533 // The first instruction of JumpIfSmi is an And - it is safe in the delay 5593 // The first instruction of JumpIfSmi is an And - it is safe in the delay
5534 // slot. 5594 // slot.
5535 __ JumpIfSmi(input, false_label); 5595 __ JumpIfSmi(input, false_label);
5536 // Check for undetectable objects => true. 5596 // Check for undetectable objects => true.
5537 __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); 5597 __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
5538 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset)); 5598 __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
5539 __ And(at, at, 1 << Map::kIsUndetectable); 5599 __ And(at, at, 1 << Map::kIsUndetectable);
5540 *cmp1 = at; 5600 *cmp1 = at;
5541 *cmp2 = Operand(zero_reg); 5601 *cmp2 = Operand(zero_reg);
5542 final_branch_condition = ne; 5602 final_branch_condition = ne;
5543 5603
5544 } else if (String::Equals(type_name, factory->function_string())) { 5604 } else if (String::Equals(type_name, factory->function_string())) {
5545 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); 5605 STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
5546 __ JumpIfSmi(input, false_label); 5606 __ JumpIfSmi(input, false_label);
5547 __ GetObjectType(input, scratch, input); 5607 __ GetObjectType(input, scratch, input);
(...skipping 38 matching lines...)
5586 EmitIsConstructCall(temp1, scratch0()); 5646 EmitIsConstructCall(temp1, scratch0());
5587 5647
5588 EmitBranch(instr, eq, temp1, 5648 EmitBranch(instr, eq, temp1,
5589 Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5649 Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5590 } 5650 }
5591 5651
5592 5652
5593 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { 5653 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
5594 ASSERT(!temp1.is(temp2)); 5654 ASSERT(!temp1.is(temp2));
5595 // Get the frame pointer for the calling frame. 5655 // Get the frame pointer for the calling frame.
5596 __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 5656 __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
5597 5657
5598 // Skip the arguments adaptor frame if it exists. 5658 // Skip the arguments adaptor frame if it exists.
5599 Label check_frame_marker; 5659 Label check_frame_marker;
5600 __ lw(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); 5660 __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
5601 __ Branch(&check_frame_marker, ne, temp2, 5661 __ Branch(&check_frame_marker, ne, temp2,
5602 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 5662 Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
5603 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); 5663 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5604 5664
5605 // Check the marker in the calling frame. 5665 // Check the marker in the calling frame.
5606 __ bind(&check_frame_marker); 5666 __ bind(&check_frame_marker);
5607 __ lw(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5667 __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5608 } 5668 }
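Taken together, DoIsConstructCallAndBranch and EmitIsConstructCall walk one frame up, skip an arguments-adaptor frame if one is interposed, and test the frame marker. The same walk as a C++ sketch over a hypothetical frame layout (field names are illustrative; the real offsets come from StandardFrameConstants):

    #include <cstdint>
    struct Frame {
      Frame* caller_fp;   // kCallerFPOffset
      intptr_t context;   // kContextOffset
      intptr_t marker;    // kMarkerOffset
    };

    // Returns true when the calling frame is a construct frame.
    bool IsConstructCall(const Frame* fp, intptr_t adaptor_marker,
                         intptr_t construct_marker) {
      const Frame* caller = fp->caller_fp;
      if (caller->context == adaptor_marker)   // skip the adaptor frame
        caller = caller->caller_fp;
      return caller->marker == construct_marker;
    }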
5609 5669
5610 5670
5611 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { 5671 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
5612 if (!info()->IsStub()) { 5672 if (!info()->IsStub()) {
5613 // Ensure that we have enough space after the previous lazy-bailout 5673 // Ensure that we have enough space after the previous lazy-bailout
5614 // instruction for patching the code here. 5674 // instruction for patching the code here.
5615 int current_pc = masm()->pc_offset(); 5675 int current_pc = masm()->pc_offset();
5616 if (current_pc < last_lazy_deopt_pc_ + space_needed) { 5676 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
5617 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; 5677 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
(...skipping 114 matching lines...)
5732 GenerateOsrPrologue(); 5792 GenerateOsrPrologue();
5733 } 5793 }
5734 5794
5735 5795
5736 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { 5796 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
5737 Register result = ToRegister(instr->result()); 5797 Register result = ToRegister(instr->result());
5738 Register object = ToRegister(instr->object()); 5798 Register object = ToRegister(instr->object());
5739 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 5799 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
5740 DeoptimizeIf(eq, instr->environment(), object, Operand(at)); 5800 DeoptimizeIf(eq, instr->environment(), object, Operand(at));
5741 5801
5742 Register null_value = t1; 5802 Register null_value = a5;
5743 __ LoadRoot(null_value, Heap::kNullValueRootIndex); 5803 __ LoadRoot(null_value, Heap::kNullValueRootIndex);
5744 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value)); 5804 DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
5745 5805
5746 __ And(at, object, kSmiTagMask); 5806 __ And(at, object, kSmiTagMask);
5747 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 5807 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5748 5808
5749 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); 5809 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
5750 __ GetObjectType(object, a1, a1); 5810 __ GetObjectType(object, a1, a1);
5751 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE)); 5811 DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
5752 5812
5753 Label use_cache, call_runtime; 5813 Label use_cache, call_runtime;
5754 ASSERT(object.is(a0)); 5814 ASSERT(object.is(a0));
5755 __ CheckEnumCache(null_value, &call_runtime); 5815 __ CheckEnumCache(null_value, &call_runtime);
5756 5816
5757 __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); 5817 __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
5758 __ Branch(&use_cache); 5818 __ Branch(&use_cache);
5759 5819
5760 // Get the set of properties to enumerate. 5820 // Get the set of properties to enumerate.
5761 __ bind(&call_runtime); 5821 __ bind(&call_runtime);
5762 __ push(object); 5822 __ push(object);
5763 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); 5823 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
5764 5824
5765 __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); 5825 __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
5766 ASSERT(result.is(v0)); 5826 ASSERT(result.is(v0));
5767 __ LoadRoot(at, Heap::kMetaMapRootIndex); 5827 __ LoadRoot(at, Heap::kMetaMapRootIndex);
5768 DeoptimizeIf(ne, instr->environment(), a1, Operand(at)); 5828 DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
5769 __ bind(&use_cache); 5829 __ bind(&use_cache);
5770 } 5830 }
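DoForInPrepareMap deoptimizes for every receiver that cannot take the fast for-in path; otherwise result ends up holding the receiver's map, either directly (enum cache hit) or via Runtime::kGetPropertyNamesFast. The decision tree as a sketch (the boolean inputs stand in for the emitted checks; they are not V8 APIs):

    enum class ForInPrep { kDeopt, kUseMapCache, kCallRuntime };

    ForInPrep PrepareForIn(bool is_undefined_or_null, bool is_smi,
                           bool is_js_proxy, bool enum_cache_valid) {
      if (is_undefined_or_null || is_smi || is_js_proxy)
        return ForInPrep::kDeopt;          // the DeoptimizeIf(...) paths
      if (enum_cache_valid)
        return ForInPrep::kUseMapCache;    // result = object's map
      return ForInPrep::kCallRuntime;      // runtime call; afterwards deopt
                                           // unless it returned a map (the
                                           // meta-map check above)
    }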
5771 5831
5772 5832
5773 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { 5833 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
5774 Register map = ToRegister(instr->map()); 5834 Register map = ToRegister(instr->map());
5775 Register result = ToRegister(instr->result()); 5835 Register result = ToRegister(instr->result());
5776 Label load_cache, done; 5836 Label load_cache, done;
5777 __ EnumLength(result, map); 5837 __ EnumLength(result, map);
5778 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0))); 5838 __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
5779 __ li(result, Operand(isolate()->factory()->empty_fixed_array())); 5839 __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
5780 __ jmp(&done); 5840 __ jmp(&done);
5781 5841
5782 __ bind(&load_cache); 5842 __ bind(&load_cache);
5783 __ LoadInstanceDescriptors(map, result); 5843 __ LoadInstanceDescriptors(map, result);
5784 __ lw(result, 5844 __ ld(result,
5785 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); 5845 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
5786 __ lw(result, 5846 __ ld(result,
5787 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); 5847 FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
5788 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg)); 5848 DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
5789 5849
5790 __ bind(&done); 5850 __ bind(&done);
5791 } 5851 }
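DoForInCacheArray turns the map into the actual key array: an EnumLength of zero short-circuits to the empty fixed array; otherwise the cached keys are loaded out of the instance descriptors, with a deopt if that slot is zero. A sketch with stubbed-out helpers (the declarations are stand-ins for V8 internals, not real APIs):

    struct Map;
    struct FixedArray;
    extern int EnumLength(const Map* map);                 // Smi in the map
    extern FixedArray* DescriptorEnumCache(const Map* map, int idx);
    extern FixedArray* const kEmptyFixedArray;
    extern void Deoptimize();

    FixedArray* ForInCacheArray(const Map* map, int idx) {
      if (EnumLength(map) == 0) return kEmptyFixedArray;  // nothing to walk
      FixedArray* keys = DescriptorEnumCache(map, idx);   // the two ld's above
      if (keys == nullptr) Deoptimize();                  // stale/absent cache
      return keys;
    }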
5792 5852
5793 5853
5794 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { 5854 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
5795 Register object = ToRegister(instr->value()); 5855 Register object = ToRegister(instr->value());
5796 Register map = ToRegister(instr->map()); 5856 Register map = ToRegister(instr->map());
5797 __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); 5857 __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
5798 DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0())); 5858 DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
5799 } 5859 }
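DoCheckMapValue is the per-iteration guard that keeps those cached keys valid: reload the receiver's map and deoptimize on any mismatch. Equivalently, as a sketch (types illustrative):

    struct Map;
    struct HeapObject { const Map* map; };
    extern void Deoptimize();

    void CheckMapValue(const HeapObject* object, const Map* expected_map) {
      if (object->map != expected_map) Deoptimize();  // ld + DeoptimizeIf(ne)
    }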
5800 5860
5801 5861
5802 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 5862 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5803 Register result, 5863 Register result,
5804 Register object, 5864 Register object,
5805 Register index) { 5865 Register index) {
5806 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 5866 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5807 __ Push(object, index); 5867 __ Push(object, index);
(...skipping 36 matching lines...)
5844 Register scratch = scratch0(); 5904 Register scratch = scratch0();
5845 5905
5846 DeferredLoadMutableDouble* deferred; 5906 DeferredLoadMutableDouble* deferred;
5847 deferred = new(zone()) DeferredLoadMutableDouble( 5907 deferred = new(zone()) DeferredLoadMutableDouble(
5848 this, instr, result, object, index); 5908 this, instr, result, object, index);
5849 5909
5850 Label out_of_object, done; 5910 Label out_of_object, done;
5851 5911
5852 __ And(scratch, index, Operand(Smi::FromInt(1))); 5912 __ And(scratch, index, Operand(Smi::FromInt(1)));
5853 __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg)); 5913 __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
5854 __ sra(index, index, 1); 5914 __ dsra(index, index, 1);
5855 5915
5856 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg)); 5916 __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
5857 __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize); // In delay slot. 5917 __ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
5858 5918 __ Daddu(scratch, object, scratch);
5859 STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); 5919 __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5860 __ Addu(scratch, object, scratch);
5861 __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
5862 5920
5863 __ Branch(&done); 5921 __ Branch(&done);
5864 5922
5865 __ bind(&out_of_object); 5923 __ bind(&out_of_object);
5866 __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); 5924 __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
5867 // Index is equal to negated out of object property index plus 1. 5925 // Index is equal to negated out of object property index plus 1.
5868 __ Subu(scratch, result, scratch); 5926 __ Dsubu(scratch, result, scratch);
5869 __ lw(result, FieldMemOperand(scratch, 5927 __ ld(result, FieldMemOperand(scratch,
5870 FixedArray::kHeaderSize - kPointerSize)); 5928 FixedArray::kHeaderSize - kPointerSize));
5871 __ bind(deferred->exit()); 5929 __ bind(deferred->exit());
5872 __ bind(&done); 5930 __ bind(&done);
5873 } 5931 }
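DoLoadFieldByIndex decodes a smi index whose low payload bit routes mutable heap-number fields to the deferred path, and whose sign selects in-object versus out-of-object storage. The arithmetic on the untagged index, as a sketch (this follows the encoding visible in the code above; the k* constants are illustrative placeholders):

    #include <cstdint>
    constexpr intptr_t kPointerSize = 8;            // mips64
    constexpr intptr_t kJSObjectHeaderSize = 24;    // placeholder value
    constexpr intptr_t kFixedArrayHeaderSize = 16;  // placeholder value

    // Returns the byte offset to load at; *in_object selects the base
    // (the object itself vs. its properties array), *is_double selects
    // the deferred mutable-heap-number path.
    intptr_t DecodeFieldIndex(intptr_t index, bool* is_double,
                              bool* in_object) {
      *is_double = (index & 1) != 0;   // And(scratch, index, Smi 1)
      index >>= 1;                     // dsra(index, index, 1)
      *in_object = index >= 0;
      if (*in_object)
        return kJSObjectHeaderSize + index * kPointerSize;
      // Index is the negated out-of-object property index plus 1.
      return kFixedArrayHeaderSize + (-index - 1) * kPointerSize;
    }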
5874 5932
5875 5933
5876 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { 5934 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
5877 Register context = ToRegister(instr->context()); 5935 Register context = ToRegister(instr->context());
5878 __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); 5936 __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
5879 } 5937 }
5880 5938
5881 5939
5882 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { 5940 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
5883 Handle<ScopeInfo> scope_info = instr->scope_info(); 5941 Handle<ScopeInfo> scope_info = instr->scope_info();
5884 __ li(at, scope_info); 5942 __ li(at, scope_info);
5885 __ Push(at, ToRegister(instr->function())); 5943 __ Push(at, ToRegister(instr->function()));
5886 CallRuntime(Runtime::kPushBlockContext, 2, instr); 5944 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5887 RecordSafepoint(Safepoint::kNoLazyDeopt); 5945 RecordSafepoint(Safepoint::kNoLazyDeopt);
5888 } 5946 }
5889 5947
5890 5948
5891 #undef __ 5949 #undef __
5892 5950
5893 } } // namespace v8::internal 5951 } } // namespace v8::internal