Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(568)

Side by Side Diff: src/mips/lithium-codegen-mips.cc

Issue 148593004: A64: Synchronize with r18084. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/mips/lithium-codegen-mips.h ('k') | src/mips/lithium-mips.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
91 info()->CommitDependencies(code); 91 info()->CommitDependencies(code);
92 } 92 }
93 93
94 94
95 void LChunkBuilder::Abort(BailoutReason reason) { 95 void LChunkBuilder::Abort(BailoutReason reason) {
96 info()->set_bailout_reason(reason); 96 info()->set_bailout_reason(reason);
97 status_ = ABORTED; 97 status_ = ABORTED;
98 } 98 }
99 99
100 100
101 void LCodeGen::SaveCallerDoubles() {
102 ASSERT(info()->saves_caller_doubles());
103 ASSERT(NeedsEagerFrame());
104 Comment(";;; Save clobbered callee double registers");
105 int count = 0;
106 BitVector* doubles = chunk()->allocated_double_registers();
107 BitVector::Iterator save_iterator(doubles);
108 while (!save_iterator.Done()) {
109 __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
110 MemOperand(sp, count * kDoubleSize));
111 save_iterator.Advance();
112 count++;
113 }
114 }
115
116
117 void LCodeGen::RestoreCallerDoubles() {
118 ASSERT(info()->saves_caller_doubles());
119 ASSERT(NeedsEagerFrame());
120 Comment(";;; Restore clobbered callee double registers");
121 BitVector* doubles = chunk()->allocated_double_registers();
122 BitVector::Iterator save_iterator(doubles);
123 int count = 0;
124 while (!save_iterator.Done()) {
125 __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
126 MemOperand(sp, count * kDoubleSize));
127 save_iterator.Advance();
128 count++;
129 }
130 }
131
132
101 bool LCodeGen::GeneratePrologue() { 133 bool LCodeGen::GeneratePrologue() {
102 ASSERT(is_generating()); 134 ASSERT(is_generating());
103 135
104 if (info()->IsOptimizing()) { 136 if (info()->IsOptimizing()) {
105 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 137 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
106 138
107 #ifdef DEBUG 139 #ifdef DEBUG
108 if (strlen(FLAG_stop_at) > 0 && 140 if (strlen(FLAG_stop_at) > 0 &&
109 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { 141 info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
110 __ stop("stop_at"); 142 __ stop("stop_at");
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after
153 __ sw(a1, MemOperand(a0, 2 * kPointerSize)); 185 __ sw(a1, MemOperand(a0, 2 * kPointerSize));
154 __ Branch(&loop, ne, a0, Operand(sp)); 186 __ Branch(&loop, ne, a0, Operand(sp));
155 __ pop(a1); 187 __ pop(a1);
156 __ pop(a0); 188 __ pop(a0);
157 } else { 189 } else {
158 __ Subu(sp, sp, Operand(slots * kPointerSize)); 190 __ Subu(sp, sp, Operand(slots * kPointerSize));
159 } 191 }
160 } 192 }
161 193
162 if (info()->saves_caller_doubles()) { 194 if (info()->saves_caller_doubles()) {
163 Comment(";;; Save clobbered callee double registers"); 195 SaveCallerDoubles();
164 int count = 0;
165 BitVector* doubles = chunk()->allocated_double_registers();
166 BitVector::Iterator save_iterator(doubles);
167 while (!save_iterator.Done()) {
168 __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
169 MemOperand(sp, count * kDoubleSize));
170 save_iterator.Advance();
171 count++;
172 }
173 } 196 }
174 197
175 // Possibly allocate a local context. 198 // Possibly allocate a local context.
176 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 199 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
177 if (heap_slots > 0) { 200 if (heap_slots > 0) {
178 Comment(";;; Allocate local context"); 201 Comment(";;; Allocate local context");
179 // Argument to NewContext is the function, which is in a1. 202 // Argument to NewContext is the function, which is in a1.
180 __ push(a1); 203 __ push(a1);
181 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 204 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
182 FastNewContextStub stub(heap_slots); 205 FastNewContextStub stub(heap_slots);
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
250 code->instr()->Mnemonic()); 273 code->instr()->Mnemonic());
251 __ bind(code->entry()); 274 __ bind(code->entry());
252 if (NeedsDeferredFrame()) { 275 if (NeedsDeferredFrame()) {
253 Comment(";;; Build frame"); 276 Comment(";;; Build frame");
254 ASSERT(!frame_is_built_); 277 ASSERT(!frame_is_built_);
255 ASSERT(info()->IsStub()); 278 ASSERT(info()->IsStub());
256 frame_is_built_ = true; 279 frame_is_built_ = true;
257 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); 280 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
258 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 281 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
259 __ push(scratch0()); 282 __ push(scratch0());
260 __ Addu(fp, sp, Operand(2 * kPointerSize)); 283 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
261 Comment(";;; Deferred code"); 284 Comment(";;; Deferred code");
262 } 285 }
263 code->Generate(); 286 code->Generate();
264 if (NeedsDeferredFrame()) { 287 if (NeedsDeferredFrame()) {
265 Comment(";;; Destroy frame"); 288 Comment(";;; Destroy frame");
266 ASSERT(frame_is_built_); 289 ASSERT(frame_is_built_);
267 __ pop(at); 290 __ pop(at);
268 __ MultiPop(cp.bit() | fp.bit() | ra.bit()); 291 __ MultiPop(cp.bit() | fp.bit() | ra.bit());
269 frame_is_built_ = false; 292 frame_is_built_ = false;
270 } 293 }
(...skipping 20 matching lines...) Expand all
291 Address entry = deopt_jump_table_[i].address; 314 Address entry = deopt_jump_table_[i].address;
292 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; 315 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
293 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); 316 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
294 if (id == Deoptimizer::kNotDeoptimizationEntry) { 317 if (id == Deoptimizer::kNotDeoptimizationEntry) {
295 Comment(";;; jump table entry %d.", i); 318 Comment(";;; jump table entry %d.", i);
296 } else { 319 } else {
297 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); 320 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
298 } 321 }
299 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); 322 __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
300 if (deopt_jump_table_[i].needs_frame) { 323 if (deopt_jump_table_[i].needs_frame) {
324 ASSERT(!info()->saves_caller_doubles());
301 if (needs_frame.is_bound()) { 325 if (needs_frame.is_bound()) {
302 __ Branch(&needs_frame); 326 __ Branch(&needs_frame);
303 } else { 327 } else {
304 __ bind(&needs_frame); 328 __ bind(&needs_frame);
305 __ MultiPush(cp.bit() | fp.bit() | ra.bit()); 329 __ MultiPush(cp.bit() | fp.bit() | ra.bit());
306 // This variant of deopt can only be used with stubs. Since we don't 330 // This variant of deopt can only be used with stubs. Since we don't
307 // have a function pointer to install in the stack frame that we're 331 // have a function pointer to install in the stack frame that we're
308 // building, install a special marker there instead. 332 // building, install a special marker there instead.
309 ASSERT(info()->IsStub()); 333 ASSERT(info()->IsStub());
310 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); 334 __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
311 __ push(scratch0()); 335 __ push(scratch0());
312 __ Addu(fp, sp, Operand(2 * kPointerSize)); 336 __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
313 __ Call(t9); 337 __ Call(t9);
314 } 338 }
315 } else { 339 } else {
340 if (info()->saves_caller_doubles()) {
341 ASSERT(info()->IsStub());
342 RestoreCallerDoubles();
343 }
316 __ Call(t9); 344 __ Call(t9);
317 } 345 }
318 } 346 }
319 __ RecordComment("]"); 347 __ RecordComment("]");
320 348
321 // The deoptimization jump table is the last part of the instruction 349 // The deoptimization jump table is the last part of the instruction
322 // sequence. Mark the generated code as done unless we bailed out. 350 // sequence. Mark the generated code as done unless we bailed out.
323 if (!is_aborted()) status_ = DONE; 351 if (!is_aborted()) status_ = DONE;
324 return !is_aborted(); 352 return !is_aborted();
325 } 353 }
(...skipping 453 matching lines...) Expand 10 before | Expand all | Expand 10 after
779 if (info()->ShouldTrapOnDeopt()) { 807 if (info()->ShouldTrapOnDeopt()) {
780 Label skip; 808 Label skip;
781 if (condition != al) { 809 if (condition != al) {
782 __ Branch(&skip, NegateCondition(condition), src1, src2); 810 __ Branch(&skip, NegateCondition(condition), src1, src2);
783 } 811 }
784 __ stop("trap_on_deopt"); 812 __ stop("trap_on_deopt");
785 __ bind(&skip); 813 __ bind(&skip);
786 } 814 }
787 815
788 ASSERT(info()->IsStub() || frame_is_built_); 816 ASSERT(info()->IsStub() || frame_is_built_);
789 if (condition == al && frame_is_built_) { 817 // Go through jump table if we need to handle condition, build frame, or
818 // restore caller doubles.
819 if (condition == al && frame_is_built_ &&
820 !info()->saves_caller_doubles()) {
790 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2); 821 __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
791 } else { 822 } else {
792 // We often have several deopts to the same entry, reuse the last 823 // We often have several deopts to the same entry, reuse the last
793 // jump entry if this is the case. 824 // jump entry if this is the case.
794 if (deopt_jump_table_.is_empty() || 825 if (deopt_jump_table_.is_empty() ||
795 (deopt_jump_table_.last().address != entry) || 826 (deopt_jump_table_.last().address != entry) ||
796 (deopt_jump_table_.last().bailout_type != bailout_type) || 827 (deopt_jump_table_.last().bailout_type != bailout_type) ||
797 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { 828 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
798 Deoptimizer::JumpTableEntry table_entry(entry, 829 Deoptimizer::JumpTableEntry table_entry(entry,
799 bailout_type, 830 bailout_type,
(...skipping 284 matching lines...) Expand 10 before | Expand all | Expand 10 after
1084 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1115 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1085 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg)); 1116 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
1086 } 1117 }
1087 __ Branch(USE_DELAY_SLOT, &done); 1118 __ Branch(USE_DELAY_SLOT, &done);
1088 __ subu(result_reg, zero_reg, result_reg); 1119 __ subu(result_reg, zero_reg, result_reg);
1089 } 1120 }
1090 1121
1091 __ bind(&left_is_not_negative); 1122 __ bind(&left_is_not_negative);
1092 __ And(result_reg, left_reg, divisor - 1); 1123 __ And(result_reg, left_reg, divisor - 1);
1093 __ bind(&done); 1124 __ bind(&done);
1094
1095 } else if (hmod->fixed_right_arg().has_value) {
1096 const Register left_reg = ToRegister(instr->left());
1097 const Register result_reg = ToRegister(instr->result());
1098 const Register right_reg = ToRegister(instr->right());
1099
1100 int32_t divisor = hmod->fixed_right_arg().value;
1101 ASSERT(IsPowerOf2(divisor));
1102
1103 // Check if our assumption of a fixed right operand still holds.
1104 DeoptimizeIf(ne, instr->environment(), right_reg, Operand(divisor));
1105
1106 Label left_is_not_negative, done;
1107 if (left->CanBeNegative()) {
1108 __ Branch(left_reg.is(result_reg) ? PROTECT : USE_DELAY_SLOT,
1109 &left_is_not_negative, ge, left_reg, Operand(zero_reg));
1110 __ subu(result_reg, zero_reg, left_reg);
1111 __ And(result_reg, result_reg, divisor - 1);
1112 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1113 DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
1114 }
1115 __ Branch(USE_DELAY_SLOT, &done);
1116 __ subu(result_reg, zero_reg, result_reg);
1117 }
1118
1119 __ bind(&left_is_not_negative);
1120 __ And(result_reg, left_reg, divisor - 1);
1121 __ bind(&done);
1122
1123 } else { 1125 } else {
1124 const Register scratch = scratch0(); 1126 const Register scratch = scratch0();
1125 const Register left_reg = ToRegister(instr->left()); 1127 const Register left_reg = ToRegister(instr->left());
1126 const Register result_reg = ToRegister(instr->result()); 1128 const Register result_reg = ToRegister(instr->result());
1127 1129
1128 // div runs in the background while we check for special cases. 1130 // div runs in the background while we check for special cases.
1129 Register right_reg = EmitLoadRegister(instr->right(), scratch); 1131 Register right_reg = EmitLoadRegister(instr->right(), scratch);
1130 __ div(left_reg, right_reg); 1132 __ div(left_reg, right_reg);
1131 1133
1132 Label done; 1134 Label done;
(...skipping 593 matching lines...) Expand 10 before | Expand all | Expand 10 after
1726 Register object = ToRegister(instr->date()); 1728 Register object = ToRegister(instr->date());
1727 Register result = ToRegister(instr->result()); 1729 Register result = ToRegister(instr->result());
1728 Register scratch = ToRegister(instr->temp()); 1730 Register scratch = ToRegister(instr->temp());
1729 Smi* index = instr->index(); 1731 Smi* index = instr->index();
1730 Label runtime, done; 1732 Label runtime, done;
1731 ASSERT(object.is(a0)); 1733 ASSERT(object.is(a0));
1732 ASSERT(result.is(v0)); 1734 ASSERT(result.is(v0));
1733 ASSERT(!scratch.is(scratch0())); 1735 ASSERT(!scratch.is(scratch0()));
1734 ASSERT(!scratch.is(object)); 1736 ASSERT(!scratch.is(object));
1735 1737
1736 __ And(at, object, Operand(kSmiTagMask)); 1738 __ SmiTst(object, at);
1737 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 1739 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
1738 __ GetObjectType(object, scratch, scratch); 1740 __ GetObjectType(object, scratch, scratch);
1739 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE)); 1741 DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
1740 1742
1741 if (index->value() == 0) { 1743 if (index->value() == 0) {
1742 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); 1744 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1743 } else { 1745 } else {
1744 if (index->value() < JSDate::kFirstUncachedField) { 1746 if (index->value() < JSDate::kFirstUncachedField) {
1745 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1747 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1746 __ li(scratch, Operand(stamp)); 1748 __ li(scratch, Operand(stamp));
(...skipping 390 matching lines...) Expand 10 before | Expand all | Expand 10 after
2137 __ LoadRoot(at, Heap::kNullValueRootIndex); 2139 __ LoadRoot(at, Heap::kNullValueRootIndex);
2138 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); 2140 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2139 } 2141 }
2140 2142
2141 if (expected.Contains(ToBooleanStub::SMI)) { 2143 if (expected.Contains(ToBooleanStub::SMI)) {
2142 // Smis: 0 -> false, all other -> true. 2144 // Smis: 0 -> false, all other -> true.
2143 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); 2145 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2144 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2146 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2145 } else if (expected.NeedsMap()) { 2147 } else if (expected.NeedsMap()) {
2146 // If we need a map later and have a Smi -> deopt. 2148 // If we need a map later and have a Smi -> deopt.
2147 __ And(at, reg, Operand(kSmiTagMask)); 2149 __ SmiTst(reg, at);
2148 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 2150 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
2149 } 2151 }
2150 2152
2151 const Register map = scratch0(); 2153 const Register map = scratch0();
2152 if (expected.NeedsMap()) { 2154 if (expected.NeedsMap()) {
2153 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2155 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2154 if (expected.CanBeUndetectable()) { 2156 if (expected.CanBeUndetectable()) {
2155 // Undetectable -> false. 2157 // Undetectable -> false.
2156 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); 2158 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2157 __ And(at, at, Operand(1 << Map::kIsUndetectable)); 2159 __ And(at, at, Operand(1 << Map::kIsUndetectable));
(...skipping 641 matching lines...) Expand 10 before | Expand all | Expand 10 after
2799 if (FLAG_trace && info()->IsOptimizing()) { 2801 if (FLAG_trace && info()->IsOptimizing()) {
2800 // Push the return value on the stack as the parameter. 2802 // Push the return value on the stack as the parameter.
2801 // Runtime::TraceExit returns its parameter in v0. We're leaving the code 2803 // Runtime::TraceExit returns its parameter in v0. We're leaving the code
2802 // managed by the register allocator and tearing down the frame, it's 2804 // managed by the register allocator and tearing down the frame, it's
2803 // safe to write to the context register. 2805 // safe to write to the context register.
2804 __ push(v0); 2806 __ push(v0);
2805 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2807 __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2806 __ CallRuntime(Runtime::kTraceExit, 1); 2808 __ CallRuntime(Runtime::kTraceExit, 1);
2807 } 2809 }
2808 if (info()->saves_caller_doubles()) { 2810 if (info()->saves_caller_doubles()) {
2809 ASSERT(NeedsEagerFrame()); 2811 RestoreCallerDoubles();
2810 BitVector* doubles = chunk()->allocated_double_registers();
2811 BitVector::Iterator save_iterator(doubles);
2812 int count = 0;
2813 while (!save_iterator.Done()) {
2814 __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
2815 MemOperand(sp, count * kDoubleSize));
2816 save_iterator.Advance();
2817 count++;
2818 }
2819 } 2812 }
2820 int no_frame_start = -1; 2813 int no_frame_start = -1;
2821 if (NeedsEagerFrame()) { 2814 if (NeedsEagerFrame()) {
2822 __ mov(sp, fp); 2815 __ mov(sp, fp);
2823 no_frame_start = masm_->pc_offset(); 2816 no_frame_start = masm_->pc_offset();
2824 __ Pop(ra, fp); 2817 __ Pop(ra, fp);
2825 } 2818 }
2826 if (instr->has_constant_parameter_count()) { 2819 if (instr->has_constant_parameter_count()) {
2827 int parameter_count = ToInteger32(instr->constant_parameter_count()); 2820 int parameter_count = ToInteger32(instr->constant_parameter_count());
2828 int32_t sp_delta = (parameter_count + 1) * kPointerSize; 2821 int32_t sp_delta = (parameter_count + 1) * kPointerSize;
(...skipping 230 matching lines...) Expand 10 before | Expand all | Expand 10 after
3059 Register to_reg = ToRegister(instr->result()); 3052 Register to_reg = ToRegister(instr->result());
3060 Register from_reg = ToRegister(instr->object()); 3053 Register from_reg = ToRegister(instr->object());
3061 __ lw(to_reg, FieldMemOperand(from_reg, 3054 __ lw(to_reg, FieldMemOperand(from_reg,
3062 ExternalArray::kExternalPointerOffset)); 3055 ExternalArray::kExternalPointerOffset));
3063 } 3056 }
3064 3057
3065 3058
3066 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { 3059 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
3067 Register arguments = ToRegister(instr->arguments()); 3060 Register arguments = ToRegister(instr->arguments());
3068 Register result = ToRegister(instr->result()); 3061 Register result = ToRegister(instr->result());
3069 if (instr->length()->IsConstantOperand() && 3062 // There are two words between the frame pointer and the last argument.
3070 instr->index()->IsConstantOperand()) { 3063 // Subtracting from length accounts for one of them, add one more.
3064 if (instr->length()->IsConstantOperand()) {
3065 int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
3066 if (instr->index()->IsConstantOperand()) {
3067 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3068 int index = (const_length - const_index) + 1;
3069 __ lw(result, MemOperand(arguments, index * kPointerSize));
3070 } else {
3071 Register index = ToRegister(instr->index());
3072 __ li(at, Operand(const_length + 1));
3073 __ Subu(result, at, index);
3074 __ sll(at, result, kPointerSizeLog2);
3075 __ Addu(at, arguments, at);
3076 __ lw(result, MemOperand(at));
3077 }
3078 } else if (instr->index()->IsConstantOperand()) {
3079 Register length = ToRegister(instr->length());
3071 int const_index = ToInteger32(LConstantOperand::cast(instr->index())); 3080 int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
3072 int const_length = ToInteger32(LConstantOperand::cast(instr->length())); 3081 int loc = const_index - 1;
3073 int index = (const_length - const_index) + 1; 3082 if (loc != 0) {
3074 __ lw(result, MemOperand(arguments, index * kPointerSize)); 3083 __ Subu(result, length, Operand(loc));
3084 __ sll(at, result, kPointerSizeLog2);
3085 __ Addu(at, arguments, at);
3086 __ lw(result, MemOperand(at));
3087 } else {
3088 __ sll(at, length, kPointerSizeLog2);
3089 __ Addu(at, arguments, at);
3090 __ lw(result, MemOperand(at));
3091 }
3075 } else { 3092 } else {
3076 Register length = ToRegister(instr->length()); 3093 Register length = ToRegister(instr->length());
3077 Register index = ToRegister(instr->index()); 3094 Register index = ToRegister(instr->index());
3078 // There are two words between the frame pointer and the last argument. 3095 __ Subu(result, length, index);
3079 // Subtracting from length accounts for one of them, add one more. 3096 __ Addu(result, result, 1);
3080 __ subu(length, length, index); 3097 __ sll(at, result, kPointerSizeLog2);
3081 __ Addu(length, length, Operand(1)); 3098 __ Addu(at, arguments, at);
3082 __ sll(length, length, kPointerSizeLog2); 3099 __ lw(result, MemOperand(at));
3083 __ Addu(at, arguments, Operand(length));
3084 __ lw(result, MemOperand(at, 0));
3085 } 3100 }
3086 } 3101 }
3087 3102
3088 3103
3089 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { 3104 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
3090 Register external_pointer = ToRegister(instr->elements()); 3105 Register external_pointer = ToRegister(instr->elements());
3091 Register key = no_reg; 3106 Register key = no_reg;
3092 ElementsKind elements_kind = instr->elements_kind(); 3107 ElementsKind elements_kind = instr->elements_kind();
3093 bool key_is_constant = instr->key()->IsConstantOperand(); 3108 bool key_is_constant = instr->key()->IsConstantOperand();
3094 int constant_key = 0; 3109 int constant_key = 0;
(...skipping 135 matching lines...) Expand 10 before | Expand all | Expand 10 after
3230 __ sll(scratch, key, kPointerSizeLog2); 3245 __ sll(scratch, key, kPointerSizeLog2);
3231 __ addu(scratch, elements, scratch); 3246 __ addu(scratch, elements, scratch);
3232 } 3247 }
3233 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); 3248 offset = FixedArray::OffsetOfElementAt(instr->additional_index());
3234 } 3249 }
3235 __ lw(result, FieldMemOperand(store_base, offset)); 3250 __ lw(result, FieldMemOperand(store_base, offset));
3236 3251
3237 // Check for the hole value. 3252 // Check for the hole value.
3238 if (instr->hydrogen()->RequiresHoleCheck()) { 3253 if (instr->hydrogen()->RequiresHoleCheck()) {
3239 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3254 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3240 __ And(scratch, result, Operand(kSmiTagMask)); 3255 __ SmiTst(result, scratch);
3241 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg)); 3256 DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
3242 } else { 3257 } else {
3243 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3258 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3244 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch)); 3259 DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
3245 } 3260 }
3246 } 3261 }
3247 } 3262 }
3248 3263
3249 3264
3250 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3265 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after
3379 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask)); 3394 __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
3380 __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg)); 3395 __ Branch(&receiver_ok, ne, scratch, Operand(zero_reg));
3381 3396
3382 // Normal function. Replace undefined or null with global receiver. 3397 // Normal function. Replace undefined or null with global receiver.
3383 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3398 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3384 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3399 __ Branch(&global_object, eq, receiver, Operand(scratch));
3385 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3400 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3386 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3401 __ Branch(&global_object, eq, receiver, Operand(scratch));
3387 3402
3388 // Deoptimize if the receiver is not a JS object. 3403 // Deoptimize if the receiver is not a JS object.
3389 __ And(scratch, receiver, Operand(kSmiTagMask)); 3404 __ SmiTst(receiver, scratch);
3390 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); 3405 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
3391 3406
3392 __ GetObjectType(receiver, scratch, scratch); 3407 __ GetObjectType(receiver, scratch, scratch);
3393 DeoptimizeIf(lt, instr->environment(), 3408 DeoptimizeIf(lt, instr->environment(),
3394 scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); 3409 scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
3395 __ Branch(&receiver_ok); 3410 __ Branch(&receiver_ok);
3396 3411
3397 __ bind(&global_object); 3412 __ bind(&global_object);
3398 __ lw(receiver, GlobalObjectOperand()); 3413 __ lw(receiver, GlobalObjectOperand());
3399 __ lw(receiver, 3414 __ lw(receiver,
(...skipping 451 matching lines...) Expand 10 before | Expand all | Expand 10 after
3851 MathPowStub stub(MathPowStub::INTEGER); 3866 MathPowStub stub(MathPowStub::INTEGER);
3852 __ CallStub(&stub); 3867 __ CallStub(&stub);
3853 } else { 3868 } else {
3854 ASSERT(exponent_type.IsDouble()); 3869 ASSERT(exponent_type.IsDouble());
3855 MathPowStub stub(MathPowStub::DOUBLE); 3870 MathPowStub stub(MathPowStub::DOUBLE);
3856 __ CallStub(&stub); 3871 __ CallStub(&stub);
3857 } 3872 }
3858 } 3873 }
3859 3874
3860 3875
3861 void LCodeGen::DoRandom(LRandom* instr) {
3862 // Assert that the register size is indeed the size of each seed.
3863 static const int kSeedSize = sizeof(uint32_t);
3864 STATIC_ASSERT(kPointerSize == kSeedSize);
3865
3866 // Load native context.
3867 Register global_object = ToRegister(instr->global_object());
3868 Register native_context = global_object;
3869 __ lw(native_context, FieldMemOperand(
3870 global_object, GlobalObject::kNativeContextOffset));
3871
3872 // Load state (FixedArray of the native context's random seeds).
3873 static const int kRandomSeedOffset =
3874 FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
3875 Register state = native_context;
3876 __ lw(state, FieldMemOperand(native_context, kRandomSeedOffset));
3877
3878 // Load state[0].
3879 Register state0 = ToRegister(instr->scratch());
3880 __ lw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3881 // Load state[1].
3882 Register state1 = ToRegister(instr->scratch2());
3883 __ lw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3884
3885 // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
3886 Register scratch3 = ToRegister(instr->scratch3());
3887 Register scratch4 = scratch0();
3888 __ And(scratch3, state0, Operand(0xFFFF));
3889 __ li(scratch4, Operand(18273));
3890 __ Mul(scratch3, scratch3, scratch4);
3891 __ srl(state0, state0, 16);
3892 __ Addu(state0, scratch3, state0);
3893 // Save state[0].
3894 __ sw(state0, FieldMemOperand(state, ByteArray::kHeaderSize));
3895
3896 // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
3897 __ And(scratch3, state1, Operand(0xFFFF));
3898 __ li(scratch4, Operand(36969));
3899 __ Mul(scratch3, scratch3, scratch4);
3900 __ srl(state1, state1, 16),
3901 __ Addu(state1, scratch3, state1);
3902 // Save state[1].
3903 __ sw(state1, FieldMemOperand(state, ByteArray::kHeaderSize + kSeedSize));
3904
3905 // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
3906 Register random = scratch4;
3907 __ And(random, state1, Operand(0x3FFFF));
3908 __ sll(state0, state0, 14);
3909 __ Addu(random, random, state0);
3910
3911 // 0x41300000 is the top half of 1.0 x 2^20 as a double.
3912 __ li(scratch3, Operand(0x41300000));
3913 // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
3914 DoubleRegister result = ToDoubleRegister(instr->result());
3915 __ Move(result, random, scratch3);
3916 // Move 0x4130000000000000 to FPU.
3917 DoubleRegister scratch5 = double_scratch0();
3918 __ Move(scratch5, zero_reg, scratch3);
3919 __ sub_d(result, result, scratch5);
3920 }
3921
3922
3923 void LCodeGen::DoMathExp(LMathExp* instr) { 3876 void LCodeGen::DoMathExp(LMathExp* instr) {
3924 DoubleRegister input = ToDoubleRegister(instr->value()); 3877 DoubleRegister input = ToDoubleRegister(instr->value());
3925 DoubleRegister result = ToDoubleRegister(instr->result()); 3878 DoubleRegister result = ToDoubleRegister(instr->result());
3926 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3879 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3927 DoubleRegister double_scratch2 = double_scratch0(); 3880 DoubleRegister double_scratch2 = double_scratch0();
3928 Register temp1 = ToRegister(instr->temp1()); 3881 Register temp1 = ToRegister(instr->temp1());
3929 Register temp2 = ToRegister(instr->temp2()); 3882 Register temp2 = ToRegister(instr->temp2());
3930 3883
3931 MathExpGenerator::EmitMathExp( 3884 MathExpGenerator::EmitMathExp(
3932 masm(), input, result, double_scratch1, double_scratch2, 3885 masm(), input, result, double_scratch1, double_scratch2,
(...skipping 224 matching lines...) Expand 10 before | Expand all | Expand 10 after
4157 MemOperand operand = MemOperand(object, offset); 4110 MemOperand operand = MemOperand(object, offset);
4158 __ Store(value, operand, representation); 4111 __ Store(value, operand, representation);
4159 return; 4112 return;
4160 } 4113 }
4161 4114
4162 Handle<Map> transition = instr->transition(); 4115 Handle<Map> transition = instr->transition();
4163 4116
4164 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { 4117 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
4165 Register value = ToRegister(instr->value()); 4118 Register value = ToRegister(instr->value());
4166 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 4119 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
4167 __ And(scratch, value, Operand(kSmiTagMask)); 4120 __ SmiTst(value, scratch);
4168 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg)); 4121 DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
4169 } 4122 }
4170 } else if (FLAG_track_double_fields && representation.IsDouble()) { 4123 } else if (FLAG_track_double_fields && representation.IsDouble()) {
4171 ASSERT(transition.is_null()); 4124 ASSERT(transition.is_null());
4172 ASSERT(access.IsInobject()); 4125 ASSERT(access.IsInobject());
4173 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); 4126 ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
4174 DoubleRegister value = ToDoubleRegister(instr->value()); 4127 DoubleRegister value = ToDoubleRegister(instr->value());
4175 __ sdc1(value, FieldMemOperand(object, offset)); 4128 __ sdc1(value, FieldMemOperand(object, offset));
4176 return; 4129 return;
4177 } 4130 }
(...skipping 974 matching lines...) Expand 10 before | Expand all | Expand 10 after
5152 __ bind(&done); 5105 __ bind(&done);
5153 } 5106 }
5154 } 5107 }
5155 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); 5108 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5156 DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg)); 5109 DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
5157 } 5110 }
5158 5111
5159 5112
5160 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5113 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5161 LOperand* input = instr->value(); 5114 LOperand* input = instr->value();
5162 __ And(at, ToRegister(input), Operand(kSmiTagMask)); 5115 __ SmiTst(ToRegister(input), at);
5163 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg)); 5116 DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
5164 } 5117 }
5165 5118
5166 5119
5167 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 5120 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5168 if (!instr->hydrogen()->value()->IsHeapObject()) { 5121 if (!instr->hydrogen()->value()->IsHeapObject()) {
5169 LOperand* input = instr->value(); 5122 LOperand* input = instr->value();
5170 __ And(at, ToRegister(input), Operand(kSmiTagMask)); 5123 __ SmiTst(ToRegister(input), at);
5171 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 5124 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5172 } 5125 }
5173 } 5126 }
5174 5127
5175 5128
5176 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5129 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5177 Register input = ToRegister(instr->value()); 5130 Register input = ToRegister(instr->value());
5178 Register scratch = scratch0(); 5131 Register scratch = scratch0();
5179 5132
5180 __ GetObjectType(input, scratch, scratch); 5133 __ GetObjectType(input, scratch, scratch);
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
5233 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5186 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5234 { 5187 {
5235 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 5188 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
5236 __ push(object); 5189 __ push(object);
5237 __ mov(cp, zero_reg); 5190 __ mov(cp, zero_reg);
5238 __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance); 5191 __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
5239 RecordSafepointWithRegisters( 5192 RecordSafepointWithRegisters(
5240 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 5193 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5241 __ StoreToSafepointRegisterSlot(v0, scratch0()); 5194 __ StoreToSafepointRegisterSlot(v0, scratch0());
5242 } 5195 }
5243 __ And(at, scratch0(), Operand(kSmiTagMask)); 5196 __ SmiTst(scratch0(), at);
5244 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg)); 5197 DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
5245 } 5198 }
5246 5199
5247 5200
5248 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5201 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5249 class DeferredCheckMaps V8_FINAL : public LDeferredCode { 5202 class DeferredCheckMaps V8_FINAL : public LDeferredCode {
5250 public: 5203 public:
5251 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5204 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5252 : LDeferredCode(codegen), instr_(instr), object_(object) { 5205 : LDeferredCode(codegen), instr_(instr), object_(object) {
5253 SetExit(check_maps()); 5206 SetExit(check_maps());
(...skipping 660 matching lines...) Expand 10 before | Expand all | Expand 10 after
5914 __ Subu(scratch, result, scratch); 5867 __ Subu(scratch, result, scratch);
5915 __ lw(result, FieldMemOperand(scratch, 5868 __ lw(result, FieldMemOperand(scratch,
5916 FixedArray::kHeaderSize - kPointerSize)); 5869 FixedArray::kHeaderSize - kPointerSize));
5917 __ bind(&done); 5870 __ bind(&done);
5918 } 5871 }
5919 5872
5920 5873
5921 #undef __ 5874 #undef __
5922 5875
5923 } } // namespace v8::internal 5876 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/mips/lithium-codegen-mips.h ('k') | src/mips/lithium-mips.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698