Chromium Code Reviews

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 10701054: Enable stub generation using Hydrogen/Lithium (again) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback Created 8 years ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 47 matching lines...)
58 Safepoint::DeoptMode deopt_mode_; 58 Safepoint::DeoptMode deopt_mode_;
59 }; 59 };
60 60
61 61
62 #define __ masm()-> 62 #define __ masm()->
63 63
64 bool LCodeGen::GenerateCode() { 64 bool LCodeGen::GenerateCode() {
65 HPhase phase("Z_Code generation", chunk()); 65 HPhase phase("Z_Code generation", chunk());
66 ASSERT(is_unused()); 66 ASSERT(is_unused());
67 status_ = GENERATING; 67 status_ = GENERATING;
68 CpuFeatures::Scope scope1(VFP3);
69 CpuFeatures::Scope scope2(ARMv7);
70 68
71 CodeStub::GenerateFPStubs(); 69 CodeStub::GenerateFPStubs();
72 70
73 // Open a frame scope to indicate that there is a frame on the stack. The 71 // Open a frame scope to indicate that there is a frame on the stack. The
74 // NONE indicates that the scope shouldn't actually generate code to set up 72 // NONE indicates that the scope shouldn't actually generate code to set up
75 // the frame (that is done in GeneratePrologue). 73 // the frame (that is done in GeneratePrologue).
76 FrameScope frame_scope(masm_, StackFrame::NONE); 74 FrameScope frame_scope(masm_, StackFrame::NONE);
77 75
78 return GeneratePrologue() && 76 return GeneratePrologue() &&
79 GenerateBody() && 77 GenerateBody() &&
(...skipping 31 matching lines...)
111 size_t length = builder.position(); 109 size_t length = builder.position();
112 Vector<char> copy = Vector<char>::New(length + 1); 110 Vector<char> copy = Vector<char>::New(length + 1);
113 memcpy(copy.start(), builder.Finalize(), copy.length()); 111 memcpy(copy.start(), builder.Finalize(), copy.length());
114 masm()->RecordComment(copy.start()); 112 masm()->RecordComment(copy.start());
115 } 113 }
116 114
117 115
118 bool LCodeGen::GeneratePrologue() { 116 bool LCodeGen::GeneratePrologue() {
119 ASSERT(is_generating()); 117 ASSERT(is_generating());
120 118
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 119 if (info()->IsOptimizing()) {
120 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122 121
123 #ifdef DEBUG 122 #ifdef DEBUG
124 if (strlen(FLAG_stop_at) > 0 && 123 if (strlen(FLAG_stop_at) > 0 &&
125 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 124 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
126 __ stop("stop_at"); 125 __ stop("stop_at");
127 } 126 }
128 #endif 127 #endif
129 128
130 // r1: Callee's JS function. 129 // r1: Callee's JS function.
131 // cp: Callee's context. 130 // cp: Callee's context.
132 // fp: Caller's frame pointer. 131 // fp: Caller's frame pointer.
133 // lr: Caller's pc. 132 // lr: Caller's pc.
134 133
135 // Strict mode functions and builtins need to replace the receiver 134 // Strict mode functions and builtins need to replace the receiver
136 // with undefined when called as functions (without an explicit 135 // with undefined when called as functions (without an explicit
137 // receiver object). r5 is zero for method calls and non-zero for 136 // receiver object). r5 is zero for method calls and non-zero for
138 // function calls. 137 // function calls.
139 if (!info_->is_classic_mode() || info_->is_native()) { 138 if (!info_->is_classic_mode() || info_->is_native()) {
140 Label ok; 139 Label ok;
141 Label begin; 140 Label begin;
142 __ bind(&begin); 141 __ bind(&begin);
143 __ cmp(r5, Operand(0)); 142 __ cmp(r5, Operand(0));
144 __ b(eq, &ok); 143 __ b(eq, &ok);
145 int receiver_offset = scope()->num_parameters() * kPointerSize; 144 int receiver_offset = scope()->num_parameters() * kPointerSize;
146 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 145 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
147 __ str(r2, MemOperand(sp, receiver_offset)); 146 __ str(r2, MemOperand(sp, receiver_offset));
148 __ bind(&ok); 147 __ bind(&ok);
149 ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos()); 148 ASSERT_EQ(kSizeOfOptimizedStrictModePrologue, ok.pos() - begin.pos());
149 }
150 } 150 }
151 151
152 // The following three instructions must remain together and unmodified for 152 if (NeedsEagerFrame()) {
153 // code aging to work properly. 153 // The following three instructions must remain together and unmodified for
154 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); 154 // code aging to work properly.
155 // Add unused load of ip to ensure prologue sequence is identical for 155 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
156 // full-codegen and lithium-codegen. 156 // Add unused load of ip to ensure prologue sequence is identical for
157 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 157 // full-codegen and lithium-codegen.
158 __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. 158 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
159 __ add(fp, sp, Operand(2 * kPointerSize));
160 frame_is_built_ = true;
161 }
159 162
160 // Reserve space for the stack slots needed by the code. 163 // Reserve space for the stack slots needed by the code.
161 int slots = GetStackSlotCount(); 164 int slots = GetStackSlotCount();
162 if (slots > 0) { 165 if (slots > 0) {
163 if (FLAG_debug_code) { 166 if (FLAG_debug_code) {
164 __ mov(r0, Operand(slots)); 167 __ mov(r0, Operand(slots));
165 __ mov(r2, Operand(kSlotsZapValue)); 168 __ mov(r2, Operand(kSlotsZapValue));
166 Label loop; 169 Label loop;
167 __ bind(&loop); 170 __ bind(&loop);
168 __ push(r2); 171 __ push(r2);
169 __ sub(r0, r0, Operand(1), SetCC); 172 __ sub(r0, r0, Operand(1), SetCC);
170 __ b(ne, &loop); 173 __ b(ne, &loop);
171 } else { 174 } else {
172 __ sub(sp, sp, Operand(slots * kPointerSize)); 175 __ sub(sp, sp, Operand(slots * kPointerSize));
173 } 176 }
174 } 177 }
175 178
176 // Possibly allocate a local context. 179 // Possibly allocate a local context.
177 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 180 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
178 if (heap_slots > 0) { 181 if (heap_slots > 0) {
179 Comment(";;; Allocate local context"); 182 Comment(";;; Allocate local context");
180 // Argument to NewContext is the function, which is in r1. 183 // Argument to NewContext is the function, which is in r1.
181 __ push(r1); 184 __ push(r1);
182 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 185 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
183 FastNewContextStub stub(heap_slots); 186 FastNewContextStub stub(heap_slots);
184 __ CallStub(&stub); 187 __ CallStub(&stub);
185 } else { 188 } else {
186 __ CallRuntime(Runtime::kNewFunctionContext, 1); 189 __ CallRuntime(Runtime::kNewFunctionContext, 1);
187 } 190 }
(...skipping 15 matching lines...)
203 __ str(r0, target); 206 __ str(r0, target);
204 // Update the write barrier. This clobbers r3 and r0. 207 // Update the write barrier. This clobbers r3 and r0.
205 __ RecordWriteContextSlot( 208 __ RecordWriteContextSlot(
206 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); 209 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
207 } 210 }
208 } 211 }
209 Comment(";;; End allocate local context"); 212 Comment(";;; End allocate local context");
210 } 213 }
211 214
212 // Trace the call. 215 // Trace the call.
213 if (FLAG_trace) { 216 if (FLAG_trace && info()->IsOptimizing()) {
214 __ CallRuntime(Runtime::kTraceEnter, 0); 217 __ CallRuntime(Runtime::kTraceEnter, 0);
215 } 218 }
216 return !is_aborted(); 219 return !is_aborted();
217 } 220 }
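
The prologue is now driven by the frame predicates this patch introduces. Their definitions live in lithium-codegen-arm.h and are not part of this diff; the sketch below only captures the relationship the code above depends on, with the bodies as assumptions:

struct FramePolicy {  // illustration only -- not V8 source
  bool is_stub;  // stands in for info()->IsStub()
  // Optimized JS code always builds its frame eagerly in GeneratePrologue().
  bool NeedsEagerFrame() const { return !is_stub; }
  // Stubs may run frameless and build a frame on demand, both around
  // deferred code (GenerateDeferredCode) and in the deopt jump table.
  bool NeedsDeferredFrame() const { return !NeedsEagerFrame() && is_stub; }
};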
218 221
219 222
220 bool LCodeGen::GenerateBody() { 223 bool LCodeGen::GenerateBody() {
221 ASSERT(is_generating()); 224 ASSERT(is_generating());
222 bool emit_instructions = true; 225 bool emit_instructions = true;
223 for (current_instruction_ = 0; 226 for (current_instruction_ = 0;
(...skipping 37 matching lines...)
261 return !is_aborted(); 264 return !is_aborted();
262 } 265 }
263 266
264 267
265 bool LCodeGen::GenerateDeferredCode() { 268 bool LCodeGen::GenerateDeferredCode() {
266 ASSERT(is_generating()); 269 ASSERT(is_generating());
267 if (deferred_.length() > 0) { 270 if (deferred_.length() > 0) {
268 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 271 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
269 LDeferredCode* code = deferred_[i]; 272 LDeferredCode* code = deferred_[i];
270 __ bind(code->entry()); 273 __ bind(code->entry());
274 if (NeedsDeferredFrame()) {
275 Comment(";;; Deferred build frame @%d: %s.",
276 code->instruction_index(),
277 code->instr()->Mnemonic());
278 ASSERT(!frame_is_built_);
279 ASSERT(info()->IsStub());
280 frame_is_built_ = true;
281 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
282 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
283 __ push(scratch0());
284 __ add(fp, sp, Operand(2 * kPointerSize));
285 }
271 Comment(";;; Deferred code @%d: %s.", 286 Comment(";;; Deferred code @%d: %s.",
272 code->instruction_index(), 287 code->instruction_index(),
273 code->instr()->Mnemonic()); 288 code->instr()->Mnemonic());
274 code->Generate(); 289 code->Generate();
290 if (NeedsDeferredFrame()) {
291 Comment(";;; Deferred destory frame @%d: %s.",
Jakob Kummerow 2012/11/28 16:28:22 nit: "destroy"
danno 2012/11/30 16:23:24 Done. But destroy is so final, so... depressing. "
292 code->instruction_index(),
293 code->instr()->Mnemonic());
294 ASSERT(frame_is_built_);
295 __ pop(ip);
296 __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
297 frame_is_built_ = false;
298 }
275 __ jmp(code->exit()); 299 __ jmp(code->exit());
276 } 300 }
277 } 301 }
278 302
279 // Force constant pool emission at the end of the deferred code to make 303 // Force constant pool emission at the end of the deferred code to make
280 // sure that no constant pools are emitted after. 304 // sure that no constant pools are emitted after.
281 masm()->CheckConstPool(true, false); 305 masm()->CheckConstPool(true, false);
282 306
283 return !is_aborted(); 307 return !is_aborted();
284 } 308 }
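
For orientation, the stm/push/add triple above builds the stub frame whose slots the pop/ldm pair later discards. The offsets below are inferred from the instructions in this diff rather than quoted from V8 headers:

// Stub frame built by: stm db_w sp, {cp, fp, lr}; push(Smi(STUB)); add fp, sp, #8
static const int kPointerSize = 4;                       // ARM32
static const int kSavedLrOffset   =  1 * kPointerSize;   // [fp + 4] caller's lr
static const int kSavedFpOffset   =  0;                  // [fp]     caller's fp
static const int kContextOffset   = -1 * kPointerSize;   // [fp - 4] cp
static const int kFrameTypeOffset = -2 * kPointerSize;   // [fp - 8] Smi(StackFrame::STUB), popped into ip on exit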
285 309
286 310
287 bool LCodeGen::GenerateDeoptJumpTable() { 311 bool LCodeGen::GenerateDeoptJumpTable() {
288 // Check that the jump table is accessible from everywhere in the function 312 // Check that the jump table is acvcessible from everywhere in the function
Jakob Kummerow 2012/11/28 16:28:22 nit: I prefer the old version.
danno 2012/11/30 16:23:24 Done.
289 // code, i.e. that offsets to the table can be encoded in the 24bit signed 313 // code, i.e. that offsets to the table can be encoded in the 24bit signed
290 // immediate of a branch instruction. 314 // immediate of a branch instruction.
291 // To simplify we consider the code size from the first instruction to the 315 // To simplify we consider the code size from the first instruction to the
292 // end of the jump table. We also don't consider the pc load delta. 316 // end of the jump table. We also don't consider the pc load delta.
293 // Each entry in the jump table generates one instruction and inlines one 317 // Each entry in the jump table generates one instruction and inlines one
294 // 32bit data after it. 318 // 32bit data after it.
295 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + 319 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
296 deopt_jump_table_.length() * 2)) { 320 deopt_jump_table_.length() * 7)) {
297 Abort("Generated code is too large"); 321 Abort("Generated code is too large");
298 } 322 }
299 323
300 // Block the constant pool emission during the jump table emission.
301 __ BlockConstPoolFor(deopt_jump_table_.length());
302 __ RecordComment("[ Deoptimisation jump table"); 324 __ RecordComment("[ Deoptimisation jump table");
303 Label table_start; 325 Label table_start;
304 __ bind(&table_start); 326 __ bind(&table_start);
327 Label needs_frame_not_call;
328 bool has_generated_needs_frame_not_call = false;
Jakob Kummerow 2012/11/28 16:28:22 see comments on ia32 version. In short: 1) if you
danno 2012/11/30 16:23:24 Done.
329 Label needs_frame_is_call;
330 bool has_generated_needs_frame_is_call = false;
305 for (int i = 0; i < deopt_jump_table_.length(); i++) { 331 for (int i = 0; i < deopt_jump_table_.length(); i++) {
306 __ bind(&deopt_jump_table_[i].label); 332 __ bind(&deopt_jump_table_[i].label);
307 __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); 333 Address entry = deopt_jump_table_[i].address;
308 __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); 334 if (deopt_jump_table_[i].needs_frame) {
335 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
336 if (deopt_jump_table_[i].is_lazy_deopt) {
337 if (!has_generated_needs_frame_is_call) {
338 has_generated_needs_frame_is_call = true;
339 __ bind(&needs_frame_is_call);
340 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
341 // If there is no frame, we don't have access to the JSFunction that
342 // needs to be put into the frame.
343 ASSERT(info()->IsStub());
344 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
345 __ push(scratch0());
346 __ add(fp, sp, Operand(2 * kPointerSize));
347 __ mov(lr, Operand(pc), LeaveCC, al);
348 __ mov(pc, ip);
349 } else {
350 __ b(&needs_frame_is_call);
351 }
352 } else {
353 if (!has_generated_needs_frame_not_call) {
354 has_generated_needs_frame_not_call = true;
355 __ bind(&needs_frame_not_call);
356 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
357 // If there is no frame, we don't have access to the JSFunction that
358 // needs to be put into the frame.
359 ASSERT(info()->IsStub());
360 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
361 __ push(scratch0());
362 __ add(fp, sp, Operand(2 * kPointerSize));
363 __ mov(pc, ip);
364 } else {
365 __ b(&needs_frame_not_call);
366 }
367 }
368 } else {
369 if (deopt_jump_table_[i].is_lazy_deopt) {
370 __ mov(lr, Operand(pc), LeaveCC, al);
371 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
372 } else {
373 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
374 }
375 }
376 masm()->CheckConstPool(false, false);
309 } 377 }
310 ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
311 deopt_jump_table_.length() * 2);
312 __ RecordComment("]"); 378 __ RecordComment("]");
313 379
380 // Force constant pool emission at the end of the deopt jump table to make
381 // sure that no constant pools are emitted after.
382 masm()->CheckConstPool(true, false);
383
314 // The deoptimization jump table is the last part of the instruction 384 // The deoptimization jump table is the last part of the instruction
315 // sequence. Mark the generated code as done unless we bailed out. 385 // sequence. Mark the generated code as done unless we bailed out.
316 if (!is_aborted()) status_ = DONE; 386 if (!is_aborted()) status_ = DONE;
317 return !is_aborted(); 387 return !is_aborted();
318 } 388 }
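
Two details above are worth spelling out. First, the mov lr, Operand(pc) / mov pc, ip pair is a hand-rolled call: reading pc on ARM yields the address of the current instruction plus 8, so lr ends up pointing at the instruction just after the jump, which is the return address a lazy deopt entry expects. Second, the * 7 in the is_int24 check replaces the old * 2 because an entry that must build a frame now expands to several instructions instead of a single load plus inline address. A self-contained sketch of the range check (the constants are assumptions matching the diff, not V8 source):

#include <cstdint>

// b<cond> on ARM encodes a signed 24-bit offset counted in 4-byte words, so
// every deopt branch must land within +/- 2^23 instructions of its entry.
static bool DeoptTableInBranchRange(int pc_offset_bytes, int table_length) {
  static const int kInstrSize = 4;             // ARM instruction width
  static const int kMaxEntryInstructions = 7;  // worst case with frame build
  int64_t worst = pc_offset_bytes / kInstrSize +
                  static_cast<int64_t>(table_length) * kMaxEntryInstructions;
  return worst >= -(static_cast<int64_t>(1) << 23) &&
         worst < (static_cast<int64_t>(1) << 23);  // is_int24
}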
319 389
320 390
321 bool LCodeGen::GenerateSafepointTable() { 391 bool LCodeGen::GenerateSafepointTable() {
322 ASSERT(is_done()); 392 ASSERT(is_done());
323 safepoints_.Emit(masm(), GetStackSlotCount()); 393 safepoints_.Emit(masm(), GetStackSlotCount());
324 return !is_aborted(); 394 return !is_aborted();
325 } 395 }
326 396
327 397
328 Register LCodeGen::ToRegister(int index) const { 398 Register LCodeGen::ToRegister(int index) const {
329 return Register::FromAllocationIndex(index); 399 return Register::FromAllocationIndex(index);
330 } 400 }
331 401
332 402
333 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { 403 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
334 return DoubleRegister::FromAllocationIndex(index); 404 return DwVfpRegister::FromAllocationIndex(index);
335 } 405 }
336 406
337 407
338 Register LCodeGen::ToRegister(LOperand* op) const { 408 Register LCodeGen::ToRegister(LOperand* op) const {
339 ASSERT(op->IsRegister()); 409 ASSERT(op->IsRegister());
340 return ToRegister(op->index()); 410 return ToRegister(op->index());
341 } 411 }
342 412
343 413
344 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { 414 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
(...skipping 20 matching lines...)
365 return scratch; 435 return scratch;
366 } else if (op->IsStackSlot() || op->IsArgument()) { 436 } else if (op->IsStackSlot() || op->IsArgument()) {
367 __ ldr(scratch, ToMemOperand(op)); 437 __ ldr(scratch, ToMemOperand(op));
368 return scratch; 438 return scratch;
369 } 439 }
370 UNREACHABLE(); 440 UNREACHABLE();
371 return scratch; 441 return scratch;
372 } 442 }
373 443
374 444
375 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 445 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
376 ASSERT(op->IsDoubleRegister()); 446 ASSERT(op->IsDoubleRegister());
377 return ToDoubleRegister(op->index()); 447 return ToDoubleRegister(op->index());
378 } 448 }
379 449
380 450
381 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, 451 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
382 SwVfpRegister flt_scratch, 452 SwVfpRegister flt_scratch,
383 DoubleRegister dbl_scratch) { 453 DwVfpRegister dbl_scratch) {
384 if (op->IsDoubleRegister()) { 454 if (op->IsDoubleRegister()) {
385 return ToDoubleRegister(op->index()); 455 return ToDoubleRegister(op->index());
386 } else if (op->IsConstantOperand()) { 456 } else if (op->IsConstantOperand()) {
387 LConstantOperand* const_op = LConstantOperand::cast(op); 457 LConstantOperand* const_op = LConstantOperand::cast(op);
388 HConstant* constant = chunk_->LookupConstant(const_op); 458 HConstant* constant = chunk_->LookupConstant(const_op);
389 Handle<Object> literal = constant->handle(); 459 Handle<Object> literal = constant->handle();
390 Representation r = chunk_->LookupLiteralRepresentation(const_op); 460 Representation r = chunk_->LookupLiteralRepresentation(const_op);
391 if (r.IsInteger32()) { 461 if (r.IsInteger32()) {
392 ASSERT(literal->IsNumber()); 462 ASSERT(literal->IsNumber());
393 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); 463 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
(...skipping 115 matching lines...)
509 // arguments index points to the first element of a sequence of tagged 579 // arguments index points to the first element of a sequence of tagged
510 // values on the stack that represent the arguments. This needs to be 580 // values on the stack that represent the arguments. This needs to be
511 // kept in sync with the LArgumentsElements implementation. 581 // kept in sync with the LArgumentsElements implementation.
512 *arguments_index = -environment->parameter_count(); 582 *arguments_index = -environment->parameter_count();
513 *arguments_count = environment->parameter_count(); 583 *arguments_count = environment->parameter_count();
514 584
515 WriteTranslation(environment->outer(), 585 WriteTranslation(environment->outer(),
516 translation, 586 translation,
517 arguments_index, 587 arguments_index,
518 arguments_count); 588 arguments_count);
519 int closure_id = *info()->closure() != *environment->closure() 589 bool has_closure_id = !info()->closure().is_null() &&
590 *info()->closure() != *environment->closure();
591 int closure_id = has_closure_id
520 ? DefineDeoptimizationLiteral(environment->closure()) 592 ? DefineDeoptimizationLiteral(environment->closure())
521 : Translation::kSelfLiteralId; 593 : Translation::kSelfLiteralId;
522 594
523 switch (environment->frame_type()) { 595 switch (environment->frame_type()) {
524 case JS_FUNCTION: 596 case JS_FUNCTION:
525 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 597 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
526 break; 598 break;
527 case JS_CONSTRUCT: 599 case JS_CONSTRUCT:
528 translation->BeginConstructStubFrame(closure_id, translation_size); 600 translation->BeginConstructStubFrame(closure_id, translation_size);
529 break; 601 break;
530 case JS_GETTER: 602 case JS_GETTER:
531 ASSERT(translation_size == 1); 603 ASSERT(translation_size == 1);
532 ASSERT(height == 0); 604 ASSERT(height == 0);
533 translation->BeginGetterStubFrame(closure_id); 605 translation->BeginGetterStubFrame(closure_id);
534 break; 606 break;
535 case JS_SETTER: 607 case JS_SETTER:
536 ASSERT(translation_size == 2); 608 ASSERT(translation_size == 2);
537 ASSERT(height == 0); 609 ASSERT(height == 0);
538 translation->BeginSetterStubFrame(closure_id); 610 translation->BeginSetterStubFrame(closure_id);
539 break; 611 break;
612 case STUB:
613 translation->BeginCompiledStubFrame();
614 break;
540 case ARGUMENTS_ADAPTOR: 615 case ARGUMENTS_ADAPTOR:
541 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 616 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
542 break; 617 break;
543 } 618 }
544 619
545 // Inlined frames which push their arguments cause the index to be 620 // Inlined frames which push their arguments cause the index to be
546 // bumped and a new stack area to be used for materialization. 621 // bumped and a new stack area to be used for materialization.
547 if (environment->entry() != NULL && 622 if (environment->entry() != NULL &&
548 environment->entry()->arguments_pushed()) { 623 environment->entry()->arguments_pushed()) {
549 *arguments_index = *arguments_index < 0 624 *arguments_index = *arguments_index < 0
(...skipping 175 matching lines...)
725 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 800 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
726 deoptimizations_.Add(environment, zone()); 801 deoptimizations_.Add(environment, zone());
727 } 802 }
728 } 803 }
729 804
730 805
731 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 806 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
732 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 807 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
733 ASSERT(environment->HasBeenRegistered()); 808 ASSERT(environment->HasBeenRegistered());
734 int id = environment->deoptimization_index(); 809 int id = environment->deoptimization_index();
735 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); 810
811 Deoptimizer::BailoutType bailout_type = info()->IsStub()
812 ? Deoptimizer::LAZY
813 : Deoptimizer::EAGER;
814 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
736 if (entry == NULL) { 815 if (entry == NULL) {
737 Abort("bailout was not prepared"); 816 Abort("bailout was not prepared");
738 return; 817 return;
739 } 818 }
740 819
741 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. 820 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
742 821
743 if (FLAG_deopt_every_n_times == 1 && 822 if (FLAG_deopt_every_n_times == 1 &&
744 info_->shared_info()->opt_count() == id) { 823 info_->shared_info()->opt_count() == id) {
745 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); 824 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
746 return; 825 return;
747 } 826 }
748 827
749 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); 828 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
750 829
751 if (cc == al) { 830 bool needs_lazy_deopt = info()->IsStub();
831 ASSERT(info()->IsStub() || frame_is_built_);
832 if (cc == al && !needs_lazy_deopt) {
752 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); 833 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
753 } else { 834 } else {
754 // We often have several deopts to the same entry, reuse the last 835 // We often have several deopts to the same entry, reuse the last
755 // jump entry if this is the case. 836 // jump entry if this is the case.
756 if (deopt_jump_table_.is_empty() || 837 if (deopt_jump_table_.is_empty() ||
757 (deopt_jump_table_.last().address != entry)) { 838 (deopt_jump_table_.last().address != entry) ||
758 deopt_jump_table_.Add(JumpTableEntry(entry), zone()); 839 (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
840 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
841 JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
842 deopt_jump_table_.Add(table_entry, zone());
759 } 843 }
760 __ b(cc, &deopt_jump_table_.last().label); 844 __ b(cc, &deopt_jump_table_.last().label);
761 } 845 }
762 } 846 }
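
DeoptimizeIf now keys jump-table reuse on three fields instead of the target address alone: a stale entry with the wrong frame handling or call style must not be reused. The entry's shape below is inferred from the constructor call and the field accesses in GenerateDeoptJumpTable; the real definition lives in the header and may differ:

// Inferred shape -- field names taken from their uses in this file.
struct JumpTableEntry {
  JumpTableEntry(unsigned char* deopt_entry, bool frame, bool lazy)
      : address(deopt_entry), needs_frame(frame), is_lazy_deopt(lazy) {}
  // Label label;            // bound at the table entry (V8 assembler type)
  unsigned char* address;    // deoptimizer entry point
  bool needs_frame;          // emitted from frameless (stub) code
  bool is_lazy_deopt;        // lazy entries are "called", so lr must be set
};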
763 847
764 848
765 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 849 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
766 int length = deoptimizations_.length(); 850 int length = deoptimizations_.length();
767 if (length == 0) return; 851 if (length == 0) return;
768 Handle<DeoptimizationInputData> data = 852 Handle<DeoptimizationInputData> data =
(...skipping 588 matching lines...)
1357 __ cmp(remainder, Operand(0)); 1441 __ cmp(remainder, Operand(0));
1358 __ teq(remainder, Operand(divisor), ne); 1442 __ teq(remainder, Operand(divisor), ne);
1359 __ sub(result, result, Operand(1), LeaveCC, mi); 1443 __ sub(result, result, Operand(1), LeaveCC, mi);
1360 } 1444 }
1361 1445
1362 1446
1363 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, 1447 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
1364 LOperand* left_argument, 1448 LOperand* left_argument,
1365 LOperand* right_argument, 1449 LOperand* right_argument,
1366 Token::Value op) { 1450 Token::Value op) {
1451 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed, as the Scope c'tor right below co
danno 2012/11/30 16:23:24 Done.
1452 CpuFeatures::Scope vfp_scope(VFP2);
1367 Register left = ToRegister(left_argument); 1453 Register left = ToRegister(left_argument);
1368 Register right = ToRegister(right_argument); 1454 Register right = ToRegister(right_argument);
1369 1455
1370 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); 1456 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1371 // Move left to r1 and right to r0 for the stub call. 1457 // Move left to r1 and right to r0 for the stub call.
1372 if (left.is(r1)) { 1458 if (left.is(r1)) {
1373 __ Move(r0, right); 1459 __ Move(r0, right);
1374 } else if (left.is(r0) && right.is(r1)) { 1460 } else if (left.is(r0) && right.is(r1)) {
1375 __ Swap(r0, r1, r2); 1461 __ Swap(r0, r1, r2);
1376 } else if (left.is(r0)) { 1462 } else if (left.is(r0)) {
(...skipping 265 matching lines...)
1642 1728
1643 void LCodeGen::DoConstantI(LConstantI* instr) { 1729 void LCodeGen::DoConstantI(LConstantI* instr) {
1644 ASSERT(instr->result()->IsRegister()); 1730 ASSERT(instr->result()->IsRegister());
1645 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1731 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1646 } 1732 }
1647 1733
1648 1734
1649 void LCodeGen::DoConstantD(LConstantD* instr) { 1735 void LCodeGen::DoConstantD(LConstantD* instr) {
1650 ASSERT(instr->result()->IsDoubleRegister()); 1736 ASSERT(instr->result()->IsDoubleRegister());
1651 DwVfpRegister result = ToDoubleRegister(instr->result()); 1737 DwVfpRegister result = ToDoubleRegister(instr->result());
1738 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
1739 CpuFeatures::Scope scope(VFP2);
1652 double v = instr->value(); 1740 double v = instr->value();
1653 __ Vmov(result, v, scratch0()); 1741 __ Vmov(result, v, scratch0());
1654 } 1742 }
1655 1743
1656 1744
1657 void LCodeGen::DoConstantT(LConstantT* instr) { 1745 void LCodeGen::DoConstantT(LConstantT* instr) {
1658 Handle<Object> value = instr->value(); 1746 Handle<Object> value = instr->value();
1659 if (value->IsSmi()) { 1747 if (value->IsSmi()) {
1660 __ mov(ToRegister(instr->result()), Operand(value)); 1748 __ mov(ToRegister(instr->result()), Operand(value));
1661 } else { 1749 } else {
(...skipping 148 matching lines...)
1810 ? ToOperand(right) 1898 ? ToOperand(right)
1811 : Operand(EmitLoadRegister(right, ip)); 1899 : Operand(EmitLoadRegister(right, ip));
1812 Register result_reg = ToRegister(instr->result()); 1900 Register result_reg = ToRegister(instr->result());
1813 __ cmp(left_reg, right_op); 1901 __ cmp(left_reg, right_op);
1814 if (!result_reg.is(left_reg)) { 1902 if (!result_reg.is(left_reg)) {
1815 __ mov(result_reg, left_reg, LeaveCC, condition); 1903 __ mov(result_reg, left_reg, LeaveCC, condition);
1816 } 1904 }
1817 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 1905 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1818 } else { 1906 } else {
1819 ASSERT(instr->hydrogen()->representation().IsDouble()); 1907 ASSERT(instr->hydrogen()->representation().IsDouble());
1820 DoubleRegister left_reg = ToDoubleRegister(left); 1908 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
1821 DoubleRegister right_reg = ToDoubleRegister(right); 1909 CpuFeatures::Scope scope(VFP2);
1822 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 1910 DwVfpRegister left_reg = ToDoubleRegister(left);
1911 DwVfpRegister right_reg = ToDoubleRegister(right);
1912 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
1823 Label check_nan_left, check_zero, return_left, return_right, done; 1913 Label check_nan_left, check_zero, return_left, return_right, done;
1824 __ VFPCompareAndSetFlags(left_reg, right_reg); 1914 __ VFPCompareAndSetFlags(left_reg, right_reg);
1825 __ b(vs, &check_nan_left); 1915 __ b(vs, &check_nan_left);
1826 __ b(eq, &check_zero); 1916 __ b(eq, &check_zero);
1827 __ b(condition, &return_left); 1917 __ b(condition, &return_left);
1828 __ b(al, &return_right); 1918 __ b(al, &return_right);
1829 1919
1830 __ bind(&check_zero); 1920 __ bind(&check_zero);
1831 __ VFPCompareAndSetFlags(left_reg, 0.0); 1921 __ VFPCompareAndSetFlags(left_reg, 0.0);
1832 __ b(ne, &return_left); // left == right != 0. 1922 __ b(ne, &return_left); // left == right != 0.
(...skipping 22 matching lines...)
1855 __ bind(&return_left); 1945 __ bind(&return_left);
1856 if (!left_reg.is(result_reg)) { 1946 if (!left_reg.is(result_reg)) {
1857 __ vmov(result_reg, left_reg); 1947 __ vmov(result_reg, left_reg);
1858 } 1948 }
1859 __ bind(&done); 1949 __ bind(&done);
1860 } 1950 }
1861 } 1951 }
1862 1952
1863 1953
1864 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1954 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1865 DoubleRegister left = ToDoubleRegister(instr->left()); 1955 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
1866 DoubleRegister right = ToDoubleRegister(instr->right()); 1956 CpuFeatures::Scope scope(VFP2);
1867 DoubleRegister result = ToDoubleRegister(instr->result()); 1957 DwVfpRegister left = ToDoubleRegister(instr->left());
1958 DwVfpRegister right = ToDoubleRegister(instr->right());
1959 DwVfpRegister result = ToDoubleRegister(instr->result());
1868 switch (instr->op()) { 1960 switch (instr->op()) {
1869 case Token::ADD: 1961 case Token::ADD:
1870 __ vadd(result, left, right); 1962 __ vadd(result, left, right);
1871 break; 1963 break;
1872 case Token::SUB: 1964 case Token::SUB:
1873 __ vsub(result, left, right); 1965 __ vsub(result, left, right);
1874 break; 1966 break;
1875 case Token::MUL: 1967 case Token::MUL:
1876 __ vmul(result, left, right); 1968 __ vmul(result, left, right);
1877 break; 1969 break;
(...skipping 67 matching lines...)
1945 void LCodeGen::DoBranch(LBranch* instr) { 2037 void LCodeGen::DoBranch(LBranch* instr) {
1946 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2038 int true_block = chunk_->LookupDestination(instr->true_block_id());
1947 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2039 int false_block = chunk_->LookupDestination(instr->false_block_id());
1948 2040
1949 Representation r = instr->hydrogen()->value()->representation(); 2041 Representation r = instr->hydrogen()->value()->representation();
1950 if (r.IsInteger32()) { 2042 if (r.IsInteger32()) {
1951 Register reg = ToRegister(instr->value()); 2043 Register reg = ToRegister(instr->value());
1952 __ cmp(reg, Operand(0)); 2044 __ cmp(reg, Operand(0));
1953 EmitBranch(true_block, false_block, ne); 2045 EmitBranch(true_block, false_block, ne);
1954 } else if (r.IsDouble()) { 2046 } else if (r.IsDouble()) {
1955 DoubleRegister reg = ToDoubleRegister(instr->value()); 2047 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
2048 CpuFeatures::Scope scope(VFP2);
2049 DwVfpRegister reg = ToDoubleRegister(instr->value());
1956 Register scratch = scratch0(); 2050 Register scratch = scratch0();
1957 2051
1958 // Test the double value. Zero and NaN are false. 2052 // Test the double value. Zero and NaN are false.
1959 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); 2053 __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1960 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); 2054 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1961 EmitBranch(true_block, false_block, eq); 2055 EmitBranch(true_block, false_block, eq);
1962 } else { 2056 } else {
1963 ASSERT(r.IsTagged()); 2057 ASSERT(r.IsTagged());
1964 Register reg = ToRegister(instr->value()); 2058 Register reg = ToRegister(instr->value());
1965 HType type = instr->hydrogen()->value()->type(); 2059 HType type = instr->hydrogen()->value()->type();
(...skipping 64 matching lines...)
2030 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2124 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2031 __ b(ge, &not_string); 2125 __ b(ge, &not_string);
2032 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2126 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2033 __ cmp(ip, Operand(0)); 2127 __ cmp(ip, Operand(0));
2034 __ b(ne, true_label); 2128 __ b(ne, true_label);
2035 __ b(false_label); 2129 __ b(false_label);
2036 __ bind(&not_string); 2130 __ bind(&not_string);
2037 } 2131 }
2038 2132
2039 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2133 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2134 CpuFeatures::Scope scope(VFP2);
2040 // heap number -> false iff +0, -0, or NaN. 2135 // heap number -> false iff +0, -0, or NaN.
2041 DoubleRegister dbl_scratch = double_scratch0(); 2136 DwVfpRegister dbl_scratch = double_scratch0();
2042 Label not_heap_number; 2137 Label not_heap_number;
2043 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2138 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2044 __ b(ne, &not_heap_number); 2139 __ b(ne, &not_heap_number);
2045 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2140 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2046 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2141 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2047 __ b(vs, false_label); // NaN -> false. 2142 __ b(vs, false_label); // NaN -> false.
2048 __ b(eq, false_label); // +0, -0 -> false. 2143 __ b(eq, false_label); // +0, -0 -> false.
2049 __ b(true_label); 2144 __ b(true_label);
2050 __ bind(&not_heap_number); 2145 __ bind(&not_heap_number);
2051 } 2146 }
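
The VFPCompareAndSetFlags sequence above implements ECMAScript ToBoolean for heap numbers; in scalar terms (a sketch, not V8 code):

#include <cmath>

// ToBoolean for a heap number: false iff the value is +0, -0, or NaN.
static bool HeapNumberToBoolean(double value) {
  // +0.0 and -0.0 both compare equal to 0.0; NaN compares unordered to
  // everything, which is why the assembly branches on the V flag first.
  return value != 0.0 && !std::isnan(value);
}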
(...skipping 57 matching lines...)
2109 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2204 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2110 // We can statically evaluate the comparison. 2205 // We can statically evaluate the comparison.
2111 double left_val = ToDouble(LConstantOperand::cast(left)); 2206 double left_val = ToDouble(LConstantOperand::cast(left));
2112 double right_val = ToDouble(LConstantOperand::cast(right)); 2207 double right_val = ToDouble(LConstantOperand::cast(right));
2113 int next_block = 2208 int next_block =
2114 EvalComparison(instr->op(), left_val, right_val) ? true_block 2209 EvalComparison(instr->op(), left_val, right_val) ? true_block
2115 : false_block; 2210 : false_block;
2116 EmitGoto(next_block); 2211 EmitGoto(next_block);
2117 } else { 2212 } else {
2118 if (instr->is_double()) { 2213 if (instr->is_double()) {
2214 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
2215 CpuFeatures::Scope scope(VFP2);
2119 // Compare left and right operands as doubles and load the 2216 // Compare left and right operands as doubles and load the
2120 // resulting flags into the normal status register. 2217 // resulting flags into the normal status register.
2121 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2218 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2122 // If a NaN is involved, i.e. the result is unordered (V set), 2219 // If a NaN is involved, i.e. the result is unordered (V set),
2123 // jump to false block label. 2220 // jump to false block label.
2124 __ b(vs, chunk_->GetAssemblyLabel(false_block)); 2221 __ b(vs, chunk_->GetAssemblyLabel(false_block));
2125 } else { 2222 } else {
2126 if (right->IsConstantOperand()) { 2223 if (right->IsConstantOperand()) {
2127 __ cmp(ToRegister(left), 2224 __ cmp(ToRegister(left),
2128 Operand(ToInteger32(LConstantOperand::cast(right)))); 2225 Operand(ToInteger32(LConstantOperand::cast(right))));
(...skipping 518 matching lines...)
2647 __ LoadRoot(ToRegister(instr->result()), 2744 __ LoadRoot(ToRegister(instr->result()),
2648 Heap::kTrueValueRootIndex, 2745 Heap::kTrueValueRootIndex,
2649 condition); 2746 condition);
2650 __ LoadRoot(ToRegister(instr->result()), 2747 __ LoadRoot(ToRegister(instr->result()),
2651 Heap::kFalseValueRootIndex, 2748 Heap::kFalseValueRootIndex,
2652 NegateCondition(condition)); 2749 NegateCondition(condition));
2653 } 2750 }
2654 2751
2655 2752
2656 void LCodeGen::DoReturn(LReturn* instr) { 2753 void LCodeGen::DoReturn(LReturn* instr) {
2657 if (FLAG_trace) { 2754 if (FLAG_trace && info()->IsOptimizing()) {
2658 // Push the return value on the stack as the parameter. 2755 // Push the return value on the stack as the parameter.
2659 // Runtime::TraceExit returns its parameter in r0. 2756 // Runtime::TraceExit returns its parameter in r0.
2660 __ push(r0); 2757 __ push(r0);
2661 __ CallRuntime(Runtime::kTraceExit, 1); 2758 __ CallRuntime(Runtime::kTraceExit, 1);
2662 } 2759 }
2663 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; 2760 if (NeedsEagerFrame()) {
2664 __ mov(sp, fp); 2761 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2665 __ ldm(ia_w, sp, fp.bit() | lr.bit()); 2762 __ mov(sp, fp);
2666 __ add(sp, sp, Operand(sp_delta)); 2763 __ ldm(ia_w, sp, fp.bit() | lr.bit());
2764 __ add(sp, sp, Operand(sp_delta));
2765 }
2766 if (info()->IsStub()) {
2767 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2768 }
2667 __ Jump(lr); 2769 __ Jump(lr);
2668 } 2770 }
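
Note the + 1 in sp_delta: the caller pushed the receiver as well as GetParameterCount() arguments, all pointer-sized, so the epilogue frees one extra slot after restoring fp and lr. A trivial sketch of the arithmetic:

// Bytes popped after the ldm: receiver + parameters, pointer-sized (ARM32).
static int ReturnSpDelta(int parameter_count) {
  static const int kPointerSize = 4;
  return (parameter_count + 1) * kPointerSize;
}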
2669 2771
2670 2772
2671 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2773 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2672 Register result = ToRegister(instr->result()); 2774 Register result = ToRegister(instr->result());
2673 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); 2775 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2674 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); 2776 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2675 if (instr->hydrogen()->RequiresHoleCheck()) { 2777 if (instr->hydrogen()->RequiresHoleCheck()) {
2676 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2778 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
(...skipping 329 matching lines...)
3006 } else { 3108 } else {
3007 key = ToRegister(instr->key()); 3109 key = ToRegister(instr->key());
3008 } 3110 }
3009 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3111 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3010 int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) 3112 int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3011 ? (element_size_shift - kSmiTagSize) : element_size_shift; 3113 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3012 int additional_offset = instr->additional_index() << element_size_shift; 3114 int additional_offset = instr->additional_index() << element_size_shift;
3013 3115
3014 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || 3116 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3015 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 3117 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3016 CpuFeatures::Scope scope(VFP3);
3017 DwVfpRegister result = ToDoubleRegister(instr->result()); 3118 DwVfpRegister result = ToDoubleRegister(instr->result());
3018 Operand operand = key_is_constant 3119 Operand operand = key_is_constant
3019 ? Operand(constant_key << element_size_shift) 3120 ? Operand(constant_key << element_size_shift)
3020 : Operand(key, LSL, shift_size); 3121 : Operand(key, LSL, shift_size);
3021 __ add(scratch0(), external_pointer, operand); 3122 __ add(scratch0(), external_pointer, operand);
3022 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 3123 if (CpuFeatures::IsSupported(VFP2)) {
3023 __ vldr(result.low(), scratch0(), additional_offset); 3124 CpuFeatures::Scope scope(VFP2);
3024 __ vcvt_f64_f32(result, result.low()); 3125 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3025 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 3126 __ vldr(result.low(), scratch0(), additional_offset);
3026 __ vldr(result, scratch0(), additional_offset); 3127 __ vcvt_f64_f32(result, result.low());
3128 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3129 __ vldr(result, scratch0(), additional_offset);
3130 }
3131 } else {
3132 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3133 Register value = external_pointer;
3134 __ ldr(value, MemOperand(scratch0(), additional_offset));
3135 __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
3136
3137 __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
3138 __ and_(scratch0(), scratch0(),
3139 Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3140
3141 Label exponent_rebiased;
3142 __ teq(scratch0(), Operand(0x00));
3143 __ b(eq, &exponent_rebiased);
3144
3145 __ teq(scratch0(), Operand(0xff));
3146 __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
3147 __ b(eq, &exponent_rebiased);
3148
3149 // Rebias exponent.
3150 __ add(scratch0(),
3151 scratch0(),
3152 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3153
3154 __ bind(&exponent_rebiased);
3155 __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
3156 __ orr(sfpd_hi, sfpd_hi,
3157 Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
3158
3159 // Shift mantissa.
3160 static const int kMantissaShiftForHiWord =
3161 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3162
3163 static const int kMantissaShiftForLoWord =
3164 kBitsPerInt - kMantissaShiftForHiWord;
3165
3166 __ orr(sfpd_hi, sfpd_hi,
3167 Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
3168 __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
3169
3170 } else {
3171 __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
3172 __ ldr(sfpd_hi, MemOperand(scratch0(),
3173 additional_offset + kPointerSize));
3174 }
3027 } 3175 }
3028 } else { 3176 } else {
3029 Register result = ToRegister(instr->result()); 3177 Register result = ToRegister(instr->result());
3030 MemOperand mem_operand = PrepareKeyedOperand( 3178 MemOperand mem_operand = PrepareKeyedOperand(
3031 key, external_pointer, key_is_constant, constant_key, 3179 key, external_pointer, key_is_constant, constant_key,
3032 element_size_shift, shift_size, 3180 element_size_shift, shift_size,
3033 instr->additional_index(), additional_offset); 3181 instr->additional_index(), additional_offset);
3034 switch (elements_kind) { 3182 switch (elements_kind) {
3035 case EXTERNAL_BYTE_ELEMENTS: 3183 case EXTERNAL_BYTE_ELEMENTS:
3036 __ ldrsb(result, mem_operand); 3184 __ ldrsb(result, mem_operand);
(...skipping 48 matching lines...)
3085 int constant_key = 0; 3233 int constant_key = 0;
3086 if (key_is_constant) { 3234 if (key_is_constant) {
3087 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3235 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3088 if (constant_key & 0xF0000000) { 3236 if (constant_key & 0xF0000000) {
3089 Abort("array index constant value too big."); 3237 Abort("array index constant value too big.");
3090 } 3238 }
3091 } else { 3239 } else {
3092 key = ToRegister(instr->key()); 3240 key = ToRegister(instr->key());
3093 } 3241 }
3094 3242
3095 Operand operand = key_is_constant 3243 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
3096 ? Operand(((constant_key + instr->additional_index()) << 3244 ((constant_key + instr->additional_index()) << element_size_shift);
3097 element_size_shift) +
3098 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3099 : Operand(key, LSL, shift_size);
3100 __ add(elements, elements, operand);
3101 if (!key_is_constant) { 3245 if (!key_is_constant) {
3102 __ add(elements, elements, 3246 __ add(elements, elements, Operand(key, LSL, shift_size));
3103 Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
3104 (instr->additional_index() << element_size_shift)));
3105 } 3247 }
3106 3248 if (CpuFeatures::IsSupported(VFP2)) {
3107 __ vldr(result, elements, 0); 3249 CpuFeatures::Scope scope(VFP2);
3108 if (instr->hydrogen()->RequiresHoleCheck()) { 3250 __ add(elements, elements, Operand(base_offset));
3109 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); 3251 __ vldr(result, elements, 0);
3110 __ cmp(scratch, Operand(kHoleNanUpper32)); 3252 if (instr->hydrogen()->RequiresHoleCheck()) {
3111 DeoptimizeIf(eq, instr->environment()); 3253 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
3254 __ cmp(scratch, Operand(kHoleNanUpper32));
3255 DeoptimizeIf(eq, instr->environment());
3256 }
3257 } else {
3258 __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
3259 __ ldr(sfpd_lo, MemOperand(elements, base_offset));
3260 if (instr->hydrogen()->RequiresHoleCheck()) {
3261 ASSERT(kPointerSize == sizeof(kHoleNanLower32));
3262 __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
3263 DeoptimizeIf(eq, instr->environment());
3264 }
3112 } 3265 }
3113 } 3266 }
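
Both keyed-load paths above gain a pre-VFP2 fallback that loads the raw double words into the sfpd_lo/sfpd_hi pair, and the external float case additionally widens binary32 to binary64 by hand: mask off the mantissa, rebias the exponent from 127 to 1023 (saturating the all-ones Inf/NaN exponent to 0x7FF), and redistribute the 23 mantissa bits across the two words. The same computation in portable form (a sketch; like the assembly, it handles zeros and Inf/NaN but not binary32 denormals):

#include <cstdint>

static uint64_t Binary32ToBinary64Bits(uint32_t f) {
  uint64_t sign = static_cast<uint64_t>(f & 0x80000000u) << 32;
  uint32_t exp32 = (f >> 23) & 0xFFu;
  uint64_t mantissa = f & 0x007FFFFFu;
  uint64_t exp64;
  if (exp32 == 0xFFu) {
    exp64 = 0x7FF;               // Inf/NaN: keep the all-ones exponent.
  } else if (exp32 == 0u) {
    exp64 = 0;                   // +/- 0 (float denormals not handled).
  } else {
    exp64 = exp32 - 127 + 1023;  // Rebias binary32 -> binary64.
  }
  return sign | (exp64 << 52) | (mantissa << 29);  // 23 -> 52 mantissa bits.
}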
3114 3267
3115 3268
3116 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3269 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3117 Register elements = ToRegister(instr->elements()); 3270 Register elements = ToRegister(instr->elements());
3118 Register result = ToRegister(instr->result()); 3271 Register result = ToRegister(instr->result());
3119 Register scratch = scratch0(); 3272 Register scratch = scratch0();
3120 Register store_base = scratch; 3273 Register store_base = scratch;
3121 int offset = 0; 3274 int offset = 0;
(...skipping 415 matching lines...)
3537 // We can make rsb conditional because the previous cmp instruction 3690 // We can make rsb conditional because the previous cmp instruction
3538 // will clear the V (overflow) flag and rsb won't set this flag 3691 // will clear the V (overflow) flag and rsb won't set this flag
3539 // if input is positive. 3692 // if input is positive.
3540 __ rsb(result, input, Operand(0), SetCC, mi); 3693 __ rsb(result, input, Operand(0), SetCC, mi);
3541 // Deoptimize on overflow. 3694 // Deoptimize on overflow.
3542 DeoptimizeIf(vs, instr->environment()); 3695 DeoptimizeIf(vs, instr->environment());
3543 } 3696 }
3544 3697
3545 3698
3546 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { 3699 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3700 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3701 CpuFeatures::Scope scope(VFP2);
3547 // Class for deferred case. 3702 // Class for deferred case.
3548 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 3703 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3549 public: 3704 public:
3550 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 3705 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3551 LUnaryMathOperation* instr) 3706 LUnaryMathOperation* instr)
3552 : LDeferredCode(codegen), instr_(instr) { } 3707 : LDeferredCode(codegen), instr_(instr) { }
3553 virtual void Generate() { 3708 virtual void Generate() {
3554 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3709 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3555 } 3710 }
3556 virtual LInstruction* instr() { return instr_; } 3711 virtual LInstruction* instr() { return instr_; }
(...skipping 16 matching lines...)
3573 // Smi check. 3728 // Smi check.
3574 __ JumpIfNotSmi(input, deferred->entry()); 3729 __ JumpIfNotSmi(input, deferred->entry());
3575 // If smi, handle it directly. 3730 // If smi, handle it directly.
3576 EmitIntegerMathAbs(instr); 3731 EmitIntegerMathAbs(instr);
3577 __ bind(deferred->exit()); 3732 __ bind(deferred->exit());
3578 } 3733 }
3579 } 3734 }
3580 3735
3581 3736
3582 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 3737 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3583 DoubleRegister input = ToDoubleRegister(instr->value()); 3738 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3739 CpuFeatures::Scope scope(VFP2);
3740 DwVfpRegister input = ToDoubleRegister(instr->value());
3584 Register result = ToRegister(instr->result()); 3741 Register result = ToRegister(instr->result());
3585 Register scratch = scratch0(); 3742 Register scratch = scratch0();
3586 3743
3587 __ EmitVFPTruncate(kRoundToMinusInf, 3744 __ EmitVFPTruncate(kRoundToMinusInf,
3588 result, 3745 result,
3589 input, 3746 input,
3590 scratch, 3747 scratch,
3591 double_scratch0()); 3748 double_scratch0());
3592 DeoptimizeIf(ne, instr->environment()); 3749 DeoptimizeIf(ne, instr->environment());
3593 3750
3594 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3751 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3595 // Test for -0. 3752 // Test for -0.
3596 Label done; 3753 Label done;
3597 __ cmp(result, Operand(0)); 3754 __ cmp(result, Operand(0));
3598 __ b(ne, &done); 3755 __ b(ne, &done);
3599 __ vmov(scratch, input.high()); 3756 __ vmov(scratch, input.high());
3600 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3757 __ tst(scratch, Operand(HeapNumber::kSignMask));
3601 DeoptimizeIf(ne, instr->environment()); 3758 DeoptimizeIf(ne, instr->environment());
3602 __ bind(&done); 3759 __ bind(&done);
3603 } 3760 }
3604 } 3761 }
3605 3762
3606 3763
3607 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3764 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3608 DoubleRegister input = ToDoubleRegister(instr->value()); 3765 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3766 CpuFeatures::Scope scope(VFP2);
3767 DwVfpRegister input = ToDoubleRegister(instr->value());
3609 Register result = ToRegister(instr->result()); 3768 Register result = ToRegister(instr->result());
3610 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3769 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3611 Register scratch = scratch0(); 3770 Register scratch = scratch0();
3612 Label done, check_sign_on_zero; 3771 Label done, check_sign_on_zero;
3613 3772
3614 // Extract exponent bits. 3773 // Extract exponent bits.
3615 __ vmov(result, input.high()); 3774 __ vmov(result, input.high());
3616 __ ubfx(scratch, 3775 __ ubfx(scratch,
3617 result, 3776 result,
3618 HeapNumber::kExponentShift, 3777 HeapNumber::kExponentShift,
(...skipping 44 matching lines...)
3663 __ bind(&check_sign_on_zero); 3822 __ bind(&check_sign_on_zero);
3664 __ vmov(scratch, input.high()); 3823 __ vmov(scratch, input.high());
3665 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3824 __ tst(scratch, Operand(HeapNumber::kSignMask));
3666 DeoptimizeIf(ne, instr->environment()); 3825 DeoptimizeIf(ne, instr->environment());
3667 } 3826 }
3668 __ bind(&done); 3827 __ bind(&done);
3669 } 3828 }
3670 3829
3671 3830
3672 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3831 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3673 DoubleRegister input = ToDoubleRegister(instr->value()); 3832 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3674 DoubleRegister result = ToDoubleRegister(instr->result()); 3833 CpuFeatures::Scope scope(VFP2);
3834 DwVfpRegister input = ToDoubleRegister(instr->value());
3835 DwVfpRegister result = ToDoubleRegister(instr->result());
3675 __ vsqrt(result, input); 3836 __ vsqrt(result, input);
3676 } 3837 }
3677 3838
3678 3839
3679 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { 3840 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3680 DoubleRegister input = ToDoubleRegister(instr->value()); 3841 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3681 DoubleRegister result = ToDoubleRegister(instr->result()); 3842 CpuFeatures::Scope scope(VFP2);
3682 DoubleRegister temp = ToDoubleRegister(instr->temp()); 3843 DwVfpRegister input = ToDoubleRegister(instr->value());
3844 DwVfpRegister result = ToDoubleRegister(instr->result());
3845 DwVfpRegister temp = ToDoubleRegister(instr->temp());
3683 3846
3684 // Note that according to ECMA-262 15.8.2.13: 3847 // Note that according to ECMA-262 15.8.2.13:
3685 // Math.pow(-Infinity, 0.5) == Infinity 3848 // Math.pow(-Infinity, 0.5) == Infinity
3686 // Math.sqrt(-Infinity) == NaN 3849 // Math.sqrt(-Infinity) == NaN
3687 Label done; 3850 Label done;
3688 __ vmov(temp, -V8_INFINITY, scratch0()); 3851 __ vmov(temp, -V8_INFINITY, scratch0());
3689 __ VFPCompareAndSetFlags(input, temp); 3852 __ VFPCompareAndSetFlags(input, temp);
3690 __ vneg(result, temp, eq); 3853 __ vneg(result, temp, eq);
3691 __ b(&done, eq); 3854 __ b(&done, eq);
3692 3855
3693 // Add +0 to convert -0 to +0. 3856 // Add +0 to convert -0 to +0.
3694 __ vadd(result, input, kDoubleRegZero); 3857 __ vadd(result, input, kDoubleRegZero);
3695 __ vsqrt(result, result); 3858 __ vsqrt(result, result);
3696 __ bind(&done); 3859 __ bind(&done);
3697 } 3860 }
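
The -V8_INFINITY compare and the +0 add above encode two IEEE corner cases from ECMA-262 15.8.2.13: Math.pow(-Infinity, 0.5) is +Infinity even though sqrt(-Infinity) is NaN, and Math.pow(-0, 0.5) is +0 even though sqrt(-0) is -0. A scalar sketch of the same logic:

#include <cmath>
#include <limits>

static double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity()) {
    return std::numeric_limits<double>::infinity();  // pow(-Inf, 0.5) == +Inf
  }
  // Adding +0 turns -0 into +0 under round-to-nearest, so vsqrt/sqrt then
  // yields +0 rather than -0.
  return std::sqrt(x + 0.0);
}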
3698 3861
3699 3862
3700 void LCodeGen::DoPower(LPower* instr) { 3863 void LCodeGen::DoPower(LPower* instr) {
3864 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3865 CpuFeatures::Scope scope(VFP2);
3701 Representation exponent_type = instr->hydrogen()->right()->representation(); 3866 Representation exponent_type = instr->hydrogen()->right()->representation();
3702 // Having marked this as a call, we can use any registers. 3867 // Having marked this as a call, we can use any registers.
3703 // Just make sure that the input/output registers are the expected ones. 3868 // Just make sure that the input/output registers are the expected ones.
3704 ASSERT(!instr->right()->IsDoubleRegister() || 3869 ASSERT(!instr->right()->IsDoubleRegister() ||
3705 ToDoubleRegister(instr->right()).is(d2)); 3870 ToDoubleRegister(instr->right()).is(d2));
3706 ASSERT(!instr->right()->IsRegister() || 3871 ASSERT(!instr->right()->IsRegister() ||
3707 ToRegister(instr->right()).is(r2)); 3872 ToRegister(instr->right()).is(r2));
3708 ASSERT(ToDoubleRegister(instr->left()).is(d1)); 3873 ASSERT(ToDoubleRegister(instr->left()).is(d1));
3709 ASSERT(ToDoubleRegister(instr->result()).is(d3)); 3874 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3710 3875
(...skipping 12 matching lines...)
3723 __ CallStub(&stub); 3888 __ CallStub(&stub);
3724 } else { 3889 } else {
3725 ASSERT(exponent_type.IsDouble()); 3890 ASSERT(exponent_type.IsDouble());
3726 MathPowStub stub(MathPowStub::DOUBLE); 3891 MathPowStub stub(MathPowStub::DOUBLE);
3727 __ CallStub(&stub); 3892 __ CallStub(&stub);
3728 } 3893 }
3729 } 3894 }
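DoPower dispatches on the exponent's representation because an integer exponent admits a much cheaper loop than the general double case. A hedged sketch of that idea, exponentiation by squaring (illustration only, not the MathPowStub code):

#include <cstdio>

// Computes base^exp in O(log |exp|) multiplications.
static double PowInt(double base, int exp) {
  unsigned n = exp < 0 ? -static_cast<unsigned>(exp) : static_cast<unsigned>(exp);
  double result = 1.0;
  for (double b = base; n != 0; n >>= 1, b *= b) {
    if (n & 1) result *= b;
  }
  return exp < 0 ? 1.0 / result : result;
}

int main() { std::printf("%f\n", PowInt(2.0, 10)); }  // 1024.000000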
3730 3895
3731 3896
3732 void LCodeGen::DoRandom(LRandom* instr) { 3897 void LCodeGen::DoRandom(LRandom* instr) {
3898 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3899 CpuFeatures::Scope scope(VFP2);
3733 class DeferredDoRandom: public LDeferredCode { 3900 class DeferredDoRandom: public LDeferredCode {
3734 public: 3901 public:
3735 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3902 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3736 : LDeferredCode(codegen), instr_(instr) { } 3903 : LDeferredCode(codegen), instr_(instr) { }
3737 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3904 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3738 virtual LInstruction* instr() { return instr_; } 3905 virtual LInstruction* instr() { return instr_; }
3739 private: 3906 private:
3740 LRandom* instr_; 3907 LRandom* instr_;
3741 }; 3908 };
3742 3909
(...skipping 58 matching lines...)
3801 3968
3802 3969
3803 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3970 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3804 __ PrepareCallCFunction(1, scratch0()); 3971 __ PrepareCallCFunction(1, scratch0());
3805 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3972 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3806 // Return value is in r0. 3973 // Return value is in r0.
3807 } 3974 }
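The DeferredDoRandom/DoDeferredRandom pair above follows the general LDeferredCode pattern: the common case is emitted inline, and rarely-taken work is placed out of line, entered by a branch and returning to a bound exit label. A conceptual C++ analogy (names invented for illustration; the real mechanism emits machine code, not C++ calls):

#include <cstdio>

static int SlowPath(int x) {   // out-of-line code, e.g. a C function call
  return -x;
}

static int FastOrDeferred(int x) {
  if (x >= 0) return x + 1;    // inline fast path
  return SlowPath(x);          // deferred->entry() ... deferred->exit()
}

int main() { std::printf("%d %d\n", FastOrDeferred(3), FastOrDeferred(-3)); }  // 4 3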
3808 3975
3809 3976
3810 void LCodeGen::DoMathExp(LMathExp* instr) { 3977 void LCodeGen::DoMathExp(LMathExp* instr) {
3811 DoubleRegister input = ToDoubleRegister(instr->value()); 3978 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
3812 DoubleRegister result = ToDoubleRegister(instr->result()); 3979 CpuFeatures::Scope scope(VFP2);
3813 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3980 DwVfpRegister input = ToDoubleRegister(instr->value());
3814 DoubleRegister double_scratch2 = double_scratch0(); 3981 DwVfpRegister result = ToDoubleRegister(instr->result());
3982 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3983 DwVfpRegister double_scratch2 = double_scratch0();
3815 Register temp1 = ToRegister(instr->temp1()); 3984 Register temp1 = ToRegister(instr->temp1());
3816 Register temp2 = ToRegister(instr->temp2()); 3985 Register temp2 = ToRegister(instr->temp2());
3817 3986
3818 MathExpGenerator::EmitMathExp( 3987 MathExpGenerator::EmitMathExp(
3819 masm(), input, result, double_scratch1, double_scratch2, 3988 masm(), input, result, double_scratch1, double_scratch2,
3820 temp1, temp2, scratch0()); 3989 temp1, temp2, scratch0());
3821 } 3990 }
3822 3991
3823 3992
3824 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 3993 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
(...skipping 265 matching lines...)
4090 } 4259 }
4091 __ cmp(ip, ToRegister(instr->length())); 4260 __ cmp(ip, ToRegister(instr->length()));
4092 } else { 4261 } else {
4093 __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); 4262 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
4094 } 4263 }
4095 DeoptimizeIf(hs, instr->environment()); 4264 DeoptimizeIf(hs, instr->environment());
4096 } 4265 }
4097 4266
4098 4267
4099 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4268 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4269 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4270 CpuFeatures::Scope scope(VFP2);
4100 Register external_pointer = ToRegister(instr->elements()); 4271 Register external_pointer = ToRegister(instr->elements());
4101 Register key = no_reg; 4272 Register key = no_reg;
4102 ElementsKind elements_kind = instr->elements_kind(); 4273 ElementsKind elements_kind = instr->elements_kind();
4103 bool key_is_constant = instr->key()->IsConstantOperand(); 4274 bool key_is_constant = instr->key()->IsConstantOperand();
4104 int constant_key = 0; 4275 int constant_key = 0;
4105 if (key_is_constant) { 4276 if (key_is_constant) {
4106 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4277 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4107 if (constant_key & 0xF0000000) { 4278 if (constant_key & 0xF0000000) {
4108 Abort("array index constant value too big."); 4279 Abort("array index constant value too big.");
4109 } 4280 }
(...skipping 50 matching lines...)
4160 case DICTIONARY_ELEMENTS: 4331 case DICTIONARY_ELEMENTS:
4161 case NON_STRICT_ARGUMENTS_ELEMENTS: 4332 case NON_STRICT_ARGUMENTS_ELEMENTS:
4162 UNREACHABLE(); 4333 UNREACHABLE();
4163 break; 4334 break;
4164 } 4335 }
4165 } 4336 }
4166 } 4337 }
4167 4338
4168 4339
4169 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4340 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4341 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4342 CpuFeatures::Scope scope(VFP2);
4170 DwVfpRegister value = ToDoubleRegister(instr->value()); 4343 DwVfpRegister value = ToDoubleRegister(instr->value());
4171 Register elements = ToRegister(instr->elements()); 4344 Register elements = ToRegister(instr->elements());
4172 Register key = no_reg; 4345 Register key = no_reg;
4173 Register scratch = scratch0(); 4346 Register scratch = scratch0();
4174 bool key_is_constant = instr->key()->IsConstantOperand(); 4347 bool key_is_constant = instr->key()->IsConstantOperand();
4175 int constant_key = 0; 4348 int constant_key = 0;
4176 4349
4177 // Calculate the effective address of the slot in the array to store the 4350 // Calculate the effective address of the slot in the array to store the
4178 // double value. 4351 // double value.
4179 if (key_is_constant) { 4352 if (key_is_constant) {
(...skipping 256 matching lines...)
4436 4609
4437 4610
4438 void LCodeGen::DoStringLength(LStringLength* instr) { 4611 void LCodeGen::DoStringLength(LStringLength* instr) {
4439 Register string = ToRegister(instr->string()); 4612 Register string = ToRegister(instr->string());
4440 Register result = ToRegister(instr->result()); 4613 Register result = ToRegister(instr->result());
4441 __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); 4614 __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4442 } 4615 }
4443 4616
4444 4617
4445 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4618 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4619 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4620 CpuFeatures::Scope scope(VFP2);
4446 LOperand* input = instr->value(); 4621 LOperand* input = instr->value();
4447 ASSERT(input->IsRegister() || input->IsStackSlot()); 4622 ASSERT(input->IsRegister() || input->IsStackSlot());
4448 LOperand* output = instr->result(); 4623 LOperand* output = instr->result();
4449 ASSERT(output->IsDoubleRegister()); 4624 ASSERT(output->IsDoubleRegister());
4450 SwVfpRegister single_scratch = double_scratch0().low(); 4625 SwVfpRegister single_scratch = double_scratch0().low();
4451 if (input->IsStackSlot()) { 4626 if (input->IsStackSlot()) {
4452 Register scratch = scratch0(); 4627 Register scratch = scratch0();
4453 __ ldr(scratch, ToMemOperand(input)); 4628 __ ldr(scratch, ToMemOperand(input));
4454 __ vmov(single_scratch, scratch); 4629 __ vmov(single_scratch, scratch);
4455 } else { 4630 } else {
4456 __ vmov(single_scratch, ToRegister(input)); 4631 __ vmov(single_scratch, ToRegister(input));
4457 } 4632 }
4458 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); 4633 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4459 } 4634 }
4460 4635
4461 4636
4462 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4637 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4638 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4639 CpuFeatures::Scope scope(VFP2);
4463 LOperand* input = instr->value(); 4640 LOperand* input = instr->value();
4464 LOperand* output = instr->result(); 4641 LOperand* output = instr->result();
4465 4642
4466 SwVfpRegister flt_scratch = double_scratch0().low(); 4643 SwVfpRegister flt_scratch = double_scratch0().low();
4467 __ vmov(flt_scratch, ToRegister(input)); 4644 __ vmov(flt_scratch, ToRegister(input));
4468 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); 4645 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4469 } 4646 }
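The two conversions above differ once the top bit is set, which is why LInteger32ToDouble (vcvt_f64_s32) and LUint32ToDouble (vcvt_f64_u32) are separate instructions. A standalone sketch of the distinction:

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t bits = 0x80000000u;  // same 32 bits, two interpretations
  double as_signed = static_cast<double>(static_cast<int32_t>(bits));
  double as_unsigned = static_cast<double>(bits);
  std::printf("%.0f %.0f\n", as_signed, as_unsigned);  // -2147483648 2147483648
}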
4470 4647
4471 4648
4472 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4649 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
(...skipping 41 matching lines...)
4514 Register reg = ToRegister(input); 4691 Register reg = ToRegister(input);
4515 4692
4516 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4693 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4517 __ cmp(reg, Operand(Smi::kMaxValue)); 4694 __ cmp(reg, Operand(Smi::kMaxValue));
4518 __ b(hi, deferred->entry()); 4695 __ b(hi, deferred->entry());
4519 __ SmiTag(reg, reg); 4696 __ SmiTag(reg, reg);
4520 __ bind(deferred->exit()); 4697 __ bind(deferred->exit());
4521 } 4698 }
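A sketch of the tagging test above, assuming 32-bit smis (a 31-bit payload shifted left one, low tag bit clear, so Smi::kMaxValue is 2^30 - 1); values beyond that take the deferred heap-number path:

#include <cstdint>
#include <cstdio>

const int32_t kSmiMaxValue = (1 << 30) - 1;  // assumed 32-bit Smi::kMaxValue

static bool TryTagSmi(uint32_t value, int32_t* smi) {
  if (value > static_cast<uint32_t>(kSmiMaxValue)) return false;  // b(hi, deferred)
  *smi = static_cast<int32_t>(value << 1);                        // SmiTag
  return true;
}

int main() {
  int32_t smi;
  std::printf("%d\n", TryTagSmi(42u, &smi) ? smi : -1);           // 84
  std::printf("%d\n", TryTagSmi(0x7FFFFFFFu, &smi) ? smi : -1);   // -1
}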
4522 4699
4523 4700
4701 // Convert unsigned integer with specified number of leading zeroes in binary
4702 // representation to IEEE 754 double.
4703 // Integer to convert is passed in register hiword.
4704 // Resulting double is returned in registers hiword:loword.
4706 // This function does not work correctly for 0.
4706 static void GenerateUInt2Double(MacroAssembler* masm,
4707 Register hiword,
4708 Register loword,
4709 Register scratch,
4710 int leading_zeroes) {
4711 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
4712 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
4713
4714 const int mantissa_shift_for_hi_word =
4715 meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
4716 const int mantissa_shift_for_lo_word =
4717 kBitsPerInt - mantissa_shift_for_hi_word;
4718 masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
4719 if (mantissa_shift_for_hi_word > 0) {
4720 masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
4721 masm->orr(hiword, scratch,
4722 Operand(hiword, LSR, mantissa_shift_for_hi_word));
4723 } else {
4724 masm->mov(loword, Operand(0, RelocInfo::NONE));
4725 masm->orr(hiword, scratch,
4726 Operand(hiword, LSL, -mantissa_shift_for_hi_word));
Jakob Kummerow 2012/11/28 16:28:22 I can't convince myself that this is right. I thin
danno 2012/11/30 16:23:24 Done.
4727 }
4728
4729 // If the least significant bit of the biased exponent was not 1, it was
4730 // corrupted by the most significant bit of the mantissa, so fix it here.
4731 if (!(biased_exponent & 1)) {
4732 masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
4733 }
4734 }
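A standalone C++ model of GenerateUInt2Double, useful for convincing oneself the bit manipulation is right when the exact leading-zero count is known (0 or 1 at the call sites below, so mantissa_shift_for_hi_word is always positive). The constants mirror the assumed HeapNumber layout: 20 mantissa bits in the top word, exponent bias 1023, exponent shift 20:

#include <cstdint>
#include <cstdio>
#include <cstring>

static double UInt2Double(uint32_t value, int leading_zeroes) {
  const int kExponentShift = 20;
  const int kExponentBias = 1023;
  const int kMantissaBitsInTopWord = 20;
  int meaningful_bits = 32 - leading_zeroes - 1;
  uint32_t biased_exponent = static_cast<uint32_t>(kExponentBias + meaningful_bits);
  int hi_shift = meaningful_bits - kMantissaBitsInTopWord;  // positive here
  uint32_t lo = value << (32 - hi_shift);
  // The integer's leading 1 lands on the exponent's low bit and is OR'ed in.
  uint32_t hi = (biased_exponent << kExponentShift) | (value >> hi_shift);
  // Undo the corruption when the exponent's low bit should have been 0 (bic).
  if (!(biased_exponent & 1)) hi &= ~(1u << kExponentShift);
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d;
}

int main() {
  const uint32_t vals[] = {0x80000001u, 0xFFFFFFFFu, 0x40000000u, 0x7FFFFFFFu};
  for (uint32_t v : vals) {
    int lz = (v & 0x80000000u) ? 0 : 1;  // exact count for values above Smi max
    std::printf("%u -> %.0f (expect %.0f)\n",
                v, UInt2Double(v, lz), static_cast<double>(v));
  }
}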
4735
4736
4524 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, 4737 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4525 LOperand* value, 4738 LOperand* value,
4526 IntegerSignedness signedness) { 4739 IntegerSignedness signedness) {
4527 Label slow; 4740 Label slow;
4528 Register src = ToRegister(value); 4741 Register src = ToRegister(value);
4529 Register dst = ToRegister(instr->result()); 4742 Register dst = ToRegister(instr->result());
4530 DoubleRegister dbl_scratch = double_scratch0(); 4743 DwVfpRegister dbl_scratch = double_scratch0();
4531 SwVfpRegister flt_scratch = dbl_scratch.low(); 4744 SwVfpRegister flt_scratch = dbl_scratch.low();
4532 4745
4533 // Preserve the value of all registers. 4746 // Preserve the value of all registers.
4534 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4747 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4535 4748
4536 Label done; 4749 Label done;
4537 if (signedness == SIGNED_INT32) { 4750 if (signedness == SIGNED_INT32) {
4538 // There was overflow, so bits 30 and 31 of the original integer 4751 // There was overflow, so bits 30 and 31 of the original integer
4539 // disagree. Try to allocate a heap number in new space and store 4752 // disagree. Try to allocate a heap number in new space and store
4540 // the value in there. If that fails, call the runtime system. 4753 // the value in there. If that fails, call the runtime system.
4541 if (dst.is(src)) { 4754 if (dst.is(src)) {
4542 __ SmiUntag(src, dst); 4755 __ SmiUntag(src, dst);
4543 __ eor(src, src, Operand(0x80000000)); 4756 __ eor(src, src, Operand(0x80000000));
4544 } 4757 }
4545 __ vmov(flt_scratch, src); 4758 if (CpuFeatures::IsSupported(VFP2)) {
4546 __ vcvt_f64_s32(dbl_scratch, flt_scratch); 4759 CpuFeatures::Scope scope(VFP2);
4760 __ vmov(flt_scratch, src);
4761 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4762 } else {
4763 FloatingPointHelper::Destination dest =
4764 FloatingPointHelper::kCoreRegisters;
4765 FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
4766 sfpd_lo, sfpd_hi,
4767 scratch0(), s0);
4768 }
4547 } else { 4769 } else {
4548 __ vmov(flt_scratch, src); 4770 if (CpuFeatures::IsSupported(VFP2)) {
4549 __ vcvt_f64_u32(dbl_scratch, flt_scratch); 4771 CpuFeatures::Scope scope(VFP2);
4772 __ vmov(flt_scratch, src);
4773 __ vcvt_f64_u32(dbl_scratch, flt_scratch);
4774 } else {
4775 Label no_leading_zero, done;
4776 __ tst(src, Operand(0x80000000));
4777 __ b(ne, &no_leading_zero);
4778
4779 // Integer has exactly one leading zero.
4780 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
4781 __ b(&done);
4782
4783 __ bind(&no_leading_zero);
4784 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
4785 __ bind(&done);
4786 }
4550 } 4787 }
4551 4788
4552 if (FLAG_inline_new) { 4789 if (FLAG_inline_new) {
4553 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); 4790 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
4554 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); 4791 __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
4555 __ Move(dst, r5); 4792 __ Move(dst, r5);
4556 __ b(&done); 4793 __ b(&done);
4557 } 4794 }
4558 4795
4559 // Slow case: Call the runtime system to do the number allocation. 4796 // Slow case: Call the runtime system to do the number allocation.
4560 __ bind(&slow); 4797 __ bind(&slow);
4561 4798
4562 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4799 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4563 // register is stored, as this register is in the pointer map, but contains an 4800 // register is stored, as this register is in the pointer map, but contains an
4564 // integer value. 4801 // integer value.
4565 __ mov(ip, Operand(0)); 4802 __ mov(ip, Operand(0));
4566 __ StoreToSafepointRegisterSlot(ip, dst); 4803 __ StoreToSafepointRegisterSlot(ip, dst);
4567 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4804 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4568 __ Move(dst, r0); 4805 __ Move(dst, r0);
4569 __ sub(dst, dst, Operand(kHeapObjectTag)); 4806 __ sub(dst, dst, Operand(kHeapObjectTag));
4570 4807
4571 // Done. Put the value in dbl_scratch into the value of the allocated heap 4808 // Done. Put the value in dbl_scratch into the value of the allocated heap
4572 // number. 4809 // number.
4573 __ bind(&done); 4810 __ bind(&done);
4574 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4811 if (CpuFeatures::IsSupported(VFP2)) {
4812 CpuFeatures::Scope scope(VFP2);
4813 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4814 } else {
4815 __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
4816 __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
4817 }
4575 __ add(dst, dst, Operand(kHeapObjectTag)); 4818 __ add(dst, dst, Operand(kHeapObjectTag));
4576 __ StoreToSafepointRegisterSlot(dst, dst); 4819 __ StoreToSafepointRegisterSlot(dst, dst);
4577 } 4820 }
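The non-VFP2 store at the end of the function writes the double as two 32-bit words. A sketch of the word split it relies on (on little-endian ARM the mantissa word is the low half and the sign/exponent word the high half; kMantissaOffset/kExponentOffset are assumed to name those two slots):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  double d = 1.5;  // IEEE 754 bits: 0x3FF8000000000000
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t lo = static_cast<uint32_t>(bits);        // sfpd_lo -> kMantissaOffset
  uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sfpd_hi -> kExponentOffset
  std::printf("lo=0x%08X hi=0x%08X\n", lo, hi);     // lo=0x00000000 hi=0x3FF80000
}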
4578 4821
4579 4822
4580 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4823 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4581 class DeferredNumberTagD: public LDeferredCode { 4824 class DeferredNumberTagD: public LDeferredCode {
4582 public: 4825 public:
4583 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4826 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4584 : LDeferredCode(codegen), instr_(instr) { } 4827 : LDeferredCode(codegen), instr_(instr) { }
4585 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4828 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4586 virtual LInstruction* instr() { return instr_; } 4829 virtual LInstruction* instr() { return instr_; }
4587 private: 4830 private:
4588 LNumberTagD* instr_; 4831 LNumberTagD* instr_;
4589 }; 4832 };
4590 4833
4591 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 4834 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4592 Register scratch = scratch0(); 4835 Register scratch = scratch0();
4593 Register reg = ToRegister(instr->result()); 4836 Register reg = ToRegister(instr->result());
4594 Register temp1 = ToRegister(instr->temp()); 4837 Register temp1 = ToRegister(instr->temp());
4595 Register temp2 = ToRegister(instr->temp2()); 4838 Register temp2 = ToRegister(instr->temp2());
4596 4839
4597 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4840 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4598 if (FLAG_inline_new) { 4841 if (FLAG_inline_new) {
4599 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4842 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4600 // We want the untagged address first for performance 4843 // We want the untagged address first for performance
4601 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), 4844 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4602 DONT_TAG_RESULT); 4845 DONT_TAG_RESULT);
4603 } else { 4846 } else {
4604 __ jmp(deferred->entry()); 4847 __ jmp(deferred->entry());
4605 } 4848 }
4606 __ bind(deferred->exit()); 4849 __ bind(deferred->exit());
4607 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 4850 if (CpuFeatures::IsSupported(VFP2)) {
4851 CpuFeatures::Scope scope(VFP2);
4852 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4853 } else {
4854 __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
4855 __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
4856 }
4608 // Now that we have finished with the object's real address, tag it. 4857 // Now that we have finished with the object's real address, tag it.
4609 __ add(reg, reg, Operand(kHeapObjectTag)); 4858 __ add(reg, reg, Operand(kHeapObjectTag));
4610 } 4859 }
4611 4860
4612 4861
4613 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4862 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4614 // TODO(3095996): Get rid of this. For now, we need to make the 4863 // TODO(3095996): Get rid of this. For now, we need to make the
4615 // result register contain a valid pointer because it is already 4864 // result register contain a valid pointer because it is already
4616 // contained in the register pointer map. 4865 // contained in the register pointer map.
4617 Register reg = ToRegister(instr->result()); 4866 Register reg = ToRegister(instr->result());
(...skipping 20 matching lines...)
4638 // If the input is a HeapObject, SmiUntag will set the carry flag. 4887 // If the input is a HeapObject, SmiUntag will set the carry flag.
4639 __ SmiUntag(result, input, SetCC); 4888 __ SmiUntag(result, input, SetCC);
4640 DeoptimizeIf(cs, instr->environment()); 4889 DeoptimizeIf(cs, instr->environment());
4641 } else { 4890 } else {
4642 __ SmiUntag(result, input); 4891 __ SmiUntag(result, input);
4643 } 4892 }
4644 } 4893 }
4645 4894
4646 4895
4647 void LCodeGen::EmitNumberUntagD(Register input_reg, 4896 void LCodeGen::EmitNumberUntagD(Register input_reg,
4648 DoubleRegister result_reg, 4897 DwVfpRegister result_reg,
4649 bool deoptimize_on_undefined, 4898 bool deoptimize_on_undefined,
4650 bool deoptimize_on_minus_zero, 4899 bool deoptimize_on_minus_zero,
4651 LEnvironment* env) { 4900 LEnvironment* env) {
4652 Register scratch = scratch0(); 4901 Register scratch = scratch0();
4653 SwVfpRegister flt_scratch = double_scratch0().low(); 4902 SwVfpRegister flt_scratch = double_scratch0().low();
4654 ASSERT(!result_reg.is(double_scratch0())); 4903 ASSERT(!result_reg.is(double_scratch0()));
4904 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4905 CpuFeatures::Scope scope(VFP2);
4655 4906
4656 Label load_smi, heap_number, done; 4907 Label load_smi, heap_number, done;
4657 4908
4658 // Smi check. 4909 // Smi check.
4659 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4910 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4660 4911
4661 // Heap number map check. 4912 // Heap number map check.
4662 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4913 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4663 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4914 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4664 __ cmp(scratch, Operand(ip)); 4915 __ cmp(scratch, Operand(ip));
(...skipping 54 matching lines...)
4719 // SmiUntag(heap_object, SetCC) 4970 // SmiUntag(heap_object, SetCC)
4720 STATIC_ASSERT(kHeapObjectTag == 1); 4971 STATIC_ASSERT(kHeapObjectTag == 1);
4721 __ adc(input_reg, input_reg, Operand(input_reg)); 4972 __ adc(input_reg, input_reg, Operand(input_reg));
4722 4973
4723 // Heap number map check. 4974 // Heap number map check.
4724 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4975 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4725 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4976 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4726 __ cmp(scratch1, Operand(ip)); 4977 __ cmp(scratch1, Operand(ip));
4727 4978
4728 if (instr->truncating()) { 4979 if (instr->truncating()) {
4980 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
4981 CpuFeatures::Scope scope(VFP2);
4729 Register scratch3 = ToRegister(instr->temp2()); 4982 Register scratch3 = ToRegister(instr->temp2());
4730 SwVfpRegister single_scratch = double_scratch.low(); 4983 SwVfpRegister single_scratch = double_scratch.low();
4731 ASSERT(!scratch3.is(input_reg) && 4984 ASSERT(!scratch3.is(input_reg) &&
4732 !scratch3.is(scratch1) && 4985 !scratch3.is(scratch1) &&
4733 !scratch3.is(scratch2)); 4986 !scratch3.is(scratch2));
4734 // Performs a truncating conversion of a floating point number as used by 4987 // Performs a truncating conversion of a floating point number as used by
4735 // the JS bitwise operations. 4988 // the JS bitwise operations.
4736 Label heap_number; 4989 Label heap_number;
4737 __ b(eq, &heap_number); 4990 __ b(eq, &heap_number);
4738 // Check for undefined. Undefined is converted to zero for truncating 4991 // Check for undefined. Undefined is converted to zero for truncating
(...skipping 71 matching lines...)
4810 } 5063 }
4811 5064
4812 5065
4813 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5066 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4814 LOperand* input = instr->value(); 5067 LOperand* input = instr->value();
4815 ASSERT(input->IsRegister()); 5068 ASSERT(input->IsRegister());
4816 LOperand* result = instr->result(); 5069 LOperand* result = instr->result();
4817 ASSERT(result->IsDoubleRegister()); 5070 ASSERT(result->IsDoubleRegister());
4818 5071
4819 Register input_reg = ToRegister(input); 5072 Register input_reg = ToRegister(input);
4820 DoubleRegister result_reg = ToDoubleRegister(result); 5073 DwVfpRegister result_reg = ToDoubleRegister(result);
4821 5074
4822 EmitNumberUntagD(input_reg, result_reg, 5075 EmitNumberUntagD(input_reg, result_reg,
4823 instr->hydrogen()->deoptimize_on_undefined(), 5076 instr->hydrogen()->deoptimize_on_undefined(),
4824 instr->hydrogen()->deoptimize_on_minus_zero(), 5077 instr->hydrogen()->deoptimize_on_minus_zero(),
4825 instr->environment()); 5078 instr->environment());
4826 } 5079 }
4827 5080
4828 5081
4829 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5082 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4830 Register result_reg = ToRegister(instr->result()); 5083 Register result_reg = ToRegister(instr->result());
(...skipping 128 matching lines...)
4959 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); 5212 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
4960 __ b(eq, &success); 5213 __ b(eq, &success);
4961 } 5214 }
4962 Handle<Map> map = map_set->last(); 5215 Handle<Map> map = map_set->last();
4963 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); 5216 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4964 __ bind(&success); 5217 __ bind(&success);
4965 } 5218 }
4966 5219
4967 5220
4968 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5221 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4969 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 5222 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
5223 CpuFeatures::Scope vfp_scope(VFP2);
5224 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
4970 Register result_reg = ToRegister(instr->result()); 5225 Register result_reg = ToRegister(instr->result());
4971 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5226 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4972 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); 5227 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4973 } 5228 }
4974 5229
4975 5230
4976 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5231 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5232 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
5233 CpuFeatures::Scope scope(VFP2);
4977 Register unclamped_reg = ToRegister(instr->unclamped()); 5234 Register unclamped_reg = ToRegister(instr->unclamped());
4978 Register result_reg = ToRegister(instr->result()); 5235 Register result_reg = ToRegister(instr->result());
4979 __ ClampUint8(result_reg, unclamped_reg); 5236 __ ClampUint8(result_reg, unclamped_reg);
4980 } 5237 }
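For reference, the scalar semantics of ClampUint8 above: saturate a signed 32-bit value into [0, 255] (ARM can express this as a single usat instruction; the sketch below is just the definition, not the macro-assembler code):

#include <cstdint>
#include <cstdio>

static uint8_t ClampUint8(int32_t v) {
  if (v < 0) return 0;
  if (v > 255) return 255;
  return static_cast<uint8_t>(v);
}

int main() {
  std::printf("%d %d %d\n", ClampUint8(-5), ClampUint8(128), ClampUint8(999));
  // prints: 0 128 255
}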
4981 5238
4982 5239
4983 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5240 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5241 ASSERT(CpuFeatures::IsSupported(VFP2));
Jakob Kummerow 2012/11/28 16:28:22 nit: not needed
danno 2012/11/30 16:23:24 Done.
5242 CpuFeatures::Scope scope(VFP2);
4984 Register scratch = scratch0(); 5243 Register scratch = scratch0();
4985 Register input_reg = ToRegister(instr->unclamped()); 5244 Register input_reg = ToRegister(instr->unclamped());
4986 Register result_reg = ToRegister(instr->result()); 5245 Register result_reg = ToRegister(instr->result());
4987 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5246 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4988 Label is_smi, done, heap_number; 5247 Label is_smi, done, heap_number;
4989 5248
4990 // Both smi and heap number cases are handled. 5249 // Both smi and heap number cases are handled.
4991 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5250 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4992 5251
4993 // Check for heap number 5252 // Check for heap number
4994 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5253 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4995 __ cmp(scratch, Operand(factory()->heap_number_map())); 5254 __ cmp(scratch, Operand(factory()->heap_number_map()));
4996 __ b(eq, &heap_number); 5255 __ b(eq, &heap_number);
4997 5256
(...skipping 556 matching lines...)
5554 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); 5813 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5555 5814
5556 // Check the marker in the calling frame. 5815 // Check the marker in the calling frame.
5557 __ bind(&check_frame_marker); 5816 __ bind(&check_frame_marker);
5558 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5817 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5559 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5818 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5560 } 5819 }
5561 5820
5562 5821
5563 void LCodeGen::EnsureSpaceForLazyDeopt() { 5822 void LCodeGen::EnsureSpaceForLazyDeopt() {
5823 if (info()->IsStub()) return;
5564 // Ensure that we have enough space after the previous lazy-bailout 5824 // Ensure that we have enough space after the previous lazy-bailout
5565 // instruction for patching the code here. 5825 // instruction for patching the code here.
5566 int current_pc = masm()->pc_offset(); 5826 int current_pc = masm()->pc_offset();
5567 int patch_size = Deoptimizer::patch_size(); 5827 int patch_size = Deoptimizer::patch_size();
5568 if (current_pc < last_lazy_deopt_pc_ + patch_size) { 5828 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5569 // Block literal pool emission for duration of padding. 5829 // Block literal pool emission for duration of padding.
5570 Assembler::BlockConstPoolScope block_const_pool(masm()); 5830 Assembler::BlockConstPoolScope block_const_pool(masm());
5571 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; 5831 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5572 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); 5832 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5573 while (padding_size > 0) { 5833 while (padding_size > 0) {
(...skipping 211 matching lines...)
5785 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); 6045 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5786 __ ldr(result, FieldMemOperand(scratch, 6046 __ ldr(result, FieldMemOperand(scratch,
5787 FixedArray::kHeaderSize - kPointerSize)); 6047 FixedArray::kHeaderSize - kPointerSize));
5788 __ bind(&done); 6048 __ bind(&done);
5789 } 6049 }
5790 6050
5791 6051
5792 #undef __ 6052 #undef __
5793 6053
5794 } } // namespace v8::internal 6054 } } // namespace v8::internal