Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(273)

Side by Side Diff: src/arm/lithium-codegen-arm.cc

Issue 10701054: Enable stub generation using Hydrogen/Lithium (again) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Merge with latest Created 8 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/lithium-gap-resolver-arm.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after
58 Safepoint::DeoptMode deopt_mode_; 58 Safepoint::DeoptMode deopt_mode_;
59 }; 59 };
60 60
61 61
62 #define __ masm()-> 62 #define __ masm()->
63 63
64 bool LCodeGen::GenerateCode() { 64 bool LCodeGen::GenerateCode() {
65 HPhase phase("Z_Code generation", chunk()); 65 HPhase phase("Z_Code generation", chunk());
66 ASSERT(is_unused()); 66 ASSERT(is_unused());
67 status_ = GENERATING; 67 status_ = GENERATING;
68 CpuFeatures::Scope scope1(VFP3);
69 CpuFeatures::Scope scope2(ARMv7);
70 68
71 CodeStub::GenerateFPStubs(); 69 CodeStub::GenerateFPStubs();
72 70
73 // Open a frame scope to indicate that there is a frame on the stack. The 71 // Open a frame scope to indicate that there is a frame on the stack. The
74 // NONE indicates that the scope shouldn't actually generate code to set up 72 // NONE indicates that the scope shouldn't actually generate code to set up
75 // the frame (that is done in GeneratePrologue). 73 // the frame (that is done in GeneratePrologue).
76 FrameScope frame_scope(masm_, StackFrame::NONE); 74 FrameScope frame_scope(masm_, StackFrame::NONE);
77 75
78 return GeneratePrologue() && 76 return GeneratePrologue() &&
79 GenerateBody() && 77 GenerateBody() &&
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
111 size_t length = builder.position(); 109 size_t length = builder.position();
112 Vector<char> copy = Vector<char>::New(length + 1); 110 Vector<char> copy = Vector<char>::New(length + 1);
113 memcpy(copy.start(), builder.Finalize(), copy.length()); 111 memcpy(copy.start(), builder.Finalize(), copy.length());
114 masm()->RecordComment(copy.start()); 112 masm()->RecordComment(copy.start());
115 } 113 }
116 114
117 115
118 bool LCodeGen::GeneratePrologue() { 116 bool LCodeGen::GeneratePrologue() {
119 ASSERT(is_generating()); 117 ASSERT(is_generating());
120 118
121 ProfileEntryHookStub::MaybeCallEntryHook(masm_); 119 if (info()->IsOptimizing()) {
120 ProfileEntryHookStub::MaybeCallEntryHook(masm_);
122 121
123 #ifdef DEBUG 122 #ifdef DEBUG
124 if (strlen(FLAG_stop_at) > 0 && 123 if (strlen(FLAG_stop_at) > 0 &&
125 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { 124 info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) {
126 __ stop("stop_at"); 125 __ stop("stop_at");
127 } 126 }
128 #endif 127 #endif
129 128
130 // r1: Callee's JS function. 129 // r1: Callee's JS function.
131 // cp: Callee's context. 130 // cp: Callee's context.
132 // fp: Caller's frame pointer. 131 // fp: Caller's frame pointer.
133 // lr: Caller's pc. 132 // lr: Caller's pc.
134 133
135 // Strict mode functions and builtins need to replace the receiver 134 // Strict mode functions and builtins need to replace the receiver
136 // with undefined when called as functions (without an explicit 135 // with undefined when called as functions (without an explicit
137 // receiver object). r5 is zero for method calls and non-zero for 136 // receiver object). r5 is zero for method calls and non-zero for
138 // function calls. 137 // function calls.
139 if (!info_->is_classic_mode() || info_->is_native()) { 138 if (!info_->is_classic_mode() || info_->is_native()) {
140 Label ok; 139 Label ok;
141 __ cmp(r5, Operand(0)); 140 __ cmp(r5, Operand(0));
142 __ b(eq, &ok); 141 __ b(eq, &ok);
143 int receiver_offset = scope()->num_parameters() * kPointerSize; 142 int receiver_offset = scope()->num_parameters() * kPointerSize;
144 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 143 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
145 __ str(r2, MemOperand(sp, receiver_offset)); 144 __ str(r2, MemOperand(sp, receiver_offset));
146 __ bind(&ok); 145 __ bind(&ok);
146 }
147 } 147 }
148 148
149
150 info()->set_prologue_offset(masm_->pc_offset()); 149 info()->set_prologue_offset(masm_->pc_offset());
151 { 150 if (NeedsEagerFrame()) {
152 PredictableCodeSizeScope predictible_code_size_scope( 151 PredictableCodeSizeScope predictible_code_size_scope(
153 masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); 152 masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
154 // The following three instructions must remain together and unmodified 153 // The following three instructions must remain together and unmodified
155 // for code aging to work properly. 154 // for code aging to work properly.
156 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); 155 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
157 // Load undefined value here, so the value is ready for the loop 156 // Load undefined value here, so the value is ready for the loop
158 // below. 157 // below.
159 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 158 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
160 // Adjust FP to point to saved FP. 159 // Adjust FP to point to saved FP.
161 __ add(fp, sp, Operand(2 * kPointerSize)); 160 __ add(fp, sp, Operand(2 * kPointerSize));
161 frame_is_built_ = true;
162 } 162 }
163 163
164 // Reserve space for the stack slots needed by the code. 164 // Reserve space for the stack slots needed by the code.
165 int slots = GetStackSlotCount(); 165 int slots = GetStackSlotCount();
166 if (slots > 0) { 166 if (slots > 0) {
167 if (FLAG_debug_code) { 167 if (FLAG_debug_code) {
168 __ mov(r0, Operand(slots)); 168 __ mov(r0, Operand(slots));
169 __ mov(r2, Operand(kSlotsZapValue)); 169 __ mov(r2, Operand(kSlotsZapValue));
170 Label loop; 170 Label loop;
171 __ bind(&loop); 171 __ bind(&loop);
172 __ push(r2); 172 __ push(r2);
173 __ sub(r0, r0, Operand(1), SetCC); 173 __ sub(r0, r0, Operand(1), SetCC);
174 __ b(ne, &loop); 174 __ b(ne, &loop);
175 } else { 175 } else {
176 __ sub(sp, sp, Operand(slots * kPointerSize)); 176 __ sub(sp, sp, Operand(slots * kPointerSize));
177 } 177 }
178 } 178 }
179 179
180 // Possibly allocate a local context. 180 // Possibly allocate a local context.
181 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; 181 int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
182 if (heap_slots > 0) { 182 if (heap_slots > 0) {
183 Comment(";;; Allocate local context"); 183 Comment(";;; Allocate local context");
184 // Argument to NewContext is the function, which is in r1. 184 // Argument to NewContext is the function, which is in r1.
185 __ push(r1); 185 __ push(r1);
186 if (heap_slots <= FastNewContextStub::kMaximumSlots) { 186 if (heap_slots <= FastNewContextStub::kMaximumSlots) {
187 FastNewContextStub stub(heap_slots); 187 FastNewContextStub stub(heap_slots);
188 __ CallStub(&stub); 188 __ CallStub(&stub);
189 } else { 189 } else {
190 __ CallRuntime(Runtime::kNewFunctionContext, 1); 190 __ CallRuntime(Runtime::kNewFunctionContext, 1);
191 } 191 }
(...skipping 15 matching lines...) Expand all
207 __ str(r0, target); 207 __ str(r0, target);
208 // Update the write barrier. This clobbers r3 and r0. 208 // Update the write barrier. This clobbers r3 and r0.
209 __ RecordWriteContextSlot( 209 __ RecordWriteContextSlot(
210 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs); 210 cp, target.offset(), r0, r3, kLRHasBeenSaved, kSaveFPRegs);
211 } 211 }
212 } 212 }
213 Comment(";;; End allocate local context"); 213 Comment(";;; End allocate local context");
214 } 214 }
215 215
216 // Trace the call. 216 // Trace the call.
217 if (FLAG_trace) { 217 if (FLAG_trace && info()->IsOptimizing()) {
218 __ CallRuntime(Runtime::kTraceEnter, 0); 218 __ CallRuntime(Runtime::kTraceEnter, 0);
219 } 219 }
220 return !is_aborted(); 220 return !is_aborted();
221 } 221 }
222 222
223 223
224 bool LCodeGen::GenerateBody() { 224 bool LCodeGen::GenerateBody() {
225 ASSERT(is_generating()); 225 ASSERT(is_generating());
226 bool emit_instructions = true; 226 bool emit_instructions = true;
227 for (current_instruction_ = 0; 227 for (current_instruction_ = 0;
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
265 return !is_aborted(); 265 return !is_aborted();
266 } 266 }
267 267
268 268
269 bool LCodeGen::GenerateDeferredCode() { 269 bool LCodeGen::GenerateDeferredCode() {
270 ASSERT(is_generating()); 270 ASSERT(is_generating());
271 if (deferred_.length() > 0) { 271 if (deferred_.length() > 0) {
272 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { 272 for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
273 LDeferredCode* code = deferred_[i]; 273 LDeferredCode* code = deferred_[i];
274 __ bind(code->entry()); 274 __ bind(code->entry());
275 if (NeedsDeferredFrame()) {
276 Comment(";;; Deferred build frame",
277 code->instruction_index(),
278 code->instr()->Mnemonic());
279 ASSERT(!frame_is_built_);
280 ASSERT(info()->IsStub());
281 frame_is_built_ = true;
282 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
283 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
284 __ push(scratch0());
285 __ add(fp, sp, Operand(2 * kPointerSize));
286 }
275 Comment(";;; Deferred code @%d: %s.", 287 Comment(";;; Deferred code @%d: %s.",
276 code->instruction_index(), 288 code->instruction_index(),
277 code->instr()->Mnemonic()); 289 code->instr()->Mnemonic());
278 code->Generate(); 290 code->Generate();
291 if (NeedsDeferredFrame()) {
292 Comment(";;; Deferred destroy frame",
293 code->instruction_index(),
294 code->instr()->Mnemonic());
295 ASSERT(frame_is_built_);
296 __ pop(ip);
297 __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit());
298 frame_is_built_ = false;
299 }
279 __ jmp(code->exit()); 300 __ jmp(code->exit());
280 } 301 }
281 } 302 }
282 303
283 // Force constant pool emission at the end of the deferred code to make 304 // Force constant pool emission at the end of the deferred code to make
284 // sure that no constant pools are emitted after. 305 // sure that no constant pools are emitted after.
285 masm()->CheckConstPool(true, false); 306 masm()->CheckConstPool(true, false);
286 307
287 return !is_aborted(); 308 return !is_aborted();
288 } 309 }
289 310
290 311
291 bool LCodeGen::GenerateDeoptJumpTable() { 312 bool LCodeGen::GenerateDeoptJumpTable() {
292 // Check that the jump table is accessible from everywhere in the function 313 // Check that the jump table is accessible from everywhere in the function
293 // code, i.e. that offsets to the table can be encoded in the 24bit signed 314 // code, i.e. that offsets to the table can be encoded in the 24bit signed
294 // immediate of a branch instruction. 315 // immediate of a branch instruction.
295 // To simplify we consider the code size from the first instruction to the 316 // To simplify we consider the code size from the first instruction to the
296 // end of the jump table. We also don't consider the pc load delta. 317 // end of the jump table. We also don't consider the pc load delta.
297 // Each entry in the jump table generates one instruction and inlines one 318 // Each entry in the jump table generates one instruction and inlines one
298 // 32bit data after it. 319 // 32bit data after it.
299 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + 320 if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
300 deopt_jump_table_.length() * 2)) { 321 deopt_jump_table_.length() * 7)) {
301 Abort("Generated code is too large"); 322 Abort("Generated code is too large");
302 } 323 }
303 324
304 // Block the constant pool emission during the jump table emission.
305 __ BlockConstPoolFor(deopt_jump_table_.length());
306 __ RecordComment("[ Deoptimisation jump table"); 325 __ RecordComment("[ Deoptimisation jump table");
307 Label table_start; 326 Label table_start;
308 __ bind(&table_start); 327 __ bind(&table_start);
328 Label needs_frame_not_call;
329 Label needs_frame_is_call;
309 for (int i = 0; i < deopt_jump_table_.length(); i++) { 330 for (int i = 0; i < deopt_jump_table_.length(); i++) {
310 __ bind(&deopt_jump_table_[i].label); 331 __ bind(&deopt_jump_table_[i].label);
311 __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); 332 Address entry = deopt_jump_table_[i].address;
312 __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); 333 if (deopt_jump_table_[i].needs_frame) {
334 __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
335 if (deopt_jump_table_[i].is_lazy_deopt) {
336 if (needs_frame_is_call.is_bound()) {
337 __ b(&needs_frame_is_call);
338 } else {
339 __ bind(&needs_frame_is_call);
340 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
341 // This variant of deopt can only be used with stubs. Since we don't
342 // have a function pointer to install in the stack frame that we're
343 // building, install a special marker there instead.
344 ASSERT(info()->IsStub());
345 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
346 __ push(scratch0());
347 __ add(fp, sp, Operand(2 * kPointerSize));
348 __ mov(lr, Operand(pc), LeaveCC, al);
349 __ mov(pc, ip);
350 }
351 } else {
352 if (needs_frame_not_call.is_bound()) {
353 __ b(&needs_frame_not_call);
354 } else {
355 __ bind(&needs_frame_not_call);
356 __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
357 // This variant of deopt can only be used with stubs. Since we don't
358 // have a function pointer to install in the stack frame that we're
359 // building, install a special marker there instead.
360 ASSERT(info()->IsStub());
361 __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
362 __ push(scratch0());
363 __ add(fp, sp, Operand(2 * kPointerSize));
364 __ mov(pc, ip);
365 }
366 }
367 } else {
368 if (deopt_jump_table_[i].is_lazy_deopt) {
369 __ mov(lr, Operand(pc), LeaveCC, al);
370 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
371 } else {
372 __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
373 }
374 }
375 masm()->CheckConstPool(false, false);
313 } 376 }
314 ASSERT(masm()->InstructionsGeneratedSince(&table_start) ==
315 deopt_jump_table_.length() * 2);
316 __ RecordComment("]"); 377 __ RecordComment("]");
317 378
379 // Force constant pool emission at the end of the deopt jump table to make
380 // sure that no constant pools are emitted after.
381 masm()->CheckConstPool(true, false);
382
318 // The deoptimization jump table is the last part of the instruction 383 // The deoptimization jump table is the last part of the instruction
319 // sequence. Mark the generated code as done unless we bailed out. 384 // sequence. Mark the generated code as done unless we bailed out.
320 if (!is_aborted()) status_ = DONE; 385 if (!is_aborted()) status_ = DONE;
321 return !is_aborted(); 386 return !is_aborted();
322 } 387 }
323 388
324 389
325 bool LCodeGen::GenerateSafepointTable() { 390 bool LCodeGen::GenerateSafepointTable() {
326 ASSERT(is_done()); 391 ASSERT(is_done());
327 safepoints_.Emit(masm(), GetStackSlotCount()); 392 safepoints_.Emit(masm(), GetStackSlotCount());
328 return !is_aborted(); 393 return !is_aborted();
329 } 394 }
330 395
331 396
332 Register LCodeGen::ToRegister(int index) const { 397 Register LCodeGen::ToRegister(int index) const {
333 return Register::FromAllocationIndex(index); 398 return Register::FromAllocationIndex(index);
334 } 399 }
335 400
336 401
337 DoubleRegister LCodeGen::ToDoubleRegister(int index) const { 402 DwVfpRegister LCodeGen::ToDoubleRegister(int index) const {
338 return DoubleRegister::FromAllocationIndex(index); 403 return DwVfpRegister::FromAllocationIndex(index);
339 } 404 }
340 405
341 406
342 Register LCodeGen::ToRegister(LOperand* op) const { 407 Register LCodeGen::ToRegister(LOperand* op) const {
343 ASSERT(op->IsRegister()); 408 ASSERT(op->IsRegister());
344 return ToRegister(op->index()); 409 return ToRegister(op->index());
345 } 410 }
346 411
347 412
348 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { 413 Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
(...skipping 20 matching lines...) Expand all
369 return scratch; 434 return scratch;
370 } else if (op->IsStackSlot() || op->IsArgument()) { 435 } else if (op->IsStackSlot() || op->IsArgument()) {
371 __ ldr(scratch, ToMemOperand(op)); 436 __ ldr(scratch, ToMemOperand(op));
372 return scratch; 437 return scratch;
373 } 438 }
374 UNREACHABLE(); 439 UNREACHABLE();
375 return scratch; 440 return scratch;
376 } 441 }
377 442
378 443
379 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { 444 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
380 ASSERT(op->IsDoubleRegister()); 445 ASSERT(op->IsDoubleRegister());
381 return ToDoubleRegister(op->index()); 446 return ToDoubleRegister(op->index());
382 } 447 }
383 448
384 449
385 DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, 450 DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
386 SwVfpRegister flt_scratch, 451 SwVfpRegister flt_scratch,
387 DoubleRegister dbl_scratch) { 452 DwVfpRegister dbl_scratch) {
388 if (op->IsDoubleRegister()) { 453 if (op->IsDoubleRegister()) {
389 return ToDoubleRegister(op->index()); 454 return ToDoubleRegister(op->index());
390 } else if (op->IsConstantOperand()) { 455 } else if (op->IsConstantOperand()) {
391 LConstantOperand* const_op = LConstantOperand::cast(op); 456 LConstantOperand* const_op = LConstantOperand::cast(op);
392 HConstant* constant = chunk_->LookupConstant(const_op); 457 HConstant* constant = chunk_->LookupConstant(const_op);
393 Handle<Object> literal = constant->handle(); 458 Handle<Object> literal = constant->handle();
394 Representation r = chunk_->LookupLiteralRepresentation(const_op); 459 Representation r = chunk_->LookupLiteralRepresentation(const_op);
395 if (r.IsInteger32()) { 460 if (r.IsInteger32()) {
396 ASSERT(literal->IsNumber()); 461 ASSERT(literal->IsNumber());
397 __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); 462 __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
513 // arguments index points to the first element of a sequence of tagged 578 // arguments index points to the first element of a sequence of tagged
514 // values on the stack that represent the arguments. This needs to be 579 // values on the stack that represent the arguments. This needs to be
515 // kept in sync with the LArgumentsElements implementation. 580 // kept in sync with the LArgumentsElements implementation.
516 *arguments_index = -environment->parameter_count(); 581 *arguments_index = -environment->parameter_count();
517 *arguments_count = environment->parameter_count(); 582 *arguments_count = environment->parameter_count();
518 583
519 WriteTranslation(environment->outer(), 584 WriteTranslation(environment->outer(),
520 translation, 585 translation,
521 arguments_index, 586 arguments_index,
522 arguments_count); 587 arguments_count);
523 int closure_id = *info()->closure() != *environment->closure() 588 bool has_closure_id = !info()->closure().is_null() &&
589 *info()->closure() != *environment->closure();
590 int closure_id = has_closure_id
524 ? DefineDeoptimizationLiteral(environment->closure()) 591 ? DefineDeoptimizationLiteral(environment->closure())
525 : Translation::kSelfLiteralId; 592 : Translation::kSelfLiteralId;
526 593
527 switch (environment->frame_type()) { 594 switch (environment->frame_type()) {
528 case JS_FUNCTION: 595 case JS_FUNCTION:
529 translation->BeginJSFrame(environment->ast_id(), closure_id, height); 596 translation->BeginJSFrame(environment->ast_id(), closure_id, height);
530 break; 597 break;
531 case JS_CONSTRUCT: 598 case JS_CONSTRUCT:
532 translation->BeginConstructStubFrame(closure_id, translation_size); 599 translation->BeginConstructStubFrame(closure_id, translation_size);
533 break; 600 break;
534 case JS_GETTER: 601 case JS_GETTER:
535 ASSERT(translation_size == 1); 602 ASSERT(translation_size == 1);
536 ASSERT(height == 0); 603 ASSERT(height == 0);
537 translation->BeginGetterStubFrame(closure_id); 604 translation->BeginGetterStubFrame(closure_id);
538 break; 605 break;
539 case JS_SETTER: 606 case JS_SETTER:
540 ASSERT(translation_size == 2); 607 ASSERT(translation_size == 2);
541 ASSERT(height == 0); 608 ASSERT(height == 0);
542 translation->BeginSetterStubFrame(closure_id); 609 translation->BeginSetterStubFrame(closure_id);
543 break; 610 break;
611 case STUB:
612 translation->BeginCompiledStubFrame();
613 break;
544 case ARGUMENTS_ADAPTOR: 614 case ARGUMENTS_ADAPTOR:
545 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); 615 translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
546 break; 616 break;
547 } 617 }
548 618
549 // Inlined frames which push their arguments cause the index to be 619 // Inlined frames which push their arguments cause the index to be
550 // bumped and a new stack area to be used for materialization. 620 // bumped and a new stack area to be used for materialization.
551 if (environment->entry() != NULL && 621 if (environment->entry() != NULL &&
552 environment->entry()->arguments_pushed()) { 622 environment->entry()->arguments_pushed()) {
553 *arguments_index = *arguments_index < 0 623 *arguments_index = *arguments_index < 0
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
729 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 799 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
730 deoptimizations_.Add(environment, zone()); 800 deoptimizations_.Add(environment, zone());
731 } 801 }
732 } 802 }
733 803
734 804
735 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { 805 void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
736 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 806 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
737 ASSERT(environment->HasBeenRegistered()); 807 ASSERT(environment->HasBeenRegistered());
738 int id = environment->deoptimization_index(); 808 int id = environment->deoptimization_index();
739 Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); 809
810 Deoptimizer::BailoutType bailout_type = info()->IsStub()
811 ? Deoptimizer::LAZY
812 : Deoptimizer::EAGER;
813 Address entry = Deoptimizer::GetDeoptimizationEntry(id, bailout_type);
740 if (entry == NULL) { 814 if (entry == NULL) {
741 Abort("bailout was not prepared"); 815 Abort("bailout was not prepared");
742 return; 816 return;
743 } 817 }
744 818
745 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. 819 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
746 820
747 if (FLAG_deopt_every_n_times == 1 && 821 if (FLAG_deopt_every_n_times == 1 &&
748 info_->shared_info()->opt_count() == id) { 822 info_->shared_info()->opt_count() == id) {
749 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); 823 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
750 return; 824 return;
751 } 825 }
752 826
753 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); 827 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc);
754 828
755 if (cc == al) { 829 bool needs_lazy_deopt = info()->IsStub();
830 ASSERT(info()->IsStub() || frame_is_built_);
831 if (cc == al && !needs_lazy_deopt) {
756 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); 832 __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
757 } else { 833 } else {
758 // We often have several deopts to the same entry, reuse the last 834 // We often have several deopts to the same entry, reuse the last
759 // jump entry if this is the case. 835 // jump entry if this is the case.
760 if (deopt_jump_table_.is_empty() || 836 if (deopt_jump_table_.is_empty() ||
761 (deopt_jump_table_.last().address != entry)) { 837 (deopt_jump_table_.last().address != entry) ||
762 deopt_jump_table_.Add(JumpTableEntry(entry), zone()); 838 (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) ||
839 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
840 JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt);
841 deopt_jump_table_.Add(table_entry, zone());
763 } 842 }
764 __ b(cc, &deopt_jump_table_.last().label); 843 __ b(cc, &deopt_jump_table_.last().label);
765 } 844 }
766 } 845 }
767 846
768 847
769 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 848 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
770 int length = deoptimizations_.length(); 849 int length = deoptimizations_.length();
771 if (length == 0) return; 850 if (length == 0) return;
772 Handle<DeoptimizationInputData> data = 851 Handle<DeoptimizationInputData> data =
(...skipping 588 matching lines...) Expand 10 before | Expand all | Expand 10 after
1361 __ cmp(remainder, Operand(0)); 1440 __ cmp(remainder, Operand(0));
1362 __ teq(remainder, Operand(divisor), ne); 1441 __ teq(remainder, Operand(divisor), ne);
1363 __ sub(result, result, Operand(1), LeaveCC, mi); 1442 __ sub(result, result, Operand(1), LeaveCC, mi);
1364 } 1443 }
1365 1444
1366 1445
1367 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, 1446 void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
1368 LOperand* left_argument, 1447 LOperand* left_argument,
1369 LOperand* right_argument, 1448 LOperand* right_argument,
1370 Token::Value op) { 1449 Token::Value op) {
1450 CpuFeatures::Scope vfp_scope(VFP2);
1371 Register left = ToRegister(left_argument); 1451 Register left = ToRegister(left_argument);
1372 Register right = ToRegister(right_argument); 1452 Register right = ToRegister(right_argument);
1373 1453
1374 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles); 1454 PushSafepointRegistersScope scope(this, Safepoint::kWithRegistersAndDoubles);
1375 // Move left to r1 and right to r0 for the stub call. 1455 // Move left to r1 and right to r0 for the stub call.
1376 if (left.is(r1)) { 1456 if (left.is(r1)) {
1377 __ Move(r0, right); 1457 __ Move(r0, right);
1378 } else if (left.is(r0) && right.is(r1)) { 1458 } else if (left.is(r0) && right.is(r1)) {
1379 __ Swap(r0, r1, r2); 1459 __ Swap(r0, r1, r2);
1380 } else if (left.is(r0)) { 1460 } else if (left.is(r0)) {
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after
1646 1726
1647 void LCodeGen::DoConstantI(LConstantI* instr) { 1727 void LCodeGen::DoConstantI(LConstantI* instr) {
1648 ASSERT(instr->result()->IsRegister()); 1728 ASSERT(instr->result()->IsRegister());
1649 __ mov(ToRegister(instr->result()), Operand(instr->value())); 1729 __ mov(ToRegister(instr->result()), Operand(instr->value()));
1650 } 1730 }
1651 1731
1652 1732
1653 void LCodeGen::DoConstantD(LConstantD* instr) { 1733 void LCodeGen::DoConstantD(LConstantD* instr) {
1654 ASSERT(instr->result()->IsDoubleRegister()); 1734 ASSERT(instr->result()->IsDoubleRegister());
1655 DwVfpRegister result = ToDoubleRegister(instr->result()); 1735 DwVfpRegister result = ToDoubleRegister(instr->result());
1736 CpuFeatures::Scope scope(VFP2);
1656 double v = instr->value(); 1737 double v = instr->value();
1657 __ Vmov(result, v, scratch0()); 1738 __ Vmov(result, v, scratch0());
1658 } 1739 }
1659 1740
1660 1741
1661 void LCodeGen::DoConstantT(LConstantT* instr) { 1742 void LCodeGen::DoConstantT(LConstantT* instr) {
1662 Handle<Object> value = instr->value(); 1743 Handle<Object> value = instr->value();
1663 if (value->IsSmi()) { 1744 if (value->IsSmi()) {
1664 __ mov(ToRegister(instr->result()), Operand(value)); 1745 __ mov(ToRegister(instr->result()), Operand(value));
1665 } else { 1746 } else {
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after
1814 ? ToOperand(right) 1895 ? ToOperand(right)
1815 : Operand(EmitLoadRegister(right, ip)); 1896 : Operand(EmitLoadRegister(right, ip));
1816 Register result_reg = ToRegister(instr->result()); 1897 Register result_reg = ToRegister(instr->result());
1817 __ cmp(left_reg, right_op); 1898 __ cmp(left_reg, right_op);
1818 if (!result_reg.is(left_reg)) { 1899 if (!result_reg.is(left_reg)) {
1819 __ mov(result_reg, left_reg, LeaveCC, condition); 1900 __ mov(result_reg, left_reg, LeaveCC, condition);
1820 } 1901 }
1821 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); 1902 __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
1822 } else { 1903 } else {
1823 ASSERT(instr->hydrogen()->representation().IsDouble()); 1904 ASSERT(instr->hydrogen()->representation().IsDouble());
1824 DoubleRegister left_reg = ToDoubleRegister(left); 1905 CpuFeatures::Scope scope(VFP2);
1825 DoubleRegister right_reg = ToDoubleRegister(right); 1906 DwVfpRegister left_reg = ToDoubleRegister(left);
1826 DoubleRegister result_reg = ToDoubleRegister(instr->result()); 1907 DwVfpRegister right_reg = ToDoubleRegister(right);
1908 DwVfpRegister result_reg = ToDoubleRegister(instr->result());
1827 Label check_nan_left, check_zero, return_left, return_right, done; 1909 Label check_nan_left, check_zero, return_left, return_right, done;
1828 __ VFPCompareAndSetFlags(left_reg, right_reg); 1910 __ VFPCompareAndSetFlags(left_reg, right_reg);
1829 __ b(vs, &check_nan_left); 1911 __ b(vs, &check_nan_left);
1830 __ b(eq, &check_zero); 1912 __ b(eq, &check_zero);
1831 __ b(condition, &return_left); 1913 __ b(condition, &return_left);
1832 __ b(al, &return_right); 1914 __ b(al, &return_right);
1833 1915
1834 __ bind(&check_zero); 1916 __ bind(&check_zero);
1835 __ VFPCompareAndSetFlags(left_reg, 0.0); 1917 __ VFPCompareAndSetFlags(left_reg, 0.0);
1836 __ b(ne, &return_left); // left == right != 0. 1918 __ b(ne, &return_left); // left == right != 0.
(...skipping 22 matching lines...) Expand all
1859 __ bind(&return_left); 1941 __ bind(&return_left);
1860 if (!left_reg.is(result_reg)) { 1942 if (!left_reg.is(result_reg)) {
1861 __ vmov(result_reg, left_reg); 1943 __ vmov(result_reg, left_reg);
1862 } 1944 }
1863 __ bind(&done); 1945 __ bind(&done);
1864 } 1946 }
1865 } 1947 }
1866 1948
1867 1949
1868 void LCodeGen::DoArithmeticD(LArithmeticD* instr) { 1950 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
1869 DoubleRegister left = ToDoubleRegister(instr->left()); 1951 CpuFeatures::Scope scope(VFP2);
1870 DoubleRegister right = ToDoubleRegister(instr->right()); 1952 DwVfpRegister left = ToDoubleRegister(instr->left());
1871 DoubleRegister result = ToDoubleRegister(instr->result()); 1953 DwVfpRegister right = ToDoubleRegister(instr->right());
1954 DwVfpRegister result = ToDoubleRegister(instr->result());
1872 switch (instr->op()) { 1955 switch (instr->op()) {
1873 case Token::ADD: 1956 case Token::ADD:
1874 __ vadd(result, left, right); 1957 __ vadd(result, left, right);
1875 break; 1958 break;
1876 case Token::SUB: 1959 case Token::SUB:
1877 __ vsub(result, left, right); 1960 __ vsub(result, left, right);
1878 break; 1961 break;
1879 case Token::MUL: 1962 case Token::MUL:
1880 __ vmul(result, left, right); 1963 __ vmul(result, left, right);
1881 break; 1964 break;
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after
1949 void LCodeGen::DoBranch(LBranch* instr) { 2032 void LCodeGen::DoBranch(LBranch* instr) {
1950 int true_block = chunk_->LookupDestination(instr->true_block_id()); 2033 int true_block = chunk_->LookupDestination(instr->true_block_id());
1951 int false_block = chunk_->LookupDestination(instr->false_block_id()); 2034 int false_block = chunk_->LookupDestination(instr->false_block_id());
1952 2035
1953 Representation r = instr->hydrogen()->value()->representation(); 2036 Representation r = instr->hydrogen()->value()->representation();
1954 if (r.IsInteger32()) { 2037 if (r.IsInteger32()) {
1955 Register reg = ToRegister(instr->value()); 2038 Register reg = ToRegister(instr->value());
1956 __ cmp(reg, Operand(0)); 2039 __ cmp(reg, Operand(0));
1957 EmitBranch(true_block, false_block, ne); 2040 EmitBranch(true_block, false_block, ne);
1958 } else if (r.IsDouble()) { 2041 } else if (r.IsDouble()) {
1959 DoubleRegister reg = ToDoubleRegister(instr->value()); 2042 CpuFeatures::Scope scope(VFP2);
2043 DwVfpRegister reg = ToDoubleRegister(instr->value());
1960 Register scratch = scratch0(); 2044 Register scratch = scratch0();
1961 2045
1962 // Test the double value. Zero and NaN are false. 2046 // Test the double value. Zero and NaN are false.
1963 __ VFPCompareAndLoadFlags(reg, 0.0, scratch); 2047 __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
1964 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit)); 2048 __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
1965 EmitBranch(true_block, false_block, eq); 2049 EmitBranch(true_block, false_block, eq);
1966 } else { 2050 } else {
1967 ASSERT(r.IsTagged()); 2051 ASSERT(r.IsTagged());
1968 Register reg = ToRegister(instr->value()); 2052 Register reg = ToRegister(instr->value());
1969 HType type = instr->hydrogen()->value()->type(); 2053 HType type = instr->hydrogen()->value()->type();
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after
2034 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); 2118 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
2035 __ b(ge, &not_string); 2119 __ b(ge, &not_string);
2036 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); 2120 __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
2037 __ cmp(ip, Operand(0)); 2121 __ cmp(ip, Operand(0));
2038 __ b(ne, true_label); 2122 __ b(ne, true_label);
2039 __ b(false_label); 2123 __ b(false_label);
2040 __ bind(&not_string); 2124 __ bind(&not_string);
2041 } 2125 }
2042 2126
2043 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { 2127 if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
2128 CpuFeatures::Scope scope(VFP2);
2044 // heap number -> false iff +0, -0, or NaN. 2129 // heap number -> false iff +0, -0, or NaN.
2045 DoubleRegister dbl_scratch = double_scratch0(); 2130 DwVfpRegister dbl_scratch = double_scratch0();
2046 Label not_heap_number; 2131 Label not_heap_number;
2047 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); 2132 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
2048 __ b(ne, &not_heap_number); 2133 __ b(ne, &not_heap_number);
2049 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); 2134 __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
2050 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); 2135 __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
2051 __ b(vs, false_label); // NaN -> false. 2136 __ b(vs, false_label); // NaN -> false.
2052 __ b(eq, false_label); // +0, -0 -> false. 2137 __ b(eq, false_label); // +0, -0 -> false.
2053 __ b(true_label); 2138 __ b(true_label);
2054 __ bind(&not_heap_number); 2139 __ bind(&not_heap_number);
2055 } 2140 }
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after
2113 if (left->IsConstantOperand() && right->IsConstantOperand()) { 2198 if (left->IsConstantOperand() && right->IsConstantOperand()) {
2114 // We can statically evaluate the comparison. 2199 // We can statically evaluate the comparison.
2115 double left_val = ToDouble(LConstantOperand::cast(left)); 2200 double left_val = ToDouble(LConstantOperand::cast(left));
2116 double right_val = ToDouble(LConstantOperand::cast(right)); 2201 double right_val = ToDouble(LConstantOperand::cast(right));
2117 int next_block = 2202 int next_block =
2118 EvalComparison(instr->op(), left_val, right_val) ? true_block 2203 EvalComparison(instr->op(), left_val, right_val) ? true_block
2119 : false_block; 2204 : false_block;
2120 EmitGoto(next_block); 2205 EmitGoto(next_block);
2121 } else { 2206 } else {
2122 if (instr->is_double()) { 2207 if (instr->is_double()) {
2208 CpuFeatures::Scope scope(VFP2);
2123 // Compare left and right operands as doubles and load the 2209 // Compare left and right operands as doubles and load the
2124 // resulting flags into the normal status register. 2210 // resulting flags into the normal status register.
2125 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); 2211 __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
2126 // If a NaN is involved, i.e. the result is unordered (V set), 2212 // If a NaN is involved, i.e. the result is unordered (V set),
2127 // jump to false block label. 2213 // jump to false block label.
2128 __ b(vs, chunk_->GetAssemblyLabel(false_block)); 2214 __ b(vs, chunk_->GetAssemblyLabel(false_block));
2129 } else { 2215 } else {
2130 if (right->IsConstantOperand()) { 2216 if (right->IsConstantOperand()) {
2131 __ cmp(ToRegister(left), 2217 __ cmp(ToRegister(left),
2132 Operand(ToInteger32(LConstantOperand::cast(right)))); 2218 Operand(ToInteger32(LConstantOperand::cast(right))));
(...skipping 518 matching lines...) Expand 10 before | Expand all | Expand 10 after
2651 __ LoadRoot(ToRegister(instr->result()), 2737 __ LoadRoot(ToRegister(instr->result()),
2652 Heap::kTrueValueRootIndex, 2738 Heap::kTrueValueRootIndex,
2653 condition); 2739 condition);
2654 __ LoadRoot(ToRegister(instr->result()), 2740 __ LoadRoot(ToRegister(instr->result()),
2655 Heap::kFalseValueRootIndex, 2741 Heap::kFalseValueRootIndex,
2656 NegateCondition(condition)); 2742 NegateCondition(condition));
2657 } 2743 }
2658 2744
2659 2745
2660 void LCodeGen::DoReturn(LReturn* instr) { 2746 void LCodeGen::DoReturn(LReturn* instr) {
2661 if (FLAG_trace) { 2747 if (FLAG_trace && info()->IsOptimizing()) {
2662 // Push the return value on the stack as the parameter. 2748 // Push the return value on the stack as the parameter.
2663 // Runtime::TraceExit returns its parameter in r0. 2749 // Runtime::TraceExit returns its parameter in r0.
2664 __ push(r0); 2750 __ push(r0);
2665 __ CallRuntime(Runtime::kTraceExit, 1); 2751 __ CallRuntime(Runtime::kTraceExit, 1);
2666 } 2752 }
2667 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; 2753 if (NeedsEagerFrame()) {
2668 __ mov(sp, fp); 2754 int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
2669 __ ldm(ia_w, sp, fp.bit() | lr.bit()); 2755 __ mov(sp, fp);
2670 __ add(sp, sp, Operand(sp_delta)); 2756 __ ldm(ia_w, sp, fp.bit() | lr.bit());
2757 __ add(sp, sp, Operand(sp_delta));
2758 }
2759 if (info()->IsStub()) {
2760 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2761 }
2671 __ Jump(lr); 2762 __ Jump(lr);
2672 } 2763 }
2673 2764
2674 2765
2675 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2766 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2676 Register result = ToRegister(instr->result()); 2767 Register result = ToRegister(instr->result());
2677 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); 2768 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
2678 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); 2769 __ ldr(result, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
2679 if (instr->hydrogen()->RequiresHoleCheck()) { 2770 if (instr->hydrogen()->RequiresHoleCheck()) {
2680 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); 2771 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
(...skipping 329 matching lines...) Expand 10 before | Expand all | Expand 10 after
3010 } else { 3101 } else {
3011 key = ToRegister(instr->key()); 3102 key = ToRegister(instr->key());
3012 } 3103 }
3013 int element_size_shift = ElementsKindToShiftSize(elements_kind); 3104 int element_size_shift = ElementsKindToShiftSize(elements_kind);
3014 int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) 3105 int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
3015 ? (element_size_shift - kSmiTagSize) : element_size_shift; 3106 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3016 int additional_offset = instr->additional_index() << element_size_shift; 3107 int additional_offset = instr->additional_index() << element_size_shift;
3017 3108
3018 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || 3109 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
3019 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { 3110 elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
3020 CpuFeatures::Scope scope(VFP3);
3021 DwVfpRegister result = ToDoubleRegister(instr->result()); 3111 DwVfpRegister result = ToDoubleRegister(instr->result());
3022 Operand operand = key_is_constant 3112 Operand operand = key_is_constant
3023 ? Operand(constant_key << element_size_shift) 3113 ? Operand(constant_key << element_size_shift)
3024 : Operand(key, LSL, shift_size); 3114 : Operand(key, LSL, shift_size);
3025 __ add(scratch0(), external_pointer, operand); 3115 __ add(scratch0(), external_pointer, operand);
3026 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { 3116 if (CpuFeatures::IsSupported(VFP2)) {
3027 __ vldr(result.low(), scratch0(), additional_offset); 3117 CpuFeatures::Scope scope(VFP2);
3028 __ vcvt_f64_f32(result, result.low()); 3118 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3029 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS 3119 __ vldr(result.low(), scratch0(), additional_offset);
3030 __ vldr(result, scratch0(), additional_offset); 3120 __ vcvt_f64_f32(result, result.low());
3121 } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
3122 __ vldr(result, scratch0(), additional_offset);
3123 }
3124 } else {
3125 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
3126 Register value = external_pointer;
3127 __ ldr(value, MemOperand(scratch0(), additional_offset));
3128 __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
3129
3130 __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
3131 __ and_(scratch0(), scratch0(),
3132 Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3133
3134 Label exponent_rebiased;
3135 __ teq(scratch0(), Operand(0x00));
3136 __ b(eq, &exponent_rebiased);
3137
3138 __ teq(scratch0(), Operand(0xff));
3139 __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
3140 __ b(eq, &exponent_rebiased);
3141
3142 // Rebias exponent.
3143 __ add(scratch0(),
3144 scratch0(),
3145 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3146
3147 __ bind(&exponent_rebiased);
3148 __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
3149 __ orr(sfpd_hi, sfpd_hi,
3150 Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
3151
3152 // Shift mantissa.
3153 static const int kMantissaShiftForHiWord =
3154 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3155
3156 static const int kMantissaShiftForLoWord =
3157 kBitsPerInt - kMantissaShiftForHiWord;
3158
3159 __ orr(sfpd_hi, sfpd_hi,
3160 Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
3161 __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
3162
3163 } else {
3164 __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
3165 __ ldr(sfpd_hi, MemOperand(scratch0(),
3166 additional_offset + kPointerSize));
3167 }
3031 } 3168 }
3032 } else { 3169 } else {
3033 Register result = ToRegister(instr->result()); 3170 Register result = ToRegister(instr->result());
3034 MemOperand mem_operand = PrepareKeyedOperand( 3171 MemOperand mem_operand = PrepareKeyedOperand(
3035 key, external_pointer, key_is_constant, constant_key, 3172 key, external_pointer, key_is_constant, constant_key,
3036 element_size_shift, shift_size, 3173 element_size_shift, shift_size,
3037 instr->additional_index(), additional_offset); 3174 instr->additional_index(), additional_offset);
3038 switch (elements_kind) { 3175 switch (elements_kind) {
3039 case EXTERNAL_BYTE_ELEMENTS: 3176 case EXTERNAL_BYTE_ELEMENTS:
3040 __ ldrsb(result, mem_operand); 3177 __ ldrsb(result, mem_operand);
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
3089 int constant_key = 0; 3226 int constant_key = 0;
3090 if (key_is_constant) { 3227 if (key_is_constant) {
3091 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 3228 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
3092 if (constant_key & 0xF0000000) { 3229 if (constant_key & 0xF0000000) {
3093 Abort("array index constant value too big."); 3230 Abort("array index constant value too big.");
3094 } 3231 }
3095 } else { 3232 } else {
3096 key = ToRegister(instr->key()); 3233 key = ToRegister(instr->key());
3097 } 3234 }
3098 3235
3099 Operand operand = key_is_constant 3236 int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
3100 ? Operand(((constant_key + instr->additional_index()) << 3237 ((constant_key + instr->additional_index()) << element_size_shift);
3101 element_size_shift) +
3102 FixedDoubleArray::kHeaderSize - kHeapObjectTag)
3103 : Operand(key, LSL, shift_size);
3104 __ add(elements, elements, operand);
3105 if (!key_is_constant) { 3238 if (!key_is_constant) {
3106 __ add(elements, elements, 3239 __ add(elements, elements, Operand(key, LSL, shift_size));
3107 Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) +
3108 (instr->additional_index() << element_size_shift)));
3109 } 3240 }
3110 3241 if (CpuFeatures::IsSupported(VFP2)) {
3111 __ vldr(result, elements, 0); 3242 CpuFeatures::Scope scope(VFP2);
3112 if (instr->hydrogen()->RequiresHoleCheck()) { 3243 __ add(elements, elements, Operand(base_offset));
3113 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); 3244 __ vldr(result, elements, 0);
3114 __ cmp(scratch, Operand(kHoleNanUpper32)); 3245 if (instr->hydrogen()->RequiresHoleCheck()) {
3115 DeoptimizeIf(eq, instr->environment()); 3246 __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
3247 __ cmp(scratch, Operand(kHoleNanUpper32));
3248 DeoptimizeIf(eq, instr->environment());
3249 }
3250 } else {
3251 __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
3252 __ ldr(sfpd_lo, MemOperand(elements, base_offset));
3253 if (instr->hydrogen()->RequiresHoleCheck()) {
3254 ASSERT(kPointerSize == sizeof(kHoleNanLower32));
3255 __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
3256 DeoptimizeIf(eq, instr->environment());
3257 }
3116 } 3258 }
3117 } 3259 }
3118 3260
3119 3261
3120 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3262 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3121 Register elements = ToRegister(instr->elements()); 3263 Register elements = ToRegister(instr->elements());
3122 Register result = ToRegister(instr->result()); 3264 Register result = ToRegister(instr->result());
3123 Register scratch = scratch0(); 3265 Register scratch = scratch0();
3124 Register store_base = scratch; 3266 Register store_base = scratch;
3125 int offset = 0; 3267 int offset = 0;
(...skipping 415 matching lines...) Expand 10 before | Expand all | Expand 10 after
3541 // We can make rsb conditional because the previous cmp instruction 3683 // We can make rsb conditional because the previous cmp instruction
3542 // will clear the V (overflow) flag and rsb won't set this flag 3684 // will clear the V (overflow) flag and rsb won't set this flag
3543 // if input is positive. 3685 // if input is positive.
3544 __ rsb(result, input, Operand(0), SetCC, mi); 3686 __ rsb(result, input, Operand(0), SetCC, mi);
3545 // Deoptimize on overflow. 3687 // Deoptimize on overflow.
3546 DeoptimizeIf(vs, instr->environment()); 3688 DeoptimizeIf(vs, instr->environment());
3547 } 3689 }
3548 3690
3549 3691
3550 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { 3692 void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
3693 CpuFeatures::Scope scope(VFP2);
3551 // Class for deferred case. 3694 // Class for deferred case.
3552 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { 3695 class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
3553 public: 3696 public:
3554 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, 3697 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
3555 LUnaryMathOperation* instr) 3698 LUnaryMathOperation* instr)
3556 : LDeferredCode(codegen), instr_(instr) { } 3699 : LDeferredCode(codegen), instr_(instr) { }
3557 virtual void Generate() { 3700 virtual void Generate() {
3558 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); 3701 codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
3559 } 3702 }
3560 virtual LInstruction* instr() { return instr_; } 3703 virtual LInstruction* instr() { return instr_; }
(...skipping 16 matching lines...) Expand all
3577 // Smi check. 3720 // Smi check.
3578 __ JumpIfNotSmi(input, deferred->entry()); 3721 __ JumpIfNotSmi(input, deferred->entry());
3579 // If smi, handle it directly. 3722 // If smi, handle it directly.
3580 EmitIntegerMathAbs(instr); 3723 EmitIntegerMathAbs(instr);
3581 __ bind(deferred->exit()); 3724 __ bind(deferred->exit());
3582 } 3725 }
3583 } 3726 }
3584 3727
3585 3728
3586 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { 3729 void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
3587 DoubleRegister input = ToDoubleRegister(instr->value()); 3730 CpuFeatures::Scope scope(VFP2);
3731 DwVfpRegister input = ToDoubleRegister(instr->value());
3588 Register result = ToRegister(instr->result()); 3732 Register result = ToRegister(instr->result());
3589 Register scratch = scratch0(); 3733 Register scratch = scratch0();
3590 3734
3591 __ EmitVFPTruncate(kRoundToMinusInf, 3735 __ EmitVFPTruncate(kRoundToMinusInf,
3592 result, 3736 result,
3593 input, 3737 input,
3594 scratch, 3738 scratch,
3595 double_scratch0()); 3739 double_scratch0());
3596 DeoptimizeIf(ne, instr->environment()); 3740 DeoptimizeIf(ne, instr->environment());
3597 3741
3598 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3742 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3599 // Test for -0. 3743 // Test for -0.
3600 Label done; 3744 Label done;
3601 __ cmp(result, Operand(0)); 3745 __ cmp(result, Operand(0));
3602 __ b(ne, &done); 3746 __ b(ne, &done);
3603 __ vmov(scratch, input.high()); 3747 __ vmov(scratch, input.high());
3604 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3748 __ tst(scratch, Operand(HeapNumber::kSignMask));
3605 DeoptimizeIf(ne, instr->environment()); 3749 DeoptimizeIf(ne, instr->environment());
3606 __ bind(&done); 3750 __ bind(&done);
3607 } 3751 }
3608 } 3752 }
3609 3753
3610 3754
3611 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { 3755 void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
3612 DoubleRegister input = ToDoubleRegister(instr->value()); 3756 CpuFeatures::Scope scope(VFP2);
3757 DwVfpRegister input = ToDoubleRegister(instr->value());
3613 Register result = ToRegister(instr->result()); 3758 Register result = ToRegister(instr->result());
3614 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3759 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
3615 Register scratch = scratch0(); 3760 Register scratch = scratch0();
3616 Label done, check_sign_on_zero; 3761 Label done, check_sign_on_zero;
3617 3762
3618 // Extract exponent bits. 3763 // Extract exponent bits.
3619 __ vmov(result, input.high()); 3764 __ vmov(result, input.high());
3620 __ ubfx(scratch, 3765 __ ubfx(scratch,
3621 result, 3766 result,
3622 HeapNumber::kExponentShift, 3767 HeapNumber::kExponentShift,
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
3667 __ bind(&check_sign_on_zero); 3812 __ bind(&check_sign_on_zero);
3668 __ vmov(scratch, input.high()); 3813 __ vmov(scratch, input.high());
3669 __ tst(scratch, Operand(HeapNumber::kSignMask)); 3814 __ tst(scratch, Operand(HeapNumber::kSignMask));
3670 DeoptimizeIf(ne, instr->environment()); 3815 DeoptimizeIf(ne, instr->environment());
3671 } 3816 }
3672 __ bind(&done); 3817 __ bind(&done);
3673 } 3818 }
3674 3819
3675 3820
3676 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { 3821 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
3677 DoubleRegister input = ToDoubleRegister(instr->value()); 3822 CpuFeatures::Scope scope(VFP2);
3678 DoubleRegister result = ToDoubleRegister(instr->result()); 3823 DwVfpRegister input = ToDoubleRegister(instr->value());
3824 DwVfpRegister result = ToDoubleRegister(instr->result());
3679 __ vsqrt(result, input); 3825 __ vsqrt(result, input);
3680 } 3826 }
3681 3827
3682 3828
3683 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { 3829 void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
3684 DoubleRegister input = ToDoubleRegister(instr->value()); 3830 CpuFeatures::Scope scope(VFP2);
3685 DoubleRegister result = ToDoubleRegister(instr->result()); 3831 DwVfpRegister input = ToDoubleRegister(instr->value());
3686 DoubleRegister temp = ToDoubleRegister(instr->temp()); 3832 DwVfpRegister result = ToDoubleRegister(instr->result());
3833 DwVfpRegister temp = ToDoubleRegister(instr->temp());
3687 3834
3688 // Note that according to ECMA-262 15.8.2.13: 3835 // Note that according to ECMA-262 15.8.2.13:
3689 // Math.pow(-Infinity, 0.5) == Infinity 3836 // Math.pow(-Infinity, 0.5) == Infinity
3690 // Math.sqrt(-Infinity) == NaN 3837 // Math.sqrt(-Infinity) == NaN
3691 Label done; 3838 Label done;
3692 __ vmov(temp, -V8_INFINITY, scratch0()); 3839 __ vmov(temp, -V8_INFINITY, scratch0());
3693 __ VFPCompareAndSetFlags(input, temp); 3840 __ VFPCompareAndSetFlags(input, temp);
3694 __ vneg(result, temp, eq); 3841 __ vneg(result, temp, eq);
3695 __ b(&done, eq); 3842 __ b(&done, eq);
3696 3843
3697 // Add +0 to convert -0 to +0. 3844 // Add +0 to convert -0 to +0.
3698 __ vadd(result, input, kDoubleRegZero); 3845 __ vadd(result, input, kDoubleRegZero);
3699 __ vsqrt(result, result); 3846 __ vsqrt(result, result);
3700 __ bind(&done); 3847 __ bind(&done);
3701 } 3848 }
3702 3849
3703 3850
3704 void LCodeGen::DoPower(LPower* instr) { 3851 void LCodeGen::DoPower(LPower* instr) {
3852 CpuFeatures::Scope scope(VFP2);
3705 Representation exponent_type = instr->hydrogen()->right()->representation(); 3853 Representation exponent_type = instr->hydrogen()->right()->representation();
3706 // Having marked this as a call, we can use any registers. 3854 // Having marked this as a call, we can use any registers.
3707 // Just make sure that the input/output registers are the expected ones. 3855 // Just make sure that the input/output registers are the expected ones.
3708 ASSERT(!instr->right()->IsDoubleRegister() || 3856 ASSERT(!instr->right()->IsDoubleRegister() ||
3709 ToDoubleRegister(instr->right()).is(d2)); 3857 ToDoubleRegister(instr->right()).is(d2));
3710 ASSERT(!instr->right()->IsRegister() || 3858 ASSERT(!instr->right()->IsRegister() ||
3711 ToRegister(instr->right()).is(r2)); 3859 ToRegister(instr->right()).is(r2));
3712 ASSERT(ToDoubleRegister(instr->left()).is(d1)); 3860 ASSERT(ToDoubleRegister(instr->left()).is(d1));
3713 ASSERT(ToDoubleRegister(instr->result()).is(d3)); 3861 ASSERT(ToDoubleRegister(instr->result()).is(d3));
3714 3862
(...skipping 12 matching lines...) Expand all
3727 __ CallStub(&stub); 3875 __ CallStub(&stub);
3728 } else { 3876 } else {
3729 ASSERT(exponent_type.IsDouble()); 3877 ASSERT(exponent_type.IsDouble());
3730 MathPowStub stub(MathPowStub::DOUBLE); 3878 MathPowStub stub(MathPowStub::DOUBLE);
3731 __ CallStub(&stub); 3879 __ CallStub(&stub);
3732 } 3880 }
3733 } 3881 }
3734 3882
3735 3883
3736 void LCodeGen::DoRandom(LRandom* instr) { 3884 void LCodeGen::DoRandom(LRandom* instr) {
3885 CpuFeatures::Scope scope(VFP2);
3737 class DeferredDoRandom: public LDeferredCode { 3886 class DeferredDoRandom: public LDeferredCode {
3738 public: 3887 public:
3739 DeferredDoRandom(LCodeGen* codegen, LRandom* instr) 3888 DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
3740 : LDeferredCode(codegen), instr_(instr) { } 3889 : LDeferredCode(codegen), instr_(instr) { }
3741 virtual void Generate() { codegen()->DoDeferredRandom(instr_); } 3890 virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
3742 virtual LInstruction* instr() { return instr_; } 3891 virtual LInstruction* instr() { return instr_; }
3743 private: 3892 private:
3744 LRandom* instr_; 3893 LRandom* instr_;
3745 }; 3894 };
3746 3895
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
3805 3954
3806 3955
3807 void LCodeGen::DoDeferredRandom(LRandom* instr) { 3956 void LCodeGen::DoDeferredRandom(LRandom* instr) {
3808 __ PrepareCallCFunction(1, scratch0()); 3957 __ PrepareCallCFunction(1, scratch0());
3809 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); 3958 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
3810 // Return value is in r0. 3959 // Return value is in r0.
3811 } 3960 }
3812 3961
3813 3962
3814 void LCodeGen::DoMathExp(LMathExp* instr) { 3963 void LCodeGen::DoMathExp(LMathExp* instr) {
3815 DoubleRegister input = ToDoubleRegister(instr->value()); 3964 CpuFeatures::Scope scope(VFP2);
3816 DoubleRegister result = ToDoubleRegister(instr->result()); 3965 DwVfpRegister input = ToDoubleRegister(instr->value());
3817 DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); 3966 DwVfpRegister result = ToDoubleRegister(instr->result());
3818 DoubleRegister double_scratch2 = double_scratch0(); 3967 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
3968 DwVfpRegister double_scratch2 = double_scratch0();
3819 Register temp1 = ToRegister(instr->temp1()); 3969 Register temp1 = ToRegister(instr->temp1());
3820 Register temp2 = ToRegister(instr->temp2()); 3970 Register temp2 = ToRegister(instr->temp2());
3821 3971
3822 MathExpGenerator::EmitMathExp( 3972 MathExpGenerator::EmitMathExp(
3823 masm(), input, result, double_scratch1, double_scratch2, 3973 masm(), input, result, double_scratch1, double_scratch2,
3824 temp1, temp2, scratch0()); 3974 temp1, temp2, scratch0());
3825 } 3975 }
3826 3976
3827 3977
3828 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { 3978 void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
(...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after
4094 } 4244 }
4095 __ cmp(ip, ToRegister(instr->length())); 4245 __ cmp(ip, ToRegister(instr->length()));
4096 } else { 4246 } else {
4097 __ cmp(ToRegister(instr->index()), ToRegister(instr->length())); 4247 __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
4098 } 4248 }
4099 DeoptimizeIf(hs, instr->environment()); 4249 DeoptimizeIf(hs, instr->environment());
4100 } 4250 }
4101 4251
4102 4252
4103 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4253 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4254 CpuFeatures::Scope scope(VFP2);
4104 Register external_pointer = ToRegister(instr->elements()); 4255 Register external_pointer = ToRegister(instr->elements());
4105 Register key = no_reg; 4256 Register key = no_reg;
4106 ElementsKind elements_kind = instr->elements_kind(); 4257 ElementsKind elements_kind = instr->elements_kind();
4107 bool key_is_constant = instr->key()->IsConstantOperand(); 4258 bool key_is_constant = instr->key()->IsConstantOperand();
4108 int constant_key = 0; 4259 int constant_key = 0;
4109 if (key_is_constant) { 4260 if (key_is_constant) {
4110 constant_key = ToInteger32(LConstantOperand::cast(instr->key())); 4261 constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
4111 if (constant_key & 0xF0000000) { 4262 if (constant_key & 0xF0000000) {
4112 Abort("array index constant value too big."); 4263 Abort("array index constant value too big.");
4113 } 4264 }
(...skipping 50 matching lines...) Expand 10 before | Expand all | Expand 10 after
4164 case DICTIONARY_ELEMENTS: 4315 case DICTIONARY_ELEMENTS:
4165 case NON_STRICT_ARGUMENTS_ELEMENTS: 4316 case NON_STRICT_ARGUMENTS_ELEMENTS:
4166 UNREACHABLE(); 4317 UNREACHABLE();
4167 break; 4318 break;
4168 } 4319 }
4169 } 4320 }
4170 } 4321 }
4171 4322
4172 4323
4173 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { 4324 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
4325 CpuFeatures::Scope scope(VFP2);
4174 DwVfpRegister value = ToDoubleRegister(instr->value()); 4326 DwVfpRegister value = ToDoubleRegister(instr->value());
4175 Register elements = ToRegister(instr->elements()); 4327 Register elements = ToRegister(instr->elements());
4176 Register key = no_reg; 4328 Register key = no_reg;
4177 Register scratch = scratch0(); 4329 Register scratch = scratch0();
4178 bool key_is_constant = instr->key()->IsConstantOperand(); 4330 bool key_is_constant = instr->key()->IsConstantOperand();
4179 int constant_key = 0; 4331 int constant_key = 0;
4180 4332
4181 // Calculate the effective address of the slot in the array to store the 4333 // Calculate the effective address of the slot in the array to store the
4182 // double value. 4334 // double value.
4183 if (key_is_constant) { 4335 if (key_is_constant) {
(...skipping 256 matching lines...) Expand 10 before | Expand all | Expand 10 after
4440 4592
4441 4593
4442 void LCodeGen::DoStringLength(LStringLength* instr) { 4594 void LCodeGen::DoStringLength(LStringLength* instr) {
4443 Register string = ToRegister(instr->string()); 4595 Register string = ToRegister(instr->string());
4444 Register result = ToRegister(instr->result()); 4596 Register result = ToRegister(instr->result());
4445 __ ldr(result, FieldMemOperand(string, String::kLengthOffset)); 4597 __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
4446 } 4598 }
4447 4599
4448 4600
4449 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { 4601 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
4602 CpuFeatures::Scope scope(VFP2);
4450 LOperand* input = instr->value(); 4603 LOperand* input = instr->value();
4451 ASSERT(input->IsRegister() || input->IsStackSlot()); 4604 ASSERT(input->IsRegister() || input->IsStackSlot());
4452 LOperand* output = instr->result(); 4605 LOperand* output = instr->result();
4453 ASSERT(output->IsDoubleRegister()); 4606 ASSERT(output->IsDoubleRegister());
4454 SwVfpRegister single_scratch = double_scratch0().low(); 4607 SwVfpRegister single_scratch = double_scratch0().low();
4455 if (input->IsStackSlot()) { 4608 if (input->IsStackSlot()) {
4456 Register scratch = scratch0(); 4609 Register scratch = scratch0();
4457 __ ldr(scratch, ToMemOperand(input)); 4610 __ ldr(scratch, ToMemOperand(input));
4458 __ vmov(single_scratch, scratch); 4611 __ vmov(single_scratch, scratch);
4459 } else { 4612 } else {
4460 __ vmov(single_scratch, ToRegister(input)); 4613 __ vmov(single_scratch, ToRegister(input));
4461 } 4614 }
4462 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); 4615 __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
4463 } 4616 }
4464 4617
4465 4618
4466 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { 4619 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
4620 CpuFeatures::Scope scope(VFP2);
4467 LOperand* input = instr->value(); 4621 LOperand* input = instr->value();
4468 LOperand* output = instr->result(); 4622 LOperand* output = instr->result();
4469 4623
4470 SwVfpRegister flt_scratch = double_scratch0().low(); 4624 SwVfpRegister flt_scratch = double_scratch0().low();
4471 __ vmov(flt_scratch, ToRegister(input)); 4625 __ vmov(flt_scratch, ToRegister(input));
4472 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); 4626 __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
4473 } 4627 }
4474 4628
4475 4629
4476 void LCodeGen::DoNumberTagI(LNumberTagI* instr) { 4630 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
4518 Register reg = ToRegister(input); 4672 Register reg = ToRegister(input);
4519 4673
4520 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); 4674 DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
4521 __ cmp(reg, Operand(Smi::kMaxValue)); 4675 __ cmp(reg, Operand(Smi::kMaxValue));
4522 __ b(hi, deferred->entry()); 4676 __ b(hi, deferred->entry());
4523 __ SmiTag(reg, reg); 4677 __ SmiTag(reg, reg);
4524 __ bind(deferred->exit()); 4678 __ bind(deferred->exit());
4525 } 4679 }
4526 4680
4527 4681
4682 // Convert unsigned integer with specified number of leading zeroes in binary
4683 // representation to IEEE 754 double.
4684 // Integer to convert is passed in register hiword.
4685 // Resulting double is returned in registers hiword:loword.
4686 // This functions does not work correctly for 0.
4687 static void GenerateUInt2Double(MacroAssembler* masm,
4688 Register hiword,
4689 Register loword,
4690 Register scratch,
4691 int leading_zeroes) {
4692 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
4693 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
4694
4695 const int mantissa_shift_for_hi_word =
4696 meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
4697 const int mantissa_shift_for_lo_word =
4698 kBitsPerInt - mantissa_shift_for_hi_word;
4699 masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
4700 if (mantissa_shift_for_hi_word > 0) {
4701 masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
4702 masm->orr(hiword, scratch,
4703 Operand(hiword, LSR, mantissa_shift_for_hi_word));
4704 } else {
4705 masm->mov(loword, Operand(0, RelocInfo::NONE));
4706 masm->orr(hiword, scratch,
4707 Operand(hiword, LSL, -mantissa_shift_for_hi_word));
4708 }
4709
4710 // If least significant bit of biased exponent was not 1 it was corrupted
4711 // by most significant bit of mantissa so we should fix that.
4712 if (!(biased_exponent & 1)) {
4713 masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
4714 }
4715 }
4716
4717
4528 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, 4718 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
4529 LOperand* value, 4719 LOperand* value,
4530 IntegerSignedness signedness) { 4720 IntegerSignedness signedness) {
4531 Label slow; 4721 Label slow;
4532 Register src = ToRegister(value); 4722 Register src = ToRegister(value);
4533 Register dst = ToRegister(instr->result()); 4723 Register dst = ToRegister(instr->result());
4534 DoubleRegister dbl_scratch = double_scratch0(); 4724 DwVfpRegister dbl_scratch = double_scratch0();
4535 SwVfpRegister flt_scratch = dbl_scratch.low(); 4725 SwVfpRegister flt_scratch = dbl_scratch.low();
4536 4726
4537 // Preserve the value of all registers. 4727 // Preserve the value of all registers.
4538 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); 4728 PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
4539 4729
4540 Label done; 4730 Label done;
4541 if (signedness == SIGNED_INT32) { 4731 if (signedness == SIGNED_INT32) {
4542 // There was overflow, so bits 30 and 31 of the original integer 4732 // There was overflow, so bits 30 and 31 of the original integer
4543 // disagree. Try to allocate a heap number in new space and store 4733 // disagree. Try to allocate a heap number in new space and store
4544 // the value in there. If that fails, call the runtime system. 4734 // the value in there. If that fails, call the runtime system.
4545 if (dst.is(src)) { 4735 if (dst.is(src)) {
4546 __ SmiUntag(src, dst); 4736 __ SmiUntag(src, dst);
4547 __ eor(src, src, Operand(0x80000000)); 4737 __ eor(src, src, Operand(0x80000000));
4548 } 4738 }
4549 __ vmov(flt_scratch, src); 4739 if (CpuFeatures::IsSupported(VFP2)) {
4550 __ vcvt_f64_s32(dbl_scratch, flt_scratch); 4740 CpuFeatures::Scope scope(VFP2);
4741 __ vmov(flt_scratch, src);
4742 __ vcvt_f64_s32(dbl_scratch, flt_scratch);
4743 } else {
4744 FloatingPointHelper::Destination dest =
4745 FloatingPointHelper::kCoreRegisters;
4746 FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
4747 sfpd_lo, sfpd_hi,
4748 scratch0(), s0);
4749 }
4551 } else { 4750 } else {
4552 __ vmov(flt_scratch, src); 4751 if (CpuFeatures::IsSupported(VFP2)) {
4553 __ vcvt_f64_u32(dbl_scratch, flt_scratch); 4752 CpuFeatures::Scope scope(VFP2);
4753 __ vmov(flt_scratch, src);
4754 __ vcvt_f64_u32(dbl_scratch, flt_scratch);
4755 } else {
4756 Label no_leading_zero, done;
4757 __ tst(src, Operand(0x80000000));
4758 __ b(ne, &no_leading_zero);
4759
4760 // Integer has one leading zeros.
4761 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1);
4762 __ b(&done);
4763
4764 __ bind(&no_leading_zero);
4765 GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0);
4766 __ b(&done);
4767 }
4554 } 4768 }
4555 4769
4556 if (FLAG_inline_new) { 4770 if (FLAG_inline_new) {
4557 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); 4771 __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex);
4558 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); 4772 __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT);
4559 __ Move(dst, r5); 4773 __ Move(dst, r5);
4560 __ b(&done); 4774 __ b(&done);
4561 } 4775 }
4562 4776
4563 // Slow case: Call the runtime system to do the number allocation. 4777 // Slow case: Call the runtime system to do the number allocation.
4564 __ bind(&slow); 4778 __ bind(&slow);
4565 4779
4566 // TODO(3095996): Put a valid pointer value in the stack slot where the result 4780 // TODO(3095996): Put a valid pointer value in the stack slot where the result
4567 // register is stored, as this register is in the pointer map, but contains an 4781 // register is stored, as this register is in the pointer map, but contains an
4568 // integer value. 4782 // integer value.
4569 __ mov(ip, Operand(0)); 4783 __ mov(ip, Operand(0));
4570 __ StoreToSafepointRegisterSlot(ip, dst); 4784 __ StoreToSafepointRegisterSlot(ip, dst);
4571 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); 4785 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
4572 __ Move(dst, r0); 4786 __ Move(dst, r0);
4573 __ sub(dst, dst, Operand(kHeapObjectTag)); 4787 __ sub(dst, dst, Operand(kHeapObjectTag));
4574 4788
4575 // Done. Put the value in dbl_scratch into the value of the allocated heap 4789 // Done. Put the value in dbl_scratch into the value of the allocated heap
4576 // number. 4790 // number.
4577 __ bind(&done); 4791 __ bind(&done);
4578 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); 4792 if (CpuFeatures::IsSupported(VFP2)) {
4793 CpuFeatures::Scope scope(VFP2);
4794 __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
4795 } else {
4796 __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
4797 __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
4798 }
4579 __ add(dst, dst, Operand(kHeapObjectTag)); 4799 __ add(dst, dst, Operand(kHeapObjectTag));
4580 __ StoreToSafepointRegisterSlot(dst, dst); 4800 __ StoreToSafepointRegisterSlot(dst, dst);
4581 } 4801 }
4582 4802
4583 4803
4584 void LCodeGen::DoNumberTagD(LNumberTagD* instr) { 4804 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
4585 class DeferredNumberTagD: public LDeferredCode { 4805 class DeferredNumberTagD: public LDeferredCode {
4586 public: 4806 public:
4587 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) 4807 DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
4588 : LDeferredCode(codegen), instr_(instr) { } 4808 : LDeferredCode(codegen), instr_(instr) { }
4589 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); } 4809 virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
4590 virtual LInstruction* instr() { return instr_; } 4810 virtual LInstruction* instr() { return instr_; }
4591 private: 4811 private:
4592 LNumberTagD* instr_; 4812 LNumberTagD* instr_;
4593 }; 4813 };
4594 4814
4595 DoubleRegister input_reg = ToDoubleRegister(instr->value()); 4815 DwVfpRegister input_reg = ToDoubleRegister(instr->value());
4596 Register scratch = scratch0(); 4816 Register scratch = scratch0();
4597 Register reg = ToRegister(instr->result()); 4817 Register reg = ToRegister(instr->result());
4598 Register temp1 = ToRegister(instr->temp()); 4818 Register temp1 = ToRegister(instr->temp());
4599 Register temp2 = ToRegister(instr->temp2()); 4819 Register temp2 = ToRegister(instr->temp2());
4600 4820
4601 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); 4821 DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
4602 if (FLAG_inline_new) { 4822 if (FLAG_inline_new) {
4603 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); 4823 __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
4604 // We want the untagged address first for performance 4824 // We want the untagged address first for performance
4605 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), 4825 __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
4606 DONT_TAG_RESULT); 4826 DONT_TAG_RESULT);
4607 } else { 4827 } else {
4608 __ jmp(deferred->entry()); 4828 __ jmp(deferred->entry());
4609 } 4829 }
4610 __ bind(deferred->exit()); 4830 __ bind(deferred->exit());
4611 __ vstr(input_reg, reg, HeapNumber::kValueOffset); 4831 if (CpuFeatures::IsSupported(VFP2)) {
4832 CpuFeatures::Scope scope(VFP2);
4833 __ vstr(input_reg, reg, HeapNumber::kValueOffset);
4834 } else {
4835 __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
4836 __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
4837 }
4612 // Now that we have finished with the object's real address tag it 4838 // Now that we have finished with the object's real address tag it
4613 __ add(reg, reg, Operand(kHeapObjectTag)); 4839 __ add(reg, reg, Operand(kHeapObjectTag));
4614 } 4840 }
4615 4841
4616 4842
4617 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { 4843 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
4618 // TODO(3095996): Get rid of this. For now, we need to make the 4844 // TODO(3095996): Get rid of this. For now, we need to make the
4619 // result register contain a valid pointer because it is already 4845 // result register contain a valid pointer because it is already
4620 // contained in the register pointer map. 4846 // contained in the register pointer map.
4621 Register reg = ToRegister(instr->result()); 4847 Register reg = ToRegister(instr->result());
(...skipping 20 matching lines...) Expand all
4642 // If the input is a HeapObject, SmiUntag will set the carry flag. 4868 // If the input is a HeapObject, SmiUntag will set the carry flag.
4643 __ SmiUntag(result, input, SetCC); 4869 __ SmiUntag(result, input, SetCC);
4644 DeoptimizeIf(cs, instr->environment()); 4870 DeoptimizeIf(cs, instr->environment());
4645 } else { 4871 } else {
4646 __ SmiUntag(result, input); 4872 __ SmiUntag(result, input);
4647 } 4873 }
4648 } 4874 }
4649 4875
4650 4876
4651 void LCodeGen::EmitNumberUntagD(Register input_reg, 4877 void LCodeGen::EmitNumberUntagD(Register input_reg,
4652 DoubleRegister result_reg, 4878 DwVfpRegister result_reg,
4653 bool deoptimize_on_undefined, 4879 bool deoptimize_on_undefined,
4654 bool deoptimize_on_minus_zero, 4880 bool deoptimize_on_minus_zero,
4655 LEnvironment* env) { 4881 LEnvironment* env) {
4656 Register scratch = scratch0(); 4882 Register scratch = scratch0();
4657 SwVfpRegister flt_scratch = double_scratch0().low(); 4883 SwVfpRegister flt_scratch = double_scratch0().low();
4658 ASSERT(!result_reg.is(double_scratch0())); 4884 ASSERT(!result_reg.is(double_scratch0()));
4885 CpuFeatures::Scope scope(VFP2);
4659 4886
4660 Label load_smi, heap_number, done; 4887 Label load_smi, heap_number, done;
4661 4888
4662 // Smi check. 4889 // Smi check.
4663 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4890 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4664 4891
4665 // Heap number map check. 4892 // Heap number map check.
4666 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4893 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4667 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4894 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4668 __ cmp(scratch, Operand(ip)); 4895 __ cmp(scratch, Operand(ip));
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
4723 // SmiUntag(heap_object, SetCC) 4950 // SmiUntag(heap_object, SetCC)
4724 STATIC_ASSERT(kHeapObjectTag == 1); 4951 STATIC_ASSERT(kHeapObjectTag == 1);
4725 __ adc(input_reg, input_reg, Operand(input_reg)); 4952 __ adc(input_reg, input_reg, Operand(input_reg));
4726 4953
4727 // Heap number map check. 4954 // Heap number map check.
4728 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4955 __ ldr(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4729 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); 4956 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
4730 __ cmp(scratch1, Operand(ip)); 4957 __ cmp(scratch1, Operand(ip));
4731 4958
4732 if (instr->truncating()) { 4959 if (instr->truncating()) {
4960 CpuFeatures::Scope scope(VFP2);
4733 Register scratch3 = ToRegister(instr->temp2()); 4961 Register scratch3 = ToRegister(instr->temp2());
4734 SwVfpRegister single_scratch = double_scratch.low(); 4962 SwVfpRegister single_scratch = double_scratch.low();
4735 ASSERT(!scratch3.is(input_reg) && 4963 ASSERT(!scratch3.is(input_reg) &&
4736 !scratch3.is(scratch1) && 4964 !scratch3.is(scratch1) &&
4737 !scratch3.is(scratch2)); 4965 !scratch3.is(scratch2));
4738 // Performs a truncating conversion of a floating point number as used by 4966 // Performs a truncating conversion of a floating point number as used by
4739 // the JS bitwise operations. 4967 // the JS bitwise operations.
4740 Label heap_number; 4968 Label heap_number;
4741 __ b(eq, &heap_number); 4969 __ b(eq, &heap_number);
4742 // Check for undefined. Undefined is converted to zero for truncating 4970 // Check for undefined. Undefined is converted to zero for truncating
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
4814 } 5042 }
4815 5043
4816 5044
4817 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { 5045 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
4818 LOperand* input = instr->value(); 5046 LOperand* input = instr->value();
4819 ASSERT(input->IsRegister()); 5047 ASSERT(input->IsRegister());
4820 LOperand* result = instr->result(); 5048 LOperand* result = instr->result();
4821 ASSERT(result->IsDoubleRegister()); 5049 ASSERT(result->IsDoubleRegister());
4822 5050
4823 Register input_reg = ToRegister(input); 5051 Register input_reg = ToRegister(input);
4824 DoubleRegister result_reg = ToDoubleRegister(result); 5052 DwVfpRegister result_reg = ToDoubleRegister(result);
4825 5053
4826 EmitNumberUntagD(input_reg, result_reg, 5054 EmitNumberUntagD(input_reg, result_reg,
4827 instr->hydrogen()->deoptimize_on_undefined(), 5055 instr->hydrogen()->deoptimize_on_undefined(),
4828 instr->hydrogen()->deoptimize_on_minus_zero(), 5056 instr->hydrogen()->deoptimize_on_minus_zero(),
4829 instr->environment()); 5057 instr->environment());
4830 } 5058 }
4831 5059
4832 5060
4833 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { 5061 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
4834 Register result_reg = ToRegister(instr->result()); 5062 Register result_reg = ToRegister(instr->result());
(...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after
4963 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); 5191 __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP);
4964 __ b(eq, &success); 5192 __ b(eq, &success);
4965 } 5193 }
4966 Handle<Map> map = map_set->last(); 5194 Handle<Map> map = map_set->last();
4967 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); 5195 DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment());
4968 __ bind(&success); 5196 __ bind(&success);
4969 } 5197 }
4970 5198
4971 5199
4972 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5200 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
4973 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 5201 CpuFeatures::Scope vfp_scope(VFP2);
5202 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
4974 Register result_reg = ToRegister(instr->result()); 5203 Register result_reg = ToRegister(instr->result());
4975 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5204 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4976 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); 5205 __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
4977 } 5206 }
4978 5207
4979 5208
4980 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { 5209 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
5210 CpuFeatures::Scope scope(VFP2);
4981 Register unclamped_reg = ToRegister(instr->unclamped()); 5211 Register unclamped_reg = ToRegister(instr->unclamped());
4982 Register result_reg = ToRegister(instr->result()); 5212 Register result_reg = ToRegister(instr->result());
4983 __ ClampUint8(result_reg, unclamped_reg); 5213 __ ClampUint8(result_reg, unclamped_reg);
4984 } 5214 }
4985 5215
4986 5216
4987 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { 5217 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
5218 CpuFeatures::Scope scope(VFP2);
4988 Register scratch = scratch0(); 5219 Register scratch = scratch0();
4989 Register input_reg = ToRegister(instr->unclamped()); 5220 Register input_reg = ToRegister(instr->unclamped());
4990 Register result_reg = ToRegister(instr->result()); 5221 Register result_reg = ToRegister(instr->result());
4991 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5222 DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
4992 Label is_smi, done, heap_number; 5223 Label is_smi, done, heap_number;
4993 5224
4994 // Both smi and heap number cases are handled. 5225 // Both smi and heap number cases are handled.
4995 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); 5226 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
4996 5227
4997 // Check for heap number 5228 // Check for heap number
4998 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5229 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4999 __ cmp(scratch, Operand(factory()->heap_number_map())); 5230 __ cmp(scratch, Operand(factory()->heap_number_map()));
5000 __ b(eq, &heap_number); 5231 __ b(eq, &heap_number);
5001 5232
(...skipping 556 matching lines...) Expand 10 before | Expand all | Expand 10 after
5558 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); 5789 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
5559 5790
5560 // Check the marker in the calling frame. 5791 // Check the marker in the calling frame.
5561 __ bind(&check_frame_marker); 5792 __ bind(&check_frame_marker);
5562 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); 5793 __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
5563 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); 5794 __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
5564 } 5795 }
5565 5796
5566 5797
5567 void LCodeGen::EnsureSpaceForLazyDeopt() { 5798 void LCodeGen::EnsureSpaceForLazyDeopt() {
5799 if (info()->IsStub()) return;
5568 // Ensure that we have enough space after the previous lazy-bailout 5800 // Ensure that we have enough space after the previous lazy-bailout
5569 // instruction for patching the code here. 5801 // instruction for patching the code here.
5570 int current_pc = masm()->pc_offset(); 5802 int current_pc = masm()->pc_offset();
5571 int patch_size = Deoptimizer::patch_size(); 5803 int patch_size = Deoptimizer::patch_size();
5572 if (current_pc < last_lazy_deopt_pc_ + patch_size) { 5804 if (current_pc < last_lazy_deopt_pc_ + patch_size) {
5573 // Block literal pool emission for duration of padding. 5805 // Block literal pool emission for duration of padding.
5574 Assembler::BlockConstPoolScope block_const_pool(masm()); 5806 Assembler::BlockConstPoolScope block_const_pool(masm());
5575 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; 5807 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
5576 ASSERT_EQ(0, padding_size % Assembler::kInstrSize); 5808 ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
5577 while (padding_size > 0) { 5809 while (padding_size > 0) {
(...skipping 211 matching lines...) Expand 10 before | Expand all | Expand 10 after
5789 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize)); 6021 __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2 - kSmiTagSize));
5790 __ ldr(result, FieldMemOperand(scratch, 6022 __ ldr(result, FieldMemOperand(scratch,
5791 FixedArray::kHeaderSize - kPointerSize)); 6023 FixedArray::kHeaderSize - kPointerSize));
5792 __ bind(&done); 6024 __ bind(&done);
5793 } 6025 }
5794 6026
5795 6027
5796 #undef __ 6028 #undef __
5797 6029
5798 } } // namespace v8::internal 6030 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/arm/lithium-codegen-arm.h ('k') | src/arm/lithium-gap-resolver-arm.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698