| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 11 matching lines...) |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_X64) | 30 #if defined(V8_TARGET_ARCH_X64) |
| 31 | 31 |
| 32 #include "bootstrapper.h" | 32 #include "codegen.h" |
| 33 #include "code-stubs.h" | |
| 34 #include "codegen-inl.h" | |
| 35 #include "compiler.h" | |
| 36 #include "debug.h" | |
| 37 #include "ic-inl.h" | |
| 38 #include "parser.h" | |
| 39 #include "regexp-macro-assembler.h" | |
| 40 #include "register-allocator-inl.h" | |
| 41 #include "scopes.h" | |
| 42 #include "virtual-frame-inl.h" | |
| 43 | 33 |
| 44 namespace v8 { | 34 namespace v8 { |
| 45 namespace internal { | 35 namespace internal { |
| 46 | 36 |
| 47 #define __ ACCESS_MASM(masm) | |
| 48 | |
| 49 // ------------------------------------------------------------------------- | |
| 50 // Platform-specific FrameRegisterState functions. | |
| 51 | |
| 52 void FrameRegisterState::Save(MacroAssembler* masm) const { | |
| 53 for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { | |
| 54 int action = registers_[i]; | |
| 55 if (action == kPush) { | |
| 56 __ push(RegisterAllocator::ToRegister(i)); | |
| 57 } else if (action != kIgnore && (action & kSyncedFlag) == 0) { | |
| 58 __ movq(Operand(rbp, action), RegisterAllocator::ToRegister(i)); | |
| 59 } | |
| 60 } | |
| 61 } | |
| 62 | |
| 63 | |
| 64 void FrameRegisterState::Restore(MacroAssembler* masm) const { | |
| 65 // Restore registers in reverse order due to the stack. | |
| 66 for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) { | |
| 67 int action = registers_[i]; | |
| 68 if (action == kPush) { | |
| 69 __ pop(RegisterAllocator::ToRegister(i)); | |
| 70 } else if (action != kIgnore) { | |
| 71 action &= ~kSyncedFlag; | |
| 72 __ movq(RegisterAllocator::ToRegister(i), Operand(rbp, action)); | |
| 73 } | |
| 74 } | |
| 75 } | |
| 76 | |
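For context on the Save/Restore pair deleted above: each register carries a small action encoding, and both loops dispatch on it. Below is a minimal standalone sketch of that decoding; the constant values are hypothetical stand-ins (the real ones live in the virtual-frame/register-allocator headers), only the dispatch structure mirrors the code.

```cpp
#include <cstdio>

// Hypothetical stand-ins for the real constants.
const int kIgnore = -1;      // Register not live; nothing to save or restore.
const int kPush = -2;        // Register saved with push, restored with pop.
const int kSyncedFlag = 1;   // Low bit: the frame slot already holds the value.

// Decode one action the way Save()/Restore() above interpret it.
void DescribeAction(int reg, int action) {
  if (action == kPush) {
    std::printf("r%d: push on save, pop on restore\n", reg);
  } else if (action == kIgnore) {
    std::printf("r%d: untouched\n", reg);
  } else if ((action & kSyncedFlag) != 0) {
    // Synced: Save() skips the store; Restore() still reloads from the
    // rbp-relative slot at (action & ~kSyncedFlag).
    std::printf("r%d: synced at rbp%+d\n", reg, action & ~kSyncedFlag);
  } else {
    std::printf("r%d: store to / load from rbp%+d\n", reg, action);
  }
}

int main() {
  DescribeAction(0, kPush);
  DescribeAction(1, kIgnore);
  DescribeAction(2, -16);                // Unsynced frame slot.
  DescribeAction(3, -24 | kSyncedFlag);  // Synced frame slot.
}
```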
| 77 | |
| 78 #undef __ | |
| 79 #define __ ACCESS_MASM(masm_) | |
| 80 | |
| 81 // ------------------------------------------------------------------------- | |
| 82 // Platform-specific DeferredCode functions. | |
| 83 | |
| 84 void DeferredCode::SaveRegisters() { | |
| 85 frame_state_.Save(masm_); | |
| 86 } | |
| 87 | |
| 88 | |
| 89 void DeferredCode::RestoreRegisters() { | |
| 90 frame_state_.Restore(masm_); | |
| 91 } | |
| 92 | |
| 93 | |
| 94 // ------------------------------------------------------------------------- | 37 // ------------------------------------------------------------------------- |
| 95 // Platform-specific RuntimeCallHelper functions. | 38 // Platform-specific RuntimeCallHelper functions. |
| 96 | 39 |
| 97 void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | |
| 98 frame_state_->Save(masm); | |
| 99 } | |
| 100 | |
| 101 | |
| 102 void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | |
| 103 frame_state_->Restore(masm); | |
| 104 } | |
| 105 | |
| 106 | |
| 107 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | 40 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { |
| 108 masm->EnterInternalFrame(); | 41 masm->EnterInternalFrame(); |
| 109 } | 42 } |
| 110 | 43 |
| 111 | 44 |
| 112 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 45 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
| 113 masm->LeaveInternalFrame(); | 46 masm->LeaveInternalFrame(); |
| 114 } | 47 } |
| 115 | 48 |
| 116 | 49 |
| 117 // ------------------------------------------------------------------------- | |
| 118 // CodeGenState implementation. | |
| 119 | |
| 120 CodeGenState::CodeGenState(CodeGenerator* owner) | |
| 121 : owner_(owner), | |
| 122 destination_(NULL), | |
| 123 previous_(NULL) { | |
| 124 owner_->set_state(this); | |
| 125 } | |
| 126 | |
| 127 | |
| 128 CodeGenState::CodeGenState(CodeGenerator* owner, | |
| 129 ControlDestination* destination) | |
| 130 : owner_(owner), | |
| 131 destination_(destination), | |
| 132 previous_(owner->state()) { | |
| 133 owner_->set_state(this); | |
| 134 } | |
| 135 | |
| 136 | |
| 137 CodeGenState::~CodeGenState() { | |
| 138 ASSERT(owner_->state() == this); | |
| 139 owner_->set_state(previous_); | |
| 140 } | |
| 141 | |
| 142 | |
| 143 // ------------------------------------------------------------------------- | |
| 144 // CodeGenerator implementation. | |
| 145 | |
| 146 CodeGenerator::CodeGenerator(MacroAssembler* masm) | |
| 147 : deferred_(8), | |
| 148 masm_(masm), | |
| 149 info_(NULL), | |
| 150 frame_(NULL), | |
| 151 allocator_(NULL), | |
| 152 state_(NULL), | |
| 153 loop_nesting_(0), | |
| 154 function_return_is_shadowed_(false), | |
| 155 in_spilled_code_(false) { | |
| 156 } | |
| 157 | |
| 158 | |
| 159 // Calling conventions: | |
| 160 // rbp: caller's frame pointer | |
| 161 // rsp: stack pointer | |
| 162 // rdi: called JS function | |
| 163 // rsi: callee's context | |
| 164 | |
| 165 void CodeGenerator::Generate(CompilationInfo* info) { | |
| 166 // Record the position for debugging purposes. | |
| 167 CodeForFunctionPosition(info->function()); | |
| 168 Comment cmnt(masm_, "[ function compiled by virtual frame code generator"); | |
| 169 | |
| 170 // Initialize state. | |
| 171 info_ = info; | |
| 172 ASSERT(allocator_ == NULL); | |
| 173 RegisterAllocator register_allocator(this); | |
| 174 allocator_ = ®ister_allocator; | |
| 175 ASSERT(frame_ == NULL); | |
| 176 frame_ = new VirtualFrame(); | |
| 177 set_in_spilled_code(false); | |
| 178 | |
| 179 // Adjust for function-level loop nesting. | |
| 180 ASSERT_EQ(0, loop_nesting_); | |
| 181 loop_nesting_ = info->is_in_loop() ? 1 : 0; | |
| 182 | |
| 183 Isolate::Current()->set_jump_target_compiling_deferred_code(false); | |
| 184 | |
| 185 { | |
| 186 CodeGenState state(this); | |
| 187 // Entry: | |
| 188 // Stack: receiver, arguments, return address. | |
| 189 // rbp: caller's frame pointer | |
| 190 // rsp: stack pointer | |
| 191 // rdi: called JS function | |
| 192 // rsi: callee's context | |
| 193 allocator_->Initialize(); | |
| 194 | |
| 195 #ifdef DEBUG | |
| 196 if (strlen(FLAG_stop_at) > 0 && | |
| 197 info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | |
| 198 frame_->SpillAll(); | |
| 199 __ int3(); | |
| 200 } | |
| 201 #endif | |
| 202 | |
| 203 frame_->Enter(); | |
| 204 | |
| 205 // Allocate space for locals and initialize them. | |
| 206 frame_->AllocateStackSlots(); | |
| 207 | |
| 208 // Allocate the local context if needed. | |
| 209 int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; | |
| 210 if (heap_slots > 0) { | |
| 211 Comment cmnt(masm_, "[ allocate local context"); | |
| 212 // Allocate local context. | |
| 213 // Get outer context and create a new context based on it. | |
| 214 frame_->PushFunction(); | |
| 215 Result context; | |
| 216 if (heap_slots <= FastNewContextStub::kMaximumSlots) { | |
| 217 FastNewContextStub stub(heap_slots); | |
| 218 context = frame_->CallStub(&stub, 1); | |
| 219 } else { | |
| 220 context = frame_->CallRuntime(Runtime::kNewContext, 1); | |
| 221 } | |
| 222 | |
| 223 // Update context local. | |
| 224 frame_->SaveContextRegister(); | |
| 225 | |
| 226 // Verify that the runtime call result and rsi agree. | |
| 227 if (FLAG_debug_code) { | |
| 228 __ cmpq(context.reg(), rsi); | |
| 229 __ Assert(equal, "Runtime::NewContext should end up in rsi"); | |
| 230 } | |
| 231 } | |
| 232 | |
| 233 // TODO(1241774): Improve this code: | |
| 234 // 1) only needed if we have a context | |
| 235 // 2) no need to recompute context ptr every single time | |
| 236 // 3) don't copy parameter operand code from SlotOperand! | |
| 237 { | |
| 238 Comment cmnt2(masm_, "[ copy context parameters into .context"); | |
| 239 // Note that iteration order is relevant here! If we have the same | |
| 240 // parameter twice (e.g., function (x, y, x)), and that parameter | |
| 241 // needs to be copied into the context, it must be the last argument | |
| 242 // passed to the parameter that needs to be copied. This is a rare | |
| 243 // case so we don't check for it, instead we rely on the copying | |
| 244 // order: such a parameter is copied repeatedly into the same | |
| 245 // context location and thus the last value is what is seen inside | |
| 246 // the function. | |
| 247 for (int i = 0; i < scope()->num_parameters(); i++) { | |
| 248 Variable* par = scope()->parameter(i); | |
| 249 Slot* slot = par->AsSlot(); | |
| 250 if (slot != NULL && slot->type() == Slot::CONTEXT) { | |
| 251 // The use of SlotOperand below is safe in unspilled code | |
| 252 // because the slot is guaranteed to be a context slot. | |
| 253 // | |
| 254 // There are no parameters in the global scope. | |
| 255 ASSERT(!scope()->is_global_scope()); | |
| 256 frame_->PushParameterAt(i); | |
| 257 Result value = frame_->Pop(); | |
| 258 value.ToRegister(); | |
| 259 | |
| 260 // SlotOperand loads context.reg() with the context object | |
| 261 // stored to, used below in RecordWrite. | |
| 262 Result context = allocator_->Allocate(); | |
| 263 ASSERT(context.is_valid()); | |
| 264 __ movq(SlotOperand(slot, context.reg()), value.reg()); | |
| 265 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; | |
| 266 Result scratch = allocator_->Allocate(); | |
| 267 ASSERT(scratch.is_valid()); | |
| 268 frame_->Spill(context.reg()); | |
| 269 frame_->Spill(value.reg()); | |
| 270 __ RecordWrite(context.reg(), offset, value.reg(), scratch.reg()); | |
| 271 } | |
| 272 } | |
| 273 } | |
| 274 | |
| 275 // Store the arguments object. This must happen after context | |
| 276 // initialization because the arguments object may be stored in | |
| 277 // the context. | |
| 278 if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) { | |
| 279 StoreArgumentsObject(true); | |
| 280 } | |
| 281 | |
| 282 // Initialize ThisFunction reference if present. | |
| 283 if (scope()->is_function_scope() && scope()->function() != NULL) { | |
| 284 frame_->Push(FACTORY->the_hole_value()); | |
| 285 StoreToSlot(scope()->function()->AsSlot(), NOT_CONST_INIT); | |
| 286 } | |
| 287 | |
| 288 // Initialize the function return target after the locals are set | |
| 289 // up, because it needs the expected frame height from the frame. | |
| 290 function_return_.set_direction(JumpTarget::BIDIRECTIONAL); | |
| 291 function_return_is_shadowed_ = false; | |
| 292 | |
| 293 // Generate code to 'execute' declarations and initialize functions | |
| 294 // (source elements). In case of an illegal redeclaration we need to | |
| 295 // handle that instead of processing the declarations. | |
| 296 if (scope()->HasIllegalRedeclaration()) { | |
| 297 Comment cmnt(masm_, "[ illegal redeclarations"); | |
| 298 scope()->VisitIllegalRedeclaration(this); | |
| 299 } else { | |
| 300 Comment cmnt(masm_, "[ declarations"); | |
| 301 ProcessDeclarations(scope()->declarations()); | |
| 302 // Bail out if a stack-overflow exception occurred when processing | |
| 303 // declarations. | |
| 304 if (HasStackOverflow()) return; | |
| 305 } | |
| 306 | |
| 307 if (FLAG_trace) { | |
| 308 frame_->CallRuntime(Runtime::kTraceEnter, 0); | |
| 309 // Ignore the return value. | |
| 310 } | |
| 311 CheckStack(); | |
| 312 | |
| 313 // Compile the body of the function in a vanilla state. Don't | |
| 314 // bother compiling all the code if the scope has an illegal | |
| 315 // redeclaration. | |
| 316 if (!scope()->HasIllegalRedeclaration()) { | |
| 317 Comment cmnt(masm_, "[ function body"); | |
| 318 #ifdef DEBUG | |
| 319 bool is_builtin = Isolate::Current()->bootstrapper()->IsActive(); | |
| 320 bool should_trace = | |
| 321 is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls; | |
| 322 if (should_trace) { | |
| 323 frame_->CallRuntime(Runtime::kDebugTrace, 0); | |
| 324 // Ignore the return value. | |
| 325 } | |
| 326 #endif | |
| 327 VisitStatements(info->function()->body()); | |
| 328 | |
| 329 // Handle the return from the function. | |
| 330 if (has_valid_frame()) { | |
| 331 // If there is a valid frame, control flow can fall off the end of | |
| 332 // the body. In that case there is an implicit return statement. | |
| 333 ASSERT(!function_return_is_shadowed_); | |
| 334 CodeForReturnPosition(info->function()); | |
| 335 frame_->PrepareForReturn(); | |
| 336 Result undefined(FACTORY->undefined_value()); | |
| 337 if (function_return_.is_bound()) { | |
| 338 function_return_.Jump(&undefined); | |
| 339 } else { | |
| 340 function_return_.Bind(&undefined); | |
| 341 GenerateReturnSequence(&undefined); | |
| 342 } | |
| 343 } else if (function_return_.is_linked()) { | |
| 344 // If the return target has dangling jumps to it, then we have not | |
| 345 // yet generated the return sequence. This can happen when (a) | |
| 346 // control does not flow off the end of the body so we did not | |
| 347 // compile an artificial return statement just above, and (b) there | |
| 348 // are return statements in the body but (c) they are all shadowed. | |
| 349 Result return_value; | |
| 350 function_return_.Bind(&return_value); | |
| 351 GenerateReturnSequence(&return_value); | |
| 352 } | |
| 353 } | |
| 354 } | |
| 355 | |
| 356 // Adjust for function-level loop nesting. | |
| 357 ASSERT_EQ(loop_nesting_, info->is_in_loop() ? 1 : 0); | |
| 358 loop_nesting_ = 0; | |
| 359 | |
| 360 // Code generation state must be reset. | |
| 361 ASSERT(state_ == NULL); | |
| 362 ASSERT(!function_return_is_shadowed_); | |
| 363 function_return_.Unuse(); | |
| 364 DeleteFrame(); | |
| 365 | |
| 366 // Process any deferred code using the register allocator. | |
| 367 if (!HasStackOverflow()) { | |
| 368 info->isolate()->set_jump_target_compiling_deferred_code(true); | |
| 369 ProcessDeferred(); | |
| 370 info->isolate()->set_jump_target_compiling_deferred_code(false); | |
| 371 } | |
| 372 | |
| 373 // There is no need to delete the register allocator, it is a | |
| 374 // stack-allocated local. | |
| 375 allocator_ = NULL; | |
| 376 } | |
| 377 | |
| 378 | |
| 379 Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { | |
| 380 // Currently, this assertion will fail if we try to assign to | |
| 381 // a constant variable that is constant because it is read-only | |
| 382 // (such as the variable referring to a named function expression). | |
| 383 // We need to implement assignments to read-only variables. | |
| 384 // Ideally, we should do this during AST generation (by converting | |
| 385 // such assignments into expression statements); however, in general | |
| 386 // we may not be able to make the decision until past AST generation, | |
| 387 // that is when the entire program is known. | |
| 388 ASSERT(slot != NULL); | |
| 389 int index = slot->index(); | |
| 390 switch (slot->type()) { | |
| 391 case Slot::PARAMETER: | |
| 392 return frame_->ParameterAt(index); | |
| 393 | |
| 394 case Slot::LOCAL: | |
| 395 return frame_->LocalAt(index); | |
| 396 | |
| 397 case Slot::CONTEXT: { | |
| 398 // Follow the context chain if necessary. | |
| 399 ASSERT(!tmp.is(rsi)); // do not overwrite context register | |
| 400 Register context = rsi; | |
| 401 int chain_length = scope()->ContextChainLength(slot->var()->scope()); | |
| 402 for (int i = 0; i < chain_length; i++) { | |
| 403 // Load the closure. | |
| 404 // (All contexts, even 'with' contexts, have a closure, | |
| 405 // and it is the same for all contexts inside a function. | |
| 406 // There is no need to go to the function context first.) | |
| 407 __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 408 // Load the function context (which is the incoming, outer context). | |
| 409 __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset)); | |
| 410 context = tmp; | |
| 411 } | |
| 412 // We may have a 'with' context now. Get the function context. | |
| 413 // (In fact this mov may never be needed, since the scope analysis | |
| 414 // may not permit a direct context access in this case and thus we are | |
| 415 // always at a function context. However it is safe to dereference be- | |
| 416 // cause the function context of a function context is itself. Before | |
| 417 // deleting this mov we should try to create a counter-example first, | |
| 418 // though...) | |
| 419 __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 420 return ContextOperand(tmp, index); | |
| 421 } | |
| 422 | |
| 423 default: | |
| 424 UNREACHABLE(); | |
| 425 return Operand(rsp, 0); | |
| 426 } | |
| 427 } | |
| 428 | |
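The Slot::CONTEXT case above walks the static context chain one lexical level per iteration: load the current context's closure, then the context that closure was created in, and finally strip a possible 'with' context. A self-contained sketch of the same walk over toy structs (names and layouts here are illustrative, not the real V8 heap objects):

```cpp
#include <cassert>

struct ToyContext;
struct ToyFunction {
  ToyContext* context;   // Mirrors JSFunction::kContextOffset.
};
struct ToyContext {
  ToyFunction* closure;  // Mirrors Context::CLOSURE_INDEX.
  ToyContext* fcontext;  // Mirrors Context::FCONTEXT_INDEX.
};

// Same shape as the loop in SlotOperand: per level, load the closure, then
// the context it was created in; the final fcontext load strips a possible
// 'with' context (a function context's fcontext is itself).
ToyContext* WalkContextChain(ToyContext* context, int chain_length) {
  for (int i = 0; i < chain_length; i++) {
    context = context->closure->context;
  }
  return context->fcontext;
}

int main() {
  ToyContext outer = { nullptr, nullptr };
  outer.fcontext = &outer;                 // Function contexts self-link.
  ToyFunction inner_closure = { &outer };  // Inner function defined in outer.
  ToyContext inner = { &inner_closure, nullptr };
  inner.fcontext = &inner;

  assert(WalkContextChain(&inner, 0) == &inner);  // Slot in own context.
  assert(WalkContextChain(&inner, 1) == &outer);  // One lexical level out.
}
```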
| 429 | |
| 430 Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot, | |
| 431 Result tmp, | |
| 432 JumpTarget* slow) { | |
| 433 ASSERT(slot->type() == Slot::CONTEXT); | |
| 434 ASSERT(tmp.is_register()); | |
| 435 Register context = rsi; | |
| 436 | |
| 437 for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { | |
| 438 if (s->num_heap_slots() > 0) { | |
| 439 if (s->calls_eval()) { | |
| 440 // Check that extension is NULL. | |
| 441 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), | |
| 442 Immediate(0)); | |
| 443 slow->Branch(not_equal, not_taken); | |
| 444 } | |
| 445 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 446 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
| 447 context = tmp.reg(); | |
| 448 } | |
| 449 } | |
| 450 // Check that last extension is NULL. | |
| 451 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0)); | |
| 452 slow->Branch(not_equal, not_taken); | |
| 453 __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 454 return ContextOperand(tmp.reg(), slot->index()); | |
| 455 } | |
| 456 | |
| 457 | |
| 458 // Emit code to load the value of an expression to the top of the | |
| 459 // frame. If the expression is boolean-valued it may be compiled (or | |
| 460 // partially compiled) into control flow to the control destination. | |
| 461 // If force_control is true, control flow is forced. | |
| 462 void CodeGenerator::LoadCondition(Expression* expr, | |
| 463 ControlDestination* dest, | |
| 464 bool force_control) { | |
| 465 ASSERT(!in_spilled_code()); | |
| 466 int original_height = frame_->height(); | |
| 467 | |
| 468 { CodeGenState new_state(this, dest); | |
| 469 Visit(expr); | |
| 470 | |
| 471 // If we hit a stack overflow, we may not have actually visited | |
| 472 // the expression. In that case, we ensure that we have a | |
| 473 // valid-looking frame state because we will continue to generate | |
| 474 // code as we unwind the C++ stack. | |
| 475 // | |
| 476 // It's possible to have both a stack overflow and a valid frame | |
| 477 // state (e.g., a subexpression overflowed, visiting it returned | |
| 478 // with a dummied frame state, and visiting this expression | |
| 479 // returned with a normal-looking state). | |
| 480 if (HasStackOverflow() && | |
| 481 !dest->is_used() && | |
| 482 frame_->height() == original_height) { | |
| 483 dest->Goto(true); | |
| 484 } | |
| 485 } | |
| 486 | |
| 487 if (force_control && !dest->is_used()) { | |
| 488 // Convert the TOS value into flow to the control destination. | |
| 489 ToBoolean(dest); | |
| 490 } | |
| 491 | |
| 492 ASSERT(!(force_control && !dest->is_used())); | |
| 493 ASSERT(dest->is_used() || frame_->height() == original_height + 1); | |
| 494 } | |
| 495 | |
| 496 | |
| 497 void CodeGenerator::LoadAndSpill(Expression* expression) { | |
| 498 ASSERT(in_spilled_code()); | |
| 499 set_in_spilled_code(false); | |
| 500 Load(expression); | |
| 501 frame_->SpillAll(); | |
| 502 set_in_spilled_code(true); | |
| 503 } | |
| 504 | |
| 505 | |
| 506 void CodeGenerator::Load(Expression* expr) { | |
| 507 #ifdef DEBUG | |
| 508 int original_height = frame_->height(); | |
| 509 #endif | |
| 510 ASSERT(!in_spilled_code()); | |
| 511 JumpTarget true_target; | |
| 512 JumpTarget false_target; | |
| 513 ControlDestination dest(&true_target, &false_target, true); | |
| 514 LoadCondition(expr, &dest, false); | |
| 515 | |
| 516 if (dest.false_was_fall_through()) { | |
| 517 // The false target was just bound. | |
| 518 JumpTarget loaded; | |
| 519 frame_->Push(FACTORY->false_value()); | |
| 520 // There may be dangling jumps to the true target. | |
| 521 if (true_target.is_linked()) { | |
| 522 loaded.Jump(); | |
| 523 true_target.Bind(); | |
| 524 frame_->Push(FACTORY->true_value()); | |
| 525 loaded.Bind(); | |
| 526 } | |
| 527 | |
| 528 } else if (dest.is_used()) { | |
| 529 // There is true, and possibly false, control flow (with true as | |
| 530 // the fall through). | |
| 531 JumpTarget loaded; | |
| 532 frame_->Push(FACTORY->true_value()); | |
| 533 if (false_target.is_linked()) { | |
| 534 loaded.Jump(); | |
| 535 false_target.Bind(); | |
| 536 frame_->Push(FACTORY->false_value()); | |
| 537 loaded.Bind(); | |
| 538 } | |
| 539 | |
| 540 } else { | |
| 541 // We have a valid value on top of the frame, but we still may | |
| 542 // have dangling jumps to the true and false targets from nested | |
| 543 // subexpressions (e.g., the left subexpressions of the | |
| 544 // short-circuited boolean operators). | |
| 545 ASSERT(has_valid_frame()); | |
| 546 if (true_target.is_linked() || false_target.is_linked()) { | |
| 547 JumpTarget loaded; | |
| 548 loaded.Jump(); // Don't lose the current TOS. | |
| 549 if (true_target.is_linked()) { | |
| 550 true_target.Bind(); | |
| 551 frame_->Push(FACTORY->true_value()); | |
| 552 if (false_target.is_linked()) { | |
| 553 loaded.Jump(); | |
| 554 } | |
| 555 } | |
| 556 if (false_target.is_linked()) { | |
| 557 false_target.Bind(); | |
| 558 frame_->Push(FACTORY->false_value()); | |
| 559 } | |
| 560 loaded.Bind(); | |
| 561 } | |
| 562 } | |
| 563 | |
| 564 ASSERT(has_valid_frame()); | |
| 565 ASSERT(frame_->height() == original_height + 1); | |
| 566 } | |
| 567 | |
| 568 | |
| 569 void CodeGenerator::LoadGlobal() { | |
| 570 if (in_spilled_code()) { | |
| 571 frame_->EmitPush(GlobalObjectOperand()); | |
| 572 } else { | |
| 573 Result temp = allocator_->Allocate(); | |
| 574 __ movq(temp.reg(), GlobalObjectOperand()); | |
| 575 frame_->Push(&temp); | |
| 576 } | |
| 577 } | |
| 578 | |
| 579 | |
| 580 void CodeGenerator::LoadGlobalReceiver() { | |
| 581 Result temp = allocator_->Allocate(); | |
| 582 Register reg = temp.reg(); | |
| 583 __ movq(reg, GlobalObjectOperand()); | |
| 584 __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset)); | |
| 585 frame_->Push(&temp); | |
| 586 } | |
| 587 | |
| 588 | |
| 589 void CodeGenerator::LoadTypeofExpression(Expression* expr) { | |
| 590 // Special handling of identifiers as subexpressions of typeof. | |
| 591 Variable* variable = expr->AsVariableProxy()->AsVariable(); | |
| 592 if (variable != NULL && !variable->is_this() && variable->is_global()) { | |
| 593 // For a global variable we build the property reference | |
| 594 // <global>.<variable> and perform a (regular non-contextual) property | |
| 595 // load to make sure we do not get reference errors. | |
| 596 Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); | |
| 597 Literal key(variable->name()); | |
| 598 Property property(&global, &key, RelocInfo::kNoPosition); | |
| 599 Reference ref(this, &property); | |
| 600 ref.GetValue(); | |
| 601 } else if (variable != NULL && variable->AsSlot() != NULL) { | |
| 602 // For a variable that rewrites to a slot, we signal it is the immediate | |
| 603 // subexpression of a typeof. | |
| 604 LoadFromSlotCheckForArguments(variable->AsSlot(), INSIDE_TYPEOF); | |
| 605 } else { | |
| 606 // Anything else can be handled normally. | |
| 607 Load(expr); | |
| 608 } | |
| 609 } | |
| 610 | |
| 611 | |
| 612 ArgumentsAllocationMode CodeGenerator::ArgumentsMode() { | |
| 613 if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION; | |
| 614 | |
| 615 // In strict mode there is no need for shadow arguments. | |
| 616 ASSERT(scope()->arguments_shadow() != NULL || scope()->is_strict_mode()); | |
| 617 // We don't want to do lazy arguments allocation for functions that | |
| 618 // have heap-allocated contexts, because it interferes with the | |
| 619 // uninitialized const tracking in the context objects. | |
| 620 return (scope()->num_heap_slots() > 0 || scope()->is_strict_mode()) | |
| 621 ? EAGER_ARGUMENTS_ALLOCATION | |
| 622 : LAZY_ARGUMENTS_ALLOCATION; | |
| 623 } | |
| 624 | |
| 625 | |
| 626 Result CodeGenerator::StoreArgumentsObject(bool initial) { | |
| 627 ArgumentsAllocationMode mode = ArgumentsMode(); | |
| 628 ASSERT(mode != NO_ARGUMENTS_ALLOCATION); | |
| 629 | |
| 630 Comment cmnt(masm_, "[ store arguments object"); | |
| 631 if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) { | |
| 632 // When using lazy arguments allocation, we store the arguments marker value | |
| 633 // as a sentinel indicating that the arguments object hasn't been | |
| 634 // allocated yet. | |
| 635 frame_->Push(FACTORY->arguments_marker()); | |
| 636 } else { | |
| 637 ArgumentsAccessStub stub(is_strict_mode() | |
| 638 ? ArgumentsAccessStub::NEW_STRICT | |
| 639 : ArgumentsAccessStub::NEW_NON_STRICT); | |
| 640 frame_->PushFunction(); | |
| 641 frame_->PushReceiverSlotAddress(); | |
| 642 frame_->Push(Smi::FromInt(scope()->num_parameters())); | |
| 643 Result result = frame_->CallStub(&stub, 3); | |
| 644 frame_->Push(&result); | |
| 645 } | |
| 646 | |
| 647 Variable* arguments = scope()->arguments(); | |
| 648 Variable* shadow = scope()->arguments_shadow(); | |
| 649 ASSERT(arguments != NULL && arguments->AsSlot() != NULL); | |
| 650 ASSERT((shadow != NULL && shadow->AsSlot() != NULL) || | |
| 651 scope()->is_strict_mode()); | |
| 652 | |
| 653 JumpTarget done; | |
| 654 bool skip_arguments = false; | |
| 655 if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) { | |
| 656 // We have to skip storing into the arguments slot if it has | |
| 657 // already been written to. This can happen if a function | |
| 658 // has a local variable named 'arguments'. | |
| 659 LoadFromSlot(arguments->AsSlot(), NOT_INSIDE_TYPEOF); | |
| 660 Result probe = frame_->Pop(); | |
| 661 if (probe.is_constant()) { | |
| 662 // We have to skip updating the arguments object if it has | |
| 663 // been assigned a proper value. | |
| 664 skip_arguments = !probe.handle()->IsArgumentsMarker(); | |
| 665 } else { | |
| 666 __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex); | |
| 667 probe.Unuse(); | |
| 668 done.Branch(not_equal); | |
| 669 } | |
| 670 } | |
| 671 if (!skip_arguments) { | |
| 672 StoreToSlot(arguments->AsSlot(), NOT_CONST_INIT); | |
| 673 if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind(); | |
| 674 } | |
| 675 if (shadow != NULL) { | |
| 676 StoreToSlot(shadow->AsSlot(), NOT_CONST_INIT); | |
| 677 } | |
| 678 return frame_->Pop(); | |
| 679 } | |
| 680 | |
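StoreArgumentsObject implements the lazy strategy chosen by ArgumentsMode: on entry it stores a marker sentinel instead of a real arguments object, and the later (non-initial) store only overwrites the slot if the sentinel is still there, so an earlier assignment to a local named 'arguments' wins. A small sketch of just that decision, with hypothetical sentinel values:

```cpp
#include <cassert>

// Hypothetical stand-ins for heap values; only the identity check matters.
struct Value { const char* tag; };
static Value kArgumentsMarker = { "arguments-marker" };
static Value kRealArguments = { "arguments-object" };
static Value kUserValue = { "user-assigned" };

// Mirrors the !initial path above: only materialize the arguments object
// if the slot still holds the sentinel written at function entry.
Value* UpdateArgumentsSlot(Value* slot_contents) {
  bool skip_arguments = (slot_contents != &kArgumentsMarker);
  return skip_arguments ? slot_contents : &kRealArguments;
}

int main() {
  // Untouched slot: the sentinel is replaced by a fresh arguments object.
  assert(UpdateArgumentsSlot(&kArgumentsMarker) == &kRealArguments);
  // The function assigned to 'arguments' before first use: keep that value.
  assert(UpdateArgumentsSlot(&kUserValue) == &kUserValue);
}
```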
| 681 // ------------------------------------------------------------------------- | |
| 682 // CodeGenerator implementation of variables, lookups, and stores. | |
| 683 | |
| 684 Reference::Reference(CodeGenerator* cgen, | |
| 685 Expression* expression, | |
| 686 bool persist_after_get) | |
| 687 : cgen_(cgen), | |
| 688 expression_(expression), | |
| 689 type_(ILLEGAL), | |
| 690 persist_after_get_(persist_after_get) { | |
| 691 cgen->LoadReference(this); | |
| 692 } | |
| 693 | |
| 694 | |
| 695 Reference::~Reference() { | |
| 696 ASSERT(is_unloaded() || is_illegal()); | |
| 697 } | |
| 698 | |
| 699 | |
| 700 void CodeGenerator::LoadReference(Reference* ref) { | |
| 701 // References are loaded from both spilled and unspilled code. Set the | |
| 702 // state to unspilled to allow that (and explicitly spill after | |
| 703 // construction at the construction sites). | |
| 704 bool was_in_spilled_code = in_spilled_code_; | |
| 705 in_spilled_code_ = false; | |
| 706 | |
| 707 Comment cmnt(masm_, "[ LoadReference"); | |
| 708 Expression* e = ref->expression(); | |
| 709 Property* property = e->AsProperty(); | |
| 710 Variable* var = e->AsVariableProxy()->AsVariable(); | |
| 711 | |
| 712 if (property != NULL) { | |
| 713 // The expression is either a property or a variable proxy that rewrites | |
| 714 // to a property. | |
| 715 Load(property->obj()); | |
| 716 if (property->key()->IsPropertyName()) { | |
| 717 ref->set_type(Reference::NAMED); | |
| 718 } else { | |
| 719 Load(property->key()); | |
| 720 ref->set_type(Reference::KEYED); | |
| 721 } | |
| 722 } else if (var != NULL) { | |
| 723 // The expression is a variable proxy that does not rewrite to a | |
| 724 // property. Global variables are treated as named property references. | |
| 725 if (var->is_global()) { | |
| 726 // If rax is free, the register allocator prefers it. Thus the code | |
| 727 // generator will load the global object into rax, which is where | |
| 728 // LoadIC wants it. Most uses of Reference call LoadIC directly | |
| 729 // after the reference is created. | |
| 730 frame_->Spill(rax); | |
| 731 LoadGlobal(); | |
| 732 ref->set_type(Reference::NAMED); | |
| 733 } else { | |
| 734 ASSERT(var->AsSlot() != NULL); | |
| 735 ref->set_type(Reference::SLOT); | |
| 736 } | |
| 737 } else { | |
| 738 // Anything else is a runtime error. | |
| 739 Load(e); | |
| 740 frame_->CallRuntime(Runtime::kThrowReferenceError, 1); | |
| 741 } | |
| 742 | |
| 743 in_spilled_code_ = was_in_spilled_code; | |
| 744 } | |
| 745 | |
| 746 | |
| 747 void CodeGenerator::UnloadReference(Reference* ref) { | |
| 748 // Pop a reference from the stack while preserving TOS. | |
| 749 Comment cmnt(masm_, "[ UnloadReference"); | |
| 750 frame_->Nip(ref->size()); | |
| 751 ref->set_unloaded(); | |
| 752 } | |
| 753 | |
| 754 | |
| 755 // ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and | |
| 756 // convert it to a boolean in the condition code register or jump to | |
| 757 // 'false_target'/'true_target' as appropriate. | |
| 758 void CodeGenerator::ToBoolean(ControlDestination* dest) { | |
| 759 Comment cmnt(masm_, "[ ToBoolean"); | |
| 760 | |
| 761 // The value to convert should be popped from the frame. | |
| 762 Result value = frame_->Pop(); | |
| 763 value.ToRegister(); | |
| 764 | |
| 765 if (value.is_number()) { | |
| 766 // Fast case if TypeInfo indicates only numbers. | |
| 767 if (FLAG_debug_code) { | |
| 768 __ AbortIfNotNumber(value.reg()); | |
| 769 } | |
| 770 // Smi => false iff zero. | |
| 771 __ Cmp(value.reg(), Smi::FromInt(0)); | |
| 772 if (value.is_smi()) { | |
| 773 value.Unuse(); | |
| 774 dest->Split(not_zero); | |
| 775 } else { | |
| 776 dest->false_target()->Branch(equal); | |
| 777 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 778 dest->true_target()->Branch(is_smi); | |
| 779 __ xorpd(xmm0, xmm0); | |
| 780 __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset)); | |
| 781 value.Unuse(); | |
| 782 dest->Split(not_zero); | |
| 783 } | |
| 784 } else { | |
| 785 // Fast case checks. | |
| 786 // 'false' => false. | |
| 787 __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex); | |
| 788 dest->false_target()->Branch(equal); | |
| 789 | |
| 790 // 'true' => true. | |
| 791 __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex); | |
| 792 dest->true_target()->Branch(equal); | |
| 793 | |
| 794 // 'undefined' => false. | |
| 795 __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex); | |
| 796 dest->false_target()->Branch(equal); | |
| 797 | |
| 798 // Smi => false iff zero. | |
| 799 __ Cmp(value.reg(), Smi::FromInt(0)); | |
| 800 dest->false_target()->Branch(equal); | |
| 801 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 802 dest->true_target()->Branch(is_smi); | |
| 803 | |
| 804 // Call the stub for all other cases. | |
| 805 frame_->Push(&value); // Undo the Pop() from above. | |
| 806 ToBooleanStub stub; | |
| 807 Result temp = frame_->CallStub(&stub, 1); | |
| 808 // Convert the result to a condition code. | |
| 809 __ testq(temp.reg(), temp.reg()); | |
| 810 temp.Unuse(); | |
| 811 dest->Split(not_equal); | |
| 812 } | |
| 813 } | |
| 814 | |
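The slow path above tests values in a fixed order before falling back to ToBooleanStub. Below is a host-side model of the checks ToBoolean performs inline, folding the number-typed fast path into the generic-value checks; the enum and struct are invented for illustration. Note how the ucomisd against +0.0 handles both -0.0 (compares equal) and NaN (compares unordered, which also sets ZF), so both land on the false target.

```cpp
#include <cassert>
#include <cmath>

// Illustrative model only; these are not V8 types.
enum Kind { kFalseValue, kTrueValue, kUndefined, kSmi, kHeapNumber, kOther };

struct Value {
  Kind kind;
  int smi;        // Valid when kind == kSmi.
  double number;  // Valid when kind == kHeapNumber.
};

bool ToBooleanFastPath(const Value& v, bool* handled_inline) {
  *handled_inline = true;
  switch (v.kind) {
    case kFalseValue: return false;       // 'false' => false.
    case kTrueValue:  return true;        // 'true' => true.
    case kUndefined:  return false;       // 'undefined' => false.
    case kSmi:        return v.smi != 0;  // Smi => false iff zero.
    case kHeapNumber:
      // ucomisd against +0.0: -0.0 is equal and NaN is unordered, so both
      // are falsy; every other double is truthy.
      return !(v.number == 0.0 || std::isnan(v.number));
    default:
      *handled_inline = false;            // Would call ToBooleanStub.
      return false;
  }
}

int main() {
  bool inline_ok;
  assert(ToBooleanFastPath({kSmi, 7, 0}, &inline_ok) && inline_ok);
  assert(!ToBooleanFastPath({kHeapNumber, 0, -0.0}, &inline_ok));
  assert(!ToBooleanFastPath({kHeapNumber, 0, NAN}, &inline_ok));
}
```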
| 815 | |
| 816 // Call the specialized stub for a binary operation. | |
| 817 class DeferredInlineBinaryOperation: public DeferredCode { | |
| 818 public: | |
| 819 DeferredInlineBinaryOperation(Token::Value op, | |
| 820 Register dst, | |
| 821 Register left, | |
| 822 Register right, | |
| 823 OverwriteMode mode) | |
| 824 : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) { | |
| 825 set_comment("[ DeferredInlineBinaryOperation"); | |
| 826 } | |
| 827 | |
| 828 virtual void Generate(); | |
| 829 | |
| 830 private: | |
| 831 Token::Value op_; | |
| 832 Register dst_; | |
| 833 Register left_; | |
| 834 Register right_; | |
| 835 OverwriteMode mode_; | |
| 836 }; | |
| 837 | |
| 838 | |
| 839 void DeferredInlineBinaryOperation::Generate() { | |
| 840 Label done; | |
| 841 if ((op_ == Token::ADD) | |
| 842 || (op_ == Token::SUB) | |
| 843 || (op_ == Token::MUL) | |
| 844 || (op_ == Token::DIV)) { | |
| 845 Label call_runtime; | |
| 846 Label left_smi, right_smi, load_right, do_op; | |
| 847 __ JumpIfSmi(left_, &left_smi); | |
| 848 __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset), | |
| 849 Heap::kHeapNumberMapRootIndex); | |
| 850 __ j(not_equal, &call_runtime); | |
| 851 __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset)); | |
| 852 if (mode_ == OVERWRITE_LEFT) { | |
| 853 __ movq(dst_, left_); | |
| 854 } | |
| 855 __ jmp(&load_right); | |
| 856 | |
| 857 __ bind(&left_smi); | |
| 858 __ SmiToInteger32(left_, left_); | |
| 859 __ cvtlsi2sd(xmm0, left_); | |
| 860 __ Integer32ToSmi(left_, left_); | |
| 861 if (mode_ == OVERWRITE_LEFT) { | |
| 862 Label alloc_failure; | |
| 863 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
| 864 } | |
| 865 | |
| 866 __ bind(&load_right); | |
| 867 __ JumpIfSmi(right_, &right_smi); | |
| 868 __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset), | |
| 869 Heap::kHeapNumberMapRootIndex); | |
| 870 __ j(not_equal, &call_runtime); | |
| 871 __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset)); | |
| 872 if (mode_ == OVERWRITE_RIGHT) { | |
| 873 __ movq(dst_, right_); | |
| 874 } else if (mode_ == NO_OVERWRITE) { | |
| 875 Label alloc_failure; | |
| 876 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
| 877 } | |
| 878 __ jmp(&do_op); | |
| 879 | |
| 880 __ bind(&right_smi); | |
| 881 __ SmiToInteger32(right_, right_); | |
| 882 __ cvtlsi2sd(xmm1, right_); | |
| 883 __ Integer32ToSmi(right_, right_); | |
| 884 if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) { | |
| 885 Label alloc_failure; | |
| 886 __ AllocateHeapNumber(dst_, no_reg, &call_runtime); | |
| 887 } | |
| 888 | |
| 889 __ bind(&do_op); | |
| 890 switch (op_) { | |
| 891 case Token::ADD: __ addsd(xmm0, xmm1); break; | |
| 892 case Token::SUB: __ subsd(xmm0, xmm1); break; | |
| 893 case Token::MUL: __ mulsd(xmm0, xmm1); break; | |
| 894 case Token::DIV: __ divsd(xmm0, xmm1); break; | |
| 895 default: UNREACHABLE(); | |
| 896 } | |
| 897 __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0); | |
| 898 __ jmp(&done); | |
| 899 | |
| 900 __ bind(&call_runtime); | |
| 901 } | |
| 902 GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB); | |
| 903 stub.GenerateCall(masm_, left_, right_); | |
| 904 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 905 __ bind(&done); | |
| 906 } | |
| 907 | |
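The deferred path above decides where the result heap number lives based on OverwriteMode: reuse the left operand's heap number, reuse the right one's, or allocate a fresh one. A smi operand always forces an allocation, since a smi has no heap storage to reuse. A compact sketch of just that decision, with hypothetical names:

```cpp
#include <cstdio>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

// Which object receives the double result, mirroring the mode_ checks in
// DeferredInlineBinaryOperation::Generate above.
const char* ResultStorage(OverwriteMode mode,
                          bool left_is_smi,
                          bool right_is_smi) {
  if (mode == OVERWRITE_LEFT && !left_is_smi) return "reuse left operand";
  if (mode == OVERWRITE_RIGHT && !right_is_smi) return "reuse right operand";
  return "allocate fresh HeapNumber";
}

int main() {
  std::printf("%s\n", ResultStorage(OVERWRITE_LEFT, false, false));  // reuse left
  std::printf("%s\n", ResultStorage(OVERWRITE_LEFT, true, false));   // allocate
  std::printf("%s\n", ResultStorage(NO_OVERWRITE, false, false));    // allocate
}
```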
| 908 | |
| 909 static TypeInfo CalculateTypeInfo(TypeInfo operands_type, | |
| 910 Token::Value op, | |
| 911 const Result& right, | |
| 912 const Result& left) { | |
| 913 // Set TypeInfo of result according to the operation performed. | |
| 914 // We rely on the fact that smis have a 32 bit payload on x64. | |
| 915 STATIC_ASSERT(kSmiValueSize == 32); | |
| 916 switch (op) { | |
| 917 case Token::COMMA: | |
| 918 return right.type_info(); | |
| 919 case Token::OR: | |
| 920 case Token::AND: | |
| 921 // Result type can be either of the two input types. | |
| 922 return operands_type; | |
| 923 case Token::BIT_OR: | |
| 924 case Token::BIT_XOR: | |
| 925 case Token::BIT_AND: | |
| 926 // Result is always a smi. | |
| 927 return TypeInfo::Smi(); | |
| 928 case Token::SAR: | |
| 929 case Token::SHL: | |
| 930 // Result is always a smi. | |
| 931 return TypeInfo::Smi(); | |
| 932 case Token::SHR: | |
| 933 // Result of x >>> y is always a smi if masked y >= 1, otherwise a number. | |
| 934 return (right.is_constant() && right.handle()->IsSmi() | |
| 935 && (Smi::cast(*right.handle())->value() & 0x1F) >= 1) | |
| 936 ? TypeInfo::Smi() | |
| 937 : TypeInfo::Number(); | |
| 938 case Token::ADD: | |
| 939 if (operands_type.IsNumber()) { | |
| 940 return TypeInfo::Number(); | |
| 941 } else if (left.type_info().IsString() || right.type_info().IsString()) { | |
| 942 return TypeInfo::String(); | |
| 943 } else { | |
| 944 return TypeInfo::Unknown(); | |
| 945 } | |
| 946 case Token::SUB: | |
| 947 case Token::MUL: | |
| 948 case Token::DIV: | |
| 949 case Token::MOD: | |
| 950 // Result is always a number. | |
| 951 return TypeInfo::Number(); | |
| 952 default: | |
| 953 UNREACHABLE(); | |
| 954 } | |
| 955 UNREACHABLE(); | |
| 956 return TypeInfo::Unknown(); | |
| 957 } | |
| 958 | |
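The Token::SHR case above leans on the stated x64 assumption that a smi payload holds any int32: once the shift count (masked to five bits, per the spec) is at least 1, an unsigned 32-bit result is at most 0x7FFFFFFF and always fits; only a shift by 0 can produce a value above INT32_MAX. A worked check of that rule:

```cpp
#include <cassert>
#include <cstdint>

// Whether (x >>> y) is guaranteed to fit a 32-bit smi payload, knowing only
// the constant shift count, as in the Token::SHR case above.
bool ShrResultAlwaysSmi(int32_t shift_count) {
  return (shift_count & 0x1F) >= 1;  // ES masks shifts to the low 5 bits.
}

int main() {
  // y = 0: 0xFFFFFFFF >>> 0 == 4294967295 exceeds INT32_MAX, so the result
  // must be typed Number, not Smi.
  assert(!ShrResultAlwaysSmi(0));
  assert(!ShrResultAlwaysSmi(32));  // 32 & 0x1F == 0, same as y = 0.
  // y >= 1: the result is at most 0x7FFFFFFF, always a valid smi value.
  assert(ShrResultAlwaysSmi(1));
  uint32_t x = 0xFFFFFFFFu;
  assert((x >> 1) <= static_cast<uint32_t>(INT32_MAX));
}
```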
| 959 | |
| 960 void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr, | |
| 961 OverwriteMode overwrite_mode) { | |
| 962 Comment cmnt(masm_, "[ BinaryOperation"); | |
| 963 Token::Value op = expr->op(); | |
| 964 Comment cmnt_token(masm_, Token::String(op)); | |
| 965 | |
| 966 if (op == Token::COMMA) { | |
| 967 // Simply discard left value. | |
| 968 frame_->Nip(1); | |
| 969 return; | |
| 970 } | |
| 971 | |
| 972 Result right = frame_->Pop(); | |
| 973 Result left = frame_->Pop(); | |
| 974 | |
| 975 if (op == Token::ADD) { | |
| 976 const bool left_is_string = left.type_info().IsString(); | |
| 977 const bool right_is_string = right.type_info().IsString(); | |
| 978 // Make sure constant strings have string type info. | |
| 979 ASSERT(!(left.is_constant() && left.handle()->IsString()) || | |
| 980 left_is_string); | |
| 981 ASSERT(!(right.is_constant() && right.handle()->IsString()) || | |
| 982 right_is_string); | |
| 983 if (left_is_string || right_is_string) { | |
| 984 frame_->Push(&left); | |
| 985 frame_->Push(&right); | |
| 986 Result answer; | |
| 987 if (left_is_string) { | |
| 988 if (right_is_string) { | |
| 989 StringAddStub stub(NO_STRING_CHECK_IN_STUB); | |
| 990 answer = frame_->CallStub(&stub, 2); | |
| 991 } else { | |
| 992 answer = | |
| 993 frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2); | |
| 994 } | |
| 995 } else if (right_is_string) { | |
| 996 answer = | |
| 997 frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); | |
| 998 } | |
| 999 answer.set_type_info(TypeInfo::String()); | |
| 1000 frame_->Push(&answer); | |
| 1001 return; | |
| 1002 } | |
| 1003 // Neither operand is known to be a string. | |
| 1004 } | |
| 1005 | |
| 1006 bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi(); | |
| 1007 bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi(); | |
| 1008 bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi(); | |
| 1009 bool right_is_non_smi_constant = | |
| 1010 right.is_constant() && !right.handle()->IsSmi(); | |
| 1011 | |
| 1012 if (left_is_smi_constant && right_is_smi_constant) { | |
| 1013 // Compute the constant result at compile time, and leave it on the frame. | |
| 1014 int left_int = Smi::cast(*left.handle())->value(); | |
| 1015 int right_int = Smi::cast(*right.handle())->value(); | |
| 1016 if (FoldConstantSmis(op, left_int, right_int)) return; | |
| 1017 } | |
| 1018 | |
| 1019 // Get number type of left and right sub-expressions. | |
| 1020 TypeInfo operands_type = | |
| 1021 TypeInfo::Combine(left.type_info(), right.type_info()); | |
| 1022 | |
| 1023 TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left); | |
| 1024 | |
| 1025 Result answer; | |
| 1026 if (left_is_non_smi_constant || right_is_non_smi_constant) { | |
| 1027 // Go straight to the slow case, with no smi code. | |
| 1028 GenericBinaryOpStub stub(op, | |
| 1029 overwrite_mode, | |
| 1030 NO_SMI_CODE_IN_STUB, | |
| 1031 operands_type); | |
| 1032 answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right); | |
| 1033 } else if (right_is_smi_constant) { | |
| 1034 answer = ConstantSmiBinaryOperation(expr, &left, right.handle(), | |
| 1035 false, overwrite_mode); | |
| 1036 } else if (left_is_smi_constant) { | |
| 1037 answer = ConstantSmiBinaryOperation(expr, &right, left.handle(), | |
| 1038 true, overwrite_mode); | |
| 1039 } else { | |
| 1040 // Set the flags based on the operation, type and loop nesting level. | |
| 1041 // Bit operations are assumed to likely operate on smis, but even for | |
| 1042 // them the inline Smi check code is only generated if this operation | |
| 1043 // is part of a loop. For all other operations the Smi check is only | |
| 1044 // inlined for likely-smi operands, and again only inside a loop. | |
| 1045 if (loop_nesting() > 0 && | |
| 1046 (Token::IsBitOp(op) || | |
| 1047 operands_type.IsInteger32() || | |
| 1048 expr->type()->IsLikelySmi())) { | |
| 1049 answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode); | |
| 1050 } else { | |
| 1051 GenericBinaryOpStub stub(op, | |
| 1052 overwrite_mode, | |
| 1053 NO_GENERIC_BINARY_FLAGS, | |
| 1054 operands_type); | |
| 1055 answer = GenerateGenericBinaryOpStubCall(&stub, &left, &right); | |
| 1056 } | |
| 1057 } | |
| 1058 | |
| 1059 answer.set_type_info(result_type); | |
| 1060 frame_->Push(&answer); | |
| 1061 } | |
| 1062 | |
| 1063 | |
| 1064 bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) { | |
| 1065 Object* answer_object = HEAP->undefined_value(); | |
| 1066 switch (op) { | |
| 1067 case Token::ADD: | |
| 1068 // Use intptr_t to detect overflow of 32-bit int. | |
| 1069 if (Smi::IsValid(static_cast<intptr_t>(left) + right)) { | |
| 1070 answer_object = Smi::FromInt(left + right); | |
| 1071 } | |
| 1072 break; | |
| 1073 case Token::SUB: | |
| 1074 // Use intptr_t to detect overflow of 32-bit int. | |
| 1075 if (Smi::IsValid(static_cast<intptr_t>(left) - right)) { | |
| 1076 answer_object = Smi::FromInt(left - right); | |
| 1077 } | |
| 1078 break; | |
| 1079 case Token::MUL: { | |
| 1080 double answer = static_cast<double>(left) * right; | |
| 1081 if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) { | |
| 1082 // If the product is zero and the non-zero factor is negative, | |
| 1083 // the spec requires us to return floating point negative zero. | |
| 1084 if (answer != 0 || (left >= 0 && right >= 0)) { | |
| 1085 answer_object = Smi::FromInt(static_cast<int>(answer)); | |
| 1086 } | |
| 1087 } | |
| 1088 } | |
| 1089 break; | |
| 1090 case Token::DIV: | |
| 1091 case Token::MOD: | |
| 1092 break; | |
| 1093 case Token::BIT_OR: | |
| 1094 answer_object = Smi::FromInt(left | right); | |
| 1095 break; | |
| 1096 case Token::BIT_AND: | |
| 1097 answer_object = Smi::FromInt(left & right); | |
| 1098 break; | |
| 1099 case Token::BIT_XOR: | |
| 1100 answer_object = Smi::FromInt(left ^ right); | |
| 1101 break; | |
| 1102 | |
| 1103 case Token::SHL: { | |
| 1104 int shift_amount = right & 0x1F; | |
| 1105 if (Smi::IsValid(left << shift_amount)) { | |
| 1106 answer_object = Smi::FromInt(left << shift_amount); | |
| 1107 } | |
| 1108 break; | |
| 1109 } | |
| 1110 case Token::SHR: { | |
| 1111 int shift_amount = right & 0x1F; | |
| 1112 unsigned int unsigned_left = left; | |
| 1113 unsigned_left >>= shift_amount; | |
| 1114 if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) { | |
| 1115 answer_object = Smi::FromInt(unsigned_left); | |
| 1116 } | |
| 1117 break; | |
| 1118 } | |
| 1119 case Token::SAR: { | |
| 1120 int shift_amount = right & 0x1F; | |
| 1121 unsigned int unsigned_left = left; | |
| 1122 if (left < 0) { | |
| 1123 // Perform arithmetic shift of a negative number by | |
| 1124 // complementing the number, shifting logically, and complementing again. | |
| 1125 unsigned_left = ~unsigned_left; | |
| 1126 unsigned_left >>= shift_amount; | |
| 1127 unsigned_left = ~unsigned_left; | |
| 1128 } else { | |
| 1129 unsigned_left >>= shift_amount; | |
| 1130 } | |
| 1131 ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left))); | |
| 1132 answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left)); | |
| 1133 break; | |
| 1134 } | |
| 1135 default: | |
| 1136 UNREACHABLE(); | |
| 1137 break; | |
| 1138 } | |
| 1139 if (answer_object->IsUndefined()) { | |
| 1140 return false; | |
| 1141 } | |
| 1142 frame_->Push(Handle<Object>(answer_object)); | |
| 1143 return true; | |
| 1144 } | |
| 1145 | |
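Two details in FoldConstantSmis deserve a worked example: the MUL case refuses to fold a zero product with a negative factor (the spec result is -0.0, which no smi can represent), and the SAR case builds an arithmetic shift from logical shifts by complementing before and after, avoiding implementation-defined signed right shifts in pre-C++20 C++. A standalone check of both:

```cpp
#include <cassert>
#include <cstdint>

// Arithmetic right shift via complement + logical shift + complement,
// exactly as the Token::SAR case above computes it.
int32_t SarViaComplement(int32_t left, int right) {
  int shift_amount = right & 0x1F;
  uint32_t unsigned_left = static_cast<uint32_t>(left);
  if (left < 0) {
    unsigned_left = ~unsigned_left;  // Now non-negative.
    unsigned_left >>= shift_amount;  // Logical shift is well defined.
    unsigned_left = ~unsigned_left;  // Restore: ones shift in from the top.
  } else {
    unsigned_left >>= shift_amount;
  }
  return static_cast<int32_t>(unsigned_left);
}

int main() {
  assert(SarViaComplement(-8, 1) == -4);
  assert(SarViaComplement(-1, 31) == -1);  // Sign bit replicates.
  assert(SarViaComplement(8, 2) == 2);

  // The MUL case: 0 * -5 must be -0.0 per ES, so folding is skipped
  // whenever the product is zero and a factor is negative.
  int left = 0, right = -5;
  double product = static_cast<double>(left) * right;
  assert(product == 0 && !(left >= 0 && right >= 0));  // Not foldable.
}
```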
| 1146 | |
| 1147 void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left, | |
| 1148 Result* right, | |
| 1149 JumpTarget* both_smi) { | |
| 1150 TypeInfo left_info = left->type_info(); | |
| 1151 TypeInfo right_info = right->type_info(); | |
| 1152 if (left_info.IsDouble() || left_info.IsString() || | |
| 1153 right_info.IsDouble() || right_info.IsString()) { | |
| 1154 // We know that left and right are not both smi. Don't do any tests. | |
| 1155 return; | |
| 1156 } | |
| 1157 | |
| 1158 if (left->reg().is(right->reg())) { | |
| 1159 if (!left_info.IsSmi()) { | |
| 1160 Condition is_smi = masm()->CheckSmi(left->reg()); | |
| 1161 both_smi->Branch(is_smi); | |
| 1162 } else { | |
| 1163 if (FLAG_debug_code) __ AbortIfNotSmi(left->reg()); | |
| 1164 left->Unuse(); | |
| 1165 right->Unuse(); | |
| 1166 both_smi->Jump(); | |
| 1167 } | |
| 1168 } else if (!left_info.IsSmi()) { | |
| 1169 if (!right_info.IsSmi()) { | |
| 1170 Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg()); | |
| 1171 both_smi->Branch(is_smi); | |
| 1172 } else { | |
| 1173 Condition is_smi = masm()->CheckSmi(left->reg()); | |
| 1174 both_smi->Branch(is_smi); | |
| 1175 } | |
| 1176 } else { | |
| 1177 if (FLAG_debug_code) __ AbortIfNotSmi(left->reg()); | |
| 1178 if (!right_info.IsSmi()) { | |
| 1179 Condition is_smi = masm()->CheckSmi(right->reg()); | |
| 1180 both_smi->Branch(is_smi); | |
| 1181 } else { | |
| 1182 if (FLAG_debug_code) __ AbortIfNotSmi(right->reg()); | |
| 1183 left->Unuse(); | |
| 1184 right->Unuse(); | |
| 1185 both_smi->Jump(); | |
| 1186 } | |
| 1187 } | |
| 1188 } | |
| 1189 | |
| 1190 | |
| 1191 void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg, | |
| 1192 TypeInfo type, | |
| 1193 DeferredCode* deferred) { | |
| 1194 if (!type.IsSmi()) { | |
| 1195 __ JumpIfNotSmi(reg, deferred->entry_label()); | |
| 1196 } | |
| 1197 if (FLAG_debug_code) { | |
| 1198 __ AbortIfNotSmi(reg); | |
| 1199 } | |
| 1200 } | |
| 1201 | |
| 1202 | |
| 1203 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left, | |
| 1204 Register right, | |
| 1205 TypeInfo left_info, | |
| 1206 TypeInfo right_info, | |
| 1207 DeferredCode* deferred) { | |
| 1208 if (!left_info.IsSmi() && !right_info.IsSmi()) { | |
| 1209 __ JumpIfNotBothSmi(left, right, deferred->entry_label()); | |
| 1210 } else if (!left_info.IsSmi()) { | |
| 1211 __ JumpIfNotSmi(left, deferred->entry_label()); | |
| 1212 } else if (!right_info.IsSmi()) { | |
| 1213 __ JumpIfNotSmi(right, deferred->entry_label()); | |
| 1214 } | |
| 1215 if (FLAG_debug_code) { | |
| 1216 __ AbortIfNotSmi(left); | |
| 1217 __ AbortIfNotSmi(right); | |
| 1218 } | |
| 1219 } | |
| 1220 | |
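The two-register case above can fold both tag tests into one because a smi's tag bit 0 is clear while a heap pointer carries tag 1: OR-ing the two words and testing the low bit covers both operands at once, which is essentially what MacroAssembler::CheckBothSmi emits. A bit-level sketch under that x64 encoding (32-bit payload in the upper half; only the tag bit is modeled here):

```cpp
#include <cassert>
#include <cstdint>

const uint64_t kSmiTagMask = 1;

// Illustrative x64-style smi: payload shifted to the upper 32 bits,
// leaving tag bit 0 clear.
uint64_t MakeSmi(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

// One test instead of two: if either word has the tag bit set, so does
// the OR of the two.
bool BothSmi(uint64_t a, uint64_t b) {
  return ((a | b) & kSmiTagMask) == 0;
}

int main() {
  uint64_t smi_a = MakeSmi(42);
  uint64_t smi_b = MakeSmi(-7);
  uint64_t heap_object = 0x100021;  // Tagged pointer: low bit set.
  assert(BothSmi(smi_a, smi_b));
  assert(!BothSmi(smi_a, heap_object));
  assert(!BothSmi(heap_object, heap_object));
}
```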
| 1221 | |
| 1222 // Implements a binary operation using a deferred code object and some | |
| 1223 // inline code to operate on smis quickly. | |
| 1224 Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr, | |
| 1225 Result* left, | |
| 1226 Result* right, | |
| 1227 OverwriteMode overwrite_mode) { | |
| 1228 // Copy the type info because left and right may be overwritten. | |
| 1229 TypeInfo left_type_info = left->type_info(); | |
| 1230 TypeInfo right_type_info = right->type_info(); | |
| 1231 Token::Value op = expr->op(); | |
| 1232 Result answer; | |
| 1233 // Special handling of div and mod because they use fixed registers. | |
| 1234 if (op == Token::DIV || op == Token::MOD) { | |
| 1235 // We need rax as the quotient register, rdx as the remainder | |
| 1236 // register, neither left nor right in rax or rdx, and left copied | |
| 1237 // to rax. | |
| 1238 Result quotient; | |
| 1239 Result remainder; | |
| 1240 bool left_is_in_rax = false; | |
| 1241 // Step 1: get rax for quotient. | |
| 1242 if ((left->is_register() && left->reg().is(rax)) || | |
| 1243 (right->is_register() && right->reg().is(rax))) { | |
| 1244 // One or both is in rax. Use a fresh non-rdx register for | |
| 1245 // them. | |
| 1246 Result fresh = allocator_->Allocate(); | |
| 1247 ASSERT(fresh.is_valid()); | |
| 1248 if (fresh.reg().is(rdx)) { | |
| 1249 remainder = fresh; | |
| 1250 fresh = allocator_->Allocate(); | |
| 1251 ASSERT(fresh.is_valid()); | |
| 1252 } | |
| 1253 if (left->is_register() && left->reg().is(rax)) { | |
| 1254 quotient = *left; | |
| 1255 *left = fresh; | |
| 1256 left_is_in_rax = true; | |
| 1257 } | |
| 1258 if (right->is_register() && right->reg().is(rax)) { | |
| 1259 quotient = *right; | |
| 1260 *right = fresh; | |
| 1261 } | |
| 1262 __ movq(fresh.reg(), rax); | |
| 1263 } else { | |
| 1264 // Neither left nor right is in rax. | |
| 1265 quotient = allocator_->Allocate(rax); | |
| 1266 } | |
| 1267 ASSERT(quotient.is_register() && quotient.reg().is(rax)); | |
| 1268 ASSERT(!(left->is_register() && left->reg().is(rax))); | |
| 1269 ASSERT(!(right->is_register() && right->reg().is(rax))); | |
| 1270 | |
| 1271 // Step 2: get rdx for remainder if necessary. | |
| 1272 if (!remainder.is_valid()) { | |
| 1273 if ((left->is_register() && left->reg().is(rdx)) || | |
| 1274 (right->is_register() && right->reg().is(rdx))) { | |
| 1275 Result fresh = allocator_->Allocate(); | |
| 1276 ASSERT(fresh.is_valid()); | |
| 1277 if (left->is_register() && left->reg().is(rdx)) { | |
| 1278 remainder = *left; | |
| 1279 *left = fresh; | |
| 1280 } | |
| 1281 if (right->is_register() && right->reg().is(rdx)) { | |
| 1282 remainder = *right; | |
| 1283 *right = fresh; | |
| 1284 } | |
| 1285 __ movq(fresh.reg(), rdx); | |
| 1286 } else { | |
| 1287 // Neither left nor right is in rdx. | |
| 1288 remainder = allocator_->Allocate(rdx); | |
| 1289 } | |
| 1290 } | |
| 1291 ASSERT(remainder.is_register() && remainder.reg().is(rdx)); | |
| 1292 ASSERT(!(left->is_register() && left->reg().is(rdx))); | |
| 1293 ASSERT(!(right->is_register() && right->reg().is(rdx))); | |
| 1294 | |
| 1295 left->ToRegister(); | |
| 1296 right->ToRegister(); | |
| 1297 frame_->Spill(rax); | |
| 1298 frame_->Spill(rdx); | |
| 1299 | |
| 1300 // Check that left and right are smi tagged. | |
| 1301 DeferredInlineBinaryOperation* deferred = | |
| 1302 new DeferredInlineBinaryOperation(op, | |
| 1303 (op == Token::DIV) ? rax : rdx, | |
| 1304 left->reg(), | |
| 1305 right->reg(), | |
| 1306 overwrite_mode); | |
| 1307 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), | |
| 1308 left_type_info, right_type_info, deferred); | |
| 1309 | |
| 1310 if (op == Token::DIV) { | |
| 1311 __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label()); | |
| 1312 deferred->BindExit(); | |
| 1313 left->Unuse(); | |
| 1314 right->Unuse(); | |
| 1315 answer = quotient; | |
| 1316 } else { | |
| 1317 ASSERT(op == Token::MOD); | |
| 1318 __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label()); | |
| 1319 deferred->BindExit(); | |
| 1320 left->Unuse(); | |
| 1321 right->Unuse(); | |
| 1322 answer = remainder; | |
| 1323 } | |
| 1324 ASSERT(answer.is_valid()); | |
| 1325 return answer; | |
| 1326 } | |
| 1327 | |
| 1328 // Special handling of shift operations because they use fixed | |
| 1329 // registers. | |
| 1330 if (op == Token::SHL || op == Token::SHR || op == Token::SAR) { | |
| 1331 // Move left out of rcx if necessary. | |
| 1332 if (left->is_register() && left->reg().is(rcx)) { | |
| 1333 *left = allocator_->Allocate(); | |
| 1334 ASSERT(left->is_valid()); | |
| 1335 __ movq(left->reg(), rcx); | |
| 1336 } | |
| 1337 right->ToRegister(rcx); | |
| 1338 left->ToRegister(); | |
| 1339 ASSERT(left->is_register() && !left->reg().is(rcx)); | |
| 1340 ASSERT(right->is_register() && right->reg().is(rcx)); | |
| 1341 | |
| 1342 // We will modify right, it must be spilled. | |
| 1343 frame_->Spill(rcx); | |
| 1344 | |
| 1345 // Use a fresh answer register to avoid spilling the left operand. | |
| 1346 answer = allocator_->Allocate(); | |
| 1347 ASSERT(answer.is_valid()); | |
| 1348 // Check that both operands are smis using the answer register as a | |
| 1349 // temporary. | |
| 1350 DeferredInlineBinaryOperation* deferred = | |
| 1351 new DeferredInlineBinaryOperation(op, | |
| 1352 answer.reg(), | |
| 1353 left->reg(), | |
| 1354 rcx, | |
| 1355 overwrite_mode); | |
| 1356 | |
| 1357 Label do_op; | |
| 1358 // Left operand must be unchanged in left->reg() for deferred code. | |
| 1359 // Left operand is in answer.reg(), possibly converted to int32, for | |
| 1360 // inline code. | |
| 1361 __ movq(answer.reg(), left->reg()); | |
| 1362 if (right_type_info.IsSmi()) { | |
| 1363 if (FLAG_debug_code) { | |
| 1364 __ AbortIfNotSmi(right->reg()); | |
| 1365 } | |
| 1366 // If left is not known to be a smi, check if it is. | |
| 1367 // If left is not known to be a number, and it isn't a smi, check if | |
| 1368 // it is a HeapNumber. | |
| 1369 if (!left_type_info.IsSmi()) { | |
| 1370 __ JumpIfSmi(answer.reg(), &do_op); | |
| 1371 if (!left_type_info.IsNumber()) { | |
| 1372 // Branch if not a heap number. | |
| 1373 __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset), | |
| 1374 FACTORY->heap_number_map()); | |
| 1375 deferred->Branch(not_equal); | |
| 1376 } | |
| 1377 // Load integer value into answer register using truncation. | |
| 1378 __ cvttsd2si(answer.reg(), | |
| 1379 FieldOperand(answer.reg(), HeapNumber::kValueOffset)); | |
| 1380 // Branch if we might have overflowed. | |
| 1381 // (False negative for Smi::kMinValue) | |
| 1382 __ cmpl(answer.reg(), Immediate(0x80000000)); | |
| 1383 deferred->Branch(equal); | |
| 1384 // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging. | |
| 1385 __ Integer32ToSmi(answer.reg(), answer.reg()); | |
| 1386 } else { | |
| 1387 // Fast case - both are actually smis. | |
| 1388 if (FLAG_debug_code) { | |
| 1389 __ AbortIfNotSmi(left->reg()); | |
| 1390 } | |
| 1391 } | |
| 1392 } else { | |
| 1393 JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx, | |
| 1394 left_type_info, right_type_info, deferred); | |
| 1395 } | |
| 1396 __ bind(&do_op); | |
| 1397 | |
| 1398 // Perform the operation. | |
| 1399 switch (op) { | |
| 1400 case Token::SAR: | |
| 1401 __ SmiShiftArithmeticRight(answer.reg(), answer.reg(), rcx); | |
| 1402 break; | |
| 1403 case Token::SHR: { | |
| 1404 __ SmiShiftLogicalRight(answer.reg(), | |
| 1405 answer.reg(), | |
| 1406 rcx, | |
| 1407 deferred->entry_label()); | |
| 1408 break; | |
| 1409 } | |
| 1410 case Token::SHL: { | |
| 1411 __ SmiShiftLeft(answer.reg(), | |
| 1412 answer.reg(), | |
| 1413 rcx); | |
| 1414 break; | |
| 1415 } | |
| 1416 default: | |
| 1417 UNREACHABLE(); | |
| 1418 } | |
| 1419 deferred->BindExit(); | |
| 1420 left->Unuse(); | |
| 1421 right->Unuse(); | |
| 1422 ASSERT(answer.is_valid()); | |
| 1423 return answer; | |
| 1424 } | |
| 1425 | |
| 1426 // Handle the other binary operations. | |
| 1427 left->ToRegister(); | |
| 1428 right->ToRegister(); | |
| 1429 // A newly allocated register, answer, is used to hold the result. The | |
| 1430 // registers containing left and right are not modified, so they don't | |
| 1431 // need to be spilled in the fast case. | |
| 1432 answer = allocator_->Allocate(); | |
| 1433 ASSERT(answer.is_valid()); | |
| 1434 | |
| 1435 // Perform the smi tag check. | |
| 1436 DeferredInlineBinaryOperation* deferred = | |
| 1437 new DeferredInlineBinaryOperation(op, | |
| 1438 answer.reg(), | |
| 1439 left->reg(), | |
| 1440 right->reg(), | |
| 1441 overwrite_mode); | |
| 1442 JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(), | |
| 1443 left_type_info, right_type_info, deferred); | |
| 1444 | |
| 1445 switch (op) { | |
| 1446 case Token::ADD: | |
| 1447 __ SmiAdd(answer.reg(), | |
| 1448 left->reg(), | |
| 1449 right->reg(), | |
| 1450 deferred->entry_label()); | |
| 1451 break; | |
| 1452 | |
| 1453 case Token::SUB: | |
| 1454 __ SmiSub(answer.reg(), | |
| 1455 left->reg(), | |
| 1456 right->reg(), | |
| 1457 deferred->entry_label()); | |
| 1458 break; | |
| 1459 | |
| 1460 case Token::MUL: { | |
| 1461 __ SmiMul(answer.reg(), | |
| 1462 left->reg(), | |
| 1463 right->reg(), | |
| 1464 deferred->entry_label()); | |
| 1465 break; | |
| 1466 } | |
| 1467 | |
| 1468 case Token::BIT_OR: | |
| 1469 __ SmiOr(answer.reg(), left->reg(), right->reg()); | |
| 1470 break; | |
| 1471 | |
| 1472 case Token::BIT_AND: | |
| 1473 __ SmiAnd(answer.reg(), left->reg(), right->reg()); | |
| 1474 break; | |
| 1475 | |
| 1476 case Token::BIT_XOR: | |
| 1477 __ SmiXor(answer.reg(), left->reg(), right->reg()); | |
| 1478 break; | |
| 1479 | |
| 1480 default: | |
| 1481 UNREACHABLE(); | |
| 1482 break; | |
| 1483 } | |
| 1484 deferred->BindExit(); | |
| 1485 left->Unuse(); | |
| 1486 right->Unuse(); | |
| 1487 ASSERT(answer.is_valid()); | |
| 1488 return answer; | |
| 1489 } | |
| 1490 | |
| 1491 | |
| 1492 // Call the appropriate binary operation stub to compute src op value | |
| 1493 // and leave the result in dst. | |
| 1494 class DeferredInlineSmiOperation: public DeferredCode { | |
| 1495 public: | |
| 1496 DeferredInlineSmiOperation(Token::Value op, | |
| 1497 Register dst, | |
| 1498 Register src, | |
| 1499 Smi* value, | |
| 1500 OverwriteMode overwrite_mode) | |
| 1501 : op_(op), | |
| 1502 dst_(dst), | |
| 1503 src_(src), | |
| 1504 value_(value), | |
| 1505 overwrite_mode_(overwrite_mode) { | |
| 1506 set_comment("[ DeferredInlineSmiOperation"); | |
| 1507 } | |
| 1508 | |
| 1509 virtual void Generate(); | |
| 1510 | |
| 1511 private: | |
| 1512 Token::Value op_; | |
| 1513 Register dst_; | |
| 1514 Register src_; | |
| 1515 Smi* value_; | |
| 1516 OverwriteMode overwrite_mode_; | |
| 1517 }; | |
| 1518 | |
| 1519 | |
| 1520 void DeferredInlineSmiOperation::Generate() { | |
| 1521 // For mod we don't generate all the Smi code inline. | |
| 1522 GenericBinaryOpStub stub( | |
| 1523 op_, | |
| 1524 overwrite_mode_, | |
| 1525 (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB); | |
| 1526 stub.GenerateCall(masm_, src_, value_); | |
| 1527 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 1528 } | |
| 1529 | |
| 1530 | |
| 1531 // Call the appropriate binary operation stub to compute value op src | |
| 1532 // and leave the result in dst. | |
| 1533 class DeferredInlineSmiOperationReversed: public DeferredCode { | |
| 1534 public: | |
| 1535 DeferredInlineSmiOperationReversed(Token::Value op, | |
| 1536 Register dst, | |
| 1537 Smi* value, | |
| 1538 Register src, | |
| 1539 OverwriteMode overwrite_mode) | |
| 1540 : op_(op), | |
| 1541 dst_(dst), | |
| 1542 value_(value), | |
| 1543 src_(src), | |
| 1544 overwrite_mode_(overwrite_mode) { | |
| 1545 set_comment("[ DeferredInlineSmiOperationReversed"); | |
| 1546 } | |
| 1547 | |
| 1548 virtual void Generate(); | |
| 1549 | |
| 1550 private: | |
| 1551 Token::Value op_; | |
| 1552 Register dst_; | |
| 1553 Smi* value_; | |
| 1554 Register src_; | |
| 1555 OverwriteMode overwrite_mode_; | |
| 1556 }; | |
| 1557 | |
| 1558 | |
| 1559 void DeferredInlineSmiOperationReversed::Generate() { | |
| 1560 GenericBinaryOpStub stub( | |
| 1561 op_, | |
| 1562 overwrite_mode_, | |
| 1563 NO_SMI_CODE_IN_STUB); | |
| 1564 stub.GenerateCall(masm_, value_, src_); | |
| 1565 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 1566 } | |
| | |
| | |
| 1567 class DeferredInlineSmiAdd: public DeferredCode { | |
| 1568 public: | |
| 1569 DeferredInlineSmiAdd(Register dst, | |
| 1570 Smi* value, | |
| 1571 OverwriteMode overwrite_mode) | |
| 1572 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
| 1573 set_comment("[ DeferredInlineSmiAdd"); | |
| 1574 } | |
| 1575 | |
| 1576 virtual void Generate(); | |
| 1577 | |
| 1578 private: | |
| 1579 Register dst_; | |
| 1580 Smi* value_; | |
| 1581 OverwriteMode overwrite_mode_; | |
| 1582 }; | |
| 1583 | |
| 1584 | |
| 1585 void DeferredInlineSmiAdd::Generate() { | |
| 1586 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); | |
| 1587 igostub.GenerateCall(masm_, dst_, value_); | |
| 1588 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 1589 } | |
| 1590 | |
| 1591 | |
| 1592 // The result of value + src is in dst. It either overflowed or was not | |
| 1593 // smi tagged. Undo the speculative addition and call the appropriate | |
| 1594 // specialized stub for add. The result is left in dst. | |
| 1595 class DeferredInlineSmiAddReversed: public DeferredCode { | |
| 1596 public: | |
| 1597 DeferredInlineSmiAddReversed(Register dst, | |
| 1598 Smi* value, | |
| 1599 OverwriteMode overwrite_mode) | |
| 1600 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
| 1601 set_comment("[ DeferredInlineSmiAddReversed"); | |
| 1602 } | |
| 1603 | |
| 1604 virtual void Generate(); | |
| 1605 | |
| 1606 private: | |
| 1607 Register dst_; | |
| 1608 Smi* value_; | |
| 1609 OverwriteMode overwrite_mode_; | |
| 1610 }; | |
| 1611 | |
| 1612 | |
| 1613 void DeferredInlineSmiAddReversed::Generate() { | |
| 1614 GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB); | |
| 1615 igostub.GenerateCall(masm_, value_, dst_); | |
| 1616 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 1617 } | |
| 1618 | |
| 1619 | |
| 1620 class DeferredInlineSmiSub: public DeferredCode { | |
| 1621 public: | |
| 1622 DeferredInlineSmiSub(Register dst, | |
| 1623 Smi* value, | |
| 1624 OverwriteMode overwrite_mode) | |
| 1625 : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) { | |
| 1626 set_comment("[ DeferredInlineSmiSub"); | |
| 1627 } | |
| 1628 | |
| 1629 virtual void Generate(); | |
| 1630 | |
| 1631 private: | |
| 1632 Register dst_; | |
| 1633 Smi* value_; | |
| 1634 OverwriteMode overwrite_mode_; | |
| 1635 }; | |
| 1636 | |
| 1637 | |
| 1638 void DeferredInlineSmiSub::Generate() { | |
| 1639 GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB); | |
| 1640 igostub.GenerateCall(masm_, dst_, value_); | |
| 1641 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 1642 } | |
| 1643 | |
| 1644 | |
| 1645 Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr, | |
| 1646 Result* operand, | |
| 1647 Handle<Object> value, | |
| 1648 bool reversed, | |
| 1649 OverwriteMode overwrite_mode) { | |
| 1650 // Generate inline code for a binary operation when one of the | |
| 1651 // operands is a constant smi. Consumes the argument "operand". | |
| 1652 if (IsUnsafeSmi(value)) { | |
| 1653 Result unsafe_operand(value); | |
| 1654 if (reversed) { | |
| 1655 return LikelySmiBinaryOperation(expr, &unsafe_operand, operand, | |
| 1656 overwrite_mode); | |
| 1657 } else { | |
| 1658 return LikelySmiBinaryOperation(expr, operand, &unsafe_operand, | |
| 1659 overwrite_mode); | |
| 1660 } | |
| 1661 } | |
| 1662 | |
| 1663 // Get the literal value. | |
| 1664 Smi* smi_value = Smi::cast(*value); | |
| 1665 int int_value = smi_value->value(); | |
| 1666 | |
| 1667 Token::Value op = expr->op(); | |
| 1668 Result answer; | |
| 1669 switch (op) { | |
| 1670 case Token::ADD: { | |
| 1671 operand->ToRegister(); | |
| 1672 frame_->Spill(operand->reg()); | |
| 1673 DeferredCode* deferred = NULL; | |
| 1674 if (reversed) { | |
| 1675 deferred = new DeferredInlineSmiAddReversed(operand->reg(), | |
| 1676 smi_value, | |
| 1677 overwrite_mode); | |
| 1678 } else { | |
| 1679 deferred = new DeferredInlineSmiAdd(operand->reg(), | |
| 1680 smi_value, | |
| 1681 overwrite_mode); | |
| 1682 } | |
| 1683 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1684 deferred); | |
| 1685 __ SmiAddConstant(operand->reg(), | |
| 1686 operand->reg(), | |
| 1687 smi_value, | |
| 1688 deferred->entry_label()); | |
| 1689 deferred->BindExit(); | |
| 1690 answer = *operand; | |
| 1691 break; | |
| 1692 } | |
| 1693 | |
| 1694 case Token::SUB: { | |
| 1695 if (reversed) { | |
| 1696 Result constant_operand(value); | |
| 1697 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
| 1698 overwrite_mode); | |
| 1699 } else { | |
| 1700 operand->ToRegister(); | |
| 1701 frame_->Spill(operand->reg()); | |
| 1702 answer = *operand; | |
| 1703 DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(), | |
| 1704 smi_value, | |
| 1705 overwrite_mode); | |
| 1706 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1707 deferred); | |
| 1708 // A smi currently fits in a 32-bit Immediate. | |
| 1709 __ SmiSubConstant(operand->reg(), | |
| 1710 operand->reg(), | |
| 1711 smi_value, | |
| 1712 deferred->entry_label()); | |
| 1713 deferred->BindExit(); | |
| 1714 operand->Unuse(); | |
| 1715 } | |
| 1716 break; | |
| 1717 } | |
| 1718 | |
| 1719 case Token::SAR: | |
| 1720 if (reversed) { | |
| 1721 Result constant_operand(value); | |
| 1722 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
| 1723 overwrite_mode); | |
| 1724 } else { | |
| 1725 // Only the least significant 5 bits of the shift value are used. | |
| 1726 // In the slow case, this masking is done inside the runtime call. | |
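| // (ECMA-262 specifies shift counts modulo 32 -- ToUint32(count) & 0x1f -- | |
| // so this mask matches the required semantics.) | |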
| 1727 int shift_value = int_value & 0x1f; | |
| 1728 operand->ToRegister(); | |
| 1729 frame_->Spill(operand->reg()); | |
| 1730 DeferredInlineSmiOperation* deferred = | |
| 1731 new DeferredInlineSmiOperation(op, | |
| 1732 operand->reg(), | |
| 1733 operand->reg(), | |
| 1734 smi_value, | |
| 1735 overwrite_mode); | |
| 1736 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1737 deferred); | |
| 1738 __ SmiShiftArithmeticRightConstant(operand->reg(), | |
| 1739 operand->reg(), | |
| 1740 shift_value); | |
| 1741 deferred->BindExit(); | |
| 1742 answer = *operand; | |
| 1743 } | |
| 1744 break; | |
| 1745 | |
| 1746 case Token::SHR: | |
| 1747 if (reversed) { | |
| 1748 Result constant_operand(value); | |
| 1749 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
| 1750 overwrite_mode); | |
| 1751 } else { | |
| 1752 // Only the least significant 5 bits of the shift value are used. | |
| 1753 // In the slow case, this masking is done inside the runtime call. | |
| 1754 int shift_value = int_value & 0x1f; | |
| 1755 operand->ToRegister(); | |
| 1756 answer = allocator()->Allocate(); | |
| 1757 ASSERT(answer.is_valid()); | |
| 1758 DeferredInlineSmiOperation* deferred = | |
| 1759 new DeferredInlineSmiOperation(op, | |
| 1760 answer.reg(), | |
| 1761 operand->reg(), | |
| 1762 smi_value, | |
| 1763 overwrite_mode); | |
| 1764 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1765 deferred); | |
| 1766 __ SmiShiftLogicalRightConstant(answer.reg(), | |
| 1767 operand->reg(), | |
| 1768 shift_value, | |
| 1769 deferred->entry_label()); | |
| 1770 deferred->BindExit(); | |
| 1771 operand->Unuse(); | |
| 1772 } | |
| 1773 break; | |
| 1774 | |
| 1775 case Token::SHL: | |
| 1776 if (reversed) { | |
| 1777 operand->ToRegister(); | |
| 1778 | |
| 1779 // We need rcx to be available to hold the shift count, and it must be | |
| 1780 // spilled: SmiShiftLeft implicitly modifies rcx. | |
| 1781 if (operand->reg().is(rcx)) { | |
| 1782 frame_->Spill(operand->reg()); | |
| 1783 answer = allocator()->Allocate(); | |
| 1784 } else { | |
| 1785 Result rcx_reg = allocator()->Allocate(rcx); | |
| 1786 // answer must not be rcx. | |
| 1787 answer = allocator()->Allocate(); | |
| 1788 // rcx_reg goes out of scope. | |
| 1789 } | |
| 1790 | |
| 1791 DeferredInlineSmiOperationReversed* deferred = | |
| 1792 new DeferredInlineSmiOperationReversed(op, | |
| 1793 answer.reg(), | |
| 1794 smi_value, | |
| 1795 operand->reg(), | |
| 1796 overwrite_mode); | |
| 1797 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1798 deferred); | |
| 1799 | |
| 1800 __ Move(answer.reg(), smi_value); | |
| 1801 __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg()); | |
| 1802 operand->Unuse(); | |
| 1803 | |
| 1804 deferred->BindExit(); | |
| 1805 } else { | |
| 1806 // Only the least significant 5 bits of the shift value are used. | |
| 1807 // In the slow case, this masking is done inside the runtime call. | |
| 1808 int shift_value = int_value & 0x1f; | |
| 1809 operand->ToRegister(); | |
| 1810 if (shift_value == 0) { | |
| 1811 // Spill operand so it can be overwritten in the slow case. | |
| 1812 frame_->Spill(operand->reg()); | |
| 1813 DeferredInlineSmiOperation* deferred = | |
| 1814 new DeferredInlineSmiOperation(op, | |
| 1815 operand->reg(), | |
| 1816 operand->reg(), | |
| 1817 smi_value, | |
| 1818 overwrite_mode); | |
| 1819 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1820 deferred); | |
| 1821 deferred->BindExit(); | |
| 1822 answer = *operand; | |
| 1823 } else { | |
| 1824 // Use a fresh temporary for nonzero shift values. | |
| 1825 answer = allocator()->Allocate(); | |
| 1826 ASSERT(answer.is_valid()); | |
| 1827 DeferredInlineSmiOperation* deferred = | |
| 1828 new DeferredInlineSmiOperation(op, | |
| 1829 answer.reg(), | |
| 1830 operand->reg(), | |
| 1831 smi_value, | |
| 1832 overwrite_mode); | |
| 1833 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1834 deferred); | |
| 1835 __ SmiShiftLeftConstant(answer.reg(), | |
| 1836 operand->reg(), | |
| 1837 shift_value); | |
| 1838 deferred->BindExit(); | |
| 1839 operand->Unuse(); | |
| 1840 } | |
| 1841 } | |
| 1842 break; | |
| 1843 | |
| 1844 case Token::BIT_OR: | |
| 1845 case Token::BIT_XOR: | |
| 1846 case Token::BIT_AND: { | |
| 1847 operand->ToRegister(); | |
| 1848 frame_->Spill(operand->reg()); | |
| 1849 if (reversed) { | |
| 1850 // Bit operations with a constant smi are commutative. | |
| 1851 // We can swap left and right operands with no problem. | |
| 1852 // Swap left and right overwrite modes. 0->0, 1->2, 2->1. | |
| 1853 overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3); | |
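| // (Assuming NO_OVERWRITE == 0, OVERWRITE_LEFT == 1 and OVERWRITE_RIGHT == 2, | |
| // (2 * mode) % 3 maps 0 -> 0, 1 -> 2 and 2 -> 1.) | |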
| 1854 } | |
| 1855 DeferredCode* deferred = new DeferredInlineSmiOperation(op, | |
| 1856 operand->reg(), | |
| 1857 operand->reg(), | |
| 1858 smi_value, | |
| 1859 overwrite_mode); | |
| 1860 JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(), | |
| 1861 deferred); | |
| 1862 if (op == Token::BIT_AND) { | |
| 1863 __ SmiAndConstant(operand->reg(), operand->reg(), smi_value); | |
| 1864 } else if (op == Token::BIT_XOR) { | |
| 1865 if (int_value != 0) { | |
| 1866 __ SmiXorConstant(operand->reg(), operand->reg(), smi_value); | |
| 1867 } | |
| 1868 } else { | |
| 1869 ASSERT(op == Token::BIT_OR); | |
| 1870 if (int_value != 0) { | |
| 1871 __ SmiOrConstant(operand->reg(), operand->reg(), smi_value); | |
| 1872 } | |
| 1873 } | |
| 1874 deferred->BindExit(); | |
| 1875 answer = *operand; | |
| 1876 break; | |
| 1877 } | |
| 1878 | |
| 1879 // Generate inline code for mod of powers of 2 and negative powers of 2. | |
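| // For a non-negative smi x and a power of two m, x % m == x & (m - 1). | |
| // The sign of a JS % result follows the dividend, so the absolute value | |
| // of the divisor can be used, and negative dividends go to the deferred | |
| // code. | |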
| 1880 case Token::MOD: | |
| 1881 if (!reversed && | |
| 1882 int_value != 0 && | |
| 1883 (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) { | |
| 1884 operand->ToRegister(); | |
| 1885 frame_->Spill(operand->reg()); | |
| 1886 DeferredCode* deferred = | |
| 1887 new DeferredInlineSmiOperation(op, | |
| 1888 operand->reg(), | |
| 1889 operand->reg(), | |
| 1890 smi_value, | |
| 1891 overwrite_mode); | |
| 1892 __ JumpUnlessNonNegativeSmi(operand->reg(), deferred->entry_label()); | |
| 1893 if (int_value < 0) int_value = -int_value; | |
| 1894 if (int_value == 1) { | |
| 1895 __ Move(operand->reg(), Smi::FromInt(0)); | |
| 1896 } else { | |
| 1897 __ SmiAndConstant(operand->reg(), | |
| 1898 operand->reg(), | |
| 1899 Smi::FromInt(int_value - 1)); | |
| 1900 } | |
| 1901 deferred->BindExit(); | |
| 1902 answer = *operand; | |
| 1903 break; // This break only applies if we generated code for MOD. | |
| 1904 } | |
| 1905 // Fall through if we did not find a power of 2 on the right hand side! | |
| 1906 // The next case must be the default. | |
| 1907 | |
| 1908 default: { | |
| 1909 Result constant_operand(value); | |
| 1910 if (reversed) { | |
| 1911 answer = LikelySmiBinaryOperation(expr, &constant_operand, operand, | |
| 1912 overwrite_mode); | |
| 1913 } else { | |
| 1914 answer = LikelySmiBinaryOperation(expr, operand, &constant_operand, | |
| 1915 overwrite_mode); | |
| 1916 } | |
| 1917 break; | |
| 1918 } | |
| 1919 } | |
| 1920 ASSERT(answer.is_valid()); | |
| 1921 return answer; | |
| 1922 } | |
| 1923 | |
| 1924 | |
| 1925 static bool CouldBeNaN(const Result& result) { | |
| 1926 if (result.type_info().IsSmi()) return false; | |
| 1927 if (result.type_info().IsInteger32()) return false; | |
| 1928 if (!result.is_constant()) return true; | |
| 1929 if (!result.handle()->IsHeapNumber()) return false; | |
| 1930 return isnan(HeapNumber::cast(*result.handle())->value()); | |
| 1931 } | |
| 1932 | |
| 1933 | |
| 1934 // Convert from signed to unsigned comparison to match the way EFLAGS are set | |
| 1935 // by FPU and XMM compare instructions. | |
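| // (ucomisd sets ZF, PF and CF the way an unsigned integer compare would: | |
| // CF is set when the first operand is the smaller, so "less" maps to | |
| // "below" and so on.) | |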
| 1936 static Condition DoubleCondition(Condition cc) { | |
| 1937 switch (cc) { | |
| 1938 case less: return below; | |
| 1939 case equal: return equal; | |
| 1940 case less_equal: return below_equal; | |
| 1941 case greater: return above; | |
| 1942 case greater_equal: return above_equal; | |
| 1943 default: UNREACHABLE(); | |
| 1944 } | |
| 1945 UNREACHABLE(); | |
| 1946 return equal; | |
| 1947 } | |
| 1948 | |
| 1949 | |
| 1950 static CompareFlags ComputeCompareFlags(NaNInformation nan_info, | |
| 1951 bool inline_number_compare) { | |
| 1952 CompareFlags flags = NO_SMI_COMPARE_IN_STUB; | |
| 1953 if (nan_info == kCantBothBeNaN) { | |
| 1954 flags = static_cast<CompareFlags>(flags | CANT_BOTH_BE_NAN); | |
| 1955 } | |
| 1956 if (inline_number_compare) { | |
| 1957 flags = static_cast<CompareFlags>(flags | NO_NUMBER_COMPARE_IN_STUB); | |
| 1958 } | |
| 1959 return flags; | |
| 1960 } | |
| 1961 | |
| 1962 | |
| 1963 void CodeGenerator::Comparison(AstNode* node, | |
| 1964 Condition cc, | |
| 1965 bool strict, | |
| 1966 ControlDestination* dest) { | |
| 1967 // Strict only makes sense for equality comparisons. | |
| 1968 ASSERT(!strict || cc == equal); | |
| 1969 | |
| 1970 Result left_side; | |
| 1971 Result right_side; | |
| 1972 // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. | |
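| // For example, a > b is evaluated as b < a: the operands are popped in | |
| // swapped order and the condition reversed, so only less, equal and | |
| // greater_equal need to be handled below. | |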
| 1973 if (cc == greater || cc == less_equal) { | |
| 1974 cc = ReverseCondition(cc); | |
| 1975 left_side = frame_->Pop(); | |
| 1976 right_side = frame_->Pop(); | |
| 1977 } else { | |
| 1978 right_side = frame_->Pop(); | |
| 1979 left_side = frame_->Pop(); | |
| 1980 } | |
| 1981 ASSERT(cc == less || cc == equal || cc == greater_equal); | |
| 1982 | |
| 1983 // If either side is a constant smi, optimize the comparison. | |
| 1984 bool left_side_constant_smi = false; | |
| 1985 bool left_side_constant_null = false; | |
| 1986 bool left_side_constant_1_char_string = false; | |
| 1987 if (left_side.is_constant()) { | |
| 1988 left_side_constant_smi = left_side.handle()->IsSmi(); | |
| 1989 left_side_constant_null = left_side.handle()->IsNull(); | |
| 1990 left_side_constant_1_char_string = | |
| 1991 (left_side.handle()->IsString() && | |
| 1992 String::cast(*left_side.handle())->length() == 1 && | |
| 1993 String::cast(*left_side.handle())->IsAsciiRepresentation()); | |
| 1994 } | |
| 1995 bool right_side_constant_smi = false; | |
| 1996 bool right_side_constant_null = false; | |
| 1997 bool right_side_constant_1_char_string = false; | |
| 1998 if (right_side.is_constant()) { | |
| 1999 right_side_constant_smi = right_side.handle()->IsSmi(); | |
| 2000 right_side_constant_null = right_side.handle()->IsNull(); | |
| 2001 right_side_constant_1_char_string = | |
| 2002 (right_side.handle()->IsString() && | |
| 2003 String::cast(*right_side.handle())->length() == 1 && | |
| 2004 String::cast(*right_side.handle())->IsAsciiRepresentation()); | |
| 2005 } | |
| 2006 | |
| 2007 if (left_side_constant_smi || right_side_constant_smi) { | |
| 2008 bool is_loop_condition = (node->AsExpression() != NULL) && | |
| 2009 node->AsExpression()->is_loop_condition(); | |
| 2010 ConstantSmiComparison(cc, strict, dest, &left_side, &right_side, | |
| 2011 left_side_constant_smi, right_side_constant_smi, | |
| 2012 is_loop_condition); | |
| 2013 } else if (left_side_constant_1_char_string || | |
| 2014 right_side_constant_1_char_string) { | |
| 2015 if (left_side_constant_1_char_string && right_side_constant_1_char_string) { | |
| 2016 // Trivial case, comparing two constants. | |
| 2017 int left_value = String::cast(*left_side.handle())->Get(0); | |
| 2018 int right_value = String::cast(*right_side.handle())->Get(0); | |
| 2019 switch (cc) { | |
| 2020 case less: | |
| 2021 dest->Goto(left_value < right_value); | |
| 2022 break; | |
| 2023 case equal: | |
| 2024 dest->Goto(left_value == right_value); | |
| 2025 break; | |
| 2026 case greater_equal: | |
| 2027 dest->Goto(left_value >= right_value); | |
| 2028 break; | |
| 2029 default: | |
| 2030 UNREACHABLE(); | |
| 2031 } | |
| 2032 } else { | |
| 2033 // Only one side is a constant 1 character string. | |
| 2034 // If left side is a constant 1-character string, reverse the operands. | |
| 2035 // Since one side is a constant string, conversion order does not matter. | |
| 2036 if (left_side_constant_1_char_string) { | |
| 2037 Result temp = left_side; | |
| 2038 left_side = right_side; | |
| 2039 right_side = temp; | |
| 2040 cc = ReverseCondition(cc); | |
| 2041 // This may reintroduce greater or less_equal as the value of cc. | |
| 2042 // CompareStub and the inline code both support all values of cc. | |
| 2043 } | |
| 2044 // Implement comparison against a constant string, inlining the case | |
| 2045 // where both sides are strings. | |
| 2046 left_side.ToRegister(); | |
| 2047 | |
| 2048 // Here we split control flow to the stub call and inlined cases | |
| 2049 // before finally splitting it to the control destination. We use | |
| 2050 // a jump target and branching to duplicate the virtual frame at | |
| 2051 // the first split. We manually handle the off-frame references | |
| 2052 // by reconstituting them on the non-fall-through path. | |
| 2053 JumpTarget is_not_string, is_string; | |
| 2054 Register left_reg = left_side.reg(); | |
| 2055 Handle<Object> right_val = right_side.handle(); | |
| 2056 ASSERT(StringShape(String::cast(*right_val)).IsSymbol()); | |
| 2057 Condition is_smi = masm()->CheckSmi(left_reg); | |
| 2058 is_not_string.Branch(is_smi, &left_side); | |
| 2059 Result temp = allocator_->Allocate(); | |
| 2060 ASSERT(temp.is_valid()); | |
| 2061 __ movq(temp.reg(), | |
| 2062 FieldOperand(left_reg, HeapObject::kMapOffset)); | |
| 2063 __ movzxbl(temp.reg(), | |
| 2064 FieldOperand(temp.reg(), Map::kInstanceTypeOffset)); | |
| 2065 // If we are testing for equality then make use of the symbol shortcut. | |
| 2066 // Check if the left hand side has the same type as the right hand | |
| 2067 // side (which is always a symbol). | |
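| // (Symbols are canonicalized, so two symbols are equal exactly when they | |
| // are the same object; an identity compare decides equality without | |
| // looking at the characters.) | |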
| 2068 if (cc == equal) { | |
| 2069 Label not_a_symbol; | |
| 2070 STATIC_ASSERT(kSymbolTag != 0); | |
| 2071 // Ensure that no non-strings have the symbol bit set. | |
| 2072 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); | |
| 2073 __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit. | |
| 2074 __ j(zero, ¬_a_symbol); | |
| 2075 // They are symbols, so do identity compare. | |
| 2076 __ Cmp(left_reg, right_side.handle()); | |
| 2077 dest->true_target()->Branch(equal); | |
| 2078 dest->false_target()->Branch(not_equal); | |
| 2079 __ bind(¬_a_symbol); | |
| 2080 } | |
| 2081 // Call the compare stub if the left side is not a flat ASCII string. | |
| 2082 __ andb(temp.reg(), | |
| 2083 Immediate(kIsNotStringMask | | |
| 2084 kStringRepresentationMask | | |
| 2085 kStringEncodingMask)); | |
| 2086 __ cmpb(temp.reg(), | |
| 2087 Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); | |
| 2088 temp.Unuse(); | |
| 2089 is_string.Branch(equal, &left_side); | |
| 2090 | |
| 2091 // Set up and call the compare stub. | |
| 2092 is_not_string.Bind(&left_side); | |
| 2093 CompareFlags flags = | |
| 2094 static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB); | |
| 2095 CompareStub stub(cc, strict, flags); | |
| 2096 Result result = frame_->CallStub(&stub, &left_side, &right_side); | |
| 2097 result.ToRegister(); | |
| 2098 __ testq(result.reg(), result.reg()); | |
| 2099 result.Unuse(); | |
| 2100 dest->true_target()->Branch(cc); | |
| 2101 dest->false_target()->Jump(); | |
| 2102 | |
| 2103 is_string.Bind(&left_side); | |
| 2104 // left_side is a sequential ASCII string. | |
| 2105 ASSERT(left_side.reg().is(left_reg)); | |
| 2106 right_side = Result(right_val); | |
| 2107 Result temp2 = allocator_->Allocate(); | |
| 2108 ASSERT(temp2.is_valid()); | |
| 2109 // Test string equality and comparison. | |
| 2110 if (cc == equal) { | |
| 2111 Label comparison_done; | |
| 2112 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), | |
| 2113 Smi::FromInt(1)); | |
| 2114 __ j(not_equal, &comparison_done); | |
| 2115 uint8_t char_value = | |
| 2116 static_cast<uint8_t>(String::cast(*right_val)->Get(0)); | |
| 2117 __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize), | |
| 2118 Immediate(char_value)); | |
| 2119 __ bind(&comparison_done); | |
| 2120 } else { | |
| 2121 __ movq(temp2.reg(), | |
| 2122 FieldOperand(left_side.reg(), String::kLengthOffset)); | |
| 2123 __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1)); | |
| 2124 Label comparison; | |
| 2125 // If the length is 0 then the subtraction gave -1 which compares less | |
| 2126 // than any character. | |
| 2127 __ j(negative, &comparison); | |
| 2128 // Otherwise load the first character. | |
| 2129 __ movzxbl(temp2.reg(), | |
| 2130 FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize)); | |
| 2131 __ bind(&comparison); | |
| 2132 // Compare the first character of the string with the | |
| 2133 // constant 1-character string. | |
| 2134 uint8_t char_value = | |
| 2135 static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0)); | |
| 2136 __ cmpb(temp2.reg(), Immediate(char_value)); | |
| 2137 Label characters_were_different; | |
| 2138 __ j(not_equal, &characters_were_different); | |
| 2139 // If the first character is the same then the long string sorts after | |
| 2140 // the short one. | |
| 2141 __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset), | |
| 2142 Smi::FromInt(1)); | |
| 2143 __ bind(&characters_were_different); | |
| 2144 } | |
| 2145 temp2.Unuse(); | |
| 2146 left_side.Unuse(); | |
| 2147 right_side.Unuse(); | |
| 2148 dest->Split(cc); | |
| 2149 } | |
| 2150 } else { | |
| 2151 // Neither side is a constant Smi, constant 1-char string, or constant null. | |
| 2152 // If either side is a non-smi constant, or known to be a heap number, | |
| 2153 // skip the smi check. | |
| 2154 bool known_non_smi = | |
| 2155 (left_side.is_constant() && !left_side.handle()->IsSmi()) || | |
| 2156 (right_side.is_constant() && !right_side.handle()->IsSmi()) || | |
| 2157 left_side.type_info().IsDouble() || | |
| 2158 right_side.type_info().IsDouble(); | |
| 2159 | |
| 2160 NaNInformation nan_info = | |
| 2161 (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ? | |
| 2162 kBothCouldBeNaN : | |
| 2163 kCantBothBeNaN; | |
| 2164 | |
| 2165 // Inline number comparison, handling any combination of smis and heap | |
| 2166 // numbers, if: | |
| 2167 // the code is in a loop, | |
| 2168 // the compare operation is different from equal, and | |
| 2169 // the compare is not a for-loop condition. | |
| 2170 // The reason for excluding equal is that it will most likely be done | |
| 2171 // with smis (not heap numbers), and the code for comparing smis is | |
| 2172 // inlined separately. The same reasoning applies to for-loop conditions, | |
| 2173 // which will also most likely be smi comparisons. | |
| 2174 bool is_loop_condition = (node->AsExpression() != NULL) | |
| 2175 && node->AsExpression()->is_loop_condition(); | |
| 2176 bool inline_number_compare = | |
| 2177 loop_nesting() > 0 && cc != equal && !is_loop_condition; | |
| 2178 | |
| 2179 // Left and right needed in registers for the following code. | |
| 2180 left_side.ToRegister(); | |
| 2181 right_side.ToRegister(); | |
| 2182 | |
| 2183 if (known_non_smi) { | |
| 2184 // Inlined equality check: | |
| 2185 // If at least one of the objects is not NaN, then if the objects | |
| 2186 // are identical, they are equal. | |
| 2187 if (nan_info == kCantBothBeNaN && cc == equal) { | |
| 2188 __ cmpq(left_side.reg(), right_side.reg()); | |
| 2189 dest->true_target()->Branch(equal); | |
| 2190 } | |
| 2191 | |
| 2192 // Inlined number comparison: | |
| 2193 if (inline_number_compare) { | |
| 2194 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); | |
| 2195 } | |
| 2196 | |
| 2197 // End of in-line compare, call out to the compare stub. Don't include | |
| 2198 // number comparison in the stub if it was inlined. | |
| 2199 CompareFlags flags = ComputeCompareFlags(nan_info, inline_number_compare); | |
| 2200 CompareStub stub(cc, strict, flags); | |
| 2201 Result answer = frame_->CallStub(&stub, &left_side, &right_side); | |
| 2202 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flag. | |
| 2203 answer.Unuse(); | |
| 2204 dest->Split(cc); | |
| 2205 } else { | |
| 2206 // Here we split control flow to the stub call and inlined cases | |
| 2207 // before finally splitting it to the control destination. We use | |
| 2208 // a jump target and branching to duplicate the virtual frame at | |
| 2209 // the first split. We manually handle the off-frame references | |
| 2210 // by reconstituting them on the non-fall-through path. | |
| 2211 JumpTarget is_smi; | |
| 2212 Register left_reg = left_side.reg(); | |
| 2213 Register right_reg = right_side.reg(); | |
| 2214 | |
| 2215 // In-line check for comparing two smis. | |
| 2216 JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi); | |
| 2217 | |
| 2218 if (has_valid_frame()) { | |
| 2219 // Inline the equality check if both operands can't be a NaN. If both | |
| 2220 // objects are the same they are equal. | |
| 2221 if (nan_info == kCantBothBeNaN && cc == equal) { | |
| 2222 __ cmpq(left_side.reg(), right_side.reg()); | |
| 2223 dest->true_target()->Branch(equal); | |
| 2224 } | |
| 2225 | |
| 2226 // Inlined number comparison: | |
| 2227 if (inline_number_compare) { | |
| 2228 GenerateInlineNumberComparison(&left_side, &right_side, cc, dest); | |
| 2229 } | |
| 2230 | |
| 2231 // End of in-line compare, call out to the compare stub. Don't include | |
| 2232 // number comparison in the stub if it was inlined. | |
| 2233 CompareFlags flags = | |
| 2234 ComputeCompareFlags(nan_info, inline_number_compare); | |
| 2235 CompareStub stub(cc, strict, flags); | |
| 2236 Result answer = frame_->CallStub(&stub, &left_side, &right_side); | |
| 2237 __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags. | |
| 2238 answer.Unuse(); | |
| 2239 if (is_smi.is_linked()) { | |
| 2240 dest->true_target()->Branch(cc); | |
| 2241 dest->false_target()->Jump(); | |
| 2242 } else { | |
| 2243 dest->Split(cc); | |
| 2244 } | |
| 2245 } | |
| 2246 | |
| 2247 if (is_smi.is_linked()) { | |
| 2248 is_smi.Bind(); | |
| 2249 left_side = Result(left_reg); | |
| 2250 right_side = Result(right_reg); | |
| 2251 __ SmiCompare(left_side.reg(), right_side.reg()); | |
| 2252 right_side.Unuse(); | |
| 2253 left_side.Unuse(); | |
| 2254 dest->Split(cc); | |
| 2255 } | |
| 2256 } | |
| 2257 } | |
| 2258 } | |
| 2259 | |
| 2260 | |
| 2261 void CodeGenerator::ConstantSmiComparison(Condition cc, | |
| 2262 bool strict, | |
| 2263 ControlDestination* dest, | |
| 2264 Result* left_side, | |
| 2265 Result* right_side, | |
| 2266 bool left_side_constant_smi, | |
| 2267 bool right_side_constant_smi, | |
| 2268 bool is_loop_condition) { | |
| 2269 if (left_side_constant_smi && right_side_constant_smi) { | |
| 2270 // Trivial case, comparing two constants. | |
| 2271 int left_value = Smi::cast(*left_side->handle())->value(); | |
| 2272 int right_value = Smi::cast(*right_side->handle())->value(); | |
| 2273 switch (cc) { | |
| 2274 case less: | |
| 2275 dest->Goto(left_value < right_value); | |
| 2276 break; | |
| 2277 case equal: | |
| 2278 dest->Goto(left_value == right_value); | |
| 2279 break; | |
| 2280 case greater_equal: | |
| 2281 dest->Goto(left_value >= right_value); | |
| 2282 break; | |
| 2283 default: | |
| 2284 UNREACHABLE(); | |
| 2285 } | |
| 2286 } else { | |
| 2287 // Only one side is a constant Smi. | |
| 2288 // If left side is a constant Smi, reverse the operands. | |
| 2289 // Since one side is a constant Smi, conversion order does not matter. | |
| 2290 if (left_side_constant_smi) { | |
| 2291 Result* temp = left_side; | |
| 2292 left_side = right_side; | |
| 2293 right_side = temp; | |
| 2294 cc = ReverseCondition(cc); | |
| 2295 // This may re-introduce greater or less_equal as the value of cc. | |
| 2296 // CompareStub and the inline code both support all values of cc. | |
| 2297 } | |
| 2298 // Implement comparison against a constant Smi, inlining the case | |
| 2299 // where both sides are smis. | |
| 2300 left_side->ToRegister(); | |
| 2301 Register left_reg = left_side->reg(); | |
| 2302 Smi* constant_smi = Smi::cast(*right_side->handle()); | |
| 2303 | |
| 2304 if (left_side->is_smi()) { | |
| 2305 if (FLAG_debug_code) { | |
| 2306 __ AbortIfNotSmi(left_reg); | |
| 2307 } | |
| 2308 // Test smi equality and comparison by signed int comparison. | |
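| // (In this port a smi keeps its 32-bit value in the upper half of the | |
| // word with a zero tag below, so an ordinary signed 64-bit compare | |
| // orders smis exactly like the integers they encode.) | |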
| 2309 __ SmiCompare(left_reg, constant_smi); | |
| 2310 left_side->Unuse(); | |
| 2311 right_side->Unuse(); | |
| 2312 dest->Split(cc); | |
| 2313 } else { | |
| 2314 // Only the case remains where the left side might not be a smi. | |
| 2315 JumpTarget is_smi; | |
| 2316 if (cc == equal) { | |
| 2317 // We can do the equality comparison before the smi check. | |
| 2318 __ Cmp(left_reg, constant_smi); | |
| 2319 dest->true_target()->Branch(equal); | |
| 2320 Condition left_is_smi = masm_->CheckSmi(left_reg); | |
| 2321 dest->false_target()->Branch(left_is_smi); | |
| 2322 } else { | |
| 2323 // Do the smi check, then the comparison. | |
| 2324 Condition left_is_smi = masm_->CheckSmi(left_reg); | |
| 2325 is_smi.Branch(left_is_smi, left_side, right_side); | |
| 2326 } | |
| 2327 | |
| 2328 // Jump or fall through to here if we are comparing a non-smi to a | |
| 2329 // constant smi. If the non-smi is a heap number and this is not | |
| 2330 // a loop condition, inline the floating point code. | |
| 2331 if (!is_loop_condition) { | |
| 2332 // Right side is a constant smi and left side has been checked | |
| 2333 // not to be a smi. | |
| 2334 JumpTarget not_number; | |
| 2335 __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset), | |
| 2336 FACTORY->heap_number_map()); | |
| 2337 not_number.Branch(not_equal, left_side); | |
| 2338 __ movsd(xmm1, | |
| 2339 FieldOperand(left_reg, HeapNumber::kValueOffset)); | |
| 2340 int value = constant_smi->value(); | |
| 2341 if (value == 0) { | |
| 2342 __ xorpd(xmm0, xmm0); | |
| 2343 } else { | |
| 2344 Result temp = allocator()->Allocate(); | |
| 2345 __ movl(temp.reg(), Immediate(value)); | |
| 2346 __ cvtlsi2sd(xmm0, temp.reg()); | |
| 2347 temp.Unuse(); | |
| 2348 } | |
| 2349 __ ucomisd(xmm1, xmm0); | |
| 2350 // Jump to builtin for NaN. | |
| 2351 not_number.Branch(parity_even, left_side); | |
| 2352 left_side->Unuse(); | |
| 2353 dest->true_target()->Branch(DoubleCondition(cc)); | |
| 2354 dest->false_target()->Jump(); | |
| 2355 not_number.Bind(left_side); | |
| 2356 } | |
| 2357 | |
| 2358 // Set up and call the compare stub. | |
| 2359 CompareFlags flags = | |
| 2360 static_cast<CompareFlags>(CANT_BOTH_BE_NAN | NO_SMI_CODE_IN_STUB); | |
| 2361 CompareStub stub(cc, strict, flags); | |
| 2362 Result result = frame_->CallStub(&stub, left_side, right_side); | |
| 2363 result.ToRegister(); | |
| 2364 __ testq(result.reg(), result.reg()); | |
| 2365 result.Unuse(); | |
| 2366 if (cc == equal) { | |
| 2367 dest->Split(cc); | |
| 2368 } else { | |
| 2369 dest->true_target()->Branch(cc); | |
| 2370 dest->false_target()->Jump(); | |
| 2371 | |
| 2372 // For performance it is important that this case comes at the end. | |
| 2373 is_smi.Bind(left_side, right_side); | |
| 2374 __ SmiCompare(left_reg, constant_smi); | |
| 2375 left_side->Unuse(); | |
| 2376 right_side->Unuse(); | |
| 2377 dest->Split(cc); | |
| 2378 } | |
| 2379 } | |
| 2380 } | |
| 2381 } | |
| 2382 | |
| 2383 | |
| 2384 // Load a comparison operand into an XMM register. Jump to the not_numbers | |
| 2385 // target, passing the left and right results, if the operand is not a number. | |
| 2386 static void LoadComparisonOperand(MacroAssembler* masm_, | |
| 2387 Result* operand, | |
| 2388 XMMRegister xmm_reg, | |
| 2389 Result* left_side, | |
| 2390 Result* right_side, | |
| 2391 JumpTarget* not_numbers) { | |
| 2392 Label done; | |
| 2393 if (operand->type_info().IsDouble()) { | |
| 2394 // Operand is known to be a heap number, just load it. | |
| 2395 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); | |
| 2396 } else if (operand->type_info().IsSmi()) { | |
| 2397 // Operand is known to be a smi. Convert it to double and keep the original | |
| 2398 // smi. | |
| 2399 __ SmiToInteger32(kScratchRegister, operand->reg()); | |
| 2400 __ cvtlsi2sd(xmm_reg, kScratchRegister); | |
| 2401 } else { | |
| 2402 // Operand type not known, check for smi or heap number. | |
| 2403 Label smi; | |
| 2404 __ JumpIfSmi(operand->reg(), &smi); | |
| 2405 if (!operand->type_info().IsNumber()) { | |
| 2406 __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); | |
| 2407 __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset), | |
| 2408 kScratchRegister); | |
| 2409 not_numbers->Branch(not_equal, left_side, right_side, taken); | |
| 2410 } | |
| 2411 __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset)); | |
| 2412 __ jmp(&done); | |
| 2413 | |
| 2414 __ bind(&smi); | |
| 2415 // Convert smi to double and keep the original smi. | |
| 2416 __ SmiToInteger32(kScratchRegister, operand->reg()); | |
| 2417 __ cvtlsi2sd(xmm_reg, kScratchRegister); | |
| 2418 __ jmp(&done); | |
| 2419 } | |
| 2420 __ bind(&done); | |
| 2421 } | |
| 2422 | |
| 2423 | |
| 2424 void CodeGenerator::GenerateInlineNumberComparison(Result* left_side, | |
| 2425 Result* right_side, | |
| 2426 Condition cc, | |
| 2427 ControlDestination* dest) { | |
| 2428 ASSERT(left_side->is_register()); | |
| 2429 ASSERT(right_side->is_register()); | |
| 2430 | |
| 2431 JumpTarget not_numbers; | |
| 2432 // Load left and right operand into registers xmm0 and xmm1 and compare. | |
| 2433 LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side, | |
| 2434 ¬_numbers); | |
| 2435 LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side, | |
| 2436 ¬_numbers); | |
| 2437 __ ucomisd(xmm0, xmm1); | |
| 2438 // Bail out if a NaN is involved. | |
| 2439 not_numbers.Branch(parity_even, left_side, right_side); | |
| 2440 | |
| 2441 // Split to destination targets based on comparison. | |
| 2442 left_side->Unuse(); | |
| 2443 right_side->Unuse(); | |
| 2444 dest->true_target()->Branch(DoubleCondition(cc)); | |
| 2445 dest->false_target()->Jump(); | |
| 2446 | |
| 2447 not_numbers.Bind(left_side, right_side); | |
| 2448 } | |
| 2449 | |
| 2450 | |
| 2451 // Call the function just below TOS on the stack with the given | |
| 2452 // arguments. The receiver is the TOS. | |
| 2453 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, | |
| 2454 CallFunctionFlags flags, | |
| 2455 int position) { | |
| 2456 // Push the arguments ("left-to-right") on the stack. | |
| 2457 int arg_count = args->length(); | |
| 2458 for (int i = 0; i < arg_count; i++) { | |
| 2459 Load(args->at(i)); | |
| 2460 frame_->SpillTop(); | |
| 2461 } | |
| 2462 | |
| 2463 // Record the position for debugging purposes. | |
| 2464 CodeForSourcePosition(position); | |
| 2465 | |
| 2466 // Use the shared code stub to call the function. | |
| 2467 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 2468 CallFunctionStub call_function(arg_count, in_loop, flags); | |
| 2469 Result answer = frame_->CallStub(&call_function, arg_count + 1); | |
| 2470 // Restore context and replace function on the stack with the | |
| 2471 // result of the stub invocation. | |
| 2472 frame_->RestoreContextRegister(); | |
| 2473 frame_->SetElementAt(0, &answer); | |
| 2474 } | |
| 2475 | |
| 2476 | |
| 2477 void CodeGenerator::CallApplyLazy(Expression* applicand, | |
| 2478 Expression* receiver, | |
| 2479 VariableProxy* arguments, | |
| 2480 int position) { | |
| 2481 // An optimized implementation of expressions of the form | |
| 2482 // x.apply(y, arguments). | |
| 2483 // If the arguments object of the scope has not been allocated, | |
| 2484 // and x.apply is Function.prototype.apply, this optimization | |
| 2485 // just copies y and the arguments of the current function on the | |
| 2486 // stack, as receiver and arguments, and calls x. | |
| 2487 // In the implementation comments, we call x the applicand | |
| 2488 // and y the receiver. | |
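| // For example, in "function f() { return g.apply(this, arguments); }" the | |
| // optimized path pushes this and f's actual parameters directly and | |
| // invokes g, never materializing the arguments object. | |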
| 2489 ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION); | |
| 2490 ASSERT(arguments->IsArguments()); | |
| 2491 | |
| 2492 // Load applicand.apply onto the stack. This will usually | |
| 2493 // give us a megamorphic load site. Not super, but it works. | |
| 2494 Load(applicand); | |
| 2495 frame()->Dup(); | |
| 2496 Handle<String> name = FACTORY->LookupAsciiSymbol("apply"); | |
| 2497 frame()->Push(name); | |
| 2498 Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET); | |
| 2499 __ nop(); | |
| 2500 frame()->Push(&answer); | |
| 2501 | |
| 2502 // Load the receiver and the existing arguments object onto the | |
| 2503 // expression stack. Avoid allocating the arguments object here. | |
| 2504 Load(receiver); | |
| 2505 LoadFromSlot(scope()->arguments()->AsSlot(), NOT_INSIDE_TYPEOF); | |
| 2506 | |
| 2507 // Emit the source position information after having loaded the | |
| 2508 // receiver and the arguments. | |
| 2509 CodeForSourcePosition(position); | |
| 2510 // Contents of frame at this point: | |
| 2511 // Frame[0]: arguments object of the current function or the hole. | |
| 2512 // Frame[1]: receiver | |
| 2513 // Frame[2]: applicand.apply | |
| 2514 // Frame[3]: applicand. | |
| 2515 | |
| 2516 // Check if the arguments object has been lazily allocated | |
| 2517 // already. If so, just use that instead of copying the arguments | |
| 2518 // from the stack. This also deals with cases where a local variable | |
| 2519 // named 'arguments' has been introduced. | |
| 2520 frame_->Dup(); | |
| 2521 Result probe = frame_->Pop(); | |
| 2522 { VirtualFrame::SpilledScope spilled_scope; | |
| 2523 Label slow, done; | |
| 2524 bool try_lazy = true; | |
| 2525 if (probe.is_constant()) { | |
| 2526 try_lazy = probe.handle()->IsArgumentsMarker(); | |
| 2527 } else { | |
| 2528 __ CompareRoot(probe.reg(), Heap::kArgumentsMarkerRootIndex); | |
| 2529 probe.Unuse(); | |
| 2530 __ j(not_equal, &slow); | |
| 2531 } | |
| 2532 | |
| 2533 if (try_lazy) { | |
| 2534 Label build_args; | |
| 2535 // Get rid of the arguments object probe. | |
| 2536 frame_->Drop(); // Can be called on a spilled frame. | |
| 2537 // Stack now has 3 elements on it. | |
| 2538 // Contents of stack at this point: | |
| 2539 // rsp[0]: receiver | |
| 2540 // rsp[1]: applicand.apply | |
| 2541 // rsp[2]: applicand. | |
| 2542 | |
| 2543 // Check that the receiver really is a JavaScript object. | |
| 2544 __ movq(rax, Operand(rsp, 0)); | |
| 2545 Condition is_smi = masm_->CheckSmi(rax); | |
| 2546 __ j(is_smi, &build_args); | |
| 2547 // We allow all JSObjects including JSFunctions. As long as | |
| 2548 // JS_FUNCTION_TYPE is the last instance type and it is right | |
| 2549 // after LAST_JS_OBJECT_TYPE, we do not have to check the upper | |
| 2550 // bound. | |
| 2551 STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
| 2552 STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); | |
| 2553 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); | |
| 2554 __ j(below, &build_args); | |
| 2555 | |
| 2556 // Check that applicand.apply is Function.prototype.apply. | |
| 2557 __ movq(rax, Operand(rsp, kPointerSize)); | |
| 2558 is_smi = masm_->CheckSmi(rax); | |
| 2559 __ j(is_smi, &build_args); | |
| 2560 __ CmpObjectType(rax, JS_FUNCTION_TYPE, rcx); | |
| 2561 __ j(not_equal, &build_args); | |
| 2562 __ movq(rcx, FieldOperand(rax, JSFunction::kCodeEntryOffset)); | |
| 2563 __ subq(rcx, Immediate(Code::kHeaderSize - kHeapObjectTag)); | |
| 2564 Handle<Code> apply_code = Isolate::Current()->builtins()->FunctionApply(); | |
| 2565 __ Cmp(rcx, apply_code); | |
| 2566 __ j(not_equal, &build_args); | |
| 2567 | |
| 2568 // Check that applicand is a function. | |
| 2569 __ movq(rdi, Operand(rsp, 2 * kPointerSize)); | |
| 2570 is_smi = masm_->CheckSmi(rdi); | |
| 2571 __ j(is_smi, &build_args); | |
| 2572 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); | |
| 2573 __ j(not_equal, &build_args); | |
| 2574 | |
| 2575 // Copy the arguments to this function possibly from the | |
| 2576 // adaptor frame below it. | |
| 2577 Label invoke, adapted; | |
| 2578 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
| 2579 __ Cmp(Operand(rdx, StandardFrameConstants::kContextOffset), | |
| 2580 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
| 2581 __ j(equal, &adapted); | |
| 2582 | |
| 2583 // No arguments adaptor frame. Copy fixed number of arguments. | |
| 2584 __ Set(rax, scope()->num_parameters()); | |
| 2585 for (int i = 0; i < scope()->num_parameters(); i++) { | |
| 2586 __ push(frame_->ParameterAt(i)); | |
| 2587 } | |
| 2588 __ jmp(&invoke); | |
| 2589 | |
| 2590 // Arguments adaptor frame present. Copy arguments from there, but | |
| 2591 // cap the number copied to avoid stack overflow. | |
| 2592 __ bind(&adapted); | |
| 2593 static const uint32_t kArgumentsLimit = 1 * KB; | |
| 2594 __ SmiToInteger32(rax, | |
| 2595 Operand(rdx, | |
| 2596 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 2597 __ movl(rcx, rax); | |
| 2598 __ cmpl(rax, Immediate(kArgumentsLimit)); | |
| 2599 __ j(above, &build_args); | |
| 2600 | |
| 2601 // Loop through the arguments pushing them onto the execution | |
| 2602 // stack. We don't inform the virtual frame of the push, so we don't | |
| 2603 // have to worry about getting rid of the elements from the virtual | |
| 2604 // frame. | |
| 2605 Label loop; | |
| 2606 // rcx is a small non-negative integer, due to the test above. | |
| 2607 __ testl(rcx, rcx); | |
| 2608 __ j(zero, &invoke); | |
| 2609 __ bind(&loop); | |
| 2610 __ push(Operand(rdx, rcx, times_pointer_size, 1 * kPointerSize)); | |
| 2611 __ decl(rcx); | |
| 2612 __ j(not_zero, &loop); | |
| 2613 | |
| 2614 // Invoke the function. | |
| 2615 __ bind(&invoke); | |
| 2616 ParameterCount actual(rax); | |
| 2617 __ InvokeFunction(rdi, actual, CALL_FUNCTION); | |
| 2618 // Drop applicand.apply and applicand from the stack, and push | |
| 2619 // the result of the function call, but leave the spilled frame | |
| 2620 // unchanged, with 3 elements, so it is correct when we compile the | |
| 2621 // slow-case code. | |
| 2622 __ addq(rsp, Immediate(2 * kPointerSize)); | |
| 2623 __ push(rax); | |
| 2624 // Stack now has 1 element: | |
| 2625 // rsp[0]: result | |
| 2626 __ jmp(&done); | |
| 2627 | |
| 2628 // Slow-case: Allocate the arguments object since we know it isn't | |
| 2629 // there, and fall-through to the slow-case where we call | |
| 2630 // applicand.apply. | |
| 2631 __ bind(&build_args); | |
| 2632 // Stack now has 3 elements, because we have jumped from a point where: | |
| 2633 // rsp[0]: receiver | |
| 2634 // rsp[1]: applicand.apply | |
| 2635 // rsp[2]: applicand. | |
| 2636 | |
| 2637 // StoreArgumentsObject requires a correct frame, and may modify it. | |
| 2638 Result arguments_object = StoreArgumentsObject(false); | |
| 2639 frame_->SpillAll(); | |
| 2640 arguments_object.ToRegister(); | |
| 2641 frame_->EmitPush(arguments_object.reg()); | |
| 2642 arguments_object.Unuse(); | |
| 2643 // Stack and frame now have 4 elements. | |
| 2644 __ bind(&slow); | |
| 2645 } | |
| 2646 | |
| 2647 // Generic computation of x.apply(y, args) with no special optimization. | |
| 2648 // Flip applicand.apply and applicand on the stack, so | |
| 2649 // applicand looks like the receiver of the applicand.apply call. | |
| 2650 // Then process it as a normal function call. | |
| 2651 __ movq(rax, Operand(rsp, 3 * kPointerSize)); | |
| 2652 __ movq(rbx, Operand(rsp, 2 * kPointerSize)); | |
| 2653 __ movq(Operand(rsp, 2 * kPointerSize), rax); | |
| 2654 __ movq(Operand(rsp, 3 * kPointerSize), rbx); | |
| 2655 | |
| 2656 CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS); | |
| 2657 Result res = frame_->CallStub(&call_function, 3); | |
| 2658 // The function and its two arguments have been dropped. | |
| 2659 frame_->Drop(1); // Drop the receiver as well. | |
| 2660 res.ToRegister(); | |
| 2661 frame_->EmitPush(res.reg()); | |
| 2662 // Stack now has 1 element: | |
| 2663 // rsp[0]: result | |
| 2664 if (try_lazy) __ bind(&done); | |
| 2665 } // End of spilled scope. | |
| 2666 // Restore the context register after a call. | |
| 2667 frame_->RestoreContextRegister(); | |
| 2668 } | |
| 2669 | |
| 2670 | |
| 2671 class DeferredStackCheck: public DeferredCode { | |
| 2672 public: | |
| 2673 DeferredStackCheck() { | |
| 2674 set_comment("[ DeferredStackCheck"); | |
| 2675 } | |
| 2676 | |
| 2677 virtual void Generate(); | |
| 2678 }; | |
| 2679 | |
| 2680 | |
| 2681 void DeferredStackCheck::Generate() { | |
| 2682 StackCheckStub stub; | |
| 2683 __ CallStub(&stub); | |
| 2684 } | |
| 2685 | |
| 2686 | |
| 2687 void CodeGenerator::CheckStack() { | |
| 2688 DeferredStackCheck* deferred = new DeferredStackCheck; | |
| 2689 __ CompareRoot(rsp, Heap::kStackLimitRootIndex); | |
| 2690 deferred->Branch(below); | |
| 2691 deferred->BindExit(); | |
| 2692 } | |
| 2693 | |
| 2694 | |
| 2695 void CodeGenerator::VisitAndSpill(Statement* statement) { | |
| 2696 ASSERT(in_spilled_code()); | |
| 2697 set_in_spilled_code(false); | |
| 2698 Visit(statement); | |
| 2699 if (frame_ != NULL) { | |
| 2700 frame_->SpillAll(); | |
| 2701 } | |
| 2702 set_in_spilled_code(true); | |
| 2703 } | |
| 2704 | |
| 2705 | |
| 2706 void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) { | |
| 2707 #ifdef DEBUG | |
| 2708 int original_height = frame_->height(); | |
| 2709 #endif | |
| 2710 ASSERT(in_spilled_code()); | |
| 2711 set_in_spilled_code(false); | |
| 2712 VisitStatements(statements); | |
| 2713 if (frame_ != NULL) { | |
| 2714 frame_->SpillAll(); | |
| 2715 } | |
| 2716 set_in_spilled_code(true); | |
| 2717 | |
| 2718 ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 2719 } | |
| 2720 | |
| 2721 | |
| 2722 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) { | |
| 2723 #ifdef DEBUG | |
| 2724 int original_height = frame_->height(); | |
| 2725 #endif | |
| 2726 ASSERT(!in_spilled_code()); | |
| 2727 for (int i = 0; has_valid_frame() && i < statements->length(); i++) { | |
| 2728 Visit(statements->at(i)); | |
| 2729 } | |
| 2730 ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 2731 } | |
| 2732 | |
| 2733 | |
| 2734 void CodeGenerator::VisitBlock(Block* node) { | |
| 2735 ASSERT(!in_spilled_code()); | |
| 2736 Comment cmnt(masm_, "[ Block"); | |
| 2737 CodeForStatementPosition(node); | |
| 2738 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 2739 VisitStatements(node->statements()); | |
| 2740 if (node->break_target()->is_linked()) { | |
| 2741 node->break_target()->Bind(); | |
| 2742 } | |
| 2743 node->break_target()->Unuse(); | |
| 2744 } | |
| 2745 | |
| 2746 | |
| 2747 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { | |
| 2748 // Call the runtime to declare the globals. The inevitable call | |
| 2749 // will sync frame elements to memory anyway, so we do it eagerly to | |
| 2750 // allow us to push the arguments directly into place. | |
| 2751 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 2752 | |
| 2753 __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT); | |
| 2754 frame_->EmitPush(rsi); // The context is the first argument. | |
| 2755 frame_->EmitPush(kScratchRegister); | |
| 2756 frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0)); | |
| 2757 frame_->EmitPush(Smi::FromInt(strict_mode_flag())); | |
| 2758 Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 4); | |
| 2759 // Return value is ignored. | |
| 2760 } | |
| 2761 | |
| 2762 | |
| 2763 void CodeGenerator::VisitDeclaration(Declaration* node) { | |
| 2764 Comment cmnt(masm_, "[ Declaration"); | |
| 2765 Variable* var = node->proxy()->var(); | |
| 2766 ASSERT(var != NULL); // must have been resolved | |
| 2767 Slot* slot = var->AsSlot(); | |
| 2768 | |
| 2769 // If it was not possible to allocate the variable at compile time, | |
| 2770 // we need to "declare" it at runtime to make sure it actually | |
| 2771 // exists in the local context. | |
| 2772 if (slot != NULL && slot->type() == Slot::LOOKUP) { | |
| 2773 // Variables with a "LOOKUP" slot were introduced as non-locals | |
| 2774 // during variable resolution and must have mode DYNAMIC. | |
| 2775 ASSERT(var->is_dynamic()); | |
| 2776 // For now, just do a runtime call. Sync the virtual frame eagerly | |
| 2777 // so we can simply push the arguments into place. | |
| 2778 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 2779 frame_->EmitPush(rsi); | |
| 2780 __ movq(kScratchRegister, var->name(), RelocInfo::EMBEDDED_OBJECT); | |
| 2781 frame_->EmitPush(kScratchRegister); | |
| 2782 // Declaration nodes are always introduced in one of two modes. | |
| 2783 ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST); | |
| 2784 PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY; | |
| 2785 frame_->EmitPush(Smi::FromInt(attr)); | |
| 2786 // Push initial value, if any. | |
| 2787 // Note: For variables we must not push an initial value (such as | |
| 2788 // 'undefined') because we may have a (legal) redeclaration and we | |
| 2789 // must not destroy the current value. | |
| 2790 if (node->mode() == Variable::CONST) { | |
| 2791 frame_->EmitPush(Heap::kTheHoleValueRootIndex); | |
| 2792 } else if (node->fun() != NULL) { | |
| 2793 Load(node->fun()); | |
| 2794 } else { | |
| 2795 frame_->EmitPush(Smi::FromInt(0)); // no initial value! | |
| 2796 } | |
| 2797 Result ignored = frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); | |
| 2798 // Ignore the return value (declarations are statements). | |
| 2799 return; | |
| 2800 } | |
| 2801 | |
| 2802 ASSERT(!var->is_global()); | |
| 2803 | |
| 2804 // If we have a function or a constant, we need to initialize the variable. | |
| 2805 Expression* val = NULL; | |
| 2806 if (node->mode() == Variable::CONST) { | |
| 2807 val = new Literal(FACTORY->the_hole_value()); | |
| 2808 } else { | |
| 2809 val = node->fun(); // NULL if we don't have a function | |
| 2810 } | |
| 2811 | |
| 2812 if (val != NULL) { | |
| 2813 { | |
| 2814 // Set the initial value. | |
| 2815 Reference target(this, node->proxy()); | |
| 2816 Load(val); | |
| 2817 target.SetValue(NOT_CONST_INIT); | |
| 2818 // The reference is removed from the stack (preserving TOS) when | |
| 2819 // it goes out of scope. | |
| 2820 } | |
| 2821 // Get rid of the assigned value (declarations are statements). | |
| 2822 frame_->Drop(); | |
| 2823 } | |
| 2824 } | |
| 2825 | |
| 2826 | |
| 2827 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { | |
| 2828 ASSERT(!in_spilled_code()); | |
| 2829 Comment cmnt(masm_, "[ ExpressionStatement"); | |
| 2830 CodeForStatementPosition(node); | |
| 2831 Expression* expression = node->expression(); | |
| 2832 expression->MarkAsStatement(); | |
| 2833 Load(expression); | |
| 2834 // Remove the lingering expression result from the top of stack. | |
| 2835 frame_->Drop(); | |
| 2836 } | |
| 2837 | |
| 2838 | |
| 2839 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) { | |
| 2840 ASSERT(!in_spilled_code()); | |
| 2841 Comment cmnt(masm_, "[ EmptyStatement"); | |
| 2842 CodeForStatementPosition(node); | |
| 2843 // nothing to do | |
| 2844 } | |
| 2845 | |
| 2846 | |
| 2847 void CodeGenerator::VisitIfStatement(IfStatement* node) { | |
| 2848 ASSERT(!in_spilled_code()); | |
| 2849 Comment cmnt(masm_, "[ IfStatement"); | |
| 2850 // Generate different code depending on which parts of the if statement | |
| 2851 // are present or not. | |
| 2852 bool has_then_stm = node->HasThenStatement(); | |
| 2853 bool has_else_stm = node->HasElseStatement(); | |
| 2854 | |
| 2855 CodeForStatementPosition(node); | |
| 2856 JumpTarget exit; | |
| 2857 if (has_then_stm && has_else_stm) { | |
| 2858 JumpTarget then; | |
| 2859 JumpTarget else_; | |
| 2860 ControlDestination dest(&then, &else_, true); | |
| 2861 LoadCondition(node->condition(), &dest, true); | |
| 2862 | |
| 2863 if (dest.false_was_fall_through()) { | |
| 2864 // The else target was bound, so we compile the else part first. | |
| 2865 Visit(node->else_statement()); | |
| 2866 | |
| 2867 // We may have dangling jumps to the then part. | |
| 2868 if (then.is_linked()) { | |
| 2869 if (has_valid_frame()) exit.Jump(); | |
| 2870 then.Bind(); | |
| 2871 Visit(node->then_statement()); | |
| 2872 } | |
| 2873 } else { | |
| 2874 // The then target was bound, so we compile the then part first. | |
| 2875 Visit(node->then_statement()); | |
| 2876 | |
| 2877 if (else_.is_linked()) { | |
| 2878 if (has_valid_frame()) exit.Jump(); | |
| 2879 else_.Bind(); | |
| 2880 Visit(node->else_statement()); | |
| 2881 } | |
| 2882 } | |
| 2883 | |
| 2884 } else if (has_then_stm) { | |
| 2885 ASSERT(!has_else_stm); | |
| 2886 JumpTarget then; | |
| 2887 ControlDestination dest(&then, &exit, true); | |
| 2888 LoadCondition(node->condition(), &dest, true); | |
| 2889 | |
| 2890 if (dest.false_was_fall_through()) { | |
| 2891 // The exit label was bound. We may have dangling jumps to the | |
| 2892 // then part. | |
| 2893 if (then.is_linked()) { | |
| 2894 exit.Unuse(); | |
| 2895 exit.Jump(); | |
| 2896 then.Bind(); | |
| 2897 Visit(node->then_statement()); | |
| 2898 } | |
| 2899 } else { | |
| 2900 // The then label was bound. | |
| 2901 Visit(node->then_statement()); | |
| 2902 } | |
| 2903 | |
| 2904 } else if (has_else_stm) { | |
| 2905 ASSERT(!has_then_stm); | |
| 2906 JumpTarget else_; | |
| 2907 ControlDestination dest(&exit, &else_, false); | |
| 2908 LoadCondition(node->condition(), &dest, true); | |
| 2909 | |
| 2910 if (dest.true_was_fall_through()) { | |
| 2911 // The exit label was bound. We may have dangling jumps to the | |
| 2912 // else part. | |
| 2913 if (else_.is_linked()) { | |
| 2914 exit.Unuse(); | |
| 2915 exit.Jump(); | |
| 2916 else_.Bind(); | |
| 2917 Visit(node->else_statement()); | |
| 2918 } | |
| 2919 } else { | |
| 2920 // The else label was bound. | |
| 2921 Visit(node->else_statement()); | |
| 2922 } | |
| 2923 | |
| 2924 } else { | |
| 2925 ASSERT(!has_then_stm && !has_else_stm); | |
| 2926 // We only care about the condition's side effects (not its value | |
| 2927 // or control flow effect). LoadCondition is called without | |
| 2928 // forcing control flow. | |
| 2929 ControlDestination dest(&exit, &exit, true); | |
| 2930 LoadCondition(node->condition(), &dest, false); | |
| 2931 if (!dest.is_used()) { | |
| 2932 // We got a value on the frame rather than (or in addition to) | |
| 2933 // control flow. | |
| 2934 frame_->Drop(); | |
| 2935 } | |
| 2936 } | |
| 2937 | |
| 2938 if (exit.is_linked()) { | |
| 2939 exit.Bind(); | |
| 2940 } | |
| 2941 } | |
| 2942 | |
| 2943 | |
| 2944 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { | |
| 2945 ASSERT(!in_spilled_code()); | |
| 2946 Comment cmnt(masm_, "[ ContinueStatement"); | |
| 2947 CodeForStatementPosition(node); | |
| 2948 node->target()->continue_target()->Jump(); | |
| 2949 } | |
| 2950 | |
| 2951 | |
| 2952 void CodeGenerator::VisitBreakStatement(BreakStatement* node) { | |
| 2953 ASSERT(!in_spilled_code()); | |
| 2954 Comment cmnt(masm_, "[ BreakStatement"); | |
| 2955 CodeForStatementPosition(node); | |
| 2956 node->target()->break_target()->Jump(); | |
| 2957 } | |
| 2958 | |
| 2959 | |
| 2960 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { | |
| 2961 ASSERT(!in_spilled_code()); | |
| 2962 Comment cmnt(masm_, "[ ReturnStatement"); | |
| 2963 | |
| 2964 CodeForStatementPosition(node); | |
| 2965 Load(node->expression()); | |
| 2966 Result return_value = frame_->Pop(); | |
| 2967 masm()->positions_recorder()->WriteRecordedPositions(); | |
| 2968 if (function_return_is_shadowed_) { | |
| 2969 function_return_.Jump(&return_value); | |
| 2970 } else { | |
| 2971 frame_->PrepareForReturn(); | |
| 2972 if (function_return_.is_bound()) { | |
| 2973 // If the function return label is already bound we reuse the | |
| 2974 // code by jumping to the return site. | |
| 2975 function_return_.Jump(&return_value); | |
| 2976 } else { | |
| 2977 function_return_.Bind(&return_value); | |
| 2978 GenerateReturnSequence(&return_value); | |
| 2979 } | |
| 2980 } | |
| 2981 } | |
| 2982 | |
| 2983 | |
| 2984 void CodeGenerator::GenerateReturnSequence(Result* return_value) { | |
| 2985 // The return value is a live (but not currently reference counted) | |
| 2986 // reference to rax. This is safe because the current frame does not | |
| 2987 // contain a reference to rax (it is prepared for the return by spilling | |
| 2988 // all registers). | |
| 2989 if (FLAG_trace) { | |
| 2990 frame_->Push(return_value); | |
| 2991 *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1); | |
| 2992 } | |
| 2993 return_value->ToRegister(rax); | |
| 2994 | |
| 2995 // Add a label for checking the size of the code used for returning. | |
| 2996 #ifdef DEBUG | |
| 2997 Label check_exit_codesize; | |
| 2998 masm_->bind(&check_exit_codesize); | |
| 2999 #endif | |
| 3000 | |
| 3001 // Leave the frame and return popping the arguments and the | |
| 3002 // receiver. | |
| 3003 frame_->Exit(); | |
| 3004 int arguments_bytes = (scope()->num_parameters() + 1) * kPointerSize; | |
| 3005 __ Ret(arguments_bytes, rcx); | |
| 3006 DeleteFrame(); | |
| 3007 | |
| 3008 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 3009 // Add padding that will be overwritten by a debugger breakpoint. | |
| 3010 // The shortest return sequence generated is "movq rsp, rbp; pop rbp; ret k" | |
| 3011 // with length 7 (3 + 1 + 3). | |
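| // (Byte counts: movq rsp, rbp = 3 bytes, pop rbp = 1 byte, | |
| // ret k = 3 bytes.) | |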
| 3012 const int kPadding = Assembler::kJSReturnSequenceLength - 7; | |
| 3013 for (int i = 0; i < kPadding; ++i) { | |
| 3014 masm_->int3(); | |
| 3015 } | |
| 3016 // Check that the size of the code used for returning is large enough | |
| 3017 // for the debugger's requirements. | |
| 3018 ASSERT(Assembler::kJSReturnSequenceLength <= | |
| 3019 masm_->SizeOfCodeGeneratedSince(&check_exit_codesize)); | |
| 3020 #endif | |
| 3021 } | |
| 3022 | |
| 3023 | |
| 3024 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { | |
| 3025 ASSERT(!in_spilled_code()); | |
| 3026 Comment cmnt(masm_, "[ WithEnterStatement"); | |
| 3027 CodeForStatementPosition(node); | |
| 3028 Load(node->expression()); | |
| 3029 Result context; | |
| 3030 if (node->is_catch_block()) { | |
| 3031 context = frame_->CallRuntime(Runtime::kPushCatchContext, 1); | |
| 3032 } else { | |
| 3033 context = frame_->CallRuntime(Runtime::kPushContext, 1); | |
| 3034 } | |
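| // (A catch block enters a context much like a with-statement does, | |
| // hence the shared code path here; only the runtime function that | |
| // builds the context differs.) | |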
| 3035 | |
| 3036 // Update context local. | |
| 3037 frame_->SaveContextRegister(); | |
| 3038 | |
| 3039 // Verify that the runtime call result and rsi agree. | |
| 3040 if (FLAG_debug_code) { | |
| 3041 __ cmpq(context.reg(), rsi); | |
| 3042 __ Assert(equal, "Runtime::NewContext should end up in rsi"); | |
| 3043 } | |
| 3044 } | |
| 3045 | |
| 3046 | |
| 3047 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) { | |
| 3048 ASSERT(!in_spilled_code()); | |
| 3049 Comment cmnt(masm_, "[ WithExitStatement"); | |
| 3050 CodeForStatementPosition(node); | |
| 3051 // Pop context. | |
| 3052 __ movq(rsi, ContextOperand(rsi, Context::PREVIOUS_INDEX)); | |
| 3053 // Update context local. | |
| 3054 frame_->SaveContextRegister(); | |
| 3055 } | |
| 3056 | |
| 3057 | |
| 3058 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { | |
| 3059 ASSERT(!in_spilled_code()); | |
| 3060 Comment cmnt(masm_, "[ SwitchStatement"); | |
| 3061 CodeForStatementPosition(node); | |
| 3062 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3063 | |
| 3064 // Compile the switch value. | |
| 3065 Load(node->tag()); | |
| 3066 | |
| 3067 ZoneList<CaseClause*>* cases = node->cases(); | |
| 3068 int length = cases->length(); | |
| 3069 CaseClause* default_clause = NULL; | |
| 3070 | |
| 3071 JumpTarget next_test; | |
| 3072 // Compile the case label expressions and comparisons. Exit early | |
| 3073 // if a comparison is unconditionally true. The target next_test is | |
| 3074 // bound before the loop in order to indicate control flow to the | |
| 3075 // first comparison. | |
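| // (Illustrative: for "switch (x) { case a: ...; case b: ...; }" each | |
| // non-default clause below duplicates the switch value, loads its | |
| // label expression, and compares; the default clause is remembered | |
| // and handled after the loop.) | |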
| 3076 next_test.Bind(); | |
| 3077 for (int i = 0; i < length && !next_test.is_unused(); i++) { | |
| 3078 CaseClause* clause = cases->at(i); | |
| 3079 // The default is not a test, but remember it for later. | |
| 3080 if (clause->is_default()) { | |
| 3081 default_clause = clause; | |
| 3082 continue; | |
| 3083 } | |
| 3084 | |
| 3085 Comment cmnt(masm_, "[ Case comparison"); | |
| 3086 // We recycle the same target next_test for each test. Bind it if | |
| 3087 // the previous test has not done so and then unuse it for the | |
| 3088 // loop. | |
| 3089 if (next_test.is_linked()) { | |
| 3090 next_test.Bind(); | |
| 3091 } | |
| 3092 next_test.Unuse(); | |
| 3093 | |
| 3094 // Duplicate the switch value. | |
| 3095 frame_->Dup(); | |
| 3096 | |
| 3097 // Compile the label expression. | |
| 3098 Load(clause->label()); | |
| 3099 | |
| 3100 // Compare and branch to the body if true or the next test if | |
| 3101 // false. Prefer the next test as a fall through. | |
| 3102 ControlDestination dest(clause->body_target(), &next_test, false); | |
| 3103 Comparison(node, equal, true, &dest); | |
| 3104 | |
| 3105 // If the comparison fell through to the true target, jump to the | |
| 3106 // actual body. | |
| 3107 if (dest.true_was_fall_through()) { | |
| 3108 clause->body_target()->Unuse(); | |
| 3109 clause->body_target()->Jump(); | |
| 3110 } | |
| 3111 } | |
| 3112 | |
| 3113 // If there was control flow to a next test from the last one | |
| 3114 // compiled, compile a jump to the default or break target. | |
| 3115 if (!next_test.is_unused()) { | |
| 3116 if (next_test.is_linked()) { | |
| 3117 next_test.Bind(); | |
| 3118 } | |
| 3119 // Drop the switch value. | |
| 3120 frame_->Drop(); | |
| 3121 if (default_clause != NULL) { | |
| 3122 default_clause->body_target()->Jump(); | |
| 3123 } else { | |
| 3124 node->break_target()->Jump(); | |
| 3125 } | |
| 3126 } | |
| 3127 | |
| 3128 // The last instruction emitted was a jump, either to the default | |
| 3129 // clause or the break target, or else to a case body from the loop | |
| 3130 // that compiles the tests. | |
| 3131 ASSERT(!has_valid_frame()); | |
| 3132 // Compile case bodies as needed. | |
| 3133 for (int i = 0; i < length; i++) { | |
| 3134 CaseClause* clause = cases->at(i); | |
| 3135 | |
| 3136 // There are two ways to reach the body: from the corresponding | |
| 3137 // test or as the fall through of the previous body. | |
| 3138 if (clause->body_target()->is_linked() || has_valid_frame()) { | |
| 3139 if (clause->body_target()->is_linked()) { | |
| 3140 if (has_valid_frame()) { | |
| 3141 // If we have both a jump to the test and a fall through, put | |
| 3142 // a jump on the fall-through path to avoid dropping the switch | |
| 3143 // value on the test path. The exception is the default clause, | |
| 3144 // which has already had the switch value dropped. | |
| 3145 if (clause->is_default()) { | |
| 3146 clause->body_target()->Bind(); | |
| 3147 } else { | |
| 3148 JumpTarget body; | |
| 3149 body.Jump(); | |
| 3150 clause->body_target()->Bind(); | |
| 3151 frame_->Drop(); | |
| 3152 body.Bind(); | |
| 3153 } | |
| 3154 } else { | |
| 3155 // No fall through to worry about. | |
| 3156 clause->body_target()->Bind(); | |
| 3157 if (!clause->is_default()) { | |
| 3158 frame_->Drop(); | |
| 3159 } | |
| 3160 } | |
| 3161 } else { | |
| 3162 // Otherwise, we have only fall through. | |
| 3163 ASSERT(has_valid_frame()); | |
| 3164 } | |
| 3165 | |
| 3166 // We are now prepared to compile the body. | |
| 3167 Comment cmnt(masm_, "[ Case body"); | |
| 3168 VisitStatements(clause->statements()); | |
| 3169 } | |
| 3170 clause->body_target()->Unuse(); | |
| 3171 } | |
| 3172 | |
| 3173 // We may not have a valid frame here so bind the break target only | |
| 3174 // if needed. | |
| 3175 if (node->break_target()->is_linked()) { | |
| 3176 node->break_target()->Bind(); | |
| 3177 } | |
| 3178 node->break_target()->Unuse(); | |
| 3179 } | |
| 3180 | |
| 3181 | |
| 3182 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { | |
| 3183 ASSERT(!in_spilled_code()); | |
| 3184 Comment cmnt(masm_, "[ DoWhileStatement"); | |
| 3185 CodeForStatementPosition(node); | |
| 3186 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3187 JumpTarget body(JumpTarget::BIDIRECTIONAL); | |
| 3188 IncrementLoopNesting(); | |
| 3189 | |
| 3190 ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 3191 // Label the top of the loop for the backward jump if necessary. | |
| 3192 switch (info) { | |
| 3193 case ALWAYS_TRUE: | |
| 3194 // Use the continue target. | |
| 3195 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3196 node->continue_target()->Bind(); | |
| 3197 break; | |
| 3198 case ALWAYS_FALSE: | |
| 3199 // No need to label it. | |
| 3200 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3201 break; | |
| 3202 case DONT_KNOW: | |
| 3203 // Continue is the test, so use the backward body target. | |
| 3204 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3205 body.Bind(); | |
| 3206 break; | |
| 3207 } | |
| 3208 | |
| 3209 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 3210 Visit(node->body()); | |
| 3211 | |
| 3212 // Compile the test. | |
| 3213 switch (info) { | |
| 3214 case ALWAYS_TRUE: | |
| 3215 // If control flow can fall off the end of the body, jump back | |
| 3216 // to the top and bind the break target at the exit. | |
| 3217 if (has_valid_frame()) { | |
| 3218 node->continue_target()->Jump(); | |
| 3219 } | |
| 3220 if (node->break_target()->is_linked()) { | |
| 3221 node->break_target()->Bind(); | |
| 3222 } | |
| 3223 break; | |
| 3224 case ALWAYS_FALSE: | |
| 3225 // We may have had continues or breaks in the body. | |
| 3226 if (node->continue_target()->is_linked()) { | |
| 3227 node->continue_target()->Bind(); | |
| 3228 } | |
| 3229 if (node->break_target()->is_linked()) { | |
| 3230 node->break_target()->Bind(); | |
| 3231 } | |
| 3232 break; | |
| 3233 case DONT_KNOW: | |
| 3234 // We have to compile the test expression if it can be reached by | |
| 3235 // control flow falling out of the body or via continue. | |
| 3236 if (node->continue_target()->is_linked()) { | |
| 3237 node->continue_target()->Bind(); | |
| 3238 } | |
| 3239 if (has_valid_frame()) { | |
| 3240 Comment cmnt(masm_, "[ DoWhileCondition"); | |
| 3241 CodeForDoWhileConditionPosition(node); | |
| 3242 ControlDestination dest(&body, node->break_target(), false); | |
| 3243 LoadCondition(node->cond(), &dest, true); | |
| 3244 } | |
| 3245 if (node->break_target()->is_linked()) { | |
| 3246 node->break_target()->Bind(); | |
| 3247 } | |
| 3248 break; | |
| 3249 } | |
| 3250 | |
| 3251 DecrementLoopNesting(); | |
| 3252 node->continue_target()->Unuse(); | |
| 3253 node->break_target()->Unuse(); | |
| 3254 } | |
| 3255 | |
| 3256 | |
| 3257 void CodeGenerator::VisitWhileStatement(WhileStatement* node) { | |
| 3258 ASSERT(!in_spilled_code()); | |
| 3259 Comment cmnt(masm_, "[ WhileStatement"); | |
| 3260 CodeForStatementPosition(node); | |
| 3261 | |
| 3262 // If the condition is always false and has no side effects, we do not | |
| 3263 // need to compile anything. | |
| 3264 ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 3265 if (info == ALWAYS_FALSE) return; | |
| 3266 | |
| 3267 // Do not duplicate conditions that may have function literal | |
| 3268 // subexpressions. This can cause us to compile the function literal | |
| 3269 // twice. | |
| 3270 bool test_at_bottom = !node->may_have_function_literal(); | |
| 3271 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3272 IncrementLoopNesting(); | |
| 3273 JumpTarget body; | |
| 3274 if (test_at_bottom) { | |
| 3275 body.set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3276 } | |
| 3277 | |
| 3278 // Based on the condition analysis, compile the test as necessary. | |
| 3279 switch (info) { | |
| 3280 case ALWAYS_TRUE: | |
| 3281 // We will not compile the test expression. Label the top of the | |
| 3282 // loop with the continue target. | |
| 3283 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3284 node->continue_target()->Bind(); | |
| 3285 break; | |
| 3286 case DONT_KNOW: { | |
| 3287 if (test_at_bottom) { | |
| 3288 // Continue is the test at the bottom; no need to label the test | |
| 3289 // at the top. The body is a backward target. | |
| 3290 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3291 } else { | |
| 3292 // Label the test at the top as the continue target. The body | |
| 3293 // is a forward-only target. | |
| 3294 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3295 node->continue_target()->Bind(); | |
| 3296 } | |
| 3297 // Compile the test with the body as the true target and preferred | |
| 3298 // fall-through and with the break target as the false target. | |
| 3299 ControlDestination dest(&body, node->break_target(), true); | |
| 3300 LoadCondition(node->cond(), &dest, true); | |
| 3301 | |
| 3302 if (dest.false_was_fall_through()) { | |
| 3303 // If we got the break target as fall-through, the test may have | |
| 3304 // been unconditionally false (if there are no jumps to the | |
| 3305 // body). | |
| 3306 if (!body.is_linked()) { | |
| 3307 DecrementLoopNesting(); | |
| 3308 return; | |
| 3309 } | |
| 3310 | |
| 3311 // Otherwise, jump around the body on the fall through and then | |
| 3312 // bind the body target. | |
| 3313 node->break_target()->Unuse(); | |
| 3314 node->break_target()->Jump(); | |
| 3315 body.Bind(); | |
| 3316 } | |
| 3317 break; | |
| 3318 } | |
| 3319 case ALWAYS_FALSE: | |
| 3320 UNREACHABLE(); | |
| 3321 break; | |
| 3322 } | |
| 3323 | |
| 3324 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 3325 Visit(node->body()); | |
| 3326 | |
| 3327 // Based on the condition analysis, compile the backward jump as | |
| 3328 // necessary. | |
| 3329 switch (info) { | |
| 3330 case ALWAYS_TRUE: | |
| 3331 // The loop body has been labeled with the continue target. | |
| 3332 if (has_valid_frame()) { | |
| 3333 node->continue_target()->Jump(); | |
| 3334 } | |
| 3335 break; | |
| 3336 case DONT_KNOW: | |
| 3337 if (test_at_bottom) { | |
| 3338 // If we have chosen to recompile the test at the bottom, | |
| 3339 // then it is the continue target. | |
| 3340 if (node->continue_target()->is_linked()) { | |
| 3341 node->continue_target()->Bind(); | |
| 3342 } | |
| 3343 if (has_valid_frame()) { | |
| 3344 // The break target is the fall-through (body is a backward | |
| 3345 // jump from here and thus an invalid fall-through). | |
| 3346 ControlDestination dest(&body, node->break_target(), false); | |
| 3347 LoadCondition(node->cond(), &dest, true); | |
| 3348 } | |
| 3349 } else { | |
| 3350 // If we have chosen not to recompile the test at the bottom, | |
| 3351 // jump back to the one at the top. | |
| 3352 if (has_valid_frame()) { | |
| 3353 node->continue_target()->Jump(); | |
| 3354 } | |
| 3355 } | |
| 3356 break; | |
| 3357 case ALWAYS_FALSE: | |
| 3358 UNREACHABLE(); | |
| 3359 break; | |
| 3360 } | |
| 3361 | |
| 3362 // The break target may be already bound (by the condition), or there | |
| 3363 // may not be a valid frame. Bind it only if needed. | |
| 3364 if (node->break_target()->is_linked()) { | |
| 3365 node->break_target()->Bind(); | |
| 3366 } | |
| 3367 DecrementLoopNesting(); | |
| 3368 } | |
| 3369 | |
| 3370 | |
| 3371 void CodeGenerator::SetTypeForStackSlot(Slot* slot, TypeInfo info) { | |
| 3372 ASSERT(slot->type() == Slot::LOCAL || slot->type() == Slot::PARAMETER); | |
| 3373 if (slot->type() == Slot::LOCAL) { | |
| 3374 frame_->SetTypeForLocalAt(slot->index(), info); | |
| 3375 } else { | |
| 3376 frame_->SetTypeForParamAt(slot->index(), info); | |
| 3377 } | |
| 3378 if (FLAG_debug_code && info.IsSmi()) { | |
| 3379 if (slot->type() == Slot::LOCAL) { | |
| 3380 frame_->PushLocalAt(slot->index()); | |
| 3381 } else { | |
| 3382 frame_->PushParameterAt(slot->index()); | |
| 3383 } | |
| 3384 Result var = frame_->Pop(); | |
| 3385 var.ToRegister(); | |
| 3386 __ AbortIfNotSmi(var.reg()); | |
| 3387 } | |
| 3388 } | |
| 3389 | |
| 3390 | |
| 3391 void CodeGenerator::GenerateFastSmiLoop(ForStatement* node) { | |
| 3392 // A fast smi loop is a for loop with an initializer | |
| 3393 // that is a simple assignment of a smi to a stack variable, | |
| 3394 // a test that is a simple test of that variable against a smi constant, | |
| 3395 // and a step that is an increment/decrement of the variable, and | |
| 3396 // where the variable isn't modified in the loop body. | |
| 3397 // This guarantees that the variable is always a smi. | |
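| // (Illustrative example of a loop that takes this fast path: | |
| // "for (var i = 0; i < 100; i++) { sum += i; }" where the body | |
| // never assigns to i.) | |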
| 3398 | |
| 3399 Variable* loop_var = node->loop_variable(); | |
| 3400 Smi* initial_value = *Handle<Smi>::cast(node->init() | |
| 3401 ->StatementAsSimpleAssignment()->value()->AsLiteral()->handle()); | |
| 3402 Smi* limit_value = *Handle<Smi>::cast( | |
| 3403 node->cond()->AsCompareOperation()->right()->AsLiteral()->handle()); | |
| 3404 Token::Value compare_op = | |
| 3405 node->cond()->AsCompareOperation()->op(); | |
| 3406 bool increments = | |
| 3407 node->next()->StatementAsCountOperation()->op() == Token::INC; | |
| 3408 | |
| 3409 // If the condition is initially false, emit no code for the loop. | |
| 3410 bool initially_false = false; | |
| 3411 int initial_int_value = initial_value->value(); | |
| 3412 int limit_int_value = limit_value->value(); | |
| 3413 switch (compare_op) { | |
| 3414 case Token::LT: | |
| 3415 initially_false = initial_int_value >= limit_int_value; | |
| 3416 break; | |
| 3417 case Token::LTE: | |
| 3418 initially_false = initial_int_value > limit_int_value; | |
| 3419 break; | |
| 3420 case Token::GT: | |
| 3421 initially_false = initial_int_value <= limit_int_value; | |
| 3422 break; | |
| 3423 case Token::GTE: | |
| 3424 initially_false = initial_int_value < limit_int_value; | |
| 3425 break; | |
| 3426 default: | |
| 3427 UNREACHABLE(); | |
| 3428 } | |
| 3429 if (initially_false) return; | |
| 3430 | |
| 3431 // Only check loop condition at the end. | |
| 3432 | |
| 3433 Visit(node->init()); | |
| 3434 | |
| 3435 JumpTarget loop(JumpTarget::BIDIRECTIONAL); | |
| 3436 // Set the direction of the break and continue jump targets. | |
| 3437 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3438 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3439 | |
| 3440 IncrementLoopNesting(); | |
| 3441 loop.Bind(); | |
| 3442 | |
| 3443 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 3444 | |
| 3445 // Set the number type of the loop variable to smi. | |
| 3446 SetTypeForStackSlot(loop_var->AsSlot(), TypeInfo::Smi()); | |
| 3447 Visit(node->body()); | |
| 3448 | |
| 3449 if (node->continue_target()->is_linked()) { | |
| 3450 node->continue_target()->Bind(); | |
| 3451 } | |
| 3452 | |
| 3453 if (has_valid_frame()) { | |
| 3454 CodeForStatementPosition(node); | |
| 3455 Slot* loop_var_slot = loop_var->AsSlot(); | |
| 3456 if (loop_var_slot->type() == Slot::LOCAL) { | |
| 3457 frame_->TakeLocalAt(loop_var_slot->index()); | |
| 3458 } else { | |
| 3459 ASSERT(loop_var_slot->type() == Slot::PARAMETER); | |
| 3460 frame_->TakeParameterAt(loop_var_slot->index()); | |
| 3461 } | |
| 3462 Result loop_var_result = frame_->Pop(); | |
| 3463 if (!loop_var_result.is_register()) { | |
| 3464 loop_var_result.ToRegister(); | |
| 3465 } | |
| 3466 Register loop_var_reg = loop_var_result.reg(); | |
| 3467 frame_->Spill(loop_var_reg); | |
| 3468 if (increments) { | |
| 3469 __ SmiAddConstant(loop_var_reg, | |
| 3470 loop_var_reg, | |
| 3471 Smi::FromInt(1)); | |
| 3472 } else { | |
| 3473 __ SmiSubConstant(loop_var_reg, | |
| 3474 loop_var_reg, | |
| 3475 Smi::FromInt(1)); | |
| 3476 } | |
| 3477 | |
| 3478 frame_->Push(&loop_var_result); | |
| 3479 if (loop_var_slot->type() == Slot::LOCAL) { | |
| 3480 frame_->StoreToLocalAt(loop_var_slot->index()); | |
| 3481 } else { | |
| 3482 ASSERT(loop_var_slot->type() == Slot::PARAMETER); | |
| 3483 frame_->StoreToParameterAt(loop_var_slot->index()); | |
| 3484 } | |
| 3485 frame_->Drop(); | |
| 3486 | |
| 3487 __ SmiCompare(loop_var_reg, limit_value); | |
| 3488 Condition condition; | |
| 3489 switch (compare_op) { | |
| 3490 case Token::LT: | |
| 3491 condition = less; | |
| 3492 break; | |
| 3493 case Token::LTE: | |
| 3494 condition = less_equal; | |
| 3495 break; | |
| 3496 case Token::GT: | |
| 3497 condition = greater; | |
| 3498 break; | |
| 3499 case Token::GTE: | |
| 3500 condition = greater_equal; | |
| 3501 break; | |
| 3502 default: | |
| 3503 condition = never; | |
| 3504 UNREACHABLE(); | |
| 3505 } | |
| 3506 loop.Branch(condition); | |
| 3507 } | |
| 3508 if (node->break_target()->is_linked()) { | |
| 3509 node->break_target()->Bind(); | |
| 3510 } | |
| 3511 DecrementLoopNesting(); | |
| 3512 } | |
| 3513 | |
| 3514 | |
| 3515 void CodeGenerator::VisitForStatement(ForStatement* node) { | |
| 3516 ASSERT(!in_spilled_code()); | |
| 3517 Comment cmnt(masm_, "[ ForStatement"); | |
| 3518 CodeForStatementPosition(node); | |
| 3519 | |
| 3520 if (node->is_fast_smi_loop()) { | |
| 3521 GenerateFastSmiLoop(node); | |
| 3522 return; | |
| 3523 } | |
| 3524 | |
| 3525 // Compile the init expression if present. | |
| 3526 if (node->init() != NULL) { | |
| 3527 Visit(node->init()); | |
| 3528 } | |
| 3529 | |
| 3530 // If the condition is always false and has no side effects, we do not | |
| 3531 // need to compile anything else. | |
| 3532 ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 3533 if (info == ALWAYS_FALSE) return; | |
| 3534 | |
| 3535 // Do not duplicate conditions that may have function literal | |
| 3536 // subexpressions. This can cause us to compile the function literal | |
| 3537 // twice. | |
| 3538 bool test_at_bottom = !node->may_have_function_literal(); | |
| 3539 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3540 IncrementLoopNesting(); | |
| 3541 | |
| 3542 // Target for backward edge if no test at the bottom, otherwise | |
| 3543 // unused. | |
| 3544 JumpTarget loop(JumpTarget::BIDIRECTIONAL); | |
| 3545 | |
| 3546 // Target for backward edge if there is a test at the bottom, | |
| 3547 // otherwise used as target for test at the top. | |
| 3548 JumpTarget body; | |
| 3549 if (test_at_bottom) { | |
| 3550 body.set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3551 } | |
| 3552 | |
| 3553 // Based on the condition analysis, compile the test as necessary. | |
| 3554 switch (info) { | |
| 3555 case ALWAYS_TRUE: | |
| 3556 // We will not compile the test expression. Label the top of the | |
| 3557 // loop. | |
| 3558 if (node->next() == NULL) { | |
| 3559 // Use the continue target if there is no update expression. | |
| 3560 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3561 node->continue_target()->Bind(); | |
| 3562 } else { | |
| 3563 // Otherwise use the backward loop target. | |
| 3564 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3565 loop.Bind(); | |
| 3566 } | |
| 3567 break; | |
| 3568 case DONT_KNOW: { | |
| 3569 if (test_at_bottom) { | |
| 3570 // Continue is either the update expression or the test at the | |
| 3571 // bottom; no need to label the test at the top. | |
| 3572 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3573 } else if (node->next() == NULL) { | |
| 3574 // We are not recompiling the test at the bottom and there is no | |
| 3575 // update expression. | |
| 3576 node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 3577 node->continue_target()->Bind(); | |
| 3578 } else { | |
| 3579 // We are not recompiling the test at the bottom and there is an | |
| 3580 // update expression. | |
| 3581 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3582 loop.Bind(); | |
| 3583 } | |
| 3584 | |
| 3585 // Compile the test with the body as the true target and preferred | |
| 3586 // fall-through and with the break target as the false target. | |
| 3587 ControlDestination dest(&body, node->break_target(), true); | |
| 3588 LoadCondition(node->cond(), &dest, true); | |
| 3589 | |
| 3590 if (dest.false_was_fall_through()) { | |
| 3591 // If we got the break target as fall-through, the test may have | |
| 3592 // been unconditionally false (if there are no jumps to the | |
| 3593 // body). | |
| 3594 if (!body.is_linked()) { | |
| 3595 DecrementLoopNesting(); | |
| 3596 return; | |
| 3597 } | |
| 3598 | |
| 3599 // Otherwise, jump around the body on the fall through and then | |
| 3600 // bind the body target. | |
| 3601 node->break_target()->Unuse(); | |
| 3602 node->break_target()->Jump(); | |
| 3603 body.Bind(); | |
| 3604 } | |
| 3605 break; | |
| 3606 } | |
| 3607 case ALWAYS_FALSE: | |
| 3608 UNREACHABLE(); | |
| 3609 break; | |
| 3610 } | |
| 3611 | |
| 3612 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 3613 | |
| 3614 Visit(node->body()); | |
| 3615 | |
| 3616 // If there is an update expression, compile it if necessary. | |
| 3617 if (node->next() != NULL) { | |
| 3618 if (node->continue_target()->is_linked()) { | |
| 3619 node->continue_target()->Bind(); | |
| 3620 } | |
| 3621 | |
| 3622 // Control can reach the update by falling out of the body or by a | |
| 3623 // continue. | |
| 3624 if (has_valid_frame()) { | |
| 3625 // Record the source position of the statement: although this code | |
| 3626 // comes after the code for the body, it belongs to the loop | |
| 3627 // statement and not to the body. | |
| 3628 CodeForStatementPosition(node); | |
| 3629 Visit(node->next()); | |
| 3630 } | |
| 3631 } | |
| 3632 | |
| 3633 // Based on the condition analysis, compile the backward jump as | |
| 3634 // necessary. | |
| 3635 switch (info) { | |
| 3636 case ALWAYS_TRUE: | |
| 3637 if (has_valid_frame()) { | |
| 3638 if (node->next() == NULL) { | |
| 3639 node->continue_target()->Jump(); | |
| 3640 } else { | |
| 3641 loop.Jump(); | |
| 3642 } | |
| 3643 } | |
| 3644 break; | |
| 3645 case DONT_KNOW: | |
| 3646 if (test_at_bottom) { | |
| 3647 if (node->continue_target()->is_linked()) { | |
| 3648 // We can have dangling jumps to the continue target if there | |
| 3649 // was no update expression. | |
| 3650 node->continue_target()->Bind(); | |
| 3651 } | |
| 3652 // Control can reach the test at the bottom by falling out of | |
| 3653 // the body, by a continue in the body, or from the update | |
| 3654 // expression. | |
| 3655 if (has_valid_frame()) { | |
| 3656 // The break target is the fall-through (body is a backward | |
| 3657 // jump from here). | |
| 3658 ControlDestination dest(&body, node->break_target(), false); | |
| 3659 LoadCondition(node->cond(), &dest, true); | |
| 3660 } | |
| 3661 } else { | |
| 3662 // Otherwise, jump back to the test at the top. | |
| 3663 if (has_valid_frame()) { | |
| 3664 if (node->next() == NULL) { | |
| 3665 node->continue_target()->Jump(); | |
| 3666 } else { | |
| 3667 loop.Jump(); | |
| 3668 } | |
| 3669 } | |
| 3670 } | |
| 3671 break; | |
| 3672 case ALWAYS_FALSE: | |
| 3673 UNREACHABLE(); | |
| 3674 break; | |
| 3675 } | |
| 3676 | |
| 3677 // The break target may be already bound (by the condition), or there | |
| 3678 // may not be a valid frame. Bind it only if needed. | |
| 3679 if (node->break_target()->is_linked()) { | |
| 3680 node->break_target()->Bind(); | |
| 3681 } | |
| 3682 DecrementLoopNesting(); | |
| 3683 } | |
| 3684 | |
| 3685 | |
| 3686 void CodeGenerator::VisitForInStatement(ForInStatement* node) { | |
| 3687 ASSERT(!in_spilled_code()); | |
| 3688 VirtualFrame::SpilledScope spilled_scope; | |
| 3689 Comment cmnt(masm_, "[ ForInStatement"); | |
| 3690 CodeForStatementPosition(node); | |
| 3691 | |
| 3692 JumpTarget primitive; | |
| 3693 JumpTarget jsobject; | |
| 3694 JumpTarget fixed_array; | |
| 3695 JumpTarget entry(JumpTarget::BIDIRECTIONAL); | |
| 3696 JumpTarget end_del_check; | |
| 3697 JumpTarget exit; | |
| 3698 | |
| 3699 // Get the object to enumerate over (converted to JSObject). | |
| 3700 LoadAndSpill(node->enumerable()); | |
| 3701 | |
| 3702 // Both SpiderMonkey and KJS ignore null and undefined, in contrast | |
| 3703 // to the specification, whose section 12.6.4 mandates a call to ToObject. | |
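| // (So "for (var p in null) {}" simply branches to the exit below | |
| // at runtime and iterates nothing.) | |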
| 3704 frame_->EmitPop(rax); | |
| 3705 | |
| 3706 // rax: value to be iterated over | |
| 3707 __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); | |
| 3708 exit.Branch(equal); | |
| 3709 __ CompareRoot(rax, Heap::kNullValueRootIndex); | |
| 3710 exit.Branch(equal); | |
| 3711 | |
| 3712 // Stack layout in body: | |
| 3713 // [iteration counter (smi)] <- slot 0 | |
| 3714 // [length of array] <- slot 1 | |
| 3715 // [FixedArray] <- slot 2 | |
| 3716 // [Map or 0] <- slot 3 | |
| 3717 // [Object] <- slot 4 | |
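| // (These five slots are dropped again in one go at the break target | |
| // below, via frame_->Drop(5).) | |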
| 3718 | |
| 3719 // Check if enumerable is already a JSObject | |
| 3720 // rax: value to be iterated over | |
| 3721 Condition is_smi = masm_->CheckSmi(rax); | |
| 3722 primitive.Branch(is_smi); | |
| 3723 __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx); | |
| 3724 jsobject.Branch(above_equal); | |
| 3725 | |
| 3726 primitive.Bind(); | |
| 3727 frame_->EmitPush(rax); | |
| 3728 frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1); | |
| 3729 // The function call returns the value in rax, which is where we want it below. | |
| 3730 | |
| 3731 jsobject.Bind(); | |
| 3732 // Get the set of properties (as a FixedArray or Map). | |
| 3733 // rax: value to be iterated over | |
| 3734 frame_->EmitPush(rax); // Push the object being iterated over. | |
| 3735 | |
| 3737 // Check cache validity in generated code. This is a fast case for | |
| 3738 // the JSObject::IsSimpleEnum cache validity checks. If we cannot | |
| 3739 // guarantee cache validity, call the runtime system to check cache | |
| 3740 // validity or get the property names in a fixed array. | |
| 3741 JumpTarget call_runtime; | |
| 3742 JumpTarget loop(JumpTarget::BIDIRECTIONAL); | |
| 3743 JumpTarget check_prototype; | |
| 3744 JumpTarget use_cache; | |
| 3745 __ movq(rcx, rax); | |
| 3746 loop.Bind(); | |
| 3747 // Check that there are no elements. | |
| 3748 __ movq(rdx, FieldOperand(rcx, JSObject::kElementsOffset)); | |
| 3749 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex); | |
| 3750 call_runtime.Branch(not_equal); | |
| 3751 // Check that instance descriptors are not empty so that we can | |
| 3752 // check for an enum cache. Leave the map in rbx for the subsequent | |
| 3753 // prototype load. | |
| 3754 __ movq(rbx, FieldOperand(rcx, HeapObject::kMapOffset)); | |
| 3755 __ movq(rdx, FieldOperand(rbx, Map::kInstanceDescriptorsOffset)); | |
| 3756 __ CompareRoot(rdx, Heap::kEmptyDescriptorArrayRootIndex); | |
| 3757 call_runtime.Branch(equal); | |
| 3758 // Check that there is an enum cache in the non-empty instance | |
| 3759 // descriptors. This is the case if the next enumeration index | |
| 3760 // field does not contain a smi. | |
| 3761 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumerationIndexOffset)); | |
| 3762 is_smi = masm_->CheckSmi(rdx); | |
| 3763 call_runtime.Branch(is_smi); | |
| 3764 // For all objects but the receiver, check that the cache is empty. | |
| 3765 __ cmpq(rcx, rax); | |
| 3766 check_prototype.Branch(equal); | |
| 3767 __ movq(rdx, FieldOperand(rdx, DescriptorArray::kEnumCacheBridgeCacheOffset)); | |
| 3768 __ CompareRoot(rdx, Heap::kEmptyFixedArrayRootIndex); | |
| 3769 call_runtime.Branch(not_equal); | |
| 3770 check_prototype.Bind(); | |
| 3771 // Load the prototype from the map and loop if non-null. | |
| 3772 __ movq(rcx, FieldOperand(rbx, Map::kPrototypeOffset)); | |
| 3773 __ CompareRoot(rcx, Heap::kNullValueRootIndex); | |
| 3774 loop.Branch(not_equal); | |
| 3775 // The enum cache is valid. Load the map of the object being | |
| 3776 // iterated over and use the cache for the iteration. | |
| 3777 __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); | |
| 3778 use_cache.Jump(); | |
| 3779 | |
| 3780 call_runtime.Bind(); | |
| 3781 // Call the runtime to get the property names for the object. | |
| 3782 frame_->EmitPush(rax); // push the Object (slot 4) for the runtime call | |
| 3783 frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1); | |
| 3784 | |
| 3785 // If we got a Map, we can do a fast modification check. | |
| 3786 // Otherwise, we got a FixedArray, and we have to do a slow check. | |
| 3787 // rax: map or fixed array (result from call to | |
| 3788 // Runtime::kGetPropertyNamesFast) | |
| 3789 __ movq(rdx, rax); | |
| 3790 __ movq(rcx, FieldOperand(rdx, HeapObject::kMapOffset)); | |
| 3791 __ CompareRoot(rcx, Heap::kMetaMapRootIndex); | |
| 3792 fixed_array.Branch(not_equal); | |
| 3793 | |
| 3794 use_cache.Bind(); | |
| 3795 // Get enum cache | |
| 3796 // rax: map (either the result from a call to | |
| 3797 // Runtime::kGetPropertyNamesFast or has been fetched directly from | |
| 3798 // the object) | |
| 3799 __ movq(rcx, rax); | |
| 3800 __ movq(rcx, FieldOperand(rcx, Map::kInstanceDescriptorsOffset)); | |
| 3801 // Get the bridge array held in the enumeration index field. | |
| 3802 __ movq(rcx, FieldOperand(rcx, DescriptorArray::kEnumerationIndexOffset)); | |
| 3803 // Get the cache from the bridge array. | |
| 3804 __ movq(rdx, FieldOperand(rcx, DescriptorArray::kEnumCacheBridgeCacheOffset)); | |
| 3805 | |
| 3806 frame_->EmitPush(rax); // <- slot 3 | |
| 3807 frame_->EmitPush(rdx); // <- slot 2 | |
| 3808 __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset)); | |
| 3809 frame_->EmitPush(rax); // <- slot 1 | |
| 3810 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | |
| 3811 entry.Jump(); | |
| 3812 | |
| 3813 fixed_array.Bind(); | |
| 3814 // rax: fixed array (result from call to Runtime::kGetPropertyNamesFast) | |
| 3815 frame_->EmitPush(Smi::FromInt(0)); // <- slot 3 | |
| 3816 frame_->EmitPush(rax); // <- slot 2 | |
| 3817 | |
| 3818 // Push the length of the array and the initial index onto the stack. | |
| 3819 __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset)); | |
| 3820 frame_->EmitPush(rax); // <- slot 1 | |
| 3821 frame_->EmitPush(Smi::FromInt(0)); // <- slot 0 | |
| 3822 | |
| 3823 // Condition. | |
| 3824 entry.Bind(); | |
| 3825 // Grab the current frame's height for the break and continue | |
| 3826 // targets only after all the state is pushed on the frame. | |
| 3827 node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3828 node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 3829 | |
| 3830 __ movq(rax, frame_->ElementAt(0)); // load the current count | |
| 3831 __ SmiCompare(frame_->ElementAt(1), rax); // compare to the array length | |
| 3832 node->break_target()->Branch(below_equal); | |
| 3833 | |
| 3834 // Get the i'th entry of the array. | |
| 3835 __ movq(rdx, frame_->ElementAt(2)); | |
| 3836 SmiIndex index = masm_->SmiToIndex(rbx, rax, kPointerSizeLog2); | |
| 3837 __ movq(rbx, | |
| 3838 FieldOperand(rdx, index.reg, index.scale, FixedArray::kHeaderSize)); | |
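| // (SmiToIndex converts the smi count in rax into an untagged index | |
| // register plus scale, addressing the i'th pointer-sized element of | |
| // the FixedArray above.) | |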
| 3839 | |
| 3840 // Get the expected map from the stack or a zero map in the | |
| 3841 // permanent slow case. | |
| 3842 // rax: current iteration count; rbx: i'th entry of the enum cache | |
| 3843 __ movq(rdx, frame_->ElementAt(3)); | |
| 3844 // Check if the expected map still matches that of the enumerable. | |
| 3845 // If not, we have to filter the key. | |
| 3846 // rax: current iteration count | |
| 3847 // rbx: i'th entry of the enum cache | |
| 3848 // rdx: expected map value | |
| 3849 __ movq(rcx, frame_->ElementAt(4)); | |
| 3850 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset)); | |
| 3851 __ cmpq(rcx, rdx); | |
| 3852 end_del_check.Branch(equal); | |
| 3853 | |
| 3854 // Convert the entry to a string (or null if it isn't a property anymore). | |
| 3855 frame_->EmitPush(frame_->ElementAt(4)); // push enumerable | |
| 3856 frame_->EmitPush(rbx); // push entry | |
| 3857 frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2); | |
| 3858 __ movq(rbx, rax); | |
| 3859 | |
| 3860 // If the property has been removed while iterating, we just skip it. | |
| 3861 __ Cmp(rbx, Smi::FromInt(0)); | |
| 3862 node->continue_target()->Branch(equal); | |
| 3863 | |
| 3864 end_del_check.Bind(); | |
| 3865 // Store the entry in the 'each' expression and take another spin in | |
| 3866 // the loop. rbx: i'th entry of the enum cache (or the string thereof) | |
| 3867 frame_->EmitPush(rbx); | |
| 3868 { Reference each(this, node->each()); | |
| 3869 // Loading a reference may leave the frame in an unspilled state. | |
| 3870 frame_->SpillAll(); | |
| 3871 if (!each.is_illegal()) { | |
| 3872 if (each.size() > 0) { | |
| 3873 frame_->EmitPush(frame_->ElementAt(each.size())); | |
| 3874 each.SetValue(NOT_CONST_INIT); | |
| 3875 frame_->Drop(2); // Drop the original and the copy of the element. | |
| 3876 } else { | |
| 3877 // If the reference has size zero then we can use the value below | |
| 3878 // the reference as if it were above it, instead of pushing a new | |
| 3879 // copy above the reference. | |
| 3880 each.SetValue(NOT_CONST_INIT); | |
| 3881 frame_->Drop(); // Drop the original of the element. | |
| 3882 } | |
| 3883 } | |
| 3884 } | |
| 3885 // Unloading a reference may leave the frame in an unspilled state. | |
| 3886 frame_->SpillAll(); | |
| 3887 | |
| 3888 // Body. | |
| 3889 CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 3890 VisitAndSpill(node->body()); | |
| 3891 | |
| 3892 // Next. Reestablish a spilled frame in case we are coming here via | |
| 3893 // a continue in the body. | |
| 3894 node->continue_target()->Bind(); | |
| 3895 frame_->SpillAll(); | |
| 3896 frame_->EmitPop(rax); | |
| 3897 __ SmiAddConstant(rax, rax, Smi::FromInt(1)); | |
| 3898 frame_->EmitPush(rax); | |
| 3899 entry.Jump(); | |
| 3900 | |
| 3901 // Cleanup. No need to spill because VirtualFrame::Drop is safe for | |
| 3902 // any frame. | |
| 3903 node->break_target()->Bind(); | |
| 3904 frame_->Drop(5); | |
| 3905 | |
| 3906 // Exit. | |
| 3907 exit.Bind(); | |
| 3908 | |
| 3909 node->continue_target()->Unuse(); | |
| 3910 node->break_target()->Unuse(); | |
| 3911 } | |
| 3912 | |
| 3913 | |
| 3914 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) { | |
| 3915 ASSERT(!in_spilled_code()); | |
| 3916 VirtualFrame::SpilledScope spilled_scope; | |
| 3917 Comment cmnt(masm_, "[ TryCatchStatement"); | |
| 3918 CodeForStatementPosition(node); | |
| 3919 | |
| 3920 JumpTarget try_block; | |
| 3921 JumpTarget exit; | |
| 3922 | |
| 3923 try_block.Call(); | |
| 3924 // --- Catch block --- | |
| 3925 frame_->EmitPush(rax); | |
| 3926 | |
| 3927 // Store the caught exception in the catch variable. | |
| 3928 Variable* catch_var = node->catch_var()->var(); | |
| 3929 ASSERT(catch_var != NULL && catch_var->AsSlot() != NULL); | |
| 3930 StoreToSlot(catch_var->AsSlot(), NOT_CONST_INIT); | |
| 3931 | |
| 3932 // Remove the exception from the stack. | |
| 3933 frame_->Drop(); | |
| 3934 | |
| 3935 VisitStatementsAndSpill(node->catch_block()->statements()); | |
| 3936 if (has_valid_frame()) { | |
| 3937 exit.Jump(); | |
| 3938 } | |
| 3939 | |
| 3941 // --- Try block --- | |
| 3942 try_block.Bind(); | |
| 3943 | |
| 3944 frame_->PushTryHandler(TRY_CATCH_HANDLER); | |
| 3945 int handler_height = frame_->height(); | |
| 3946 | |
| 3947 // Shadow the jump targets for all escapes from the try block, including | |
| 3948 // returns. During shadowing, the original target is hidden as the | |
| 3949 // ShadowTarget and operations on the original actually affect the | |
| 3950 // shadowing target. | |
| 3951 // | |
| 3952 // We should probably try to unify the escaping targets and the return | |
| 3953 // target. | |
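| // (Illustrative: a "return" inside the try block jumps to its shadow | |
| // target; the unlink code further down pops the try handler before | |
| // forwarding control to the real return target.) | |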
| 3954 int nof_escapes = node->escaping_targets()->length(); | |
| 3955 List<ShadowTarget*> shadows(1 + nof_escapes); | |
| 3956 | |
| 3957 // Add the shadow target for the function return. | |
| 3958 static const int kReturnShadowIndex = 0; | |
| 3959 shadows.Add(new ShadowTarget(&function_return_)); | |
| 3960 bool function_return_was_shadowed = function_return_is_shadowed_; | |
| 3961 function_return_is_shadowed_ = true; | |
| 3962 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); | |
| 3963 | |
| 3964 // Add the remaining shadow targets. | |
| 3965 for (int i = 0; i < nof_escapes; i++) { | |
| 3966 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); | |
| 3967 } | |
| 3968 | |
| 3969 // Generate code for the statements in the try block. | |
| 3970 VisitStatementsAndSpill(node->try_block()->statements()); | |
| 3971 | |
| 3972 // Stop the introduced shadowing and count the number of required unlinks. | |
| 3973 // After shadowing stops, the original targets are unshadowed and the | |
| 3974 // ShadowTargets represent the formerly shadowing targets. | |
| 3975 bool has_unlinks = false; | |
| 3976 for (int i = 0; i < shadows.length(); i++) { | |
| 3977 shadows[i]->StopShadowing(); | |
| 3978 has_unlinks = has_unlinks || shadows[i]->is_linked(); | |
| 3979 } | |
| 3980 function_return_is_shadowed_ = function_return_was_shadowed; | |
| 3981 | |
| 3982 // Get an external reference to the handler address. | |
| 3983 ExternalReference handler_address(Isolate::k_handler_address, isolate()); | |
| 3984 | |
| 3985 // Make sure that there's nothing left on the stack above the | |
| 3986 // handler structure. | |
| 3987 if (FLAG_debug_code) { | |
| 3988 __ movq(kScratchRegister, handler_address); | |
| 3989 __ cmpq(rsp, Operand(kScratchRegister, 0)); | |
| 3990 __ Assert(equal, "stack pointer should point to top handler"); | |
| 3991 } | |
| 3992 | |
| 3993 // If we can fall off the end of the try block, unlink from try chain. | |
| 3994 if (has_valid_frame()) { | |
| 3995 // The next handler address is on top of the frame. Unlink from | |
| 3996 // the handler list and drop the rest of this handler from the | |
| 3997 // frame. | |
| 3998 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 3999 __ movq(kScratchRegister, handler_address); | |
| 4000 frame_->EmitPop(Operand(kScratchRegister, 0)); | |
| 4001 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 4002 if (has_unlinks) { | |
| 4003 exit.Jump(); | |
| 4004 } | |
| 4005 } | |
| 4006 | |
| 4007 // Generate unlink code for the (formerly) shadowing targets that | |
| 4008 // have been jumped to. Deallocate each shadow target. | |
| 4009 Result return_value; | |
| 4010 for (int i = 0; i < shadows.length(); i++) { | |
| 4011 if (shadows[i]->is_linked()) { | |
| 4012 // Unlink from try chain; be careful not to destroy the TOS if | |
| 4013 // there is one. | |
| 4014 if (i == kReturnShadowIndex) { | |
| 4015 shadows[i]->Bind(&return_value); | |
| 4016 return_value.ToRegister(rax); | |
| 4017 } else { | |
| 4018 shadows[i]->Bind(); | |
| 4019 } | |
| 4020 // Because we can be jumping here (to spilled code) from | |
| 4021 // unspilled code, we need to reestablish a spilled frame at | |
| 4022 // this block. | |
| 4023 frame_->SpillAll(); | |
| 4024 | |
| 4025 // Reload sp from the top handler, because some statements that we | |
| 4026 // break from (e.g., for...in) may have left stuff on the stack. | |
| 4027 __ movq(kScratchRegister, handler_address); | |
| 4028 __ movq(rsp, Operand(kScratchRegister, 0)); | |
| 4029 frame_->Forget(frame_->height() - handler_height); | |
| 4030 | |
| 4031 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 4032 __ movq(kScratchRegister, handler_address); | |
| 4033 frame_->EmitPop(Operand(kScratchRegister, 0)); | |
| 4034 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 4035 | |
| 4036 if (i == kReturnShadowIndex) { | |
| 4037 if (!function_return_is_shadowed_) frame_->PrepareForReturn(); | |
| 4038 shadows[i]->other_target()->Jump(&return_value); | |
| 4039 } else { | |
| 4040 shadows[i]->other_target()->Jump(); | |
| 4041 } | |
| 4042 } | |
| 4043 } | |
| 4044 | |
| 4045 exit.Bind(); | |
| 4046 } | |
| 4047 | |
| 4048 | |
| 4049 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) { | |
| 4050 ASSERT(!in_spilled_code()); | |
| 4051 VirtualFrame::SpilledScope spilled_scope; | |
| 4052 Comment cmnt(masm_, "[ TryFinallyStatement"); | |
| 4053 CodeForStatementPosition(node); | |
| 4054 | |
| 4055 // State: Used to keep track of the reason for entering the finally | |
| 4056 // block. Should probably be extended to hold information for | |
| 4057 // break/continue from within the try block. | |
| 4058 enum { FALLING, THROWING, JUMPING }; | |
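| // (FALLING: control fell off the end of the try block; THROWING: an | |
| // exception was thrown in the try block; JUMPING + i: the i'th | |
| // shadow target, e.g. a shadowed return or break, was taken.) | |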
| 4059 | |
| 4060 JumpTarget try_block; | |
| 4061 JumpTarget finally_block; | |
| 4062 | |
| 4063 try_block.Call(); | |
| 4064 | |
| 4065 frame_->EmitPush(rax); | |
| 4066 // In case of thrown exceptions, this is where we continue. | |
| 4067 __ Move(rcx, Smi::FromInt(THROWING)); | |
| 4068 finally_block.Jump(); | |
| 4069 | |
| 4070 // --- Try block --- | |
| 4071 try_block.Bind(); | |
| 4072 | |
| 4073 frame_->PushTryHandler(TRY_FINALLY_HANDLER); | |
| 4074 int handler_height = frame_->height(); | |
| 4075 | |
| 4076 // Shadow the jump targets for all escapes from the try block, including | |
| 4077 // returns. During shadowing, the original target is hidden as the | |
| 4078 // ShadowTarget and operations on the original actually affect the | |
| 4079 // shadowing target. | |
| 4080 // | |
| 4081 // We should probably try to unify the escaping targets and the return | |
| 4082 // target. | |
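| // (Here the shadow targets additionally record which escape was | |
| // taken: the unlink code below sets the state to JUMPING + i so the | |
| // finally block can dispatch back to the right target afterwards.) | |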
| 4083 int nof_escapes = node->escaping_targets()->length(); | |
| 4084 List<ShadowTarget*> shadows(1 + nof_escapes); | |
| 4085 | |
| 4086 // Add the shadow target for the function return. | |
| 4087 static const int kReturnShadowIndex = 0; | |
| 4088 shadows.Add(new ShadowTarget(&function_return_)); | |
| 4089 bool function_return_was_shadowed = function_return_is_shadowed_; | |
| 4090 function_return_is_shadowed_ = true; | |
| 4091 ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); | |
| 4092 | |
| 4093 // Add the remaining shadow targets. | |
| 4094 for (int i = 0; i < nof_escapes; i++) { | |
| 4095 shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); | |
| 4096 } | |
| 4097 | |
| 4098 // Generate code for the statements in the try block. | |
| 4099 VisitStatementsAndSpill(node->try_block()->statements()); | |
| 4100 | |
| 4101 // Stop the introduced shadowing and count the number of required unlinks. | |
| 4102 // After shadowing stops, the original targets are unshadowed and the | |
| 4103 // ShadowTargets represent the formerly shadowing targets. | |
| 4104 int nof_unlinks = 0; | |
| 4105 for (int i = 0; i < shadows.length(); i++) { | |
| 4106 shadows[i]->StopShadowing(); | |
| 4107 if (shadows[i]->is_linked()) nof_unlinks++; | |
| 4108 } | |
| 4109 function_return_is_shadowed_ = function_return_was_shadowed; | |
| 4110 | |
| 4111 // Get an external reference to the handler address. | |
| 4112 ExternalReference handler_address(Isolate::k_handler_address, isolate()); | |
| 4113 | |
| 4114 // If we can fall off the end of the try block, unlink from the try | |
| 4115 // chain and set the state on the frame to FALLING. | |
| 4116 if (has_valid_frame()) { | |
| 4117 // The next handler address is on top of the frame. | |
| 4118 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 4119 __ movq(kScratchRegister, handler_address); | |
| 4120 frame_->EmitPop(Operand(kScratchRegister, 0)); | |
| 4121 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 4122 | |
| 4123 // Fake a top of stack value (unneeded when FALLING) and set the | |
| 4124 // state in rcx, then jump around the unlink blocks if any. | |
| 4125 frame_->EmitPush(Heap::kUndefinedValueRootIndex); | |
| 4126 __ Move(rcx, Smi::FromInt(FALLING)); | |
| 4127 if (nof_unlinks > 0) { | |
| 4128 finally_block.Jump(); | |
| 4129 } | |
| 4130 } | |
| 4131 | |
| 4132 // Generate code to unlink and set the state for the (formerly) | |
| 4133 // shadowing targets that have been jumped to. | |
| 4134 for (int i = 0; i < shadows.length(); i++) { | |
| 4135 if (shadows[i]->is_linked()) { | |
| 4136 // If we have come from the shadowed return, the return value is | |
| 4137 // on the virtual frame. We must preserve it until it is | |
| 4138 // pushed. | |
| 4139 if (i == kReturnShadowIndex) { | |
| 4140 Result return_value; | |
| 4141 shadows[i]->Bind(&return_value); | |
| 4142 return_value.ToRegister(rax); | |
| 4143 } else { | |
| 4144 shadows[i]->Bind(); | |
| 4145 } | |
| 4146 // Because we can be jumping here (to spilled code) from | |
| 4147 // unspilled code, we need to reestablish a spilled frame at | |
| 4148 // this block. | |
| 4149 frame_->SpillAll(); | |
| 4150 | |
| 4151 // Reload sp from the top handler, because some statements that | |
| 4152 // we break from (e.g., for...in) may have left stuff on the | |
| 4153 // stack. | |
| 4154 __ movq(kScratchRegister, handler_address); | |
| 4155 __ movq(rsp, Operand(kScratchRegister, 0)); | |
| 4156 frame_->Forget(frame_->height() - handler_height); | |
| 4157 | |
| 4158 // Unlink this handler and drop it from the frame. | |
| 4159 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 4160 __ movq(kScratchRegister, handler_address); | |
| 4161 frame_->EmitPop(Operand(kScratchRegister, 0)); | |
| 4162 frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 4163 | |
| 4164 if (i == kReturnShadowIndex) { | |
| 4165 // If this target shadowed the function return, materialize | |
| 4166 // the return value on the stack. | |
| 4167 frame_->EmitPush(rax); | |
| 4168 } else { | |
| 4169 // Fake TOS for targets that shadowed breaks and continues. | |
| 4170 frame_->EmitPush(Heap::kUndefinedValueRootIndex); | |
| 4171 } | |
| 4172 __ Move(rcx, Smi::FromInt(JUMPING + i)); | |
| 4173 if (--nof_unlinks > 0) { | |
| 4174 // If this is not the last unlink block, jump around the next. | |
| 4175 finally_block.Jump(); | |
| 4176 } | |
| 4177 } | |
| 4178 } | |
| 4179 | |
| 4180 // --- Finally block --- | |
| 4181 finally_block.Bind(); | |
| 4182 | |
| 4183 // Push the state on the stack. | |
| 4184 frame_->EmitPush(rcx); | |
| 4185 | |
| 4186 // We keep two elements on the stack - the (possibly faked) result | |
| 4187 // and the state - while evaluating the finally block. | |
| 4188 // | |
| 4189 // Generate code for the statements in the finally block. | |
| 4190 VisitStatementsAndSpill(node->finally_block()->statements()); | |
| 4191 | |
| 4192 if (has_valid_frame()) { | |
| 4193 // Restore state and return value or faked TOS. | |
| 4194 frame_->EmitPop(rcx); | |
| 4195 frame_->EmitPop(rax); | |
| 4196 } | |
| 4197 | |
| 4198 // Generate code to jump to the right destination for all used | |
| 4199 // formerly shadowing targets. Deallocate each shadow target. | |
| 4200 for (int i = 0; i < shadows.length(); i++) { | |
| 4201 if (has_valid_frame() && shadows[i]->is_bound()) { | |
| 4202 BreakTarget* original = shadows[i]->other_target(); | |
| 4203 __ SmiCompare(rcx, Smi::FromInt(JUMPING + i)); | |
| 4204 if (i == kReturnShadowIndex) { | |
| 4205 // The return value is (already) in rax. | |
| 4206 Result return_value = allocator_->Allocate(rax); | |
| 4207 ASSERT(return_value.is_valid()); | |
| 4208 if (function_return_is_shadowed_) { | |
| 4209 original->Branch(equal, &return_value); | |
| 4210 } else { | |
| 4211 // Branch around the preparation for return which may emit | |
| 4212 // code. | |
| 4213 JumpTarget skip; | |
| 4214 skip.Branch(not_equal); | |
| 4215 frame_->PrepareForReturn(); | |
| 4216 original->Jump(&return_value); | |
| 4217 skip.Bind(); | |
| 4218 } | |
| 4219 } else { | |
| 4220 original->Branch(equal); | |
| 4221 } | |
| 4222 } | |
| 4223 } | |
| 4224 | |
| 4225 if (has_valid_frame()) { | |
| 4226 // Check if we need to rethrow the exception. | |
| 4227 JumpTarget exit; | |
| 4228 __ SmiCompare(rcx, Smi::FromInt(THROWING)); | |
| 4229 exit.Branch(not_equal); | |
| 4230 | |
| 4231 // Rethrow exception. | |
| 4232 frame_->EmitPush(rax); // undo pop from above | |
| 4233 frame_->CallRuntime(Runtime::kReThrow, 1); | |
| 4234 | |
| 4235 // Done. | |
| 4236 exit.Bind(); | |
| 4237 } | |
| 4238 } | |
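
The block above compiles try/finally by funneling every way of leaving the try body (falling off the end, a break/continue/return through a shadowed target, or a throw) into a single finally block, with a Smi state in rcx recording how control arrived so it can be redispatched afterwards. A minimal stand-alone sketch of that dispatch idea in plain C++ (the enum values and function names are illustrative, not V8's):

    #include <cstdio>

    // Illustrative states mirroring the FALLING/JUMPING/THROWING values
    // the generated code keeps in rcx.
    enum TryState { kFalling, kJumping, kThrowing };

    // The finally clause runs exactly once; afterwards control is
    // redispatched on the recorded state, as in the SmiCompare/Branch
    // chain above.
    void RunFinallyAndDispatch(TryState state) {
      std::printf("finally clause runs\n");  // always executed
      switch (state) {
        case kFalling:  std::printf("fall through to next statement\n"); break;
        case kJumping:  std::printf("resume the shadowed jump target\n"); break;
        case kThrowing: std::printf("rethrow the pending exception\n");   break;
      }
    }

    int main() {
      RunFinallyAndDispatch(kFalling);
      RunFinallyAndDispatch(kThrowing);
      return 0;
    }
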
| 4239 | |
| 4240 | |
| 4241 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { | |
| 4242 ASSERT(!in_spilled_code()); | |
| 4243 Comment cmnt(masm_, "[ DebuggerStatement"); | |
| 4244 CodeForStatementPosition(node); | |
| 4245 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 4246 // Spill everything, even constants, to the frame. | |
| 4247 frame_->SpillAll(); | |
| 4248 | |
| 4249 frame_->DebugBreak(); | |
| 4250 // Ignore the return value. | |
| 4251 #endif | |
| 4252 } | |
| 4253 | |
| 4254 | |
| 4255 void CodeGenerator::InstantiateFunction( | |
| 4256 Handle<SharedFunctionInfo> function_info, | |
| 4257 bool pretenure) { | |
| 4258 // The inevitable call will sync frame elements to memory anyway, so | |
| 4259 // we do it eagerly to allow us to push the arguments directly into | |
| 4260 // place. | |
| 4261 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 4262 | |
| 4263 // Use the fast case closure allocation code that allocates in new | |
| 4264 // space for nested functions that don't need literals cloning. | |
| 4265 if (!pretenure && | |
| 4266 scope()->is_function_scope() && | |
| 4267 function_info->num_literals() == 0) { | |
| 4268 FastNewClosureStub stub( | |
| 4269 function_info->strict_mode() ? kStrictMode : kNonStrictMode); | |
| 4270 frame_->Push(function_info); | |
| 4271 Result answer = frame_->CallStub(&stub, 1); | |
| 4272 frame_->Push(&answer); | |
| 4273 } else { | |
| 4274 // Call the runtime to instantiate the function based on the | |
| 4275 // shared function info. | |
| 4276 frame_->EmitPush(rsi); | |
| 4277 frame_->EmitPush(function_info); | |
| 4278 frame_->EmitPush(pretenure | |
| 4279 ? FACTORY->true_value() | |
| 4280 : FACTORY->false_value()); | |
| 4281 Result result = frame_->CallRuntime(Runtime::kNewClosure, 3); | |
| 4282 frame_->Push(&result); | |
| 4283 } | |
| 4284 } | |
| 4285 | |
| 4286 | |
| 4287 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { | |
| 4288 Comment cmnt(masm_, "[ FunctionLiteral"); | |
| 4289 | |
| 4290 // Build the function info and instantiate it. | |
| 4291 Handle<SharedFunctionInfo> function_info = | |
| 4292 Compiler::BuildFunctionInfo(node, script()); | |
| 4293 // Check for stack-overflow exception. | |
| 4294 if (function_info.is_null()) { | |
| 4295 SetStackOverflow(); | |
| 4296 return; | |
| 4297 } | |
| 4298 InstantiateFunction(function_info, node->pretenure()); | |
| 4299 } | |
| 4300 | |
| 4301 | |
| 4302 void CodeGenerator::VisitSharedFunctionInfoLiteral( | |
| 4303 SharedFunctionInfoLiteral* node) { | |
| 4304 Comment cmnt(masm_, "[ SharedFunctionInfoLiteral"); | |
| 4305 InstantiateFunction(node->shared_function_info(), false); | |
| 4306 } | |
| 4307 | |
| 4308 | |
| 4309 void CodeGenerator::VisitConditional(Conditional* node) { | |
| 4310 Comment cmnt(masm_, "[ Conditional"); | |
| 4311 JumpTarget then; | |
| 4312 JumpTarget else_; | |
| 4313 JumpTarget exit; | |
| 4314 ControlDestination dest(&then, &else_, true); | |
| 4315 LoadCondition(node->condition(), &dest, true); | |
| 4316 | |
| 4317 if (dest.false_was_fall_through()) { | |
| 4318 // The else target was bound, so we compile the else part first. | |
| 4319 Load(node->else_expression()); | |
| 4320 | |
| 4321 if (then.is_linked()) { | |
| 4322 exit.Jump(); | |
| 4323 then.Bind(); | |
| 4324 Load(node->then_expression()); | |
| 4325 } | |
| 4326 } else { | |
| 4327 // The then target was bound, so we compile the then part first. | |
| 4328 Load(node->then_expression()); | |
| 4329 | |
| 4330 if (else_.is_linked()) { | |
| 4331 exit.Jump(); | |
| 4332 else_.Bind(); | |
| 4333 Load(node->else_expression()); | |
| 4334 } | |
| 4335 } | |
| 4336 | |
| 4337 exit.Bind(); | |
| 4338 } | |
| 4339 | |
| 4340 | |
| 4341 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { | |
| 4342 if (slot->type() == Slot::LOOKUP) { | |
| 4343 ASSERT(slot->var()->is_dynamic()); | |
| 4344 | |
| 4345 JumpTarget slow; | |
| 4346 JumpTarget done; | |
| 4347 Result value; | |
| 4348 | |
| 4349 // Generate fast case for loading from slots that correspond to | |
| 4350 // local/global variables or arguments unless they are shadowed by | |
| 4351 // eval-introduced bindings. | |
| 4352 EmitDynamicLoadFromSlotFastCase(slot, | |
| 4353 typeof_state, | |
| 4354 &value, | |
| 4355 &slow, | |
| 4356 &done); | |
| 4357 | |
| 4358 slow.Bind(); | |
| 4359 // A runtime call is inevitable. We eagerly sync frame elements | |
| 4360 // to memory so that we can push the arguments directly into place | |
| 4361 // on top of the frame. | |
| 4362 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 4363 frame_->EmitPush(rsi); | |
| 4364 __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT); | |
| 4365 frame_->EmitPush(kScratchRegister); | |
| 4366 if (typeof_state == INSIDE_TYPEOF) { | |
| 4367 value = | |
| 4368 frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); | |
| 4369 } else { | |
| 4370 value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
| 4371 } | |
| 4372 | |
| 4373 done.Bind(&value); | |
| 4374 frame_->Push(&value); | |
| 4375 | |
| 4376 } else if (slot->var()->mode() == Variable::CONST) { | |
| 4377 // Const slots may contain 'the hole' value (the constant hasn't been | |
| 4378 // initialized yet) which needs to be converted into the 'undefined' | |
| 4379 // value. | |
| 4380 // | |
| 4381 // We currently spill the virtual frame because constants use the | |
| 4382 // potentially unsafe direct-frame access of SlotOperand. | |
| 4383 VirtualFrame::SpilledScope spilled_scope; | |
| 4384 Comment cmnt(masm_, "[ Load const"); | |
| 4385 JumpTarget exit; | |
| 4386 __ movq(rcx, SlotOperand(slot, rcx)); | |
| 4387 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); | |
| 4388 exit.Branch(not_equal); | |
| 4389 __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex); | |
| 4390 exit.Bind(); | |
| 4391 frame_->EmitPush(rcx); | |
| 4392 | |
| 4393 } else if (slot->type() == Slot::PARAMETER) { | |
| 4394 frame_->PushParameterAt(slot->index()); | |
| 4395 | |
| 4396 } else if (slot->type() == Slot::LOCAL) { | |
| 4397 frame_->PushLocalAt(slot->index()); | |
| 4398 | |
| 4399 } else { | |
| 4400 // The remaining slot types (LOOKUP and GLOBAL) cannot reach | |
| 4401 // here. | |
| 4402 // | |
| 4403 // The use of SlotOperand below is safe for an unspilled frame | |
| 4404 // because it will always be a context slot. | |
| 4405 ASSERT(slot->type() == Slot::CONTEXT); | |
| 4406 Result temp = allocator_->Allocate(); | |
| 4407 ASSERT(temp.is_valid()); | |
| 4408 __ movq(temp.reg(), SlotOperand(slot, temp.reg())); | |
| 4409 frame_->Push(&temp); | |
| 4410 } | |
| 4411 } | |
| 4412 | |
| 4413 | |
| 4414 void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot, | |
| 4415 TypeofState state) { | |
| 4416 LoadFromSlot(slot, state); | |
| 4417 | |
| 4418 // Bail out quickly if we're not using lazy arguments allocation. | |
| 4419 if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return; | |
| 4420 | |
| 4421 // ... or if the slot isn't a non-parameter arguments slot. | |
| 4422 if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return; | |
| 4423 | |
| 4424 // Pop the loaded value from the stack. | |
| 4425 Result value = frame_->Pop(); | |
| 4426 | |
| 4427 // If the loaded value is a constant, we know whether the arguments | |
| 4428 // object has been lazily allocated yet. | |
| 4429 if (value.is_constant()) { | |
| 4430 if (value.handle()->IsArgumentsMarker()) { | |
| 4431 Result arguments = StoreArgumentsObject(false); | |
| 4432 frame_->Push(&arguments); | |
| 4433 } else { | |
| 4434 frame_->Push(&value); | |
| 4435 } | |
| 4436 return; | |
| 4437 } | |
| 4438 | |
| 4439 // The loaded value is in a register. If it is the sentinel that | |
| 4440 // indicates that we haven't loaded the arguments object yet, we | |
| 4441 // need to do it now. | |
| 4442 JumpTarget exit; | |
| 4443 __ CompareRoot(value.reg(), Heap::kArgumentsMarkerRootIndex); | |
| 4444 frame_->Push(&value); | |
| 4445 exit.Branch(not_equal); | |
| 4446 Result arguments = StoreArgumentsObject(false); | |
| 4447 frame_->SetElementAt(0, &arguments); | |
| 4448 exit.Bind(); | |
| 4449 } | |
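
LoadFromSlotCheckForArguments above implements lazy allocation through a sentinel: the slot initially holds the arguments marker, and the real arguments object is built only the first time the slot's value is actually observed. The same pattern in a stand-alone sketch (all names here are hypothetical):

    #include <cstdio>

    struct ArgumentsObject { int length; };

    // A distinguished sentinel object playing the role of the
    // arguments marker root that the code above compares against.
    static ArgumentsObject kArgumentsMarker = { -1 };

    static ArgumentsObject* slot = &kArgumentsMarker;  // not yet allocated

    // Analogue of the CompareRoot / StoreArgumentsObject path: allocate
    // the real object only on first observation of the slot's value.
    ArgumentsObject* LoadArguments() {
      if (slot == &kArgumentsMarker) {
        static ArgumentsObject real = { 3 };  // materialize on first use
        slot = &real;
      }
      return slot;
    }

    int main() {
      std::printf("length = %d\n", LoadArguments()->length);
      return 0;
    }
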
| 4450 | |
| 4451 | |
| 4452 Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( | |
| 4453 Slot* slot, | |
| 4454 TypeofState typeof_state, | |
| 4455 JumpTarget* slow) { | |
| 4456 // Check that no extension objects have been created by calls to | |
| 4457 // eval from the current scope to the global scope. | |
| 4458 Register context = rsi; | |
| 4459 Result tmp = allocator_->Allocate(); | |
| 4460 ASSERT(tmp.is_valid()); // All non-reserved registers were available. | |
| 4461 | |
| 4462 Scope* s = scope(); | |
| 4463 while (s != NULL) { | |
| 4464 if (s->num_heap_slots() > 0) { | |
| 4465 if (s->calls_eval()) { | |
| 4466 // Check that extension is NULL. | |
| 4467 __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), | |
| 4468 Immediate(0)); | |
| 4469 slow->Branch(not_equal, not_taken); | |
| 4470 } | |
| 4471 // Load next context in chain. | |
| 4472 __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 4473 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
| 4474 context = tmp.reg(); | |
| 4475 } | |
| 4476 // If no outer scope calls eval, we do not need to check more | |
| 4477 // context extensions. If we have reached an eval scope, we check | |
| 4478 // all extensions from this point. | |
| 4479 if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; | |
| 4480 s = s->outer_scope(); | |
| 4481 } | |
| 4482 | |
| 4483 if (s->is_eval_scope()) { | |
| 4484 // Walk up the context chain in a loop. There is no frame effect, | |
| 4485 // so it is safe to use raw labels here. | |
| 4486 Label next, fast; | |
| 4487 if (!context.is(tmp.reg())) { | |
| 4488 __ movq(tmp.reg(), context); | |
| 4489 } | |
| 4490 // Load map for comparison into register, outside loop. | |
| 4491 __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex); | |
| 4492 __ bind(&next); | |
| 4493 // Terminate at global context. | |
| 4494 __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset)); | |
| 4495 __ j(equal, &fast); | |
| 4496 // Check that extension is NULL. | |
| 4497 __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0)); | |
| 4498 slow->Branch(not_equal); | |
| 4499 // Load next context in chain. | |
| 4500 __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX)); | |
| 4501 __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset)); | |
| 4502 __ jmp(&next); | |
| 4503 __ bind(&fast); | |
| 4504 } | |
| 4505 tmp.Unuse(); | |
| 4506 | |
| 4507 // All extension objects were empty and it is safe to use a global | |
| 4508 // load IC call. | |
| 4509 LoadGlobal(); | |
| 4510 frame_->Push(slot->var()->name()); | |
| 4511 RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF) | |
| 4512 ? RelocInfo::CODE_TARGET | |
| 4513 : RelocInfo::CODE_TARGET_CONTEXT; | |
| 4514 Result answer = frame_->CallLoadIC(mode); | |
| 4515 // A test rax instruction following the call signals that the inobject | |
| 4516 // property case was inlined. Ensure that there is not a test rax | |
| 4517 // instruction here. | |
| 4518 masm_->nop(); | |
| 4519 return answer; | |
| 4520 } | |
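
The extension check above walks the context chain from the current scope toward the global context; the global load IC is only safe when no intervening context carries an eval-introduced extension object. A toy model of that walk, assuming a much-simplified Context (not V8's layout):

    #include <cstdio>

    // Toy context chain; 'extension' stands in for the eval-introduced
    // extension object that the generated code compares against NULL.
    struct Context {
      Context* previous;
      void* extension;
    };

    // Returns true when no context between 'current' and the global
    // context carries an extension, i.e. a direct global load is safe.
    bool NoExtensionsUpToGlobal(Context* current) {
      for (Context* c = current; c != nullptr; c = c->previous) {
        if (c->extension != nullptr) return false;  // must take slow path
      }
      return true;
    }

    int main() {
      Context global{nullptr, nullptr};
      Context inner{&global, nullptr};
      std::printf("fast path ok: %d\n", NoExtensionsUpToGlobal(&inner));
      return 0;
    }
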
| 4521 | |
| 4522 | |
| 4523 void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot, | |
| 4524 TypeofState typeof_state, | |
| 4525 Result* result, | |
| 4526 JumpTarget* slow, | |
| 4527 JumpTarget* done) { | |
| 4528 // Generate fast-case code for variables that might be shadowed by | |
| 4529 // eval-introduced variables. Eval is often used without actually | |
| 4530 // introducing any variables; in those cases, we do not want to | |
| 4531 // perform a runtime call for every variable in the scope | |
| 4532 // containing the eval. | |
| 4533 if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { | |
| 4534 *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow); | |
| 4535 done->Jump(result); | |
| 4536 | |
| 4537 } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { | |
| 4538 Slot* potential_slot = slot->var()->local_if_not_shadowed()->AsSlot(); | |
| 4539 Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite(); | |
| 4540 if (potential_slot != NULL) { | |
| 4541 // Generate fast case for locals that rewrite to slots. | |
| 4542 // Allocate a fresh register to use as a temp in | |
| 4543 // ContextSlotOperandCheckExtensions and to hold the result | |
| 4544 // value. | |
| 4545 *result = allocator_->Allocate(); | |
| 4546 ASSERT(result->is_valid()); | |
| 4547 __ movq(result->reg(), | |
| 4548 ContextSlotOperandCheckExtensions(potential_slot, | |
| 4549 *result, | |
| 4550 slow)); | |
| 4551 if (potential_slot->var()->mode() == Variable::CONST) { | |
| 4552 __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex); | |
| 4553 done->Branch(not_equal, result); | |
| 4554 __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex); | |
| 4555 } | |
| 4556 done->Jump(result); | |
| 4557 } else if (rewrite != NULL) { | |
| 4558 // Generate fast case for argument loads. | |
| 4559 Property* property = rewrite->AsProperty(); | |
| 4560 if (property != NULL) { | |
| 4561 VariableProxy* obj_proxy = property->obj()->AsVariableProxy(); | |
| 4562 Literal* key_literal = property->key()->AsLiteral(); | |
| 4563 if (obj_proxy != NULL && | |
| 4564 key_literal != NULL && | |
| 4565 obj_proxy->IsArguments() && | |
| 4566 key_literal->handle()->IsSmi()) { | |
| 4567 // Load the arguments object if there are no eval-introduced | |
| 4568 // variables. Then load the argument from the arguments | |
| 4569 // object using a keyed load. | |
| 4570 Result arguments = allocator()->Allocate(); | |
| 4571 ASSERT(arguments.is_valid()); | |
| 4572 __ movq(arguments.reg(), | |
| 4573 ContextSlotOperandCheckExtensions(obj_proxy->var()->AsSlot(), | |
| 4574 arguments, | |
| 4575 slow)); | |
| 4576 frame_->Push(&arguments); | |
| 4577 frame_->Push(key_literal->handle()); | |
| 4578 *result = EmitKeyedLoad(); | |
| 4579 done->Jump(result); | |
| 4580 } | |
| 4581 } | |
| 4582 } | |
| 4583 } | |
| 4584 } | |
| 4585 | |
| 4586 | |
| 4587 void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) { | |
| 4588 if (slot->type() == Slot::LOOKUP) { | |
| 4589 ASSERT(slot->var()->is_dynamic()); | |
| 4590 | |
| 4591 // For now, just do a runtime call. Since the call is inevitable, | |
| 4592 // we eagerly sync the virtual frame so we can directly push the | |
| 4593 // arguments into place. | |
| 4594 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 4595 | |
| 4596 frame_->EmitPush(rsi); | |
| 4597 frame_->EmitPush(slot->var()->name()); | |
| 4598 | |
| 4599 Result value; | |
| 4600 if (init_state == CONST_INIT) { | |
| 4601 // Same as the case for a normal store, but ignores the attributes | |
| 4602 // (e.g. READ_ONLY) of the context slot so that we can initialize const | |
| 4603 // properties (introduced via eval("const foo = (some expr);")). Also, | |
| 4604 // it uses the current function context instead of the top context. | |
| 4605 // | |
| 4606 // Note that we must declare foo upon entry to eval(), via a | |
| 4607 // context slot declaration, but we cannot initialize it at the same | |
| 4608 // time, because the const declaration may be at the end of the eval | |
| 4609 // code (sigh...) and the const variable may have been used before | |
| 4610 // then (where its value is 'undefined'). Thus, we can only do the | |
| 4611 // initialization when we actually encounter the expression, when its | |
| 4612 // operands are defined and valid, and so we need to split this into | |
| 4613 // two operations: declaration of the context slot followed | |
| 4614 // by initialization. | |
| 4615 value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3); | |
| 4616 } else { | |
| 4617 frame_->Push(Smi::FromInt(strict_mode_flag())); | |
| 4618 value = frame_->CallRuntime(Runtime::kStoreContextSlot, 4); | |
| 4619 } | |
| 4620 // Storing a variable must keep the (new) value on the expression | |
| 4621 // stack. This is necessary for compiling chained assignment | |
| 4622 // expressions. | |
| 4623 frame_->Push(&value); | |
| 4624 } else { | |
| 4625 ASSERT(!slot->var()->is_dynamic()); | |
| 4626 | |
| 4627 JumpTarget exit; | |
| 4628 if (init_state == CONST_INIT) { | |
| 4629 ASSERT(slot->var()->mode() == Variable::CONST); | |
| 4630 // Only the first const initialization must be executed (the slot | |
| 4631 // still contains 'the hole' value). When the assignment is executed, | |
| 4632 // the code is identical to a normal store (see below). | |
| 4633 // | |
| 4634 // We spill the frame in the code below because the direct-frame | |
| 4635 // access of SlotOperand is potentially unsafe with an unspilled | |
| 4636 // frame. | |
| 4637 VirtualFrame::SpilledScope spilled_scope; | |
| 4638 Comment cmnt(masm_, "[ Init const"); | |
| 4639 __ movq(rcx, SlotOperand(slot, rcx)); | |
| 4640 __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex); | |
| 4641 exit.Branch(not_equal); | |
| 4642 } | |
| 4643 | |
| 4644 // We must execute the store. Storing a variable must keep the (new) | |
| 4645 // value on the stack. This is necessary for compiling assignment | |
| 4646 // expressions. | |
| 4647 // | |
| 4648 // Note: We will reach here even with slot->var()->mode() == | |
| 4649 // Variable::CONST, because const declarations initialize | |
| 4650 // consts to 'the hole' value and, in doing so, end up calling this code. | |
| 4651 if (slot->type() == Slot::PARAMETER) { | |
| 4652 frame_->StoreToParameterAt(slot->index()); | |
| 4653 } else if (slot->type() == Slot::LOCAL) { | |
| 4654 frame_->StoreToLocalAt(slot->index()); | |
| 4655 } else { | |
| 4656 // The other slot types (LOOKUP and GLOBAL) cannot reach here. | |
| 4657 // | |
| 4658 // The use of SlotOperand below is safe for an unspilled frame | |
| 4659 // because the slot is a context slot. | |
| 4660 ASSERT(slot->type() == Slot::CONTEXT); | |
| 4661 frame_->Dup(); | |
| 4662 Result value = frame_->Pop(); | |
| 4663 value.ToRegister(); | |
| 4664 Result start = allocator_->Allocate(); | |
| 4665 ASSERT(start.is_valid()); | |
| 4666 __ movq(SlotOperand(slot, start.reg()), value.reg()); | |
| 4667 // RecordWrite may destroy the value registers. | |
| 4668 // | |
| 4669 // TODO(204): Avoid actually spilling when the value is not | |
| 4670 // needed (probably the common case). | |
| 4671 frame_->Spill(value.reg()); | |
| 4672 int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; | |
| 4673 Result temp = allocator_->Allocate(); | |
| 4674 ASSERT(temp.is_valid()); | |
| 4675 __ RecordWrite(start.reg(), offset, value.reg(), temp.reg()); | |
| 4676 // The results start, value, and temp are released by going out of | |
| 4677 // scope. | |
| 4678 } | |
| 4679 | |
| 4680 exit.Bind(); | |
| 4681 } | |
| 4682 } | |
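
The RecordWrite call above is a garbage-collector write barrier: after a pointer is stored into a heap object, the mutated object is reported to the collector so a generational GC can later find old-to-new pointers. A schematic sketch of the idea, assuming a simple remembered set rather than V8's actual mechanism:

    #include <cstdio>
    #include <set>

    struct HeapObject { HeapObject* field; };

    std::set<HeapObject*> remembered_set;  // stand-in for GC bookkeeping

    // Sketch of the RecordWrite idea: perform the store, then tell the
    // collector which object was mutated so it can rescan it later.
    void WriteWithBarrier(HeapObject* obj, HeapObject* value) {
      obj->field = value;            // the actual store
      remembered_set.insert(obj);    // the write barrier
    }

    int main() {
      HeapObject a{nullptr}, b{nullptr};
      WriteWithBarrier(&a, &b);
      std::printf("remembered %zu object(s)\n", remembered_set.size());
      return 0;
    }
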
| 4683 | |
| 4684 | |
| 4685 void CodeGenerator::VisitSlot(Slot* node) { | |
| 4686 Comment cmnt(masm_, "[ Slot"); | |
| 4687 LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF); | |
| 4688 } | |
| 4689 | |
| 4690 | |
| 4691 void CodeGenerator::VisitVariableProxy(VariableProxy* node) { | |
| 4692 Comment cmnt(masm_, "[ VariableProxy"); | |
| 4693 Variable* var = node->var(); | |
| 4694 Expression* expr = var->rewrite(); | |
| 4695 if (expr != NULL) { | |
| 4696 Visit(expr); | |
| 4697 } else { | |
| 4698 ASSERT(var->is_global()); | |
| 4699 Reference ref(this, node); | |
| 4700 ref.GetValue(); | |
| 4701 } | |
| 4702 } | |
| 4703 | |
| 4704 | |
| 4705 void CodeGenerator::VisitLiteral(Literal* node) { | |
| 4706 Comment cmnt(masm_, "[ Literal"); | |
| 4707 frame_->Push(node->handle()); | |
| 4708 } | |
| 4709 | |
| 4710 | |
| 4711 void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) { | |
| 4712 UNIMPLEMENTED(); | |
| 4713 // TODO(X64): Implement security policy for loads of smis. | |
| 4714 } | |
| 4715 | |
| 4716 | |
| 4717 bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) { | |
| 4718 return false; | |
| 4719 } | |
| 4720 | |
| 4721 | |
| 4722 // Materialize the regexp literal 'node' in the literals array | |
| 4723 // 'literals' of the function. Leave the regexp boilerplate in | |
| 4724 // 'boilerplate'. | |
| 4725 class DeferredRegExpLiteral: public DeferredCode { | |
| 4726 public: | |
| 4727 DeferredRegExpLiteral(Register boilerplate, | |
| 4728 Register literals, | |
| 4729 RegExpLiteral* node) | |
| 4730 : boilerplate_(boilerplate), literals_(literals), node_(node) { | |
| 4731 set_comment("[ DeferredRegExpLiteral"); | |
| 4732 } | |
| 4733 | |
| 4734 void Generate(); | |
| 4735 | |
| 4736 private: | |
| 4737 Register boilerplate_; | |
| 4738 Register literals_; | |
| 4739 RegExpLiteral* node_; | |
| 4740 }; | |
| 4741 | |
| 4742 | |
| 4743 void DeferredRegExpLiteral::Generate() { | |
| 4744 // Since the entry is undefined, we call the runtime system to | |
| 4745 // compute the literal. | |
| 4746 // Literal array (0). | |
| 4747 __ push(literals_); | |
| 4748 // Literal index (1). | |
| 4749 __ Push(Smi::FromInt(node_->literal_index())); | |
| 4750 // RegExp pattern (2). | |
| 4751 __ Push(node_->pattern()); | |
| 4752 // RegExp flags (3). | |
| 4753 __ Push(node_->flags()); | |
| 4754 __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); | |
| 4755 if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax); | |
| 4756 } | |
| 4757 | |
| 4758 | |
| 4759 class DeferredAllocateInNewSpace: public DeferredCode { | |
| 4760 public: | |
| 4761 DeferredAllocateInNewSpace(int size, | |
| 4762 Register target, | |
| 4763 int registers_to_save = 0) | |
| 4764 : size_(size), target_(target), registers_to_save_(registers_to_save) { | |
| 4765 ASSERT(size >= kPointerSize && size <= HEAP->MaxObjectSizeInNewSpace()); | |
| 4766 set_comment("[ DeferredAllocateInNewSpace"); | |
| 4767 } | |
| 4768 void Generate(); | |
| 4769 | |
| 4770 private: | |
| 4771 int size_; | |
| 4772 Register target_; | |
| 4773 int registers_to_save_; | |
| 4774 }; | |
| 4775 | |
| 4776 | |
| 4777 void DeferredAllocateInNewSpace::Generate() { | |
| 4778 for (int i = 0; i < kNumRegs; i++) { | |
| 4779 if (registers_to_save_ & (1 << i)) { | |
| 4780 Register save_register = { i }; | |
| 4781 __ push(save_register); | |
| 4782 } | |
| 4783 } | |
| 4784 __ Push(Smi::FromInt(size_)); | |
| 4785 __ CallRuntime(Runtime::kAllocateInNewSpace, 1); | |
| 4786 if (!target_.is(rax)) { | |
| 4787 __ movq(target_, rax); | |
| 4788 } | |
| 4789 for (int i = kNumRegs - 1; i >= 0; i--) { | |
| 4790 if (registers_to_save_ & (1 << i)) { | |
| 4791 Register save_register = { i }; | |
| 4792 __ pop(save_register); | |
| 4793 } | |
| 4794 } | |
| 4795 } | |
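
DeferredAllocateInNewSpace::Generate saves the live registers selected by registers_to_save_ before the runtime call and restores them afterwards; because the saves go through the stack, the restore loop must visit the registers in the opposite order. A toy model of that LIFO discipline:

    #include <cstdio>
    #include <vector>

    int main() {
      const int kNumRegs = 4;
      int mask = 0x5;               // save "registers" 0 and 2
      std::vector<int> stack;
      // Push selected registers in ascending order, as above.
      for (int i = 0; i < kNumRegs; i++)
        if (mask & (1 << i)) stack.push_back(i);
      // Pop in descending order so each pop matches its push.
      for (int i = kNumRegs - 1; i >= 0; i--) {
        if (mask & (1 << i)) {
          std::printf("restore r%d (top of stack: r%d)\n", i, stack.back());
          stack.pop_back();
        }
      }
      std::printf("stack balanced: %s\n", stack.empty() ? "yes" : "no");
      return 0;
    }
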
| 4796 | |
| 4797 | |
| 4798 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { | |
| 4799 Comment cmnt(masm_, "[ RegExp Literal"); | |
| 4800 | |
| 4801 // Retrieve the literals array and check the allocated entry. Begin | |
| 4802 // with a writable copy of the function of this activation in a | |
| 4803 // register. | |
| 4804 frame_->PushFunction(); | |
| 4805 Result literals = frame_->Pop(); | |
| 4806 literals.ToRegister(); | |
| 4807 frame_->Spill(literals.reg()); | |
| 4808 | |
| 4809 // Load the literals array of the function. | |
| 4810 __ movq(literals.reg(), | |
| 4811 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); | |
| 4812 | |
| 4813 // Load the literal at the index saved in the AST node. | |
| 4814 Result boilerplate = allocator_->Allocate(); | |
| 4815 ASSERT(boilerplate.is_valid()); | |
| 4816 int literal_offset = | |
| 4817 FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | |
| 4818 __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset)); | |
| 4819 | |
| 4820 // Check whether we need to materialize the RegExp object. If so, | |
| 4821 // jump to the deferred code passing the literals array. | |
| 4822 DeferredRegExpLiteral* deferred = | |
| 4823 new DeferredRegExpLiteral(boilerplate.reg(), literals.reg(), node); | |
| 4824 __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex); | |
| 4825 deferred->Branch(equal); | |
| 4826 deferred->BindExit(); | |
| 4827 | |
| 4828 // The boilerplate register now contains the RegExp object. | |
| 4829 | |
| 4830 Result tmp = allocator()->Allocate(); | |
| 4831 ASSERT(tmp.is_valid()); | |
| 4832 | |
| 4833 int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; | |
| 4834 | |
| 4835 DeferredAllocateInNewSpace* allocate_fallback = | |
| 4836 new DeferredAllocateInNewSpace(size, literals.reg()); | |
| 4837 frame_->Push(&boilerplate); | |
| 4838 frame_->SpillTop(); | |
| 4839 __ AllocateInNewSpace(size, | |
| 4840 literals.reg(), | |
| 4841 tmp.reg(), | |
| 4842 no_reg, | |
| 4843 allocate_fallback->entry_label(), | |
| 4844 TAG_OBJECT); | |
| 4845 allocate_fallback->BindExit(); | |
| 4846 boilerplate = frame_->Pop(); | |
| 4847 // Copy from boilerplate to clone and return clone. | |
| 4848 | |
| 4849 for (int i = 0; i < size; i += kPointerSize) { | |
| 4850 __ movq(tmp.reg(), FieldOperand(boilerplate.reg(), i)); | |
| 4851 __ movq(FieldOperand(literals.reg(), i), tmp.reg()); | |
| 4852 } | |
| 4853 frame_->Push(&literals); | |
| 4854 } | |
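
The loop that ends VisitRegExpLiteral clones the regexp boilerplate one pointer-size word at a time into the freshly allocated space, i.e. it makes a shallow copy. The same idea as a stand-alone sketch (the struct is illustrative only):

    #include <cstdio>
    #include <cstring>

    struct RegExpLike { const char* pattern; int flags; int last_index; };

    // Analogue of the movq copy loop above: duplicate an object by
    // copying its payload word for word into new storage.
    RegExpLike* ShallowClone(const RegExpLike* boilerplate) {
      RegExpLike* clone = new RegExpLike;
      std::memcpy(clone, boilerplate, sizeof(*boilerplate));
      return clone;
    }

    int main() {
      RegExpLike re{"a+b", 1, 0};
      RegExpLike* c = ShallowClone(&re);
      std::printf("%s\n", c->pattern);
      delete c;
      return 0;
    }
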
| 4855 | |
| 4856 | |
| 4857 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { | |
| 4858 Comment cmnt(masm_, "[ ObjectLiteral"); | |
| 4859 | |
| 4860 // Load a writable copy of the function of this activation in a | |
| 4861 // register. | |
| 4862 frame_->PushFunction(); | |
| 4863 Result literals = frame_->Pop(); | |
| 4864 literals.ToRegister(); | |
| 4865 frame_->Spill(literals.reg()); | |
| 4866 | |
| 4867 // Load the literals array of the function. | |
| 4868 __ movq(literals.reg(), | |
| 4869 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); | |
| 4870 // Literal array. | |
| 4871 frame_->Push(&literals); | |
| 4872 // Literal index. | |
| 4873 frame_->Push(Smi::FromInt(node->literal_index())); | |
| 4874 // Constant properties. | |
| 4875 frame_->Push(node->constant_properties()); | |
| 4876 // Should the object literal have fast elements? | |
| 4877 frame_->Push(Smi::FromInt(node->fast_elements() ? 1 : 0)); | |
| 4878 Result clone; | |
| 4879 if (node->depth() > 1) { | |
| 4880 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 4); | |
| 4881 } else { | |
| 4882 clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); | |
| 4883 } | |
| 4884 frame_->Push(&clone); | |
| 4885 | |
| 4886 // Mark all computed expressions that are bound to a key that | |
| 4887 // is shadowed by a later occurrence of the same key. For the | |
| 4888 // marked expressions, no store code is emitted. | |
| 4889 node->CalculateEmitStore(); | |
| 4890 | |
| 4891 for (int i = 0; i < node->properties()->length(); i++) { | |
| 4892 ObjectLiteral::Property* property = node->properties()->at(i); | |
| 4893 switch (property->kind()) { | |
| 4894 case ObjectLiteral::Property::CONSTANT: | |
| 4895 break; | |
| 4896 case ObjectLiteral::Property::MATERIALIZED_LITERAL: | |
| 4897 if (CompileTimeValue::IsCompileTimeValue(property->value())) break; | |
| 4898 // else fall through. | |
| 4899 case ObjectLiteral::Property::COMPUTED: { | |
| 4900 Handle<Object> key(property->key()->handle()); | |
| 4901 if (key->IsSymbol()) { | |
| 4902 // Duplicate the object as the IC receiver. | |
| 4903 frame_->Dup(); | |
| 4904 Load(property->value()); | |
| 4905 if (property->emit_store()) { | |
| 4906 Result ignored = | |
| 4907 frame_->CallStoreIC(Handle<String>::cast(key), false, | |
| 4908 strict_mode_flag()); | |
| 4909 // A test rax instruction following the store IC call would | |
| 4910 // indicate the presence of an inlined version of the | |
| 4911 // store. Add a nop to indicate that there is no such | |
| 4912 // inlined version. | |
| 4913 __ nop(); | |
| 4914 } else { | |
| 4915 frame_->Drop(2); | |
| 4916 } | |
| 4917 break; | |
| 4918 } | |
| 4919 // Fall through | |
| 4920 } | |
| 4921 case ObjectLiteral::Property::PROTOTYPE: { | |
| 4922 // Duplicate the object as an argument to the runtime call. | |
| 4923 frame_->Dup(); | |
| 4924 Load(property->key()); | |
| 4925 Load(property->value()); | |
| 4926 if (property->emit_store()) { | |
| 4927 frame_->Push(Smi::FromInt(NONE)); // PropertyAttributes | |
| 4928 // Ignore the result. | |
| 4929 Result ignored = frame_->CallRuntime(Runtime::kSetProperty, 4); | |
| 4930 } else { | |
| 4931 frame_->Drop(3); | |
| 4932 } | |
| 4933 break; | |
| 4934 } | |
| 4935 case ObjectLiteral::Property::SETTER: { | |
| 4936 // Duplicate the object as an argument to the runtime call. | |
| 4937 frame_->Dup(); | |
| 4938 Load(property->key()); | |
| 4939 frame_->Push(Smi::FromInt(1)); | |
| 4940 Load(property->value()); | |
| 4941 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4); | |
| 4942 // Ignore the result. | |
| 4943 break; | |
| 4944 } | |
| 4945 case ObjectLiteral::Property::GETTER: { | |
| 4946 // Duplicate the object as an argument to the runtime call. | |
| 4947 frame_->Dup(); | |
| 4948 Load(property->key()); | |
| 4949 frame_->Push(Smi::FromInt(0)); | |
| 4950 Load(property->value()); | |
| 4951 Result ignored = frame_->CallRuntime(Runtime::kDefineAccessor, 4); | |
| 4952 // Ignore the result. | |
| 4953 break; | |
| 4954 } | |
| 4955 default: UNREACHABLE(); | |
| 4956 } | |
| 4957 } | |
| 4958 } | |
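
CalculateEmitStore, invoked above, marks properties whose key is shadowed by a later duplicate so that only the last store for each key is emitted; the earlier stores are unobservable. A sketch of that marking decision (the helper name is hypothetical):

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // For duplicated keys in an object literal only the last store is
    // observable, so earlier ones can be skipped.
    std::vector<bool> EmitStoreFlags(const std::vector<std::string>& keys) {
      std::unordered_map<std::string, size_t> last;
      for (size_t i = 0; i < keys.size(); i++) last[keys[i]] = i;
      std::vector<bool> emit(keys.size());
      for (size_t i = 0; i < keys.size(); i++) emit[i] = (last[keys[i]] == i);
      return emit;
    }

    int main() {
      std::vector<std::string> keys = {"x", "y", "x"};  // {x:1, y:2, x:3}
      std::vector<bool> emit = EmitStoreFlags(keys);
      for (size_t i = 0; i < keys.size(); i++)
        std::printf("%s: %s\n", keys[i].c_str(), emit[i] ? "store" : "skip");
      return 0;
    }
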
| 4959 | |
| 4960 | |
| 4961 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { | |
| 4962 Comment cmnt(masm_, "[ ArrayLiteral"); | |
| 4963 | |
| 4964 // Load a writable copy of the function of this activation in a | |
| 4965 // register. | |
| 4966 frame_->PushFunction(); | |
| 4967 Result literals = frame_->Pop(); | |
| 4968 literals.ToRegister(); | |
| 4969 frame_->Spill(literals.reg()); | |
| 4970 | |
| 4971 // Load the literals array of the function. | |
| 4972 __ movq(literals.reg(), | |
| 4973 FieldOperand(literals.reg(), JSFunction::kLiteralsOffset)); | |
| 4974 | |
| 4975 frame_->Push(&literals); | |
| 4976 frame_->Push(Smi::FromInt(node->literal_index())); | |
| 4977 frame_->Push(node->constant_elements()); | |
| 4978 int length = node->values()->length(); | |
| 4979 Result clone; | |
| 4980 if (node->constant_elements()->map() == HEAP->fixed_cow_array_map()) { | |
| 4981 FastCloneShallowArrayStub stub( | |
| 4982 FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); | |
| 4983 clone = frame_->CallStub(&stub, 3); | |
| 4984 Counters* counters = masm()->isolate()->counters(); | |
| 4985 __ IncrementCounter(counters->cow_arrays_created_stub(), 1); | |
| 4986 } else if (node->depth() > 1) { | |
| 4987 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3); | |
| 4988 } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { | |
| 4989 clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3); | |
| 4990 } else { | |
| 4991 FastCloneShallowArrayStub stub( | |
| 4992 FastCloneShallowArrayStub::CLONE_ELEMENTS, length); | |
| 4993 clone = frame_->CallStub(&stub, 3); | |
| 4994 } | |
| 4995 frame_->Push(&clone); | |
| 4996 | |
| 4997 // Generate code to set the elements in the array that are not | |
| 4998 // literals. | |
| 4999 for (int i = 0; i < length; i++) { | |
| 5000 Expression* value = node->values()->at(i); | |
| 5001 | |
| 5002 if (!CompileTimeValue::ArrayLiteralElementNeedsInitialization(value)) { | |
| 5003 continue; | |
| 5004 } | |
| 5005 | |
| 5006 // The property must be set by generated code. | |
| 5007 Load(value); | |
| 5008 | |
| 5009 // Get the property value off the stack. | |
| 5010 Result prop_value = frame_->Pop(); | |
| 5011 prop_value.ToRegister(); | |
| 5012 | |
| 5013 // Fetch the array literal while leaving a copy on the stack and | |
| 5014 // use it to get the elements array. | |
| 5015 frame_->Dup(); | |
| 5016 Result elements = frame_->Pop(); | |
| 5017 elements.ToRegister(); | |
| 5018 frame_->Spill(elements.reg()); | |
| 5019 // Get the elements FixedArray. | |
| 5020 __ movq(elements.reg(), | |
| 5021 FieldOperand(elements.reg(), JSObject::kElementsOffset)); | |
| 5022 | |
| 5023 // Write to the indexed properties array. | |
| 5024 int offset = i * kPointerSize + FixedArray::kHeaderSize; | |
| 5025 __ movq(FieldOperand(elements.reg(), offset), prop_value.reg()); | |
| 5026 | |
| 5027 // Update the write barrier for the array address. | |
| 5028 frame_->Spill(prop_value.reg()); // Overwritten by the write barrier. | |
| 5029 Result scratch = allocator_->Allocate(); | |
| 5030 ASSERT(scratch.is_valid()); | |
| 5031 __ RecordWrite(elements.reg(), offset, prop_value.reg(), scratch.reg()); | |
| 5032 } | |
| 5033 } | |
| 5034 | |
| 5035 | |
| 5036 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { | |
| 5037 ASSERT(!in_spilled_code()); | |
| 5038 // Call runtime routine to allocate the catch extension object and | |
| 5039 // assign the exception value to the catch variable. | |
| 5040 Comment cmnt(masm_, "[ CatchExtensionObject"); | |
| 5041 Load(node->key()); | |
| 5042 Load(node->value()); | |
| 5043 Result result = | |
| 5044 frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); | |
| 5045 frame_->Push(&result); | |
| 5046 } | |
| 5047 | |
| 5048 | |
| 5049 void CodeGenerator::EmitSlotAssignment(Assignment* node) { | |
| 5050 #ifdef DEBUG | |
| 5051 int original_height = frame()->height(); | |
| 5052 #endif | |
| 5053 Comment cmnt(masm(), "[ Variable Assignment"); | |
| 5054 Variable* var = node->target()->AsVariableProxy()->AsVariable(); | |
| 5055 ASSERT(var != NULL); | |
| 5056 Slot* slot = var->AsSlot(); | |
| 5057 ASSERT(slot != NULL); | |
| 5058 | |
| 5059 // Evaluate the right-hand side. | |
| 5060 if (node->is_compound()) { | |
| 5061 // For a compound assignment the right-hand side is a binary operation | |
| 5062 // between the current property value and the actual right-hand side. | |
| 5063 LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); | |
| 5064 Load(node->value()); | |
| 5065 | |
| 5066 // Perform the binary operation. | |
| 5067 bool overwrite_value = node->value()->ResultOverwriteAllowed(); | |
| 5068 // Construct the implicit binary operation. | |
| 5069 BinaryOperation expr(node); | |
| 5070 GenericBinaryOperation(&expr, | |
| 5071 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 5072 } else { | |
| 5073 // For non-compound assignment just load the right-hand side. | |
| 5074 Load(node->value()); | |
| 5075 } | |
| 5076 | |
| 5077 // Perform the assignment. | |
| 5078 if (var->mode() != Variable::CONST || node->op() == Token::INIT_CONST) { | |
| 5079 CodeForSourcePosition(node->position()); | |
| 5080 StoreToSlot(slot, | |
| 5081 node->op() == Token::INIT_CONST ? CONST_INIT : NOT_CONST_INIT); | |
| 5082 } | |
| 5083 ASSERT(frame()->height() == original_height + 1); | |
| 5084 } | |
| 5085 | |
| 5086 | |
| 5087 void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) { | |
| 5088 #ifdef DEBUG | |
| 5089 int original_height = frame()->height(); | |
| 5090 #endif | |
| 5091 Comment cmnt(masm(), "[ Named Property Assignment"); | |
| 5092 Variable* var = node->target()->AsVariableProxy()->AsVariable(); | |
| 5093 Property* prop = node->target()->AsProperty(); | |
| 5094 ASSERT(var == NULL || (prop == NULL && var->is_global())); | |
| 5095 | |
| 5096 // Initialize name and evaluate the receiver sub-expression if necessary. If | |
| 5097 // the receiver is trivial it is not placed on the stack at this point, but | |
| 5098 // loaded whenever actually needed. | |
| 5099 Handle<String> name; | |
| 5100 bool is_trivial_receiver = false; | |
| 5101 if (var != NULL) { | |
| 5102 name = var->name(); | |
| 5103 } else { | |
| 5104 Literal* lit = prop->key()->AsLiteral(); | |
| 5105 ASSERT_NOT_NULL(lit); | |
| 5106 name = Handle<String>::cast(lit->handle()); | |
| 5107 // Do not materialize the receiver on the frame if it is trivial. | |
| 5108 is_trivial_receiver = prop->obj()->IsTrivial(); | |
| 5109 if (!is_trivial_receiver) Load(prop->obj()); | |
| 5110 } | |
| 5111 | |
| 5112 // Change to the slow case at the beginning of an initialization block to | |
| 5113 // avoid the quadratic behavior of repeatedly adding fast properties. | |
| 5114 if (node->starts_initialization_block()) { | |
| 5115 // An initialization block consists of assignments of the form expr.x = ..., | |
| 5116 // so this can never be an assignment to a variable; there must be a | |
| 5117 // receiver object. | |
| 5118 ASSERT_EQ(NULL, var); | |
| 5119 if (is_trivial_receiver) { | |
| 5120 frame()->Push(prop->obj()); | |
| 5121 } else { | |
| 5122 frame()->Dup(); | |
| 5123 } | |
| 5124 Result ignored = frame()->CallRuntime(Runtime::kToSlowProperties, 1); | |
| 5125 } | |
| 5126 | |
| 5127 // Change to the fast case at the end of an initialization block. To prepare | |
| 5128 // for that, add an extra copy of the receiver to the frame, so that it can | |
| 5129 // be converted back to the fast case after the assignment. | |
| 5130 if (node->ends_initialization_block() && !is_trivial_receiver) { | |
| 5131 frame()->Dup(); | |
| 5132 } | |
| 5133 | |
| 5134 // Stack layout: | |
| 5135 // [tos] : receiver (only materialized if non-trivial) | |
| 5136 // [tos+1] : receiver if at the end of an initialization block | |
| 5137 | |
| 5138 // Evaluate the right-hand side. | |
| 5139 if (node->is_compound()) { | |
| 5140 // For a compound assignment the right-hand side is a binary operation | |
| 5141 // between the current property value and the actual right-hand side. | |
| 5142 if (is_trivial_receiver) { | |
| 5143 frame()->Push(prop->obj()); | |
| 5144 } else if (var != NULL) { | |
| 5145 // The LoadIC stub expects the object in rax. | |
| 5146 // Freeing rax causes the code generator to load the global into it. | |
| 5147 frame_->Spill(rax); | |
| 5148 LoadGlobal(); | |
| 5149 } else { | |
| 5150 frame()->Dup(); | |
| 5151 } | |
| 5152 Result value = EmitNamedLoad(name, var != NULL); | |
| 5153 frame()->Push(&value); | |
| 5154 Load(node->value()); | |
| 5155 | |
| 5156 bool overwrite_value = node->value()->ResultOverwriteAllowed(); | |
| 5157 // Construct the implicit binary operation. | |
| 5158 BinaryOperation expr(node); | |
| 5159 GenericBinaryOperation(&expr, | |
| 5160 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 5161 } else { | |
| 5162 // For non-compound assignment just load the right-hand side. | |
| 5163 Load(node->value()); | |
| 5164 } | |
| 5165 | |
| 5166 // Stack layout: | |
| 5167 // [tos] : value | |
| 5168 // [tos+1] : receiver (only materialized if non-trivial) | |
| 5169 // [tos+2] : receiver if at the end of an initialization block | |
| 5170 | |
| 5171 // Perform the assignment. It is safe to ignore constants here. | |
| 5172 ASSERT(var == NULL || var->mode() != Variable::CONST); | |
| 5173 ASSERT_NE(Token::INIT_CONST, node->op()); | |
| 5174 if (is_trivial_receiver) { | |
| 5175 Result value = frame()->Pop(); | |
| 5176 frame()->Push(prop->obj()); | |
| 5177 frame()->Push(&value); | |
| 5178 } | |
| 5179 CodeForSourcePosition(node->position()); | |
| 5180 bool is_contextual = (var != NULL); | |
| 5181 Result answer = EmitNamedStore(name, is_contextual); | |
| 5182 frame()->Push(&answer); | |
| 5183 | |
| 5184 // Stack layout: | |
| 5185 // [tos] : result | |
| 5186 // [tos+1] : receiver if at the end of an initialization block | |
| 5187 | |
| 5188 if (node->ends_initialization_block()) { | |
| 5189 ASSERT_EQ(NULL, var); | |
| 5190 // The argument to the runtime call is the receiver. | |
| 5191 if (is_trivial_receiver) { | |
| 5192 frame()->Push(prop->obj()); | |
| 5193 } else { | |
| 5194 // A copy of the receiver is below the value of the assignment. Swap | |
| 5195 // the receiver and the value of the assignment expression. | |
| 5196 Result result = frame()->Pop(); | |
| 5197 Result receiver = frame()->Pop(); | |
| 5198 frame()->Push(&result); | |
| 5199 frame()->Push(&receiver); | |
| 5200 } | |
| 5201 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); | |
| 5202 } | |
| 5203 | |
| 5204 // Stack layout: | |
| 5205 // [tos] : result | |
| 5206 | |
| 5207 ASSERT_EQ(frame()->height(), original_height + 1); | |
| 5208 } | |
| 5209 | |
| 5210 | |
| 5211 void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) { | |
| 5212 #ifdef DEBUG | |
| 5213 int original_height = frame()->height(); | |
| 5214 #endif | |
| 5215 Comment cmnt(masm_, "[ Keyed Property Assignment"); | |
| 5216 Property* prop = node->target()->AsProperty(); | |
| 5217 ASSERT_NOT_NULL(prop); | |
| 5218 | |
| 5219 // Evaluate the receiver subexpression. | |
| 5220 Load(prop->obj()); | |
| 5221 | |
| 5222 // Change to the slow case at the beginning of an initialization block to | |
| 5223 // avoid the quadratic behavior of repeatedly adding fast properties. | |
| 5224 if (node->starts_initialization_block()) { | |
| 5225 frame_->Dup(); | |
| 5226 Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1); | |
| 5227 } | |
| 5228 | |
| 5229 // Change to the fast case at the end of an initialization block. To prepare | |
| 5230 // for that, add an extra copy of the receiver to the frame, so that it can | |
| 5231 // be converted back to the fast case after the assignment. | |
| 5232 if (node->ends_initialization_block()) { | |
| 5233 frame_->Dup(); | |
| 5234 } | |
| 5235 | |
| 5236 // Evaluate the key subexpression. | |
| 5237 Load(prop->key()); | |
| 5238 | |
| 5239 // Stack layout: | |
| 5240 // [tos] : key | |
| 5241 // [tos+1] : receiver | |
| 5242 // [tos+2] : receiver if at the end of an initialization block | |
| 5243 | |
| 5244 // Evaluate the right-hand side. | |
| 5245 if (node->is_compound()) { | |
| 5246 // For a compound assignment the right-hand side is a binary operation | |
| 5247 // between the current property value and the actual right-hand side. | |
| 5248 // Duplicate receiver and key for loading the current property value. | |
| 5249 frame()->PushElementAt(1); | |
| 5250 frame()->PushElementAt(1); | |
| 5251 Result value = EmitKeyedLoad(); | |
| 5252 frame()->Push(&value); | |
| 5253 Load(node->value()); | |
| 5254 | |
| 5255 // Perform the binary operation. | |
| 5256 bool overwrite_value = node->value()->ResultOverwriteAllowed(); | |
| 5257 BinaryOperation expr(node); | |
| 5258 GenericBinaryOperation(&expr, | |
| 5259 overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 5260 } else { | |
| 5261 // For non-compound assignment just load the right-hand side. | |
| 5262 Load(node->value()); | |
| 5263 } | |
| 5264 | |
| 5265 // Stack layout: | |
| 5266 // [tos] : value | |
| 5267 // [tos+1] : key | |
| 5268 // [tos+2] : receiver | |
| 5269 // [tos+3] : receiver if at the end of an initialization block | |
| 5270 | |
| 5271 // Perform the assignment. It is safe to ignore constants here. | |
| 5272 ASSERT(node->op() != Token::INIT_CONST); | |
| 5273 CodeForSourcePosition(node->position()); | |
| 5274 Result answer = EmitKeyedStore(prop->key()->type()); | |
| 5275 frame()->Push(&answer); | |
| 5276 | |
| 5277 // Stack layout: | |
| 5278 // [tos] : result | |
| 5279 // [tos+1] : receiver if at the end of an initialization block | |
| 5280 | |
| 5281 // Change to fast case at the end of an initialization block. | |
| 5282 if (node->ends_initialization_block()) { | |
| 5283 // The argument to the runtime call is the extra copy of the receiver, | |
| 5284 // which is below the value of the assignment. Swap the receiver and | |
| 5285 // the value of the assignment expression. | |
| 5286 Result result = frame()->Pop(); | |
| 5287 Result receiver = frame()->Pop(); | |
| 5288 frame()->Push(&result); | |
| 5289 frame()->Push(&receiver); | |
| 5290 Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1); | |
| 5291 } | |
| 5292 | |
| 5293 // Stack layout: | |
| 5294 // [tos] : result | |
| 5295 | |
| 5296 ASSERT(frame()->height() == original_height + 1); | |
| 5297 } | |
| 5298 | |
| 5299 | |
| 5300 void CodeGenerator::VisitAssignment(Assignment* node) { | |
| 5301 #ifdef DEBUG | |
| 5302 int original_height = frame()->height(); | |
| 5303 #endif | |
| 5304 Variable* var = node->target()->AsVariableProxy()->AsVariable(); | |
| 5305 Property* prop = node->target()->AsProperty(); | |
| 5306 | |
| 5307 if (var != NULL && !var->is_global()) { | |
| 5308 EmitSlotAssignment(node); | |
| 5309 | |
| 5310 } else if ((prop != NULL && prop->key()->IsPropertyName()) || | |
| 5311 (var != NULL && var->is_global())) { | |
| 5312 // Properties whose keys are property names and global variables are | |
| 5313 // treated as named property references. We do not need to consider | |
| 5314 // global 'this' because it is not a valid left-hand side. | |
| 5315 EmitNamedPropertyAssignment(node); | |
| 5316 | |
| 5317 } else if (prop != NULL) { | |
| 5318 // Other properties (including rewritten parameters for a function that | |
| 5319 // uses arguments) are keyed property assignments. | |
| 5320 EmitKeyedPropertyAssignment(node); | |
| 5321 | |
| 5322 } else { | |
| 5323 // Invalid left-hand side. | |
| 5324 Load(node->target()); | |
| 5325 Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1); | |
| 5326 // The runtime call doesn't actually return but the code generator will | |
| 5327 // still generate code and expects a certain frame height. | |
| 5328 frame()->Push(&result); | |
| 5329 } | |
| 5330 | |
| 5331 ASSERT(frame()->height() == original_height + 1); | |
| 5332 } | |
| 5333 | |
| 5334 | |
| 5335 void CodeGenerator::VisitThrow(Throw* node) { | |
| 5336 Comment cmnt(masm_, "[ Throw"); | |
| 5337 Load(node->exception()); | |
| 5338 Result result = frame_->CallRuntime(Runtime::kThrow, 1); | |
| 5339 frame_->Push(&result); | |
| 5340 } | |
| 5341 | |
| 5342 | |
| 5343 void CodeGenerator::VisitProperty(Property* node) { | |
| 5344 Comment cmnt(masm_, "[ Property"); | |
| 5345 Reference property(this, node); | |
| 5346 property.GetValue(); | |
| 5347 } | |
| 5348 | |
| 5349 | |
| 5350 void CodeGenerator::VisitCall(Call* node) { | |
| 5351 Comment cmnt(masm_, "[ Call"); | |
| 5352 | |
| 5353 ZoneList<Expression*>* args = node->arguments(); | |
| 5354 | |
| 5355 // Check if the function is a variable or a property. | |
| 5356 Expression* function = node->expression(); | |
| 5357 Variable* var = function->AsVariableProxy()->AsVariable(); | |
| 5358 Property* property = function->AsProperty(); | |
| 5359 | |
| 5360 // ------------------------------------------------------------------------ | |
| 5361 // Fast-case: Use inline caching. | |
| 5362 // --- | |
| 5363 // According to ECMA-262, section 11.2.3, page 44, the function to call | |
| 5364 // must be resolved after the arguments have been evaluated. The IC code | |
| 5365 // automatically handles this by loading the arguments before the function | |
| 5366 // is resolved in cache misses (this also holds for megamorphic calls). | |
| 5367 // ------------------------------------------------------------------------ | |
| 5368 | |
| 5369 if (var != NULL && var->is_possibly_eval()) { | |
| 5370 // ---------------------------------- | |
| 5371 // JavaScript example: 'eval(arg)' // eval is not known to be shadowed | |
| 5372 // ---------------------------------- | |
| 5373 | |
| 5374 // In a call to eval, we first call %ResolvePossiblyDirectEval to | |
| 5375 // resolve the function we need to call and the receiver of the | |
| 5376 // call. Then we call the resolved function using the given | |
| 5377 // arguments. | |
| 5378 | |
| 5379 // Prepare the stack for the call to the resolved function. | |
| 5380 Load(function); | |
| 5381 | |
| 5382 // Allocate a frame slot for the receiver. | |
| 5383 frame_->Push(FACTORY->undefined_value()); | |
| 5384 | |
| 5385 // Load the arguments. | |
| 5386 int arg_count = args->length(); | |
| 5387 for (int i = 0; i < arg_count; i++) { | |
| 5388 Load(args->at(i)); | |
| 5389 frame_->SpillTop(); | |
| 5390 } | |
| 5391 | |
| 5392 // Result to hold the result of the function resolution and the | |
| 5393 // final result of the eval call. | |
| 5394 Result result; | |
| 5395 | |
| 5396 // If we know that eval can only be shadowed by eval-introduced | |
| 5397 // variables, we attempt to load the global eval function directly | |
| 5398 // in generated code. If we succeed, there is no need to perform a | |
| 5399 // context lookup in the runtime system. | |
| 5400 JumpTarget done; | |
| 5401 if (var->AsSlot() != NULL && var->mode() == Variable::DYNAMIC_GLOBAL) { | |
| 5402 ASSERT(var->AsSlot()->type() == Slot::LOOKUP); | |
| 5403 JumpTarget slow; | |
| 5404 // Prepare the stack for the call to | |
| 5405 // ResolvePossiblyDirectEvalNoLookup by pushing the loaded | |
| 5406 // function, the first argument to the eval call and the | |
| 5407 // receiver. | |
| 5408 Result fun = LoadFromGlobalSlotCheckExtensions(var->AsSlot(), | |
| 5409 NOT_INSIDE_TYPEOF, | |
| 5410 &slow); | |
| 5411 frame_->Push(&fun); | |
| 5412 if (arg_count > 0) { | |
| 5413 frame_->PushElementAt(arg_count); | |
| 5414 } else { | |
| 5415 frame_->Push(FACTORY->undefined_value()); | |
| 5416 } | |
| 5417 frame_->PushParameterAt(-1); | |
| 5418 | |
| 5419 // Push the strict mode flag. | |
| 5420 frame_->Push(Smi::FromInt(strict_mode_flag())); | |
| 5421 | |
| 5422 // Resolve the call. | |
| 5423 result = | |
| 5424 frame_->CallRuntime(Runtime::kResolvePossiblyDirectEvalNoLookup, 4); | |
| 5425 | |
| 5426 done.Jump(&result); | |
| 5427 slow.Bind(); | |
| 5428 } | |
| 5429 | |
| 5430 // Prepare the stack for the call to ResolvePossiblyDirectEval by | |
| 5431 // pushing the loaded function, the first argument to the eval | |
| 5432 // call and the receiver. | |
| 5433 frame_->PushElementAt(arg_count + 1); | |
| 5434 if (arg_count > 0) { | |
| 5435 frame_->PushElementAt(arg_count); | |
| 5436 } else { | |
| 5437 frame_->Push(FACTORY->undefined_value()); | |
| 5438 } | |
| 5439 frame_->PushParameterAt(-1); | |
| 5440 | |
| 5441 // Push the strict mode flag. | |
| 5442 frame_->Push(Smi::FromInt(strict_mode_flag())); | |
| 5443 | |
| 5444 // Resolve the call. | |
| 5445 result = frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 4); | |
| 5446 | |
| 5447 // If we generated fast-case code bind the jump-target where fast | |
| 5448 // and slow case merge. | |
| 5449 if (done.is_linked()) done.Bind(&result); | |
| 5450 | |
| 5451 // The runtime call returns a pair of values in rax (function) and | |
| 5452 // rdx (receiver). Touch up the stack with the right values. | |
| 5453 Result receiver = allocator_->Allocate(rdx); | |
| 5454 frame_->SetElementAt(arg_count + 1, &result); | |
| 5455 frame_->SetElementAt(arg_count, &receiver); | |
| 5456 receiver.Unuse(); | |
| 5457 | |
| 5458 // Call the function. | |
| 5459 CodeForSourcePosition(node->position()); | |
| 5460 InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 5461 CallFunctionStub call_function(arg_count, in_loop, RECEIVER_MIGHT_BE_VALUE); | |
| 5462 result = frame_->CallStub(&call_function, arg_count + 1); | |
| 5463 | |
| 5464 // Restore the context and overwrite the function on the stack with | |
| 5465 // the result. | |
| 5466 frame_->RestoreContextRegister(); | |
| 5467 frame_->SetElementAt(0, &result); | |
| 5468 | |
| 5469 } else if (var != NULL && !var->is_this() && var->is_global()) { | |
| 5470 // ---------------------------------- | |
| 5471 // JavaScript example: 'foo(1, 2, 3)' // foo is global | |
| 5472 // ---------------------------------- | |
| 5473 | |
| 5474 // Pass the global object as the receiver and let the IC stub | |
| 5475 // patch the stack to use the global proxy as 'this' in the | |
| 5476 // invoked function. | |
| 5477 LoadGlobal(); | |
| 5478 | |
| 5479 // Load the arguments. | |
| 5480 int arg_count = args->length(); | |
| 5481 for (int i = 0; i < arg_count; i++) { | |
| 5482 Load(args->at(i)); | |
| 5483 frame_->SpillTop(); | |
| 5484 } | |
| 5485 | |
| 5486 // Push the name of the function on the frame. | |
| 5487 frame_->Push(var->name()); | |
| 5488 | |
| 5489 // Call the IC initialization code. | |
| 5490 CodeForSourcePosition(node->position()); | |
| 5491 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET_CONTEXT, | |
| 5492 arg_count, | |
| 5493 loop_nesting()); | |
| 5494 frame_->RestoreContextRegister(); | |
| 5495 // Replace the function on the stack with the result. | |
| 5496 frame_->Push(&result); | |
| 5497 | |
| 5498 } else if (var != NULL && var->AsSlot() != NULL && | |
| 5499 var->AsSlot()->type() == Slot::LOOKUP) { | |
| 5500 // ---------------------------------- | |
| 5501 // JavaScript examples: | |
| 5502 // | |
| 5503 // with (obj) foo(1, 2, 3) // foo may be in obj. | |
| 5504 // | |
| 5505 // function f() {}; | |
| 5506 // function g() { | |
| 5507 // eval(...); | |
| 5508 // f(); // f could be in extension object. | |
| 5509 // } | |
| 5510 // ---------------------------------- | |
| 5511 | |
| 5512 JumpTarget slow, done; | |
| 5513 Result function; | |
| 5514 | |
| 5515 // Generate fast case for loading functions from slots that | |
| 5516 // correspond to local/global variables or arguments unless they | |
| 5517 // are shadowed by eval-introduced bindings. | |
| 5518 EmitDynamicLoadFromSlotFastCase(var->AsSlot(), | |
| 5519 NOT_INSIDE_TYPEOF, | |
| 5520 &function, | |
| 5521 &slow, | |
| 5522 &done); | |
| 5523 | |
| 5524 slow.Bind(); | |
| 5525 // Load the function from the context. Sync the frame so we can | |
| 5526 // push the arguments directly into place. | |
| 5527 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 5528 frame_->EmitPush(rsi); | |
| 5529 frame_->EmitPush(var->name()); | |
| 5530 frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
| 5531 // The runtime call returns a pair of values in rax and rdx. The | |
| 5532 // looked-up function is in rax and the receiver is in rdx. These | |
| 5533 // register references are not reference counted here. We spill them | |
| 5534 // eagerly since they are arguments to an inevitable call (and | |
| 5535 // cannot be shared by the arguments). | |
| 5536 ASSERT(!allocator()->is_used(rax)); | |
| 5537 frame_->EmitPush(rax); | |
| 5538 | |
| 5539 // Load the receiver. | |
| 5540 ASSERT(!allocator()->is_used(rdx)); | |
| 5541 frame_->EmitPush(rdx); | |
| 5542 | |
| 5543 // If fast case code has been generated, emit code to push the | |
| 5544 // function and receiver and have the slow path jump around this | |
| 5545 // code. | |
| 5546 if (done.is_linked()) { | |
| 5547 JumpTarget call; | |
| 5548 call.Jump(); | |
| 5549 done.Bind(&function); | |
| 5550 frame_->Push(&function); | |
| 5551 LoadGlobalReceiver(); | |
| 5552 call.Bind(); | |
| 5553 } | |
| 5554 | |
| 5555 // Call the function. | |
| 5556 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); | |
| 5557 | |
| 5558 } else if (property != NULL) { | |
| 5559 // Check if the key is a literal string. | |
| 5560 Literal* literal = property->key()->AsLiteral(); | |
| 5561 | |
| 5562 if (literal != NULL && literal->handle()->IsSymbol()) { | |
| 5563 // ------------------------------------------------------------------ | |
| 5564 // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)' | |
| 5565 // ------------------------------------------------------------------ | |
| 5566 | |
| 5567 Handle<String> name = Handle<String>::cast(literal->handle()); | |
| 5568 | |
| 5569 if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION && | |
| 5570 name->IsEqualTo(CStrVector("apply")) && | |
| 5571 args->length() == 2 && | |
| 5572 args->at(1)->AsVariableProxy() != NULL && | |
| 5573 args->at(1)->AsVariableProxy()->IsArguments()) { | |
| 5574 // Use the optimized Function.prototype.apply that avoids | |
| 5575 // allocating lazily allocated arguments objects. | |
| 5576 CallApplyLazy(property->obj(), | |
| 5577 args->at(0), | |
| 5578 args->at(1)->AsVariableProxy(), | |
| 5579 node->position()); | |
| 5580 | |
| 5581 } else { | |
| 5582 // Push the receiver onto the frame. | |
| 5583 Load(property->obj()); | |
| 5584 | |
| 5585 // Load the arguments. | |
| 5586 int arg_count = args->length(); | |
| 5587 for (int i = 0; i < arg_count; i++) { | |
| 5588 Load(args->at(i)); | |
| 5589 frame_->SpillTop(); | |
| 5590 } | |
| 5591 | |
| 5592 // Push the name of the function onto the frame. | |
| 5593 frame_->Push(name); | |
| 5594 | |
| 5595 // Call the IC initialization code. | |
| 5596 CodeForSourcePosition(node->position()); | |
| 5597 Result result = frame_->CallCallIC(RelocInfo::CODE_TARGET, | |
| 5598 arg_count, | |
| 5599 loop_nesting()); | |
| 5600 frame_->RestoreContextRegister(); | |
| 5601 frame_->Push(&result); | |
| 5602 } | |
| 5603 | |
| 5604 } else { | |
| 5605 // ------------------------------------------- | |
| 5606 // JavaScript example: 'array[index](1, 2, 3)' | |
| 5607 // ------------------------------------------- | |
| 5608 | |
| 5609 // Load the function to call from the property through a reference. | |
| 5610 if (property->is_synthetic()) { | |
| 5611 Reference ref(this, property, false); | |
| 5612 ref.GetValue(); | |
| 5613 // Use global object as receiver. | |
| 5614 LoadGlobalReceiver(); | |
| 5615 // Call the function. | |
| 5616 CallWithArguments(args, RECEIVER_MIGHT_BE_VALUE, node->position()); | |
| 5617 } else { | |
| 5618 // Push the receiver onto the frame. | |
| 5619 Load(property->obj()); | |
| 5620 | |
| 5621 // Load the name of the function. | |
| 5622 Load(property->key()); | |
| 5623 | |
| 5624 // Swap the name of the function and the receiver on the stack to follow | |
| 5625 // the calling convention for call ICs. | |
| 5626 Result key = frame_->Pop(); | |
| 5627 Result receiver = frame_->Pop(); | |
| 5628 frame_->Push(&key); | |
| 5629 frame_->Push(&receiver); | |
| 5630 key.Unuse(); | |
| 5631 receiver.Unuse(); | |
| 5632 | |
| 5633 // Load the arguments. | |
| 5634 int arg_count = args->length(); | |
| 5635 for (int i = 0; i < arg_count; i++) { | |
| 5636 Load(args->at(i)); | |
| 5637 frame_->SpillTop(); | |
| 5638 } | |
| 5639 | |
| 5640 // Place the key on top of stack and call the IC initialization code. | |
| 5641 frame_->PushElementAt(arg_count + 1); | |
| 5642 CodeForSourcePosition(node->position()); | |
| 5643 Result result = frame_->CallKeyedCallIC(RelocInfo::CODE_TARGET, | |
| 5644 arg_count, | |
| 5645 loop_nesting()); | |
| 5646 frame_->Drop(); // Drop the key still on the stack. | |
| 5647 frame_->RestoreContextRegister(); | |
| 5648 frame_->Push(&result); | |
| 5649 } | |
| 5650 } | |
| 5651 } else { | |
| 5652 // ---------------------------------- | |
| 5653 // JavaScript example: 'foo(1, 2, 3)' // foo is not global | |
| 5654 // ---------------------------------- | |
| 5655 | |
| 5656 // Load the function. | |
| 5657 Load(function); | |
| 5658 | |
| 5659 // Pass the global proxy as the receiver. | |
| 5660 LoadGlobalReceiver(); | |
| 5661 | |
| 5662 // Call the function. | |
| 5663 CallWithArguments(args, NO_CALL_FUNCTION_FLAGS, node->position()); | |
| 5664 } | |
| 5665 } | |
| 5666 | |
| 5667 | |
| 5668 void CodeGenerator::VisitCallNew(CallNew* node) { | |
| 5669 Comment cmnt(masm_, "[ CallNew"); | |
| 5670 | |
| 5671 // According to ECMA-262, section 11.2.2, page 44, the function | |
| 5672 // expression in new calls must be evaluated before the | |
| 5673 // arguments. This is different from ordinary calls, where the | |
| 5674 // actual function to call is resolved after the arguments have been | |
| 5675 // evaluated. | |
| 5676 | |
| 5677 // Push constructor on the stack. If it's not a function it's used as | |
| 5678 // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is | |
| 5679 // ignored. | |
| 5680 Load(node->expression()); | |
| 5681 | |
| 5682 // Push the arguments ("left-to-right") on the stack. | |
| 5683 ZoneList<Expression*>* args = node->arguments(); | |
| 5684 int arg_count = args->length(); | |
| 5685 for (int i = 0; i < arg_count; i++) { | |
| 5686 Load(args->at(i)); | |
| 5687 } | |
| 5688 | |
| 5689 // Call the construct call builtin that handles allocation and | |
| 5690 // constructor invocation. | |
| 5691 CodeForSourcePosition(node->position()); | |
| 5692 Result result = frame_->CallConstructor(arg_count); | |
| 5693 frame_->Push(&result); | |
| 5694 } | |
| 5695 | |
| 5696 | |
| 5697 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { | |
| 5698 ASSERT(args->length() == 1); | |
| 5699 Load(args->at(0)); | |
| 5700 Result value = frame_->Pop(); | |
| 5701 value.ToRegister(); | |
| 5702 ASSERT(value.is_valid()); | |
| 5703 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 5704 value.Unuse(); | |
| 5705 destination()->Split(is_smi); | |
| 5706 } | |
| 5707 | |
| 5708 | |
| 5709 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { | |
| 5710 // Conditionally generate a log call. | |
| 5711 // Args: | |
| 5712 // 0 (literal string): The type of logging (corresponds to the flags). | |
| 5713 // This is used to determine whether or not to generate the log call. | |
| 5714 // 1 (string): Format string. Access the string at argument index 2 | |
| 5715 // with '%2s' (see Logger::LogRuntime for all the formats). | |
| 5716 // 2 (array): Arguments to the format string. | |
| 5717 ASSERT_EQ(args->length(), 3); | |
| 5718 #ifdef ENABLE_LOGGING_AND_PROFILING | |
| 5719 if (ShouldGenerateLog(args->at(0))) { | |
| 5720 Load(args->at(1)); | |
| 5721 Load(args->at(2)); | |
| 5722 frame_->CallRuntime(Runtime::kLog, 2); | |
| 5723 } | |
| 5724 #endif | |
| 5725 // Finally, we're expected to leave a value on the top of the stack. | |
| 5726 frame_->Push(FACTORY->undefined_value()); | |
| 5727 } | |
| 5728 | |
| 5729 | |
| 5730 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { | |
| 5731 ASSERT(args->length() == 1); | |
| 5732 Load(args->at(0)); | |
| 5733 Result value = frame_->Pop(); | |
| 5734 value.ToRegister(); | |
| 5735 ASSERT(value.is_valid()); | |
| 5736 Condition non_negative_smi = masm_->CheckNonNegativeSmi(value.reg()); | |
| 5737 value.Unuse(); | |
| 5738 destination()->Split(non_negative_smi); | |
| 5739 } | |
| 5740 | |
| 5741 | |
| 5742 class DeferredStringCharCodeAt : public DeferredCode { | |
| 5743 public: | |
| 5744 DeferredStringCharCodeAt(Register object, | |
| 5745 Register index, | |
| 5746 Register scratch, | |
| 5747 Register result) | |
| 5748 : result_(result), | |
| 5749 char_code_at_generator_(object, | |
| 5750 index, | |
| 5751 scratch, | |
| 5752 result, | |
| 5753 &need_conversion_, | |
| 5754 &need_conversion_, | |
| 5755 &index_out_of_range_, | |
| 5756 STRING_INDEX_IS_NUMBER) {} | |
| 5757 | |
| 5758 StringCharCodeAtGenerator* fast_case_generator() { | |
| 5759 return &char_code_at_generator_; | |
| 5760 } | |
| 5761 | |
| 5762 virtual void Generate() { | |
| 5763 VirtualFrameRuntimeCallHelper call_helper(frame_state()); | |
| 5764 char_code_at_generator_.GenerateSlow(masm(), call_helper); | |
| 5765 | |
| 5766 __ bind(&need_conversion_); | |
| 5767 // Move the undefined value into the result register, which will | |
| 5768 // trigger conversion. | |
| 5769 __ LoadRoot(result_, Heap::kUndefinedValueRootIndex); | |
| 5770 __ jmp(exit_label()); | |
| 5771 | |
| 5772 __ bind(&index_out_of_range_); | |
| 5773 // When the index is out of range, the spec requires us to return | |
| 5774 // NaN. | |
| 5775 __ LoadRoot(result_, Heap::kNanValueRootIndex); | |
| 5776 __ jmp(exit_label()); | |
| 5777 } | |
| 5778 | |
| 5779 private: | |
| 5780 Register result_; | |
| 5781 | |
| 5782 Label need_conversion_; | |
| 5783 Label index_out_of_range_; | |
| 5784 | |
| 5785 StringCharCodeAtGenerator char_code_at_generator_; | |
| 5786 }; | |
| 5787 | |
| 5788 | |
| 5789 // This generates code that performs a String.prototype.charCodeAt() call | |
| 5790 // or returns undefined in order to trigger conversion. | |
| 5791 void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) { | |
| 5792 Comment(masm_, "[ GenerateStringCharCodeAt"); | |
| 5793 ASSERT(args->length() == 2); | |
| 5794 | |
| 5795 Load(args->at(0)); | |
| 5796 Load(args->at(1)); | |
| 5797 Result index = frame_->Pop(); | |
| 5798 Result object = frame_->Pop(); | |
| 5799 object.ToRegister(); | |
| 5800 index.ToRegister(); | |
| 5801 // We might mutate the object register. | |
| 5802 frame_->Spill(object.reg()); | |
| 5803 | |
| 5804 // We need two extra registers. | |
| 5805 Result result = allocator()->Allocate(); | |
| 5806 ASSERT(result.is_valid()); | |
| 5807 Result scratch = allocator()->Allocate(); | |
| 5808 ASSERT(scratch.is_valid()); | |
| 5809 | |
| 5810 DeferredStringCharCodeAt* deferred = | |
| 5811 new DeferredStringCharCodeAt(object.reg(), | |
| 5812 index.reg(), | |
| 5813 scratch.reg(), | |
| 5814 result.reg()); | |
| 5815 deferred->fast_case_generator()->GenerateFast(masm_); | |
| 5816 deferred->BindExit(); | |
| 5817 frame_->Push(&result); | |
| 5818 } | |
| 5819 | |
| 5820 | |
| 5821 class DeferredStringCharFromCode : public DeferredCode { | |
| 5822 public: | |
| 5823 DeferredStringCharFromCode(Register code, | |
| 5824 Register result) | |
| 5825 : char_from_code_generator_(code, result) {} | |
| 5826 | |
| 5827 StringCharFromCodeGenerator* fast_case_generator() { | |
| 5828 return &char_from_code_generator_; | |
| 5829 } | |
| 5830 | |
| 5831 virtual void Generate() { | |
| 5832 VirtualFrameRuntimeCallHelper call_helper(frame_state()); | |
| 5833 char_from_code_generator_.GenerateSlow(masm(), call_helper); | |
| 5834 } | |
| 5835 | |
| 5836 private: | |
| 5837 StringCharFromCodeGenerator char_from_code_generator_; | |
| 5838 }; | |
| 5839 | |
| 5840 | |
| 5841 // Generates code for creating a one-char string from a char code. | |
| 5842 void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) { | |
| 5843 Comment(masm_, "[ GenerateStringCharFromCode"); | |
| 5844 ASSERT(args->length() == 1); | |
| 5845 | |
| 5846 Load(args->at(0)); | |
| 5847 | |
| 5848 Result code = frame_->Pop(); | |
| 5849 code.ToRegister(); | |
| 5850 ASSERT(code.is_valid()); | |
| 5851 | |
| 5852 Result result = allocator()->Allocate(); | |
| 5853 ASSERT(result.is_valid()); | |
| 5854 | |
| 5855 DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode( | |
| 5856 code.reg(), result.reg()); | |
| 5857 deferred->fast_case_generator()->GenerateFast(masm_); | |
| 5858 deferred->BindExit(); | |
| 5859 frame_->Push(&result); | |
| 5860 } | |
| 5861 | |
| 5862 | |
| 5863 class DeferredStringCharAt : public DeferredCode { | |
| 5864 public: | |
| 5865 DeferredStringCharAt(Register object, | |
| 5866 Register index, | |
| 5867 Register scratch1, | |
| 5868 Register scratch2, | |
| 5869 Register result) | |
| 5870 : result_(result), | |
| 5871 char_at_generator_(object, | |
| 5872 index, | |
| 5873 scratch1, | |
| 5874 scratch2, | |
| 5875 result, | |
| 5876 &need_conversion_, | |
| 5877 &need_conversion_, | |
| 5878 &index_out_of_range_, | |
| 5879 STRING_INDEX_IS_NUMBER) {} | |
| 5880 | |
| 5881 StringCharAtGenerator* fast_case_generator() { | |
| 5882 return &char_at_generator_; | |
| 5883 } | |
| 5884 | |
| 5885 virtual void Generate() { | |
| 5886 VirtualFrameRuntimeCallHelper call_helper(frame_state()); | |
| 5887 char_at_generator_.GenerateSlow(masm(), call_helper); | |
| 5888 | |
| 5889 __ bind(&need_conversion_); | |
| 5890 // Move smi zero into the result register, which will trigger | |
| 5891 // conversion. | |
| 5892 __ Move(result_, Smi::FromInt(0)); | |
| 5893 __ jmp(exit_label()); | |
| 5894 | |
| 5895 __ bind(&index_out_of_range_); | |
| 5896 // When the index is out of range, the spec requires us to return | |
| 5897 // the empty string. | |
| 5898 __ LoadRoot(result_, Heap::kEmptyStringRootIndex); | |
| 5899 __ jmp(exit_label()); | |
| 5900 } | |
| 5901 | |
| 5902 private: | |
| 5903 Register result_; | |
| 5904 | |
| 5905 Label need_conversion_; | |
| 5906 Label index_out_of_range_; | |
| 5907 | |
| 5908 StringCharAtGenerator char_at_generator_; | |
| 5909 }; | |
| 5910 | |
| 5911 | |
| 5912 // This generates code that performs a String.prototype.charAt() call | |
| 5913 // or returns a smi in order to trigger conversion. | |
| 5914 void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) { | |
| 5915 Comment(masm_, "[ GenerateStringCharAt"); | |
| 5916 ASSERT(args->length() == 2); | |
| 5917 | |
| 5918 Load(args->at(0)); | |
| 5919 Load(args->at(1)); | |
| 5920 Result index = frame_->Pop(); | |
| 5921 Result object = frame_->Pop(); | |
| 5922 object.ToRegister(); | |
| 5923 index.ToRegister(); | |
| 5924 // We might mutate the object register. | |
| 5925 frame_->Spill(object.reg()); | |
| 5926 | |
| 5927 // We need three extra registers. | |
| 5928 Result result = allocator()->Allocate(); | |
| 5929 ASSERT(result.is_valid()); | |
| 5930 Result scratch1 = allocator()->Allocate(); | |
| 5931 ASSERT(scratch1.is_valid()); | |
| 5932 Result scratch2 = allocator()->Allocate(); | |
| 5933 ASSERT(scratch2.is_valid()); | |
| 5934 | |
| 5935 DeferredStringCharAt* deferred = | |
| 5936 new DeferredStringCharAt(object.reg(), | |
| 5937 index.reg(), | |
| 5938 scratch1.reg(), | |
| 5939 scratch2.reg(), | |
| 5940 result.reg()); | |
| 5941 deferred->fast_case_generator()->GenerateFast(masm_); | |
| 5942 deferred->BindExit(); | |
| 5943 frame_->Push(&result); | |
| 5944 } | |
| 5945 | |
| 5946 | |
| 5947 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { | |
| 5948 ASSERT(args->length() == 1); | |
| 5949 Load(args->at(0)); | |
| 5950 Result value = frame_->Pop(); | |
| 5951 value.ToRegister(); | |
| 5952 ASSERT(value.is_valid()); | |
| 5953 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 5954 destination()->false_target()->Branch(is_smi); | |
| 5955 // It is a heap object - get map. | |
| 5956 // Check if the object is a JS array or not. | |
| 5957 __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister); | |
| 5958 value.Unuse(); | |
| 5959 destination()->Split(equal); | |
| 5960 } | |
| 5961 | |
| 5962 | |
| 5963 void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) { | |
| 5964 ASSERT(args->length() == 1); | |
| 5965 Load(args->at(0)); | |
| 5966 Result value = frame_->Pop(); | |
| 5967 value.ToRegister(); | |
| 5968 ASSERT(value.is_valid()); | |
| 5969 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 5970 destination()->false_target()->Branch(is_smi); | |
| 5971 // It is a heap object - get map. | |
| 5972 // Check if the object is a regexp. | |
| 5973 __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister); | |
| 5974 value.Unuse(); | |
| 5975 destination()->Split(equal); | |
| 5976 } | |
| 5977 | |
| 5978 | |
| 5979 void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) { | |
| 5980 // This generates a fast version of: | |
| 5981 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp') | |
| 5982 ASSERT(args->length() == 1); | |
| 5983 Load(args->at(0)); | |
| 5984 Result obj = frame_->Pop(); | |
| 5985 obj.ToRegister(); | |
| 5986 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
| 5987 destination()->false_target()->Branch(is_smi); | |
| 5988 | |
| 5989 __ Move(kScratchRegister, FACTORY->null_value()); | |
| 5990 __ cmpq(obj.reg(), kScratchRegister); | |
| 5991 destination()->true_target()->Branch(equal); | |
| 5992 | |
| 5993 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); | |
| 5994 // Undetectable objects behave like undefined when tested with typeof. | |
| 5995 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
| 5996 Immediate(1 << Map::kIsUndetectable)); | |
| 5997 destination()->false_target()->Branch(not_zero); | |
| 5998 __ movzxbq(kScratchRegister, | |
| 5999 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset)); | |
| 6000 __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE)); | |
| 6001 destination()->false_target()->Branch(below); | |
| 6002 __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE)); | |
| 6003 obj.Unuse(); | |
| 6004 destination()->Split(below_equal); | |
| 6005 } | |
| 6006 | |
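Taken together, the checks just emitted compute a single predicate. A minimal editorial sketch in plain C++ (not part of this file), with the parameters standing in for the smi-tag, null, undetectable-bit, and instance-type tests performed in registers above:

    // Sketch of the predicate GenerateIsObject computes.
    bool IsObjectForTypeof(bool is_smi, bool is_null, bool is_undetectable,
                           int instance_type, int first_js_object_type,
                           int last_js_object_type) {
      if (is_smi) return false;
      if (is_null) return true;           // typeof null is 'object'.
      if (is_undetectable) return false;  // Undetectable tests as undefined.
      return first_js_object_type <= instance_type &&
             instance_type <= last_js_object_type;
    }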
| 6007 | |
| 6008 void CodeGenerator::GenerateIsSpecObject(ZoneList<Expression*>* args) { | |
| 6009 // This generates a fast version of: | |
| 6010 // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp' || | |
| 6011 // typeof(arg) === 'function'). | |
| 6012 // It includes undetectable objects (as opposed to IsObject). | |
| 6013 ASSERT(args->length() == 1); | |
| 6014 Load(args->at(0)); | |
| 6015 Result value = frame_->Pop(); | |
| 6016 value.ToRegister(); | |
| 6017 ASSERT(value.is_valid()); | |
| 6018 Condition is_smi = masm_->CheckSmi(value.reg()); | |
| 6019 destination()->false_target()->Branch(is_smi); | |
| 6020 // Check that this is an object. | |
| 6021 __ CmpObjectType(value.reg(), FIRST_JS_OBJECT_TYPE, kScratchRegister); | |
| 6022 value.Unuse(); | |
| 6023 destination()->Split(above_equal); | |
| 6024 } | |
| 6025 | |
| 6026 | |
| 6027 // Deferred code to check whether a String JavaScript wrapper object is safe | |
| 6028 // for using the default valueOf behavior. This code runs after the bit caching | |
| 6029 // this information in the map has been tested against the object's map, which | |
| 6030 // is expected in the map_result_ register. On return, map_result_ contains 1 | |
| 6031 // for true and 0 for false. | |
| 6032 class DeferredIsStringWrapperSafeForDefaultValueOf : public DeferredCode { | |
| 6033 public: | |
| 6034 DeferredIsStringWrapperSafeForDefaultValueOf(Register object, | |
| 6035 Register map_result, | |
| 6036 Register scratch1, | |
| 6037 Register scratch2) | |
| 6038 : object_(object), | |
| 6039 map_result_(map_result), | |
| 6040 scratch1_(scratch1), | |
| 6041 scratch2_(scratch2) { } | |
| 6042 | |
| 6043 virtual void Generate() { | |
| 6044 Label false_result; | |
| 6045 | |
| 6046 // Check that map is loaded as expected. | |
| 6047 if (FLAG_debug_code) { | |
| 6048 __ cmpq(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); | |
| 6049 __ Assert(equal, "Map not in expected register"); | |
| 6050 } | |
| 6051 | |
| 6052 // Check for fast case object. Generate false result for slow case object. | |
| 6053 __ movq(scratch1_, FieldOperand(object_, JSObject::kPropertiesOffset)); | |
| 6054 __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset)); | |
| 6055 __ CompareRoot(scratch1_, Heap::kHashTableMapRootIndex); | |
| 6056 __ j(equal, &false_result); | |
| 6057 | |
| 6058 // Look for the valueOf symbol in the descriptor array and indicate false if | |
| 6059 // it is found. The descriptor type is not checked, so a transition named | |
| 6060 // valueOf produces a false negative. | |
| 6061 __ movq(map_result_, | |
| 6062 FieldOperand(map_result_, Map::kInstanceDescriptorsOffset)); | |
| 6063 __ movq(scratch1_, FieldOperand(map_result_, FixedArray::kLengthOffset)); | |
| 6064 // map_result_: descriptor array | |
| 6065 // scratch1_: length of descriptor array | |
| 6066 // Calculate the end of the descriptor array. | |
| 6067 SmiIndex index = masm_->SmiToIndex(scratch2_, scratch1_, kPointerSizeLog2); | |
| 6068 __ lea(scratch1_, | |
| 6069 Operand( | |
| 6070 map_result_, index.reg, index.scale, FixedArray::kHeaderSize)); | |
| 6071 // Calculate location of the first key name. | |
| 6072 __ addq(map_result_, | |
| 6073 Immediate(FixedArray::kHeaderSize + | |
| 6074 DescriptorArray::kFirstIndex * kPointerSize)); | |
| 6075 // Loop through all the keys in the descriptor array. If one of them is the | |
| 6076 // symbol valueOf, the result is false. | |
| 6077 Label entry, loop; | |
| 6078 __ jmp(&entry); | |
| 6079 __ bind(&loop); | |
| 6080 __ movq(scratch2_, FieldOperand(map_result_, 0)); | |
| 6081 __ Cmp(scratch2_, FACTORY->value_of_symbol()); | |
| 6082 __ j(equal, &false_result); | |
| 6083 __ addq(map_result_, Immediate(kPointerSize)); | |
| 6084 __ bind(&entry); | |
| 6085 __ cmpq(map_result_, scratch1_); | |
| 6086 __ j(not_equal, &loop); | |
| 6087 | |
| 6088 // Reload map as register map_result_ was used as temporary above. | |
| 6089 __ movq(map_result_, FieldOperand(object_, HeapObject::kMapOffset)); | |
| 6090 | |
| 6091 // If no valueOf property is found on the object, check that its prototype | |
| 6092 // is the unmodified String prototype. If it is not, the result is false. | |
| 6093 __ movq(scratch1_, FieldOperand(map_result_, Map::kPrototypeOffset)); | |
| 6094 __ testq(scratch1_, Immediate(kSmiTagMask)); | |
| 6095 __ j(zero, &false_result); | |
| 6096 __ movq(scratch1_, FieldOperand(scratch1_, HeapObject::kMapOffset)); | |
| 6097 __ movq(scratch2_, | |
| 6098 Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 6099 __ movq(scratch2_, | |
| 6100 FieldOperand(scratch2_, GlobalObject::kGlobalContextOffset)); | |
| 6101 __ cmpq(scratch1_, | |
| 6102 ContextOperand( | |
| 6103 scratch2_, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); | |
| 6104 __ j(not_equal, &false_result); | |
| 6105 // Set the bit in the map to indicate that it has been checked safe for the | |
| 6106 // default valueOf, and set the result to true. | |
| 6107 __ or_(FieldOperand(map_result_, Map::kBitField2Offset), | |
| 6108 Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); | |
| 6109 __ Set(map_result_, 1); | |
| 6110 __ jmp(exit_label()); | |
| 6111 __ bind(&false_result); | |
| 6112 // Set false result. | |
| 6113 __ Set(map_result_, 0); | |
| 6114 } | |
| 6115 | |
| 6116 private: | |
| 6117 Register object_; | |
| 6118 Register map_result_; | |
| 6119 Register scratch1_; | |
| 6120 Register scratch2_; | |
| 6121 }; | |
| 6122 | |
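The deferred check above reduces to the following decision, sketched here in plain C++ over pre-extracted inputs (the vector of descriptor keys and the two booleans stand in for the hash-table-map, descriptor-array, and prototype comparisons done in registers); this is illustrative only, not V8 API:

    #include <string>
    #include <vector>

    // Sketch: a wrapper is safe for the default valueOf only if it is a
    // fast-case object, no own descriptor is named "valueOf", and its
    // prototype is the unmodified String.prototype. On success the
    // generated code also caches the answer as a bit in the map.
    bool StringWrapperSafeForDefaultValueOf(
        const std::vector<std::string>& descriptor_keys,
        bool has_dictionary_properties,
        bool prototype_is_original_string_prototype) {
      if (has_dictionary_properties) return false;  // Slow-case object.
      for (size_t i = 0; i < descriptor_keys.size(); i++) {
        if (descriptor_keys[i] == "valueOf") return false;
      }
      return prototype_is_original_string_prototype;
    }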
| 6123 | |
| 6124 void CodeGenerator::GenerateIsStringWrapperSafeForDefaultValueOf( | |
| 6125 ZoneList<Expression*>* args) { | |
| 6126 ASSERT(args->length() == 1); | |
| 6127 Load(args->at(0)); | |
| 6128 Result obj = frame_->Pop(); // Pop the string wrapper. | |
| 6129 obj.ToRegister(); | |
| 6130 ASSERT(obj.is_valid()); | |
| 6131 if (FLAG_debug_code) { | |
| 6132 __ AbortIfSmi(obj.reg()); | |
| 6133 } | |
| 6134 | |
| 6135 // Check whether this map has already been checked to be safe for default | |
| 6136 // valueOf. | |
| 6137 Result map_result = allocator()->Allocate(); | |
| 6138 ASSERT(map_result.is_valid()); | |
| 6139 __ movq(map_result.reg(), FieldOperand(obj.reg(), HeapObject::kMapOffset)); | |
| 6140 __ testb(FieldOperand(map_result.reg(), Map::kBitField2Offset), | |
| 6141 Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf)); | |
| 6142 destination()->true_target()->Branch(not_zero); | |
| 6143 | |
| 6144 // We need an additional two scratch registers for the deferred code. | |
| 6145 Result temp1 = allocator()->Allocate(); | |
| 6146 ASSERT(temp1.is_valid()); | |
| 6147 Result temp2 = allocator()->Allocate(); | |
| 6148 ASSERT(temp2.is_valid()); | |
| 6149 | |
| 6150 DeferredIsStringWrapperSafeForDefaultValueOf* deferred = | |
| 6151 new DeferredIsStringWrapperSafeForDefaultValueOf( | |
| 6152 obj.reg(), map_result.reg(), temp1.reg(), temp2.reg()); | |
| 6153 deferred->Branch(zero); | |
| 6154 deferred->BindExit(); | |
| 6155 __ testq(map_result.reg(), map_result.reg()); | |
| 6156 obj.Unuse(); | |
| 6157 map_result.Unuse(); | |
| 6158 temp1.Unuse(); | |
| 6159 temp2.Unuse(); | |
| 6160 destination()->Split(not_equal); | |
| 6161 } | |
| 6162 | |
| 6163 | |
| 6164 void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) { | |
| 6165 // This generates a fast version of: | |
| 6166 // (%_ClassOf(arg) === 'Function') | |
| 6167 ASSERT(args->length() == 1); | |
| 6168 Load(args->at(0)); | |
| 6169 Result obj = frame_->Pop(); | |
| 6170 obj.ToRegister(); | |
| 6171 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
| 6172 destination()->false_target()->Branch(is_smi); | |
| 6173 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); | |
| 6174 obj.Unuse(); | |
| 6175 destination()->Split(equal); | |
| 6176 } | |
| 6177 | |
| 6178 | |
| 6179 void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) { | |
| 6180 ASSERT(args->length() == 1); | |
| 6181 Load(args->at(0)); | |
| 6182 Result obj = frame_->Pop(); | |
| 6183 obj.ToRegister(); | |
| 6184 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
| 6185 destination()->false_target()->Branch(is_smi); | |
| 6186 __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset)); | |
| 6187 __ movzxbl(kScratchRegister, | |
| 6188 FieldOperand(kScratchRegister, Map::kBitFieldOffset)); | |
| 6189 __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable)); | |
| 6190 obj.Unuse(); | |
| 6191 destination()->Split(not_zero); | |
| 6192 } | |
| 6193 | |
| 6194 | |
| 6195 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { | |
| 6196 ASSERT(args->length() == 0); | |
| 6197 | |
| 6198 // Get the frame pointer for the calling frame. | |
| 6199 Result fp = allocator()->Allocate(); | |
| 6200 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
| 6201 | |
| 6202 // Skip the arguments adaptor frame if it exists. | |
| 6203 Label check_frame_marker; | |
| 6204 __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset), | |
| 6205 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
| 6206 __ j(not_equal, &check_frame_marker); | |
| 6207 __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset)); | |
| 6208 | |
| 6209 // Check the marker in the calling frame. | |
| 6210 __ bind(&check_frame_marker); | |
| 6211 __ Cmp(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset), | |
| 6212 Smi::FromInt(StackFrame::CONSTRUCT)); | |
| 6213 fp.Unuse(); | |
| 6214 destination()->Split(equal); | |
| 6215 } | |
| 6216 | |
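The frame walk above can be restated as pointer arithmetic. A sketch under the assumption that frame slots are word-indexed relative to the frame pointer; the slot indices mirror StandardFrameConstants, and the marker values are passed in rather than taken from V8 headers:

    #include <stdint.h>

    // Word-indexed frame slots relative to a frame pointer (stand-ins for
    // StandardFrameConstants::kCallerFPOffset/kContextOffset/kMarkerOffset).
    static const int kCallerFPSlot = 0;
    static const int kContextSlot = -1;
    static const int kMarkerSlot = -2;

    bool IsConstructCall(intptr_t* fp, intptr_t adaptor_marker,
                         intptr_t construct_marker) {
      intptr_t* caller = reinterpret_cast<intptr_t*>(fp[kCallerFPSlot]);
      if (caller[kContextSlot] == adaptor_marker) {
        // Skip the arguments adaptor frame, as the emitted code does.
        caller = reinterpret_cast<intptr_t*>(caller[kCallerFPSlot]);
      }
      return caller[kMarkerSlot] == construct_marker;
    }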
| 6217 | |
| 6218 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { | |
| 6219 ASSERT(args->length() == 0); | |
| 6220 | |
| 6221 Result fp = allocator_->Allocate(); | |
| 6222 Result result = allocator_->Allocate(); | |
| 6223 ASSERT(fp.is_valid() && result.is_valid()); | |
| 6224 | |
| 6225 Label exit; | |
| 6226 | |
| 6227 // Get the number of formal parameters. | |
| 6228 __ Move(result.reg(), Smi::FromInt(scope()->num_parameters())); | |
| 6229 | |
| 6230 // Check if the calling frame is an arguments adaptor frame. | |
| 6231 __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | |
| 6232 __ Cmp(Operand(fp.reg(), StandardFrameConstants::kContextOffset), | |
| 6233 Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | |
| 6234 __ j(not_equal, &exit); | |
| 6235 | |
| 6236 // Arguments adaptor case: Read the arguments length from the | |
| 6237 // adaptor frame. | |
| 6238 __ movq(result.reg(), | |
| 6239 Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 6240 | |
| 6241 __ bind(&exit); | |
| 6242 result.set_type_info(TypeInfo::Smi()); | |
| 6243 if (FLAG_debug_code) { | |
| 6244 __ AbortIfNotSmi(result.reg()); | |
| 6245 } | |
| 6246 frame_->Push(&result); | |
| 6247 } | |
| 6248 | |
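In effect the emitted code computes the following selection; a trivial sketch, with the boolean and the two counts standing in for the adaptor-frame test and the two length sources:

    // Sketch: use the formal parameter count unless the caller went through
    // an arguments adaptor frame, which carries the actual argument count.
    int ArgumentsLength(bool caller_is_arguments_adaptor,
                        int adaptor_frame_length,
                        int formal_parameter_count) {
      return caller_is_arguments_adaptor ? adaptor_frame_length
                                         : formal_parameter_count;
    }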
| 6249 | |
| 6250 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { | |
| 6251 ASSERT(args->length() == 1); | |
| 6252 JumpTarget leave, null, function, non_function_constructor; | |
| 6253 Load(args->at(0)); // Load the object. | |
| 6254 Result obj = frame_->Pop(); | |
| 6255 obj.ToRegister(); | |
| 6256 frame_->Spill(obj.reg()); | |
| 6257 | |
| 6258 // If the object is a smi, we return null. | |
| 6259 Condition is_smi = masm_->CheckSmi(obj.reg()); | |
| 6260 null.Branch(is_smi); | |
| 6261 | |
| 6262 // Check that the object is a JS object but take special care of JS | |
| 6263 // functions to make sure they have 'Function' as their class. | |
| 6264 | |
| 6265 __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg()); | |
| 6266 null.Branch(below); | |
| 6267 | |
| 6268 // As long as JS_FUNCTION_TYPE is the last instance type and it is | |
| 6269 // right after LAST_JS_OBJECT_TYPE, we can avoid checking for | |
| 6270 // LAST_JS_OBJECT_TYPE. | |
| 6271 ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
| 6272 ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); | |
| 6273 __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE); | |
| 6274 function.Branch(equal); | |
| 6275 | |
| 6276 // Check if the constructor in the map is a function. | |
| 6277 __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset)); | |
| 6278 __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister); | |
| 6279 non_function_constructor.Branch(not_equal); | |
| 6280 | |
| 6281 // The obj register now contains the constructor function. Grab the | |
| 6282 // instance class name from there. | |
| 6283 __ movq(obj.reg(), | |
| 6284 FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset)); | |
| 6285 __ movq(obj.reg(), | |
| 6286 FieldOperand(obj.reg(), | |
| 6287 SharedFunctionInfo::kInstanceClassNameOffset)); | |
| 6288 frame_->Push(&obj); | |
| 6289 leave.Jump(); | |
| 6290 | |
| 6291 // Functions have class 'Function'. | |
| 6292 function.Bind(); | |
| 6293 frame_->Push(FACTORY->function_class_symbol()); | |
| 6294 leave.Jump(); | |
| 6295 | |
| 6296 // Objects with a non-function constructor have class 'Object'. | |
| 6297 non_function_constructor.Bind(); | |
| 6298 frame_->Push(FACTORY->Object_symbol()); | |
| 6299 leave.Jump(); | |
| 6300 | |
| 6301 // Non-JS objects have class null. | |
| 6302 null.Bind(); | |
| 6303 frame_->Push(FACTORY->null_value()); | |
| 6304 | |
| 6305 // All done. | |
| 6306 leave.Bind(); | |
| 6307 } | |
| 6308 | |
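The four jump targets implement a short decision ladder. Restated as a plain C++ sketch (the parameters stand in for the smi check, instance-type comparisons, and constructor lookup performed above):

    #include <string>

    // Sketch of GenerateClassOf's result selection.
    std::string ClassOf(bool is_smi, int instance_type,
                        int first_js_object_type, int js_function_type,
                        bool constructor_is_function,
                        const std::string& instance_class_name) {
      if (is_smi || instance_type < first_js_object_type) return "null";
      if (instance_type == js_function_type) return "Function";
      if (!constructor_is_function) return "Object";
      // Otherwise the class name comes from the constructor's
      // SharedFunctionInfo, as loaded above.
      return instance_class_name;
    }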
| 6309 | |
| 6310 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { | |
| 6311 ASSERT(args->length() == 1); | |
| 6312 JumpTarget leave; | |
| 6313 Load(args->at(0)); // Load the object. | |
| 6314 frame_->Dup(); | |
| 6315 Result object = frame_->Pop(); | |
| 6316 object.ToRegister(); | |
| 6317 ASSERT(object.is_valid()); | |
| 6318 // if (object->IsSmi()) return object. | |
| 6319 Condition is_smi = masm_->CheckSmi(object.reg()); | |
| 6320 leave.Branch(is_smi); | |
| 6321 // It is a heap object - get map. | |
| 6322 Result temp = allocator()->Allocate(); | |
| 6323 ASSERT(temp.is_valid()); | |
| 6324 // if (!object->IsJSValue()) return object. | |
| 6325 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg()); | |
| 6326 leave.Branch(not_equal); | |
| 6327 __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset)); | |
| 6328 object.Unuse(); | |
| 6329 frame_->SetElementAt(0, &temp); | |
| 6330 leave.Bind(); | |
| 6331 } | |
| 6332 | |
| 6333 | |
| 6334 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { | |
| 6335 ASSERT(args->length() == 2); | |
| 6336 JumpTarget leave; | |
| 6337 Load(args->at(0)); // Load the object. | |
| 6338 Load(args->at(1)); // Load the value. | |
| 6339 Result value = frame_->Pop(); | |
| 6340 Result object = frame_->Pop(); | |
| 6341 value.ToRegister(); | |
| 6342 object.ToRegister(); | |
| 6343 | |
| 6344 // if (object->IsSmi()) return value. | |
| 6345 Condition is_smi = masm_->CheckSmi(object.reg()); | |
| 6346 leave.Branch(is_smi, &value); | |
| 6347 | |
| 6348 // It is a heap object - get its map. | |
| 6349 Result scratch = allocator_->Allocate(); | |
| 6350 ASSERT(scratch.is_valid()); | |
| 6351 // if (!object->IsJSValue()) return value. | |
| 6352 __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg()); | |
| 6353 leave.Branch(not_equal, &value); | |
| 6354 | |
| 6355 // Store the value. | |
| 6356 __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg()); | |
| 6357 // Update the write barrier. Save the value as it will be | |
| 6358 // overwritten by the write barrier code and is needed afterward. | |
| 6359 Result duplicate_value = allocator_->Allocate(); | |
| 6360 ASSERT(duplicate_value.is_valid()); | |
| 6361 __ movq(duplicate_value.reg(), value.reg()); | |
| 6362 // The object register is also overwritten by the write barrier and | |
| 6363 // possibly aliased in the frame. | |
| 6364 frame_->Spill(object.reg()); | |
| 6365 __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(), | |
| 6366 scratch.reg()); | |
| 6367 object.Unuse(); | |
| 6368 scratch.Unuse(); | |
| 6369 duplicate_value.Unuse(); | |
| 6370 | |
| 6371 // Leave. | |
| 6372 leave.Bind(&value); | |
| 6373 frame_->Push(&value); | |
| 6374 } | |
| 6375 | |
| 6376 | |
| 6377 void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) { | |
| 6378 ASSERT(args->length() == 1); | |
| 6379 | |
| 6380 // ArgumentsAccessStub expects the key in rdx and the formal | |
| 6381 // parameter count in rax. | |
| 6382 Load(args->at(0)); | |
| 6383 Result key = frame_->Pop(); | |
| 6384 // Explicitly create a constant result. | |
| 6385 Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters()))); | |
| 6386 // Call the shared stub to get to arguments[key]. | |
| 6387 ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); | |
| 6388 Result result = frame_->CallStub(&stub, &key, &count); | |
| 6389 frame_->Push(&result); | |
| 6390 } | |
| 6391 | |
| 6392 | |
| 6393 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { | |
| 6394 ASSERT(args->length() == 2); | |
| 6395 | |
| 6396 // Load the two objects into registers and perform the comparison. | |
| 6397 Load(args->at(0)); | |
| 6398 Load(args->at(1)); | |
| 6399 Result right = frame_->Pop(); | |
| 6400 Result left = frame_->Pop(); | |
| 6401 right.ToRegister(); | |
| 6402 left.ToRegister(); | |
| 6403 __ cmpq(right.reg(), left.reg()); | |
| 6404 right.Unuse(); | |
| 6405 left.Unuse(); | |
| 6406 destination()->Split(equal); | |
| 6407 } | |
| 6408 | |
| 6409 | |
| 6410 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) { | |
| 6411 ASSERT(args->length() == 0); | |
| 6412 // The RBP value is aligned, so its low bit is clear and it passes the smi | |
| 6413 // tag check; it is not encoded as a smi, though, so it must not be used as one. | |
| 6414 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); | |
| 6415 Result rbp_as_smi = allocator_->Allocate(); | |
| 6416 ASSERT(rbp_as_smi.is_valid()); | |
| 6417 __ movq(rbp_as_smi.reg(), rbp); | |
| 6418 frame_->Push(&rbp_as_smi); | |
| 6419 } | |
| 6420 | |
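The safety argument is purely about the low tag bit. A minimal sketch of the invariant being relied on (the mask is reproduced locally for illustration):

    #include <stdint.h>

    // With kSmiTag == 0 and kSmiTagSize == 1, anything with a clear low bit
    // passes a smi check. Frame pointers are at least 8-byte aligned, so a
    // raw rbp cannot be mistaken for a heap pointer by the GC.
    bool LooksLikeSmi(uintptr_t word) {
      const uintptr_t kTagMask = 1;  // Local stand-in for kSmiTagMask.
      return (word & kTagMask) == 0;
    }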
| 6421 | |
| 6422 void CodeGenerator::GenerateRandomHeapNumber( | |
| 6423 ZoneList<Expression*>* args) { | |
| 6424 ASSERT(args->length() == 0); | |
| 6425 frame_->SpillAll(); | |
| 6426 | |
| 6427 Label slow_allocate_heapnumber; | |
| 6428 Label heapnumber_allocated; | |
| 6429 __ AllocateHeapNumber(rbx, rcx, &slow_allocate_heapnumber); | |
| 6430 __ jmp(&heapnumber_allocated); | |
| 6431 | |
| 6432 __ bind(&slow_allocate_heapnumber); | |
| 6433 // Allocate a heap number. | |
| 6434 __ CallRuntime(Runtime::kNumberAlloc, 0); | |
| 6435 __ movq(rbx, rax); | |
| 6436 | |
| 6437 __ bind(&heapnumber_allocated); | |
| 6438 | |
| 6439 // Return a random uint32 number in rax. | |
| 6440 // The fresh HeapNumber is in rbx, which is callee-save on both x64 ABIs. | |
| 6441 __ PrepareCallCFunction(1); | |
| 6442 #ifdef _WIN64 | |
| 6443 __ LoadAddress(rcx, ExternalReference::isolate_address()); | |
| 6444 #else | |
| 6445 __ LoadAddress(rdi, ExternalReference::isolate_address()); | |
| 6446 #endif | |
| 6447 __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); | |
| 6448 | |
| 6449 // Convert 32 random bits in rax to 0.(32 random bits) in a double | |
| 6450 // by computing: | |
| 6451 // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20). | |
| 6452 __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single. | |
| 6453 __ movd(xmm1, rcx); | |
| 6454 __ movd(xmm0, rax); | |
| 6455 __ cvtss2sd(xmm1, xmm1); | |
| 6456 __ xorpd(xmm0, xmm1); | |
| 6457 __ subsd(xmm0, xmm1); | |
| 6458 __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); | |
| 6459 | |
| 6460 __ movq(rax, rbx); | |
| 6461 Result result = allocator_->Allocate(rax); | |
| 6462 frame_->Push(&result); | |
| 6463 } | |
| 6464 | |
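The xorpd/subsd sequence above is a classic bit-splicing trick. A plain C++ rendering of the same computation (an editorial sketch, not V8 code; memcpy is used for the type pun):

    #include <stdint.h>
    #include <string.h>

    // Splice 32 random bits into the zero low mantissa bits of the double
    // 1.0 x 2^20, then subtract 1.0 x 2^20; what remains is r / 2^32, i.e.
    // 0.(32 random bits), uniformly distributed in [0, 1).
    double RandomBitsToDouble(uint32_t random_bits) {
      double base = 1048576.0;  // 1.0 x 2^20; low 32 mantissa bits are zero.
      uint64_t bits;
      memcpy(&bits, &base, sizeof(bits));
      bits ^= random_bits;      // The XOR lands entirely in the mantissa.
      double spliced;
      memcpy(&spliced, &bits, sizeof(spliced));
      return spliced - base;
    }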
| 6465 | |
| 6466 void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) { | |
| 6467 ASSERT_EQ(2, args->length()); | |
| 6468 | |
| 6469 Load(args->at(0)); | |
| 6470 Load(args->at(1)); | |
| 6471 | |
| 6472 StringAddStub stub(NO_STRING_ADD_FLAGS); | |
| 6473 Result answer = frame_->CallStub(&stub, 2); | |
| 6474 frame_->Push(&answer); | |
| 6475 } | |
| 6476 | |
| 6477 | |
| 6478 void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) { | |
| 6479 ASSERT_EQ(3, args->length()); | |
| 6480 | |
| 6481 Load(args->at(0)); | |
| 6482 Load(args->at(1)); | |
| 6483 Load(args->at(2)); | |
| 6484 | |
| 6485 SubStringStub stub; | |
| 6486 Result answer = frame_->CallStub(&stub, 3); | |
| 6487 frame_->Push(&answer); | |
| 6488 } | |
| 6489 | |
| 6490 | |
| 6491 void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) { | |
| 6492 ASSERT_EQ(2, args->length()); | |
| 6493 | |
| 6494 Load(args->at(0)); | |
| 6495 Load(args->at(1)); | |
| 6496 | |
| 6497 StringCompareStub stub; | |
| 6498 Result answer = frame_->CallStub(&stub, 2); | |
| 6499 frame_->Push(&answer); | |
| 6500 } | |
| 6501 | |
| 6502 | |
| 6503 void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) { | |
| 6504 ASSERT_EQ(args->length(), 4); | |
| 6505 | |
| 6506 // Load the arguments on the stack and call the runtime system. | |
| 6507 Load(args->at(0)); | |
| 6508 Load(args->at(1)); | |
| 6509 Load(args->at(2)); | |
| 6510 Load(args->at(3)); | |
| 6511 RegExpExecStub stub; | |
| 6512 Result result = frame_->CallStub(&stub, 4); | |
| 6513 frame_->Push(&result); | |
| 6514 } | |
| 6515 | |
| 6516 | |
| 6517 void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) { | |
| 6518 ASSERT_EQ(3, args->length()); | |
| 6519 Load(args->at(0)); // Size of array, smi. | |
| 6520 Load(args->at(1)); // "index" property value. | |
| 6521 Load(args->at(2)); // "input" property value. | |
| 6522 RegExpConstructResultStub stub; | |
| 6523 Result result = frame_->CallStub(&stub, 3); | |
| 6524 frame_->Push(&result); | |
| 6525 } | |
| 6526 | |
| 6527 | |
| 6528 class DeferredSearchCache: public DeferredCode { | |
| 6529 public: | |
| 6530 DeferredSearchCache(Register dst, | |
| 6531 Register cache, | |
| 6532 Register key, | |
| 6533 Register scratch) | |
| 6534 : dst_(dst), cache_(cache), key_(key), scratch_(scratch) { | |
| 6535 set_comment("[ DeferredSearchCache"); | |
| 6536 } | |
| 6537 | |
| 6538 virtual void Generate(); | |
| 6539 | |
| 6540 private: | |
| 6541 Register dst_; // On invocation, holds the finger index (as int32); on | |
| 6542 // exit, holds the value that was looked up. | |
| 6543 Register cache_; // instance of JSFunctionResultCache. | |
| 6544 Register key_; // key being looked up. | |
| 6545 Register scratch_; | |
| 6546 }; | |
| 6547 | |
| 6548 | |
| 6549 // Returns the operand of the element at |index| + |additional_offset| in | |
| 6550 // the FixedArray whose pointer is held in |array|. |index| is an int32. | |
| 6551 static Operand ArrayElement(Register array, | |
| 6552 Register index, | |
| 6553 int additional_offset = 0) { | |
| 6554 int offset = FixedArray::kHeaderSize + additional_offset * kPointerSize; | |
| 6555 return FieldOperand(array, index, times_pointer_size, offset); | |
| 6556 } | |
| 6557 | |
| 6558 | |
| 6559 void DeferredSearchCache::Generate() { | |
| 6560 Label first_loop, search_further, second_loop, cache_miss; | |
| 6561 | |
| 6562 Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex); | |
| 6563 Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize); | |
| 6564 | |
| 6565 // Check the cache from finger to start of the cache. | |
| 6566 __ bind(&first_loop); | |
| 6567 __ subl(dst_, kEntrySizeImm); | |
| 6568 __ cmpl(dst_, kEntriesIndexImm); | |
| 6569 __ j(less, &search_further); | |
| 6570 | |
| 6571 __ cmpq(ArrayElement(cache_, dst_), key_); | |
| 6572 __ j(not_equal, &first_loop); | |
| 6573 | |
| 6574 __ Integer32ToSmiField( | |
| 6575 FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); | |
| 6576 __ movq(dst_, ArrayElement(cache_, dst_, 1)); | |
| 6577 __ jmp(exit_label()); | |
| 6578 | |
| 6579 __ bind(&search_further); | |
| 6580 | |
| 6581 // Check the cache from end of cache up to finger. | |
| 6582 __ SmiToInteger32(dst_, | |
| 6583 FieldOperand(cache_, | |
| 6584 JSFunctionResultCache::kCacheSizeOffset)); | |
| 6585 __ SmiToInteger32(scratch_, | |
| 6586 FieldOperand(cache_, JSFunctionResultCache::kFingerOffset)); | |
| 6587 | |
| 6588 __ bind(&second_loop); | |
| 6589 __ subl(dst_, kEntrySizeImm); | |
| 6590 __ cmpl(dst_, scratch_); | |
| 6591 __ j(less_equal, &cache_miss); | |
| 6592 | |
| 6593 __ cmpq(ArrayElement(cache_, dst_), key_); | |
| 6594 __ j(not_equal, &second_loop); | |
| 6595 | |
| 6596 __ Integer32ToSmiField( | |
| 6597 FieldOperand(cache_, JSFunctionResultCache::kFingerOffset), dst_); | |
| 6598 __ movq(dst_, ArrayElement(cache_, dst_, 1)); | |
| 6599 __ jmp(exit_label()); | |
| 6600 | |
| 6601 __ bind(&cache_miss); | |
| 6602 __ push(cache_); // store a reference to cache | |
| 6603 __ push(key_); // store a key | |
| 6604 __ push(Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX))); | |
| 6605 __ push(key_); | |
| 6606 // On x64 the function must be in rdi. | |
| 6607 __ movq(rdi, FieldOperand(cache_, JSFunctionResultCache::kFactoryOffset)); | |
| 6608 ParameterCount expected(1); | |
| 6609 __ InvokeFunction(rdi, expected, CALL_FUNCTION); | |
| 6610 | |
| 6611 // Find a place to put the new cached value. | |
| 6612 Label add_new_entry, update_cache; | |
| 6613 __ movq(rcx, Operand(rsp, kPointerSize)); // restore the cache | |
| 6614 // Possible optimization: the cache size is constant for a given cache, so | |
| 6615 // technically we could use a constant here. However, on a cache miss this | |
| 6616 // optimization would hardly matter. | |
| 6617 | |
| 6618 // Check whether we can add a new entry to the cache. | |
| 6619 __ SmiToInteger32(rbx, FieldOperand(rcx, FixedArray::kLengthOffset)); | |
| 6620 __ SmiToInteger32(r9, | |
| 6621 FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset)); | |
| 6622 __ cmpl(rbx, r9); | |
| 6623 __ j(greater, &add_new_entry); | |
| 6624 | |
| 6625 // Check whether we can evict the entry after the finger. | |
| 6626 __ SmiToInteger32(rdx, | |
| 6627 FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); | |
| 6628 __ addl(rdx, kEntrySizeImm); | |
| 6629 Label forward; | |
| 6630 __ cmpl(rbx, rdx); | |
| 6631 __ j(greater, &forward); | |
| 6632 // Need to wrap around to the start of the cache. | |
| 6633 __ movl(rdx, kEntriesIndexImm); | |
| 6634 __ bind(&forward); | |
| 6635 __ movl(r9, rdx); | |
| 6636 __ jmp(&update_cache); | |
| 6637 | |
| 6638 __ bind(&add_new_entry); | |
| 6639 // r9 holds cache size as int32. | |
| 6640 __ leal(rbx, Operand(r9, JSFunctionResultCache::kEntrySize)); | |
| 6641 __ Integer32ToSmiField( | |
| 6642 FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx); | |
| 6643 | |
| 6644 // Update the cache itself. | |
| 6645 // r9 holds the index as int32. | |
| 6646 __ bind(&update_cache); | |
| 6647 __ pop(rbx); // restore the key | |
| 6648 __ Integer32ToSmiField( | |
| 6649 FieldOperand(rcx, JSFunctionResultCache::kFingerOffset), r9); | |
| 6650 // Store key. | |
| 6651 __ movq(ArrayElement(rcx, r9), rbx); | |
| 6652 __ RecordWrite(rcx, 0, rbx, r9); | |
| 6653 | |
| 6654 // Store value. | |
| 6655 __ pop(rcx); // restore the cache. | |
| 6656 __ SmiToInteger32(rdx, | |
| 6657 FieldOperand(rcx, JSFunctionResultCache::kFingerOffset)); | |
| 6658 __ incl(rdx); | |
| 6659 // Back up rax, because the RecordWrite macro clobbers its arguments. | |
| 6660 __ movq(rbx, rax); | |
| 6661 __ movq(ArrayElement(rcx, rdx), rax); | |
| 6662 __ RecordWrite(rcx, 0, rbx, rdx); | |
| 6663 | |
| 6664 if (!dst_.is(rax)) { | |
| 6665 __ movq(dst_, rax); | |
| 6666 } | |
| 6667 } | |
| 6668 | |
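The two loops above perform a finger-based search. The equivalent scan in plain C++ over a flat word array (entries_index, entry_size, finger, and cache_size correspond to the JSFunctionResultCache fields read above; a sketch, not the real layout code):

    #include <stdint.h>

    // Sketch: scan from the finger down to the first entry, then from the
    // end of the cache down to (but not including) the finger. Entries are
    // key/value pairs laid out flat, so a hit at index i means the value
    // lives at index i + 1. Returns -1 on a miss.
    int SearchCache(const intptr_t* cache, int entries_index, int entry_size,
                    int finger, int cache_size, intptr_t key) {
      for (int i = finger - entry_size; i >= entries_index; i -= entry_size) {
        if (cache[i] == key) return i;
      }
      for (int i = cache_size - entry_size; i > finger; i -= entry_size) {
        if (cache[i] == key) return i;
      }
      return -1;  // Miss: the deferred code then invokes the factory.
    }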
| 6669 | |
| 6670 void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) { | |
| 6671 ASSERT_EQ(2, args->length()); | |
| 6672 | |
| 6673 ASSERT_NE(NULL, args->at(0)->AsLiteral()); | |
| 6674 int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->handle()))->value(); | |
| 6675 | |
| 6676 Handle<FixedArray> jsfunction_result_caches( | |
| 6677 Isolate::Current()->global_context()->jsfunction_result_caches()); | |
| 6678 if (jsfunction_result_caches->length() <= cache_id) { | |
| 6679 __ Abort("Attempt to use undefined cache."); | |
| 6680 frame_->Push(FACTORY->undefined_value()); | |
| 6681 return; | |
| 6682 } | |
| 6683 | |
| 6684 Load(args->at(1)); | |
| 6685 Result key = frame_->Pop(); | |
| 6686 key.ToRegister(); | |
| 6687 | |
| 6688 Result cache = allocator()->Allocate(); | |
| 6689 ASSERT(cache.is_valid()); | |
| 6690 __ movq(cache.reg(), ContextOperand(rsi, Context::GLOBAL_INDEX)); | |
| 6691 __ movq(cache.reg(), | |
| 6692 FieldOperand(cache.reg(), GlobalObject::kGlobalContextOffset)); | |
| 6693 __ movq(cache.reg(), | |
| 6694 ContextOperand(cache.reg(), Context::JSFUNCTION_RESULT_CACHES_INDEX)); | |
| 6695 __ movq(cache.reg(), | |
| 6696 FieldOperand(cache.reg(), FixedArray::OffsetOfElementAt(cache_id))); | |
| 6697 | |
| 6698 Result tmp = allocator()->Allocate(); | |
| 6699 ASSERT(tmp.is_valid()); | |
| 6700 | |
| 6701 Result scratch = allocator()->Allocate(); | |
| 6702 ASSERT(scratch.is_valid()); | |
| 6703 | |
| 6704 DeferredSearchCache* deferred = new DeferredSearchCache(tmp.reg(), | |
| 6705 cache.reg(), | |
| 6706 key.reg(), | |
| 6707 scratch.reg()); | |
| 6708 | |
| 6709 const int kFingerOffset = | |
| 6710 FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex); | |
| 6711 // tmp.reg() now holds finger offset as a smi. | |
| 6712 __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset)); | |
| 6713 __ cmpq(key.reg(), FieldOperand(cache.reg(), | |
| 6714 tmp.reg(), times_pointer_size, | |
| 6715 FixedArray::kHeaderSize)); | |
| 6716 deferred->Branch(not_equal); | |
| 6717 __ movq(tmp.reg(), FieldOperand(cache.reg(), | |
| 6718 tmp.reg(), times_pointer_size, | |
| 6719 FixedArray::kHeaderSize + kPointerSize)); | |
| 6720 | |
| 6721 deferred->BindExit(); | |
| 6722 frame_->Push(&tmp); | |
| 6723 } | |
| 6724 | |
| 6725 | |
| 6726 void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) { | |
| 6727 ASSERT_EQ(args->length(), 1); | |
| 6728 | |
| 6729 // Load the argument on the stack and jump to the runtime. | |
| 6730 Load(args->at(0)); | |
| 6731 | |
| 6732 NumberToStringStub stub; | |
| 6733 Result result = frame_->CallStub(&stub, 1); | |
| 6734 frame_->Push(&result); | |
| 6735 } | |
| 6736 | |
| 6737 | |
| 6738 class DeferredSwapElements: public DeferredCode { | |
| 6739 public: | |
| 6740 DeferredSwapElements(Register object, Register index1, Register index2) | |
| 6741 : object_(object), index1_(index1), index2_(index2) { | |
| 6742 set_comment("[ DeferredSwapElements"); | |
| 6743 } | |
| 6744 | |
| 6745 virtual void Generate(); | |
| 6746 | |
| 6747 private: | |
| 6748 Register object_, index1_, index2_; | |
| 6749 }; | |
| 6750 | |
| 6751 | |
| 6752 void DeferredSwapElements::Generate() { | |
| 6753 __ push(object_); | |
| 6754 __ push(index1_); | |
| 6755 __ push(index2_); | |
| 6756 __ CallRuntime(Runtime::kSwapElements, 3); | |
| 6757 } | |
| 6758 | |
| 6759 | |
| 6760 void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) { | |
| 6761 Comment cmnt(masm_, "[ GenerateSwapElements"); | |
| 6762 | |
| 6763 ASSERT_EQ(3, args->length()); | |
| 6764 | |
| 6765 Load(args->at(0)); | |
| 6766 Load(args->at(1)); | |
| 6767 Load(args->at(2)); | |
| 6768 | |
| 6769 Result index2 = frame_->Pop(); | |
| 6770 index2.ToRegister(); | |
| 6771 | |
| 6772 Result index1 = frame_->Pop(); | |
| 6773 index1.ToRegister(); | |
| 6774 | |
| 6775 Result object = frame_->Pop(); | |
| 6776 object.ToRegister(); | |
| 6777 | |
| 6778 Result tmp1 = allocator()->Allocate(); | |
| 6779 tmp1.ToRegister(); | |
| 6780 Result tmp2 = allocator()->Allocate(); | |
| 6781 tmp2.ToRegister(); | |
| 6782 | |
| 6783 frame_->Spill(object.reg()); | |
| 6784 frame_->Spill(index1.reg()); | |
| 6785 frame_->Spill(index2.reg()); | |
| 6786 | |
| 6787 DeferredSwapElements* deferred = new DeferredSwapElements(object.reg(), | |
| 6788 index1.reg(), | |
| 6789 index2.reg()); | |
| 6790 | |
| 6791 // Fetch the map and check if array is in fast case. | |
| 6792 // Check that object doesn't require security checks and | |
| 6793 // has no indexed interceptor. | |
| 6794 __ CmpObjectType(object.reg(), JS_ARRAY_TYPE, tmp1.reg()); | |
| 6795 deferred->Branch(not_equal); | |
| 6796 __ testb(FieldOperand(tmp1.reg(), Map::kBitFieldOffset), | |
| 6797 Immediate(KeyedLoadIC::kSlowCaseBitFieldMask)); | |
| 6798 deferred->Branch(not_zero); | |
| 6799 | |
| 6800 // Check the object's elements are in fast case and writable. | |
| 6801 __ movq(tmp1.reg(), FieldOperand(object.reg(), JSObject::kElementsOffset)); | |
| 6802 __ CompareRoot(FieldOperand(tmp1.reg(), HeapObject::kMapOffset), | |
| 6803 Heap::kFixedArrayMapRootIndex); | |
| 6804 deferred->Branch(not_equal); | |
| 6805 | |
| 6806 // Check that both indices are smis. | |
| 6807 Condition both_smi = masm()->CheckBothSmi(index1.reg(), index2.reg()); | |
| 6808 deferred->Branch(NegateCondition(both_smi)); | |
| 6809 | |
| 6810 // Check that both indices are valid. | |
| 6811 __ movq(tmp2.reg(), FieldOperand(object.reg(), JSArray::kLengthOffset)); | |
| 6812 __ SmiCompare(tmp2.reg(), index1.reg()); | |
| 6813 deferred->Branch(below_equal); | |
| 6814 __ SmiCompare(tmp2.reg(), index2.reg()); | |
| 6815 deferred->Branch(below_equal); | |
| 6816 | |
| 6817 // Bring addresses into index1 and index2. | |
| 6818 __ SmiToInteger32(index1.reg(), index1.reg()); | |
| 6819 __ lea(index1.reg(), FieldOperand(tmp1.reg(), | |
| 6820 index1.reg(), | |
| 6821 times_pointer_size, | |
| 6822 FixedArray::kHeaderSize)); | |
| 6823 __ SmiToInteger32(index2.reg(), index2.reg()); | |
| 6824 __ lea(index2.reg(), FieldOperand(tmp1.reg(), | |
| 6825 index2.reg(), | |
| 6826 times_pointer_size, | |
| 6827 FixedArray::kHeaderSize)); | |
| 6828 | |
| 6829 // Swap elements. | |
| 6830 __ movq(object.reg(), Operand(index1.reg(), 0)); | |
| 6831 __ movq(tmp2.reg(), Operand(index2.reg(), 0)); | |
| 6832 __ movq(Operand(index2.reg(), 0), object.reg()); | |
| 6833 __ movq(Operand(index1.reg(), 0), tmp2.reg()); | |
| 6834 | |
| 6835 Label done; | |
| 6836 __ InNewSpace(tmp1.reg(), tmp2.reg(), equal, &done); | |
| 6837 // Possible optimization: check that both values are smis | |
| 6838 // (OR them together and test the result against the smi mask). | |
| 6839 | |
| 6840 __ movq(tmp2.reg(), tmp1.reg()); | |
| 6841 __ RecordWriteHelper(tmp1.reg(), index1.reg(), object.reg()); | |
| 6842 __ RecordWriteHelper(tmp2.reg(), index2.reg(), object.reg()); | |
| 6843 __ bind(&done); | |
| 6844 | |
| 6845 deferred->BindExit(); | |
| 6846 frame_->Push(FACTORY->undefined_value()); | |
| 6847 } | |
| 6848 | |
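Before swapping in place, the fast path re-validates the receiver; the guards amount to the conjunction below. A sketch over pre-extracted flags (any failure falls through to Runtime::kSwapElements):

    // Sketch: conditions under which GenerateSwapElements may swap the two
    // backing-store slots directly instead of calling the runtime.
    bool CanSwapInPlace(bool is_js_array, bool needs_access_checks,
                        bool has_indexed_interceptor,
                        bool elements_are_fast_and_writable,
                        bool indices_are_smis, int index1, int index2,
                        int length) {
      return is_js_array && !needs_access_checks &&
             !has_indexed_interceptor && elements_are_fast_and_writable &&
             indices_are_smis && 0 <= index1 && index1 < length &&
             0 <= index2 && index2 < length;
    }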
| 6849 | |
| 6850 void CodeGenerator::GenerateCallFunction(ZoneList<Expression*>* args) { | |
| 6851 Comment cmnt(masm_, "[ GenerateCallFunction"); | |
| 6852 | |
| 6853 ASSERT(args->length() >= 2); | |
| 6854 | |
| 6855 int n_args = args->length() - 2; // for receiver and function. | |
| 6856 Load(args->at(0)); // receiver | |
| 6857 for (int i = 0; i < n_args; i++) { | |
| 6858 Load(args->at(i + 1)); | |
| 6859 } | |
| 6860 Load(args->at(n_args + 1)); // function | |
| 6861 Result result = frame_->CallJSFunction(n_args); | |
| 6862 frame_->Push(&result); | |
| 6863 } | |
| 6864 | |
| 6865 | |
| 6866 // Generates the Math.pow method. It only handles special cases and | |
| 6867 // branches to the runtime system for everything else. Note that this | |
| 6868 // function assumes that the call site has executed ToNumber on both | |
| 6869 // arguments. | |
| 6870 void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) { | |
| 6871 ASSERT(args->length() == 2); | |
| 6872 Load(args->at(0)); | |
| 6873 Load(args->at(1)); | |
| 6874 | |
| 6875 Label allocate_return; | |
| 6876 // Load the two operands while leaving the values on the frame. | |
| 6877 frame()->Dup(); | |
| 6878 Result exponent = frame()->Pop(); | |
| 6879 exponent.ToRegister(); | |
| 6880 frame()->Spill(exponent.reg()); | |
| 6881 frame()->PushElementAt(1); | |
| 6882 Result base = frame()->Pop(); | |
| 6883 base.ToRegister(); | |
| 6884 frame()->Spill(base.reg()); | |
| 6885 | |
| 6886 Result answer = allocator()->Allocate(); | |
| 6887 ASSERT(answer.is_valid()); | |
| 6888 ASSERT(!exponent.reg().is(base.reg())); | |
| 6889 JumpTarget call_runtime; | |
| 6890 | |
| 6891 // Save 1 in xmm3 - we need this several times later on. | |
| 6892 __ movl(answer.reg(), Immediate(1)); | |
| 6893 __ cvtlsi2sd(xmm3, answer.reg()); | |
| 6894 | |
| 6895 Label exponent_nonsmi; | |
| 6896 Label base_nonsmi; | |
| 6897 // If the exponent is a heap number go to that specific case. | |
| 6898 __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi); | |
| 6899 __ JumpIfNotSmi(base.reg(), &base_nonsmi); | |
| 6900 | |
| 6901 // Optimized path when the exponent is an integer. | |
| 6902 Label powi; | |
| 6903 __ SmiToInteger32(base.reg(), base.reg()); | |
| 6904 __ cvtlsi2sd(xmm0, base.reg()); | |
| 6905 __ jmp(&powi); | |
| 6906 // The exponent is a smi and the base is a heap number. | |
| 6907 __ bind(&base_nonsmi); | |
| 6908 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), | |
| 6909 Heap::kHeapNumberMapRootIndex); | |
| 6910 call_runtime.Branch(not_equal); | |
| 6911 | |
| 6912 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); | |
| 6913 | |
| 6914 // Optimized computation of pow when the exponent is an integer. | |
| 6915 __ bind(&powi); | |
| 6916 __ SmiToInteger32(exponent.reg(), exponent.reg()); | |
| 6917 | |
| 6918 // Save the exponent in the base register, since we must check later | |
| 6919 // whether it is negative. Base and exponent are in different registers. | |
| 6920 __ movl(base.reg(), exponent.reg()); | |
| 6921 | |
| 6922 // Get absolute value of exponent. | |
| 6923 Label no_neg; | |
| 6924 __ cmpl(exponent.reg(), Immediate(0)); | |
| 6925 __ j(greater_equal, &no_neg); | |
| 6926 __ negl(exponent.reg()); | |
| 6927 __ bind(&no_neg); | |
| 6928 | |
| 6929 // Load xmm1 with 1. | |
| 6930 __ movsd(xmm1, xmm3); | |
| 6931 Label while_true; | |
| 6932 Label no_multiply; | |
| 6933 | |
| 6934 __ bind(&while_true); | |
| 6935 __ shrl(exponent.reg(), Immediate(1)); | |
| 6936 __ j(not_carry, &no_multiply); | |
| 6937 __ mulsd(xmm1, xmm0); | |
| 6938 __ bind(&no_multiply); | |
| 6939 __ testl(exponent.reg(), exponent.reg()); | |
| 6940 __ mulsd(xmm0, xmm0); | |
| 6941 __ j(not_zero, &while_true); | |
| 6942 | |
| 6943 // base.reg() holds the original exponent - if it is negative, return 1/result. | |
| 6944 __ testl(base.reg(), base.reg()); | |
| 6945 __ j(positive, &allocate_return); | |
| 6946 // Special case if xmm1 has reached infinity. | |
| 6947 __ movl(answer.reg(), Immediate(0x7FB00000)); | |
| 6948 __ movd(xmm0, answer.reg()); | |
| 6949 __ cvtss2sd(xmm0, xmm0); | |
| 6950 __ ucomisd(xmm0, xmm1); | |
| 6951 call_runtime.Branch(equal); | |
| 6952 __ divsd(xmm3, xmm1); | |
| 6953 __ movsd(xmm1, xmm3); | |
| 6954 __ jmp(&allocate_return); | |
| 6955 | |
| 6956 // The exponent (or both operands) is a heap number - either way, we now | |
| 6957 // work on doubles. | |
| 6958 __ bind(&exponent_nonsmi); | |
| 6959 __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset), | |
| 6960 Heap::kHeapNumberMapRootIndex); | |
| 6961 call_runtime.Branch(not_equal); | |
| 6962 __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset)); | |
| 6963 // Test whether the exponent is NaN. | |
| 6964 __ ucomisd(xmm1, xmm1); | |
| 6965 call_runtime.Branch(parity_even); | |
| 6966 | |
| 6967 Label base_not_smi; | |
| 6968 Label handle_special_cases; | |
| 6969 __ JumpIfNotSmi(base.reg(), &base_not_smi); | |
| 6970 __ SmiToInteger32(base.reg(), base.reg()); | |
| 6971 __ cvtlsi2sd(xmm0, base.reg()); | |
| 6972 __ jmp(&handle_special_cases); | |
| 6973 __ bind(&base_not_smi); | |
| 6974 __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset), | |
| 6975 Heap::kHeapNumberMapRootIndex); | |
| 6976 call_runtime.Branch(not_equal); | |
| 6977 __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset)); | |
| 6978 __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask)); | |
| 6979 __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask)); | |
| 6980 // base is NaN or +/-Infinity | |
| 6981 call_runtime.Branch(greater_equal); | |
| 6982 __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset)); | |
| 6983 | |
| 6984 // base is in xmm0 and exponent is in xmm1. | |
| 6985 __ bind(&handle_special_cases); | |
| 6986 Label not_minus_half; | |
| 6987 // Test for -0.5. | |
| 6988 // Load xmm2 with -0.5. | |
| 6989 __ movl(answer.reg(), Immediate(0xBF000000)); | |
| 6990 __ movd(xmm2, answer.reg()); | |
| 6991 __ cvtss2sd(xmm2, xmm2); | |
| 6992 // xmm2 now has -0.5. | |
| 6993 __ ucomisd(xmm2, xmm1); | |
| 6994 __ j(not_equal, ¬_minus_half); | |
| 6995 | |
| 6996 // Calculates reciprocal of square root. | |
| 6997 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. | |
| 6998 __ xorpd(xmm1, xmm1); | |
| 6999 __ addsd(xmm1, xmm0); | |
| 7000 __ sqrtsd(xmm1, xmm1); | |
| 7001 __ divsd(xmm3, xmm1); | |
| 7002 __ movsd(xmm1, xmm3); | |
| 7003 __ jmp(&allocate_return); | |
| 7004 | |
| 7005 // Test for 0.5. | |
| 7006 __ bind(¬_minus_half); | |
| 7007 // Load xmm2 with 0.5. | |
| 7008 // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3. | |
| 7009 __ addsd(xmm2, xmm3); | |
| 7010 // xmm2 now has 0.5. | |
| 7011 __ ucomisd(xmm2, xmm1); | |
| 7012 call_runtime.Branch(not_equal); | |
| 7013 | |
| 7014 // Calculates square root. | |
| 7015 // sqrtsd returns -0 when input is -0. ECMA spec requires +0. | |
| 7016 __ xorpd(xmm1, xmm1); | |
| 7017 __ addsd(xmm1, xmm0); | |
| 7018 __ sqrtsd(xmm1, xmm1); | |
| 7019 | |
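The xorpd/addsd pair used in both square-root cases relies on IEEE 754 addition: in the default rounding mode (+0) + (-0) is +0, so adding the input into a zeroed register rewrites a negative zero as a positive zero and leaves every other value unchanged. A standalone C++ illustration of the same fix (ours, not V8 code):

  #include <cmath>
  #include <cstdio>

  double SpecSqrt(double x) {
    double canonical = 0.0 + x;   // -0.0 + +0.0 == +0.0 under IEEE 754.
    return std::sqrt(canonical);  // so sqrtsd never sees a negative zero.
  }

  int main() {
    std::printf("%g\n", SpecSqrt(-0.0));  // prints 0, not -0.
  }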
| 7020 JumpTarget done; | |
| 7021 Label failure, success; | |
| 7022 __ bind(&allocate_return); | |
| 7023 // Make a copy of the frame to enable us to handle allocation | |
| 7024 // failure after the JumpTarget jump. | |
| 7025 VirtualFrame* clone = new VirtualFrame(frame()); | |
| 7026 __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure); | |
| 7027 __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1); | |
| 7028 // Remove the two original values from the frame - we only need those | |
| 7029 // in the case where we branch to runtime. | |
| 7030 frame()->Drop(2); | |
| 7031 exponent.Unuse(); | |
| 7032 base.Unuse(); | |
| 7033 done.Jump(&answer); | |
| 7034 // Use the copy of the original frame as our current frame. | |
| 7035 RegisterFile empty_regs; | |
| 7036 SetFrame(clone, &empty_regs); | |
| 7037 // If we experience an allocation failure we branch to runtime. | |
| 7038 __ bind(&failure); | |
| 7039 call_runtime.Bind(); | |
| 7040 answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2); | |
| 7041 | |
| 7042 done.Bind(&answer); | |
| 7043 frame()->Push(&answer); | |
| 7044 } | |
| 7045 | |
| 7046 | |
| 7047 void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) { | |
| 7048 ASSERT_EQ(args->length(), 1); | |
| 7049 Load(args->at(0)); | |
| 7050 TranscendentalCacheStub stub(TranscendentalCache::SIN, | |
| 7051 TranscendentalCacheStub::TAGGED); | |
| 7052 Result result = frame_->CallStub(&stub, 1); | |
| 7053 frame_->Push(&result); | |
| 7054 } | |
| 7055 | |
| 7056 | |
| 7057 void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) { | |
| 7058 ASSERT_EQ(args->length(), 1); | |
| 7059 Load(args->at(0)); | |
| 7060 TranscendentalCacheStub stub(TranscendentalCache::COS, | |
| 7061 TranscendentalCacheStub::TAGGED); | |
| 7062 Result result = frame_->CallStub(&stub, 1); | |
| 7063 frame_->Push(&result); | |
| 7064 } | |
| 7065 | |
| 7066 | |
| 7067 void CodeGenerator::GenerateMathLog(ZoneList<Expression*>* args) { | |
| 7068 ASSERT_EQ(args->length(), 1); | |
| 7069 Load(args->at(0)); | |
| 7070 TranscendentalCacheStub stub(TranscendentalCache::LOG, | |
| 7071 TranscendentalCacheStub::TAGGED); | |
| 7072 Result result = frame_->CallStub(&stub, 1); | |
| 7073 frame_->Push(&result); | |
| 7074 } | |
| 7075 | |
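All three helpers funnel through TranscendentalCacheStub, which consults a small cache keyed on the bit pattern of the input double before computing anything. A simplified sketch of the idea (table size and hashing are ours; the real stub works on tagged values):

  #include <cmath>
  #include <cstdint>
  #include <cstring>

  struct Entry { uint64_t input_bits; double output; };
  static Entry sin_cache[512];             // illustrative size only.

  double CachedSin(double x) {
    uint64_t bits;
    std::memcpy(&bits, &x, sizeof bits);   // key on the exact bits, so 0.0
    Entry& e = sin_cache[bits % 512];      // and -0.0 use distinct slots.
    if (e.input_bits != bits) {            // miss: compute and remember.
      e.input_bits = bits;
      e.output = std::sin(x);
    }
    return e.output;
  }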
| 7076 | |
| 7077 // Generates the Math.sqrt method. Note that this function assumes that | |
| 7078 // the callsite has already executed ToNumber on the argument. | |
| 7079 void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) { | |
| 7080 ASSERT(args->length() == 1); | |
| 7081 Load(args->at(0)); | |
| 7082 | |
| 7083 // Leave original value on the frame if we need to call runtime. | |
| 7084 frame()->Dup(); | |
| 7085 Result result = frame()->Pop(); | |
| 7086 result.ToRegister(); | |
| 7087 frame()->Spill(result.reg()); | |
| 7088 Label runtime; | |
| 7089 Label non_smi; | |
| 7090 Label load_done; | |
| 7091 JumpTarget end; | |
| 7092 | |
| 7093 __ JumpIfNotSmi(result.reg(), &non_smi); | |
| 7094 __ SmiToInteger32(result.reg(), result.reg()); | |
| 7095 __ cvtlsi2sd(xmm0, result.reg()); | |
| 7096 __ jmp(&load_done); | |
| 7097 __ bind(&non_smi); | |
| 7098 __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset), | |
| 7099 Heap::kHeapNumberMapRootIndex); | |
| 7100 __ j(not_equal, &runtime); | |
| 7101 __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset)); | |
| 7102 | |
| 7103 __ bind(&load_done); | |
| 7104 __ sqrtsd(xmm0, xmm0); | |
| 7105 // A copy of the virtual frame to allow us to go to runtime after the | |
| 7106 // JumpTarget jump. | |
| 7107 Result scratch = allocator()->Allocate(); | |
| 7108 VirtualFrame* clone = new VirtualFrame(frame()); | |
| 7109 __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime); | |
| 7110 | |
| 7111 __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0); | |
| 7112 frame()->Drop(1); | |
| 7113 scratch.Unuse(); | |
| 7114 end.Jump(&result); | |
| 7115 // We only branch to runtime if we have an allocation error. | |
| 7116 // Use the copy of the original frame as our current frame. | |
| 7117 RegisterFile empty_regs; | |
| 7118 SetFrame(clone, &empty_regs); | |
| 7119 __ bind(&runtime); | |
| 7120 result = frame()->CallRuntime(Runtime::kMath_sqrt, 1); | |
| 7121 | |
| 7122 end.Bind(&result); | |
| 7123 frame()->Push(&result); | |
| 7124 } | |
| 7125 | |
| 7126 | |
| 7127 void CodeGenerator::GenerateIsRegExpEquivalent(ZoneList<Expression*>* args) { | |
| 7128 ASSERT_EQ(2, args->length()); | |
| 7129 Load(args->at(0)); | |
| 7130 Load(args->at(1)); | |
| 7131 Result right_res = frame_->Pop(); | |
| 7132 Result left_res = frame_->Pop(); | |
| 7133 right_res.ToRegister(); | |
| 7134 left_res.ToRegister(); | |
| 7135 Result tmp_res = allocator()->Allocate(); | |
| 7136 ASSERT(tmp_res.is_valid()); | |
| 7137 Register right = right_res.reg(); | |
| 7138 Register left = left_res.reg(); | |
| 7139 Register tmp = tmp_res.reg(); | |
| 7140 right_res.Unuse(); | |
| 7141 left_res.Unuse(); | |
| 7142 tmp_res.Unuse(); | |
| 7143 __ cmpq(left, right); | |
| 7144 destination()->true_target()->Branch(equal); | |
| 7145 // Fail if either is a non-HeapObject. | |
| 7146 Condition either_smi = | |
| 7147 masm()->CheckEitherSmi(left, right, tmp); | |
| 7148 destination()->false_target()->Branch(either_smi); | |
| 7149 __ movq(tmp, FieldOperand(left, HeapObject::kMapOffset)); | |
| 7150 __ cmpb(FieldOperand(tmp, Map::kInstanceTypeOffset), | |
| 7151 Immediate(JS_REGEXP_TYPE)); | |
| 7152 destination()->false_target()->Branch(not_equal); | |
| 7153 __ cmpq(tmp, FieldOperand(right, HeapObject::kMapOffset)); | |
| 7154 destination()->false_target()->Branch(not_equal); | |
| 7155 __ movq(tmp, FieldOperand(left, JSRegExp::kDataOffset)); | |
| 7156 __ cmpq(tmp, FieldOperand(right, JSRegExp::kDataOffset)); | |
| 7157 destination()->Split(equal); | |
| 7158 } | |
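Restating the branches above in plain C++: two values are regexp-equivalent when they are the same object, or when both are JSRegExp objects that share a map and the same compiled-data object (hence the same pattern and flags). A sketch under those assumptions, not the codegen's actual helpers:

  bool IsRegExpEquivalent(Object* left, Object* right) {
    if (left == right) return true;              // the cmpq fast path.
    if (left->IsSmi() || right->IsSmi()) return false;
    HeapObject* l = HeapObject::cast(left);
    HeapObject* r = HeapObject::cast(right);
    if (l->map()->instance_type() != JS_REGEXP_TYPE) return false;
    if (l->map() != r->map()) return false;      // same map, so both regexps.
    return JSRegExp::cast(l)->data() == JSRegExp::cast(r)->data();
  }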
| 7159 | |
| 7160 | |
| 7161 void CodeGenerator::GenerateHasCachedArrayIndex(ZoneList<Expression*>* args) { | |
| 7162 ASSERT(args->length() == 1); | |
| 7163 Load(args->at(0)); | |
| 7164 Result value = frame_->Pop(); | |
| 7165 value.ToRegister(); | |
| 7166 ASSERT(value.is_valid()); | |
| 7167 __ testl(FieldOperand(value.reg(), String::kHashFieldOffset), | |
| 7168 Immediate(String::kContainsCachedArrayIndexMask)); | |
| 7169 value.Unuse(); | |
| 7170 destination()->Split(zero); | |
| 7171 } | |
| 7172 | |
| 7173 | |
| 7174 void CodeGenerator::GenerateGetCachedArrayIndex(ZoneList<Expression*>* args) { | |
| 7175 ASSERT(args->length() == 1); | |
| 7176 Load(args->at(0)); | |
| 7177 Result string = frame_->Pop(); | |
| 7178 string.ToRegister(); | |
| 7179 | |
| 7180 Result number = allocator()->Allocate(); | |
| 7181 ASSERT(number.is_valid()); | |
| 7182 __ movl(number.reg(), FieldOperand(string.reg(), String::kHashFieldOffset)); | |
| 7183 __ IndexFromHash(number.reg(), number.reg()); | |
| 7184 string.Unuse(); | |
| 7185 frame_->Push(&number); | |
| 7186 } | |
| 7187 | |
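Both helpers read the string's hash field, which does double duty: when the string is a valid array index, the index value is cached in the field's payload, and flag bits record that fact. Note the Split(zero) above - the kContainsCachedArrayIndexMask bits are clear when an index is present. Schematically (bit assignments are ours, not V8's actual layout):

  #include <cstdint>

  const uint32_t kNoIndexMask = 0x3;  // hypothetical; clear <=> index cached.
  const int kIndexShift = 2;          // hypothetical payload position.

  bool HasCachedArrayIndex(uint32_t hash_field) {
    return (hash_field & kNoIndexMask) == 0;  // the testl ... Split(zero).
  }
  uint32_t CachedArrayIndex(uint32_t hash_field) {
    return hash_field >> kIndexShift;         // what IndexFromHash extracts.
  }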
| 7188 | |
| 7189 void CodeGenerator::GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args) { | |
| 7190 frame_->Push(FACTORY->undefined_value()); | |
| 7191 } | |
| 7192 | |
| 7193 | |
| 7194 void CodeGenerator::VisitCallRuntime(CallRuntime* node) { | |
| 7195 if (CheckForInlineRuntimeCall(node)) { | |
| 7196 return; | |
| 7197 } | |
| 7198 | |
| 7199 ZoneList<Expression*>* args = node->arguments(); | |
| 7200 Comment cmnt(masm_, "[ CallRuntime"); | |
| 7201 const Runtime::Function* function = node->function(); | |
| 7202 | |
| 7203 if (function == NULL) { | |
| 7204 // Push the builtins object found in the current global object. | |
| 7205 Result temp = allocator()->Allocate(); | |
| 7206 ASSERT(temp.is_valid()); | |
| 7207 __ movq(temp.reg(), GlobalObjectOperand()); | |
| 7208 __ movq(temp.reg(), | |
| 7209 FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset)); | |
| 7210 frame_->Push(&temp); | |
| 7211 } | |
| 7212 | |
| 7213 // Push the arguments ("left-to-right"). | |
| 7214 int arg_count = args->length(); | |
| 7215 for (int i = 0; i < arg_count; i++) { | |
| 7216 Load(args->at(i)); | |
| 7217 } | |
| 7218 | |
| 7219 if (function == NULL) { | |
| 7220 // Call the JS runtime function. | |
| 7221 frame_->Push(node->name()); | |
| 7222 Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET, | |
| 7223 arg_count, | |
| 7224 loop_nesting_); | |
| 7225 frame_->RestoreContextRegister(); | |
| 7226 frame_->Push(&answer); | |
| 7227 } else { | |
| 7228 // Call the C runtime function. | |
| 7229 Result answer = frame_->CallRuntime(function, arg_count); | |
| 7230 frame_->Push(&answer); | |
| 7231 } | |
| 7232 } | |
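The two function == NULL branches implement a single dispatch rule: a %-call whose name resolves to a C++ runtime function is called directly, and anything else is looked up by name on the builtins object and invoked as an ordinary JS call - which is why the builtins object is pushed first, as the receiver. Roughly, with invented helper names:

  // Illustrative pseudocode only; Resolve, CallJS and GetProperty are
  // stand-ins, not real V8 entry points.
  Value CallRuntime(Name name, ArgList args) {
    if (const Runtime::Function* f = Resolve(name)) {
      return f->entry(args);                  // C++ runtime function.
    }
    Value builtins = global_object()->builtins();
    return CallJS(GetProperty(builtins, name), builtins, args);
  }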
| 7233 | |
| 7234 | |
| 7235 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { | |
| 7236 Comment cmnt(masm_, "[ UnaryOperation"); | |
| 7237 | |
| 7238 Token::Value op = node->op(); | |
| 7239 | |
| 7240 if (op == Token::NOT) { | |
| 7241 // Swap the true and false targets but keep the same actual label | |
| 7242 // as the fall through. | |
| 7243 destination()->Invert(); | |
| 7244 LoadCondition(node->expression(), destination(), true); | |
| 7245 // Swap the labels back. | |
| 7246 destination()->Invert(); | |
| 7247 | |
| 7248 } else if (op == Token::DELETE) { | |
| 7249 Property* property = node->expression()->AsProperty(); | |
| 7250 if (property != NULL) { | |
| 7251 Load(property->obj()); | |
| 7252 Load(property->key()); | |
| 7253 frame_->Push(Smi::FromInt(strict_mode_flag())); | |
| 7254 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 3); | |
| 7255 frame_->Push(&answer); | |
| 7256 return; | |
| 7257 } | |
| 7258 | |
| 7259 Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); | |
| 7260 if (variable != NULL) { | |
| 7261 // Delete of an unqualified identifier is disallowed in strict mode, | |
| 7262 // but "delete this" is allowed. | |
| 7263 ASSERT(strict_mode_flag() == kNonStrictMode || variable->is_this()); | |
| 7264 Slot* slot = variable->AsSlot(); | |
| 7265 if (variable->is_global()) { | |
| 7266 LoadGlobal(); | |
| 7267 frame_->Push(variable->name()); | |
| 7268 frame_->Push(Smi::FromInt(kNonStrictMode)); | |
| 7269 Result answer = frame_->InvokeBuiltin(Builtins::DELETE, | |
| 7270 CALL_FUNCTION, 3); | |
| 7271 frame_->Push(&answer); | |
| 7272 | |
| 7273 } else if (slot != NULL && slot->type() == Slot::LOOKUP) { | |
| 7274 // Call the runtime to delete from the context holding the named | |
| 7275 // variable. Sync the virtual frame eagerly so we can push the | |
| 7276 // arguments directly into place. | |
| 7277 frame_->SyncRange(0, frame_->element_count() - 1); | |
| 7278 frame_->EmitPush(rsi); | |
| 7279 frame_->EmitPush(variable->name()); | |
| 7280 Result answer = frame_->CallRuntime(Runtime::kDeleteContextSlot, 2); | |
| 7281 frame_->Push(&answer); | |
| 7282 } else { | |
| 7283 // Default: deleting a non-global variable that was not introduced | |
| 7284 // dynamically yields false. | |
| 7285 frame_->Push(FACTORY->false_value()); | |
| 7286 } | |
| 7287 } else { | |
| 7288 // Default: Result of deleting expressions is true. | |
| 7289 Load(node->expression()); // may have side-effects | |
| 7290 frame_->SetElementAt(0, FACTORY->true_value()); | |
| 7291 } | |
| 7292 | |
| 7293 } else if (op == Token::TYPEOF) { | |
| 7294 // Special case for loading the typeof expression; see comment on | |
| 7295 // LoadTypeofExpression(). | |
| 7296 LoadTypeofExpression(node->expression()); | |
| 7297 Result answer = frame_->CallRuntime(Runtime::kTypeof, 1); | |
| 7298 frame_->Push(&answer); | |
| 7299 | |
| 7300 } else if (op == Token::VOID) { | |
| 7301 Expression* expression = node->expression(); | |
| 7302 if (expression && expression->AsLiteral() && ( | |
| 7303 expression->AsLiteral()->IsTrue() || | |
| 7304 expression->AsLiteral()->IsFalse() || | |
| 7305 expression->AsLiteral()->handle()->IsNumber() || | |
| 7306 expression->AsLiteral()->handle()->IsString() || | |
| 7307 expression->AsLiteral()->handle()->IsJSRegExp() || | |
| 7308 expression->AsLiteral()->IsNull())) { | |
| 7309 // Omit evaluating the value of the primitive literal. | |
| 7310 // It will be discarded anyway, and can have no side effect. | |
| 7311 frame_->Push(FACTORY->undefined_value()); | |
| 7312 } else { | |
| 7313 Load(node->expression()); | |
| 7314 frame_->SetElementAt(0, FACTORY->undefined_value()); | |
| 7315 } | |
| 7316 | |
| 7317 } else { | |
| 7318 bool can_overwrite = node->expression()->ResultOverwriteAllowed(); | |
| 7319 UnaryOverwriteMode overwrite = | |
| 7320 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; | |
| 7321 bool no_negative_zero = node->expression()->no_negative_zero(); | |
| 7322 Load(node->expression()); | |
| 7323 switch (op) { | |
| 7324 case Token::NOT: | |
| 7325 case Token::DELETE: | |
| 7326 case Token::TYPEOF: | |
| 7327 UNREACHABLE(); // handled above | |
| 7328 break; | |
| 7329 | |
| 7330 case Token::SUB: { | |
| 7331 GenericUnaryOpStub stub( | |
| 7332 Token::SUB, | |
| 7333 overwrite, | |
| 7334 NO_UNARY_FLAGS, | |
| 7335 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); | |
| 7336 Result operand = frame_->Pop(); | |
| 7337 Result answer = frame_->CallStub(&stub, &operand); | |
| 7338 answer.set_type_info(TypeInfo::Number()); | |
| 7339 frame_->Push(&answer); | |
| 7340 break; | |
| 7341 } | |
| 7342 | |
| 7343 case Token::BIT_NOT: { | |
| 7344 // Smi check. | |
| 7345 JumpTarget smi_label; | |
| 7346 JumpTarget continue_label; | |
| 7347 Result operand = frame_->Pop(); | |
| 7348 operand.ToRegister(); | |
| 7349 | |
| 7350 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
| 7351 smi_label.Branch(is_smi, &operand); | |
| 7352 | |
| 7353 GenericUnaryOpStub stub(Token::BIT_NOT, | |
| 7354 overwrite, | |
| 7355 NO_UNARY_SMI_CODE_IN_STUB); | |
| 7356 Result answer = frame_->CallStub(&stub, &operand); | |
| 7357 continue_label.Jump(&answer); | |
| 7358 | |
| 7359 smi_label.Bind(&answer); | |
| 7360 answer.ToRegister(); | |
| 7361 frame_->Spill(answer.reg()); | |
| 7362 __ SmiNot(answer.reg(), answer.reg()); | |
| 7363 continue_label.Bind(&answer); | |
| 7364 answer.set_type_info(TypeInfo::Smi()); | |
| 7365 frame_->Push(&answer); | |
| 7366 break; | |
| 7367 } | |
| 7368 | |
| 7369 case Token::ADD: { | |
| 7370 // Smi check. | |
| 7371 JumpTarget continue_label; | |
| 7372 Result operand = frame_->Pop(); | |
| 7373 TypeInfo operand_info = operand.type_info(); | |
| 7374 operand.ToRegister(); | |
| 7375 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
| 7376 continue_label.Branch(is_smi, &operand); | |
| 7377 frame_->Push(&operand); | |
| 7378 Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER, | |
| 7379 CALL_FUNCTION, 1); | |
| 7380 | |
| 7381 continue_label.Bind(&answer); | |
| 7382 if (operand_info.IsSmi()) { | |
| 7383 answer.set_type_info(TypeInfo::Smi()); | |
| 7384 } else if (operand_info.IsInteger32()) { | |
| 7385 answer.set_type_info(TypeInfo::Integer32()); | |
| 7386 } else { | |
| 7387 answer.set_type_info(TypeInfo::Number()); | |
| 7388 } | |
| 7389 frame_->Push(&answer); | |
| 7390 break; | |
| 7391 } | |
| 7392 default: | |
| 7393 UNREACHABLE(); | |
| 7394 } | |
| 7395 } | |
| 7396 } | |
| 7397 | |
| 7398 | |
| 7399 // The value in dst was optimistically incremented or decremented. | |
| 7400 // The result overflowed or was not smi tagged. Call into the runtime | |
| 7401 // to convert the argument to a number, and call the specialized add | |
| 7402 // or subtract stub. The result is left in dst. | |
| 7403 class DeferredPrefixCountOperation: public DeferredCode { | |
| 7404 public: | |
| 7405 DeferredPrefixCountOperation(Register dst, | |
| 7406 bool is_increment, | |
| 7407 TypeInfo input_type) | |
| 7408 : dst_(dst), is_increment_(is_increment), input_type_(input_type) { | |
| 7409 set_comment("[ DeferredCountOperation"); | |
| 7410 } | |
| 7411 | |
| 7412 virtual void Generate(); | |
| 7413 | |
| 7414 private: | |
| 7415 Register dst_; | |
| 7416 bool is_increment_; | |
| 7417 TypeInfo input_type_; | |
| 7418 }; | |
| 7419 | |
| 7420 | |
| 7421 void DeferredPrefixCountOperation::Generate() { | |
| 7422 Register left; | |
| 7423 if (input_type_.IsNumber()) { | |
| 7424 left = dst_; | |
| 7425 } else { | |
| 7426 __ push(dst_); | |
| 7427 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); | |
| 7428 left = rax; | |
| 7429 } | |
| 7430 | |
| 7431 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, | |
| 7432 NO_OVERWRITE, | |
| 7433 NO_GENERIC_BINARY_FLAGS, | |
| 7434 TypeInfo::Number()); | |
| 7435 stub.GenerateCall(masm_, left, Smi::FromInt(1)); | |
| 7436 | |
| 7437 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 7438 } | |
| 7439 | |
| 7440 | |
| 7441 // The value in dst was optimistically incremented or decremented. | |
| 7442 // The result overflowed or was not smi tagged. Call into the runtime | |
| 7443 // to convert the argument to a number. Update the original value in | |
| 7444 // old. Call the specialized add or subtract stub. The result is | |
| 7445 // left in dst. | |
| 7446 class DeferredPostfixCountOperation: public DeferredCode { | |
| 7447 public: | |
| 7448 DeferredPostfixCountOperation(Register dst, | |
| 7449 Register old, | |
| 7450 bool is_increment, | |
| 7451 TypeInfo input_type) | |
| 7452 : dst_(dst), | |
| 7453 old_(old), | |
| 7454 is_increment_(is_increment), | |
| 7455 input_type_(input_type) { | |
| 7456 set_comment("[ DeferredCountOperation"); | |
| 7457 } | |
| 7458 | |
| 7459 virtual void Generate(); | |
| 7460 | |
| 7461 private: | |
| 7462 Register dst_; | |
| 7463 Register old_; | |
| 7464 bool is_increment_; | |
| 7465 TypeInfo input_type_; | |
| 7466 }; | |
| 7467 | |
| 7468 | |
| 7469 void DeferredPostfixCountOperation::Generate() { | |
| 7470 Register left; | |
| 7471 if (input_type_.IsNumber()) { | |
| 7472 __ push(dst_); // Save the input to use as the old value. | |
| 7473 left = dst_; | |
| 7474 } else { | |
| 7475 __ push(dst_); | |
| 7476 __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION); | |
| 7477 __ push(rax); // Save the result of ToNumber to use as the old value. | |
| 7478 left = rax; | |
| 7479 } | |
| 7480 | |
| 7481 GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB, | |
| 7482 NO_OVERWRITE, | |
| 7483 NO_GENERIC_BINARY_FLAGS, | |
| 7484 TypeInfo::Number()); | |
| 7485 stub.GenerateCall(masm_, left, Smi::FromInt(1)); | |
| 7486 | |
| 7487 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 7488 __ pop(old_); | |
| 7489 } | |
| 7490 | |
| 7491 | |
| 7492 void CodeGenerator::VisitCountOperation(CountOperation* node) { | |
| 7493 Comment cmnt(masm_, "[ CountOperation"); | |
| 7494 | |
| 7495 bool is_postfix = node->is_postfix(); | |
| 7496 bool is_increment = node->op() == Token::INC; | |
| 7497 | |
| 7498 Variable* var = node->expression()->AsVariableProxy()->AsVariable(); | |
| 7499 bool is_const = (var != NULL && var->mode() == Variable::CONST); | |
| 7500 | |
| 7501 // Postfix operations need a stack slot under the reference to hold | |
| 7502 // the old value while the new value is being stored. That way, if | |
| 7503 // storing the new value requires a call, the old value is already on | |
| 7504 // the frame and can be spilled. | |
| 7505 if (is_postfix) frame_->Push(Smi::FromInt(0)); | |
| 7506 | |
| 7507 // A constant reference is not saved to, so the reference is not a | |
| 7508 // compound assignment reference. | |
| 7509 { Reference target(this, node->expression(), !is_const); | |
| 7510 if (target.is_illegal()) { | |
| 7511 // Spoof the virtual frame to have the expected height (one higher | |
| 7512 // than on entry). | |
| 7513 if (!is_postfix) frame_->Push(Smi::FromInt(0)); | |
| 7514 return; | |
| 7515 } | |
| 7516 target.TakeValue(); | |
| 7517 | |
| 7518 Result new_value = frame_->Pop(); | |
| 7519 new_value.ToRegister(); | |
| 7520 | |
| 7521 Result old_value; // Only allocated in the postfix case. | |
| 7522 if (is_postfix) { | |
| 7523 // Allocate a temporary to preserve the old value. | |
| 7524 old_value = allocator_->Allocate(); | |
| 7525 ASSERT(old_value.is_valid()); | |
| 7526 __ movq(old_value.reg(), new_value.reg()); | |
| 7527 | |
| 7528 // The return value for postfix operations is ToNumber(input). | |
| 7529 // Keep more precise type info if the input is some kind of | |
| 7530 // number already. If the input is not a number we have to wait | |
| 7531 // for the deferred code to convert it. | |
| 7532 if (new_value.type_info().IsNumber()) { | |
| 7533 old_value.set_type_info(new_value.type_info()); | |
| 7534 } | |
| 7535 } | |
| 7536 // Ensure the new value is writable. | |
| 7537 frame_->Spill(new_value.reg()); | |
| 7538 | |
| 7539 DeferredCode* deferred = NULL; | |
| 7540 if (is_postfix) { | |
| 7541 deferred = new DeferredPostfixCountOperation(new_value.reg(), | |
| 7542 old_value.reg(), | |
| 7543 is_increment, | |
| 7544 new_value.type_info()); | |
| 7545 } else { | |
| 7546 deferred = new DeferredPrefixCountOperation(new_value.reg(), | |
| 7547 is_increment, | |
| 7548 new_value.type_info()); | |
| 7549 } | |
| 7550 | |
| 7551 if (new_value.is_smi()) { | |
| 7552 if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); } | |
| 7553 } else { | |
| 7554 __ JumpIfNotSmi(new_value.reg(), deferred->entry_label()); | |
| 7555 } | |
| 7556 if (is_increment) { | |
| 7557 __ SmiAddConstant(new_value.reg(), | |
| 7558 new_value.reg(), | |
| 7559 Smi::FromInt(1), | |
| 7560 deferred->entry_label()); | |
| 7561 } else { | |
| 7562 __ SmiSubConstant(new_value.reg(), | |
| 7563 new_value.reg(), | |
| 7564 Smi::FromInt(1), | |
| 7565 deferred->entry_label()); | |
| 7566 } | |
| 7567 deferred->BindExit(); | |
| 7568 | |
| 7569 // Postfix count operations return their input converted to | |
| 7570 // number. The case when the input is already a number is covered | |
| 7571 // above in the allocation code for old_value. | |
| 7572 if (is_postfix && !new_value.type_info().IsNumber()) { | |
| 7573 old_value.set_type_info(TypeInfo::Number()); | |
| 7574 } | |
| 7575 | |
| 7576 new_value.set_type_info(TypeInfo::Number()); | |
| 7577 | |
| 7578 // Postfix: store the old value in the allocated slot under the | |
| 7579 // reference. | |
| 7580 if (is_postfix) frame_->SetElementAt(target.size(), &old_value); | |
| 7581 | |
| 7582 frame_->Push(&new_value); | |
| 7583 // Non-constant: update the reference. | |
| 7584 if (!is_const) target.SetValue(NOT_CONST_INIT); | |
| 7585 } | |
| 7586 | |
| 7587 // Postfix: drop the new value and use the old. | |
| 7588 if (is_postfix) frame_->Drop(); | |
| 7589 } | |
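The fast path above optimistically performs the tagged add or subtract and falls out to the deferred code on overflow or a non-smi input. The same shape in ordinary C++ (the one-bit tagging scheme and helper are ours; __builtin_add_overflow is a GCC/Clang intrinsic):

  #include <cstdint>

  intptr_t SlowIncrement(intptr_t value);  // deferred: ToNumber + add stub.

  // One-bit smi tagging: integer n is represented as n << 1, tag bit 0.
  intptr_t CountIncrement(intptr_t value) {
    intptr_t result;
    if ((value & 1) == 0 &&                        // JumpIfNotSmi
        !__builtin_add_overflow(value, intptr_t{2}, &result)) {
      return result;                               // still a smi: done.
    }
    return SlowIncrement(value);                   // deferred->entry_label().
  }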
| 7590 | |
| 7591 | |
| 7592 void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) { | |
| 7593 // According to ECMA-262 section 11.11, page 58, the binary logical | |
| 7594 // operators must yield the result of one of the two expressions | |
| 7595 // before any ToBoolean() conversions. This means that the value | |
| 7596 // produced by a && or || operator is not necessarily a boolean. | |
| 7597 | |
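Concretely, 'a && b' evaluates to the value of a whenever ToBoolean(a) is false, and 'a || b' evaluates to the value of a whenever ToBoolean(a) is true. A value-level C++ sketch of the rule both branches below implement (the control-flow code additionally defers evaluating the right operand):

  // Stand-in value type, just big enough to make the rule visible.
  struct Value { bool is_string; double num; const char* str; };

  bool ToBooleanValue(const Value& v) {  // simplified ToBoolean.
    return v.is_string ? v.str[0] != '\0' : (v.num != 0 && v.num == v.num);
  }

  // Both return an operand *value*; no boolean is materialized.
  Value LogicalAnd(Value a, Value b) { return ToBooleanValue(a) ? b : a; }
  Value LogicalOr(Value a, Value b)  { return ToBooleanValue(a) ? a : b; }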
| 7598 // NOTE: If the left hand side produces a materialized value (not | |
| 7599 // control flow), we force the right hand side to do the same. This | |
| 7600 // is necessary because we assume that if we get control flow on the | |
| 7601 // last path out of an expression we got it on all paths. | |
| 7602 if (node->op() == Token::AND) { | |
| 7603 JumpTarget is_true; | |
| 7604 ControlDestination dest(&is_true, destination()->false_target(), true); | |
| 7605 LoadCondition(node->left(), &dest, false); | |
| 7606 | |
| 7607 if (dest.false_was_fall_through()) { | |
| 7608 // The current false target was used as the fall-through. If | |
| 7609 // there are no dangling jumps to is_true then the left | |
| 7610 // subexpression was unconditionally false. Otherwise we have | |
| 7611 // paths where we do have to evaluate the right subexpression. | |
| 7612 if (is_true.is_linked()) { | |
| 7613 // We need to compile the right subexpression. If the jump to | |
| 7614 // the current false target was a forward jump then we have a | |
| 7615 // valid frame, we have just bound the false target, and we | |
| 7616 // have to jump around the code for the right subexpression. | |
| 7617 if (has_valid_frame()) { | |
| 7618 destination()->false_target()->Unuse(); | |
| 7619 destination()->false_target()->Jump(); | |
| 7620 } | |
| 7621 is_true.Bind(); | |
| 7622 // The left subexpression compiled to control flow, so the | |
| 7623 // right one is free to do so as well. | |
| 7624 LoadCondition(node->right(), destination(), false); | |
| 7625 } else { | |
| 7626 // We have actually just jumped to or bound the current false | |
| 7627 // target but the current control destination is not marked as | |
| 7628 // used. | |
| 7629 destination()->Use(false); | |
| 7630 } | |
| 7631 | |
| 7632 } else if (dest.is_used()) { | |
| 7633 // The left subexpression compiled to control flow (and is_true | |
| 7634 // was just bound), so the right is free to do so as well. | |
| 7635 LoadCondition(node->right(), destination(), false); | |
| 7636 | |
| 7637 } else { | |
| 7638 // We have a materialized value on the frame, so we exit with | |
| 7639 // one on all paths. There are possibly also jumps to is_true | |
| 7640 // from nested subexpressions. | |
| 7641 JumpTarget pop_and_continue; | |
| 7642 JumpTarget exit; | |
| 7643 | |
| 7644 // Avoid popping the result if it converts to 'false' using the | |
| 7645 // standard ToBoolean() conversion as described in ECMA-262, | |
| 7646 // section 9.2, page 30. | |
| 7647 // | |
| 7648 // Duplicate the TOS value. The duplicate will be popped by | |
| 7649 // ToBoolean. | |
| 7650 frame_->Dup(); | |
| 7651 ControlDestination dest(&pop_and_continue, &exit, true); | |
| 7652 ToBoolean(&dest); | |
| 7653 | |
| 7654 // Pop the result of evaluating the first part. | |
| 7655 frame_->Drop(); | |
| 7656 | |
| 7657 // Compile right side expression. | |
| 7658 is_true.Bind(); | |
| 7659 Load(node->right()); | |
| 7660 | |
| 7661 // Exit (always with a materialized value). | |
| 7662 exit.Bind(); | |
| 7663 } | |
| 7664 | |
| 7665 } else { | |
| 7666 ASSERT(node->op() == Token::OR); | |
| 7667 JumpTarget is_false; | |
| 7668 ControlDestination dest(destination()->true_target(), &is_false, false); | |
| 7669 LoadCondition(node->left(), &dest, false); | |
| 7670 | |
| 7671 if (dest.true_was_fall_through()) { | |
| 7672 // The current true target was used as the fall-through. If | |
| 7673 // there are no dangling jumps to is_false then the left | |
| 7674 // subexpression was unconditionally true. Otherwise we have | |
| 7675 // paths where we do have to evaluate the right subexpression. | |
| 7676 if (is_false.is_linked()) { | |
| 7677 // We need to compile the right subexpression. If the jump to | |
| 7678 // the current true target was a forward jump then we have a | |
| 7679 // valid frame, we have just bound the true target, and we | |
| 7680 // have to jump around the code for the right subexpression. | |
| 7681 if (has_valid_frame()) { | |
| 7682 destination()->true_target()->Unuse(); | |
| 7683 destination()->true_target()->Jump(); | |
| 7684 } | |
| 7685 is_false.Bind(); | |
| 7686 // The left subexpression compiled to control flow, so the | |
| 7687 // right one is free to do so as well. | |
| 7688 LoadCondition(node->right(), destination(), false); | |
| 7689 } else { | |
| 7690 // We have just jumped to or bound the current true target but | |
| 7691 // the current control destination is not marked as used. | |
| 7692 destination()->Use(true); | |
| 7693 } | |
| 7694 | |
| 7695 } else if (dest.is_used()) { | |
| 7696 // The left subexpression compiled to control flow (and is_false | |
| 7697 // was just bound), so the right is free to do so as well. | |
| 7698 LoadCondition(node->right(), destination(), false); | |
| 7699 | |
| 7700 } else { | |
| 7701 // We have a materialized value on the frame, so we exit with | |
| 7702 // one on all paths. There are possibly also jumps to is_false | |
| 7703 // from nested subexpressions. | |
| 7704 JumpTarget pop_and_continue; | |
| 7705 JumpTarget exit; | |
| 7706 | |
| 7707 // Avoid popping the result if it converts to 'true' using the | |
| 7708 // standard ToBoolean() conversion as described in ECMA-262, | |
| 7709 // section 9.2, page 30. | |
| 7710 // | |
| 7711 // Duplicate the TOS value. The duplicate will be popped by | |
| 7712 // ToBoolean. | |
| 7713 frame_->Dup(); | |
| 7714 ControlDestination dest(&exit, &pop_and_continue, false); | |
| 7715 ToBoolean(&dest); | |
| 7716 | |
| 7717 // Pop the result of evaluating the first part. | |
| 7718 frame_->Drop(); | |
| 7719 | |
| 7720 // Compile right side expression. | |
| 7721 is_false.Bind(); | |
| 7722 Load(node->right()); | |
| 7723 | |
| 7724 // Exit (always with a materialized value). | |
| 7725 exit.Bind(); | |
| 7726 } | |
| 7727 } | |
| 7728 } | |
| 7729 | |
| 7730 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { | |
| 7731 Comment cmnt(masm_, "[ BinaryOperation"); | |
| 7732 | |
| 7733 if (node->op() == Token::AND || node->op() == Token::OR) { | |
| 7734 GenerateLogicalBooleanOperation(node); | |
| 7735 } else { | |
| 7736 // NOTE: The code below assumes that the slow cases (calls to runtime) | |
| 7737 // never return a constant/immutable object. | |
| 7738 OverwriteMode overwrite_mode = NO_OVERWRITE; | |
| 7739 if (node->left()->ResultOverwriteAllowed()) { | |
| 7740 overwrite_mode = OVERWRITE_LEFT; | |
| 7741 } else if (node->right()->ResultOverwriteAllowed()) { | |
| 7742 overwrite_mode = OVERWRITE_RIGHT; | |
| 7743 } | |
| 7744 | |
| 7745 if (node->left()->IsTrivial()) { | |
| 7746 Load(node->right()); | |
| 7747 Result right = frame_->Pop(); | |
| 7748 frame_->Push(node->left()); | |
| 7749 frame_->Push(&right); | |
| 7750 } else { | |
| 7751 Load(node->left()); | |
| 7752 Load(node->right()); | |
| 7753 } | |
| 7754 GenericBinaryOperation(node, overwrite_mode); | |
| 7755 } | |
| 7756 } | |
| 7757 | |
| 7758 | |
| 7759 void CodeGenerator::VisitThisFunction(ThisFunction* node) { | |
| 7760 frame_->PushFunction(); | |
| 7761 } | |
| 7762 | |
| 7763 | |
| 7764 void CodeGenerator::VisitCompareOperation(CompareOperation* node) { | |
| 7765 Comment cmnt(masm_, "[ CompareOperation"); | |
| 7766 | |
| 7767 // Get the expressions from the node. | |
| 7768 Expression* left = node->left(); | |
| 7769 Expression* right = node->right(); | |
| 7770 Token::Value op = node->op(); | |
| 7771 // To make typeof testing for natives implemented in JavaScript really | |
| 7772 // efficient, we generate special code for expressions of the form: | |
| 7773 // 'typeof <expression> == <string>'. | |
| 7774 UnaryOperation* operation = left->AsUnaryOperation(); | |
| 7775 if ((op == Token::EQ || op == Token::EQ_STRICT) && | |
| 7776 (operation != NULL && operation->op() == Token::TYPEOF) && | |
| 7777 (right->AsLiteral() != NULL && | |
| 7778 right->AsLiteral()->handle()->IsString())) { | |
| 7779 Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle())); | |
| 7780 | |
| 7781 // Load the operand and move it to a register. | |
| 7782 LoadTypeofExpression(operation->expression()); | |
| 7783 Result answer = frame_->Pop(); | |
| 7784 answer.ToRegister(); | |
| 7785 | |
| 7786 if (check->Equals(HEAP->number_symbol())) { | |
| 7787 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
| 7788 destination()->true_target()->Branch(is_smi); | |
| 7789 frame_->Spill(answer.reg()); | |
| 7790 __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
| 7791 __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex); | |
| 7792 answer.Unuse(); | |
| 7793 destination()->Split(equal); | |
| 7794 | |
| 7795 } else if (check->Equals(HEAP->string_symbol())) { | |
| 7796 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
| 7797 destination()->false_target()->Branch(is_smi); | |
| 7798 | |
| 7799 // It can be an undetectable string object. | |
| 7800 __ movq(kScratchRegister, | |
| 7801 FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
| 7802 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
| 7803 Immediate(1 << Map::kIsUndetectable)); | |
| 7804 destination()->false_target()->Branch(not_zero); | |
| 7805 __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE); | |
| 7806 answer.Unuse(); | |
| 7807 destination()->Split(below); // Unsigned byte comparison needed. | |
| 7808 | |
| 7809 } else if (check->Equals(HEAP->boolean_symbol())) { | |
| 7810 __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex); | |
| 7811 destination()->true_target()->Branch(equal); | |
| 7812 __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex); | |
| 7813 answer.Unuse(); | |
| 7814 destination()->Split(equal); | |
| 7815 | |
| 7816 } else if (check->Equals(HEAP->undefined_symbol())) { | |
| 7817 __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex); | |
| 7818 destination()->true_target()->Branch(equal); | |
| 7819 | |
| 7820 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
| 7821 destination()->false_target()->Branch(is_smi); | |
| 7822 | |
| 7823 // It can be an undetectable object. | |
| 7824 __ movq(kScratchRegister, | |
| 7825 FieldOperand(answer.reg(), HeapObject::kMapOffset)); | |
| 7826 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
| 7827 Immediate(1 << Map::kIsUndetectable)); | |
| 7828 answer.Unuse(); | |
| 7829 destination()->Split(not_zero); | |
| 7830 | |
| 7831 } else if (check->Equals(HEAP->function_symbol())) { | |
| 7832 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
| 7833 destination()->false_target()->Branch(is_smi); | |
| 7834 frame_->Spill(answer.reg()); | |
| 7835 __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg()); | |
| 7836 destination()->true_target()->Branch(equal); | |
| 7837 // Regular expressions are callable so typeof == 'function'. | |
| 7838 __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE); | |
| 7839 answer.Unuse(); | |
| 7840 destination()->Split(equal); | |
| 7841 | |
| 7842 } else if (check->Equals(HEAP->object_symbol())) { | |
| 7843 Condition is_smi = masm_->CheckSmi(answer.reg()); | |
| 7844 destination()->false_target()->Branch(is_smi); | |
| 7845 __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex); | |
| 7846 destination()->true_target()->Branch(equal); | |
| 7847 | |
| 7848 // Regular expressions are typeof == 'function', not 'object'. | |
| 7849 __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister); | |
| 7850 destination()->false_target()->Branch(equal); | |
| 7851 | |
| 7852 // It can be an undetectable object. | |
| 7853 __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset), | |
| 7854 Immediate(1 << Map::kIsUndetectable)); | |
| 7855 destination()->false_target()->Branch(not_zero); | |
| 7856 __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE); | |
| 7857 destination()->false_target()->Branch(below); | |
| 7858 __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE); | |
| 7859 answer.Unuse(); | |
| 7860 destination()->Split(below_equal); | |
| 7861 } else { | |
| 7862 // Uncommon case: typeof testing against a string literal that is | |
| 7863 // never returned from the typeof operator. | |
| 7864 answer.Unuse(); | |
| 7865 destination()->Goto(false); | |
| 7866 } | |
| 7867 return; | |
| 7868 } | |
| 7869 | |
| 7870 Condition cc = no_condition; | |
| 7871 bool strict = false; | |
| 7872 switch (op) { | |
| 7873 case Token::EQ_STRICT: | |
| 7874 strict = true; | |
| 7875 // Fall through | |
| 7876 case Token::EQ: | |
| 7877 cc = equal; | |
| 7878 break; | |
| 7879 case Token::LT: | |
| 7880 cc = less; | |
| 7881 break; | |
| 7882 case Token::GT: | |
| 7883 cc = greater; | |
| 7884 break; | |
| 7885 case Token::LTE: | |
| 7886 cc = less_equal; | |
| 7887 break; | |
| 7888 case Token::GTE: | |
| 7889 cc = greater_equal; | |
| 7890 break; | |
| 7891 case Token::IN: { | |
| 7892 Load(left); | |
| 7893 Load(right); | |
| 7894 Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2); | |
| 7895 frame_->Push(&answer); // push the result | |
| 7896 return; | |
| 7897 } | |
| 7898 case Token::INSTANCEOF: { | |
| 7899 Load(left); | |
| 7900 Load(right); | |
| 7901 InstanceofStub stub(InstanceofStub::kNoFlags); | |
| 7902 Result answer = frame_->CallStub(&stub, 2); | |
| 7903 answer.ToRegister(); | |
| 7904 __ testq(answer.reg(), answer.reg()); | |
| 7905 answer.Unuse(); | |
| 7906 destination()->Split(zero); | |
| 7907 return; | |
| 7908 } | |
| 7909 default: | |
| 7910 UNREACHABLE(); | |
| 7911 } | |
| 7912 | |
| 7913 if (left->IsTrivial()) { | |
| 7914 Load(right); | |
| 7915 Result right_result = frame_->Pop(); | |
| 7916 frame_->Push(left); | |
| 7917 frame_->Push(&right_result); | |
| 7918 } else { | |
| 7919 Load(left); | |
| 7920 Load(right); | |
| 7921 } | |
| 7922 | |
| 7923 Comparison(node, cc, strict, destination()); | |
| 7924 } | |
| 7925 | |
| 7926 | |
| 7927 void CodeGenerator::VisitCompareToNull(CompareToNull* node) { | |
| 7928 Comment cmnt(masm_, "[ CompareToNull"); | |
| 7929 | |
| 7930 Load(node->expression()); | |
| 7931 Result operand = frame_->Pop(); | |
| 7932 operand.ToRegister(); | |
| 7933 __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex); | |
| 7934 if (node->is_strict()) { | |
| 7935 operand.Unuse(); | |
| 7936 destination()->Split(equal); | |
| 7937 } else { | |
| 7938 // The 'null' value is only equal to 'undefined' if using non-strict | |
| 7939 // comparisons. | |
| 7940 destination()->true_target()->Branch(equal); | |
| 7941 __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex); | |
| 7942 destination()->true_target()->Branch(equal); | |
| 7943 Condition is_smi = masm_->CheckSmi(operand.reg()); | |
| 7944 destination()->false_target()->Branch(is_smi); | |
| 7945 | |
| 7946 // It can be an undetectable object. | |
| 7947 // Use a scratch register in preference to spilling operand.reg(). | |
| 7948 Result temp = allocator()->Allocate(); | |
| 7949 ASSERT(temp.is_valid()); | |
| 7950 __ movq(temp.reg(), | |
| 7951 FieldOperand(operand.reg(), HeapObject::kMapOffset)); | |
| 7952 __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset), | |
| 7953 Immediate(1 << Map::kIsUndetectable)); | |
| 7954 temp.Unuse(); | |
| 7955 operand.Unuse(); | |
| 7956 destination()->Split(not_zero); | |
| 7957 } | |
| 7958 } | |
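The semantics emitted here: under strict comparison only null itself matches, while under abstract equality null also matches undefined and 'undetectable' host objects. As a checklist in C++ (Value and the predicates are hypothetical stand-ins for the root and map-bit checks above):

  struct Value;
  bool IsNull(const Value&);
  bool IsUndefined(const Value&);
  bool IsSmi(const Value&);
  bool IsUndetectable(const Value&);

  bool CompareToNull(const Value& v, bool is_strict) {
    if (IsNull(v)) return true;
    if (is_strict) return false;
    if (IsUndefined(v)) return true;
    return !IsSmi(v) && IsUndetectable(v);  // the Map::kIsUndetectable bit.
  }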
| 7959 | |
| 7960 | |
| 7961 #ifdef DEBUG | |
| 7962 bool CodeGenerator::HasValidEntryRegisters() { | |
| 7963 return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0)) | |
| 7964 && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0)) | |
| 7965 && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0)) | |
| 7966 && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0)) | |
| 7967 && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0)) | |
| 7968 && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0)) | |
| 7969 && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0)) | |
| 7970 && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0)) | |
| 7971 && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0)) | |
| 7972 && (allocator()->count(r15) == (frame()->is_used(r15) ? 1 : 0)); | |
| 7973 } | |
| 7974 #endif | |
| 7975 | |
| 7976 | |
| 7977 | |
| 7978 // Emit a LoadIC call to get the value from receiver and leave it in | |
| 7979 // dst. The receiver register is restored after the call. | |
| 7980 class DeferredReferenceGetNamedValue: public DeferredCode { | |
| 7981 public: | |
| 7982 DeferredReferenceGetNamedValue(Register dst, | |
| 7983 Register receiver, | |
| 7984 Handle<String> name) | |
| 7985 : dst_(dst), receiver_(receiver), name_(name) { | |
| 7986 set_comment("[ DeferredReferenceGetNamedValue"); | |
| 7987 } | |
| 7988 | |
| 7989 virtual void Generate(); | |
| 7990 | |
| 7991 Label* patch_site() { return &patch_site_; } | |
| 7992 | |
| 7993 private: | |
| 7994 Label patch_site_; | |
| 7995 Register dst_; | |
| 7996 Register receiver_; | |
| 7997 Handle<String> name_; | |
| 7998 }; | |
| 7999 | |
| 8000 | |
| 8001 void DeferredReferenceGetNamedValue::Generate() { | |
| 8002 if (!receiver_.is(rax)) { | |
| 8003 __ movq(rax, receiver_); | |
| 8004 } | |
| 8005 __ Move(rcx, name_); | |
| 8006 Handle<Code> ic = Isolate::Current()->builtins()->LoadIC_Initialize(); | |
| 8007 __ Call(ic, RelocInfo::CODE_TARGET); | |
| 8008 // The call must be followed by a test rax instruction to indicate | |
| 8009 // that the inobject property case was inlined. | |
| 8010 // | |
| 8011 // Store the delta to the map check instruction here in the test | |
| 8012 // instruction. Use masm_-> instead of the __ macro since the | |
| 8013 // latter can't return a value. | |
| 8014 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | |
| 8015 // Here we use masm_-> instead of the __ macro because this is the | |
| 8016 // instruction that gets patched and coverage code gets in the way. | |
| 8017 masm_->testl(rax, Immediate(-delta_to_patch_site)); | |
| 8018 Counters* counters = masm()->isolate()->counters(); | |
| 8019 __ IncrementCounter(counters->named_load_inline_miss(), 1); | |
| 8020 | |
| 8021 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 8022 } | |
| 8023 | |
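The testl(rax, -delta) convention is the contract between this generated code and the IC runtime: on a miss, the IC inspects the instruction that follows the call's return address; a test-rax opcode there both flags the site as inlined and carries, in its immediate, the negative distance back to the movq holding the dummy map. Schematically (the helper is hypothetical; the real logic lives in the IC runtime):

  #include <cstdint>
  #include <cstring>

  void InstallMap(uint8_t* map_check_site, uint8_t* new_map);  // hypothetical.

  void PatchInlinedLoadSite(uint8_t* return_address, uint8_t* new_map) {
    if (return_address[0] != 0xA9) return;  // 0xA9 = test eax, imm32;
    int32_t delta;                          // anything else: not inlined.
    std::memcpy(&delta, return_address + 1, sizeof delta);
    InstallMap(return_address + delta, new_map);  // delta was stored negative.
  }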
| 8024 | |
| 8025 class DeferredReferenceGetKeyedValue: public DeferredCode { | |
| 8026 public: | |
| 8027 explicit DeferredReferenceGetKeyedValue(Register dst, | |
| 8028 Register receiver, | |
| 8029 Register key) | |
| 8030 : dst_(dst), receiver_(receiver), key_(key) { | |
| 8031 set_comment("[ DeferredReferenceGetKeyedValue"); | |
| 8032 } | |
| 8033 | |
| 8034 virtual void Generate(); | |
| 8035 | |
| 8036 Label* patch_site() { return &patch_site_; } | |
| 8037 | |
| 8038 private: | |
| 8039 Label patch_site_; | |
| 8040 Register dst_; | |
| 8041 Register receiver_; | |
| 8042 Register key_; | |
| 8043 }; | |
| 8044 | |
| 8045 | |
| 8046 void DeferredReferenceGetKeyedValue::Generate() { | |
| 8047 if (receiver_.is(rdx)) { | |
| 8048 if (!key_.is(rax)) { | |
| 8049 __ movq(rax, key_); | |
| 8050 } // else do nothing. | |
| 8051 } else if (receiver_.is(rax)) { | |
| 8052 if (key_.is(rdx)) { | |
| 8053 __ xchg(rax, rdx); | |
| 8054 } else if (key_.is(rax)) { | |
| 8055 __ movq(rdx, receiver_); | |
| 8056 } else { | |
| 8057 __ movq(rdx, receiver_); | |
| 8058 __ movq(rax, key_); | |
| 8059 } | |
| 8060 } else if (key_.is(rax)) { | |
| 8061 __ movq(rdx, receiver_); | |
| 8062 } else { | |
| 8063 __ movq(rax, key_); | |
| 8064 __ movq(rdx, receiver_); | |
| 8065 } | |
| 8066 // Calculate the delta from the IC call instruction to the map check | |
| 8067 // movq instruction in the inlined version. This delta is stored in | |
| 8068 // a test(rax, delta) instruction after the call so that we can find | |
| 8069 // it in the IC initialization code and patch the movq instruction. | |
| 8070 // This means that we cannot allow test instructions after calls to | |
| 8071 // KeyedLoadIC stubs in other places. | |
| 8072 Handle<Code> ic = Isolate::Current()->builtins()->KeyedLoadIC_Initialize(); | |
| 8073 __ Call(ic, RelocInfo::CODE_TARGET); | |
| 8074 // The delta from the start of the map-compare instruction to the | |
| 8075 // test instruction. We use masm_-> directly here instead of the __ | |
| 8076 // macro because the macro sometimes uses macro expansion to turn | |
| 8077 // into something that can't return a value. This is encountered | |
| 8078 // when doing generated code coverage tests. | |
| 8079 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | |
| 8080 // Here we use masm_-> instead of the __ macro because this is the | |
| 8081 // instruction that gets patched and coverage code gets in the way. | |
| 8082 // TODO(X64): Consider whether it's worth switching the test to a | |
| 8083 // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't | |
| 8084 // be generated normally. | |
| 8085 masm_->testl(rax, Immediate(-delta_to_patch_site)); | |
| 8086 Counters* counters = masm()->isolate()->counters(); | |
| 8087 __ IncrementCounter(counters->keyed_load_inline_miss(), 1); | |
| 8088 | |
| 8089 if (!dst_.is(rax)) __ movq(dst_, rax); | |
| 8090 } | |
| 8091 | |
| 8092 | |
| 8093 class DeferredReferenceSetKeyedValue: public DeferredCode { | |
| 8094 public: | |
| 8095 DeferredReferenceSetKeyedValue(Register value, | |
| 8096 Register key, | |
| 8097 Register receiver, | |
| 8098 StrictModeFlag strict_mode) | |
| 8099 : value_(value), | |
| 8100 key_(key), | |
| 8101 receiver_(receiver), | |
| 8102 strict_mode_(strict_mode) { | |
| 8103 set_comment("[ DeferredReferenceSetKeyedValue"); | |
| 8104 } | |
| 8105 | |
| 8106 virtual void Generate(); | |
| 8107 | |
| 8108 Label* patch_site() { return &patch_site_; } | |
| 8109 | |
| 8110 private: | |
| 8111 Register value_; | |
| 8112 Register key_; | |
| 8113 Register receiver_; | |
| 8114 Label patch_site_; | |
| 8115 StrictModeFlag strict_mode_; | |
| 8116 }; | |
| 8117 | |
| 8118 | |
| 8119 void DeferredReferenceSetKeyedValue::Generate() { | |
| 8120 Counters* counters = masm()->isolate()->counters(); | |
| 8121 __ IncrementCounter(counters->keyed_store_inline_miss(), 1); | |
| 8122 // Move value, receiver, and key to registers rax, rdx, and rcx, as | |
| 8123 // the IC stub expects. | |
| 8124 // Move value to rax, using xchg if the receiver or key is in rax. | |
| 8125 if (!value_.is(rax)) { | |
| 8126 if (!receiver_.is(rax) && !key_.is(rax)) { | |
| 8127 __ movq(rax, value_); | |
| 8128 } else { | |
| 8129 __ xchg(rax, value_); | |
| 8130 // Update receiver_ and key_ if they are affected by the swap. | |
| 8131 if (receiver_.is(rax)) { | |
| 8132 receiver_ = value_; | |
| 8133 } else if (receiver_.is(value_)) { | |
| 8134 receiver_ = rax; | |
| 8135 } | |
| 8136 if (key_.is(rax)) { | |
| 8137 key_ = value_; | |
| 8138 } else if (key_.is(value_)) { | |
| 8139 key_ = rax; | |
| 8140 } | |
| 8141 } | |
| 8142 } | |
| 8143 // Value is now in rax. Its original location is remembered in value_, | |
| 8144 // and the value is restored to value_ before returning. | |
| 8145 // The variables receiver_ and key_ are not preserved. | |
| 8146 // Move receiver and key to rdx and rcx, swapping if necessary. | |
| 8147 if (receiver_.is(rdx)) { | |
| 8148 if (!key_.is(rcx)) { | |
| 8149 __ movq(rcx, key_); | |
| 8150 } // Else everything is already in the right place. | |
| 8151 } else if (receiver_.is(rcx)) { | |
| 8152 if (key_.is(rdx)) { | |
| 8153 __ xchg(rcx, rdx); | |
| 8154 } else if (key_.is(rcx)) { | |
| 8155 __ movq(rdx, receiver_); | |
| 8156 } else { | |
| 8157 __ movq(rdx, receiver_); | |
| 8158 __ movq(rcx, key_); | |
| 8159 } | |
| 8160 } else if (key_.is(rcx)) { | |
| 8161 __ movq(rdx, receiver_); | |
| 8162 } else { | |
| 8163 __ movq(rcx, key_); | |
| 8164 __ movq(rdx, receiver_); | |
| 8165 } | |
| 8166 | |
| 8167 // Call the IC stub. | |
| 8168 Handle<Code> ic(Isolate::Current()->builtins()->builtin( | |
| 8169 (strict_mode_ == kStrictMode) ? Builtins::kKeyedStoreIC_Initialize_Strict | |
| 8170 : Builtins::kKeyedStoreIC_Initialize)); | |
| 8171 __ Call(ic, RelocInfo::CODE_TARGET); | |
| 8172 // The delta from the start of the map-compare instructions (initial movq) | |
| 8173 // to the test instruction. We use masm_-> directly here instead of the | |
| 8174 // __ macro because the macro sometimes uses macro expansion to turn | |
| 8175 // into something that can't return a value. This is encountered | |
| 8176 // when doing generated code coverage tests. | |
| 8177 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site()); | |
| 8178 // Here we use masm_-> instead of the __ macro because this is the | |
| 8179 // instruction that gets patched and coverage code gets in the way. | |
| 8180 masm_->testl(rax, Immediate(-delta_to_patch_site)); | |
| 8181 // Restore value (returned from store IC). | |
| 8182 if (!value_.is(rax)) __ movq(value_, rax); | |
| 8183 } | |
| 8184 | |
| 8185 | |
| 8186 Result CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) { | |
| 8187 #ifdef DEBUG | |
| 8188 int original_height = frame()->height(); | |
| 8189 #endif | |
| 8190 Result result; | |
| 8191 // Do not inline the inobject property case for loads from the global | |
| 8192 // object. Also do not inline for unoptimized code. This saves time | |
| 8193 // in the code generator. Unoptimized code is toplevel code or code | |
| 8194 // that is not in a loop. | |
| 8195 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { | |
| 8196 Comment cmnt(masm(), "[ Load from named Property"); | |
| 8197 frame()->Push(name); | |
| 8198 | |
| 8199 RelocInfo::Mode mode = is_contextual | |
| 8200 ? RelocInfo::CODE_TARGET_CONTEXT | |
| 8201 : RelocInfo::CODE_TARGET; | |
| 8202 result = frame()->CallLoadIC(mode); | |
| 8203 // A test rax instruction following the call signals that the | |
| 8204 // inobject property case was inlined. Ensure that there is not | |
| 8205 // a test rax instruction here. | |
| 8206 __ nop(); | |
| 8207 } else { | |
| 8208 // Inline the inobject property case. | |
| 8209 Comment cmnt(masm(), "[ Inlined named property load"); | |
| 8210 Result receiver = frame()->Pop(); | |
| 8211 receiver.ToRegister(); | |
| 8212 result = allocator()->Allocate(); | |
| 8213 ASSERT(result.is_valid()); | |
| 8214 | |
| 8215 // r12 is now a reserved register, so it cannot be the receiver. | |
| 8216 // If it was, the distance to the fixup location would not be constant. | |
| 8217 ASSERT(!receiver.reg().is(r12)); | |
| 8218 | |
| 8219 DeferredReferenceGetNamedValue* deferred = | |
| 8220 new DeferredReferenceGetNamedValue(result.reg(), receiver.reg(), name); | |
| 8221 | |
| 8222 // Check that the receiver is a heap object. | |
| 8223 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); | |
| 8224 | |
| 8225 __ bind(deferred->patch_site()); | |
| 8226 // This is the map check instruction that will be patched (so we can't | |
| 8227 // use the double underscore macro that may insert instructions). | |
| 8228 // Initially use an invalid map to force a failure. | |
| 8229 masm()->movq(kScratchRegister, FACTORY->null_value(), | |
| 8230 RelocInfo::EMBEDDED_OBJECT); | |
| 8231 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | |
| 8232 kScratchRegister); | |
| 8233 // This branch is always a forwards branch so it's always a fixed | |
| 8234 // size which allows the assert below to succeed and patching to work. | |
| 8235 // Don't use deferred->Branch(...), since that might add coverage code. | |
| 8236 masm()->j(not_equal, deferred->entry_label()); | |
| 8237 | |
| 8238 // The delta from the patch label to the load offset must be | |
| 8239 // statically known. | |
| 8240 ASSERT(masm()->SizeOfCodeGeneratedSince(deferred->patch_site()) == | |
| 8241 LoadIC::kOffsetToLoadInstruction); | |
| 8242 // The initial (invalid) offset has to be large enough to force | |
| 8243 // a 32-bit instruction encoding to allow patching with an | |
| 8244 // arbitrary offset. Use kMaxInt (minus kHeapObjectTag). | |
| 8245 int offset = kMaxInt; | |
| 8246 masm()->movq(result.reg(), FieldOperand(receiver.reg(), offset)); | |
| 8247 | |
| 8248 Counters* counters = masm()->isolate()->counters(); | |
| 8249 __ IncrementCounter(counters->named_load_inline(), 1); | |
| 8250 deferred->BindExit(); | |
| 8251 } | |
| 8252 ASSERT(frame()->height() == original_height - 1); | |
| 8253 return result; | |
| 8254 } | |
| 8255 | |
| 8256 | |
| 8257 Result CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) { | |
| 8258 #ifdef DEBUG | |
| 8259 int expected_height = frame()->height() - (is_contextual ? 1 : 2); | |
| 8260 #endif | |
| 8261 | |
| 8262 Result result; | |
| 8263 if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) { | |
| 8264 result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag()); | |
| 8265 // A test rax instruction following the call signals that the inobject | |
| 8266 // property case was inlined. Ensure that there is not a test rax | |
| 8267 // instruction here. | |
| 8268 __ nop(); | |
| 8269 } else { | |
| 8270 // Inline the in-object property case. | |
| 8271 JumpTarget slow, done; | |
| 8272 Label patch_site; | |
| 8273 | |
| 8274 // Get the value and receiver from the stack. | |
| 8275 Result value = frame()->Pop(); | |
| 8276 value.ToRegister(); | |
| 8277 Result receiver = frame()->Pop(); | |
| 8278 receiver.ToRegister(); | |
| 8279 | |
| 8280 // Allocate result register. | |
| 8281 result = allocator()->Allocate(); | |
| 8282 ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid()); | |
| 8283 | |
| 8284 // r12 is now a reserved register, so it cannot be the receiver. | |
| 8285 // If it was, the distance to the fixup location would not be constant. | |
| 8286 ASSERT(!receiver.reg().is(r12)); | |
| 8287 | |
| 8288 // Check that the receiver is a heap object. | |
| 8289 Condition is_smi = masm()->CheckSmi(receiver.reg()); | |
| 8290 slow.Branch(is_smi, &value, &receiver); | |
| 8291 | |
| 8292 // This is the map check instruction that will be patched. | |
| 8293 // Initially use an invalid map to force a failure. The exact | |
| 8294 // instruction sequence is important because we use the | |
| 8295 // kOffsetToStoreInstruction constant for patching. We avoid using | |
| 8296 // the __ macro for the following two instructions because it | |
| 8297 // might introduce extra instructions. | |
| 8298 __ bind(&patch_site); | |
| 8299 masm()->movq(kScratchRegister, FACTORY->null_value(), | |
| 8300 RelocInfo::EMBEDDED_OBJECT); | |
| 8301 masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | |
| 8302 kScratchRegister); | |
| 8303 // This branch is always a forwards branch so it's always a fixed size | |
| 8304 // which allows the assert below to succeed and patching to work. | |
| 8305 slow.Branch(not_equal, &value, &receiver); | |
| 8306 | |
| 8307 // The delta from the patch label to the store offset must be | |
| 8308 // statically known. | |
| 8309 ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) == | |
| 8310 StoreIC::kOffsetToStoreInstruction); | |
| 8311 | |
| 8312 // The initial (invalid) offset has to be large enough to force a 32-bit | |
| 8313 // instruction encoding to allow patching with an arbitrary offset. Use | |
| 8314 // kMaxInt (minus kHeapObjectTag). | |
| 8315 int offset = kMaxInt; | |
| 8316 __ movq(FieldOperand(receiver.reg(), offset), value.reg()); | |
| 8317 __ movq(result.reg(), value.reg()); | |
| 8318 | |
| 8319 // Allocate scratch register for write barrier. | |
| 8320 Result scratch = allocator()->Allocate(); | |
| 8321 ASSERT(scratch.is_valid()); | |
| 8322 | |
| 8323 // The write barrier clobbers all input registers, so spill the | |
| 8324 // receiver and the value. | |
| 8325 frame_->Spill(receiver.reg()); | |
| 8326 frame_->Spill(value.reg()); | |
| 8327 | |
| 8328 // If the receiver and the value share a register allocate a new | |
| 8329 // register for the receiver. | |
| 8330 if (receiver.reg().is(value.reg())) { | |
| 8331 receiver = allocator()->Allocate(); | |
| 8332 ASSERT(receiver.is_valid()); | |
| 8333 __ movq(receiver.reg(), value.reg()); | |
| 8334 } | |
| 8335 | |
| 8336 // Update the write barrier. To save instructions in the inlined | |
| 8337 // version we do not filter smis. | |
| 8338 Label skip_write_barrier; | |
| 8339 __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier); | |
| 8340 int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site); | |
| 8341 __ lea(scratch.reg(), Operand(receiver.reg(), offset)); | |
| 8342 __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg()); | |
| 8343 if (FLAG_debug_code) { | |
| 8344 __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE); | |
| 8345 __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE); | |
| 8346 __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE); | |
| 8347 } | |
| 8348 __ bind(&skip_write_barrier); | |
| 8349 value.Unuse(); | |
| 8350 scratch.Unuse(); | |
| 8351 receiver.Unuse(); | |
| 8352 done.Jump(&result); | |
| 8353 | |
| 8354 slow.Bind(&value, &receiver); | |
| 8355 frame()->Push(&receiver); | |
| 8356 frame()->Push(&value); | |
| 8357 result = frame()->CallStoreIC(name, is_contextual, strict_mode_flag()); | |
| 8358 // Encode the offset to the map check instruction and the offset | |
| 8359 // to the write barrier store address computation in a test eax | |
| 8360 // instruction. | |
| 8361 int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site); | |
| 8362 __ testl(rax, | |
| 8363 Immediate((delta_to_record_write << 16) | delta_to_patch_site)); | |
| 8364 done.Bind(&result); | |
| 8365 } | |
| 8366 | |
| 8367 ASSERT_EQ(expected_height, frame()->height()); | |
| 8368 return result; | |
| 8369 } | |
| 8370 | |
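| // A hedged sketch of the consumer side of the marker emitted above. The | |
| // 32-bit immediate of the test instruction packs two code offsets: the | |
| // low 16 bits hold the delta back to the patchable map-check movq, and | |
| // the high 16 bits hold the delta to the write barrier address | |
| // computation. DecodePatchSiteDeltas is a hypothetical helper, not part | |
| // of this file; the real decoding lives in the store IC patching code. | |
| static inline void DecodePatchSiteDeltas(unsigned imm32, | |
| int* delta_to_patch_site, | |
| int* delta_to_record_write) { | |
| *delta_to_patch_site = static_cast<int>(imm32 & 0xFFFF); | |
| *delta_to_record_write = static_cast<int>(imm32 >> 16); | |
| } | |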
| 8371 | |
| 8372 Result CodeGenerator::EmitKeyedLoad() { | |
| 8373 #ifdef DEBUG | |
| 8374 int original_height = frame()->height(); | |
| 8375 #endif | |
| 8376 Result result; | |
| 8377 // Inline array load code if inside a loop. We do not know | |
| 8378 // the receiver map yet, so we initially generate the code with | |
| 8379 // a check against an invalid map. In the inline cache code, we | |
| 8380 // patch the map check if appropriate. | |
| 8381 if (loop_nesting() > 0) { | |
| 8382 Comment cmnt(masm_, "[ Inlined load from keyed Property"); | |
| 8383 | |
| 8384 // Use a fresh temporary to load the elements without destroying | |
| 8385 // the receiver which is needed for the deferred slow case. | |
| 8386 // Allocate the temporary early so that we use rax if it is free. | |
| 8387 Result elements = allocator()->Allocate(); | |
| 8388 ASSERT(elements.is_valid()); | |
| 8389 | |
| 8390 Result key = frame_->Pop(); | |
| 8391 Result receiver = frame_->Pop(); | |
| 8392 key.ToRegister(); | |
| 8393 receiver.ToRegister(); | |
| 8394 | |
| 8395 // If key and receiver are shared registers on the frame, their values will | |
| 8396 // be automatically saved and restored when going to deferred code. | |
| 8397 // The result is returned in elements, which is not shared. | |
| 8398 DeferredReferenceGetKeyedValue* deferred = | |
| 8399 new DeferredReferenceGetKeyedValue(elements.reg(), | |
| 8400 receiver.reg(), | |
| 8401 key.reg()); | |
| 8402 | |
| 8403 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); | |
| 8404 | |
| 8405 // Check that the receiver has the expected map. | |
| 8406 // Initially, use an invalid map. The map is patched in the IC | |
| 8407 // initialization code. | |
| 8408 __ bind(deferred->patch_site()); | |
| 8409 // Use masm-> here instead of the double underscore macro since extra | |
| 8410 // coverage code can interfere with the patching. Do not use a load | |
| 8411 // from the root array to load null_value, since the load must be patched | |
| 8412 // with the expected receiver map, which is not in the root array. | |
| 8413 masm_->movq(kScratchRegister, FACTORY->null_value(), | |
| 8414 RelocInfo::EMBEDDED_OBJECT); | |
| 8415 masm_->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset), | |
| 8416 kScratchRegister); | |
| 8417 deferred->Branch(not_equal); | |
| 8418 | |
| 8419 __ JumpUnlessNonNegativeSmi(key.reg(), deferred->entry_label()); | |
| 8420 | |
| 8421 // Get the elements array from the receiver. | |
| 8422 __ movq(elements.reg(), | |
| 8423 FieldOperand(receiver.reg(), JSObject::kElementsOffset)); | |
| 8424 __ AssertFastElements(elements.reg()); | |
| 8425 | |
| 8426 // Check that the key is within bounds. | |
| 8427 __ SmiCompare(key.reg(), | |
| 8428 FieldOperand(elements.reg(), FixedArray::kLengthOffset)); | |
| 8429 deferred->Branch(above_equal); | |
| 8430 | |
| 8431 // Load and check that the result is not the hole. We could | |
| 8432 // reuse the index or elements register for the value. | |
| 8433 // | |
| 8434 // TODO(206): Consider whether it makes sense to try some | |
| 8435 // heuristic about which register to reuse. For example, if | |
| 8436 // one is rax, then we can reuse that one because the value | |
| 8437 // coming from the deferred code will be in rax. | |
| 8438 SmiIndex index = | |
| 8439 masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); | |
| 8440 __ movq(elements.reg(), | |
| 8441 FieldOperand(elements.reg(), | |
| 8442 index.reg, | |
| 8443 index.scale, | |
| 8444 FixedArray::kHeaderSize)); | |
| 8445 result = elements; | |
| 8446 __ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex); | |
| 8447 deferred->Branch(equal); | |
| 8448 Counters* counters = masm()->isolate()->counters(); | |
| 8449 __ IncrementCounter(counters->keyed_load_inline(), 1); | |
| 8450 | |
| 8451 deferred->BindExit(); | |
| 8452 } else { | |
| 8453 Comment cmnt(masm_, "[ Load from keyed Property"); | |
| 8454 result = frame_->CallKeyedLoadIC(RelocInfo::CODE_TARGET); | |
| 8455 // Make sure that we do not have a test instruction after the | |
| 8456 // call. A test instruction after the call is used to | |
| 8457 // indicate that we have generated an inline version of the | |
| 8458 // keyed load. The explicit nop instruction is here because | |
| 8459 // the push that follows might be peep-hole optimized away. | |
| 8460 __ nop(); | |
| 8461 } | |
| 8462 ASSERT(frame()->height() == original_height - 2); | |
| 8463 return result; | |
| 8464 } | |
| 8465 | |
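| // A hedged sketch of how the marker convention described above can be | |
| // consumed: the IC inspects the byte at the call's return address. 0xA9 | |
| // is the x64 opcode of "test eax, imm32", so that byte marks an inlined | |
| // fast path, while the explicit nop (0x90) in the non-inlined branch | |
| // guarantees no accidental match. Illustrative helper only; the real | |
| // check lives in the IC code, not in this file. | |
| static inline bool HasInlinedKeyedCode(const unsigned char* return_address) { | |
| return *return_address == 0xA9; // "test eax, imm32" | |
| } | |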
| 8466 | |
| 8467 Result CodeGenerator::EmitKeyedStore(StaticType* key_type) { | |
| 8468 #ifdef DEBUG | |
| 8469 int original_height = frame()->height(); | |
| 8470 #endif | |
| 8471 Result result; | |
| 8472 // Generate inlined version of the keyed store if the code is in a loop | |
| 8473 // and the key is likely to be a smi. | |
| 8474 if (loop_nesting() > 0 && key_type->IsLikelySmi()) { | |
| 8475 Comment cmnt(masm(), "[ Inlined store to keyed Property"); | |
| 8476 | |
| 8477 // Get the receiver, key and value into registers. | |
| 8478 result = frame()->Pop(); | |
| 8479 Result key = frame()->Pop(); | |
| 8480 Result receiver = frame()->Pop(); | |
| 8481 | |
| 8482 Result tmp = allocator_->Allocate(); | |
| 8483 ASSERT(tmp.is_valid()); | |
| 8484 Result tmp2 = allocator_->Allocate(); | |
| 8485 ASSERT(tmp2.is_valid()); | |
| 8486 | |
| 8487 // Determine whether the value is a constant before putting it in a | |
| 8488 // register. | |
| 8489 bool value_is_constant = result.is_constant(); | |
| 8490 | |
| 8491 // Make sure that value, key and receiver are in registers. | |
| 8492 result.ToRegister(); | |
| 8493 key.ToRegister(); | |
| 8494 receiver.ToRegister(); | |
| 8495 | |
| 8496 DeferredReferenceSetKeyedValue* deferred = | |
| 8497 new DeferredReferenceSetKeyedValue(result.reg(), | |
| 8498 key.reg(), | |
| 8499 receiver.reg(), | |
| 8500 strict_mode_flag()); | |
| 8501 | |
| 8502 // Check that the receiver is not a smi. | |
| 8503 __ JumpIfSmi(receiver.reg(), deferred->entry_label()); | |
| 8504 | |
| 8505 // Check that the key is a smi. | |
| 8506 if (!key.is_smi()) { | |
| 8507 __ JumpIfNotSmi(key.reg(), deferred->entry_label()); | |
| 8508 } else if (FLAG_debug_code) { | |
| 8509 __ AbortIfNotSmi(key.reg()); | |
| 8510 } | |
| 8511 | |
| 8512 // Check that the receiver is a JSArray. | |
| 8513 __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister); | |
| 8514 deferred->Branch(not_equal); | |
| 8515 | |
| 8516 // Get the elements array from the receiver; the map comparison at the | |
| 8517 // patch site below checks that it is not a dictionary. | |
| 8518 __ movq(tmp.reg(), | |
| 8519 FieldOperand(receiver.reg(), JSArray::kElementsOffset)); | |
| 8520 | |
| 8521 // Check whether it is possible to omit the write barrier. If the elements | |
| 8522 // array is in new space or the value written is a smi, we can safely | |
| 8523 // update the elements array without a write barrier. | |
| 8524 Label in_new_space; | |
| 8525 __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space); | |
| 8526 if (!value_is_constant) { | |
| 8527 __ JumpIfNotSmi(result.reg(), deferred->entry_label()); | |
| 8528 } | |
| 8529 | |
| 8530 __ bind(&in_new_space); | |
| 8531 // Bind the deferred code patch site to be able to locate the fixed | |
| 8532 // array map comparison. When debugging, we patch this comparison to | |
| 8533 // always fail so that we will hit the IC call in the deferred code | |
| 8534 // which will allow the debugger to break for fast case stores. | |
| 8535 __ bind(deferred->patch_site()); | |
| 8536 // Avoid using __ to ensure the distance from patch_site | |
| 8537 // to the map address is always the same. | |
| 8538 masm()->movq(kScratchRegister, FACTORY->fixed_array_map(), | |
| 8539 RelocInfo::EMBEDDED_OBJECT); | |
| 8540 __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset), | |
| 8541 kScratchRegister); | |
| 8542 deferred->Branch(not_equal); | |
| 8543 | |
| 8544 // Check that the key is within bounds. Both the key and the length of | |
| 8545 // the JSArray are smis (because the fixed array check above ensures the | |
| 8546 // elements are in the fast case). Use unsigned comparison to handle negative | |
| 8547 // keys. | |
| 8548 __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset), | |
| 8549 key.reg()); | |
| 8550 deferred->Branch(below_equal); | |
| 8551 | |
| 8552 // Store the value. | |
| 8553 SmiIndex index = | |
| 8554 masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2); | |
| 8555 __ movq(FieldOperand(tmp.reg(), | |
| 8556 index.reg, | |
| 8557 index.scale, | |
| 8558 FixedArray::kHeaderSize), | |
| 8559 result.reg()); | |
| 8560 Counters* counters = masm()->isolate()->counters(); | |
| 8561 __ IncrementCounter(counters->keyed_store_inline(), 1); | |
| 8562 | |
| 8563 deferred->BindExit(); | |
| 8564 } else { | |
| 8565 result = frame()->CallKeyedStoreIC(strict_mode_flag()); | |
| 8566 // Make sure that we do not have a test instruction after the | |
| 8567 // call. A test instruction after the call is used to | |
| 8568 // indicate that we have generated an inline version of the | |
| 8569 // keyed store. | |
| 8570 __ nop(); | |
| 8571 } | |
| 8572 ASSERT(frame()->height() == original_height - 3); | |
| 8573 return result; | |
| 8574 } | |
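| | |
| // A hedged sketch of the write-barrier filter applied above, with | |
| // stand-in mask parameters: V8's InNewSpace likewise masks the object's | |
| // address and compares it against the start of new space. A store needs | |
| // the barrier only when the host object is outside new space and the | |
| // stored value is a heap object rather than a smi (on x64 a smi has a | |
| // zero tag bit). | |
| static inline bool NeedsWriteBarrier(uintptr_t host_address, | |
| intptr_t stored_value, | |
| uintptr_t new_space_start, | |
| uintptr_t new_space_mask) { | |
| bool value_is_smi = (stored_value & 1) == 0; | |
| bool host_in_new_space = (host_address & new_space_mask) == new_space_start; | |
| return !value_is_smi && !host_in_new_space; | |
| } | |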
| 8575 | |
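| // A hedged sketch of the address arithmetic performed by the | |
| // SmiToIndex/FieldOperand pairs in EmitKeyedLoad and EmitKeyedStore, | |
| // assuming this era's x64 smi encoding (32-bit payload in the upper | |
| // word) and 8-byte pointers. The constants are illustrative stand-ins | |
| // for kSmiShift, kPointerSizeLog2, FixedArray::kHeaderSize and | |
| // kHeapObjectTag. | |
| static inline intptr_t KeyedElementOffset(intptr_t smi_key) { | |
| const int kSmiShift = 32; // smi = value << 32 | |
| const int kPointerSizeLog2 = 3; // 8-byte array elements | |
| const int kHeaderSize = 16; // FixedArray map word + length word | |
| const int kHeapObjectTag = 1; | |
| intptr_t untagged = smi_key >> kSmiShift; // arithmetic shift assumed | |
| return (untagged << kPointerSizeLog2) + kHeaderSize - kHeapObjectTag; | |
| } | |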
| 8576 | |
| 8577 #undef __ | |
| 8578 #define __ ACCESS_MASM(masm) | |
| 8579 | |
| 8580 | |
| 8581 Handle<String> Reference::GetName() { | |
| 8582 ASSERT(type_ == NAMED); | |
| 8583 Property* property = expression_->AsProperty(); | |
| 8584 if (property == NULL) { | |
| 8585 // Global variable reference treated as a named property reference. | |
| 8586 VariableProxy* proxy = expression_->AsVariableProxy(); | |
| 8587 ASSERT(proxy->AsVariable() != NULL); | |
| 8588 ASSERT(proxy->AsVariable()->is_global()); | |
| 8589 return proxy->name(); | |
| 8590 } else { | |
| 8591 Literal* raw_name = property->key()->AsLiteral(); | |
| 8592 ASSERT(raw_name != NULL); | |
| 8593 return Handle<String>(String::cast(*raw_name->handle())); | |
| 8594 } | |
| 8595 } | |
| 8596 | |
| 8597 | |
| 8598 void Reference::GetValue() { | |
| 8599 ASSERT(!cgen_->in_spilled_code()); | |
| 8600 ASSERT(cgen_->HasValidEntryRegisters()); | |
| 8601 ASSERT(!is_illegal()); | |
| 8602 MacroAssembler* masm = cgen_->masm(); | |
| 8603 | |
| 8604 // Record the source position for the property load. | |
| 8605 Property* property = expression_->AsProperty(); | |
| 8606 if (property != NULL) { | |
| 8607 cgen_->CodeForSourcePosition(property->position()); | |
| 8608 } | |
| 8609 | |
| 8610 switch (type_) { | |
| 8611 case SLOT: { | |
| 8612 Comment cmnt(masm, "[ Load from Slot"); | |
| 8613 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot(); | |
| 8614 ASSERT(slot != NULL); | |
| 8615 cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF); | |
| 8616 break; | |
| 8617 } | |
| 8618 | |
| 8619 case NAMED: { | |
| 8620 Variable* var = expression_->AsVariableProxy()->AsVariable(); | |
| 8621 bool is_global = var != NULL; | |
| 8622 ASSERT(!is_global || var->is_global()); | |
| 8623 if (persist_after_get_) { | |
| 8624 cgen_->frame()->Dup(); | |
| 8625 } | |
| 8626 Result result = cgen_->EmitNamedLoad(GetName(), is_global); | |
| 8627 cgen_->frame()->Push(&result); | |
| 8628 break; | |
| 8629 } | |
| 8630 | |
| 8631 case KEYED: { | |
| 8632 // A load of a bare identifier (load from global) cannot be keyed. | |
| 8633 ASSERT(expression_->AsVariableProxy()->AsVariable() == NULL); | |
| 8634 if (persist_after_get_) { | |
| 8635 cgen_->frame()->PushElementAt(1); | |
| 8636 cgen_->frame()->PushElementAt(1); | |
| 8637 } | |
| 8638 Result value = cgen_->EmitKeyedLoad(); | |
| 8639 cgen_->frame()->Push(&value); | |
| 8640 break; | |
| 8641 } | |
| 8642 | |
| 8643 default: | |
| 8644 UNREACHABLE(); | |
| 8645 } | |
| 8646 | |
| 8647 if (!persist_after_get_) { | |
| 8648 set_unloaded(); | |
| 8649 } | |
| 8650 } | |
| 8651 | |
| 8652 | |
| 8653 void Reference::TakeValue() { | |
| 8654 // TODO(X64): This function is completely architecture independent. Move | |
| 8655 // it somewhere shared. | |
| 8656 | |
| 8657 // For non-constant frame-allocated slots, we invalidate the value in the | |
| 8658 // slot. For all others, we fall back on GetValue. | |
| 8659 ASSERT(!cgen_->in_spilled_code()); | |
| 8660 ASSERT(!is_illegal()); | |
| 8661 if (type_ != SLOT) { | |
| 8662 GetValue(); | |
| 8663 return; | |
| 8664 } | |
| 8665 | |
| 8666 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot(); | |
| 8667 ASSERT(slot != NULL); | |
| 8668 if (slot->type() == Slot::LOOKUP || | |
| 8669 slot->type() == Slot::CONTEXT || | |
| 8670 slot->var()->mode() == Variable::CONST || | |
| 8671 slot->is_arguments()) { | |
| 8672 GetValue(); | |
| 8673 return; | |
| 8674 } | |
| 8675 | |
| 8676 // Only non-constant, frame-allocated parameters and locals can reach | |
| 8677 // here. Be careful not to use the optimizations for arguments | |
| 8678 // object access since that object may not have been initialized yet. | |
| 8679 ASSERT(!slot->is_arguments()); | |
| 8680 if (slot->type() == Slot::PARAMETER) { | |
| 8681 cgen_->frame()->TakeParameterAt(slot->index()); | |
| 8682 } else { | |
| 8683 ASSERT(slot->type() == Slot::LOCAL); | |
| 8684 cgen_->frame()->TakeLocalAt(slot->index()); | |
| 8685 } | |
| 8686 | |
| 8687 ASSERT(persist_after_get_); | |
| 8688 // Do not unload the reference, because it is used in SetValue. | |
| 8689 } | |
| 8690 | |
| 8691 | |
| 8692 void Reference::SetValue(InitState init_state) { | |
| 8693 ASSERT(cgen_->HasValidEntryRegisters()); | |
| 8694 ASSERT(!is_illegal()); | |
| 8695 MacroAssembler* masm = cgen_->masm(); | |
| 8696 switch (type_) { | |
| 8697 case SLOT: { | |
| 8698 Comment cmnt(masm, "[ Store to Slot"); | |
| 8699 Slot* slot = expression_->AsVariableProxy()->AsVariable()->AsSlot(); | |
| 8700 ASSERT(slot != NULL); | |
| 8701 cgen_->StoreToSlot(slot, init_state); | |
| 8702 set_unloaded(); | |
| 8703 break; | |
| 8704 } | |
| 8705 | |
| 8706 case NAMED: { | |
| 8707 Comment cmnt(masm, "[ Store to named Property"); | |
| 8708 Result answer = cgen_->EmitNamedStore(GetName(), false); | |
| 8709 cgen_->frame()->Push(&answer); | |
| 8710 set_unloaded(); | |
| 8711 break; | |
| 8712 } | |
| 8713 | |
| 8714 case KEYED: { | |
| 8715 Comment cmnt(masm, "[ Store to keyed Property"); | |
| 8716 Property* property = expression()->AsProperty(); | |
| 8717 ASSERT(property != NULL); | |
| 8718 | |
| 8719 Result answer = cgen_->EmitKeyedStore(property->key()->type()); | |
| 8720 cgen_->frame()->Push(&answer); | |
| 8721 set_unloaded(); | |
| 8722 break; | |
| 8723 } | |
| 8724 | |
| 8725 case UNLOADED: | |
| 8726 case ILLEGAL: | |
| 8727 UNREACHABLE(); | |
| 8728 } | |
| 8729 } | |
| 8730 | |
| 8731 | |
| 8732 Result CodeGenerator::GenerateGenericBinaryOpStubCall(GenericBinaryOpStub* stub, | |
| 8733 Result* left, | |
| 8734 Result* right) { | |
| 8735 if (stub->ArgsInRegistersSupported()) { | |
| 8736 stub->SetArgsInRegisters(); | |
| 8737 return frame_->CallStub(stub, left, right); | |
| 8738 } else { | |
| 8739 frame_->Push(left); | |
| 8740 frame_->Push(right); | |
| 8741 return frame_->CallStub(stub, 2); | |
| 8742 } | |
| 8743 } | |
| 8744 | |
| 8745 #undef __ | |
| 8746 | |
| 8747 #define __ masm. | 50 #define __ masm. |
| 8748 | 51 |
| 8749 #ifdef _WIN64 | 52 #ifdef _WIN64 |
| 8750 typedef double (*ModuloFunction)(double, double); | 53 typedef double (*ModuloFunction)(double, double); |
| 8751 // Define custom fmod implementation. | 54 // Define custom fmod implementation. |
| 8752 ModuloFunction CreateModuloFunction() { | 55 ModuloFunction CreateModuloFunction() { |
| 8753 size_t actual_size; | 56 size_t actual_size; |
| 8754 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, | 57 byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize, |
| 8755 &actual_size, | 58 &actual_size, |
| 8756 true)); | 59 true)); |
| (...skipping 77 matching lines...) |
| 8834 } | 137 } |
| 8835 | 138 |
| 8836 #endif | 139 #endif |
| 8837 | 140 |
| 8838 | 141 |
| 8839 #undef __ | 142 #undef __ |
| 8840 | 143 |
| 8841 } } // namespace v8::internal | 144 } } // namespace v8::internal |
| 8842 | 145 |
| 8843 #endif // V8_TARGET_ARCH_X64 | 146 #endif // V8_TARGET_ARCH_X64 |