Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | |
Søren Thygesen Gjesse
2010/01/19 22:59:12
Please remove all the code in comments.

Alexandre
2010/01/22 23:08:42
On 2010/01/19 22:59:12, Søren Gjesse wrote:
> Please remove all the code in comments.
Removed.
| 2 // Redistribution and use in source and binary forms, with or without | |
| 3 // modification, are permitted provided that the following conditions are | |
| 4 // met: | |
| 5 // | |
| 6 // * Redistributions of source code must retain the above copyright | |
| 7 // notice, this list of conditions and the following disclaimer. | |
| 8 // * Redistributions in binary form must reproduce the above | |
| 9 // copyright notice, this list of conditions and the following | |
| 10 // disclaimer in the documentation and/or other materials provided | |
| 11 // with the distribution. | |
| 12 // * Neither the name of Google Inc. nor the names of its | |
| 13 // contributors may be used to endorse or promote products derived | |
| 14 // from this software without specific prior written permission. | |
| 15 // | |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 27 | |
| 28 | |
| 29 #include "v8.h" | |
| 30 | |
| 31 #include "bootstrapper.h" | |
| 32 #include "codegen-inl.h" | |
| 33 #include "debug.h" | |
| 34 #include "parser.h" | |
| 35 #include "register-allocator-inl.h" | |
| 36 #include "runtime.h" | |
| 37 #include "scopes.h" | |
| 38 #include "compiler.h" | |
| 39 | |
| 40 | |
| 41 | |
| 42 namespace v8 { | |
| 43 namespace internal { | |
| 44 | |
| 45 #define __ ACCESS_MASM(masm_) | |
| 46 | |
| 47 //static void EmitIdenticalObjectComparison(MacroAssembler* masm, | |
| 48 // Label* slow, | |
| 49 // Condition cc); | |
| 50 //static void EmitSmiNonsmiComparison(MacroAssembler* masm, | |
| 51 // Label* rhs_not_nan, | |
| 52 // Label* slow, | |
| 53 // bool strict); | |
| 54 //static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | |
| 55 //static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm); | |
| 56 //static void MultiplyByKnownInt(MacroAssembler* masm, | |
| 57 // Register source, | |
| 58 // Register destination, | |
| 59 // int known_int); | |
| 60 //static bool IsEasyToMultiplyBy(int x); | |
| 61 | |
| 62 | |
| 63 | |
| 64 // ------------------------------------------------------------------------- | |
| 65 // Platform-specific DeferredCode functions. | |
| 66 | |
| 67 | |
| 68 void DeferredCode::SaveRegisters() { | |
| 69 UNIMPLEMENTED_(); | |
| 70 // for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) { | |
| 71 // int action = registers_[i]; | |
| 72 // if (action == kPush) { | |
| 73 // __ push(RegisterAllocator::ToRegister(i)); | |
| 74 // } else if (action != kIgnore && (action & kSyncedFlag) == 0) { | |
| 75 // __ sw(RegisterAllocator::ToRegister(i), MemOperand(fp, action)); | |
| 76 // } | |
| 77 // } | |
| 78 } | |
| 79 | |
| 80 | |
| 81 void DeferredCode::RestoreRegisters() { | |
| 82 UNIMPLEMENTED_(); | |
| 83 // // Restore registers in reverse order due to the stack. | |
| 84 // for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) { | |
| 85 // int action = registers_[i]; | |
| 86 // if (action == kPush) { | |
| 87 // __ pop(RegisterAllocator::ToRegister(i)); | |
| 88 // } else if (action != kIgnore) { | |
| 89 // action &= ~kSyncedFlag; | |
| 90 // __ lw(RegisterAllocator::ToRegister(i), MemOperand(fp, action)); | |
| 91 // } | |
| 92 // } | |
| 93 } | |
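A note on the restore order: SaveRegisters pushes in ascending register index and RestoreRegisters pops descending, because the stack is LIFO. A minimal standalone sketch of that invariant (toy register file, names invented):

```cpp
#include <cassert>
#include <stack>

int main() {
  int regs[3] = {11, 22, 33};  // toy register file
  std::stack<int> s;           // the machine stack (LIFO)
  for (int i = 0; i < 3; i++) s.push(regs[i]);  // SaveRegisters order
  int restored[3];
  for (int i = 2; i >= 0; i--) {  // RestoreRegisters: reverse order
    restored[i] = s.top();
    s.pop();
  }
  for (int i = 0; i < 3; i++) assert(restored[i] == regs[i]);
  return 0;
}
```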
| 94 | |
| 95 | |
| 96 // ------------------------------------------------------------------------- | |
| 97 // CodeGenState implementation. | |
| 98 | |
| 99 CodeGenState::CodeGenState(CodeGenerator* owner) | |
| 100 : owner_(owner), | |
| 101 true_target_(NULL), | |
| 102 false_target_(NULL), | |
| 103 previous_(NULL) { | |
| 104 owner_->set_state(this); | |
| 105 } | |
| 106 | |
| 107 | |
| 108 CodeGenState::CodeGenState(CodeGenerator* owner, | |
| 109 JumpTarget* true_target, | |
| 110 JumpTarget* false_target) | |
| 111 : owner_(owner), | |
| 112 true_target_(true_target), | |
| 113 false_target_(false_target), | |
| 114 previous_(owner->state()) { | |
| 115 owner_->set_state(this); | |
| 116 } | |
| 117 | |
| 118 | |
| 119 CodeGenState::~CodeGenState() { | |
| 120 ASSERT(owner_->state() == this); | |
| 121 owner_->set_state(previous_); | |
| 122 } | |
| 123 | |
| 124 | |
| 125 // ------------------------------------------------------------------------- | |
| 126 // CodeGenerator implementation | |
| 127 | |
| 128 CodeGenerator::CodeGenerator(int buffer_size, Handle<Script> script, | |
| 129 bool is_eval) | |
| 130 : is_eval_(is_eval), | |
| 131 script_(script), | |
| 132 deferred_(8), | |
| 133 masm_(new MacroAssembler(NULL, buffer_size)), | |
| 134 scope_(NULL), | |
| 135 frame_(NULL), | |
| 136 allocator_(NULL), | |
| 137 cc_reg_(cc_always), | |
| 138 state_(NULL), | |
| 139 function_return_is_shadowed_(false) { | |
| 140 } | |
| 141 | |
| 142 | |
| 143 // Calling conventions: | |
| 144 // s8_fp: caller's frame pointer | |
| 145 // sp: stack pointer | |
| 146 // a1: called JS function | |
| 147 // cp: callee's context | |
| 148 | |
| 149 void CodeGenerator::GenCode(FunctionLiteral* fun) { | |
| 150 UNIMPLEMENTED_(); | |
| 151 // // Record the position for debugging purposes. | |
| 152 // CodeForFunctionPosition(fun); | |
| 153 // | |
| 154 // ZoneList<Statement*>* body = fun->body(); | |
| 155 // | |
| 156 // // Initialize state. | |
| 157 // ASSERT(scope_ == NULL); | |
| 158 // scope_ = fun->scope(); | |
| 159 // ASSERT(allocator_ == NULL); | |
| 160 // RegisterAllocator register_allocator(this); | |
| 161 // allocator_ = &register_allocator; | |
| 162 // ASSERT(frame_ == NULL); | |
| 163 // frame_ = new VirtualFrame(); | |
| 164 // cc_reg_ = cc_always; | |
| 165 // { | |
| 166 // CodeGenState state(this); | |
| 167 // | |
| 168 // | |
| 169 // // Entry: | |
| 170 // // Stack: receiver, arguments | |
| 171 // // ra: return address | |
| 172 // // fp: caller's frame pointer | |
| 173 // // sp: stack pointer | |
| 174 // // a1: called JS function | |
| 175 // // cp: callee's context | |
| 176 // allocator_->Initialize(); | |
| 177 // frame_->Enter(); | |
| 178 // | |
| 179 //#ifdef DEBUG | |
| 180 // if (strlen(FLAG_stop_at) > 0 && | |
| 181 // fun->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { | |
| 182 // frame_->SpillAll(); | |
| 183 // __ stop("stop-at"); | |
| 184 // } | |
| 185 //#endif | |
| 186 // | |
| 187 // // Allocate space for locals and initialize them. This also checks | |
| 188 // // for stack overflow. | |
| 189 // frame_->AllocateStackSlots(); | |
| 190 // // Initialize the function return target after the locals are set | |
| 191 // // up, because it needs the expected frame height from the frame. | |
| 192 // function_return_.set_direction(JumpTarget::BIDIRECTIONAL); | |
| 193 // function_return_is_shadowed_ = false; | |
| 194 // | |
| 195 // VirtualFrame::SpilledScope spilled_scope; | |
| 196 // if (scope_->num_heap_slots() > 0) { | |
| 197 //#ifdef DEBUG | |
| 198 //// printf("%s - %d - %s: if (scope_->num_heap_slots() > 0)\n", __FILE__, __LINE__, __func__); | |
| 199 //#endif | |
| 200 // | |
| 201 // // Allocate local context. | |
| 202 // // Get outer context and create a new context based on it. | |
| 203 // __ lw(a0, frame_->Function()); | |
| 204 // frame_->EmitPush(a0); | |
| 205 // frame_->CallRuntime(Runtime::kNewContext, 1); // v0 holds the result | |
| 206 // __ nop(); // NOP_ADDED | |
| 207 // | |
| 208 //#ifdef DEBUG | |
| 209 // JumpTarget verified_true; | |
| 210 // verified_true.Branch(eq, no_hint, v0, Operand(cp)); | |
| 211 // __ nop(); // NOP_ADDED | |
| 212 // __ stop("NewContext: v0 is expected to be the same as cp"); | |
| 213 // verified_true.Bind(); | |
| 214 //#endif | |
| 215 // // Update context local. | |
| 216 // __ sw(cp, frame_->Context()); | |
| 217 // } | |
| 218 // | |
| 219 // // TODO(1241774): Improve this code: | |
| 220 // // 1) only needed if we have a context | |
| 221 // // 2) no need to recompute context ptr every single time | |
| 222 // // 3) don't copy parameter operand code from SlotOperand! | |
| 223 // { | |
| 224 // Comment cmnt2(masm_, "[ copy context parameters into .context"); | |
| 225 // | |
| 226 // // Note that iteration order is relevant here! If we have the same | |
| 227 // // parameter twice (e.g., function (x, y, x)), and that parameter | |
| 228 // // needs to be copied into the context, it must be the last argument | |
| 229 // // passed to the parameter that needs to be copied. This is a rare | |
| 230 // // case so we don't check for it, instead we rely on the copying | |
| 231 // // order: such a parameter is copied repeatedly into the same | |
| 232 // // context location and thus the last value is what is seen inside | |
| 233 // // the function. | |
| 234 // for (int i = 0; i < scope_->num_parameters(); i++) { | |
| 235 // Variable* par = scope_->parameter(i); | |
| 236 // Slot* slot = par->slot(); | |
| 237 // if (slot != NULL && slot->type() == Slot::CONTEXT) { | |
| 238 // ASSERT(!scope_->is_global_scope()); // no parameters in global scope | |
| 239 // __ lw(a1, frame_->ParameterAt(i)); | |
| 240 // // Loads r2 with context; used below in RecordWrite. | |
| 241 // __ sw(a1, SlotOperand(slot, a2)); | |
| 242 // // Load the offset into r3. | |
| 243 // int slot_offset = | |
| 244 // FixedArray::kHeaderSize + slot->index() * kPointerSize; | |
| 245 // __ li(a3, Operand(slot_offset)); | |
| 246 // __ RecordWrite(a2, a3, a1); | |
| 247 // } | |
| 248 // } | |
| 249 // } | |
| 250 // | |
| 251 // // Store the arguments object. This must happen after context | |
| 252 // // initialization because the arguments object may be stored in the | |
| 253 // // context. | |
| 254 // if (scope_->arguments() != NULL) { | |
| 255 //#ifdef DEBUG | |
| 256 //// printf("%s - %d - %s: if (scope_->arguments() != NULL) ", __FILE__, __LINE__, __func__); | |
| 257 //#endif | |
| 258 // | |
| 259 // ASSERT(scope_->arguments_shadow() != NULL); | |
| 260 // Comment cmnt(masm_, "[ allocate arguments object"); | |
| 261 // { Reference shadow_ref(this, scope_->arguments_shadow()); | |
| 262 // { Reference arguments_ref(this, scope_->arguments()); | |
| 263 // ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT); | |
| 264 // __ lw(a2, frame_->Function()); | |
| 265 // // The receiver is below the arguments (and args slots), the return address, | |
| 266 // // and the frame pointer on the stack. | |
| 267 // const int kReceiverDisplacement = 2 + 4 + scope_->num_parameters(); | |
| 268 // __ addiu(a1, fp, Operand(kReceiverDisplacement * kPointerSize)); | |
| 269 // __ li(a0, Operand(Smi::FromInt(scope_->num_parameters()))); | |
| 270 // frame_->Adjust(3); | |
| 271 // __ multi_push_reversed(a0.bit() | a1.bit() | a2.bit()); | |
| 272 // frame_->CallStub(&stub, 3); | |
| 273 // __ nop(); // NOP_ADDED | |
| 274 // frame_->EmitPush(v0); | |
| 275 // arguments_ref.SetValue(NOT_CONST_INIT); | |
| 276 // } | |
| 277 // shadow_ref.SetValue(NOT_CONST_INIT); | |
| 278 // } | |
| 279 // frame_->Drop(); // Value is no longer needed. | |
| 280 // } | |
| 281 // | |
| 282 // // Generate code to 'execute' declarations and initialize functions | |
| 283 // // (source elements). In case of an illegal redeclaration we need to | |
| 284 // // handle that instead of processing the declarations. | |
| 285 // if (scope_->HasIllegalRedeclaration()) { | |
| 286 // Comment cmnt(masm_, "[ illegal redeclarations"); | |
| 287 // scope_->VisitIllegalRedeclaration(this); | |
| 288 // } else { | |
| 289 // Comment cmnt(masm_, "[ declarations"); | |
| 290 // ProcessDeclarations(scope_->declarations()); | |
| 291 // // Bail out if a stack-overflow exception occurred when processing | |
| 292 // // declarations. | |
| 293 // if (HasStackOverflow()) return; | |
| 294 // } | |
| 295 // | |
| 296 // if (FLAG_trace) { | |
| 297 // frame_->CallRuntime(Runtime::kTraceEnter, 0); | |
| 298 // __ nop(); // NOP_ADDED | |
| 299 // // Ignore the return value. | |
| 300 // } | |
| 301 // | |
| 302 // // Compile the body of the function in a vanilla state. Don't | |
| 303 // // bother compiling all the code if the scope has an illegal | |
| 304 // // redeclaration. | |
| 305 // if (!scope_->HasIllegalRedeclaration()) { | |
| 306 // Comment cmnt(masm_, "[ function body"); | |
| 307 //#ifdef DEBUG | |
| 308 // bool is_builtin = Bootstrapper::IsActive(); | |
| 309 // bool should_trace = | |
| 310 // is_builtin ? FLAG_trace_builtin_calls : FLAG_trace_calls; | |
| 311 // if (should_trace) { | |
| 312 // frame_->CallRuntime(Runtime::kDebugTrace, 0); | |
| 313 // __ nop(); // NOP_ADDED | |
| 314 // // Ignore the return value. | |
| 315 // } | |
| 316 //#endif | |
| 317 //#ifdef DEBUG | |
| 318 //// printf("VisitStatementsAndSpill(body)\n"); | |
| 319 //#endif | |
| 320 // VisitStatementsAndSpill(body); | |
| 321 // } | |
| 322 // } | |
| 323 // | |
| 324 // // Generate the return sequence if necessary. | |
| 325 // if (has_valid_frame() || function_return_.is_linked()) { | |
| 326 // if (!function_return_.is_linked()) { | |
| 327 // CodeForReturnPosition(fun); | |
| 328 // } | |
| 329 // // exit | |
| 330 // // v0: result | |
| 331 // // sp: stack pointer | |
| 332 // // fp: frame pointer | |
| 333 // // cp: callee's context | |
| 334 // __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); | |
| 335 // | |
| 336 // function_return_.Bind(); | |
| 337 // if (FLAG_trace) { | |
| 338 // // Push the return value on the stack as the parameter. | |
| 339 // // Runtime::TraceExit returns the parameter as it is. | |
| 340 // frame_->EmitPush(v0); | |
| 341 // frame_->CallRuntime(Runtime::kTraceExit, 1); | |
| 342 // __ nop(); // NOP_ADDED | |
| 343 // } | |
| 344 // | |
| 345 // // Add a label for checking the size of the code used for returning. | |
| 346 // Label check_exit_codesize; | |
| 347 // masm_->bind(&check_exit_codesize); | |
| 348 // | |
| 349 // // Tear down the frame which will restore the caller's frame pointer and | |
| 350 // // the link register. | |
| 351 // frame_->Exit(); | |
| 352 // | |
| 353 // // Here we use masm_-> instead of the __ macro to avoid the code coverage | |
| 354 // // tool from instrumenting as we rely on the code size here. | |
| 355 // masm_->addiu(sp, sp, Operand((scope_->num_parameters() + 1)*kPointerSize)); | |
| 356 //// + StandardFrameConstants::kRegularArgsSlotsSize)); | |
| 357 // masm_->Jump(ra); | |
| 358 // | |
| 359 // // Check that the size of the code used for returning matches what is | |
| 360 // // expected by the debugger. | |
| 361 // ASSERT_EQ(kJSReturnSequenceLength, | |
| 362 // masm_->InstructionsGeneratedSince(&check_exit_codesize)); | |
| 363 // __ nop(); | |
| 364 // } | |
| 365 // | |
| 366 // // Code generation state must be reset. | |
| 367 // ASSERT(!has_cc()); | |
| 368 // ASSERT(state_ == NULL); | |
| 369 // ASSERT(!function_return_is_shadowed_); | |
| 370 // function_return_.Unuse(); | |
| 371 // DeleteFrame(); | |
| 372 // | |
| 373 // // Process any deferred code using the register allocator. | |
| 374 // if (!HasStackOverflow()) { | |
| 375 // ProcessDeferred(); | |
| 376 // } | |
| 377 // | |
| 378 // allocator_ = NULL; | |
| 379 // scope_ = NULL; | |
| 380 } | |
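The arguments-object setup above computes kReceiverDisplacement = 2 + 4 + scope_->num_parameters(). A hedged reading of those constants as a worked calculation (constant names invented; the 4 is assumed to be the reserved o32 outgoing-argument slots the comment calls "args slots", the 2 the saved return address and frame pointer):

```cpp
constexpr int kPointerSize = 4;  // MIPS32

// Receiver address relative to fp, per the comment "the receiver is below
// the arguments (and args slots), the return address, and the frame pointer".
int ReceiverOffsetFromFp(int num_parameters) {
  const int kSavedRaFp = 2;  // return address + caller's frame pointer
  const int kArgsSlots = 4;  // assumed: o32 reserved outgoing-argument slots
  return (kSavedRaFp + kArgsSlots + num_parameters) * kPointerSize;
}

// Example: 2 parameters -> (2 + 4 + 2) * 4 = 32 bytes above fp, which is
// what "addiu(a1, fp, kReceiverDisplacement * kPointerSize)" computes.
```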
| 381 | |
| 382 | |
| 383 MemOperand CodeGenerator::SlotOperand(Slot* slot, Register tmp) { | |
| 384 UNIMPLEMENTED_(); | |
| 385 // // Currently, this assertion will fail if we try to assign to | |
| 386 // // a constant variable that is constant because it is read-only | |
| 387 // // (such as the variable referring to a named function expression). | |
| 388 // // We need to implement assignments to read-only variables. | |
| 389 // // Ideally, we should do this during AST generation (by converting | |
| 390 // // such assignments into expression statements); however, in general | |
| 391 // // we may not be able to make the decision until past AST generation, | |
| 392 // // that is when the entire program is known. | |
| 393 // ASSERT(slot != NULL); | |
| 394 // int index = slot->index(); | |
| 395 // switch (slot->type()) { | |
| 396 // case Slot::PARAMETER: | |
| 397 // return frame_->ParameterAt(index); | |
| 398 // | |
| 399 // case Slot::LOCAL: | |
| 400 // return frame_->LocalAt(index); | |
| 401 // | |
| 402 // case Slot::CONTEXT: { | |
| 403 //#ifdef DEBUG | |
| 404 //// printf("case Slot::CONTEXT: \n"); | |
| 405 //#endif | |
| 406 // | |
| 407 //// // Follow the context chain if necessary. | |
| 408 // ASSERT(!tmp.is(cp)); // do not overwrite context register | |
| 409 // Register context = cp; | |
| 410 // int chain_length = scope()->ContextChainLength(slot->var()->scope()); | |
| 411 // for (int i = 0; i < chain_length; i++) { | |
| 412 // // Load the closure. | |
| 413 // // (All contexts, even 'with' contexts, have a closure, | |
| 414 // // and it is the same for all contexts inside a function. | |
| 415 // // There is no need to go to the function context first.) | |
| 416 //// __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 417 // __ lw(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 418 // // Load the function context (which is the incoming, outer context). | |
| 419 //// __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 420 // __ lw(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 421 // context = tmp; | |
| 422 // } | |
| 423 // // We may have a 'with' context now. Get the function context. | |
| 424 // // (In fact this mov may never be needed, since the scope analysis | |
| 425 // // may not permit a direct context access in this case and thus we are | |
| 426 // // always at a function context. However it is safe to dereference be- | |
| 427 // // cause the function context of a function context is itself. Before | |
| 428 // // deleting this mov we should try to create a counter-example first, | |
| 429 // // though...) | |
| 430 //// __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 431 // __ lw(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 432 // return ContextOperand(tmp, index); | |
| 433 // } | |
| 434 // | |
| 435 // default: | |
| 436 // UNREACHABLE(); | |
| 437 // return MemOperand(no_reg, 0); | |
| 438 // } | |
| 439 return MemOperand(no_reg, 0); // UNIMPLEMENTED RETURN | |
| 440 } | |
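The Slot::CONTEXT case walks the context chain one lexical level per iteration: load the context's closure, then that closure's defining context. A minimal sketch over a toy Context type (fields invented; the real hops are ContextOperand(..., CLOSURE_INDEX) followed by JSFunction::kContextOffset, with a final FCONTEXT_INDEX load):

```cpp
struct Context {
  Context* outer;     // stands in for the closure -> kContextOffset double hop
  Context* fcontext;  // FCONTEXT_INDEX: the enclosing function context
};

// chain_length corresponds to scope()->ContextChainLength(slot->var()->scope()).
Context* WalkContextChain(Context* context, int chain_length) {
  for (int i = 0; i < chain_length; i++) {
    context = context->outer;  // one lexical level out
  }
  // We may now be at a 'with' context; take its function context, which is
  // safe even when already there (a function context is its own fcontext).
  return context->fcontext;
}
```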
| 441 | |
| 442 | |
| 443 MemOperand CodeGenerator::ContextSlotOperandCheckExtensions( | |
| 444 Slot* slot, | |
| 445 Register tmp, | |
| 446 Register tmp2, | |
| 447 JumpTarget* slow) { | |
| 448 UNIMPLEMENTED_(); | |
| 449 // ASSERT(slot->type() == Slot::CONTEXT); | |
| 450 // Register context = cp; | |
| 451 // | |
| 452 // UNIMPLEMENTED(); | |
| 453 // __ break_(0x00666); | |
| 454 // | |
| 455 // for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) { | |
| 456 // if (s->num_heap_slots() > 0) { | |
| 457 // if (s->calls_eval()) { | |
| 458 // // Check that extension is NULL. | |
| 459 //// __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); | |
| 460 //// __ tst(tmp2, tmp2); | |
| 461 // __ lw(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); | |
| 462 // slow->Branch(ne, no_hint, tmp2, Operand(zero_reg)); | |
| 463 // __ nop(); // NOP_ADDED | |
| 464 // } | |
| 465 //// __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 466 //// __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 467 // __ lw(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 468 // __ lw(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 469 // context = tmp; | |
| 470 // } | |
| 471 // } | |
| 472 // // Check that last extension is NULL. | |
| 473 //// __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); | |
| 474 //// __ tst(tmp2, tmp2); | |
| 475 // __ lw(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); | |
| 476 // slow->Branch(ne, no_hint, tmp2, Operand(zero_reg)); | |
| 477 //// __ ldr(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 478 // __ lw(tmp, ContextOperand(context, Context::FCONTEXT_INDEX)); | |
| 479 // return ContextOperand(tmp, slot->index()); | |
| 480 return MemOperand(no_reg, 0); // UNIMPLEMENTED RETURN | |
| 481 } | |
| 482 | |
| 483 | |
| 484 // Loads a value on TOS. If it is a boolean value, the result may have been | |
| 485 // (partially) translated into branches, or it may have set the condition | |
| 486 // code register. If force_cc is set, the value is forced to set the | |
| 487 // condition code register and no value is pushed. If the condition code | |
| 488 // register was set, has_cc() is true and cc_reg_ contains the condition to | |
| 489 // test for 'true'. | |
| 490 void CodeGenerator::LoadCondition(Expression* x, | |
| 491 JumpTarget* true_target, | |
| 492 JumpTarget* false_target, | |
| 493 bool force_cc) { | |
| 494 UNIMPLEMENTED_(); | |
| 495 // ASSERT(!has_cc()); | |
| 496 // int original_height = frame_->height(); | |
| 497 // | |
| 498 // { CodeGenState new_state(this, true_target, false_target); | |
| 499 // Visit(x); | |
| 500 // | |
| 501 // // If we hit a stack overflow, we may not have actually visited | |
| 502 // // the expression. In that case, we ensure that we have a | |
| 503 // // valid-looking frame state because we will continue to generate | |
| 504 // // code as we unwind the C++ stack. | |
| 505 // // | |
| 506 // // It's possible to have both a stack overflow and a valid frame | |
| 507 // // state (eg, a subexpression overflowed, visiting it returned | |
| 508 // // with a dummied frame state, and visiting this expression | |
| 509 // // returned with a normal-looking state). | |
| 510 // if (HasStackOverflow() && | |
| 511 // has_valid_frame() && | |
| 512 // !has_cc() && | |
| 513 // frame_->height() == original_height) { | |
| 514 // true_target->Jump(); | |
| 515 // __ nop(); // NOP_ADDED | |
| 516 // } | |
| 517 // } | |
| 518 // if (force_cc && frame_ != NULL && !has_cc()) { | |
| 519 // // Convert the TOS value to a boolean in the condition code register. | |
| 520 // ToBoolean(true_target, false_target); | |
| 521 // } | |
| 522 // ASSERT(!force_cc || !has_valid_frame() || has_cc()); | |
| 523 // ASSERT(!has_valid_frame() || | |
| 524 // (has_cc() && frame_->height() == original_height) || | |
| 525 // (!has_cc() && frame_->height() == original_height + 1)); | |
| 526 } | |
| 527 | |
| 528 | |
| 529 void CodeGenerator::Load(Expression* x) { | |
| 530 UNIMPLEMENTED_(); | |
| 531 //#ifdef DEBUG | |
| 532 // int original_height = frame_->height(); | |
| 533 //#endif | |
| 534 // JumpTarget true_target; | |
| 535 // JumpTarget false_target; | |
| 536 // LoadCondition(x, &true_target, &false_target, false); | |
| 537 // | |
| 538 // if (has_cc()) { | |
| 539 // // Convert cc_reg_ into a boolean value. | |
| 540 // JumpTarget loaded; | |
| 541 // JumpTarget materialize_true; | |
| 542 // materialize_true.Branch(cc_reg_); | |
| 543 // __ LoadRoot(a0, Heap::kFalseValueRootIndex); | |
| 544 // frame_->EmitPush(a0); | |
| 545 // loaded.Jump(); | |
| 546 // __ nop(); // NOP_ADDED | |
| 547 // materialize_true.Bind(); | |
| 548 // __ LoadRoot(a0, Heap::kTrueValueRootIndex); | |
| 549 // frame_->EmitPush(a0); | |
| 550 // loaded.Bind(); | |
| 551 // cc_reg_ = cc_always; | |
| 552 // } | |
| 553 // | |
| 554 // if (true_target.is_linked() || false_target.is_linked()) { | |
| 555 // // We have at least one condition value that has been "translated" | |
| 556 // // into a branch, thus it needs to be loaded explicitly. | |
| 557 // JumpTarget loaded; | |
| 558 // if (frame_ != NULL) { | |
| 559 // loaded.Jump(); // Don't lose the current TOS. | |
| 560 // __ nop(); // NOP_ADDED | |
| 561 // } | |
| 562 // bool both = true_target.is_linked() && false_target.is_linked(); | |
| 563 // // Load "true" if necessary. | |
| 564 // if (true_target.is_linked()) { | |
| 565 // true_target.Bind(); | |
| 566 // __ LoadRoot(a0, Heap::kTrueValueRootIndex); | |
| 567 // frame_->EmitPush(a0); | |
| 568 // } | |
| 569 // // If both "true" and "false" need to be loaded jump across the code for | |
| 570 // // "false". | |
| 571 // if (both) { | |
| 572 // loaded.Jump(); | |
| 573 // __ nop(); // NOP_ADDED | |
| 574 // } | |
| 575 // // Load "false" if necessary. | |
| 576 // if (false_target.is_linked()) { | |
| 577 // false_target.Bind(); | |
| 578 // __ LoadRoot(a0, Heap::kFalseValueRootIndex); | |
| 579 // frame_->EmitPush(a0); | |
| 580 // } | |
| 581 // // A value is loaded on all paths reaching this point. | |
| 582 // loaded.Bind(); | |
| 583 // } | |
| 584 // ASSERT(has_valid_frame()); | |
| 585 // ASSERT(!has_cc()); | |
| 586 // ASSERT(frame_->height() == original_height + 1); | |
| 587 } | |
| 588 | |
| 589 | |
| 590 void CodeGenerator::LoadGlobal() { | |
| 591 UNIMPLEMENTED_(); | |
| 592 // VirtualFrame::SpilledScope spilled_scope; | |
| 593 // __ lw(a0, GlobalObject()); | |
| 594 // frame_->EmitPush(a0); | |
| 595 } | |
| 596 | |
| 597 | |
| 598 void CodeGenerator::LoadGlobalReceiver(Register scratch) { | |
| 599 UNIMPLEMENTED_(); | |
| 600 // VirtualFrame::SpilledScope spilled_scope; | |
| 601 // __ lw(scratch, ContextOperand(cp, Context::GLOBAL_INDEX)); | |
| 602 // __ lw(scratch, | |
| 603 // FieldMemOperand(scratch, GlobalObject::kGlobalReceiverOffset)); | |
| 604 // frame_->EmitPush(scratch); | |
| 605 } | |
| 606 | |
| 607 | |
| 608 // TODO(1241834): Get rid of this function in favor of just using Load, now | |
| 609 // that we have the INSIDE_TYPEOF typeof state. => Need to handle global | |
| 610 // variables w/o reference errors elsewhere. | |
| 611 void CodeGenerator::LoadTypeofExpression(Expression* x) { | |
| 612 UNIMPLEMENTED_(); | |
| 613 //// VirtualFrame::SpilledScope spilled_scope; | |
| 614 // Variable* variable = x->AsVariableProxy()->AsVariable(); | |
| 615 // if (variable != NULL && !variable->is_this() && variable->is_global()) { | |
| 616 // // NOTE: This is somewhat nasty. We force the compiler to load | |
| 617 // // the variable as if through '<global>.<variable>' to make sure we | |
| 618 // // do not get reference errors. | |
| 619 // Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX); | |
| 620 // Literal key(variable->name()); | |
| 621 // // TODO(1241834): Fetch the position from the variable instead of using | |
| 622 // // no position. | |
| 623 // Property property(&global, &key, RelocInfo::kNoPosition); | |
| 624 // LoadAndSpill(&property); | |
| 625 // } else { | |
| 626 // LoadAndSpill(x); | |
| 627 // } | |
| 628 } | |
| 629 | |
| 630 | |
| 631 Reference::Reference(CodeGenerator* cgen, Expression* expression) | |
| 632 : cgen_(cgen), expression_(expression), type_(ILLEGAL) { | |
| 633 cgen->LoadReference(this); | |
| 634 } | |
| 635 | |
| 636 | |
| 637 Reference::~Reference() { | |
| 638 cgen_->UnloadReference(this); | |
| 639 } | |
| 640 | |
| 641 | |
| 642 void CodeGenerator::LoadReference(Reference* ref) { | |
| 643 UNIMPLEMENTED_(); | |
| 644 //#ifdef DEBUG | |
| 645 //// printf("CodeGenerator::LoadReference\n"); | |
| 646 //#endif | |
| 647 // VirtualFrame::SpilledScope spilled_scope; | |
| 648 // Comment cmnt(masm_, "[ LoadReference"); | |
| 649 // Expression* e = ref->expression(); | |
| 650 // Property* property = e->AsProperty(); | |
| 651 // Variable* var = e->AsVariableProxy()->AsVariable(); | |
| 652 // | |
| 653 // if (property != NULL) { | |
| 654 // // The expression is either a property or a variable proxy that rewrites | |
| 655 // // to a property. | |
| 656 // LoadAndSpill(property->obj()); | |
| 657 // // We use a named reference if the key is a literal symbol, unless it is | |
| 658 // // a string that can be legally parsed as an integer. This is because | |
| 659 // // otherwise we will not get into the slow case code that handles [] on | |
| 660 // // String objects. | |
| 661 // Literal* literal = property->key()->AsLiteral(); | |
| 662 // uint32_t dummy; | |
| 663 // if (literal != NULL && | |
| 664 // literal->handle()->IsSymbol() && | |
| 665 // !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) { | |
| 666 // ref->set_type(Reference::NAMED); | |
| 667 // } else { | |
| 668 // LoadAndSpill(property->key()); | |
| 669 // ref->set_type(Reference::KEYED); | |
| 670 // } | |
| 671 // } else if (var != NULL) { | |
| 672 // // The expression is a variable proxy that does not rewrite to a | |
| 673 // // property. Global variables are treated as named property references. | |
| 674 // if (var->is_global()) { | |
| 675 // LoadGlobal(); | |
| 676 // ref->set_type(Reference::NAMED); | |
| 677 // } else { | |
| 678 // ASSERT(var->slot() != NULL); | |
| 679 // ref->set_type(Reference::SLOT); | |
| 680 // } | |
| 681 // } else { | |
| 682 // // Anything else is a runtime error. | |
| 683 // LoadAndSpill(e); | |
| 684 // frame_->CallRuntime(Runtime::kThrowReferenceError, 1); | |
| 685 // __ nop(); // NOP_ADDED | |
| 686 // } | |
| 687 } | |
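The key classification above takes a named reference only for literal symbol keys that do not parse as array indices, so "s[0]"-style accesses still reach the slow-case code that handles [] on String objects. A rough standalone version of that test (approximate: the real AsArrayIndex also bounds the value to fit a uint32_t):

```cpp
#include <cctype>
#include <string>

// True for "0", "7", "123"; false for "", "01", "x", "1a".
bool LooksLikeArrayIndex(const std::string& key) {
  if (key.empty()) return false;
  for (char c : key) {
    if (!std::isdigit(static_cast<unsigned char>(c))) return false;
  }
  return key == "0" || key[0] != '0';  // canonical: no leading zeros
}

// Reference::NAMED iff the literal key is not an index; otherwise KEYED.
bool UseNamedReference(const std::string& literal_key) {
  return !LooksLikeArrayIndex(literal_key);
}
```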
| 688 | |
| 689 | |
| 690 void CodeGenerator::UnloadReference(Reference* ref) { | |
| 691 UNIMPLEMENTED_(); | |
| 692 // VirtualFrame::SpilledScope spilled_scope; | |
| 693 // // Pop a reference from the stack while preserving TOS. | |
| 694 // Comment cmnt(masm_, "[ UnloadReference"); | |
| 695 // int size = ref->size(); | |
| 696 // if (size > 0) { | |
| 697 // frame_->EmitPop(a0); | |
| 698 // frame_->Drop(size); | |
| 699 // frame_->EmitPush(a0); | |
| 700 // } | |
| 701 } | |
| 702 | |
| 703 | |
| 704 // ECMA-262, section 9.2, page 30: ToBoolean(). Convert the given | |
| 705 // register to a boolean in the condition code register. The code | |
| 706 // may jump to 'false_target' in case the register converts to 'false'. | |
| 707 void CodeGenerator::ToBoolean(JumpTarget* true_target, | |
| 708 JumpTarget* false_target) { | |
| 709 UNIMPLEMENTED_(); | |
| 710 // VirtualFrame::SpilledScope spilled_scope; | |
| 711 // // Note: The generated code snippet does not change stack variables. | |
| 712 // // Only the condition code should be set. | |
| 713 //// frame_->EmitPop(r0); | |
| 714 // frame_->EmitPop(t0); | |
| 715 // | |
| 716 // // Fast case checks | |
| 717 // | |
| 718 // // Check if the value is 'false'. | |
| 719 // __ LoadRoot(t1, Heap::kFalseValueRootIndex); | |
| 720 // false_target->Branch(eq, no_hint, t0, Operand(t1)); | |
| 721 // __ nop(); // NOP_ADDED | |
| 722 // | |
| 723 // // Check if the value is 'true'. | |
| 724 // __ LoadRoot(t2, Heap::kTrueValueRootIndex); | |
| 725 // true_target->Branch(eq, no_hint, t0, Operand(t2)); | |
| 726 // __ nop(); // NOP_ADDED | |
| 727 // | |
| 728 // // Check if the value is 'undefined'. | |
| 729 // __ LoadRoot(t3, Heap::kUndefinedValueRootIndex); | |
| 730 // false_target->Branch(eq, no_hint, t0, Operand(t3)); | |
| 731 // __ nop(); // NOP_ADDED | |
| 732 // | |
| 733 // // Check if the value is a smi. | |
| 734 //// __ cmp(r0, Operand(Smi::FromInt(0))); | |
| 735 // false_target->Branch(eq, no_hint, t0, Operand(Smi::FromInt(0))); | |
| 736 // __ nop(); // NOP_ADDED | |
| 737 // __ andi(t4, t0, Operand(kSmiTagMask)); | |
| 738 // true_target->Branch(eq, no_hint, t4, Operand(zero_reg)); | |
| 739 // __ nop(); // NOP_ADDED | |
| 740 // | |
| 741 // // Slow case: call the runtime. | |
| 742 // frame_->EmitPush(t0); | |
| 743 // frame_->CallRuntime(Runtime::kToBool, 1); | |
| 744 // __ nop(); // NOP_ADDED | |
| 745 // // Convert the result (v0) to a condition code. | |
| 746 //// __ cmp(r0, ip); | |
| 747 // __ LoadRoot(s6, Heap::kFalseValueRootIndex); | |
| 748 // __ mov(s5, v0); | |
| 749 // | |
| 750 // cc_reg_ = ne; | |
| 751 } | |
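The fast-path checks above follow ECMA-262 section 9.2 for the values decidable without a runtime call. The same decision tree in plain C++, as a sketch (the value encoding is a toy; the real oddball comparisons are against heap root entries loaded with LoadRoot):

```cpp
#include <cstdint>
#include <optional>

constexpr uint32_t kSmiTagMask = 1;  // low bit clear => smi

// Returns the boolean when a fast check decides it, nullopt when the slow
// path (Runtime::kToBool) would run. The oddball parameters stand in for
// the root-list values compared against t1/t2/t3 above.
std::optional<bool> FastToBoolean(uint32_t value,
                                  uint32_t false_value,
                                  uint32_t true_value,
                                  uint32_t undefined_value) {
  if (value == false_value) return false;       // 'false'
  if (value == true_value) return true;         // 'true'
  if (value == undefined_value) return false;   // 'undefined'
  if (value == 0) return false;                 // Smi::FromInt(0)
  if ((value & kSmiTagMask) == 0) return true;  // any other smi is truthy
  return std::nullopt;                          // heap object: ask the runtime
}
```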
| 752 | |
| 753 | |
| 754 void CodeGenerator::GenericBinaryOperation(Token::Value op, | |
| 755 OverwriteMode overwrite_mode, | |
| 756 int constant_rhs) { | |
| 757 UNIMPLEMENTED_(); | |
| 758 //#ifdef DEBUG | |
| 759 //// printf("CodeGenerator::GenericBinaryOperation\n"); | |
| 760 //#endif | |
| 761 // | |
| 762 // VirtualFrame::SpilledScope spilled_scope; | |
| 763 // // sp[0] : y | |
| 764 // // sp[1] : x | |
| 765 // // result : v0 | |
| 766 // | |
| 767 // // Stub is entered with a call: 'return address' is in lr. | |
| 768 // switch (op) { | |
| 769 // case Token::ADD: // fall through. | |
| 770 // case Token::SUB: // fall through. | |
| 771 // case Token::MUL: | |
| 772 // case Token::DIV: | |
| 773 // case Token::MOD: | |
| 774 // case Token::BIT_OR: | |
| 775 // case Token::BIT_AND: | |
| 776 // case Token::BIT_XOR: | |
| 777 // case Token::SHL: | |
| 778 // case Token::SHR: | |
| 779 // case Token::SAR: { | |
| 780 // frame_->EmitPop(a0); // a0 : y | |
| 781 // frame_->EmitPop(a1); // a1 : x | |
| 782 // GenericBinaryOpStub stub(op, overwrite_mode, constant_rhs); | |
| 783 // frame_->CallStub(&stub, 0); | |
| 784 // __ nop(); // NOP_ADDED | |
| 785 // break; | |
| 786 // } | |
| 787 // | |
| 788 // case Token::COMMA: | |
| 789 // frame_->EmitPop(v0); | |
| 790 // // simply discard left value | |
| 791 // frame_->Drop(); | |
| 792 // break; | |
| 793 //// | |
| 794 // default: | |
| 795 // // Other cases should have been handled before this point. | |
| 796 // UNREACHABLE(); | |
| 797 // break; | |
| 798 // } | |
| 799 } | |
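The stack picture in the comments (sp[0]: y, sp[1]: x) follows from left-to-right evaluation: x is pushed first, so the two pops come back reversed. A toy check of that ordering:

```cpp
#include <cassert>
#include <vector>

int main() {
  std::vector<int> frame;  // toy virtual frame; back() is top of stack
  int x = 7, y = 3;
  frame.push_back(x);  // Load(x)
  frame.push_back(y);  // Load(y): now sp[0] is y, sp[1] is x
  int a0 = frame.back(); frame.pop_back();  // frame_->EmitPop(a0) -> y
  int a1 = frame.back(); frame.pop_back();  // frame_->EmitPop(a1) -> x
  assert(a0 == y && a1 == x);  // the stub computes a1 op a0, i.e. x op y
  return 0;
}
```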
| 800 | |
| 801 | |
| 802 class DeferredInlineSmiOperation: public DeferredCode { | |
| 803 public: | |
| 804 DeferredInlineSmiOperation(Token::Value op, | |
| 805 int value, | |
| 806 bool reversed, | |
| 807 OverwriteMode overwrite_mode) | |
| 808 : op_(op), | |
| 809 value_(value), | |
| 810 reversed_(reversed), | |
| 811 overwrite_mode_(overwrite_mode) { | |
| 812 set_comment("[ DeferredInlinedSmiOperation"); | |
| 813 } | |
| 814 | |
| 815 virtual void Generate(); | |
| 816 | |
| 817 private: | |
| 818 Token::Value op_; | |
| 819 int value_; | |
| 820 bool reversed_; | |
| 821 OverwriteMode overwrite_mode_; | |
| 822 }; | |
| 823 | |
| 824 | |
| 825 void DeferredInlineSmiOperation::Generate() { | |
| 826 UNIMPLEMENTED_(); | |
| 827 // // In CodeGenerator::SmiOperation we used a1 instead of a0, and we left the | |
| 828 // // register untouched. | |
| 829 // // We just need to load value_ and switch if necessary | |
| 830 // switch (op_) { | |
| 831 // case Token::ADD: | |
| 832 // case Token::SUB: | |
| 833 // case Token::MUL: | |
| 834 // case Token::MOD: | |
| 835 // case Token::BIT_OR: | |
| 836 // case Token::BIT_XOR: | |
| 837 // case Token::BIT_AND: { | |
| 838 // if (reversed_) { | |
| 839 // __ mov(a0, a1); | |
| 840 // __ li(a1, Operand(Smi::FromInt(value_))); | |
| 841 // } else { | |
| 842 // __ li(a0, Operand(Smi::FromInt(value_))); | |
| 843 // } | |
| 844 // break; | |
| 845 // } | |
| 846 // case Token::SHL: | |
| 847 // case Token::SHR: | |
| 848 // case Token::SAR: { | |
| 849 // if (!reversed_) { | |
| 850 // __ li(a0, Operand(Smi::FromInt(value_))); | |
| 851 // } else { | |
| 852 // UNREACHABLE(); // Should have been handled in SmiOperation. | |
| 853 // } | |
| 854 // break; | |
| 855 // } | |
| 856 // | |
| 857 // default: | |
| 858 // // Other cases should have been handled before this point. | |
| 859 // UNREACHABLE(); | |
| 860 // break; | |
| 861 // } | |
| 862 // | |
| 863 // GenericBinaryOpStub stub(op_, overwrite_mode_, value_); | |
| 864 // __ CallStub(&stub); | |
| 865 // __ nop(); // NOP_ADDED | |
| 866 } | |
| 867 | |
| 868 | |
| 869 //static bool PopCountLessThanEqual2(unsigned int x) { | |
| 870 // UNIMPLEMENTED_(); | |
| 871 //// x &= x - 1; | |
| 872 //// return (x & (x - 1)) == 0; | |
| 873 // return false; // UNIMPLEMENTED RETURN | |
| 874 //} | |
| 875 | |
| 876 | |
| 877 // Returns the index of the lowest bit set. | |
| 878 //static int BitPosition(unsigned x) { | |
| 879 // UNIMPLEMENTED_(); | |
| 880 //// int bit_posn = 0; | |
| 881 //// while ((x & 0xf) == 0) { | |
| 882 //// bit_posn += 4; | |
| 883 //// x >>= 4; | |
| 884 //// } | |
| 885 //// while ((x & 1) == 0) { | |
| 886 //// bit_posn++; | |
| 887 //// x >>= 1; | |
| 888 //// } | |
| 889 //// return bit_posn; | |
| 890 // return -1; // UNIMPLEMENTED RETURN | |
| 891 //} | |
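Both helpers are commented out, but their algorithms are visible in the dead code: x &= x - 1 clears the lowest set bit, and the bit scan walks a nibble at a time. Made runnable for reference (they back the shift/add multiplication strategy implied by IsEasyToMultiplyBy and MultiplyByKnownInt):

```cpp
#include <cassert>

// At most two bits set? One x &= x - 1 removes the lowest set bit; what
// remains has at most one bit iff (x & (x - 1)) == 0.
static bool PopCountLessThanEqual2(unsigned int x) {
  x &= x - 1;
  return (x & (x - 1)) == 0;
}

// Index of the lowest set bit (precondition: x != 0): skip zero nibbles,
// then zero bits.
static int BitPosition(unsigned x) {
  int bit_posn = 0;
  while ((x & 0xf) == 0) { bit_posn += 4; x >>= 4; }
  while ((x & 1) == 0) { bit_posn++; x >>= 1; }
  return bit_posn;
}

int main() {
  assert(PopCountLessThanEqual2(0x6));   // 0b110: two bits set
  assert(!PopCountLessThanEqual2(0x7));  // 0b111: three bits set
  assert(BitPosition(0x28) == 3);        // 0b101000
  return 0;
}
```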
| 892 | |
| 893 | |
| 894 void CodeGenerator::SmiOperation(Token::Value op, | |
| 895 Handle<Object> value, | |
| 896 bool reversed, | |
| 897 OverwriteMode mode) { | |
| 898 UNIMPLEMENTED_(); | |
| 899 //#ifdef DEBUG | |
| 900 //// printf("CodeGenerator::SmiOperation\n"); | |
| 901 //#endif | |
| 902 // | |
| 903 // VirtualFrame::SpilledScope spilled_scope; | |
| 904 // // NOTE: This is an attempt to inline (a bit) more of the code for | |
| 905 // // some possible smi operations (like + and -) when (at least) one | |
| 906 // // of the operands is a literal smi. With this optimization, the | |
| 907 // // performance of the system is increased by ~15%, and the generated | |
| 908 // // code size is increased by ~1% (measured on a combination of | |
| 909 // // different benchmarks). | |
| 910 // | |
| 911 // // We care about keeping a1 unchanged, as it spares the need to reverse the | |
| 912 // // optimistic operation if we need to jump to the deferred code. | |
| 913 // | |
| 914 // // sp[0] : operand | |
| 915 // | |
| 916 // // TODO(MIPS.1): Implement overflow check | |
| 917 // | |
| 918 //// __ break_(0x04008); | |
| 919 // int int_value = Smi::cast(*value)->value(); | |
| 920 // | |
| 921 // JumpTarget exit; | |
| 922 // // We use a1 instead of a0 because in most cases we will need the value in a1 | |
| 923 // // if we jump to the deferred code. | |
| 924 // frame_->EmitPop(a1); | |
| 925 // | |
| 926 // bool something_to_inline = true; | |
| 927 // switch (op) { | |
| 928 // // TODO(MIPS.1): Implement overflow cases in CodeGenerator::SmiOperation | |
| 929 // case Token::ADD: { | |
| 930 // DeferredCode* deferred = | |
| 931 // new DeferredInlineSmiOperation(op, int_value, reversed, mode); | |
| 932 // | |
| 933 // __ addiu(v0, a1, Operand(value)); | |
| 934 //// deferred->Branch(vs); | |
| 935 // __ andi(t0, v0, Operand(kSmiTagMask)); | |
| 936 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 937 // __ nop(); // NOP_ADDED | |
| 938 // deferred->BindExit(); | |
| 939 // break; | |
| 940 // } | |
| 941 // | |
| 942 // case Token::SUB: { | |
| 943 // DeferredCode* deferred = | |
| 944 // new DeferredInlineSmiOperation(op, int_value, reversed, mode); | |
| 945 // | |
| 946 // if (reversed) { | |
| 947 // __ li(t0, Operand(value)); | |
| 948 // __ sub(v0, t0, Operand(a1)); | |
| 949 // } else { | |
| 950 // __ li(t0, Operand(value)); | |
| 951 // __ sub(v0, a1, Operand(t0)); | |
| 952 // } | |
| 953 //// deferred->Branch(vs); | |
| 954 // __ andi(t0, v0, Operand(kSmiTagMask)); | |
| 955 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 956 // __ nop(); // NOP_ADDED | |
| 957 // deferred->BindExit(); | |
| 958 // break; | |
| 959 // } | |
| 960 // | |
| 961 // | |
| 962 // case Token::BIT_OR: | |
| 963 // case Token::BIT_XOR: | |
| 964 // case Token::BIT_AND: { | |
| 965 // DeferredCode* deferred = | |
| 966 // new DeferredInlineSmiOperation(op, int_value, reversed, mode); | |
| 967 // __ andi(t0, a1, Operand(kSmiTagMask)); | |
| 968 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 969 // __ nop(); // NOP_ADDED | |
| 970 // deferred->BindExit(); | |
| 971 // switch (op) { | |
| 972 // case Token::BIT_OR: __ or_(v0, a1, Operand(value)); break; | |
| 973 // case Token::BIT_XOR: __ xor_(v0, a1, Operand(value)); break; | |
| 974 // case Token::BIT_AND: __ and_(v0, a1, Operand(value)); break; | |
| 975 // default: UNREACHABLE(); | |
| 976 // } | |
| 977 // deferred->BindExit(); | |
| 978 // break; | |
| 979 // } | |
| 980 // | |
| 981 // case Token::SHL: | |
| 982 // case Token::SHR: | |
| 983 // case Token::SAR: { | |
| 984 // if (reversed) { | |
| 985 // something_to_inline = false; | |
| 986 // break; | |
| 987 // } | |
| 988 // int shift_value = int_value & 0x1f; // least significant 5 bits | |
| 989 // DeferredCode* deferred = | |
| 990 // new DeferredInlineSmiOperation(op, shift_value, false, mode); | |
| 991 // __ andi(t0, a1, Operand(kSmiTagMask)); | |
| 992 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 993 // __ nop(); // NOP_ADDED | |
| 994 // __ sra(a2, a1, kSmiTagSize); // Remove tag | |
| 995 // switch (op) { | |
| 996 // case Token::SHL: { | |
| 997 // if (shift_value != 0) { | |
| 998 // __ sll(v0, a2, shift_value); | |
| 999 // } | |
| 1000 // // Check that the result fits in a Smi. | |
| 1001 // __ addiu(t3, v0, Operand(0x40000000)); | |
| 1002 // __ andi(t3, t3, Operand(0x80000000)); | |
| 1003 // deferred->Branch(ne, t3, Operand(zero_reg)); | |
| 1004 // __ nop(); // NOP_ADDED | |
| 1005 // break; | |
| 1006 // } | |
| 1007 // case Token::SHR: { | |
| 1008 // // LSR by immediate 0 means shifting 32 bits. | |
| 1009 // if (shift_value != 0) { | |
| 1010 // __ srl(v0, a2, shift_value); | |
| 1011 // } | |
| 1012 // // check that the *unsigned* result fits in a smi | |
| 1013 // // neither of the two high-order bits can be set: | |
| 1014 // // - 0x80000000: high bit would be lost when smi tagging | |
| 1015 // // - 0x40000000: this number would convert to negative when | |
| 1016 // // smi tagging these two cases can only happen with shifts | |
| 1017 // // by 0 or 1 when handed a valid smi | |
| 1018 // // Check that the result fits in a Smi. | |
| 1019 // __ andi(t3, v0, Operand(0xc0000000)); | |
| 1020 // deferred->Branch(ne, t3, Operand(zero_reg)); | |
| 1021 // break; | |
| 1022 // } | |
| 1023 // case Token::SAR: { | |
| 1024 // if (shift_value != 0) { | |
| 1025 // // ASR by immediate 0 means shifting 32 bits. | |
| 1026 // __ sra(v0, a2, shift_value); | |
| 1027 // } | |
| 1028 // break; | |
| 1029 // } | |
| 1030 // default: UNREACHABLE(); | |
| 1031 // } | |
| 1032 // __ sll(v0, v0, kSmiTagSize); // Tag result | |
| 1033 // deferred->BindExit(); | |
| 1034 // break; | |
| 1035 // } | |
| 1036 // | |
| 1037 // case Token::MOD: { | |
| 1038 // if (reversed || int_value < 2 || !IsPowerOf2(int_value)) { | |
| 1039 // something_to_inline = false; | |
| 1040 // break; | |
| 1041 // } | |
| 1042 // DeferredCode* deferred = | |
| 1043 // new DeferredInlineSmiOperation(op, int_value, reversed, mode); | |
| 1044 // unsigned mask = (0x80000000u | kSmiTagMask); | |
| 1045 // __ andi(t0, a1, Operand(mask)); | |
| 1046 // // Go to deferred code on non-Smis and negative. | |
| 1047 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 1048 // __ nop(); // NOP_ADDED | |
| 1049 // mask = (int_value << kSmiTagSize) - 1; | |
| 1050 // __ and_(v0, a1, Operand(mask)); | |
| 1051 // deferred->BindExit(); | |
| 1052 // break; | |
| 1053 // } | |
| 1054 // | |
| 1055 // case Token::MUL: { | |
| 1056 // if (!IsEasyToMultiplyBy(int_value)) { | |
| 1057 // something_to_inline = false; | |
| 1058 // break; | |
| 1059 // } | |
| 1060 // DeferredCode* deferred = | |
| 1061 // new DeferredInlineSmiOperation(op, int_value, reversed, mode); | |
| 1062 // unsigned max_smi_that_wont_overflow = Smi::kMaxValue / int_value; | |
| 1063 // max_smi_that_wont_overflow <<= kSmiTagSize; | |
| 1064 // unsigned mask = 0x80000000u; | |
| 1065 // while ((mask & max_smi_that_wont_overflow) == 0) { | |
| 1066 // mask |= mask >> 1; | |
| 1067 // } | |
| 1068 // mask |= kSmiTagMask; | |
| 1069 // // This does a single mask that checks for a too high value in a | |
| 1070 // // conservative way and for a non-Smi. It also filters out negative | |
| 1071 // // numbers, unfortunately, but since this code is inline we prefer | |
| 1072 // // brevity to comprehensiveness. | |
| 1073 // __ andi(t0, a1, Operand(mask)); | |
| 1074 // deferred->Branch(ne, t0, Operand(zero_reg)); | |
| 1075 // __ nop(); // NOP_ADDED | |
| 1076 // MultiplyByKnownInt(masm_, a1, v0, int_value); | |
| 1077 // deferred->BindExit(); | |
| 1078 // break; | |
| 1079 // } | |
| 1080 // | |
| 1081 // default: | |
| 1082 // something_to_inline = false; | |
| 1083 // break; | |
| 1084 // } | |
| 1085 // | |
| 1086 // if (!something_to_inline) { | |
| 1087 // if (!reversed) { | |
| 1088 // __ li(a1, Operand(value)); | |
| 1089 // frame_->EmitMultiPush(a0.bit() | a1.bit()); | |
| 1090 // GenericBinaryOperation(op, mode, int_value); | |
| 1091 // } else { | |
| 1092 // __ li(a1, Operand(value)); | |
| 1093 // frame_->EmitMultiPushReversed(a1.bit() | a0.bit()); | |
| 1094 // GenericBinaryOperation(op, mode, kUnknownIntValue); | |
| 1095 // } | |
| 1096 // } | |
| 1097 // | |
| 1098 // exit.Bind(); | |
| 1099 } | |
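Several inlined cases above lean on the 32-bit smi encoding (31-bit payload, kSmiTagSize == 1, tag bit clear). A runnable sketch of the three range/mask tricks used for SHL, SHR, and MOD, with the constants taken from the code:

```cpp
#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;

// SHL: a signed result fits a smi iff it lies in [-2^30, 2^30). Adding
// 0x40000000 maps that range onto [0, 2^31), i.e. bit 31 must end up clear.
bool SignedFitsSmi(int32_t result) {
  return ((static_cast<uint32_t>(result) + 0x40000000u) & 0x80000000u) == 0;
}

// SHR: an unsigned result fits iff neither top bit is set: bit 31 would be
// lost when tagging, bit 30 would make the tagged smi negative.
bool UnsignedFitsSmi(uint32_t result) {
  return (result & 0xc0000000u) == 0;
}

// MOD by a power of two m, applied to an already-tagged non-negative smi:
// (x << 1) & ((m << 1) - 1) == (x % m) << 1, still a valid tagged smi.
uint32_t TaggedModPowerOfTwo(uint32_t tagged_x, uint32_t m) {
  return tagged_x & ((m << kSmiTagSize) - 1);
}

int main() {
  assert(SignedFitsSmi(0x3fffffff) && !SignedFitsSmi(0x40000000));
  assert(UnsignedFitsSmi(0x3fffffffu) && !UnsignedFitsSmi(0x40000000u));
  assert(TaggedModPowerOfTwo(13u << 1, 8u) == (13u % 8u) << 1);
  return 0;
}
```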
| 1100 | |
| 1101 | |
| 1102 // On MIPS we load registers s5 and s6 with the values to be compared. | |
| 1103 // Together with the CodeGenerator::cc_reg_ condition, this lets later | |
| 1104 // code (e.g. CodeGenerator::Branch) evaluate the condition correctly. | |
| 1105 void CodeGenerator::Comparison(Condition cc, | |
| 1106 Expression* left, | |
| 1107 Expression* right, | |
| 1108 bool strict) { | |
| 1109 UNIMPLEMENTED_(); | |
| 1110 // __ nop(); | |
| 1111 // if (left != NULL) LoadAndSpill(left); | |
| 1112 // if (right != NULL) LoadAndSpill(right); | |
| 1113 // | |
| 1114 // VirtualFrame::SpilledScope spilled_scope; | |
| 1115 // // sp[0] : y | |
| 1116 // // sp[1] : x | |
| 1117 // // result : cc register | |
| 1118 // | |
| 1119 // // Strict only makes sense for equality comparisons. | |
| 1120 // ASSERT(!strict || cc == eq); | |
| 1121 // | |
| 1122 // JumpTarget exit; | |
| 1123 // JumpTarget smi; | |
| 1124 // // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order. | |
| 1125 // if (cc == greater || cc == less_equal) { | |
| 1126 // cc = ReverseCondition(cc); | |
| 1127 // frame_->EmitPop(a1); | |
| 1128 // frame_->EmitPop(a0); | |
| 1129 // } else { | |
| 1130 // frame_->EmitPop(a0); | |
| 1131 // frame_->EmitPop(a1); | |
| 1132 // } | |
| 1133 // __ or_(t2, a0, Operand(a1)); | |
| 1134 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 1135 // smi.Branch(eq, no_hint, t3, Operand(zero_reg)); | |
| 1136 // | |
| 1137 // // Perform non-smi comparison by stub. | |
| 1138 // // CompareStub takes arguments in a0 and a1, returns <0, >0 or 0 in v0. | |
| 1139 // // We call with 0 args because there are 0 on the stack. | |
| 1140 // CompareStub stub(cc, strict); | |
| 1141 // frame_->CallStub(&stub, 0); | |
| 1142 // __ nop(); // NOP_ADDED | |
| 1143 // __ mov(s5, v0); | |
| 1144 // __ li(s6, Operand(0)); | |
| 1145 // exit.Jump(); | |
| 1146 // __ nop(); // NOP_ADDED | |
| 1147 // | |
| 1148 // // Do smi comparisons by pointer comparison. | |
| 1149 // smi.Bind(); | |
| 1150 // __ mov(s5, a1); | |
| 1151 // __ mov(s6, a0); | |
| 1152 // | |
| 1153 // exit.Bind(); | |
| 1154 // cc_reg_ = cc; | |
| 1155 } | |
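Comparison implements '>' and '<=' by swapping the popped operands and reversing the condition, which keeps the ECMA-262 left-then-right evaluation order while emitting only one compare direction. The identity it relies on, as a quick check:

```cpp
#include <cassert>

int main() {
  int x = 2, y = 5;
  assert((x > y) == (y < x));    // greater     -> reversed less
  assert((x <= y) == (y >= x));  // less_equal  -> reversed greater_equal
  return 0;
}
```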
| 1156 | |
| 1157 | |
| 1158 class CallFunctionStub: public CodeStub { | |
| 1159 public: | |
| 1160 CallFunctionStub(int argc, InLoopFlag in_loop) | |
| 1161 : argc_(argc), in_loop_(in_loop) {} | |
| 1162 | |
| 1163 void Generate(MacroAssembler* masm); | |
| 1164 | |
| 1165 private: | |
| 1166 int argc_; | |
| 1167 InLoopFlag in_loop_; | |
| 1168 | |
| 1169 #if defined(DEBUG) | |
| 1170 void Print() { PrintF("CallFunctionStub (argc %d)\n", argc_); } | |
| 1171 #endif // defined(DEBUG) | |
| 1172 | |
| 1173 Major MajorKey() { return CallFunction; } | |
| 1174 int MinorKey() { return argc_; } | |
| 1175 InLoopFlag InLoop() { return in_loop_; } | |
| 1176 }; | |
| 1177 | |
| 1178 | |
| 1179 // Call the function on the stack with the given arguments. | |
| 1180 void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args, | |
| 1181 int position) { | |
| 1182 UNIMPLEMENTED_(); | |
| 1183 //#ifdef DEBUG | |
| 1184 //// printf("Using CodeGenerator::CallWithArguments. There may be issues with stack alignment.\n"); | |
| 1185 //#endif | |
| 1186 // | |
| 1187 // VirtualFrame::SpilledScope spilled_scope; | |
| 1188 // // Push the arguments ("left-to-right") on the stack. | |
| 1189 // int arg_count = args->length(); | |
| 1190 // for (int i = 0; i < arg_count; i++) { | |
| 1191 // LoadAndSpill(args->at(i)); | |
| 1192 // } | |
| 1193 // | |
| 1194 // // Record the position for debugging purposes. | |
| 1195 // CodeForSourcePosition(position); | |
| 1196 // | |
| 1197 // // Use the shared code stub to call the function. | |
| 1198 // InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 1199 // CallFunctionStub call_function(arg_count, in_loop); | |
| 1200 // frame_->CallStub(&call_function, arg_count + 1); | |
| 1201 // __ nop(); // NOP_ADDED | |
| 1202 //// __ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); // (branch delay) | |
| 1203 // | |
| 1204 // // We need to manually restore context and pop function from the stack after | |
| 1205 // // this call. | |
| 1206 } | |
| 1207 | |
| 1208 | |
| 1209 void CodeGenerator::Branch(bool if_true, JumpTarget* target) { | |
| 1210 UNIMPLEMENTED_(); | |
| 1211 // VirtualFrame::SpilledScope spilled_scope; | |
| 1212 // ASSERT(has_cc()); | |
| 1213 // Condition cc = if_true ? cc_reg_ : NegateCondition(cc_reg_); | |
| 1214 // // cf CodeGenerator::Comparison comments. | |
| 1215 // target->Branch(cc, no_hint, s5, Operand(s6)); | |
| 1216 // cc_reg_ = cc_always; | |
| 1217 } | |
| 1218 | |
| 1219 | |
| 1220 void CodeGenerator::CheckStack() { | |
| 1221 UNIMPLEMENTED_(); | |
| 1222 // VirtualFrame::SpilledScope spilled_scope; | |
| 1223 // Comment cmnt(masm_, "[ check stack"); | |
| 1224 // | |
| 1225 // __ LoadRoot(ip, Heap::kStackLimitRootIndex); | |
| 1226 // StackCheckStub stub; | |
| 1227 // // Call the stub if lower. | |
| 1228 // __ jalcond(Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()), | |
| 1229 // RelocInfo::CODE_TARGET), | |
| 1230 // Uless, sp, Operand(ip) | |
| 1231 // ); | |
| 1232 // __ nop(); | |
| 1233 } | |
| 1234 | |
| 1235 | |
| 1236 void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) { | |
| 1237 UNIMPLEMENTED_(); | |
| 1238 //#ifdef DEBUG | |
| 1239 //// printf("CodeGenerator::VisitStatements\n"); | |
| 1240 //#endif | |
| 1241 // | |
| 1242 //#ifdef DEBUG | |
| 1243 // int original_height = frame_->height(); | |
| 1244 //#endif | |
| 1245 // VirtualFrame::SpilledScope spilled_scope; | |
| 1246 // for (int i = 0; frame_ != NULL && i < statements->length(); i++) { | |
| 1247 // VisitAndSpill(statements->at(i)); | |
| 1248 // } | |
| 1249 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1250 } | |
| 1251 | |
| 1252 | |
| 1253 void CodeGenerator::VisitBlock(Block* node) { | |
| 1254 UNIMPLEMENTED_(); | |
| 1255 //#ifdef DEBUG | |
| 1256 //// printf("CodeGenerator::VisitBlock\n"); | |
| 1257 //#endif | |
| 1258 // | |
| 1259 //#ifdef DEBUG | |
| 1260 // int original_height = frame_->height(); | |
| 1261 //#endif | |
| 1262 // VirtualFrame::SpilledScope spilled_scope; | |
| 1263 // Comment cmnt(masm_, "[ Block"); | |
| 1264 // CodeForStatementPosition(node); | |
| 1265 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1266 // VisitStatementsAndSpill(node->statements()); | |
| 1267 // if (node->break_target()->is_linked()) { | |
| 1268 // node->break_target()->Bind(); | |
| 1269 // } | |
| 1270 // node->break_target()->Unuse(); | |
| 1271 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1272 } | |
| 1273 | |
| 1274 | |
| 1275 void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { | |
| 1276 UNIMPLEMENTED_(); | |
| 1277 //#ifdef DEBUG | |
| 1278 //// printf("CodeGenerator::DeclareGlobals\n"); | |
| 1279 //#endif | |
| 1280 // VirtualFrame::SpilledScope spilled_scope; | |
| 1281 // frame_->EmitPush(cp); | |
| 1282 // __ li(t0, Operand(pairs)); | |
| 1283 // frame_->EmitPush(t0); | |
| 1284 // __ li(t0, Operand(Smi::FromInt(is_eval() ? 1 : 0))); | |
| 1285 // frame_->EmitPush(t0); | |
| 1286 // frame_->CallRuntime(Runtime::kDeclareGlobals, 3); | |
| 1287 // __ nop(); // NOP_ADDED | |
| 1288 // // The result is discarded. | |
| 1289 } | |
| 1290 | |
| 1291 | |
| 1292 void CodeGenerator::VisitDeclaration(Declaration* node) { | |
| 1293 UNIMPLEMENTED_(); | |
| 1294 //#ifdef DEBUG | |
| 1295 //// printf("CodeGenerator::VisitDeclaration\n"); | |
| 1296 //#endif | |
| 1297 // | |
| 1298 //#ifdef DEBUG | |
| 1299 // int original_height = frame_->height(); | |
| 1300 //#endif | |
| 1301 // VirtualFrame::SpilledScope spilled_scope; | |
| 1302 // Comment cmnt(masm_, "[ Declaration"); | |
| 1303 // Variable* var = node->proxy()->var(); | |
| 1304 // ASSERT(var != NULL); // must have been resolved | |
| 1305 // Slot* slot = var->slot(); | |
| 1306 // | |
| 1307 // // If it was not possible to allocate the variable at compile time, | |
| 1308 // // we need to "declare" it at runtime to make sure it actually | |
| 1309 // // exists in the local context. | |
| 1310 // if (slot != NULL && slot->type() == Slot::LOOKUP) { | |
| 1311 // // Variables with a "LOOKUP" slot were introduced as non-locals | |
| 1312 // // during variable resolution and must have mode DYNAMIC. | |
| 1313 // ASSERT(var->is_dynamic()); | |
| 1314 // // For now, just do a runtime call. | |
| 1315 // frame_->EmitPush(cp); | |
| 1316 //// __ mov(r0, Operand(var->name())); | |
| 1317 //// frame_->EmitPush(r0); | |
| 1318 // __ li(t0, Operand(var->name())); | |
| 1319 // frame_->EmitPush(t0); | |
| 1320 // // Declaration nodes are always declared in only two modes. | |
| 1321 // ASSERT(node->mode() == Variable::VAR || node->mode() == Variable::CONST); | |
| 1322 // PropertyAttributes attr = node->mode() == Variable::VAR ? NONE : READ_ONLY; | |
| 1323 //// __ mov(r0, Operand(Smi::FromInt(attr))); | |
| 1324 //// frame_->EmitPush(r0); | |
| 1325 // __ li(t0, Operand(Smi::FromInt(attr))); | |
| 1326 // frame_->EmitPush(t0); | |
| 1327 // // Push initial value, if any. | |
| 1328 // // Note: For variables we must not push an initial value (such as | |
| 1329 // // 'undefined') because we may have a (legal) redeclaration and we | |
| 1330 // // must not destroy the current value. | |
| 1331 // if (node->mode() == Variable::CONST) { | |
| 1332 //// __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); | |
| 1333 //// frame_->EmitPush(r0); | |
| 1334 // __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); | |
| 1335 // frame_->EmitPush(t0); | |
| 1336 // } else if (node->fun() != NULL) { | |
| 1337 // LoadAndSpill(node->fun()); | |
| 1338 // } else { | |
| 1339 //// __ mov(r0, Operand(0)); // no initial value! | |
| 1340 //// frame_->EmitPush(r0); | |
| 1341 // __ li(t0, Operand(0)); // no initial value! | |
| 1342 // frame_->EmitPush(t0); | |
| 1343 // } | |
| 1344 // frame_->CallRuntime(Runtime::kDeclareContextSlot, 4); | |
| 1345 // // Ignore the return value (declarations are statements). | |
| 1346 // ASSERT(frame_->height() == original_height); | |
| 1347 // return; | |
| 1348 // } | |
| 1349 // | |
| 1350 // ASSERT(!var->is_global()); | |
| 1351 // | |
| 1352 // // If we have a function or a constant, we need to initialize the variable. | |
| 1353 // Expression* val = NULL; | |
| 1354 // if (node->mode() == Variable::CONST) { | |
| 1355 // val = new Literal(Factory::the_hole_value()); | |
| 1356 // } else { | |
| 1357 // val = node->fun(); // NULL if we don't have a function | |
| 1358 // } | |
| 1359 // | |
| 1360 // if (val != NULL) { | |
| 1361 // { | |
| 1362 // // Set initial value. | |
| 1363 // Reference target(this, node->proxy()); | |
| 1364 // LoadAndSpill(val); | |
| 1365 // target.SetValue(NOT_CONST_INIT); | |
| 1366 // // The reference is removed from the stack (preserving TOS) when | |
| 1367 // // it goes out of scope. | |
| 1368 // } | |
| 1369 // // Get rid of the assigned value (declarations are statements). | |
| 1370 // frame_->Drop(); | |
| 1371 // } | |
| 1372 // ASSERT(frame_->height() == original_height); | |
| 1373 } | |
| 1374 | |
| 1375 | |
| 1376 void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) { | |
| 1377 UNIMPLEMENTED_(); | |
| 1378 //#ifdef DEBUG | |
| 1379 //// printf("CodeGenerator::VisitExpressionStatement\n"); | |
| 1380 //#endif | |
| 1381 // | |
| 1382 //#ifdef DEBUG | |
| 1383 // int original_height = frame_->height(); | |
| 1384 //#endif | |
| 1385 // VirtualFrame::SpilledScope spilled_scope; | |
| 1386 // Comment cmnt(masm_, "[ ExpressionStatement"); | |
| 1387 // CodeForStatementPosition(node); | |
| 1388 // Expression* expression = node->expression(); | |
| 1389 // expression->MarkAsStatement(); | |
| 1390 // LoadAndSpill(expression); | |
| 1391 // frame_->Drop(); | |
| 1392 // ASSERT(frame_->height() == original_height); | |
| 1393 } | |
| 1394 | |
| 1395 | |
| 1396 void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) { | |
| 1397 UNIMPLEMENTED_(); | |
| 1398 //#ifdef DEBUG | |
| 1399 //// printf("CodeGenerator::VisitEmptyStatement\n"); | |
| 1400 //#endif | |
| 1401 // | |
| 1402 //#ifdef DEBUG | |
| 1403 // int original_height = frame_->height(); | |
| 1404 //#endif | |
| 1405 // VirtualFrame::SpilledScope spilled_scope; | |
| 1406 // Comment cmnt(masm_, "// EmptyStatement"); | |
| 1407 // CodeForStatementPosition(node); | |
| 1408 // // nothing to do | |
| 1409 // ASSERT(frame_->height() == original_height); | |
| 1410 } | |
| 1411 | |
| 1412 | |
| 1413 void CodeGenerator::VisitIfStatement(IfStatement* node) { | |
| 1414 UNIMPLEMENTED_(); | |
| 1415 //#ifdef DEBUG | |
| 1416 //// printf("CodeGenerator::VisitIfStatement\n"); | |
| 1417 //#endif | |
| 1418 // | |
| 1419 //#ifdef DEBUG | |
| 1420 // int original_height = frame_->height(); | |
| 1421 //#endif | |
| 1422 // VirtualFrame::SpilledScope spilled_scope; | |
| 1423 // Comment cmnt(masm_, "[ IfStatement"); | |
| 1424 // // Generate different code depending on which parts of the if statement | |
| 1425 // // are present or not. | |
| 1426 // bool has_then_stm = node->HasThenStatement(); | |
| 1427 // bool has_else_stm = node->HasElseStatement(); | |
| 1428 // | |
| 1429 // CodeForStatementPosition(node); | |
| 1430 // | |
| 1431 // JumpTarget exit; | |
| 1432 // if (has_then_stm && has_else_stm) { | |
| 1433 // Comment cmnt(masm_, "[ IfThenElse"); | |
| 1434 // JumpTarget then; | |
| 1435 // JumpTarget else_; | |
| 1436 // // if (cond) | |
| 1437 // LoadConditionAndSpill(node->condition(), | |
| 1438 // &then, &else_, true); | |
| 1439 // if (frame_ != NULL) { | |
| 1440 // Branch(false, &else_); | |
| 1441 // } | |
| 1442 // // then | |
| 1443 // if (frame_ != NULL || then.is_linked()) { | |
| 1444 // then.Bind(); | |
| 1445 // VisitAndSpill(node->then_statement()); | |
| 1446 // } | |
| 1447 // if (frame_ != NULL) { | |
| 1448 // exit.Jump(); | |
| 1449 // __ nop(); // NOP_ADDED | |
| 1450 // } | |
| 1451 // // else | |
| 1452 // if (else_.is_linked()) { | |
| 1453 // else_.Bind(); | |
| 1454 // VisitAndSpill(node->else_statement()); | |
| 1455 // } | |
| 1456 // | |
| 1457 // } else if (has_then_stm) { | |
| 1458 // Comment cmnt(masm_, "[ IfThen"); | |
| 1459 // ASSERT(!has_else_stm); | |
| 1460 // JumpTarget then; | |
| 1461 // // if (cond) | |
| 1462 // LoadConditionAndSpill(node->condition(), | |
| 1463 // &then, &exit, true); | |
| 1464 // if (frame_ != NULL) { | |
| 1465 // Branch(false, &exit); | |
| 1466 // } | |
| 1467 // // then | |
| 1468 // if (frame_ != NULL || then.is_linked()) { | |
| 1469 // then.Bind(); | |
| 1470 // VisitAndSpill(node->then_statement()); | |
| 1471 // } | |
| 1472 // | |
| 1473 // } else if (has_else_stm) { | |
| 1474 // Comment cmnt(masm_, "[ IfElse"); | |
| 1475 // ASSERT(!has_then_stm); | |
| 1476 // JumpTarget else_; | |
| 1477 // // if (!cond) | |
| 1478 // LoadConditionAndSpill(node->condition(), | |
| 1479 // &exit, &else_, true); | |
| 1480 // if (frame_ != NULL) { | |
| 1481 // Branch(true, &exit); | |
| 1482 // } | |
| 1483 // // else | |
| 1484 // if (frame_ != NULL || else_.is_linked()) { | |
| 1485 // else_.Bind(); | |
| 1486 // VisitAndSpill(node->else_statement()); | |
| 1487 // } | |
| 1488 // | |
| 1489 // } else { | |
| 1490 // Comment cmnt(masm_, "[ If"); | |
| 1491 // ASSERT(!has_then_stm && !has_else_stm); | |
| 1492 // // if (cond) | |
| 1493 // LoadConditionAndSpill(node->condition(), | |
| 1494 // &exit, &exit, false); | |
| 1495 // if (frame_ != NULL) { | |
| 1496 // if (has_cc()) { | |
| 1497 // cc_reg_ = cc_always; | |
| 1498 // } else { | |
| 1499 // frame_->Drop(); | |
| 1500 // } | |
| 1501 // } | |
| 1502 // } | |
| 1503 // | |
| 1504 // // end | |
| 1505 // if (exit.is_linked()) { | |
| 1506 // exit.Bind(); | |
| 1507 // } | |
| 1508 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1509 } | |
| 1510 | |
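VisitIfStatement above emits one of four shapes depending on which of the then/else parts exist. The goto-based C++ below is a minimal sketch of the full IfThenElse shape, with the JumpTarget operations it stands in for noted in the comments; it is illustrative only, not generated output:

```cpp
#include <cstdio>

// Control-flow shape of the IfThenElse path: test, branch to else,
// then-body, jump over else, else-body, common exit.
void if_then_else_shape(bool cond) {
  if (!cond) goto else_;    // Branch(false, &else_)
  std::printf("then\n");    // VisitAndSpill(node->then_statement())
  goto done;                // exit.Jump(); __ nop()
else_:
  std::printf("else\n");    // else_.Bind(); VisitAndSpill(node->else_statement())
done:
  ;                         // exit.Bind()
}

int main() {
  if_then_else_shape(true);   // prints "then"
  if_then_else_shape(false);  // prints "else"
}
```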
| 1511 | |
| 1512 void CodeGenerator::VisitContinueStatement(ContinueStatement* node) { | |
| 1513 UNIMPLEMENTED_(); | |
| 1514 //#ifdef DEBUG | |
| 1515 //// printf("CodeGenerator::VisitContinueStatement\n"); | |
| 1516 //#endif | |
| 1517 // | |
| 1518 // VirtualFrame::SpilledScope spilled_scope; | |
| 1519 // Comment cmnt(masm_, "[ ContinueStatement"); | |
| 1520 // CodeForStatementPosition(node); | |
| 1521 // node->target()->continue_target()->Jump(); | |
| 1522 // __ nop(); // NOP_ADDED | |
| 1523 } | |
| 1524 | |
| 1525 | |
| 1526 void CodeGenerator::VisitBreakStatement(BreakStatement* node) { | |
| 1527 UNIMPLEMENTED_(); | |
| 1528 //#ifdef DEBUG | |
| 1529 //// printf("CodeGenerator::VisitBreakStatement\n"); | |
| 1530 //#endif | |
| 1531 // | |
| 1532 // VirtualFrame::SpilledScope spilled_scope; | |
| 1533 // Comment cmnt(masm_, "[ BreakStatement"); | |
| 1534 // CodeForStatementPosition(node); | |
| 1535 // node->target()->break_target()->Jump(); | |
| 1536 // __ nop(); // NOP_ADDED | |
| 1537 } | |
| 1538 | |
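A pattern worth noting throughout this port: each ARM line kept as a `////` comment is followed by its MIPS replacement on a `//` line, and every emitted branch or jump is trailed by `__ nop(); // NOP_ADDED`. MIPS executes the instruction after a branch (the delay slot) unconditionally, so a conservative first port fills each slot with a nop rather than scheduling useful work into it. The self-contained C++ sketch below (the tiny Assembler is hypothetical, not a V8 type) checks that invariant mechanically:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

enum class Op { Branch, Nop, Add };

// Toy instruction recorder standing in for the macro assembler.
struct Assembler {
  std::vector<Op> code;
  void b()   { code.push_back(Op::Branch); }
  void nop() { code.push_back(Op::Nop); }
  void add() { code.push_back(Op::Add); }
};

// The invariant the NOP_ADDED comments maintain by hand: every branch
// is immediately followed by a nop in its delay slot.
bool delay_slots_filled(const std::vector<Op>& code) {
  for (size_t i = 0; i < code.size(); i++) {
    if (code[i] == Op::Branch &&
        (i + 1 == code.size() || code[i + 1] != Op::Nop)) {
      return false;
    }
  }
  return true;
}

int main() {
  Assembler masm;
  masm.add();
  masm.b();
  masm.nop();  // the NOP_ADDED pattern
  std::printf("ok: %d\n", delay_slots_filled(masm.code));  // ok: 1
}
```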
| 1539 | |
| 1540 void CodeGenerator::VisitReturnStatement(ReturnStatement* node) { | |
| 1541 UNIMPLEMENTED_(); | |
| 1542 //#ifdef DEBUG | |
| 1543 //// printf("CodeGenerator::VisitReturnStatement\n"); | |
| 1544 //#endif | |
| 1545 // | |
| 1546 // VirtualFrame::SpilledScope spilled_scope; | |
| 1547 // Comment cmnt(masm_, "[ ReturnStatement"); | |
| 1548 // | |
| 1549 // CodeForStatementPosition(node); | |
| 1550 // LoadAndSpill(node->expression()); | |
| 1551 // if (function_return_is_shadowed_) { | |
| 1552 // frame_->EmitPop(v0); | |
| 1553 // function_return_.Jump(); | |
| 1554 // __ nop(); // NOP_ADDED | |
| 1555 // } else { | |
| 1556 // // Pop the result from the frame and prepare the frame for | |
| 1557 // // returning thus making it easier to merge. | |
| 1558 // frame_->EmitPop(v0); | |
| 1559 //// __ break_(0x00009); | |
| 1560 // frame_->PrepareForReturn(); | |
| 1561 // | |
| 1562 // function_return_.Jump(); | |
| 1563 // __ nop(); // NOP_ADDED | |
| 1564 // } | |
| 1565 } | |
| 1566 | |
| 1567 | |
| 1568 void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) { | |
| 1569 UNIMPLEMENTED_(); | |
| 1570 //#ifdef DEBUG | |
| 1571 //// printf("CodeGenerator::VisitWithEnterStatement\n"); | |
| 1572 //#endif | |
| 1573 //#ifdef DEBUG | |
| 1574 // int original_height = frame_->height(); | |
| 1575 //#endif | |
| 1576 // VirtualFrame::SpilledScope spilled_scope; | |
| 1577 // Comment cmnt(masm_, "[ WithEnterStatement"); | |
| 1578 // CodeForStatementPosition(node); | |
| 1579 // LoadAndSpill(node->expression()); | |
| 1580 // if (node->is_catch_block()) { | |
| 1581 // frame_->CallRuntime(Runtime::kPushCatchContext, 1); | |
| 1582 // __ nop(); // NOP_ADDED | |
| 1583 // } else { | |
| 1584 // frame_->CallRuntime(Runtime::kPushContext, 1); | |
| 1585 // __ nop(); // NOP_ADDED | |
| 1586 // } | |
| 1587 //#ifdef DEBUG | |
| 1588 // JumpTarget verified_true; | |
| 1589 //// __ cmp(r0, Operand(cp)); | |
| 1590 // verified_true.Branch(eq, no_hint, v0, Operand(cp)); | |
| 1591 // __ nop(); // NOP_ADDED | |
| 1592 // __ stop("PushContext: v0 is expected to be the same as cp"); | |
| 1593 // verified_true.Bind(); | |
| 1594 // __ nop(); // NOP_ADDED | |
| 1595 //#endif | |
| 1596 // // Update context local. | |
| 1597 //// __ str(cp, frame_->Context()); | |
| 1598 // __ sw(cp, frame_->Context()); | |
| 1599 // ASSERT(frame_->height() == original_height); | |
| 1600 } | |
| 1601 | |
| 1602 | |
| 1603 void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) { | |
| 1604 UNIMPLEMENTED_(); | |
| 1605 //#ifdef DEBUG | |
| 1606 //// printf("CodeGenerator::VisitWithExitStatement\n"); | |
| 1607 //#endif | |
| 1608 //#ifdef DEBUG | |
| 1609 // int original_height = frame_->height(); | |
| 1610 //#endif | |
| 1611 // VirtualFrame::SpilledScope spilled_scope; | |
| 1612 // Comment cmnt(masm_, "[ WithExitStatement"); | |
| 1613 // CodeForStatementPosition(node); | |
| 1614 // // Pop context. | |
| 1615 //// __ ldr(cp, ContextOperand(cp, Context::PREVIOUS_INDEX)); | |
| 1616 // __ lw(cp, ContextOperand(cp, Context::PREVIOUS_INDEX)); | |
| 1617 // // Update context local. | |
| 1618 //// __ str(cp, frame_->Context()); | |
| 1619 // __ sw(cp, frame_->Context()); | |
| 1620 // ASSERT(frame_->height() == original_height); | |
| 1621 } | |
| 1622 | |
| 1623 | |
| 1624 void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { | |
| 1625 UNIMPLEMENTED_(); | |
| 1626 //#ifdef DEBUG | |
| 1627 //// printf("CodeGenerator::VisitSwitchStatement\n"); | |
| 1628 //#endif | |
| 1629 //#ifdef DEBUG | |
| 1630 // int original_height = frame_->height(); | |
| 1631 //#endif | |
| 1632 // VirtualFrame::SpilledScope spilled_scope; | |
| 1633 // Comment cmnt(masm_, "[ SwitchStatement"); | |
| 1634 // CodeForStatementPosition(node); | |
| 1635 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1636 // | |
| 1637 // LoadAndSpill(node->tag()); | |
| 1638 // | |
| 1639 // JumpTarget next_test; | |
| 1640 // JumpTarget fall_through; | |
| 1641 // JumpTarget default_entry; | |
| 1642 // JumpTarget default_exit(JumpTarget::BIDIRECTIONAL); | |
| 1643 // ZoneList<CaseClause*>* cases = node->cases(); | |
| 1644 // int length = cases->length(); | |
| 1645 // CaseClause* default_clause = NULL; | |
| 1646 // | |
| 1647 // for (int i = 0; i < length; i++) { | |
| 1648 // CaseClause* clause = cases->at(i); | |
| 1649 // if (clause->is_default()) { | |
| 1650 // // Remember the default clause and compile it at the end. | |
| 1651 // default_clause = clause; | |
| 1652 // continue; | |
| 1653 // } | |
| 1654 //// | |
| 1655 // Comment cmnt(masm_, "[ Case clause"); | |
| 1656 // // Compile the test. | |
| 1657 // next_test.Bind(); | |
| 1658 // next_test.Unuse(); | |
| 1659 // // Duplicate TOS. | |
| 1660 //// __ ldr(r0, frame_->Top()); | |
| 1661 //// frame_->EmitPush(r0); | |
| 1662 // __ lw(a0, frame_->Top()); | |
| 1663 // frame_->EmitPush(a0); | |
| 1664 // Comparison(eq, NULL, clause->label(), true); | |
| 1665 // Branch(false, &next_test); | |
| 1666 // __ nop(); // NOP_ADDED | |
| 1667 // | |
| 1668 // // Before entering the body from the test, remove the switch value from | |
| 1669 // // the stack. | |
| 1670 // frame_->Drop(); | |
| 1671 // | |
| 1672 // // Label the body so that fall through is enabled. | |
| 1673 // if (i > 0 && cases->at(i - 1)->is_default()) { | |
| 1674 // default_exit.Bind(); | |
| 1675 // } else { | |
| 1676 // fall_through.Bind(); | |
| 1677 // fall_through.Unuse(); | |
| 1678 // } | |
| 1679 // VisitStatementsAndSpill(clause->statements()); | |
| 1680 // | |
| 1681 // // If control flow can fall through from the body, jump to the next body | |
| 1682 // // or the end of the statement. | |
| 1683 // if (frame_ != NULL) { | |
| 1684 // if (i < length - 1 && cases->at(i + 1)->is_default()) { | |
| 1685 // default_entry.Jump(); | |
| 1686 // __ nop(); // NOP_ADDED | |
| 1687 // } else { | |
| 1688 // fall_through.Jump(); | |
| 1689 // __ nop(); // NOP_ADDED | |
| 1690 // } | |
| 1691 // } | |
| 1692 // } | |
| 1693 // | |
| 1694 // // The final "test" removes the switch value. | |
| 1695 // next_test.Bind(); | |
| 1696 // frame_->Drop(); | |
| 1697 // | |
| 1698 // // If there is a default clause, compile it. | |
| 1699 // if (default_clause != NULL) { | |
| 1700 // Comment cmnt(masm_, "[ Default clause"); | |
| 1701 // default_entry.Bind(); | |
| 1702 // VisitStatementsAndSpill(default_clause->statements()); | |
| 1703 // // If control flow can fall out of the default and there is a case after | |
| 1704 // // it, jump to that case's body. | |
| 1705 // if (frame_ != NULL && default_exit.is_bound()) { | |
| 1706 // default_exit.Jump(); | |
| 1707 // __ nop(); // NOP_ADDED | |
| 1708 // } | |
| 1709 // } | |
| 1710 // | |
| 1711 // if (fall_through.is_linked()) { | |
| 1712 // fall_through.Bind(); | |
| 1713 // } | |
| 1714 // | |
| 1715 // if (node->break_target()->is_linked()) { | |
| 1716 // node->break_target()->Bind(); | |
| 1717 // } | |
| 1718 // node->break_target()->Unuse(); | |
| 1719 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1720 } | |
| 1721 | |
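The switch visitor lowers the statement to a linear cascade: each non-default clause re-tests a duplicated copy of the switch value, bodies are linked by explicit fall-through jumps (JavaScript cases fall through unless a break intervenes), and the default clause is compiled last but entered in source order via default_entry/default_exit. A plain-C++ sketch of that cascade for `switch (x) { case 1: A; case 2: B; default: C; }`, abstracting away the stack handling:

```cpp
#include <cstdio>

void lowered_switch(int x) {
  if (x != 1) goto test2;     // Comparison + Branch(false, &next_test)
  std::printf("A\n");         // body A
  goto body2;                 // fall_through.Jump(); __ nop()
test2:
  if (x != 2) goto default_;  // next_test rebound for clause 2
body2:
  std::printf("B\n");         // body B, falls through into default
default_:
  std::printf("C\n");         // default clause, compiled last
}

int main() {
  lowered_switch(1);  // A B C
  lowered_switch(3);  // C
}
```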
| 1722 | |
| 1723 void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) { | |
| 1724 UNIMPLEMENTED_(); | |
| 1725 //#ifdef DEBUG | |
| 1726 //// printf("CodeGenerator::VisitDoWhileStatement\n"); | |
| 1727 //#endif | |
| 1728 // | |
| 1729 //#ifdef DEBUG | |
| 1730 // int original_height = frame_->height(); | |
| 1731 //#endif | |
| 1732 // VirtualFrame::SpilledScope spilled_scope; | |
| 1733 // Comment cmnt(masm_, "[ DoWhileStatement"); | |
| 1734 // CodeForStatementPosition(node); | |
| 1735 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1736 // JumpTarget body(JumpTarget::BIDIRECTIONAL); | |
| 1737 // | |
| 1738 // // Label the top of the loop for the backward CFG edge. If the test | |
| 1739 // // is always true we can use the continue target, and if the test is | |
| 1740 // // always false there is no need. | |
| 1741 // ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 1742 // switch (info) { | |
| 1743 // case ALWAYS_TRUE: | |
| 1744 // node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 1745 // node->continue_target()->Bind(); | |
| 1746 // break; | |
| 1747 // case ALWAYS_FALSE: | |
| 1748 // node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1749 // break; | |
| 1750 // case DONT_KNOW: | |
| 1751 // node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1752 // body.Bind(); | |
| 1753 // break; | |
| 1754 // } | |
| 1755 // | |
| 1756 // CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 1757 // VisitAndSpill(node->body()); | |
| 1758 // | |
| 1759 // // Compile the test. | |
| 1760 // switch (info) { | |
| 1761 // case ALWAYS_TRUE: | |
| 1762 // // If control can fall off the end of the body, jump back to the | |
| 1763 // // top. | |
| 1764 // if (has_valid_frame()) { | |
| 1765 // node->continue_target()->Jump(); | |
| 1766 // __ nop(); // NOP_ADDED | |
| 1767 // } | |
| 1768 // break; | |
| 1769 // case ALWAYS_FALSE: | |
| 1770 // // If we have a continue in the body, we only have to bind its | |
| 1771 // // jump target. | |
| 1772 // if (node->continue_target()->is_linked()) { | |
| 1773 // node->continue_target()->Bind(); | |
| 1774 // } | |
| 1775 // break; | |
| 1776 // case DONT_KNOW: | |
| 1777 // // We have to compile the test expression if it can be reached by | |
| 1778 // // control flow falling out of the body or via continue. | |
| 1779 // if (node->continue_target()->is_linked()) { | |
| 1780 // node->continue_target()->Bind(); | |
| 1781 // } | |
| 1782 // if (has_valid_frame()) { | |
| 1783 // LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); | |
| 1784 // if (has_valid_frame()) { | |
| 1785 // // An invalid frame here indicates that control did not | |
| 1786 // // fall out of the test expression. | |
| 1787 // Branch(true, &body); | |
| 1788 // } | |
| 1789 // } | |
| 1790 // break; | |
| 1791 // } | |
| 1792 // | |
| 1793 // if (node->break_target()->is_linked()) { | |
| 1794 // node->break_target()->Bind(); | |
| 1795 // } | |
| 1796 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1797 } | |
| 1798 | |
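AnalyzeCondition folds literal loop conditions so that `do { … } while (true)` gets a bare backward jump and a false condition compiles no test at all; the same three-way split drives the while and for visitors below. A sketch of the DONT_KNOW shape in plain C++ (the other two cases are described in the trailing comments; illustrative only):

```cpp
#include <cstdio>

// DONT_KNOW: the test could not be folded, so it is compiled after the
// body and branches back while it holds (Branch(true, &body)).
void dont_know_shape(int n) {
body:
  std::printf("body\n");
  if (--n > 0) goto body;
}

int main() { dont_know_shape(2); }

// ALWAYS_TRUE replaces the compiled test with an unconditional back
// edge to the continue target; ALWAYS_FALSE emits the body once with
// no back edge at all.
```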
| 1799 | |
| 1800 void CodeGenerator::VisitWhileStatement(WhileStatement* node) { | |
| 1801 UNIMPLEMENTED_(); | |
| 1802 //#ifdef DEBUG | |
| 1803 //// printf("CodeGenerator::VisitWhileStatement\n"); | |
| 1804 //#endif | |
| 1805 // | |
| 1806 //#ifdef DEBUG | |
| 1807 // int original_height = frame_->height(); | |
| 1808 //#endif | |
| 1809 // VirtualFrame::SpilledScope spilled_scope; | |
| 1810 // Comment cmnt(masm_, "[ WhileStatement"); | |
| 1811 // CodeForStatementPosition(node); | |
| 1812 // | |
| 1813 // // If the test is never true and has no side effects there is no need | |
| 1814 // // to compile the test or body. | |
| 1815 // ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 1816 // if (info == ALWAYS_FALSE) return; | |
| 1817 // | |
| 1818 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1819 // | |
| 1820 // // Label the top of the loop with the continue target for the backward | |
| 1821 // // CFG edge. | |
| 1822 // node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 1823 // node->continue_target()->Bind(); | |
| 1824 // | |
| 1825 // | |
| 1826 // if (info == DONT_KNOW) { | |
| 1827 // JumpTarget body; | |
| 1828 // LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); | |
| 1829 // if (has_valid_frame()) { | |
| 1830 // // A NULL frame indicates that control did not fall out of the | |
| 1831 // // test expression. | |
| 1832 // Branch(false, node->break_target()); | |
| 1833 // } | |
| 1834 // if (has_valid_frame() || body.is_linked()) { | |
| 1835 // body.Bind(); | |
| 1836 // } | |
| 1837 // } | |
| 1838 // | |
| 1839 // if (has_valid_frame()) { | |
| 1840 // CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 1841 // VisitAndSpill(node->body()); | |
| 1842 // | |
| 1843 // // If control flow can fall out of the body, jump back to the top. | |
| 1844 // if (has_valid_frame()) { | |
| 1845 // node->continue_target()->Jump(); | |
| 1846 // __ nop(); // NOP_ADDED | |
| 1847 // } | |
| 1848 // } | |
| 1849 // if (node->break_target()->is_linked()) { | |
| 1850 // node->break_target()->Bind(); | |
| 1851 // } | |
| 1852 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1853 } | |
| 1854 | |
| 1855 | |
| 1856 void CodeGenerator::VisitForStatement(ForStatement* node) { | |
| 1857 UNIMPLEMENTED_(); | |
| 1858 //#ifdef DEBUG | |
| 1859 //// printf("CodeGenerator::VisitForStatement\n"); | |
| 1860 //#endif | |
| 1861 // | |
| 1862 //#ifdef DEBUG | |
| 1863 // int original_height = frame_->height(); | |
| 1864 //#endif | |
| 1865 // VirtualFrame::SpilledScope spilled_scope; | |
| 1866 // Comment cmnt(masm_, "[ ForStatement"); | |
| 1867 // CodeForStatementPosition(node); | |
| 1868 // if (node->init() != NULL) { | |
| 1869 // VisitAndSpill(node->init()); | |
| 1870 // } | |
| 1871 // | |
| 1872 // // If the test is never true there is no need to compile the test or | |
| 1873 // // body. | |
| 1874 // ConditionAnalysis info = AnalyzeCondition(node->cond()); | |
| 1875 // if (info == ALWAYS_FALSE) return; | |
| 1876 // | |
| 1877 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1878 // | |
| 1879 // // If there is no update statement, label the top of the loop with the | |
| 1880 // // continue target, otherwise with the loop target. | |
| 1881 // JumpTarget loop(JumpTarget::BIDIRECTIONAL); | |
| 1882 // if (node->next() == NULL) { | |
| 1883 // node->continue_target()->set_direction(JumpTarget::BIDIRECTIONAL); | |
| 1884 // node->continue_target()->Bind(); | |
| 1885 // } else { | |
| 1886 // node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 1887 // loop.Bind(); | |
| 1888 // } | |
| 1889 // | |
| 1890 // // If the test is always true, there is no need to compile it. | |
| 1891 // if (info == DONT_KNOW) { | |
| 1892 // JumpTarget body; | |
| 1893 // LoadConditionAndSpill(node->cond(), &body, node->break_target(), true); | |
| 1894 // if (has_valid_frame()) { | |
| 1895 // Branch(false, node->break_target()); | |
| 1896 // __ nop(); // NOP_ADDED | |
| 1897 // } | |
| 1898 // if (has_valid_frame() || body.is_linked()) { | |
| 1899 // body.Bind(); | |
| 1900 // } | |
| 1901 // } | |
| 1902 // | |
| 1903 // if (has_valid_frame()) { | |
| 1904 // CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 1905 // VisitAndSpill(node->body()); | |
| 1906 // | |
| 1907 // if (node->next() == NULL) { | |
| 1908 // // If there is no update statement and control flow can fall out | |
| 1909 // // of the loop, jump directly to the continue label. | |
| 1910 // if (has_valid_frame()) { | |
| 1911 // node->continue_target()->Jump(); | |
| 1912 // __ nop(); // NOP_ADDED | |
| 1913 // } | |
| 1914 // } else { | |
| 1915 // // If there is an update statement and control flow can reach it | |
| 1916 // // via falling out of the body of the loop or continuing, we | |
| 1917 // // compile the update statement. | |
| 1918 // if (node->continue_target()->is_linked()) { | |
| 1919 // node->continue_target()->Bind(); | |
| 1920 // } | |
| 1921 // if (has_valid_frame()) { | |
| 1922 // // Record source position of the statement as this code which is | |
| 1923 // // after the code for the body actually belongs to the loop | |
| 1924 // // statement and not the body. | |
| 1925 // CodeForStatementPosition(node); | |
| 1926 // VisitAndSpill(node->next()); | |
| 1927 // loop.Jump(); | |
| 1928 // __ nop(); // NOP_ADDED | |
| 1929 // } | |
| 1930 // } | |
| 1931 // } | |
| 1932 // if (node->break_target()->is_linked()) { | |
| 1933 // node->break_target()->Bind(); | |
| 1934 // } | |
| 1935 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 1936 } | |
| 1937 | |
| 1938 | |
| 1939 void CodeGenerator::VisitForInStatement(ForInStatement* node) { | |
| 1940 UNIMPLEMENTED_(); | |
| 1941 //#ifdef DEBUG | |
| 1942 //// printf("CodeGenerator::VisitForInStatement\n"); | |
| 1943 //#endif | |
| 1944 //#ifdef DEBUG | |
| 1945 // int original_height = frame_->height(); | |
| 1946 //#endif | |
| 1947 // VirtualFrame::SpilledScope spilled_scope; | |
| 1948 // Comment cmnt(masm_, "[ ForInStatement"); | |
| 1949 // CodeForStatementPosition(node); | |
| 1950 // | |
| 1951 // JumpTarget primitive; | |
| 1952 // JumpTarget jsobject; | |
| 1953 // JumpTarget fixed_array; | |
| 1954 // JumpTarget entry(JumpTarget::BIDIRECTIONAL); | |
| 1955 // JumpTarget end_del_check; | |
| 1956 // JumpTarget exit; | |
| 1957 // | |
| 1958 // // Get the object to enumerate over (converted to JSObject). | |
| 1959 // LoadAndSpill(node->enumerable()); | |
| 1960 // | |
| 1961 // // Both SpiderMonkey and kjs ignore null and undefined in contrast | |
| 1962 // // to the specification. 12.6.4 mandates a call to ToObject. | |
| 1963 //// frame_->EmitPop(r0); | |
| 1964 //// __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 1965 //// __ cmp(r0, ip); | |
| 1966 //// exit.Branch(eq); | |
| 1967 //// __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 1968 //// __ cmp(r0, ip); | |
| 1969 //// exit.Branch(eq); | |
| 1970 // frame_->EmitPop(a0); | |
| 1971 // __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 1972 // exit.Branch(eq, no_hint, a0, Operand(ip)); | |
| 1973 // __ nop(); // NOP_ADDED | |
| 1974 // __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 1975 // exit.Branch(eq, no_hint, a0, Operand(ip)); | |
| 1976 // __ nop(); // NOP_ADDED | |
| 1977 // | |
| 1978 // // Stack layout in body: | |
| 1979 // // [iteration counter (Smi)] | |
| 1980 // // [length of array] | |
| 1981 // // [FixedArray] | |
| 1982 // // [Map or 0] | |
| 1983 // // [Object] | |
| 1984 // | |
| 1985 // // Check if enumerable is already a JSObject | |
| 1986 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 1987 //// primitive.Branch(eq); | |
| 1988 //// __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE); | |
| 1989 //// jsobject.Branch(hs); | |
| 1990 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 1991 // primitive.Branch(eq, no_hint, t0, Operand(zero_reg)); | |
| 1992 // __ nop(); // NOP_ADDED | |
| 1993 // __ GetObjectType(a0, a1, a1); | |
| 1994 // jsobject.Branch(Ugreater_equal, no_hint, a1, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 1995 // __ nop(); // NOP_ADDED | |
| 1996 // | |
| 1997 // primitive.Bind(); | |
| 1998 //#ifdef NO_NATIVES | |
| 1999 // __ break_(0x1973); | |
| 2000 //#else | |
| 2001 // frame_->EmitPush(a0); | |
| 2002 // Result arg_count(a0); | |
| 2003 //// __ mov(r0, Operand(0)); | |
| 2004 // __ li(a0, Operand(0)); | |
| 2005 // frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS, &arg_count, 1); | |
| 2006 // __ nop(); // NOP_ADDED | |
| 2007 //#endif | |
| 2008 // | |
| 2009 // jsobject.Bind(); | |
| 2010 // // Get the set of properties (as a FixedArray or Map). | |
| 2011 // frame_->EmitPush(a0); // duplicate the object being enumerated | |
| 2012 // frame_->EmitPush(a0); | |
| 2013 // frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1); | |
| 2014 // __ nop(); // NOP_ADDED | |
| 2015 // | |
| 2016 // // If we got a Map, we can do a fast modification check. | |
| 2017 // // Otherwise, we got a FixedArray, and we have to do a slow check. | |
| 2018 //// __ mov(r2, Operand(r0)); | |
| 2019 //// __ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset)); | |
| 2020 //// __ LoadRoot(ip, Heap::kMetaMapRootIndex); | |
| 2021 //// __ cmp(r1, ip); | |
| 2022 //// fixed_array.Branch(ne); | |
| 2023 // __ mov(a2, v0); | |
| 2024 // __ lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); | |
| 2025 // __ LoadRoot(ip, Heap::kMetaMapRootIndex); | |
| 2026 // fixed_array.Branch(ne, no_hint, a1, Operand(ip)); | |
| 2027 // __ nop(); // NOP_ADDED | |
| 2028 // | |
| 2029 // // Get enum cache | |
| 2030 //// __ mov(r1, Operand(r0)); | |
| 2031 //// __ ldr(r1, FieldMemOperand(r1, Map::kInstanceDescriptorsOffset)); | |
| 2032 //// __ ldr(r1, FieldMemOperand(r1, DescriptorArray::kEnumerationIndexOffset)); | |
| 2033 //// __ ldr(r2, | |
| 2034 //// FieldMemOperand(r1, DescriptorArray::kEnumCacheBridgeCacheOffset)); | |
| 2035 // __ mov(a1, v0); | |
| 2036 // __ lw(a1, FieldMemOperand(a1, Map::kInstanceDescriptorsOffset)); | |
| 2037 // __ lw(a1, FieldMemOperand(a1, DescriptorArray::kEnumerationIndexOffset)); | |
| 2038 // __ lw(a2, FieldMemOperand(a1, DescriptorArray::kEnumCacheBridgeCacheOffset)); | |
| 2039 // | |
| 2040 //// frame_->EmitPush(r0); // map | |
| 2041 //// frame_->EmitPush(r2); // enum cache bridge cache | |
| 2042 //// __ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset)); | |
| 2043 //// __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
| 2044 //// frame_->EmitPush(r0); | |
| 2045 //// __ mov(r0, Operand(Smi::FromInt(0))); | |
| 2046 //// frame_->EmitPush(r0); | |
| 2047 //// entry.Jump(); | |
| 2048 // frame_->EmitPush(v0); // map | |
| 2049 // frame_->EmitPush(a2); // enum cache bridge cache | |
| 2050 // __ lw(a0, FieldMemOperand(a2, FixedArray::kLengthOffset)); | |
| 2051 // __ sll(a0, a0, kSmiTagSize); | |
| 2052 // frame_->EmitPush(a0); | |
| 2053 // __ li(a0, Operand(Smi::FromInt(0))); | |
| 2054 // frame_->EmitPush(a0); | |
| 2055 // entry.Jump(); | |
| 2056 // __ nop(); // NOP_ADDED | |
| 2057 // | |
| 2058 // fixed_array.Bind(); | |
| 2059 //// __ mov(r1, Operand(Smi::FromInt(0))); | |
| 2060 //// frame_->EmitPush(r1); // insert 0 in place of Map | |
| 2061 //// frame_->EmitPush(r0); | |
| 2062 // __ li(a1, Operand(Smi::FromInt(0))); | |
| 2063 // frame_->EmitPush(a1); // insert 0 in place of Map | |
| 2064 // frame_->EmitPush(v0); | |
| 2065 // | |
| 2066 // // Push the length of the array and the initial index onto the stack. | |
| 2067 //// __ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset)); | |
| 2068 //// __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
| 2069 //// frame_->EmitPush(r0); | |
| 2070 //// __ mov(r0, Operand(Smi::FromInt(0))); // init index | |
| 2071 //// frame_->EmitPush(r0); | |
| 2072 // __ lw(a0, FieldMemOperand(v0, FixedArray::kLengthOffset)); | |
| 2073 // __ sll(a0, a0, kSmiTagSize); | |
| 2074 // frame_->EmitPush(a0); | |
| 2075 // __ li(a0, Operand(Smi::FromInt(0))); // init index | |
| 2076 // frame_->EmitPush(a0); | |
| 2077 // | |
| 2078 // // Condition. | |
| 2079 // entry.Bind(); | |
| 2080 // // sp[0] : index | |
| 2081 // // sp[1] : array/enum cache length | |
| 2082 // // sp[2] : array or enum cache | |
| 2083 // // sp[3] : 0 or map | |
| 2084 // // sp[4] : enumerable | |
| 2085 // // Grab the current frame's height for the break and continue | |
| 2086 // // targets only after all the state is pushed on the frame. | |
| 2087 // node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 2088 // node->continue_target()->set_direction(JumpTarget::FORWARD_ONLY); | |
| 2089 // | |
| 2090 //// __ ldr(r0, frame_->ElementAt(0)); // load the current count | |
| 2091 //// __ ldr(r1, frame_->ElementAt(1)); // load the length | |
| 2092 //// __ cmp(r0, Operand(r1)); // compare to the array length | |
| 2093 //// node->break_target()->Branch(hs); | |
| 2094 // __ lw(a0, frame_->ElementAt(0)); // load the current count | |
| 2095 // __ lw(a1, frame_->ElementAt(1)); // load the length | |
| 2096 // node->break_target()->Branch(Ugreater_equal, no_hint, a0, Operand(a1)); | |
| 2097 // __ nop(); // NOP_ADDED | |
| 2098 // | |
| 2099 //// __ ldr(r0, frame_->ElementAt(0)); | |
| 2100 // __ lw(a0, frame_->ElementAt(0)); | |
| 2101 // | |
| 2102 // // Get the i'th entry of the array. | |
| 2103 //// __ ldr(r2, frame_->ElementAt(2)); | |
| 2104 //// __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 2105 //// __ ldr(r3, MemOperand(r2, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 2106 // __ lw(a2, frame_->ElementAt(2)); | |
| 2107 // __ addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 2108 // __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); | |
| 2109 // __ addu(t2, t0, a2); | |
| 2110 // __ lw(a3, MemOperand(t2)); | |
| 2111 // | |
| 2112 // // Get Map or 0. | |
| 2113 //// __ ldr(r2, frame_->ElementAt(3)); | |
| 2114 // __ lw(a2, frame_->ElementAt(3)); | |
| 2115 // // Check if this (still) matches the map of the enumerable. | |
| 2116 // // If not, we have to filter the key. | |
| 2117 //// __ ldr(r1, frame_->ElementAt(4)); | |
| 2118 //// __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 2119 //// __ cmp(r1, Operand(r2)); | |
| 2120 //// end_del_check.Branch(eq); | |
| 2121 // __ lw(a1, frame_->ElementAt(4)); | |
| 2122 // __ lw(a1, FieldMemOperand(a1, HeapObject::kMapOffset)); | |
| 2123 // end_del_check.Branch(eq, no_hint, a1, Operand(a2)); | |
| 2124 // __ nop(); // NOP_ADDED | |
| 2125 // | |
| 2126 // // Convert the entry to a string (or null if it isn't a property anymore). | |
| 2127 //// __ ldr(r0, frame_->ElementAt(4)); // push enumerable | |
| 2128 //// frame_->EmitPush(r0); | |
| 2129 //// frame_->EmitPush(r3); // push entry | |
| 2130 //// Result arg_count_reg(r0); | |
| 2131 //// __ mov(r0, Operand(1)); | |
| 2132 //// frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2); | |
| 2133 //// __ mov(r3, Operand(r0)); | |
| 2134 // __ lw(a0, frame_->ElementAt(4)); // push enumerable | |
| 2135 // frame_->EmitPush(a0); | |
| 2136 // frame_->EmitPush(a3); // push entry | |
| 2137 // Result arg_count_reg(a0); | |
| 2138 // __ li(a0, Operand(1)); | |
| 2139 // frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_JS, &arg_count_reg, 2); | |
| 2140 // __ nop(); // NOP_ADDED | |
| 2141 // __ mov(a3, a0); | |
| 2142 // | |
| 2143 // // If the property has been removed while iterating, we just skip it. | |
| 2144 // __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 2145 //// __ cmp(r3, ip); | |
| 2146 // node->continue_target()->Branch(eq, no_hint, a3, Operand(ip)); | |
| 2147 // __ nop(); // NOP_ADDED | |
| 2148 // | |
| 2149 // end_del_check.Bind(); | |
| 2150 // // Store the entry in the 'each' expression and take another spin in the | |
| 2151 // // loop. a3: i'th entry of the enum cache (or string thereof) | |
| 2152 //// frame_->EmitPush(r3); // push entry | |
| 2153 // frame_->EmitPush(a3); // push entry | |
| 2154 // { Reference each(this, node->each()); | |
| 2155 // if (!each.is_illegal()) { | |
| 2156 // if (each.size() > 0) { | |
| 2157 //// __ ldr(r0, frame_->ElementAt(each.size())); | |
| 2158 //// frame_->EmitPush(r0); | |
| 2159 // __ lw(a0, frame_->ElementAt(each.size())); | |
| 2160 // frame_->EmitPush(a0); | |
| 2161 // } | |
| 2162 // // If the reference was to a slot we rely on the convenient property | |
| 2163 // // that it doesn't matter whether a value (eg, a3 pushed above) is | |
| 2164 // // right on top of or right underneath a zero-sized reference. | |
| 2165 // each.SetValue(NOT_CONST_INIT); | |
| 2166 // if (each.size() > 0) { | |
| 2167 // // It's safe to pop the value lying on top of the reference before | |
| 2168 // // unloading the reference itself (which preserves the top of stack, | |
| 2169 // // ie, now the topmost value of the non-zero sized reference), since | |
| 2170 // // we will discard the top of stack after unloading the reference | |
| 2171 // // anyway. | |
| 2172 //// frame_->EmitPop(r0); | |
| 2173 // frame_->EmitPop(a0); | |
| 2174 // } | |
| 2175 // } | |
| 2176 // } | |
| 2177 // // Discard the i'th entry pushed above or else the remainder of the | |
| 2178 // // reference, whichever is currently on top of the stack. | |
| 2179 // frame_->Drop(); | |
| 2180 // | |
| 2181 // // Body. | |
| 2182 // CheckStack(); // TODO(1222600): ignore if body contains calls. | |
| 2183 // VisitAndSpill(node->body()); | |
| 2184 // | |
| 2185 // // Next. Reestablish a spilled frame in case we are coming here via | |
| 2186 // // a continue in the body. | |
| 2187 // node->continue_target()->Bind(); | |
| 2188 // frame_->SpillAll(); | |
| 2189 //// frame_->EmitPop(r0); | |
| 2190 //// __ add(r0, r0, Operand(Smi::FromInt(1))); | |
| 2191 //// frame_->EmitPush(r0); | |
| 2192 // frame_->EmitPop(a0); | |
| 2193 // __ add(a0, a0, Operand(Smi::FromInt(1))); | |
| 2194 // frame_->EmitPush(a0); | |
| 2195 // entry.Jump(); | |
| 2196 // __ nop(); // NOP_ADDED | |
| 2197 // | |
| 2198 // // Cleanup. No need to spill because VirtualFrame::Drop is safe for | |
| 2199 // // any frame. | |
| 2200 // node->break_target()->Bind(); | |
| 2201 // frame_->Drop(5); | |
| 2202 // | |
| 2203 // // Exit. | |
| 2204 // exit.Bind(); | |
| 2205 // node->continue_target()->Unuse(); | |
| 2206 // node->break_target()->Unuse(); | |
| 2207 // ASSERT(frame_->height() == original_height); | |
| 2208 } | |
| 2209 | |
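The for-in loop above keeps its whole iteration state on the stack (index, cache length, cache, map-or-0, enumerable, per the sp[0]..sp[4] comment) and stores the index as a Smi, bumping it by adding Smi::FromInt(1) directly. That works because, assuming the 32-bit V8 layout this port targets (kSmiTag == 0, kSmiTagSize == 1), a Smi is just the integer shifted left one bit, so Smi arithmetic is ordinary integer arithmetic on the tagged values:

```cpp
#include <cstdint>
#include <cstdio>

constexpr int kSmiTagSize = 1;  // assumption: 32-bit V8 Smi layout

int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
int32_t SmiToInt(int32_t smi)     { return smi >> kSmiTagSize; }

int main() {
  int32_t index = SmiFromInt(0);   // __ li(a0, Operand(Smi::FromInt(0)))
  index += SmiFromInt(1);          // the add in the loop's "Next" block
  std::printf("%d\n", SmiToInt(index));  // 1
}
```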
| 2210 | |
| 2211 void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) { | |
| 2212 UNIMPLEMENTED_(); | |
| 2213 //#ifdef DEBUG | |
| 2214 // int original_height = frame_->height(); | |
| 2215 //#endif | |
| 2216 // VirtualFrame::SpilledScope spilled_scope; | |
| 2217 // Comment cmnt(masm_, "[ TryCatchStatement"); | |
| 2218 // CodeForStatementPosition(node); | |
| 2219 // | |
| 2220 // JumpTarget try_block; | |
| 2221 // JumpTarget exit; | |
| 2222 // | |
| 2223 // | |
| 2224 // try_block.Call(); | |
| 2225 // __ nop(); | |
| 2226 // // --- Catch block --- | |
| 2227 //// frame_->EmitPush(r0); | |
| 2228 // frame_->EmitPush(v0); | |
| 2229 // | |
| 2230 // // Store the caught exception in the catch variable. | |
| 2231 // { Reference ref(this, node->catch_var()); | |
| 2232 // ASSERT(ref.is_slot()); | |
| 2233 // // Here we make use of the convenient property that it doesn't matter | |
| 2234 // // whether a value is immediately on top of or underneath a zero-sized | |
| 2235 // // reference. | |
| 2236 // ref.SetValue(NOT_CONST_INIT); | |
| 2237 // } | |
| 2238 // | |
| 2239 // // Remove the exception from the stack. | |
| 2240 // frame_->Drop(); | |
| 2241 // | |
| 2242 // VisitStatementsAndSpill(node->catch_block()->statements()); | |
| 2243 // if (frame_ != NULL) { | |
| 2244 // exit.Jump(); | |
| 2245 // __ nop(); | |
| 2246 // } | |
| 2247 // | |
| 2248 // | |
| 2249 // // --- Try block --- | |
| 2250 // try_block.Bind(); | |
| 2251 // | |
| 2252 // frame_->PushTryHandler(TRY_CATCH_HANDLER); | |
| 2253 // int handler_height = frame_->height(); | |
| 2254 // | |
| 2255 // // Shadow the labels for all escapes from the try block, including | |
| 2256 // // returns. During shadowing, the original label is hidden as the | |
| 2257 // // LabelShadow and operations on the original actually affect the | |
| 2258 // // shadowing label. | |
| 2259 // // | |
| 2260 // // We should probably try to unify the escaping labels and the return | |
| 2261 // // label. | |
| 2262 // int nof_escapes = node->escaping_targets()->length(); | |
| 2263 // List<ShadowTarget*> shadows(1 + nof_escapes); | |
| 2264 // | |
| 2265 // // Add the shadow target for the function return. | |
| 2266 // static const int kReturnShadowIndex = 0; | |
| 2267 // shadows.Add(new ShadowTarget(&function_return_)); | |
| 2268 // bool function_return_was_shadowed = function_return_is_shadowed_; | |
| 2269 // function_return_is_shadowed_ = true; | |
| 2270 // ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); | |
| 2271 // | |
| 2272 // // Add the remaining shadow targets. | |
| 2273 // for (int i = 0; i < nof_escapes; i++) { | |
| 2274 // shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); | |
| 2275 // } | |
| 2276 // | |
| 2277 // // Generate code for the statements in the try block. | |
| 2278 // VisitStatementsAndSpill(node->try_block()->statements()); | |
| 2279 // | |
| 2280 // // Stop the introduced shadowing and count the number of required unlinks. | |
| 2281 // // After shadowing stops, the original labels are unshadowed and the | |
| 2282 // // LabelShadows represent the formerly shadowing labels. | |
| 2283 // bool has_unlinks = false; | |
| 2284 // for (int i = 0; i < shadows.length(); i++) { | |
| 2285 // shadows[i]->StopShadowing(); | |
| 2286 // has_unlinks = has_unlinks || shadows[i]->is_linked(); | |
| 2287 // } | |
| 2288 // function_return_is_shadowed_ = function_return_was_shadowed; | |
| 2289 // | |
| 2290 // // Get an external reference to the handler address. | |
| 2291 // ExternalReference handler_address(Top::k_handler_address); | |
| 2292 // | |
| 2293 // // If we can fall off the end of the try block, unlink from try chain. | |
| 2294 // if (has_valid_frame()) { | |
| 2295 // // The next handler address is on top of the frame. Unlink from | |
| 2296 // // the handler list and drop the rest of this handler from the | |
| 2297 // // frame. | |
| 2298 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2299 //// frame_->EmitPop(r1); | |
| 2300 //// __ mov(r3, Operand(handler_address)); | |
| 2301 //// __ str(r1, MemOperand(r3)); | |
| 2302 // frame_->EmitPop(a1); | |
| 2303 // __ li(a3, Operand(handler_address)); | |
| 2304 // __ sw(a1, MemOperand(a3)); | |
| 2305 // frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 2306 // if (has_unlinks) { | |
| 2307 // exit.Jump(); | |
| 2308 // __ nop(); | |
| 2309 // } | |
| 2310 // } | |
| 2311 // | |
| 2312 // // Generate unlink code for the (formerly) shadowing labels that have been | |
| 2313 // // jumped to. Deallocate each shadow target. | |
| 2314 // for (int i = 0; i < shadows.length(); i++) { | |
| 2315 // if (shadows[i]->is_linked()) { | |
| 2316 // // Unlink from try chain; | |
| 2317 // shadows[i]->Bind(); | |
| 2318 // // Because we can be jumping here (to spilled code) from unspilled | |
| 2319 // // code, we need to reestablish a spilled frame at this block. | |
| 2320 // frame_->SpillAll(); | |
| 2321 // | |
| 2322 // // Reload sp from the top handler, because some statements that we | |
| 2323 // // break from (eg, for...in) may have left stuff on the stack. | |
| 2324 //// __ mov(r3, Operand(handler_address)); | |
| 2325 //// __ ldr(sp, MemOperand(r3)); | |
| 2326 // __ li(a3, Operand(handler_address)); | |
| 2327 // __ lw(sp, MemOperand(a3)); | |
| 2328 // frame_->Forget(frame_->height() - handler_height); | |
| 2329 // | |
| 2330 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2331 //// frame_->EmitPop(r1); | |
| 2332 //// __ str(r1, MemOperand(r3)); | |
| 2333 // frame_->EmitPop(a1); | |
| 2334 // __ sw(a1, MemOperand(a3)); | |
| 2335 // frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 2336 // | |
| 2337 // if (!function_return_is_shadowed_ && i == kReturnShadowIndex) { | |
| 2338 // frame_->PrepareForReturn(); | |
| 2339 // } | |
| 2340 // shadows[i]->other_target()->Jump(); | |
| 2341 // } | |
| 2342 // } | |
| 2343 // | |
| 2344 // exit.Bind(); | |
| 2345 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 2346 } | |
| 2347 | |
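The shadow-target machinery above exists because a return, break, or continue inside the try block must not skip handler cleanup: each escaping target is shadowed for the duration of the block, and the shadow's landing pad unlinks the try handler before resuming the original jump. A toy model in C++ (the handler_chain vector is a stand-in, not a real V8 structure):

```cpp
#include <cstdio>
#include <vector>

std::vector<const char*> handler_chain;

void original_return_target() { std::printf("function return\n"); }

// What the shadow's unlink block does: pop the handler, then jump on.
void shadowed_return_target() {
  handler_chain.pop_back();   // unlink the TRY_CATCH handler first
  original_return_target();   // shadows[i]->other_target()->Jump()
}

int main() {
  handler_chain.push_back("try_catch");  // frame_->PushTryHandler(...)
  // A `return` inside the try block binds to the shadow, not the
  // original target:
  shadowed_return_target();
  std::printf("handlers left: %zu\n", handler_chain.size());  // 0
}
```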
| 2348 | |
| 2349 void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) { | |
| 2350 UNIMPLEMENTED_(); | |
| 2351 //#ifdef DEBUG | |
| 2352 //// printf("CodeGenerator::VisitTryFinallyStatement\n"); | |
| 2353 //#endif | |
| 2354 //#ifdef DEBUG | |
| 2355 // int original_height = frame_->height(); | |
| 2356 //#endif | |
| 2357 // VirtualFrame::SpilledScope spilled_scope; | |
| 2358 // Comment cmnt(masm_, "[ TryFinallyStatement"); | |
| 2359 // CodeForStatementPosition(node); | |
| 2360 // | |
| 2361 // // State: Used to keep track of reason for entering the finally | |
| 2362 // // block. Should probably be extended to hold information for | |
| 2363 // // break/continue from within the try block. | |
| 2364 // enum { FALLING, THROWING, JUMPING }; | |
| 2365 // | |
| 2366 // JumpTarget try_block; | |
| 2367 // JumpTarget finally_block; | |
| 2368 // | |
| 2369 // try_block.Call(); | |
| 2370 // __ nop(); // NOP_ADDED | |
| 2371 // | |
| 2372 // frame_->EmitPush(a0); // save exception object on the stack | |
| 2373 // // In case of thrown exceptions, this is where we continue. | |
| 2374 //// __ mov(r2, Operand(Smi::FromInt(THROWING))); | |
| 2375 // __ li(a2, Operand(Smi::FromInt(THROWING))); | |
| 2376 // finally_block.Jump(); | |
| 2377 // __ nop(); // NOP_ADDED | |
| 2378 // | |
| 2379 // // --- Try block --- | |
| 2380 // try_block.Bind(); | |
| 2381 // | |
| 2382 // frame_->PushTryHandler(TRY_FINALLY_HANDLER); | |
| 2383 // int handler_height = frame_->height(); | |
| 2384 // | |
| 2385 // // Shadow the labels for all escapes from the try block, including | |
| 2386 // // returns. Shadowing hides the original label as the LabelShadow and | |
| 2387 // // operations on the original actually affect the shadowing label. | |
| 2388 // | |
| 2389 // // We should probably try to unify the escaping labels and the return | |
| 2390 // // label. | |
| 2391 // int nof_escapes = node->escaping_targets()->length(); | |
| 2392 // List<ShadowTarget*> shadows(1 + nof_escapes); | |
| 2393 // | |
| 2394 // // Add the shadow target for the function return. | |
| 2395 // static const int kReturnShadowIndex = 0; | |
| 2396 // shadows.Add(new ShadowTarget(&function_return_)); | |
| 2397 // bool function_return_was_shadowed = function_return_is_shadowed_; | |
| 2398 // function_return_is_shadowed_ = true; | |
| 2399 // ASSERT(shadows[kReturnShadowIndex]->other_target() == &function_return_); | |
| 2400 // | |
| 2401 // // Add the remaining shadow targets. | |
| 2402 // for (int i = 0; i < nof_escapes; i++) { | |
| 2403 // shadows.Add(new ShadowTarget(node->escaping_targets()->at(i))); | |
| 2404 // } | |
| 2405 // | |
| 2406 // // Generate code for the statements in the try block. | |
| 2407 // VisitStatementsAndSpill(node->try_block()->statements()); | |
| 2408 // | |
| 2409 // // Stop the introduced shadowing and count the number of required unlinks. | |
| 2410 // // After shadowing stops, the original labels are unshadowed and the | |
| 2411 // // LabelShadows represent the formerly shadowing labels. | |
| 2412 // int nof_unlinks = 0; | |
| 2413 // for (int i = 0; i < shadows.length(); i++) { | |
| 2414 // shadows[i]->StopShadowing(); | |
| 2415 // if (shadows[i]->is_linked()) nof_unlinks++; | |
| 2416 // } | |
| 2417 // function_return_is_shadowed_ = function_return_was_shadowed; | |
| 2418 // | |
| 2419 // // Get an external reference to the handler address. | |
| 2420 // ExternalReference handler_address(Top::k_handler_address); | |
| 2421 // | |
| 2422 // // If we can fall off the end of the try block, unlink from the try | |
| 2423 // // chain and set the state on the frame to FALLING. | |
| 2424 // if (has_valid_frame()) { | |
| 2425 // // The next handler address is on top of the frame. | |
| 2426 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2427 //// frame_->EmitPop(r1); | |
| 2428 //// __ mov(r3, Operand(handler_address)); | |
| 2429 //// __ str(r1, MemOperand(r3)); | |
| 2430 //// frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 2431 // frame_->EmitPop(a1); | |
| 2432 // __ li(a3, Operand(handler_address)); | |
| 2433 // __ sw(a1, MemOperand(a3)); | |
| 2434 // frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 2435 // | |
| 2436 // // Fake a top of stack value (unneeded when FALLING) and set the | |
| 2437 // // state in a2, then jump around the unlink blocks if any. | |
| 2438 //// __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | |
| 2439 //// frame_->EmitPush(r0); | |
| 2440 //// __ mov(r2, Operand(Smi::FromInt(FALLING))); | |
| 2441 // __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); | |
| 2442 // frame_->EmitPush(a0); | |
| 2443 // __ li(a2, Operand(Smi::FromInt(FALLING))); | |
| 2444 // if (nof_unlinks > 0) { | |
| 2445 // finally_block.Jump(); | |
| 2446 // __ nop(); // NOP_ADDED | |
| 2447 // } | |
| 2448 // } | |
| 2449 // | |
| 2450 // // Generate code to unlink and set the state for the (formerly) | |
| 2451 // // shadowing targets that have been jumped to. | |
| 2452 // for (int i = 0; i < shadows.length(); i++) { | |
| 2453 // if (shadows[i]->is_linked()) { | |
| 2454 // // If we have come from the shadowed return, the return value is | |
| 2455 // // in (a non-refcounted reference to) r0. We must preserve it | |
| 2456 // // until it is pushed. | |
| 2457 // // | |
| 2458 // // Because we can be jumping here (to spilled code) from | |
| 2459 // // unspilled code, we need to reestablish a spilled frame at | |
| 2460 // // this block. | |
| 2461 // shadows[i]->Bind(); | |
| 2462 // frame_->SpillAll(); | |
| 2463 // | |
| 2464 // // Reload sp from the top handler, because some statements that | |
| 2465 // // we break from (eg, for...in) may have left stuff on the | |
| 2466 // // stack. | |
| 2467 //// __ mov(r3, Operand(handler_address)); | |
| 2468 //// __ ldr(sp, MemOperand(r3)); | |
| 2469 // __ li(a3, Operand(handler_address)); | |
| 2470 // __ lw(sp, MemOperand(a3)); | |
| 2471 // frame_->Forget(frame_->height() - handler_height); | |
| 2472 // | |
| 2473 // // Unlink this handler and drop it from the frame. The next | |
| 2474 // // handler address is currently on top of the frame. | |
| 2475 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2476 // frame_->EmitPop(a1); | |
| 2477 // __ sw(a1, MemOperand(a3)); | |
| 2478 // frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1); | |
| 2479 // | |
| 2480 // if (i == kReturnShadowIndex) { | |
| 2481 // // If this label shadowed the function return, materialize the | |
| 2482 // // return value on the stack. | |
| 2483 // frame_->EmitPush(v0); | |
| 2484 // } else { | |
| 2485 // // Fake TOS for targets that shadowed breaks and continues. | |
| 2486 // __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); | |
| 2487 // frame_->EmitPush(a0); | |
| 2488 // } | |
| 2489 //// __ mov(r2, Operand(Smi::FromInt(JUMPING + i))); | |
| 2490 // if (--nof_unlinks > 0) { | |
| 2491 // // If this is not the last unlink block, jump around the next. | |
| 2492 // finally_block.Jump(); | |
| 2493 // __ nop(); // NOP_ADDED | |
| 2494 // } | |
| 2495 // } | |
| 2496 // } | |
| 2497 // | |
| 2498 //// // --- Finally block --- | |
| 2499 // finally_block.Bind(); | |
| 2500 // | |
| 2501 // // Push the state on the stack. | |
| 2502 //// frame_->EmitPush(r2); | |
| 2503 // frame_->EmitPush(a2); | |
| 2504 // | |
| 2505 // // We keep two elements on the stack - the (possibly faked) result | |
| 2506 // // and the state - while evaluating the finally block. | |
| 2507 // // | |
| 2508 // // Generate code for the statements in the finally block. | |
| 2509 // VisitStatementsAndSpill(node->finally_block()->statements()); | |
| 2510 // | |
| 2511 // if (has_valid_frame()) { | |
| 2512 // // Restore state and return value or faked TOS. | |
| 2513 //// frame_->EmitPop(r2); | |
| 2514 //// frame_->EmitPop(r0); | |
| 2515 // frame_->EmitPop(a2); | |
| 2516 // frame_->EmitPop(a0); | |
| 2517 // } | |
| 2518 // | |
| 2519 // // Generate code to jump to the right destination for all used | |
| 2520 // // formerly shadowing targets. Deallocate each shadow target. | |
| 2521 // for (int i = 0; i < shadows.length(); i++) { | |
| 2522 // if (has_valid_frame() && shadows[i]->is_bound()) { | |
| 2523 // JumpTarget* original = shadows[i]->other_target(); | |
| 2524 //// __ cmp(r2, Operand(Smi::FromInt(JUMPING + i))); | |
| 2525 // if (!function_return_is_shadowed_ && i == kReturnShadowIndex) { | |
| 2526 // JumpTarget skip; | |
| 2527 // skip.Branch(ne, no_hint, a2, Operand(Smi::FromInt(JUMPING + i))); | |
| 2528 // __ nop(); // NOP_ADDED | |
| 2529 // frame_->PrepareForReturn(); | |
| 2530 // original->Jump(); | |
| 2531 // __ nop(); // NOP_ADDED | |
| 2532 // skip.Bind(); | |
| 2533 // } else { | |
| 2534 // original->Branch(eq, no_hint, a2, Operand(Smi::FromInt(JUMPING + i))); | |
| 2535 // __ nop(); // NOP_ADDED | |
| 2536 // } | |
| 2537 // } | |
| 2538 // } | |
| 2539 // | |
| 2540 // if (has_valid_frame()) { | |
| 2541 // // Check if we need to rethrow the exception. | |
| 2542 // JumpTarget exit; | |
| 2543 //// __ cmp(r2, Operand(Smi::FromInt(THROWING))); | |
| 2544 // exit.Branch(ne, no_hint, a2, Operand(Smi::FromInt(THROWING))); | |
| 2545 // __ nop(); // NOP_ADDED | |
| 2546 // | |
| 2547 // // Rethrow exception. | |
| 2548 // frame_->EmitPush(a0); | |
| 2549 // frame_->CallRuntime(Runtime::kReThrow, 1); | |
| 2550 // | |
| 2551 // // Done. | |
| 2552 // exit.Bind(); | |
| 2553 // } | |
| 2554 // ASSERT(!has_valid_frame() || frame_->height() == original_height); | |
| 2555 } | |
| 2556 | |
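try/finally keeps a (result, state) pair on the stack while the finally block runs; the state, one of FALLING, THROWING, or JUMPING plus a shadow index, is re-examined afterwards to decide whether to fall through, rethrow via Runtime::kReThrow, or resume the shadowed jump. A plain-C++ sketch of that dispatch (illustrative only, not generated code):

```cpp
#include <cstdio>

enum State { FALLING, THROWING, JUMPING /* + shadow index */ };

// After the finally block: the state value pushed on entry selects the
// continuation, exactly one branch per possible entry reason.
void after_finally(State state) {
  if (state >= JUMPING) {
    std::printf("resume shadowed target %d\n", state - JUMPING);
  } else if (state == THROWING) {
    std::printf("rethrow exception\n");  // Runtime::kReThrow
  } else {
    std::printf("fall through\n");
  }
}

int main() {
  after_finally(FALLING);
  after_finally(THROWING);
  after_finally(static_cast<State>(JUMPING + 0));
}
```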
| 2557 | |
| 2558 void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) { | |
| 2559 UNIMPLEMENTED(); | |
| 2560 __ break_(0x00666); | |
| 2561 __ nop(); | |
| 2562 //#ifdef DEBUG | |
| 2563 // int original_height = frame_->height(); | |
| 2564 //#endif | |
| 2565 // VirtualFrame::SpilledScope spilled_scope; | |
| 2566 // Comment cmnt(masm_, "[ DebuggerStatement"); | |
| 2567 // CodeForStatementPosition(node); | |
| 2568 //#ifdef ENABLE_DEBUGGER_SUPPORT | |
| 2569 // frame_->CallRuntime(Runtime::kDebugBreak, 0); | |
| 2570 //#endif | |
| 2571 // // Ignore the return value. | |
| 2572 // ASSERT(frame_->height() == original_height); | |
| 2573 } | |
| 2574 | |
| 2575 | |
| 2576 void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) { | |
| 2577 UNIMPLEMENTED_(); | |
| 2578 //#ifdef DEBUG | |
| 2579 //// printf("CodeGenerator::InstantiateBoilerplate\n"); | |
| 2580 //#endif | |
| 2581 // | |
| 2582 // VirtualFrame::SpilledScope spilled_scope; | |
| 2583 // ASSERT(boilerplate->IsBoilerplate()); | |
| 2584 // | |
| 2585 // // Create a new closure. | |
| 2586 // frame_->EmitPush(cp); | |
| 2587 //// __ mov(r0, Operand(boilerplate)); | |
| 2588 //// frame_->EmitPush(r0); | |
| 2589 // __ li(a0, Operand(boilerplate)); | |
| 2590 // frame_->EmitPush(a0); | |
| 2591 // | |
| 2592 // frame_->CallRuntime(Runtime::kNewClosure, 2); | |
| 2593 // __ nop(); // NOP_ADDED | |
| 2594 //// frame_->EmitPush(r0); | |
| 2595 // frame_->EmitPush(v0); | |
| 2596 } | |
| 2597 | |
| 2598 | |
| 2599 void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) { | |
| 2600 UNIMPLEMENTED_(); | |
| 2601 //#ifdef DEBUG | |
| 2602 //// printf("CodeGenerator::VisitFunctionLiteral\n"); | |
| 2603 //#endif | |
| 2604 // | |
| 2605 //#ifdef DEBUG | |
| 2606 // int original_height = frame_->height(); | |
| 2607 //#endif | |
| 2608 // VirtualFrame::SpilledScope spilled_scope; | |
| 2609 // Comment cmnt(masm_, "[ FunctionLiteral"); | |
| 2610 // | |
| 2611 // // Build the function boilerplate and instantiate it. | |
| 2612 // Handle<JSFunction> boilerplate = | |
| 2613 // Compiler::BuildBoilerplate(node, script_, this); | |
| 2614 // // Check for stack-overflow exception. | |
| 2615 // if (HasStackOverflow()) { | |
| 2616 // ASSERT(frame_->height() == original_height); | |
| 2617 // return; | |
| 2618 // } | |
| 2619 // InstantiateBoilerplate(boilerplate); | |
| 2620 // ASSERT(frame_->height() == original_height + 1); | |
| 2621 } | |
| 2622 | |
| 2623 | |
| 2624 void CodeGenerator::VisitFunctionBoilerplateLiteral( | |
| 2625 FunctionBoilerplateLiteral* node) { | |
| 2626 UNIMPLEMENTED_(); | |
| 2627 //#ifdef DEBUG | |
| 2628 //// printf("CodeGenerator::VisitFunctionBoilerplateLiteral\n"); | |
| 2629 //#endif | |
| 2630 //#ifdef DEBUG | |
| 2631 // int original_height = frame_->height(); | |
| 2632 //#endif | |
| 2633 // VirtualFrame::SpilledScope spilled_scope; | |
| 2634 // Comment cmnt(masm_, "[ FunctionBoilerplateLiteral"); | |
| 2635 // InstantiateBoilerplate(node->boilerplate()); | |
| 2636 // ASSERT(frame_->height() == original_height + 1); | |
| 2637 } | |
| 2638 | |
| 2639 | |
| 2640 void CodeGenerator::VisitConditional(Conditional* node) { | |
| 2641 UNIMPLEMENTED_(); | |
| 2642 //#ifdef DEBUG | |
| 2643 //// printf("CodeGenerator::VisitConditional\n"); | |
| 2644 //#endif | |
| 2645 //#ifdef DEBUG | |
| 2646 // int original_height = frame_->height(); | |
| 2647 //#endif | |
| 2648 // VirtualFrame::SpilledScope spilled_scope; | |
| 2649 // Comment cmnt(masm_, "[ Conditional"); | |
| 2650 // JumpTarget then; | |
| 2651 // JumpTarget else_; | |
| 2652 // LoadConditionAndSpill(node->condition(), &then, &else_, true); | |
| 2653 // if (has_valid_frame()) { | |
| 2654 // Branch(false, &else_); | |
| 2655 // __ nop(); // NOP_ADDED | |
| 2656 // } | |
| 2657 // if (has_valid_frame() || then.is_linked()) { | |
| 2658 // then.Bind(); | |
| 2659 // LoadAndSpill(node->then_expression()); | |
| 2660 // } | |
| 2661 // if (else_.is_linked()) { | |
| 2662 // JumpTarget exit; | |
| 2663 // if (has_valid_frame()) { | |
| 2664 // exit.Jump(); | |
| 2665 // __ nop(); // NOP_ADDED | |
| 2666 // } | |
| 2667 // else_.Bind(); | |
| 2668 // LoadAndSpill(node->else_expression()); | |
| 2669 // if (exit.is_linked()) exit.Bind(); | |
| 2670 // } | |
| 2671 // ASSERT(frame_->height() == original_height + 1); | |
| 2672 } | |
| 2673 | |
| 2674 | |
| 2675 void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) { | |
| 2676 UNIMPLEMENTED_(); | |
| 2677 //#ifdef DEBUG | |
| 2678 //// printf("CodeGenerator::LoadFromSlot\n"); | |
| 2679 //#endif | |
| 2680 // | |
| 2681 // VirtualFrame::SpilledScope spilled_scope; | |
| 2682 // if (slot->type() == Slot::LOOKUP) { | |
| 2683 // ASSERT(slot->var()->is_dynamic()); | |
| 2684 // | |
| 2685 // JumpTarget slow; | |
| 2686 // JumpTarget done; | |
| 2687 // | |
| 2688 // // Generate fast-case code for variables that might be shadowed by | |
| 2689 // // eval-introduced variables. Eval is used a lot without | |
| 2690 // // introducing variables. In those cases, we do not want to | |
| 2691 // // perform a runtime call for all variables in the scope | |
| 2692 // // containing the eval. | |
| 2693 // if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) { | |
| 2694 //// LoadFromGlobalSlotCheckExtensions(slot, typeof_state, r1, r2, &slow); | |
| 2695 // LoadFromGlobalSlotCheckExtensions(slot, typeof_state, a1, a2, &slow); | |
| 2696 // // If there was no control flow to slow, we can exit early. | |
| 2697 // if (!slow.is_linked()) { | |
| 2698 //// frame_->EmitPush(r0); | |
| 2699 // frame_->EmitPush(a0); | |
| 2700 // return; | |
| 2701 // } | |
| 2702 // | |
| 2703 // done.Jump(); | |
| 2704 // __ nop(); // NOP_ADDED | |
| 2705 // | |
| 2706 // } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) { | |
| 2707 // __ break_(0x55555); | |
| 2708 // Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot(); | |
| 2709 // // Only generate the fast case for locals that rewrite to slots. | |
| 2710 // // This rules out argument loads. | |
| 2711 // if (potential_slot != NULL) { | |
| 2712 // __ lw(a0, | |
| 2713 // ContextSlotOperandCheckExtensions(potential_slot, | |
| 2714 // a1, | |
| 2715 // a2, | |
| 2716 // &slow)); | |
| 2717 // if (potential_slot->var()->mode() == Variable::CONST) { | |
| 2718 // __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
| 2719 //// __ cmp(r0, ip); | |
| 2720 // __ LoadRoot(a0, Heap::kUndefinedValueRootIndex, eq, a0, Operand(ip)); | |
| 2721 // } | |
| 2722 // // There is always control flow to slow from | |
| 2723 // // ContextSlotOperandCheckExtensions so we have to jump around | |
| 2724 // // it. | |
| 2725 // done.Jump(); | |
| 2726 // __ nop(); // NOP_ADDED | |
| 2727 // } | |
| 2728 // } | |
| 2729 // | |
| 2730 // slow.Bind(); | |
| 2731 // frame_->EmitPush(cp); | |
| 2732 //// __ mov(r0, Operand(slot->var()->name())); | |
| 2733 //// frame_->EmitPush(r0); | |
| 2734 // __ li(a0, Operand(slot->var()->name())); | |
| 2735 // frame_->EmitPush(a0); | |
| 2736 // | |
| 2737 // if (typeof_state == INSIDE_TYPEOF) { | |
| 2738 // frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); | |
| 2739 // } else { | |
| 2740 // frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
| 2741 // } | |
| 2742 // __ nop(); | |
| 2743 // | |
| 2744 // done.Bind(); | |
| 2745 // frame_->EmitPush(v0); | |
| 2746 // | |
| 2747 // } else { | |
| 2748 // // Note: We would like to keep the assert below, but it fires because of | |
| 2749 // // some nasty code in LoadTypeofExpression() which should be removed... | |
| 2750 // // ASSERT(!slot->var()->is_dynamic()); | |
| 2751 // | |
| 2752 // // Special handling for locals allocated in registers. | |
| 2753 //// __ ldr(r0, SlotOperand(slot, r2)); | |
| 2754 // __ lw(a0, SlotOperand(slot, a2)); | |
| 2755 // frame_->EmitPush(a0); | |
| 2756 // if (slot->var()->mode() == Variable::CONST) { | |
| 2757 // // Const slots may contain 'the hole' value (the constant hasn't been | |
| 2758 // // initialized yet) which needs to be converted into the 'undefined' | |
| 2759 // // value. | |
| 2760 // Comment cmnt(masm_, "[ Unhole const"); | |
| 2761 //// frame_->EmitPop(r0); | |
| 2762 //// __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
| 2763 //// __ cmp(r0, ip); | |
| 2764 //// __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); | |
| 2765 //// frame_->EmitPush(r0); | |
| 2766 // frame_->EmitPop(a0); | |
| 2767 // __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
| 2768 // __ LoadRoot(a0, Heap::kUndefinedValueRootIndex, eq, a0, Operand(ip)); | |
| 2769 // frame_->EmitPush(a0); | |
| 2770 // } | |
| 2771 // } | |
| 2772 } | |
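The const-slot tail of the block above turns 'the hole' (an uninitialized const) into undefined before the value is pushed. A compilable sketch of that check; the sentinel constants are hypothetical stand-ins for V8's real heap roots:

    #include <cstdint>

    const uintptr_t kTheHoleSentinel = 0x1;    // hypothetical, not V8's value
    const uintptr_t kUndefinedSentinel = 0x2;  // hypothetical, not V8's value

    // A const slot may still hold the hole when the constant has not been
    // initialized yet; readers must observe undefined instead ("unhole").
    uintptr_t UnholeConst(uintptr_t slot_value) {
      return slot_value == kTheHoleSentinel ? kUndefinedSentinel : slot_value;
    }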
| 2773 | |
| 2774 | |
| 2775 void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot, | |
| 2776 TypeofState typeof_state, | |
| 2777 Register tmp, | |
| 2778 Register tmp2, | |
| 2779 JumpTarget* slow) { | |
| 2780 UNIMPLEMENTED(); | |
| 2781 __ break_(0x00666); | |
| 2782 __ nop(); | |
| 2783 // // Check that no extension objects have been created by calls to | |
| 2784 // // eval from the current scope to the global scope. | |
| 2785 // Register context = cp; | |
| 2786 // Scope* s = scope(); | |
| 2787 // while (s != NULL) { | |
| 2788 // if (s->num_heap_slots() > 0) { | |
| 2789 // if (s->calls_eval()) { | |
| 2790 // // Check that extension is NULL. | |
| 2791 // __ ldr(tmp2, ContextOperand(context, Context::EXTENSION_INDEX)); | |
| 2792 // __ tst(tmp2, tmp2); | |
| 2793 // slow->Branch(ne); | |
| 2794 // } | |
| 2795 // // Load next context in chain. | |
| 2796 // __ ldr(tmp, ContextOperand(context, Context::CLOSURE_INDEX)); | |
| 2797 // __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 2798 // context = tmp; | |
| 2799 // } | |
| 2800 // // If no outer scope calls eval, we do not need to check more | |
| 2801 // // context extensions. | |
| 2802 // if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break; | |
| 2803 // s = s->outer_scope(); | |
| 2804 // } | |
| 2805 // | |
| 2806 // if (s->is_eval_scope()) { | |
| 2807 // Label next, fast; | |
| 2808 // if (!context.is(tmp)) { | |
| 2809 // __ mov(tmp, Operand(context)); | |
| 2810 // } | |
| 2811 // __ bind(&next); | |
| 2812 // // Terminate at global context. | |
| 2813 // __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); | |
| 2814 // __ LoadRoot(ip, Heap::kGlobalContextMapRootIndex); | |
| 2815 // __ cmp(tmp2, ip); | |
| 2816 // __ b(eq, &fast); | |
| 2817 // // Check that extension is NULL. | |
| 2818 // __ ldr(tmp2, ContextOperand(tmp, Context::EXTENSION_INDEX)); | |
| 2819 // __ tst(tmp2, tmp2); | |
| 2820 // slow->Branch(ne); | |
| 2821 // // Load next context in chain. | |
| 2822 // __ ldr(tmp, ContextOperand(tmp, Context::CLOSURE_INDEX)); | |
| 2823 // __ ldr(tmp, FieldMemOperand(tmp, JSFunction::kContextOffset)); | |
| 2824 // __ b(&next); | |
| 2825 // __ bind(&fast); | |
| 2826 // } | |
| 2827 // | |
| 2828 // // All extension objects were empty and it is safe to use a global | |
| 2829 // // load IC call. | |
| 2830 // Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); | |
| 2831 // // Load the global object. | |
| 2832 // LoadGlobal(); | |
| 2833 // // Setup the name register. | |
| 2834 // Result name(r2); | |
| 2835 // __ mov(r2, Operand(slot->var()->name())); | |
| 2836 // // Call IC stub. | |
| 2837 // if (typeof_state == INSIDE_TYPEOF) { | |
| 2838 // frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET, &name, 0); | |
| 2839 // } else { | |
| 2840 // frame_->CallCodeObject(ic, RelocInfo::CODE_TARGET_CONTEXT, &name, 0); | |
| 2841 // } | |
| 2842 // | |
| 2843 // // Drop the global object. The result is in r0. | |
| 2844 // frame_->Drop(); | |
| 2845 } | |
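The commented-out body walks the context chain and branches to the slow path whenever a context carries an extension object, i.e. eval introduced bindings in that scope. A self-contained sketch of the traversal over a plain linked structure; the types are illustrative, not V8's:

    struct Context {
      Context* outer;    // next context in the closure chain
      void* extension;   // non-null when eval added bindings in this scope
      bool is_global;
    };

    // True when no context between here and the global one has an extension
    // object, in which case the fast global-load IC path is safe.
    bool NoExtensionsUpToGlobal(const Context* ctx) {
      for (; ctx != nullptr && !ctx->is_global; ctx = ctx->outer) {
        if (ctx->extension != nullptr) return false;  // must take slow path
      }
      return true;
    }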
| 2846 | |
| 2847 | |
| 2848 void CodeGenerator::VisitSlot(Slot* node) { | |
| 2849 UNIMPLEMENTED_(); | |
| 2850 //#ifdef DEBUG | |
| 2851 //// printf("CodeGenerator::VisitSlot\n"); | |
| 2852 //#endif | |
| 2853 // | |
| 2854 //#ifdef DEBUG | |
| 2855 // int original_height = frame_->height(); | |
| 2856 //#endif | |
| 2857 // VirtualFrame::SpilledScope spilled_scope; | |
| 2858 // Comment cmnt(masm_, "[ Slot"); | |
| 2859 // LoadFromSlot(node, typeof_state()); | |
| 2860 // ASSERT(frame_->height() == original_height + 1); | |
| 2861 } | |
| 2862 | |
| 2863 | |
| 2864 void CodeGenerator::VisitVariableProxy(VariableProxy* node) { | |
| 2865 UNIMPLEMENTED_(); | |
| 2866 //#ifdef DEBUG | |
| 2867 //// printf("CodeGenerator::VisitVariableProxy\n"); | |
| 2868 //#endif | |
| 2869 // | |
| 2870 //#ifdef DEBUG | |
| 2871 // int original_height = frame_->height(); | |
| 2872 //#endif | |
| 2873 // VirtualFrame::SpilledScope spilled_scope; | |
| 2874 // Comment cmnt(masm_, "[ VariableProxy"); | |
| 2875 // | |
| 2876 // Variable* var = node->var(); | |
| 2877 // Expression* expr = var->rewrite(); | |
| 2878 // if (expr != NULL) { | |
| 2879 // Visit(expr); | |
| 2880 // } else { | |
| 2881 // ASSERT(var->is_global()); | |
| 2882 // Reference ref(this, node); | |
| 2883 // ref.GetValueAndSpill(); | |
| 2884 // } | |
| 2885 // ASSERT(frame_->height() == original_height + 1); | |
| 2886 } | |
| 2887 | |
| 2888 | |
| 2889 void CodeGenerator::VisitLiteral(Literal* node) { | |
| 2890 UNIMPLEMENTED_(); | |
| 2891 //#ifdef DEBUG | |
| 2892 //// printf("CodeGenerator::VisitLiteral\n"); | |
| 2893 //#endif | |
| 2894 // | |
| 2895 //#ifdef DEBUG | |
| 2896 // int original_height = frame_->height(); | |
| 2897 //#endif | |
| 2898 // VirtualFrame::SpilledScope spilled_scope; | |
| 2899 // Comment cmnt(masm_, "[ Literal"); | |
| 2900 // __ li(t0, Operand(node->handle())); | |
| 2901 // frame_->EmitPush(t0); | |
| 2902 // ASSERT(frame_->height() == original_height + 1); | |
| 2903 } | |
| 2904 | |
| 2905 | |
| 2906 void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) { | |
| 2907 UNIMPLEMENTED(); | |
| 2908 __ break_(0x00666); | |
| 2909 __ nop(); | |
| 2910 //#ifdef DEBUG | |
| 2911 // int original_height = frame_->height(); | |
| 2912 //#endif | |
| 2913 // VirtualFrame::SpilledScope spilled_scope; | |
| 2914 // Comment cmnt(masm_, "[ RegExp Literal"); | |
| 2915 // | |
| 2916 // // Retrieve the literal array and check the allocated entry. | |
| 2917 // | |
| 2918 // // Load the function of this activation. | |
| 2919 // __ ldr(r1, frame_->Function()); | |
| 2920 // | |
| 2921 // // Load the literals array of the function. | |
| 2922 // __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); | |
| 2923 // | |
| 2924 // // Load the literal at the ast saved index. | |
| 2925 // int literal_offset = | |
| 2926 // FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | |
| 2927 // __ ldr(r2, FieldMemOperand(r1, literal_offset)); | |
| 2928 // | |
| 2929 // JumpTarget done; | |
| 2930 // __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 2931 // __ cmp(r2, ip); | |
| 2932 // done.Branch(ne); | |
| 2933 // | |
| 2934 // // If the entry is undefined we call the runtime system to compute | |
| 2935 // // the literal. | |
| 2936 // frame_->EmitPush(r1); // literal array (0) | |
| 2937 // __ mov(r0, Operand(Smi::FromInt(node->literal_index()))); | |
| 2938 // frame_->EmitPush(r0); // literal index (1) | |
| 2939 // __ mov(r0, Operand(node->pattern())); // RegExp pattern (2) | |
| 2940 // frame_->EmitPush(r0); | |
| 2941 // __ mov(r0, Operand(node->flags())); // RegExp flags (3) | |
| 2942 // frame_->EmitPush(r0); | |
| 2943 // frame_->CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); | |
| 2944 // __ mov(r2, Operand(r0)); | |
| 2945 // | |
| 2946 // done.Bind(); | |
| 2947 // // Push the literal. | |
| 2948 // frame_->EmitPush(r2); | |
| 2949 // ASSERT(frame_->height() == original_height + 1); | |
| 2950 } | |
| 2951 | |
| 2952 | |
| 2953 // This deferred code stub will be used for creating the boilerplate | |
| 2954 // by calling Runtime_CreateObjectLiteralBoilerplate. | |
| 2955 // Each created boilerplate is stored in the JSFunction and they are | |
| 2956 // therefore context dependent. | |
| 2957 class DeferredObjectLiteral: public DeferredCode { | |
| 2958 public: | |
| 2959 explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) { | |
| 2960 set_comment("[ DeferredObjectLiteral"); | |
| 2961 } | |
| 2962 | |
| 2963 virtual void Generate(); | |
| 2964 | |
| 2965 private: | |
| 2966 ObjectLiteral* node_; | |
| 2967 }; | |
| 2968 | |
| 2969 | |
| 2970 void DeferredObjectLiteral::Generate() { | |
| 2971 UNIMPLEMENTED_(); | |
| 2972 // // Argument is passed in a1. | |
| 2973 // | |
| 2974 // // If the entry is undefined we call the runtime system to compute | |
| 2975 // // the literal. | |
| 2976 // // Literal array (0). | |
| 2977 //// __ push(r1); | |
| 2978 // __ push(a1); | |
| 2979 // // Literal index (1). | |
| 2980 //// __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); | |
| 2981 //// __ push(r0); | |
| 2982 // __ li(t0, Operand(Smi::FromInt(node_->literal_index()))); | |
| 2983 // __ push(t0); | |
| 2984 // // Constant properties (2). | |
| 2985 //// __ mov(r0, Operand(node_->constant_properties())); | |
| 2986 //// __ push(r0); | |
| 2987 // __ li(t0, Operand(node_->constant_properties())); | |
| 2988 // __ push(t0); | |
| 2989 // __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3); | |
| 2990 // __ nop(); // NOP_ADDED | |
| 2991 //// __ mov(r2, Operand(r0)); | |
| 2992 // __ mov(a2, v0); | |
| 2993 // // Result is returned in a2. | |
| 2994 } | |
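For readers new to the DeferredCode pattern this stub uses: the inline fast path checks the cached boilerplate and only falls into the out-of-line Generate() body on a miss. A rough C++ analogue of that fast/slow split; the map-based cache is hypothetical (V8 keys boilerplates by literal index in the function's literals array):

    #include <unordered_map>

    struct Boilerplate { int literal_index; };
    static std::unordered_map<int, Boilerplate> g_boilerplates;  // stand-in

    Boilerplate* CreateBoilerplateSlow(int literal_index) {
      // Deferred path: reached only when the cached entry is undefined.
      g_boilerplates[literal_index] = Boilerplate{literal_index};
      return &g_boilerplates[literal_index];
    }

    Boilerplate* GetBoilerplate(int literal_index) {
      auto it = g_boilerplates.find(literal_index);
      if (it != g_boilerplates.end()) return &it->second;  // fast, inline path
      return CreateBoilerplateSlow(literal_index);  // deferred->BindExit() after
    }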
| 2995 | |
| 2996 | |
| 2997 void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) { | |
| 2998 UNIMPLEMENTED_(); | |
| 2999 //#ifdef DEBUG | |
| 3000 //// printf("CodeGenerator::VisitObjectLiteral\n"); | |
| 3001 //#endif | |
| 3002 // | |
| 3003 //#ifdef DEBUG | |
| 3004 // int original_height = frame_->height(); | |
| 3005 //#endif | |
| 3006 // VirtualFrame::SpilledScope spilled_scope; | |
| 3007 // Comment cmnt(masm_, "[ ObjectLiteral"); | |
| 3008 // | |
| 3009 // DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node); | |
| 3010 // | |
| 3011 // // Retrieve the literal array and check the allocated entry. | |
| 3012 // | |
| 3013 // // Load the function of this activation. | |
| 3014 //// __ ldr(r1, frame_->Function()); | |
| 3015 // __ lw(a1, frame_->Function()); | |
| 3016 // | |
| 3017 // // Load the literals array of the function. | |
| 3018 //// __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); | |
| 3019 // __ lw(a1, FieldMemOperand(a1, JSFunction::kLiteralsOffset)); | |
| 3020 // | |
| 3021 // // Load the literal at the ast saved index. | |
| 3022 // int literal_offset = | |
| 3023 // FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | |
| 3024 //// __ ldr(r2, FieldMemOperand(r1, literal_offset)); | |
| 3025 // __ lw(a2, FieldMemOperand(a1, literal_offset)); | |
| 3026 // | |
| 3027 // // Check whether we need to materialize the object literal boilerplate. | |
| 3028 // // If so, jump to the deferred code. | |
| 3029 // __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 3030 //// __ cmp(r2, Operand(ip)); | |
| 3031 //// deferred->Branch(eq); | |
| 3032 // deferred->Branch(eq, a2, Operand(ip)); | |
| 3033 // __ nop(); // NOP_ADDED | |
| 3034 // deferred->BindExit(); | |
| 3035 // | |
| 3036 // // Push the object literal boilerplate. | |
| 3037 //// frame_->EmitPush(r2); | |
| 3038 // frame_->EmitPush(a2); | |
| 3039 // | |
| 3040 // // Clone the boilerplate object. | |
| 3041 // Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; | |
| 3042 // if (node->depth() == 1) { | |
| 3043 // clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; | |
| 3044 // } | |
| 3045 // frame_->CallRuntime(clone_function_id, 1); | |
| 3046 // __ nop(); // NOP_ADDED | |
| 3047 //// frame_->EmitPush(r0); // save the result | |
| 3048 // frame_->EmitPush(v0); // save the result | |
| 3049 // // v0: cloned object literal | |
| 3050 // | |
| 3051 // for (int i = 0; i < node->properties()->length(); i++) { | |
| 3052 // ObjectLiteral::Property* property = node->properties()->at(i); | |
| 3053 // Literal* key = property->key(); | |
| 3054 // Expression* value = property->value(); | |
| 3055 // switch (property->kind()) { | |
| 3056 // case ObjectLiteral::Property::CONSTANT: | |
| 3057 // break; | |
| 3058 // case ObjectLiteral::Property::MATERIALIZED_LITERAL: | |
| 3059 // if (CompileTimeValue::IsCompileTimeValue(property->value())) break; | |
| 3060 // // else fall through | |
| 3061 // case ObjectLiteral::Property::COMPUTED: // fall through | |
| 3062 // case ObjectLiteral::Property::PROTOTYPE: { | |
| 3063 //// frame_->EmitPush(r0); // dup the result | |
| 3064 // frame_->EmitPush(v0); // dup the result | |
| 3065 // LoadAndSpill(key); | |
| 3066 // LoadAndSpill(value); | |
| 3067 // frame_->CallRuntime(Runtime::kSetProperty, 3); | |
| 3068 // __ nop(); // NOP_ADDED | |
| 3069 // // restore v0 | |
| 3070 //// __ ldr(r0, frame_->Top()); | |
| 3071 // __ lw(v0, frame_->Top()); | |
| 3072 // break; | |
| 3073 // } | |
| 3074 // case ObjectLiteral::Property::SETTER: { | |
| 3075 //// frame_->EmitPush(r0); | |
| 3076 // frame_->EmitPush(v0); | |
| 3077 // LoadAndSpill(key); | |
| 3078 //// __ mov(r0, Operand(Smi::FromInt(1))); | |
| 3079 // __ li(a0, Operand(Smi::FromInt(1))); | |
| 3080 //// frame_->EmitPush(r0); | |
| 3081 // frame_->EmitPush(a0); | |
| 3082 // LoadAndSpill(value); | |
| 3083 // frame_->CallRuntime(Runtime::kDefineAccessor, 4); | |
| 3084 // __ nop(); // NOP_ADDED | |
| 3085 //// __ ldr(r0, frame_->Top()); | |
| 3086 // __ lw(v0, frame_->Top()); | |
| 3087 // break; | |
| 3088 // } | |
| 3089 // case ObjectLiteral::Property::GETTER: { | |
| 3090 //// frame_->EmitPush(r0); | |
| 3091 // frame_->EmitPush(v0); | |
| 3092 // LoadAndSpill(key); | |
| 3093 //// __ mov(r0, Operand(Smi::FromInt(0))); | |
| 3094 // __ li(a0, Operand(Smi::FromInt(0))); | |
| 3095 //// frame_->EmitPush(r0); | |
| 3096 // frame_->EmitPush(a0); | |
| 3097 // LoadAndSpill(value); | |
| 3098 // frame_->CallRuntime(Runtime::kDefineAccessor, 4); | |
| 3099 // __ nop(); // NOP_ADDED | |
| 3100 //// __ ldr(r0, frame_->Top()); | |
| 3101 // __ lw(v0, frame_->Top()); | |
| 3102 // break; | |
| 3103 // } | |
| 3104 // } | |
| 3105 // } | |
| 3106 // ASSERT(frame_->height() == original_height + 1); | |
| 3107 } | |
| 3108 | |
| 3109 | |
| 3110 // This deferred code stub will be used for creating the boilerplate | |
| 3111 // by calling Runtime_CreateArrayLiteralBoilerplate. | |
| 3112 // Each created boilerplate is stored in the JSFunction and they are | |
| 3113 // therefore context dependent. | |
| 3114 class DeferredArrayLiteral: public DeferredCode { | |
| 3115 public: | |
| 3116 explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) { | |
| 3117 set_comment("[ DeferredArrayLiteral"); | |
| 3118 } | |
| 3119 | |
| 3120 virtual void Generate(); | |
| 3121 | |
| 3122 private: | |
| 3123 ArrayLiteral* node_; | |
| 3124 }; | |
| 3125 | |
| 3126 | |
| 3127 void DeferredArrayLiteral::Generate() { | |
| 3128 UNIMPLEMENTED_(); | |
| 3129 // // Argument is passed in a1. | |
| 3130 // | |
| 3131 // // If the entry is undefined we call the runtime system to compute | |
| 3132 // // the literal. | |
| 3133 // // Literal array (0). | |
| 3134 //// __ push(r1); | |
| 3135 // __ push(a1); | |
| 3136 // // Literal index (1). | |
| 3137 //// __ mov(r0, Operand(Smi::FromInt(node_->literal_index()))); | |
| 3138 //// __ push(r0); | |
| 3139 // __ li(a0, Operand(Smi::FromInt(node_->literal_index()))); | |
| 3140 // __ push(a0); | |
| 3141 // // Constant properties (2). | |
| 3142 //// __ mov(r0, Operand(node_->literals())); | |
| 3143 //// __ push(r0); | |
| 3144 // __ li(a0, Operand(node_->literals())); | |
| 3145 // __ push(a0); | |
| 3146 // __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3); | |
| 3147 // __ nop(); // NOP_ADDED | |
| 3148 //// __ mov(r2, Operand(r0)); | |
| 3149 // __ mov(a2, v0); | |
| 3150 // // Result is returned in a2. | |
| 3151 } | |
| 3152 | |
| 3153 | |
| 3154 void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) { | |
| 3155 UNIMPLEMENTED_(); | |
| 3156 //#ifdef DEBUG | |
| 3157 //// printf("CodeGenerator::VisitArrayLiteral\n"); | |
| 3158 //#endif | |
| 3159 // | |
| 3160 //#ifdef DEBUG | |
| 3161 // int original_height = frame_->height(); | |
| 3162 //#endif | |
| 3163 // VirtualFrame::SpilledScope spilled_scope; | |
| 3164 // Comment cmnt(masm_, "[ ArrayLiteral"); | |
| 3165 // | |
| 3166 // DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node); | |
| 3167 // | |
| 3168 // // Retrieve the literal array and check the allocated entry. | |
| 3169 // | |
| 3170 // // Load the function of this activation. | |
| 3171 //// __ ldr(r1, frame_->Function()); | |
| 3172 // __ lw(a1, frame_->Function()); | |
| 3173 // | |
| 3174 // // Load the literals array of the function. | |
| 3175 //// __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset)); | |
| 3176 // __ lw(a1, FieldMemOperand(a1, JSFunction::kLiteralsOffset)); | |
| 3177 // | |
| 3178 // // Load the literal at the ast saved index. | |
| 3179 // int literal_offset = | |
| 3180 // FixedArray::kHeaderSize + node->literal_index() * kPointerSize; | |
| 3181 //// __ ldr(r2, FieldMemOperand(r1, literal_offset)); | |
| 3182 // __ lw(a2, FieldMemOperand(a1, literal_offset)); | |
| 3183 // | |
| 3184 // // Check whether we need to materialize the array literal boilerplate. | |
| 3185 // // If so, jump to the deferred code. | |
| 3186 // __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 3187 //// __ cmp(r2, Operand(ip)); | |
| 3188 //// deferred->Branch(eq); | |
| 3189 // deferred->Branch(eq, a2, Operand(ip)); | |
| 3190 // __ nop(); // NOP_ADDED | |
| 3191 // deferred->BindExit(); | |
| 3192 // | |
| 3193 // // Push the array literal boilerplate. | |
| 3194 //// frame_->EmitPush(r2); | |
| 3195 // frame_->EmitPush(a2); | |
| 3196 // | |
| 3197 // // Clone the boilerplate object. | |
| 3198 // Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate; | |
| 3199 // if (node->depth() == 1) { | |
| 3200 // clone_function_id = Runtime::kCloneShallowLiteralBoilerplate; | |
| 3201 // } | |
| 3202 // | |
| 3203 // frame_->CallRuntime(clone_function_id, 1); | |
| 3204 // __ nop(); // NOP_ADDED | |
| 3205 // | |
| 3206 // frame_->EmitPush(v0); // save the result | |
| 3207 // // v0: cloned array literal | |
| 3208 // | |
| 3209 // // Generate code to set the elements in the array that are not | |
| 3210 // // literals. | |
| 3211 // for (int i = 0; i < node->values()->length(); i++) { | |
| 3212 // Expression* value = node->values()->at(i); | |
| 3213 // | |
| 3214 // // If value is a literal the property value is already set in the | |
| 3215 // // boilerplate object. | |
| 3216 // if (value->AsLiteral() != NULL) continue; | |
| 3217 // // If value is a materialized literal the property value is already set | |
| 3218 // // in the boilerplate object if it is simple. | |
| 3219 // if (CompileTimeValue::IsCompileTimeValue(value)) continue; | |
| 3220 //// | |
| 3221 // // The property must be set by generated code. | |
| 3222 // LoadAndSpill(value); | |
| 3223 // frame_->EmitPop(a0); | |
| 3224 // | |
| 3225 // // Fetch the object literal. | |
| 3226 //// __ ldr(r1, frame_->Top()); | |
| 3227 // __ lw(a1, frame_->Top()); | |
| 3228 // // Get the elements array. | |
| 3229 //// __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); | |
| 3230 // __ lw(a1, FieldMemOperand(a1, JSObject::kElementsOffset)); | |
| 3231 // | |
| 3232 // // Write to the indexed properties array. | |
| 3233 // int offset = i * kPointerSize + FixedArray::kHeaderSize; | |
| 3234 //// __ str(r0, FieldMemOperand(r1, offset)); | |
| 3235 // __ sw(a0, FieldMemOperand(a1, offset)); | |
| 3236 //// | |
| 3237 // // Update the write barrier for the array address. | |
| 3238 //// __ mov(r3, Operand(offset)); | |
| 3239 //// __ RecordWrite(r1, r3, r2); | |
| 3240 // __ li(a3, Operand(offset)); | |
| 3241 // __ RecordWrite(a1, a3, a2); | |
| 3242 // } | |
| 3243 // ASSERT(frame_->height() == original_height + 1); | |
| 3244 } | |
| 3245 | |
| 3246 | |
| 3247 void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) { | |
| 3248 UNIMPLEMENTED_(); | |
| 3249 //#ifdef DEBUG | |
| 3250 // int original_height = frame_->height(); | |
| 3251 //#endif | |
| 3252 // VirtualFrame::SpilledScope spilled_scope; | |
| 3253 // // Call runtime routine to allocate the catch extension object and | |
| 3254 // // assign the exception value to the catch variable. | |
| 3255 // Comment cmnt(masm_, "[ CatchExtensionObject"); | |
| 3256 // LoadAndSpill(node->key()); | |
| 3257 // LoadAndSpill(node->value()); | |
| 3258 // frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2); | |
| 3259 // __ nop(); // NOP_ADDED | |
| 3260 // frame_->EmitPush(v0); | |
| 3261 // ASSERT(frame_->height() == original_height + 1); | |
| 3262 } | |
| 3263 | |
| 3264 | |
| 3265 void CodeGenerator::VisitAssignment(Assignment* node) { | |
| 3266 UNIMPLEMENTED_(); | |
| 3267 //#ifdef DEBUG | |
| 3268 //// printf("CodeGenerator::VisitAssignment\n"); | |
| 3269 //#endif | |
| 3270 //#ifdef DEBUG | |
| 3271 // int original_height = frame_->height(); | |
| 3272 //#endif | |
| 3273 // VirtualFrame::SpilledScope spilled_scope; | |
| 3274 // Comment cmnt(masm_, "[ Assignment"); | |
| 3275 // | |
| 3276 // { Reference target(this, node->target()); | |
| 3277 // if (target.is_illegal()) { | |
| 3278 // // Fool the virtual frame into thinking that we left the assignment's | |
| 3279 // // value on the frame. | |
| 3280 // __ li(a0, Operand(Smi::FromInt(0))); | |
| 3281 // frame_->EmitPush(a0); | |
| 3282 // ASSERT(frame_->height() == original_height + 1); | |
| 3283 // return; | |
| 3284 // } | |
| 3285 // | |
| 3286 // if (node->op() == Token::ASSIGN || | |
| 3287 // node->op() == Token::INIT_VAR || | |
| 3288 // node->op() == Token::INIT_CONST) { | |
| 3289 // LoadAndSpill(node->value()); | |
| 3290 // | |
| 3291 // } else { | |
| 3292 // // +=, *= and similar binary assignments. | |
| 3293 // // Get the old value of the lhs. | |
| 3294 // target.GetValueAndSpill(); | |
| 3295 // Literal* literal = node->value()->AsLiteral(); | |
| 3296 // bool overwrite = | |
| 3297 // (node->value()->AsBinaryOperation() != NULL && | |
| 3298 // node->value()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
| 3299 // if (literal != NULL && literal->handle()->IsSmi()) { | |
| 3300 // SmiOperation(node->binary_op(), | |
| 3301 // literal->handle(), | |
| 3302 // false, | |
| 3303 // overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 3304 // frame_->EmitPush(v0); | |
| 3305 // | |
| 3306 // } else { | |
| 3307 // LoadAndSpill(node->value()); | |
| 3308 // GenericBinaryOperation(node->binary_op(), | |
| 3309 // overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 3310 // frame_->EmitPush(v0); | |
| 3311 // } | |
| 3312 // } | |
| 3313 // | |
| 3314 // Variable* var = node->target()->AsVariableProxy()->AsVariable(); | |
| 3315 // if (var != NULL && | |
| 3316 // (var->mode() == Variable::CONST) && | |
| 3317 // node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) { | |
| 3318 // // Assignment ignored - leave the value on the stack. | |
| 3319 // | |
| 3320 // } else { | |
| 3321 // CodeForSourcePosition(node->position()); | |
| 3322 // if (node->op() == Token::INIT_CONST) { | |
| 3323 // // Dynamic constant initializations must use the function context | |
| 3324 // // and initialize the actual constant declared. Dynamic variable | |
| 3325 // // initializations are simply assignments and use SetValue. | |
| 3326 // target.SetValue(CONST_INIT); | |
| 3327 // } else { | |
| 3328 // target.SetValue(NOT_CONST_INIT); | |
| 3329 // } | |
| 3330 // } | |
| 3331 // } | |
| 3332 // ASSERT(frame_->height() == original_height + 1); | |
| 3333 } | |
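The compound-assignment branch above ('+=, *= and similar') follows the standard desugaring: fetch the old value, apply the binary op, store the result, and leave the value on the frame. A one-function sketch with an int standing in for the stack slot (names hypothetical):

    int CompoundAddAssign(int& target, int rhs) {
      int old_value = target;        // target.GetValueAndSpill()
      int result = old_value + rhs;  // GenericBinaryOperation(Token::ADD, ...)
      target = result;               // target.SetValue(NOT_CONST_INIT)
      return result;                 // value stays on the frame
    }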
| 3334 | |
| 3335 | |
| 3336 void CodeGenerator::VisitThrow(Throw* node) { | |
| 3337 UNIMPLEMENTED_(); | |
| 3338 //#ifdef DEBUG | |
| 3339 //// printf("CodeGenerator::VisitThrow\n"); | |
| 3340 //#endif | |
| 3341 //#ifdef DEBUG | |
| 3342 // int original_height = frame_->height(); | |
| 3343 //#endif | |
| 3344 // VirtualFrame::SpilledScope spilled_scope; | |
| 3345 // Comment cmnt(masm_, "[ Throw"); | |
| 3346 // | |
| 3347 // LoadAndSpill(node->exception()); | |
| 3348 // CodeForSourcePosition(node->position()); | |
| 3349 // frame_->CallRuntime(Runtime::kThrow, 1); | |
| 3350 // __ nop(); // NOP_ADDED | |
| 3351 // frame_->EmitPush(v0); | |
| 3352 // ASSERT(frame_->height() == original_height + 1); | |
| 3353 } | |
| 3354 | |
| 3355 | |
| 3356 void CodeGenerator::VisitProperty(Property* node) { | |
| 3357 UNIMPLEMENTED_(); | |
| 3358 //#ifdef DEBUG | |
| 3359 //// printf("CodeGenerator::VisitProperty\n"); | |
| 3360 //#endif | |
| 3361 //#ifdef DEBUG | |
| 3362 // int original_height = frame_->height(); | |
| 3363 //#endif | |
| 3364 // VirtualFrame::SpilledScope spilled_scope; | |
| 3365 // Comment cmnt(masm_, "[ Property"); | |
| 3366 // | |
| 3367 // { Reference property(this, node); | |
| 3368 // property.GetValueAndSpill(); | |
| 3369 // } | |
| 3370 // ASSERT(frame_->height() == original_height + 1); | |
| 3371 } | |
| 3372 | |
| 3373 | |
| 3374 void CodeGenerator::VisitCall(Call* node) { | |
| 3375 UNIMPLEMENTED_(); | |
| 3376 //#ifdef DEBUG | |
| 3377 //// printf("CodeGenerator::VisitCall\n"); | |
| 3378 //#endif | |
| 3379 //#ifdef DEBUG | |
| 3380 // int original_height = frame_->height(); | |
| 3381 //#endif | |
| 3382 // VirtualFrame::SpilledScope spilled_scope; | |
| 3383 // Comment cmnt(masm_, "[ Call"); | |
| 3384 // | |
| 3385 // Expression* function = node->expression(); | |
| 3386 // ZoneList<Expression*>* args = node->arguments(); | |
| 3387 // int arg_count = args->length(); | |
| 3388 // | |
| 3389 // // Standard function call. | |
| 3390 // // Check if the function is a variable or a property. | |
| 3391 // Variable* var = function->AsVariableProxy()->AsVariable(); | |
| 3392 // Property* property = function->AsProperty(); | |
| 3393 // | |
| 3394 // // ------------------------------------------------------------------------ | |
| 3395 // // Fast-case: Use inline caching. | |
| 3396 // // --- | |
| 3397 // // According to ECMA-262, section 11.2.3, page 44, the function to call | |
| 3398 // // must be resolved after the arguments have been evaluated. The IC code | |
| 3399 // // automatically handles this by loading the arguments before the function | |
| 3400 // // is resolved in cache misses (this also holds for megamorphic calls). | |
| 3401 // // ------------------------------------------------------------------------ | |
| 3402 // | |
| 3403 // if (var != NULL && var->is_possibly_eval()) { | |
| 3404 //#ifdef DEBUG | |
| 3405 //// printf("(var != NULL && var->is_possibly_eval())\n"); | |
| 3406 //#endif | |
| 3407 // // ---------------------------------- | |
| 3408 // // JavaScript example: 'eval(arg)' // eval is not known to be shadowed | |
| 3409 // // ---------------------------------- | |
| 3410 // | |
| 3411 // __ break_(0x3378); // never tested | |
| 3412 // | |
| 3413 // // In a call to eval, we first call %ResolvePossiblyDirectEval to | |
| 3414 // // resolve the function we need to call and the receiver of the | |
| 3415 // // call. Then we call the resolved function using the given | |
| 3416 // // arguments. | |
| 3417 // // Prepare stack for call to resolved function. | |
| 3418 // | |
| 3419 // __ SetupAlignedCall(t0, arg_count); | |
| 3420 // | |
| 3421 // LoadAndSpill(function); | |
| 3422 // __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); | |
| 3423 // frame_->EmitPush(t2); // Slot for receiver | |
| 3424 // | |
| 3425 // for (int i = 0; i < arg_count; i++) { | |
| 3426 // LoadAndSpill(args->at(i)); | |
| 3427 // } | |
| 3428 // | |
| 3429 // // Prepare stack for call to ResolvePossiblyDirectEval. | |
| 3430 // __ lw(a1, MemOperand(sp, arg_count * kPointerSize + kPointerSize)); | |
| 3431 // frame_->EmitPush(a1); | |
| 3432 // if (arg_count > 0) { | |
| 3433 // __ lw(a1, MemOperand(sp, arg_count * kPointerSize)); | |
| 3434 // frame_->EmitPush(a1); | |
| 3435 // } else { | |
| 3436 // frame_->EmitPush(t2); | |
| 3437 // } | |
| 3438 // | |
| 3439 // // Resolve the call. | |
| 3440 // frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2); | |
| 3441 // __ nop(); // NOP_ADDED | |
| 3442 // | |
| 3443 // // On return we do not use ReturnFromAlignedCall() because we will call the | |
| 3444 // // resolved function below. Instead we remove the 2 extra args pushed on the | |
| 3445 // // stack. | |
| 3446 // | |
| 3447 //// TOCHECK: Was this here for a fixup? | |
| 3448 //// __ addiu(sp, sp, Operand(4)); | |
| 3449 // | |
| 3450 // // Touch up stack with the right values for the function and the receiver. | |
| 3451 // __ lw(a1, FieldMemOperand(a0, FixedArray::kHeaderSize)); | |
| 3452 // __ sw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); | |
| 3453 // __ lw(a1, FieldMemOperand(a0, FixedArray::kHeaderSize + kPointerSize)); | |
| 3454 // __ sw(a1, MemOperand(sp, arg_count * kPointerSize)); | |
| 3455 // | |
| 3456 // // Call the function. | |
| 3457 // CodeForSourcePosition(node->position()); | |
| 3458 // | |
| 3459 // InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 3460 // CallFunctionStub call_function(arg_count, in_loop); | |
| 3461 // frame_->CallStub(&call_function, arg_count + 1); | |
| 3462 // __ nop(); // NOP_ADDED | |
| 3463 // | |
| 3464 // __ ReturnFromAlignedCall(); | |
| 3465 // | |
| 3466 // __ lw(cp, frame_->Context()); | |
| 3467 // frame_->EmitPush(v0); | |
| 3468 // | |
| 3469 //// __ ldr(cp, frame_->Context()); | |
| 3470 //// // Remove the function from the stack. | |
| 3471 //// frame_->Drop(); | |
| 3472 //// frame_->EmitPush(r0); | |
| 3473 // | |
| 3474 // } else if (var != NULL && !var->is_this() && var->is_global()) { | |
| 3475 //#ifdef DEBUG | |
| 3476 //// printf("(var != NULL && !var->is_this() && var->is_global())\n"); | |
| 3477 //#endif | |
| 3478 // // ---------------------------------- | |
| 3479 // // JavaScript example: 'foo(1, 2, 3)' // foo is global | |
| 3480 // // ---------------------------------- | |
| 3481 // | |
| 3482 // | |
| 3483 // // We need sp to be 8 bytes aligned when calling the stub. | |
| 3484 // __ SetupAlignedCall(t3, arg_count); | |
| 3485 // | |
| 3486 // // Push the name of the function and the receiver onto the stack. | |
| 3487 // __ li(a0, Operand(var->name())); | |
| 3488 // frame_->EmitPush(a0); | |
| 3489 // | |
| 3490 // // Pass the global object as the receiver and let the IC stub | |
| 3491 // // patch the stack to use the global proxy as 'this' in the | |
| 3492 // // invoked function. | |
| 3493 // LoadGlobal(); | |
| 3494 // | |
| 3495 // // Load the arguments. | |
| 3496 // for (int i = 0; i < arg_count; i++) { | |
| 3497 // LoadAndSpill(args->at(i)); | |
| 3498 // } | |
| 3499 // | |
| 3500 // | |
| 3501 // // Setup the receiver register and call the IC initialization code. | |
| 3502 // InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 3503 // Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop); | |
| 3504 // CodeForSourcePosition(node->position()); | |
| 3505 // frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET_CONTEXT, | |
| 3506 // arg_count + 1); | |
| 3507 //// __ addiu(sp, sp, Operand(-StandardFrameConstants::kRArgsSlotsSize)); | |
| 3508 // __ nop(); | |
| 3509 // __ ReturnFromAlignedCall(); | |
| 3510 // __ lw(cp, frame_->Context()); | |
| 3511 // // Remove the function from the stack. | |
| 3512 // frame_->DropFromVFrameOnly(); | |
| 3513 // frame_->EmitPush(v0); | |
| 3514 // | |
| 3515 // } else if (var != NULL && var->slot() != NULL && | |
| 3516 // var->slot()->type() == Slot::LOOKUP) { | |
| 3517 //#ifdef DEBUG | |
| 3518 //// printf("(var != NULL && var->slot() != NULL &&var->slot()->type() == Slo t::LOOKUP)\n"); | |
| 3519 //#endif | |
| 3520 // // ---------------------------------- | |
| 3521 // // JavaScript example: 'with (obj) foo(1, 2, 3)' // foo is in obj | |
| 3522 // // ---------------------------------- | |
| 3523 // | |
| 3524 // // Load the function | |
| 3525 //// frame_->EmitPush(cp); | |
| 3526 //// __ mov(r0, Operand(var->name())); | |
| 3527 //// frame_->EmitPush(r0); | |
| 3528 //// frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
| 3529 //// // r0: slot value; r1: receiver | |
| 3530 // frame_->EmitPush(cp); | |
| 3531 // __ li(a0, Operand(var->name())); | |
| 3532 // frame_->EmitPush(a0); | |
| 3533 // frame_->CallRuntime(Runtime::kLoadContextSlot, 2); | |
| 3534 // __ nop(); | |
| 3535 // // a0: slot value; a1: receiver | |
| 3536 // | |
| 3537 // // Load the receiver. | |
| 3538 //// frame_->EmitPush(r0); // function | |
| 3539 //// frame_->EmitPush(r1); // receiver | |
| 3540 // frame_->EmitPush(a0); // function | |
| 3541 // frame_->EmitPush(a1); // receiver | |
| 3542 // | |
| 3543 // // Call the function. | |
| 3544 // CallWithArguments(args, node->position()); | |
| 3545 // __ nop(); // NOP_ADDED | |
| 3546 // frame_->EmitPush(v0); | |
| 3547 // | |
| 3548 // } else if (property != NULL) { | |
| 3549 //#ifdef DEBUG | |
| 3550 //// printf("(property != NULL)\n"); | |
| 3551 //#endif | |
| 3552 // // Check if the key is a literal string. | |
| 3553 // Literal* literal = property->key()->AsLiteral(); | |
| 3554 // | |
| 3555 // if (literal != NULL && literal->handle()->IsSymbol()) { | |
| 3556 // // ------------------------------------------------------------------ | |
| 3557 // // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)' | |
| 3558 // // ------------------------------------------------------------------ | |
| 3559 // | |
| 3560 // __ SetupAlignedCall(t2, arg_count); | |
| 3561 // | |
| 3562 // // Push the name of the function and the receiver onto the stack. | |
| 3563 //// __ mov(r0, Operand(literal->handle())); | |
| 3564 //// frame_->EmitPush(r0); | |
| 3565 // __ li(a0, Operand(literal->handle())); | |
| 3566 // frame_->EmitPush(a0); | |
| 3567 // LoadAndSpill(property->obj()); | |
| 3568 // | |
| 3569 // // Load the arguments. | |
| 3570 // for (int i = 0; i < arg_count; i++) { | |
| 3571 // LoadAndSpill(args->at(i)); | |
| 3572 // } | |
| 3573 // | |
| 3574 // // Set the receiver register and call the IC initialization code. | |
| 3575 // InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 3576 // Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop); | |
| 3577 // CodeForSourcePosition(node->position()); | |
| 3578 // frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); | |
| 3579 //// __ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); // branch delay | |
| 3580 // __ nop(); | |
| 3581 // __ ReturnFromAlignedCall(); | |
| 3582 // | |
| 3583 // __ lw(cp, frame_->Context()); | |
| 3584 // | |
| 3585 // // Remove the function from the stack. | |
| 3586 // frame_->DropFromVFrameOnly(); | |
| 3587 // | |
| 3588 // frame_->EmitPush(v0); | |
| 3589 // | |
| 3590 // | |
| 3591 // } else { | |
| 3592 //#ifdef DEBUG | |
| 3593 //// printf("else\n"); | |
| 3594 //#endif | |
| 3595 // // ------------------------------------------- | |
| 3596 // // JavaScript example: 'array[index](1, 2, 3)' | |
| 3597 // // ------------------------------------------- | |
| 3598 // | |
| 3599 // __ SetupAlignedCall(t3, arg_count); | |
| 3600 // | |
| 3601 // // Load the function to call from the property through a reference. | |
| 3602 // Reference ref(this, property); | |
| 3603 // ref.GetValueAndSpill(); // receiver | |
| 3604 // | |
| 3605 // // Pass receiver to called function. | |
| 3606 // if (property->is_synthetic()) { | |
| 3607 // LoadGlobalReceiver(a0); | |
| 3608 // } else { | |
| 3609 // __ lw(a0, frame_->ElementAt(ref.size())); | |
| 3610 // frame_->EmitPush(a0); | |
| 3611 // } | |
| 3612 // | |
| 3613 // // Call the function (and allocate args slots). | |
| 3614 // CallWithArguments(args, node->position()); | |
| 3615 // __ ReturnFromAlignedCall(); | |
| 3616 // | |
| 3617 // __ lw(cp, frame_->Context()); | |
| 3618 // frame_->DropFromVFrameOnly(); // discard the TOS | |
| 3619 // | |
| 3620 // frame_->EmitPush(v0); | |
| 3621 // } | |
| 3622 // } else { | |
| 3623 //#ifdef DEBUG | |
| 3624 //// printf("else2\n"); | |
| 3625 //#endif | |
| 3626 // // ---------------------------------- | |
| 3627 // // JavaScript example: 'foo(1, 2, 3)' // foo is not global | |
| 3628 // // ---------------------------------- | |
| 3629 // | |
| 3630 // __ SetupAlignedCall(t1, arg_count); | |
| 3631 // | |
| 3632 // // Load the function. | |
| 3633 // LoadAndSpill(function); | |
| 3634 // | |
| 3635 // // Pass the global proxy as the receiver. | |
| 3636 // LoadGlobalReceiver(a0); | |
| 3637 // | |
| 3638 // // Call the function (and allocate args slots). | |
| 3639 // CallWithArguments(args, node->position()); | |
| 3640 // __ ReturnFromAlignedCall(); | |
| 3641 // | |
| 3642 // __ lw(cp, frame_->Context()); | |
| 3643 // frame_->DropFromVFrameOnly(); // discard the TOS | |
| 3644 // | |
| 3645 // frame_->EmitPush(v0); | |
| 3646 // } | |
| 3647 // | |
| 3648 // ASSERT(frame_->height() == original_height + 1); | |
| 3649 } | |
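As the ECMA-262 note in the block above says, the arguments are already evaluated and on the stack by the time the callee is resolved on an IC miss. A small sketch of that ordering; the Value/Function types and ResolveFunction are illustrative stand-ins for the CallIC machinery:

    #include <string>
    #include <vector>

    struct Value { int v; };
    using Function = Value (*)(const std::vector<Value>&);

    // Stand-in for the IC lookup performed on a cache miss.
    Function ResolveFunction(const std::string& /*name*/) {
      return [](const std::vector<Value>& args) {
        return args.empty() ? Value{0} : args.back();
      };
    }

    Value EvaluateCall(const std::string& name, std::vector<Value> args) {
      // 'args' were fully evaluated before this point; only now is the
      // callee resolved, matching the comment in the code above.
      Function fn = ResolveFunction(name);
      return fn(args);
    }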
| 3650 | |
| 3651 | |
| 3652 void CodeGenerator::VisitCallNew(CallNew* node) { | |
| 3653 UNIMPLEMENTED_(); | |
| 3654 //#ifdef DEBUG | |
| 3655 //// printf("CodeGenerator::VisitCallNew\n"); | |
| 3656 //#endif | |
| 3657 // | |
| 3658 //#ifdef DEBUG | |
| 3659 // int original_height = frame_->height(); | |
| 3660 //#endif | |
| 3661 // VirtualFrame::SpilledScope spilled_scope; | |
| 3662 // Comment cmnt(masm_, "[ CallNew"); | |
| 3663 // | |
| 3664 // // According to ECMA-262, section 11.2.2, page 44, the function | |
| 3665 // // expression in new calls must be evaluated before the | |
| 3666 // // arguments. This is different from ordinary calls, where the | |
| 3667 // // actual function to call is resolved after the arguments have been | |
| 3668 // // evaluated. | |
| 3669 // | |
| 3670 // // Setup the stack | |
| 3671 // ZoneList<Expression*>* args = node->arguments(); | |
| 3672 // int arg_count = args->length(); | |
| 3673 // | |
| 3674 // __ push(s3); // Save s3 on the stack | |
| 3675 // __ mov(s3, sp); // Save sp | |
| 3676 // __ li(t0, Operand(~7)); // Load sp mask | |
| 3677 // __ and_(sp, sp, Operand(t0)); // Align sp. | |
| 3678 //// __ break_(0x3648); | |
| 3679 // | |
| 3680 // // We are going to push (arg_count + 2)*4 on the stack. We make sure sp will | |
| 3681 // // be 8 bytes aligned after this. | |
| 3682 // if ((arg_count % 2) != 0) { | |
| 3683 // __ addiu(sp, sp, -4); | |
| 3684 // } | |
| 3685 // | |
| 3686 // // Compute function to call and use the global object as the | |
| 3687 // // receiver. There is no need to use the global proxy here because | |
| 3688 // // it will always be replaced with a newly allocated object. | |
| 3689 // LoadAndSpill(node->expression()); | |
| 3690 // LoadGlobal(); | |
| 3691 // | |
| 3692 // // Push the arguments ("left-to-right") on the stack. | |
| 3693 // for (int i = 0; i < arg_count; i++) { | |
| 3694 // LoadAndSpill(args->at(i)); | |
| 3695 // } | |
| 3696 // | |
| 3697 // // a0: the number of arguments. | |
| 3698 //// Result num_args(r0); | |
| 3699 // Result num_args(a0); | |
| 3700 //// __ mov(r0, Operand(arg_count)); | |
| 3701 // __ li(a0, Operand(arg_count)); | |
| 3702 // | |
| 3703 // // Load the function into a1 as per calling convention. | |
| 3704 //// Result function(r1); | |
| 3705 //// __ ldr(r1, frame_->ElementAt(arg_count + 1)); | |
| 3706 // Result function(a1); | |
| 3707 // __ lw(a1, frame_->ElementAt(arg_count + 1)); | |
| 3708 // | |
| 3709 // // Call the construct call builtin that handles allocation and | |
| 3710 // // constructor invocation. | |
| 3711 // CodeForSourcePosition(node->position()); | |
| 3712 // Handle<Code> ic(Builtins::builtin(Builtins::JSConstructCall)); | |
| 3713 // frame_->CallCodeObject(ic, | |
| 3714 // RelocInfo::CONSTRUCT_CALL, | |
| 3715 // &num_args, | |
| 3716 // &function, | |
| 3717 // arg_count + 1, | |
| 3718 // true); // Special handling of args slots | |
| 3719 //// __ addiu(sp, sp, Operand(-StandardFrameConstants::kRArgsSlotsSize)); | |
| 3720 // __ nop(); | |
| 3721 // __ mov(sp, s3); // Restore sp. | |
| 3722 // __ teq(fp, zero_reg, 0x122); // debug help | |
| 3723 // __ pop(s3); // Restore s3 | |
| 3724 // | |
| 3725 // | |
| 3726 // // Discard old TOS value and push v0 on the stack (same as Pop(), push(v0)). | |
| 3727 //// __ str(r0, frame_->Top()); | |
| 3728 //// __ sw(v0, frame_->Top()); | |
| 3729 // __ push(v0); | |
| 3730 // ASSERT(frame_->height() == original_height + 1); | |
| 3731 //// __ break_(0x04309); | |
| 3732 } | |
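The alignment code above (mask sp with ~7, then pad one word when an odd number of words will be pushed) is plain modular arithmetic. A checkable sketch with 4-byte words as on MIPS32:

    #include <cassert>
    #include <cstdint>

    uintptr_t AlignSpForCall(uintptr_t sp, int arg_count) {
      sp &= ~static_cast<uintptr_t>(7);   // li t0, ~7; and sp, sp, t0
      if ((arg_count % 2) != 0) sp -= 4;  // so that (arg_count + 2) pushes...
      return sp;                          // ...leave sp 8-byte aligned again
    }

    // After pushing function + receiver + arg_count words, sp is 8-aligned.
    void CheckAlignment() {
      for (int args = 0; args < 8; ++args) {
        uintptr_t sp = AlignSpForCall(0x7ffffff4u, args);
        assert((sp - 4u * (args + 2)) % 8 == 0);
      }
    }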
| 3733 | |
| 3734 | |
| 3735 void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) { | |
| 3736 UNIMPLEMENTED_(); | |
| 3737 //#ifdef DEBUG | |
| 3738 //// printf("CodeGenerator::GenerateClassOf\n"); | |
| 3739 //#endif | |
| 3740 // VirtualFrame::SpilledScope spilled_scope; | |
| 3741 // ASSERT(args->length() == 1); | |
| 3742 // JumpTarget leave, null, function, non_function_constructor; | |
| 3743 // | |
| 3744 // // Load the object into a0. | |
| 3745 // LoadAndSpill(args->at(0)); | |
| 3746 //// frame_->EmitPop(r0); | |
| 3747 // frame_->EmitPop(a0); | |
| 3748 // | |
| 3749 // // If the object is a smi, we return null. | |
| 3750 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 3751 //// null.Branch(eq); | |
| 3752 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 3753 // null.Branch(eq, no_hint, t0, Operand(zero_reg)); | |
| 3754 // __ nop(); // NOP_ADDED | |
| 3755 // | |
| 3756 // // Check that the object is a JS object but take special care of JS | |
| 3757 // // functions to make sure they have 'Function' as their class. | |
| 3758 //// __ CompareObjectType(r0, r0, r1, FIRST_JS_OBJECT_TYPE); | |
| 3759 //// null.Branch(lt); | |
| 3760 // __ GetObjectType(a0, a0, a1); | |
| 3761 // null.Branch(less, no_hint, a1, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 3762 // __ nop(); // NOP_ADDED | |
| 3763 // | |
| 3764 // // As long as JS_FUNCTION_TYPE is the last instance type and it is | |
| 3765 // // right after LAST_JS_OBJECT_TYPE, we can avoid checking for | |
| 3766 // // LAST_JS_OBJECT_TYPE. | |
| 3767 // ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
| 3768 // ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1); | |
| 3769 //// __ cmp(r1, Operand(JS_FUNCTION_TYPE)); | |
| 3770 //// function.Branch(eq); | |
| 3771 // function.Branch(eq, no_hint, a1, Operand(JS_FUNCTION_TYPE)); | |
| 3772 // | |
| 3773 // // Check if the constructor in the map is a function. | |
| 3774 //// __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); | |
| 3775 //// __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); | |
| 3776 //// non_function_constructor.Branch(ne); | |
| 3777 // __ lw(a0, FieldMemOperand(a0, Map::kConstructorOffset)); | |
| 3778 // __ GetObjectType(a0, a1, a1); | |
| 3779 // non_function_constructor.Branch(ne, no_hint, a1, Operand(JS_FUNCTION_TYPE)); | |
| 3780 // | |
| 3781 // // The a0 register now contains the constructor function. Grab the | |
| 3782 // // instance class name from there. | |
| 3783 //// __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); | |
| 3784 //// __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset)); | |
| 3785 //// frame_->EmitPush(r0); | |
| 3786 // __ lw(a0, FieldMemOperand(a0, JSFunction::kSharedFunctionInfoOffset)); | |
| 3787 // __ lw(v0, FieldMemOperand(a0, SharedFunctionInfo::kInstanceClassNameOffset)); | |
| 3788 // frame_->EmitPush(v0); | |
| 3789 // leave.Jump(); | |
| 3790 // __ nop(); // NOP_ADDED | |
| 3791 // | |
| 3792 // // Functions have class 'Function'. | |
| 3793 // function.Bind(); | |
| 3794 //// __ mov(r0, Operand(Factory::function_class_symbol())); | |
| 3795 //// frame_->EmitPush(r0); | |
| 3796 // __ li(v0, Operand(Factory::function_class_symbol())); | |
| 3797 // frame_->EmitPush(v0); | |
| 3798 // leave.Jump(); | |
| 3799 // __ nop(); // NOP_ADDED | |
| 3800 // | |
| 3801 // // Objects with a non-function constructor have class 'Object'. | |
| 3802 // non_function_constructor.Bind(); | |
| 3803 //// __ mov(r0, Operand(Factory::Object_symbol())); | |
| 3804 //// frame_->EmitPush(r0); | |
| 3805 // __ li(v0, Operand(Factory::Object_symbol())); | |
| 3806 // frame_->EmitPush(v0); | |
| 3807 // leave.Jump(); | |
| 3808 // __ nop(); // NOP_ADDED | |
| 3809 // | |
| 3810 // // Non-JS objects have class null. | |
| 3811 // null.Bind(); | |
| 3812 //// __ LoadRoot(r0, Heap::kNullValueRootIndex); | |
| 3813 //// frame_->EmitPush(r0); | |
| 3814 // __ LoadRoot(v0, Heap::kNullValueRootIndex); | |
| 3815 // frame_->EmitPush(v0); | |
| 3816 // | |
| 3817 // // All done. | |
| 3818 // leave.Bind(); | |
| 3819 } | |
| 3820 | |
| 3821 | |
| 3822 void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) { | |
| 3823 UNIMPLEMENTED(); | |
| 3824 __ break_(0x00666); | |
| 3825 // VirtualFrame::SpilledScope spilled_scope; | |
| 3826 // ASSERT(args->length() == 1); | |
| 3827 // JumpTarget leave; | |
| 3828 // LoadAndSpill(args->at(0)); | |
| 3829 // frame_->EmitPop(r0); // r0 contains object. | |
| 3830 // // if (object->IsSmi()) return the object. | |
| 3831 // __ tst(r0, Operand(kSmiTagMask)); | |
| 3832 // leave.Branch(eq); | |
| 3833 // // It is a heap object - get map. If (!object->IsJSValue()) return the object. | |
| 3834 // __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE); | |
| 3835 // leave.Branch(ne); | |
| 3836 // // Load the value. | |
| 3837 // __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset)); | |
| 3838 // leave.Bind(); | |
| 3839 // frame_->EmitPush(r0); | |
| 3840 } | |
| 3841 | |
| 3842 | |
| 3843 void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) { | |
| 3844 UNIMPLEMENTED_(); | |
| 3845 // VirtualFrame::SpilledScope spilled_scope; | |
| 3846 // ASSERT(args->length() == 2); | |
| 3847 // JumpTarget leave; | |
| 3848 // LoadAndSpill(args->at(0)); // Load the object. | |
| 3849 // LoadAndSpill(args->at(1)); // Load the value. | |
| 3850 // frame_->EmitPop(a0); // a0 contains value | |
| 3851 // frame_->EmitPop(a1); // a1 contains object | |
| 3852 // // if (object->IsSmi()) return object. | |
| 3853 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 3854 // __ andi(t1, a1, Operand(kSmiTagMask)); | |
| 3855 // leave.Branch(eq, no_hint, t1, Operand(zero_reg)); | |
| 3856 // __ nop(); // NOP_ADDED | |
| 3857 // // It is a heap object - get map. If (!object->IsJSValue()) return the object. | |
| 3858 //// __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE); | |
| 3859 // __ GetObjectType(a1, a2, a2); | |
| 3860 // leave.Branch(ne, no_hint, a2, Operand(JS_VALUE_TYPE)); | |
| 3861 // __ nop(); // NOP_ADDED | |
| 3862 // // Store the value. | |
| 3863 //// __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); | |
| 3864 // __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset)); | |
| 3865 // // Update the write barrier. | |
| 3866 //// __ mov(r2, Operand(JSValue::kValueOffset - kHeapObjectTag)); | |
| 3867 //// __ RecordWrite(r1, r2, r3); | |
| 3868 // __ li(a2, Operand(JSValue::kValueOffset - kHeapObjectTag)); | |
| 3869 // __ RecordWrite(a1, a2, a3); | |
| 3870 // // Leave. | |
| 3871 // leave.Bind(); | |
| 3872 // frame_->EmitPush(v0); | |
| 3873 } | |
| 3874 | |
| 3875 | |
| 3876 void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) { | |
| 3877 UNIMPLEMENTED_(); | |
| 3878 // VirtualFrame::SpilledScope spilled_scope; | |
| 3879 // ASSERT(args->length() == 1); | |
| 3880 // LoadAndSpill(args->at(0)); | |
| 3881 // frame_->EmitPop(t0); | |
| 3882 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 3883 // __ andi(s5, t0, Operand(kSmiTagMask)); | |
| 3884 // __ mov(s6, zero_reg); | |
| 3885 // cc_reg_ = eq; | |
| 3886 } | |
| 3887 | |
| 3888 | |
| 3889 void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) { | |
| 3890 UNIMPLEMENTED(); | |
| 3891 __ break_(0x00666); | |
| 3892 // VirtualFrame::SpilledScope spilled_scope; | |
| 3893 // // See comment in CodeGenerator::GenerateLog in codegen-ia32.cc. | |
| 3894 // ASSERT_EQ(args->length(), 3); | |
| 3895 //#ifdef ENABLE_LOGGING_AND_PROFILING | |
| 3896 // if (ShouldGenerateLog(args->at(0))) { | |
| 3897 // LoadAndSpill(args->at(1)); | |
| 3898 // LoadAndSpill(args->at(2)); | |
| 3899 // __ CallRuntime(Runtime::kLog, 2); | |
| 3900 // } | |
| 3901 //#endif | |
| 3902 // __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | |
| 3903 // frame_->EmitPush(r0); | |
| 3904 } | |
| 3905 | |
| 3906 | |
| 3907 void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) { | |
| 3908 UNIMPLEMENTED_(); | |
| 3909 // VirtualFrame::SpilledScope spilled_scope; | |
| 3910 // ASSERT(args->length() == 1); | |
| 3911 // LoadAndSpill(args->at(0)); | |
| 3912 //// frame_->EmitPop(r0); | |
| 3913 //// __ tst(r0, Operand(kSmiTagMask | 0x80000000u)); | |
| 3914 // frame_->EmitPop(a0); | |
| 3915 // __ andi(s5, a0, Operand(kSmiTagMask | 0x80000000u)); | |
| 3916 // __ li(s6, Operand(0)); | |
| 3917 // cc_reg_ = eq; | |
| 3918 } | |
| 3919 | |
| 3920 | |
| 3921 // This should generate code that performs a charCodeAt() call or returns | |
| 3922 // undefined in order to trigger the slow case, Runtime_StringCharCodeAt. | |
| 3923 // It is not yet implemented in this MIPS port, so it always goes to the slow case. | |
| 3924 void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) { | |
| 3925 UNIMPLEMENTED(); | |
| 3926 __ break_(0x00666); | |
| 3927 // VirtualFrame::SpilledScope spilled_scope; | |
| 3928 // ASSERT(args->length() == 2); | |
| 3929 // __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); | |
| 3930 // frame_->EmitPush(r0); | |
| 3931 } | |
| 3932 | |
| 3933 | |
| 3934 void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) { | |
| 3935 UNIMPLEMENTED(); | |
| 3936 __ break_(0x00666); | |
| 3937 // VirtualFrame::SpilledScope spilled_scope; | |
| 3938 // ASSERT(args->length() == 1); | |
| 3939 // LoadAndSpill(args->at(0)); | |
| 3940 // JumpTarget answer; | |
| 3941 // // We need the CC bits to come out as not_equal in the case where the | |
| 3942 // // object is a smi. This can't be done with the usual test opcode so | |
| 3943 // // we use XOR to get the right CC bits. | |
| 3944 // frame_->EmitPop(r0); | |
| 3945 // __ and_(r1, r0, Operand(kSmiTagMask)); | |
| 3946 // __ eor(r1, r1, Operand(kSmiTagMask), SetCC); | |
| 3947 // answer.Branch(ne); | |
| 3948 // // It is a heap object - get the map. Check if the object is a JS array. | |
| 3949 // __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); | |
| 3950 // answer.Bind(); | |
| 3951 // cc_reg_ = eq; | |
| 3952 } | |
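The AND/XOR pair in the (still commented) ARM code above exists because a plain tst sets 'equal' for a smi, while the caller needs 'not_equal'. Restating the bit trick in plain C++, with the V8 tagging convention that smis have a clear low bit:

    #include <cstdint>

    const uint32_t kSmiTagMaskBit = 1;  // low bit is the smi tag

    // (value & mask) is 0 for a smi; XOR-ing with the mask flips that to a
    // non-zero result, so one flag-setting instruction yields "not_equal"
    // exactly when the value is a smi.
    bool SmiGivesNotEqual(uint32_t value) {
      return ((value & kSmiTagMaskBit) ^ kSmiTagMaskBit) != 0;
    }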
| 3953 | |
| 3954 | |
| 3955 void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) { | |
| 3956 UNIMPLEMENTED_(); | |
| 3957 // VirtualFrame::SpilledScope spilled_scope; | |
| 3958 // ASSERT(args->length() == 0); | |
| 3959 // | |
| 3960 // // Get the frame pointer for the calling frame. | |
| 3961 //// __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 3962 // __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 3963 // | |
| 3964 // // Skip the arguments adaptor frame if it exists. | |
| 3965 // Label check_frame_marker; | |
| 3966 //// __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 3967 //// __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 3968 //// __ b(ne, &check_frame_marker); | |
| 3969 //// __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset)); | |
| 3970 // __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset)); | |
| 3971 // __ bcond(ne, &check_frame_marker, | |
| 3972 // a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 3973 // __ nop(); // NOP_ADDED | |
| 3974 // __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset)); | |
| 3975 // | |
| 3976 // // Check the marker in the calling frame. | |
| 3977 // __ bind(&check_frame_marker); | |
| 3978 //// __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset)); | |
| 3979 //// __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | |
| 3980 // __ lw(s5, MemOperand(a2, StandardFrameConstants::kMarkerOffset)); | |
| 3981 // __ li(s6, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); | |
| 3982 // cc_reg_ = eq; | |
| 3983 } | |
| 3984 | |
| 3985 | |
| 3986 void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) { | |
| 3987 UNIMPLEMENTED_(); | |
| 3988 //#ifdef DEBUG | |
| 3989 //// printf("CodeGenerator::GenerateArgumentsLength\n"); | |
| 3990 //#endif | |
| 3991 // VirtualFrame::SpilledScope spilled_scope; | |
| 3992 // ASSERT(args->length() == 0); | |
| 3993 // | |
| 3994 // // Seed the result with the formal parameters count, which will be used | |
| 3995 // // in case no arguments adaptor frame is found below the current frame. | |
| 3996 //// __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters()))); | |
| 3997 // __ li(a0, Operand(Smi::FromInt(scope_->num_parameters()))); | |
| 3998 //// | |
| 3999 // // Call the shared stub to get to the arguments.length. | |
| 4000 // ArgumentsAccessStub stub(ArgumentsAccessStub::READ_LENGTH); | |
| 4001 // frame_->CallStub(&stub, 0); | |
| 4002 // __ nop(); // NOP_ADDED | |
| 4003 // frame_->EmitPush(v0); | |
| 4004 } | |
| 4005 | |
| 4006 | |
| 4007 void CodeGenerator::GenerateArgumentsAccess(ZoneList<Expression*>* args) { | |
| 4008 UNIMPLEMENTED_(); | |
| 4009 //#ifdef DEBUG | |
| 4010 //// printf("CodeGenerator::GenerateArgumentsAccess\n"); | |
| 4011 //#endif | |
| 4012 // VirtualFrame::SpilledScope spilled_scope; | |
| 4013 // ASSERT(args->length() == 1); | |
| 4014 // | |
| 4015 // // Satisfy contract with ArgumentsAccessStub: | |
| 4016 // // Load the key into a1 and the formal parameters count into a0. | |
| 4017 // LoadAndSpill(args->at(0)); | |
| 4018 //// frame_->EmitPop(r1); | |
| 4019 //// __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters()))); | |
| 4020 // frame_->EmitPop(a1); | |
| 4021 // __ li(a0, Operand(Smi::FromInt(scope_->num_parameters()))); | |
| 4022 // | |
| 4023 // // Call the shared stub to get to arguments[key]. | |
| 4024 // ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); | |
| 4025 // frame_->CallStub(&stub, 0); | |
| 4026 // __ nop(); // NOP_ADDED | |
| 4027 // frame_->EmitPush(v0); | |
| 4028 } | |
| 4029 | |
| 4030 | |
| 4031 void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) { | |
| 4032 UNIMPLEMENTED_(); | |
| 4033 //#ifdef DEBUG | |
| 4034 //// printf("CodeGenerator::GenerateRandomPositiveSmi\n"); | |
| 4035 //#endif | |
| 4036 // VirtualFrame::SpilledScope spilled_scope; | |
| 4037 // ASSERT(args->length() == 0); | |
| 4038 // __ Call(ExternalReference::random_positive_smi_function().address(), | |
| 4039 // RelocInfo::RUNTIME_ENTRY); | |
| 4040 // __ nop(); // NOP_ADDED | |
| 4041 // frame_->EmitPush(v0); | |
| 4042 } | |
| 4043 | |
| 4044 | |
| 4045 void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) { | |
| 4046 UNIMPLEMENTED_(); | |
| 4047 //#ifdef DEBUG | |
| 4048 //// printf("CodeGenerator::GenerateFastMathOp\n"); | |
| 4049 //#endif | |
| 4050 // VirtualFrame::SpilledScope spilled_scope; | |
| 4051 // LoadAndSpill(args->at(0)); | |
| 4052 // switch (op) { | |
| 4053 // case SIN: | |
| 4054 // frame_->CallRuntime(Runtime::kMath_sin, 1); | |
| 4055 // break; | |
| 4056 // case COS: | |
| 4057 // frame_->CallRuntime(Runtime::kMath_cos, 1); | |
| 4058 // break; | |
| 4059 // } | |
| 4060 // __ nop(); // NOP_ADDED | |
| 4061 // frame_->EmitPush(v0); | |
| 4062 } | |
| 4063 | |
| 4064 | |
| 4065 void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) { | |
| 4066 UNIMPLEMENTED_(); | |
| 4067 //#ifdef DEBUG | |
| 4068 //// printf("CodeGenerator::GenerateObjectEquals\n"); | |
| 4069 //#endif | |
| 4070 // VirtualFrame::SpilledScope spilled_scope; | |
| 4071 // ASSERT(args->length() == 2); | |
| 4072 // | |
| 4073 // // Load the two objects into registers and perform the comparison. | |
| 4074 // LoadAndSpill(args->at(0)); | |
| 4075 // LoadAndSpill(args->at(1)); | |
| 4076 //// frame_->EmitPop(r0); | |
| 4077 //// frame_->EmitPop(r1); | |
| 4078 // frame_->EmitPop(a0); | |
| 4079 // frame_->EmitPop(a1); | |
| 4080 //// __ cmp(r0, Operand(r1)); | |
| 4081 // __ mov(s5, a0); | |
| 4082 // __ mov(s6, a1); | |
| 4083 // cc_reg_ = eq; | |
| 4084 } | |
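The `mov(s5, a0)` / `mov(s6, a1)` pair in GenerateObjectEquals above is the recurring pattern this port uses in place of ARM's flag-setting `cmp`: the two operands are parked in the callee-saved registers s5/s6 and `cc_reg_` records which condition the eventual branch should test. A minimal stand-alone sketch of that deferred-comparison idea, with the registers modeled as plain variables (the names are stand-ins for illustration, not V8 API):

    #include <cassert>

    enum Condition { eq, ne };

    // Models the codegen state left behind by GenerateObjectEquals above:
    // two stashed operands plus the condition a later branch will consume.
    struct DeferredCompare {
      int s5, s6;        // stand-ins for the MIPS callee-saved registers
      Condition cc_reg;  // stand-in for cc_reg_
    };

    bool BranchTaken(const DeferredCompare& d) {
      bool equal = (d.s5 == d.s6);
      return d.cc_reg == eq ? equal : !equal;
    }

    int main() {
      DeferredCompare d = {42, 42, eq};  // mov(s5, a0); mov(s6, a1); cc_reg_ = eq
      assert(BranchTaken(d));
      return 0;
    }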
| 4085 | |
| 4086 | |
| 4087 void CodeGenerator::VisitCallRuntime(CallRuntime* node) { | |
| 4088 UNIMPLEMENTED_(); | |
| 4089 //#ifdef DEBUG | |
| 4090 //// printf("CodeGenerator::VisitCallRuntime\n"); // Debug printf | |
| 4091 //#endif | |
| 4092 // | |
| 4093 //#ifdef DEBUG | |
| 4094 // int original_height = frame_->height(); | |
| 4095 //#endif | |
| 4096 // VirtualFrame::SpilledScope spilled_scope; | |
| 4097 // if (CheckForInlineRuntimeCall(node)) { | |
| 4098 // ASSERT((has_cc() && frame_->height() == original_height) || | |
| 4099 // (!has_cc() && frame_->height() == original_height + 1)); | |
| 4100 // return; | |
| 4101 // } | |
| 4102 // | |
| 4103 // ZoneList<Expression*>* args = node->arguments(); | |
| 4104 // Comment cmnt(masm_, "[ CallRuntime"); | |
| 4105 // Runtime::Function* function = node->function(); | |
| 4106 // | |
| 4107 // int arg_count = args->length(); | |
| 4108 // | |
| 4109 // if (function == NULL) { | |
| 4110 // // Prepare stack for calling JS runtime function. | |
| 4111 // __ SetupAlignedCall(t0, arg_count); | |
| 4112 //// __ mov(r0, Operand(node->name())); | |
| 4113 // __ li(t0, Operand(node->name())); | |
| 4114 // frame_->EmitPush(t0); | |
| 4115 // // Push the builtins object found in the current global object. | |
| 4116 //// __ ldr(r1, GlobalObject()); | |
| 4117 //// __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset)); | |
| 4118 // __ lw(t1, GlobalObject()); | |
| 4119 // __ lw(t0, FieldMemOperand(t1, GlobalObject::kBuiltinsOffset)); | |
| 4120 // frame_->EmitPush(t0); | |
| 4121 // } | |
| 4122 // | |
| 4123 // // Push the arguments ("left-to-right"). | |
| 4124 // for (int i = 0; i < arg_count; i++) { | |
| 4125 // LoadAndSpill(args->at(i)); | |
| 4126 // } | |
| 4127 // | |
| 4128 // if (function == NULL) { | |
| 4129 // // Call the JS runtime function. | |
| 4130 // InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP; | |
| 4131 // Handle<Code> stub = ComputeCallInitialize(arg_count, in_loop); | |
| 4132 // frame_->CallCodeObject(stub, RelocInfo::CODE_TARGET, arg_count + 1); | |
| 4133 //// __ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); | |
| 4134 // __ nop(); | |
| 4135 // | |
| 4136 // __ ReturnFromAlignedCall(); | |
| 4137 // __ lw(cp, frame_->Context()); | |
| 4138 // frame_->DropFromVFrameOnly(); | |
| 4139 // frame_->EmitPush(v0); | |
| 4140 // } else { | |
| 4141 // // Call the C runtime function. | |
| 4142 // frame_->CallRuntime(function, arg_count); | |
| 4143 // __ nop(); // NOP_ADDED | |
| 4144 // frame_->EmitPush(v0); | |
| 4145 // } | |
| 4146 // ASSERT(frame_->height() == original_height + 1); | |
| 4147 } | |
| 4148 | |
| 4149 | |
| 4150 void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) { | |
| 4151 UNIMPLEMENTED_(); | |
| 4152 //#ifdef DEBUG | |
| 4153 // int original_height = frame_->height(); | |
| 4154 //#endif | |
| 4155 // VirtualFrame::SpilledScope spilled_scope; | |
| 4156 // Comment cmnt(masm_, "[ UnaryOperation"); | |
| 4157 // | |
| 4158 // Token::Value op = node->op(); | |
| 4159 // | |
| 4160 // if (op == Token::NOT) { | |
| 4161 // LoadConditionAndSpill(node->expression(), | |
| 4162 // false_target(), | |
| 4163 // true_target(), | |
| 4164 // true); | |
| 4165 // // LoadCondition may (and usually does) leave a test and branch to | |
| 4166 // // be emitted by the caller. In that case, negate the condition. | |
| 4167 // if (has_cc()) cc_reg_ = NegateCondition(cc_reg_); | |
| 4168 // | |
| 4169 // } else if (op == Token::DELETE) { | |
| 4170 // Property* property = node->expression()->AsProperty(); | |
| 4171 // Variable* variable = node->expression()->AsVariableProxy()->AsVariable(); | |
| 4172 // if (property != NULL) { | |
| 4173 // LoadAndSpill(property->obj()); | |
| 4174 // LoadAndSpill(property->key()); | |
| 4175 // Result arg_count(a0); | |
| 4176 // __ li(a0, Operand(1)); // not counting receiver | |
| 4177 // frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2); | |
| 4178 // __ nop(); // NOP_ADDED | |
| 4179 // | |
| 4180 // } else if (variable != NULL) { | |
| 4181 // Slot* slot = variable->slot(); | |
| 4182 // if (variable->is_global()) { | |
| 4183 // LoadGlobal(); | |
| 4184 // __ li(a0, Operand(variable->name())); | |
| 4185 // frame_->EmitPush(a0); | |
| 4186 // Result arg_count(a0); | |
| 4187 // __ li(a0, Operand(1)); // not counting receiver | |
| 4188 // frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2); | |
| 4189 // __ nop(); // NOP_ADDED | |
| 4190 // | |
| 4191 // } else if (slot != NULL && slot->type() == Slot::LOOKUP) { | |
| 4192 // // lookup the context holding the named variable | |
| 4193 // frame_->EmitPush(cp); | |
| 4194 // __ li(a0, Operand(variable->name())); | |
| 4195 // frame_->EmitPush(a0); | |
| 4196 // frame_->CallRuntime(Runtime::kLookupContext, 2); | |
| 4197 // // v0: context | |
| 4198 // frame_->EmitPush(v0); | |
| 4199 // __ li(a0, Operand(variable->name())); | |
| 4200 // frame_->EmitPush(a0); | |
| 4201 // Result arg_count(a0); | |
| 4202 // __ li(a0, Operand(1)); // not counting receiver | |
| 4203 // frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, &arg_count, 2); | |
| 4204 // __ nop(); // NOP_ADDED | |
| 4205 // | |
| 4206 // } else { | |
| 4207 // // Default: Result of deleting non-global, not dynamically | |
| 4208 // // introduced variables is false. | |
| 4209 // __ LoadRoot(v0, Heap::kFalseValueRootIndex); | |
| 4210 // } | |
| 4211 // | |
| 4212 // } else { | |
| 4213 // // Default: Result of deleting expressions is true. | |
| 4214 // LoadAndSpill(node->expression()); // may have side-effects | |
| 4215 // frame_->Drop(); | |
| 4216 // __ LoadRoot(v0, Heap::kTrueValueRootIndex); | |
| 4217 // } | |
| 4218 // frame_->EmitPush(v0); | |
| 4219 // | |
| 4220 // } else if (op == Token::TYPEOF) { | |
| 4221 // // Special case for loading the typeof expression; see comment on | |
| 4222 // // LoadTypeofExpression(). | |
| 4223 // LoadTypeofExpression(node->expression()); | |
| 4224 // frame_->CallRuntime(Runtime::kTypeof, 1); | |
| 4225 // frame_->EmitPush(v0); // r0 has result | |
| 4226 // | |
| 4227 // } else { | |
| 4228 // LoadAndSpill(node->expression()); | |
| 4229 // frame_->EmitPop(a0); | |
| 4230 // switch (op) { | |
| 4231 // case Token::NOT: | |
| 4232 // case Token::DELETE: | |
| 4233 // case Token::TYPEOF: | |
| 4234 // UNREACHABLE(); // handled above | |
| 4235 // break; | |
| 4236 // | |
| 4237 // case Token::SUB: { | |
| 4238 // bool overwrite = | |
| 4239 // (node->expression()->AsBinaryOperation() != NULL && | |
| 4240 // node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
| 4241 // UnarySubStub stub(overwrite); | |
| 4242 // frame_->CallStub(&stub, 0); | |
| 4243 // __ nop(); // NOP_ADDED | |
| 4244 // break; | |
| 4245 // } | |
| 4246 // | |
| 4247 // case Token::BIT_NOT: { | |
| 4248 // // smi check | |
| 4249 // JumpTarget smi_label; | |
| 4250 // JumpTarget continue_label; | |
| 4251 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 4252 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 4253 // smi_label.Branch(eq, no_hint, t0, Operand(zero_reg)); | |
| 4254 // __ nop(); // NOP_ADDED | |
| 4255 // | |
| 4256 // frame_->EmitPush(a0); | |
| 4257 // Result arg_count(a0); | |
| 4258 // __ li(a0, Operand(0)); // not counting receiver | |
| 4259 // frame_->InvokeBuiltin(Builtins::BIT_NOT, CALL_JS, &arg_count, 1); | |
| 4260 // __ nop(); // NOP_ADDED | |
| 4261 // | |
| 4262 // continue_label.Jump(); | |
| 4263 // __ nop(); // NOP_ADDED | |
| 4264 // smi_label.Bind(); | |
| 4265 //// __ mvn(r0, Operand(r0)); | |
| 4266 //// __ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag | |
| 4267 // __ or_(a0, a0, Operand(kSmiTagMask)); | |
| 4268 // __ movn(a0, a0); | |
| 4269 // continue_label.Bind(); | |
| 4270 // break; | |
| 4271 // } | |
| 4272 // | |
| 4273 // case Token::VOID: | |
| 4274 // // since the stack top is cached in r0, popping and then | |
| 4275 // // pushing a value can be done by just writing to r0. | |
| 4276 // __ LoadRoot(a0, Heap::kUndefinedValueRootIndex); | |
| 4277 // break; | |
| 4278 // | |
| 4279 // case Token::ADD: { | |
| 4280 // // Smi check. | |
| 4281 // JumpTarget continue_label; | |
| 4282 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 4283 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 4284 // continue_label.Branch(eq, no_hint, t0, Operand(zero_reg)); | |
| 4285 // __ nop(); // NOP_ADDED | |
| 4286 // frame_->EmitPush(a0); | |
| 4287 // Result arg_count(a0); | |
| 4288 // __ li(a0, Operand(0)); // not counting receiver | |
| 4289 // frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1); | |
| 4290 // __ nop(); // NOP_ADDED | |
| 4291 // continue_label.Bind(); | |
| 4292 // break; | |
| 4293 // } | |
| 4294 // default: | |
| 4295 // UNREACHABLE(); | |
| 4296 // } | |
| 4297 // frame_->EmitPush(v0); // r0 has result | |
| 4298 // } | |
| 4299 // ASSERT(!has_valid_frame() || | |
| 4300 // (has_cc() && frame_->height() == original_height) || | |
| 4301 // (!has_cc() && frame_->height() == original_height + 1)); | |
| 4302 } | |
| 4303 | |
| 4304 | |
| 4305 void CodeGenerator::VisitCountOperation(CountOperation* node) { | |
| 4306 UNIMPLEMENTED_(); | |
| 4307 //#ifdef DEBUG | |
| 4308 //// printf("CodeGenerator::VisitCountOperation\n"); | |
| 4309 //#endif | |
| 4310 // | |
| 4311 // | |
| 4312 // // TODO(MIPS.1): Implement overflow checks | |
| 4313 // | |
| 4314 //#ifdef DEBUG | |
| 4315 // int original_height = frame_->height(); | |
| 4316 //#endif | |
| 4317 // VirtualFrame::SpilledScope spilled_scope; | |
| 4318 // Comment cmnt(masm_, "[ CountOperation"); | |
| 4319 // | |
| 4320 // bool is_postfix = node->is_postfix(); | |
| 4321 // bool is_increment = node->op() == Token::INC; | |
| 4322 // | |
| 4323 // Variable* var = node->expression()->AsVariableProxy()->AsVariable(); | |
| 4324 // bool is_const = (var != NULL && var->mode() == Variable::CONST); | |
| 4325 // | |
| 4326 // // Postfix: Make room for the result. | |
| 4327 // if (is_postfix) { | |
| 4328 // __ li(v0, Operand(0)); | |
| 4329 // frame_->EmitPush(v0); | |
| 4330 // __ push(v0); | |
| 4331 // } | |
| 4332 // | |
| 4333 // { Reference target(this, node->expression()); | |
| 4334 // if (target.is_illegal()) { | |
| 4335 // // Spoof the virtual frame to have the expected height (one higher | |
| 4336 // // than on entry). | |
| 4337 // if (!is_postfix) { | |
| 4338 // __ li(v0, Operand(0)); | |
| 4339 // frame_->EmitPush(v0); | |
| 4340 // } | |
| 4341 // ASSERT(frame_->height() == original_height + 1); | |
| 4342 // return; | |
| 4343 // } | |
| 4344 // target.GetValueAndSpill(); | |
| 4345 // frame_->EmitPop(a0); | |
| 4346 // | |
| 4347 // JumpTarget slow; | |
| 4348 // JumpTarget exit; | |
| 4349 // | |
| 4350 // // Check for smi operand. | |
| 4351 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 4352 // slow.Branch(ne, no_hint, t0, Operand(zero_reg)); | |
| 4353 // __ nop(); // NOP_ADDED | |
| 4354 // | |
| 4355 // // Postfix: Store the old value as the result. | |
| 4356 // if (is_postfix) { | |
| 4357 // __ sw(a0, frame_->ElementAt(target.size())); | |
| 4358 // } | |
| 4359 // | |
| 4360 // // Perform optimistic increment/decrement. | |
| 4361 // if (is_increment) { | |
| 4362 // __ add(v0, a0, Operand(Smi::FromInt(1))); | |
| 4363 // } else { | |
| 4364 // __ add(v0, a0, Operand(Smi::FromInt(-1))); | |
| 4365 // } | |
| 4366 // | |
| 4367 // // If the increment/decrement didn't overflow, we're done. | |
| 4368 // // TODO(MIPS.1): Since we don't check for overflow we should always jump. | |
| 4369 // exit.Branch(eq, no_hint, zero_reg, Operand(zero_reg)); | |
| 4370 // __ nop(); // NOP_ADDED | |
| 4371 // | |
| 4372 // | |
| 4373 //// // Revert optimistic increment/decrement. | |
| 4374 //// if (is_increment) { | |
| 4375 //// __ sub(r0, r0, Operand(r1)); | |
| 4376 //// } else { | |
| 4377 //// __ add(r0, r0, Operand(r1)); | |
| 4378 //// } | |
| 4379 //// | |
| 4380 //// // Slow case: Convert to number. | |
| 4381 // slow.Bind(); | |
| 4382 // __ break_(0x09001); // We should not come here yet. | |
| 4383 //// { | |
| 4384 //// // Convert the operand to a number. | |
| 4385 //// frame_->EmitPush(r0); | |
| 4386 //// Result arg_count(r0); | |
| 4387 //// __ mov(r0, Operand(0)); | |
| 4388 //// frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, &arg_count, 1); | |
| 4389 //// } | |
| 4390 //// if (is_postfix) { | |
| 4391 //// // Postfix: store to result (on the stack). | |
| 4392 //// __ str(r0, frame_->ElementAt(target.size())); | |
| 4393 //// } | |
| 4394 //// | |
| 4395 //// // Compute the new value. | |
| 4396 //// __ mov(r1, Operand(Smi::FromInt(1))); | |
| 4397 //// frame_->EmitPush(r0); | |
| 4398 //// frame_->EmitPush(r1); | |
| 4399 //// if (is_increment) { | |
| 4400 //// frame_->CallRuntime(Runtime::kNumberAdd, 2); | |
| 4401 //// } else { | |
| 4402 //// frame_->CallRuntime(Runtime::kNumberSub, 2); | |
| 4403 //// } | |
| 4404 // | |
| 4405 // // Store the new value in the target if not const. | |
| 4406 // exit.Bind(); | |
| 4407 // frame_->EmitPush(v0); | |
| 4408 // if (!is_const) target.SetValue(NOT_CONST_INIT); | |
| 4409 // } | |
| 4410 // | |
| 4411 // // Postfix: Discard the new value and use the old. | |
| 4412 // if (is_postfix) frame_->EmitPop(v0); | |
| 4413 // ASSERT(frame_->height() == original_height + 1); | |
| 4414 } | |
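The smi checks and the optimistic `add(v0, a0, Operand(Smi::FromInt(1)))` above lean on V8's 32-bit smi encoding: the tag bit is 0 and the payload is shifted left by one, so adding two tagged values adds the untagged values, and `andi` against `kSmiTagMask` distinguishes smis from heap pointers. A minimal stand-alone sketch of that arithmetic (constants mirror the 32-bit encoding; illustration only, not V8 code):

    #include <cassert>
    #include <cstdint>

    const int32_t kSmiTagSize = 1;
    const int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;  // low bit is 0 for a smi

    int32_t SmiFromInt(int32_t v) { return v << kSmiTagSize; }
    int32_t SmiToInt(int32_t s)   { return s >> kSmiTagSize; }

    int main() {
      int32_t a = SmiFromInt(41);
      assert((a & kSmiTagMask) == 0);  // the andi(t0, a0, kSmiTagMask) test
      int32_t b = a + SmiFromInt(1);   // the optimistic increment: tags cancel
      assert(SmiToInt(b) == 42);       // no untag/retag step was needed
      return 0;
    }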
| 4415 | |
| 4416 | |
| 4417 void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) { | |
| 4418 UNIMPLEMENTED_(); | |
| 4419 //#ifdef DEBUG | |
| 4420 //// printf("CodeGenerator::VisitBinaryOperation\n"); | |
| 4421 //#endif | |
| 4422 // | |
| 4423 //// __ break_(0x00332); | |
| 4424 //#ifdef DEBUG | |
| 4425 // int original_height = frame_->height(); | |
| 4426 //#endif | |
| 4427 // VirtualFrame::SpilledScope spilled_scope; | |
| 4428 // Comment cmnt(masm_, "[ BinaryOperation"); | |
| 4429 // Token::Value op = node->op(); | |
| 4430 // | |
| 4431 // // According to ECMA-262 section 11.11, page 58, the binary logical | |
| 4432 // // operators must yield the result of one of the two expressions | |
| 4433 // // before any ToBoolean() conversions. This means that the value | |
| 4434 // // produced by a && or || operator is not necessarily a boolean. | |
| 4435 // | |
| 4436 // // NOTE: If the left hand side produces a materialized value (not in | |
| 4437 // // the CC register), we force the right hand side to do the | |
| 4438 // // same. This is necessary because we may have to branch to the exit | |
| 4439 // // after evaluating the left hand side (due to the shortcut | |
| 4440 // // semantics), but the compiler must (statically) know if the result | |
| 4441 // // of compiling the binary operation is materialized or not. | |
| 4442 // | |
| 4443 // if (op == Token::AND) { | |
| 4444 // JumpTarget is_true; | |
| 4445 // LoadConditionAndSpill(node->left(), | |
| 4446 // &is_true, | |
| 4447 // false_target(), | |
| 4448 // false); | |
| 4449 // if (has_valid_frame() && !has_cc()) { | |
| 4450 // // The left-hand side result is on top of the virtual frame. | |
| 4451 // JumpTarget pop_and_continue; | |
| 4452 // JumpTarget exit; | |
| 4453 // | |
| 4454 //// __ ldr(r0, frame_->Top()); // Duplicate the stack top. | |
| 4455 //// frame_->EmitPush(r0); | |
| 4456 // __ lw(t0, frame_->Top()); // Duplicate the stack top. | |
| 4457 // frame_->EmitPush(t0); | |
| 4458 // // Avoid popping the result if it converts to 'false' using the | |
| 4459 // // standard ToBoolean() conversion as described in ECMA-262, | |
| 4460 // // section 9.2, page 30. | |
| 4461 // ToBoolean(&pop_and_continue, &exit); | |
| 4462 // Branch(false, &exit); | |
| 4463 // | |
| 4464 // // Pop the result of evaluating the first part. | |
| 4465 // pop_and_continue.Bind(); | |
| 4466 //// frame_->EmitPop(r0); | |
| 4467 // frame_->EmitPop(t0); | |
| 4468 //// | |
| 4469 // // Evaluate right side expression. | |
| 4470 // is_true.Bind(); | |
| 4471 // LoadAndSpill(node->right()); | |
| 4472 // | |
| 4473 // // Exit (always with a materialized value). | |
| 4474 // exit.Bind(); | |
| 4475 // } else if (has_cc() || is_true.is_linked()) { | |
| 4476 // // The left-hand side is either (a) partially compiled to | |
| 4477 // // control flow with a final branch left to emit or (b) fully | |
| 4478 // // compiled to control flow and possibly true. | |
| 4479 // if (has_cc()) { | |
| 4480 // Branch(false, false_target()); | |
| 4481 // } | |
| 4482 // is_true.Bind(); | |
| 4483 // LoadConditionAndSpill(node->right(), | |
| 4484 // true_target(), | |
| 4485 // false_target(), | |
| 4486 // false); | |
| 4487 // } else { | |
| 4488 // // Nothing to do. | |
| 4489 // ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked()); | |
| 4490 // } | |
| 4491 // | |
| 4492 // } else if (op == Token::OR) { | |
| 4493 // JumpTarget is_false; | |
| 4494 // LoadConditionAndSpill(node->left(), | |
| 4495 // true_target(), | |
| 4496 // &is_false, | |
| 4497 // false); | |
| 4498 // if (has_valid_frame() && !has_cc()) { | |
| 4499 // // The left-hand side result is on top of the virtual frame. | |
| 4500 // JumpTarget pop_and_continue; | |
| 4501 // JumpTarget exit; | |
| 4502 // | |
| 4503 //// __ ldr(r0, frame_->Top()); | |
| 4504 //// frame_->EmitPush(r0); | |
| 4505 // __ lw(a0, frame_->Top()); | |
| 4506 // frame_->EmitPush(a0); | |
| 4507 // // Avoid popping the result if it converts to 'true' using the | |
| 4508 // // standard ToBoolean() conversion as described in ECMA-262, | |
| 4509 // // section 9.2, page 30. | |
| 4510 // ToBoolean(&exit, &pop_and_continue); | |
| 4511 // Branch(true, &exit); | |
| 4512 // __ nop(); // NOP_ADDED | |
| 4513 // | |
| 4514 // // Pop the result of evaluating the first part. | |
| 4515 // pop_and_continue.Bind(); | |
| 4516 // frame_->EmitPop(v0); | |
| 4517 // | |
| 4518 // // Evaluate right side expression. | |
| 4519 // is_false.Bind(); | |
| 4520 // LoadAndSpill(node->right()); | |
| 4521 // | |
| 4522 // // Exit (always with a materialized value). | |
| 4523 // exit.Bind(); | |
| 4524 // } else if (has_cc() || is_false.is_linked()) { | |
| 4525 // // The left-hand side is either (a) partially compiled to | |
| 4526 // // control flow with a final branch left to emit or (b) fully | |
| 4527 // // compiled to control flow and possibly false. | |
| 4528 // if (has_cc()) { | |
| 4529 // Branch(true, true_target()); | |
| 4530 // __ nop(); // NOP_ADDED | |
| 4531 // } | |
| 4532 // is_false.Bind(); | |
| 4533 // LoadConditionAndSpill(node->right(), | |
| 4534 // true_target(), | |
| 4535 // false_target(), | |
| 4536 // false); | |
| 4537 // } else { | |
| 4538 // // Nothing to do. | |
| 4539 // ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked()); | |
| 4540 // } | |
| 4541 // | |
| 4542 // } else { | |
| 4543 // // Optimize for the case where (at least) one of the expressions | |
| 4544 // // is a literal small integer. | |
| 4545 // Literal* lliteral = node->left()->AsLiteral(); | |
| 4546 // Literal* rliteral = node->right()->AsLiteral(); | |
| 4547 // // NOTE: The code below assumes that the slow cases (calls to runtime) | |
| 4548 // // never return a constant/immutable object. | |
| 4549 // bool overwrite_left = | |
| 4550 // (node->left()->AsBinaryOperation() != NULL && | |
| 4551 // node->left()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
| 4552 // bool overwrite_right = | |
| 4553 // (node->right()->AsBinaryOperation() != NULL && | |
| 4554 // node->right()->AsBinaryOperation()->ResultOverwriteAllowed()); | |
| 4555 // | |
| 4556 // if (rliteral != NULL && rliteral->handle()->IsSmi()) { | |
| 4557 // LoadAndSpill(node->left()); | |
| 4558 // SmiOperation(node->op(), | |
| 4559 // rliteral->handle(), | |
| 4560 // false, | |
| 4561 // overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE); | |
| 4562 // | |
| 4563 // } else if (lliteral != NULL && lliteral->handle()->IsSmi()) { | |
| 4564 // LoadAndSpill(node->right()); | |
| 4565 // SmiOperation(node->op(), | |
| 4566 // lliteral->handle(), | |
| 4567 // true, | |
| 4568 // overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE); | |
| 4569 // | |
| 4570 // } else { | |
| 4571 // OverwriteMode overwrite_mode = NO_OVERWRITE; | |
| 4572 // if (overwrite_left) { | |
| 4573 // overwrite_mode = OVERWRITE_LEFT; | |
| 4574 // } else if (overwrite_right) { | |
| 4575 // overwrite_mode = OVERWRITE_RIGHT; | |
| 4576 // } | |
| 4577 // LoadAndSpill(node->left()); | |
| 4578 // LoadAndSpill(node->right()); | |
| 4579 // GenericBinaryOperation(node->op(), overwrite_mode); | |
| 4580 // } | |
| 4581 // __ nop(); | |
| 4582 // frame_->EmitPush(v0); | |
| 4583 // } | |
| 4584 // ASSERT(!has_valid_frame() || | |
| 4585 // (has_cc() && frame_->height() == original_height) || | |
| 4586 // (!has_cc() && frame_->height() == original_height + 1)); | |
| 4587 } | |
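The ECMA-262 section 11.11 note inside VisitBinaryOperation above is the reason for all the materialization bookkeeping: `&&` and `||` must yield one of their operand values, not a synthesized boolean. A minimal sketch of that value semantics over doubles, with ToBoolean reduced to the numeric rule (illustration only):

    #include <cassert>
    #include <cmath>

    // ToBoolean for numbers (ECMA-262 section 9.2): false for 0, -0 and NaN.
    bool ToBoolean(double v) { return v != 0.0 && !std::isnan(v); }

    // '&&' and '||' return one of the operands, never a fresh boolean.
    double LogicalAnd(double l, double r) { return ToBoolean(l) ? r : l; }
    double LogicalOr(double l, double r)  { return ToBoolean(l) ? l : r; }

    int main() {
      assert(LogicalAnd(0.0, 7.0) == 0.0);  // 0 && 7 yields 0, not false
      assert(LogicalOr(0.0, 7.0) == 7.0);   // 0 || 7 yields 7, not true
      return 0;
    }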
| 4588 | |
| 4589 | |
| 4590 void CodeGenerator::VisitThisFunction(ThisFunction* node) { | |
| 4591 UNIMPLEMENTED_(); | |
| 4592 //#ifdef DEBUG | |
| 4593 //// printf("CodeGenerator::VisitThisFunction\n"); | |
| 4594 //#endif | |
| 4595 //#ifdef DEBUG | |
| 4596 // int original_height = frame_->height(); | |
| 4597 //#endif | |
| 4598 // VirtualFrame::SpilledScope spilled_scope; | |
| 4599 //// __ ldr(r0, frame_->Function()); | |
| 4600 //// frame_->EmitPush(r0); | |
| 4601 // __ lw(t0, frame_->Function()); | |
| 4602 // frame_->EmitPush(t0); | |
| 4603 // ASSERT(frame_->height() == original_height + 1); | |
| 4604 } | |
| 4605 | |
| 4606 | |
| 4607 void CodeGenerator::VisitCompareOperation(CompareOperation* node) { | |
| 4608 UNIMPLEMENTED_(); | |
| 4609 //#ifdef DEBUG | |
| 4610 //// printf("CodeGenerator::VisitCompareOperation\n"); | |
| 4611 //#endif | |
| 4612 //#ifdef DEBUG | |
| 4613 // int original_height = frame_->height(); | |
| 4614 //#endif | |
| 4615 // VirtualFrame::SpilledScope spilled_scope; | |
| 4616 // Comment cmnt(masm_, "[ CompareOperation"); | |
| 4617 // | |
| 4618 // // Get the expressions from the node. | |
| 4619 // Expression* left = node->left(); | |
| 4620 // Expression* right = node->right(); | |
| 4621 // Token::Value op = node->op(); | |
| 4622 // | |
| 4623 // // To make null checks efficient, we check if either left or right is the | |
| 4624 // // literal 'null'. If so, we optimize the code by inlining a null check | |
| 4625 // // instead of calling the (very) general runtime routine for checking | |
| 4626 // // equality. | |
| 4627 // if (op == Token::EQ || op == Token::EQ_STRICT) { | |
| 4628 // bool left_is_null = | |
| 4629 // left->AsLiteral() != NULL && left->AsLiteral()->IsNull(); | |
| 4630 // bool right_is_null = | |
| 4631 // right->AsLiteral() != NULL && right->AsLiteral()->IsNull(); | |
| 4632 // // The 'null' value can only be equal to 'null' or 'undefined'. | |
| 4633 // if (left_is_null || right_is_null) { | |
| 4634 // LoadAndSpill(left_is_null ? right : left); | |
| 4635 //// frame_->EmitPop(r0); | |
| 4636 //// __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 4637 //// __ cmp(r0, ip); | |
| 4638 // frame_->EmitPop(t0); | |
| 4639 // __ LoadRoot(t1, Heap::kNullValueRootIndex); | |
| 4640 //// | |
| 4641 //// // The 'null' value is only equal to 'undefined' if using non-strict | |
| 4642 //// // comparisons. | |
| 4643 // if (op != Token::EQ_STRICT) { | |
| 4644 // true_target()->Branch(eq, no_hint, t0, Operand(t1)); | |
| 4645 // __ nop(); // NOP_ADDED | |
| 4646 // | |
| 4647 //// __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 4648 //// __ cmp(r0, Operand(ip)); | |
| 4649 //// true_target()->Branch(eq); | |
| 4650 // __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); | |
| 4651 // true_target()->Branch(eq, no_hint, t0, Operand(t1)); | |
| 4652 // __ nop(); // NOP_ADDED | |
| 4653 // | |
| 4654 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 4655 //// false_target()->Branch(eq); | |
| 4656 // __ andi(t2, t0, Operand(kSmiTagMask)); | |
| 4657 // false_target()->Branch(eq, no_hint, t2, Operand(zero_reg)); | |
| 4658 // __ nop(); // NOP_ADDED | |
| 4659 // | |
| 4660 // // It can be an undetectable object. | |
| 4661 //// __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | |
| 4662 //// __ ldrb(r0, FieldMemOperand(r0, Map::kBitFieldOffset)); | |
| 4663 //// __ and_(r0, r0, Operand(1 << Map::kIsUndetectable)); | |
| 4664 //// __ cmp(r0, Operand(1 << Map::kIsUndetectable)); | |
| 4665 // __ lw(t0, FieldMemOperand(t0, HeapObject::kMapOffset)); | |
| 4666 // __ lbu(t0, FieldMemOperand(t0, Map::kBitFieldOffset)); | |
| 4667 // __ and_(t0, t0, Operand(1 << Map::kIsUndetectable)); | |
| 4668 //// __ cmp(r0, Operand(1 << Map::kIsUndetectable)); | |
| 4669 // __ mov(s5, t0); | |
| 4670 // __ li(s6, Operand(1 << Map::kIsUndetectable)); | |
| 4671 // } | |
| 4672 // | |
| 4673 // cc_reg_ = eq; | |
| 4674 // ASSERT(has_cc() && frame_->height() == original_height); | |
| 4675 // return; | |
| 4676 // } | |
| 4677 // } | |
| 4678 // | |
| 4679 // // To make typeof testing for natives implemented in JavaScript really | |
| 4680 // // efficient, we generate special code for expressions of the form: | |
| 4681 // // 'typeof <expression> == <string>'. | |
| 4682 // UnaryOperation* operation = left->AsUnaryOperation(); | |
| 4683 // if ((op == Token::EQ || op == Token::EQ_STRICT) && | |
| 4684 // (operation != NULL && operation->op() == Token::TYPEOF) && | |
| 4685 // (right->AsLiteral() != NULL && | |
| 4686 // right->AsLiteral()->handle()->IsString())) { | |
| 4687 // Handle<String> check(String::cast(*right->AsLiteral()->handle())); | |
| 4688 // | |
| 4689 // // Load the operand, move it to register r1->t1. | |
| 4690 // LoadTypeofExpression(operation->expression()); | |
| 4691 // frame_->EmitPop(t1); | |
| 4692 // | |
| 4693 // | |
| 4694 // if (check->Equals(Heap::number_symbol())) { | |
| 4695 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 4696 //// true_target()->Branch(eq); | |
| 4697 //// __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 4698 //// __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | |
| 4699 //// __ cmp(r1, ip); | |
| 4700 //// cc_reg_ = eq; | |
| 4701 // | |
| 4702 // __ andi(t2, t1, Operand(kSmiTagMask)); | |
| 4703 // true_target()->Branch(eq, no_hint, t2, Operand(zero_reg)); | |
| 4704 // __ nop(); // NOP_ADDED | |
| 4705 // __ lw(t1, FieldMemOperand(t1, HeapObject::kMapOffset)); | |
| 4706 // __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | |
| 4707 //// __ cmp(r1, ip); | |
| 4708 // __ mov(s5, t1); | |
| 4709 // __ mov(s6, ip); | |
| 4710 // cc_reg_ = eq; | |
| 4711 // | |
| 4712 // } else if (check->Equals(Heap::string_symbol())) { | |
| 4713 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 4714 //// false_target()->Branch(eq); | |
| 4715 // __ andi(t2, t1, Operand(kSmiTagMask)); | |
| 4716 // false_target()->Branch(eq, no_hint, t2, Operand(zero_reg)); | |
| 4717 // __ nop(); // NOP_ADDED | |
| 4718 // | |
| 4719 //// __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 4720 // __ lw(t1, FieldMemOperand(t1, HeapObject::kMapOffset)); | |
| 4721 // | |
| 4722 // // It can be an undetectable string object. | |
| 4723 //// __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); | |
| 4724 //// __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); | |
| 4725 //// __ cmp(r2, Operand(1 << Map::kIsUndetectable)); | |
| 4726 //// false_target()->Branch(eq); | |
| 4727 // __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset)); | |
| 4728 // __ and_(t2, t2, Operand(1 << Map::kIsUndetectable)); | |
| 4729 //// __ cmp(r2, Operand(1 << Map::kIsUndetectable)); | |
| 4730 // false_target()->Branch(eq, no_hint, t2, Operand(1 << Map::kIsUndetectable)); | |
| 4731 // __ nop(); // NOP_ADDED | |
| 4732 // | |
| 4733 //// __ ldrb(r2, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | |
| 4734 //// __ cmp(r2, Operand(FIRST_NONSTRING_TYPE)); | |
| 4735 //// cc_reg_ = lt; | |
| 4736 // __ lbu(t2, FieldMemOperand(t1, Map::kInstanceTypeOffset)); | |
| 4737 // __ mov(s5, t2); | |
| 4738 // __ li(s6, Operand(FIRST_NONSTRING_TYPE)); | |
| 4739 // cc_reg_ = less; | |
| 4740 // | |
| 4741 // } else if (check->Equals(Heap::boolean_symbol())) { | |
| 4742 //// __ LoadRoot(ip, Heap::kTrueValueRootIndex); | |
| 4743 //// __ cmp(r1, ip); | |
| 4744 //// true_target()->Branch(eq); | |
| 4745 //// __ LoadRoot(ip, Heap::kFalseValueRootIndex); | |
| 4746 //// __ cmp(r1, ip); | |
| 4747 //// cc_reg_ = eq; | |
| 4748 // __ LoadRoot(ip, Heap::kTrueValueRootIndex); | |
| 4749 //// __ cmp(r1, ip); | |
| 4750 // true_target()->Branch(eq, no_hint, t1, Operand(ip)); | |
| 4751 // __ nop(); // NOP_ADDED | |
| 4752 // __ LoadRoot(ip, Heap::kFalseValueRootIndex); | |
| 4753 //// __ cmp(r1, ip); | |
| 4754 // __ mov(s5, t1); | |
| 4755 // __ mov(s6, ip); | |
| 4756 // cc_reg_ = eq; | |
| 4757 // | |
| 4758 // } else if (check->Equals(Heap::undefined_symbol())) { | |
| 4759 // __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | |
| 4760 //// __ cmp(r1, ip); | |
| 4761 // true_target()->Branch(eq, no_hint, t1, Operand(ip)); | |
| 4762 // __ nop(); // NOP_ADDED | |
| 4763 // | |
| 4764 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 4765 // __ andi(t3, t1, Operand(kSmiTagMask)); | |
| 4766 // false_target()->Branch(eq, no_hint, t3, Operand(zero_reg)); | |
| 4767 // __ nop(); // NOP_ADDED | |
| 4768 // | |
| 4769 // // It can be an undetectable object. | |
| 4770 //// __ ldr(r1, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 4771 //// __ ldrb(r2, FieldMemOperand(r1, Map::kBitFieldOffset)); | |
| 4772 //// __ and_(r2, r2, Operand(1 << Map::kIsUndetectable)); | |
| 4773 //// __ cmp(r2, Operand(1 << Map::kIsUndetectable)); | |
| 4774 // __ lw(t1, FieldMemOperand(t1, HeapObject::kMapOffset)); | |
| 4775 // __ lbu(t2, FieldMemOperand(t1, Map::kBitFieldOffset)); | |
| 4776 // // Setup s5 and s6 with the values to be compared. | |
| 4777 // __ and_(s5, t2, Operand(1 << Map::kIsUndetectable)); | |
| 4778 // __ li(s6, Operand(1 << Map::kIsUndetectable)); | |
| 4779 // | |
| 4780 // cc_reg_ = eq; | |
| 4781 // | |
| 4782 // } else if (check->Equals(Heap::function_symbol())) { | |
| 4783 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 4784 //// false_target()->Branch(eq); | |
| 4785 //// __ CompareObjectType(r1, r1, r1, JS_FUNCTION_TYPE); | |
| 4786 //// cc_reg_ = eq; | |
| 4787 // __ andi(t2, t1, Operand(kSmiTagMask)); | |
| 4788 // false_target()->Branch(eq, no_hint, t2, Operand(zero_reg)); | |
| 4789 // __ nop(); // NOP_ADDED | |
| 4790 // __ GetObjectType(t1, t1, t1); | |
| 4791 // __ mov(s5, t1); | |
| 4792 // __ li(s6, Operand(JS_FUNCTION_TYPE)); | |
| 4793 // cc_reg_ = eq; | |
| 4794 // | |
| 4795 // } else if (check->Equals(Heap::object_symbol())) { | |
| 4796 //// __ tst(r1, Operand(kSmiTagMask)); | |
| 4797 //// false_target()->Branch(eq); | |
| 4798 // __ andi(t2, t1, Operand(kSmiTagMask)); | |
| 4799 // false_target()->Branch(eq, no_hint, t2, Operand(zero_reg)); | |
| 4800 // __ nop(); // NOP_ADDED | |
| 4801 // | |
| 4802 //// __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
| 4803 //// __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 4804 //// __ cmp(r1, ip); | |
| 4805 //// true_target()->Branch(eq); | |
| 4806 // __ lw(t2, FieldMemOperand(t1, HeapObject::kMapOffset)); | |
| 4807 // __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 4808 //// __ cmp(r1, ip); | |
| 4809 // true_target()->Branch(eq, no_hint, t1, Operand(ip)); | |
| 4810 // __ nop(); // NOP_ADDED | |
| 4811 // | |
| 4812 // // It can be an undetectable object. | |
| 4813 //// __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset)); | |
| 4814 //// __ and_(r1, r1, Operand(1 << Map::kIsUndetectable)); | |
| 4815 //// __ cmp(r1, Operand(1 << Map::kIsUndetectable)); | |
| 4816 //// false_target()->Branch(eq); | |
| 4817 // __ lbu(t1, FieldMemOperand(t2, Map::kBitFieldOffset)); | |
| 4818 // __ and_(t1, t1, Operand(1 << Map::kIsUndetectable)); | |
| 4819 //// __ cmp(r1, Operand(1 << Map::kIsUndetectable)); | |
| 4820 // false_target()->Branch(eq, no_hint, t1, Operand(1 << Map::kIsUndetectable)); | |
| 4821 // __ nop(); // NOP_ADDED | |
| 4822 // | |
| 4823 // | |
| 4824 //// __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | |
| 4825 //// __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 4826 //// false_target()->Branch(lt); | |
| 4827 //// __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); | |
| 4828 //// cc_reg_ = le; | |
| 4829 // __ lbu(t2, FieldMemOperand(t2, Map::kInstanceTypeOffset)); | |
| 4830 //// __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 4831 // false_target()->Branch(less, no_hint, t2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 4832 // __ nop(); // NOP_ADDED | |
| 4833 //// __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); | |
| 4834 // __ mov(s5, t2); | |
| 4835 // __ li(s6, Operand(LAST_JS_OBJECT_TYPE)); | |
| 4836 // cc_reg_ = less_equal; | |
| 4837 // | |
| 4838 // } else { | |
| 4839 // // Uncommon case: typeof testing against a string literal that is | |
| 4840 // // never returned from the typeof operator. | |
| 4841 // false_target()->Jump(); | |
| 4842 // __ nop(); // NOP_ADDED | |
| 4843 // } | |
| 4844 // ASSERT(!has_valid_frame() || | |
| 4845 // (has_cc() && frame_->height() == original_height)); | |
| 4846 // return; | |
| 4847 // } | |
| 4848 // | |
| 4849 // switch (op) { | |
| 4850 // case Token::EQ: | |
| 4851 // Comparison(eq, left, right, false); | |
| 4852 // break; | |
| 4853 // | |
| 4854 // case Token::LT: | |
| 4855 // Comparison(less, left, right); | |
| 4856 // break; | |
| 4857 // | |
| 4858 // case Token::GT: | |
| 4859 // Comparison(greater, left, right); | |
| 4860 // break; | |
| 4861 // | |
| 4862 // case Token::LTE: | |
| 4863 // Comparison(less_equal, left, right); | |
| 4864 // break; | |
| 4865 // | |
| 4866 // case Token::GTE: | |
| 4867 // Comparison(greater_equal, left, right); | |
| 4868 // break; | |
| 4869 // | |
| 4870 // case Token::EQ_STRICT: | |
| 4871 // Comparison(eq, left, right, true); | |
| 4872 // break; | |
| 4873 // | |
| 4874 // case Token::IN: { | |
| 4875 // LoadAndSpill(left); | |
| 4876 // LoadAndSpill(right); | |
| 4877 // Result arg_count(a0); | |
| 4878 //// __ mov(r0, Operand(1)); // not counting receiver | |
| 4879 // __ li(a0, Operand(1)); // not counting receiver | |
| 4880 // frame_->InvokeBuiltin(Builtins::IN, CALL_JS, &arg_count, 2); | |
| 4881 // __ nop(); // NOP_ADDED | |
| 4882 // frame_->EmitPush(v0); | |
| 4883 // break; | |
| 4884 // } | |
| 4885 // | |
| 4886 // case Token::INSTANCEOF: { | |
| 4887 // LoadAndSpill(left); | |
| 4888 // LoadAndSpill(right); | |
| 4889 // InstanceofStub stub; | |
| 4890 // frame_->CallStub(&stub, 2); | |
| 4891 // __ nop(); // NOP_ADDED | |
| 4892 // // At this point if instanceof succeeded then r0 == 0. | |
| 4893 //// __ tst(r0, Operand(r0)); | |
| 4894 // __ mov(s5, v0); | |
| 4895 // __ mov(s6, zero_reg); | |
| 4896 // cc_reg_ = eq; | |
| 4897 // break; | |
| 4898 // } | |
| 4899 // | |
| 4900 // default: | |
| 4901 // UNREACHABLE(); | |
| 4902 // } | |
| 4903 // ASSERT((has_cc() && frame_->height() == original_height) || | |
| 4904 // (!has_cc() && frame_->height() == original_height + 1)); | |
| 4905 } | |
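Several branches in VisitCompareOperation above test a map bit field with `and_` followed by a compare against the same mask; the idiom asks whether one particular flag bit is set. A stand-alone sketch of that test (the bit position is a placeholder, not the real `Map::kIsUndetectable` value):

    #include <cassert>

    int main() {
      const int kIsUndetectable = 4;  // placeholder bit position
      int bit_field = 1 << kIsUndetectable;

      // and_(t2, t2, Operand(1 << kIsUndetectable)) isolates the flag bit;
      // comparing the result against the mask asks "is that bit set?".
      int masked = bit_field & (1 << kIsUndetectable);
      assert(masked == (1 << kIsUndetectable));

      bit_field = 0;  // flag clear: the masked value no longer equals the mask
      assert((bit_field & (1 << kIsUndetectable)) != (1 << kIsUndetectable));
      return 0;
    }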
| 4906 | |
| 4907 | |
| 4908 #ifdef DEBUG | |
| 4909 bool CodeGenerator::HasValidEntryRegisters() { return true; } | |
| 4910 #endif | |
| 4911 | |
| 4912 | |
| 4913 #undef __ | |
| 4914 #define __ ACCESS_MASM(masm) | |
| 4915 | |
| 4916 | |
| 4917 Handle<String> Reference::GetName() { | |
| 4918 UNIMPLEMENTED_(); | |
| 4919 // ASSERT(type_ == NAMED); | |
| 4920 // Property* property = expression_->AsProperty(); | |
| 4921 // if (property == NULL) { | |
| 4922 // // Global variable reference treated as a named property reference. | |
| 4923 // VariableProxy* proxy = expression_->AsVariableProxy(); | |
| 4924 // ASSERT(proxy->AsVariable() != NULL); | |
| 4925 // ASSERT(proxy->AsVariable()->is_global()); | |
| 4926 // return proxy->name(); | |
| 4927 // } else { | |
| 4928 // Literal* raw_name = property->key()->AsLiteral(); | |
| 4929 // ASSERT(raw_name != NULL); | |
| 4930 // return Handle<String>(String::cast(*raw_name->handle())); | |
| 4931 // } | |
| 4932 return Handle<String>((String*)NULL); // UNIMPLEMENTED RETURN | |
| 4933 } | |
| 4934 | |
| 4935 | |
| 4936 void Reference::GetValue() { | |
| 4937 UNIMPLEMENTED_(); | |
| 4938 //#ifdef DEBUG | |
| 4939 //// printf("Reference::GetValue\n"); | |
| 4940 //#endif | |
| 4941 // ASSERT(cgen_->HasValidEntryRegisters()); | |
| 4942 // ASSERT(!is_illegal()); | |
| 4943 // ASSERT(!cgen_->has_cc()); | |
| 4944 // MacroAssembler* masm = cgen_->masm(); | |
| 4945 // Property* property = expression_->AsProperty(); | |
| 4946 // if (property != NULL) { | |
| 4947 // cgen_->CodeForSourcePosition(property->position()); | |
| 4948 // } | |
| 4949 // | |
| 4950 // switch (type_) { | |
| 4951 // case SLOT: { | |
| 4952 // Comment cmnt(masm, "[ Load from Slot"); | |
| 4953 // Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); | |
| 4954 // ASSERT(slot != NULL); | |
| 4955 // cgen_->LoadFromSlot(slot, NOT_INSIDE_TYPEOF); | |
| 4956 // break; | |
| 4957 // } | |
| 4958 //// | |
| 4959 // case NAMED: { | |
| 4960 // // TODO(1241834): Make sure that it is safe to ignore the | |
| 4961 // // distinction between expressions in a typeof and not in a typeof. If | |
| 4962 // // there is a chance that reference errors can be thrown below, we | |
| 4963 // // must distinguish between the two kinds of loads (typeof expression | |
| 4964 // // loads must not throw a reference error). | |
| 4965 // VirtualFrame* frame = cgen_->frame(); | |
| 4966 // Comment cmnt(masm, "[ Load from named Property"); | |
| 4967 // Handle<String> name(GetName()); | |
| 4968 // Variable* var = expression_->AsVariableProxy()->AsVariable(); | |
| 4969 // Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize)); | |
| 4970 // // Setup the name register. | |
| 4971 // Result name_reg(a2); | |
| 4972 // __ li(a2, Operand(name)); | |
| 4973 // ASSERT(var == NULL || var->is_global()); | |
| 4974 // RelocInfo::Mode rmode = (var == NULL) | |
| 4975 // ? RelocInfo::CODE_TARGET | |
| 4976 // : RelocInfo::CODE_TARGET_CONTEXT; | |
| 4977 // frame->CallCodeObject(ic, rmode, &name_reg, 0); | |
| 4978 // __ nop(); // NOP_ADDED | |
| 4979 // frame->EmitPush(v0); | |
| 4980 // break; | |
| 4981 // } | |
| 4982 // | |
| 4983 // case KEYED: { | |
| 4984 // // TODO(1241834): Make sure that it is safe to ignore the | |
| 4985 // // distinction between expressions in a typeof and not in a typeof. | |
| 4986 // | |
| 4987 // // TODO(181): Implement inlined version of array indexing once | |
| 4988 // // loop nesting is properly tracked on ARM. | |
| 4989 // VirtualFrame* frame = cgen_->frame(); | |
| 4990 // Comment cmnt(masm, "[ Load from keyed Property"); | |
| 4991 // ASSERT(property != NULL); | |
| 4992 // Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize)); | |
| 4993 // Variable* var = expression_->AsVariableProxy()->AsVariable(); | |
| 4994 // ASSERT(var == NULL || var->is_global()); | |
| 4995 // RelocInfo::Mode rmode = (var == NULL) | |
| 4996 // ? RelocInfo::CODE_TARGET | |
| 4997 // : RelocInfo::CODE_TARGET_CONTEXT; | |
| 4998 // frame->CallCodeObject(ic, rmode, 0); | |
| 4999 // __ nop(); // NOP_ADDED | |
| 5000 // frame->EmitPush(v0); | |
| 5001 // break; | |
| 5002 // } | |
| 5003 // | |
| 5004 // default: | |
| 5005 // UNREACHABLE(); | |
| 5006 // } | |
| 5007 } | |
| 5008 | |
| 5009 | |
| 5010 void Reference::SetValue(InitState init_state) { | |
| 5011 UNIMPLEMENTED_(); | |
| 5012 //#ifdef DEBUG | |
| 5013 //// printf("Reference::SetValue\n"); | |
| 5014 //#endif | |
| 5015 // | |
| 5016 // ASSERT(!is_illegal()); | |
| 5017 // ASSERT(!cgen_->has_cc()); | |
| 5018 // MacroAssembler* masm = cgen_->masm(); | |
| 5019 // VirtualFrame* frame = cgen_->frame(); | |
| 5020 // Property* property = expression_->AsProperty(); | |
| 5021 // if (property != NULL) { | |
| 5022 // cgen_->CodeForSourcePosition(property->position()); | |
| 5023 // } | |
| 5024 // | |
| 5025 // switch (type_) { | |
| 5026 // case SLOT: { | |
| 5027 // Comment cmnt(masm, "[ Store to Slot"); | |
| 5028 // Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); | |
| 5029 // ASSERT(slot != NULL); | |
| 5030 // if (slot->type() == Slot::LOOKUP) { | |
| 5031 // ASSERT(slot->var()->is_dynamic()); | |
| 5032 // | |
| 5033 // // For now, just do a runtime call. | |
| 5034 // frame->EmitPush(cp); | |
| 5035 //// __ mov(r0, Operand(slot->var()->name())); | |
| 5036 // __ li(a0, Operand(slot->var()->name())); | |
| 5037 // frame->EmitPush(a0); | |
| 5038 // | |
| 5039 // if (init_state == CONST_INIT) { | |
| 5040 // // Same as the case for a normal store, but ignores attribute | |
| 5041 // // (e.g. READ_ONLY) of context slot so that we can initialize | |
| 5042 // // const properties (introduced via eval("const foo = (some | |
| 5043 // // expr);")). Also, uses the current function context instead of | |
| 5044 // // the top context. | |
| 5045 // // | |
| 5046 // // Note that we must declare the foo upon entry of eval(), via a | |
| 5047 // // context slot declaration, but we cannot initialize it at the | |
| 5048 // // same time, because the const declaration may be at the end of | |
| 5049 // // the eval code (sigh...) and the const variable may have been | |
| 5050 // // used before (where its value is 'undefined'). Thus, we can only | |
| 5051 // // do the initialization when we actually encounter the expression | |
| 5052 // // and when the expression operands are defined and valid, and | |
| 5053 // // thus we need the split into 2 operations: declaration of the | |
| 5054 // // context slot followed by initialization. | |
| 5055 // frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3); | |
| 5056 // } else { | |
| 5057 // frame->CallRuntime(Runtime::kStoreContextSlot, 3); | |
| 5058 // } | |
| 5059 // __ nop(); // NOP_ADDED | |
| 5060 // // Storing a variable must keep the (new) value on the expression | |
| 5061 // // stack. This is necessary for compiling assignment expressions. | |
| 5062 //// frame->EmitPush(r0); | |
| 5063 // frame->EmitPush(v0); | |
| 5064 // | |
| 5065 // } else { | |
| 5066 // ASSERT(!slot->var()->is_dynamic()); | |
| 5067 // | |
| 5068 // JumpTarget exit; | |
| 5069 // if (init_state == CONST_INIT) { | |
| 5070 // ASSERT(slot->var()->mode() == Variable::CONST); | |
| 5071 // // Only the first const initialization must be executed (the slot | |
| 5072 // // still contains 'the hole' value). When the assignment is | |
| 5073 // // executed, the code is identical to a normal store (see below). | |
| 5074 // Comment cmnt(masm, "[ Init const"); | |
| 5075 //// __ ldr(r2, cgen_->SlotOperand(slot, r2)); | |
| 5076 //// __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
| 5077 //// __ cmp(r2, ip); | |
| 5078 //// exit.Branch(ne); | |
| 5079 // __ lw(a2, cgen_->SlotOperand(slot, a2)); | |
| 5080 // exit.Branch(ne, no_hint, a2, Operand(Heap::kTheHoleValueRootIndex)); | |
| 5081 // __ nop(); // NOP_ADDED | |
| 5082 // } | |
| 5083 // | |
| 5084 // // We must execute the store. Storing a variable must keep the | |
| 5085 // // (new) value on the stack. This is necessary for compiling | |
| 5086 // // assignment expressions. | |
| 5087 // // | |
| 5088 // // Note: We will reach here even with slot->var()->mode() == | |
| 5089 // // Variable::CONST because of const declarations which will | |
| 5090 // // initialize consts to 'the hole' value and by doing so, end up | |
| 5091 // // calling this code. r2 may be loaded with context; used below in | |
| 5092 // // RecordWrite. | |
| 5093 //// frame->EmitPop(r0); | |
| 5094 //// __ str(r0, cgen_->SlotOperand(slot, r2)); | |
| 5095 //// frame->EmitPush(r0); | |
| 5096 // frame->EmitPop(a0); | |
| 5097 // __ sw(a0, cgen_->SlotOperand(slot, a2)); | |
| 5098 // frame->EmitPush(a0); | |
| 5099 // if (slot->type() == Slot::CONTEXT) { | |
| 5100 // // Skip write barrier if the written value is a smi. | |
| 5101 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 5102 //// exit.Branch(eq); | |
| 5103 // __ andi(ip, a0, Operand(kSmiTagMask)); | |
| 5104 // exit.Branch(eq, no_hint, ip, Operand(zero_reg)); | |
| 5105 // // r2->a2 is loaded with context when calling SlotOperand above. | |
| 5106 // int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize; | |
| 5107 //// __ mov(r3, Operand(offset)); | |
| 5108 //// __ RecordWrite(r2, r3, r1); | |
| 5109 // __ li(a3, Operand(offset)); | |
| 5110 // __ RecordWrite(a2, a3, a1); | |
| 5111 // } | |
| 5112 // // If we definitely did not jump over the assignment, we do not need | |
| 5113 // // to bind the exit label. Doing so can defeat peephole | |
| 5114 // // optimization. | |
| 5115 // if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) { | |
| 5116 // exit.Bind(); | |
| 5117 // } | |
| 5118 // } | |
| 5119 // break; | |
| 5120 // } | |
| 5121 //// | |
| 5122 // case NAMED: { | |
| 5123 // Comment cmnt(masm, "[ Store to named Property"); | |
| 5124 // // Call the appropriate IC code. | |
| 5125 // Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize)); | |
| 5126 // Handle<String> name(GetName()); | |
| 5127 // | |
| 5128 // Result value(a0); | |
| 5129 // frame->EmitPop(a0); | |
| 5130 // | |
| 5131 // // Setup the name register. | |
| 5132 // Result property_name(a2); | |
| 5133 // __ li(a2, Operand(name)); | |
| 5134 // frame->CallCodeObject(ic, | |
| 5135 // RelocInfo::CODE_TARGET, | |
| 5136 // &value, | |
| 5137 // &property_name, | |
| 5138 // 0); | |
| 5139 // __ nop(); // NOP_ADDED | |
| 5140 // frame->EmitPush(v0); | |
| 5141 //// __ break_(0x08109); | |
| 5142 // break; | |
| 5143 // } | |
| 5144 // | |
| 5145 // case KEYED: { | |
| 5146 // Comment cmnt(masm, "[ Store to keyed Property"); | |
| 5147 // Property* property = expression_->AsProperty(); | |
| 5148 // ASSERT(property != NULL); | |
| 5149 // cgen_->CodeForSourcePosition(property->position()); | |
| 5150 // | |
| 5151 // // Call IC code. | |
| 5152 // Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize)); | |
| 5153 // // TODO(1222589): Make the IC grab the values from the stack. | |
| 5154 // Result value(a0); // We use a0 because it will be used as an argument. | |
| 5155 // frame->EmitPop(a0); // value | |
| 5156 // frame->CallCodeObject(ic, RelocInfo::CODE_TARGET, &value, 0); | |
| 5157 // __ nop(); // NOP_ADDED | |
| 5158 // frame->EmitPush(v0); | |
| 5159 // break; | |
| 5160 // } | |
| 5161 // | |
| 5162 // default: | |
| 5163 // UNREACHABLE(); | |
| 5164 // } | |
| 5165 } | |
| 5166 | |
| 5167 | |
| 5168 // Takes a Smi and converts to an IEEE 64 bit floating point value in two | |
| 5169 // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and | |
| 5170 // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a | |
| 5171 // scratch register. Destroys the source register. No GC occurs during this | |
| 5172 // stub so you don't have to set up the frame. | |
| 5173 // We do not need this as we have an FPU. | |
| 5174 class ConvertToDoubleStub : public CodeStub { | |
| 5175 public: | |
| 5176 ConvertToDoubleStub(Register result_reg_1, | |
| 5177 Register result_reg_2, | |
| 5178 Register source_reg, | |
| 5179 Register scratch_reg) | |
| 5180 : result1_(result_reg_1), | |
| 5181 result2_(result_reg_2), | |
| 5182 source_(source_reg), | |
| 5183 zeros_(scratch_reg) { } | |
| 5184 | |
| 5185 private: | |
| 5186 Register result1_; | |
| 5187 Register result2_; | |
| 5188 Register source_; | |
| 5189 Register zeros_; | |
| 5190 | |
| 5191 // Minor key encoding in 16 bits. | |
| 5192 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
| 5193 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
| 5194 | |
| 5195 Major MajorKey() { return ConvertToDouble; } | |
| 5196 int MinorKey() { | |
| 5197 // Encode the parameters in a unique 16 bit value. | |
| 5198 return result1_.code() + | |
| 5199 (result2_.code() << 4) + | |
| 5200 (source_.code() << 8) + | |
| 5201 (zeros_.code() << 12); | |
| 5202 } | |
| 5203 | |
| 5204 void Generate(MacroAssembler* masm); | |
| 5205 | |
| 5206 const char* GetName() { return "ConvertToDoubleStub"; } | |
| 5207 | |
| 5208 #ifdef DEBUG | |
| 5209 void Print() { PrintF("ConvertToDoubleStub\n"); } | |
| 5210 #endif | |
| 5211 }; | |
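`MinorKey()` above packs the four register codes into non-overlapping 4-bit nibbles of one 16-bit value; the key stays unique as long as every code fits in 4 bits (true for the 16 registers of ARM, where this stub originated). A quick stand-alone check of the packing:

    #include <cassert>

    // Mirrors ConvertToDoubleStub::MinorKey(): four 4-bit register codes
    // packed into one 16-bit value, one nibble each.
    int MinorKey(int r1, int r2, int source, int zeros) {
      return r1 + (r2 << 4) + (source << 8) + (zeros << 12);
    }

    int main() {
      assert(MinorKey(2, 3, 4, 5) == 0x5432);  // each code lands in its own nibble
      return 0;
    }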
| 5212 | |
| 5213 | |
| 5214 void ConvertToDoubleStub::Generate(MacroAssembler* masm) { | |
| 5215 UNIMPLEMENTED(); | |
| 5216 __ break_(0x00666); | |
| 5217 } | |
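The format comment above ConvertToDoubleStub (1 sign bit, 11 exponent bits biased by 1023, 52 fraction bits split 20/32 across two words) can be checked directly by taking a double apart. A small stand-alone decomposition:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 1.0;
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));

      uint64_t sign     = bits >> 63;                 // 1 sign bit
      uint64_t exponent = (bits >> 52) & 0x7FF;       // 11 exponent bits
      uint64_t fraction = bits & ((1ULL << 52) - 1);  // 52 fraction bits

      // 1.0 is 2^0 with an all-zero fraction; the stored exponent is the bias.
      assert(sign == 0 && exponent == 1023 && fraction == 0);

      // High word: sign, exponent and 20 fraction bits; low word: the other 32.
      uint32_t high = static_cast<uint32_t>(bits >> 32);
      assert(high == 0x3FF00000u);
      assert(static_cast<uint32_t>(bits) == 0);
      return 0;
    }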
| 5218 | |
| 5219 | |
| 5220 // This stub can convert a signed int32 to a heap number (double). It does | |
| 5221 // not work for int32s that are in Smi range! No GC occurs during this stub | |
| 5222 // so you don't have to set up the frame. | |
| 5223 class WriteInt32ToHeapNumberStub : public CodeStub { | |
| 5224 public: | |
| 5225 WriteInt32ToHeapNumberStub(Register the_int, | |
| 5226 Register the_heap_number, | |
| 5227 Register scratch) | |
| 5228 : the_int_(the_int), | |
| 5229 the_heap_number_(the_heap_number), | |
| 5230 scratch_(scratch) { } | |
| 5231 | |
| 5232 private: | |
| 5233 Register the_int_; | |
| 5234 Register the_heap_number_; | |
| 5235 Register scratch_; | |
| 5236 | |
| 5237 // Minor key encoding in 16 bits. | |
| 5238 class ModeBits: public BitField<OverwriteMode, 0, 2> {}; | |
| 5239 class OpBits: public BitField<Token::Value, 2, 14> {}; | |
| 5240 | |
| 5241 Major MajorKey() { return WriteInt32ToHeapNumber; } | |
| 5242 int MinorKey() { | |
| 5243 // Encode the parameters in a unique 16 bit value. | |
| 5244 return the_int_.code() + | |
| 5245 (the_heap_number_.code() << 4) + | |
| 5246 (scratch_.code() << 8); | |
| 5247 } | |
| 5248 | |
| 5249 void Generate(MacroAssembler* masm); | |
| 5250 | |
| 5251 const char* GetName() { return "WriteInt32ToHeapNumberStub"; } | |
| 5252 | |
| 5253 #ifdef DEBUG | |
| 5254 void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); } | |
| 5255 #endif | |
| 5256 }; | |
| 5257 | |
| 5258 | |
| 5259 // See comment for class. | |
| 5260 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler *masm) { | |
| 5261 UNIMPLEMENTED(); | |
| 5262 __ break_(0x00666); | |
| 5263 } | |
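The class comment's warning that this stub must not see int32s in smi range follows from the encoding: on 32-bit targets a smi payload is 31 bits, so only values outside [-2^30, 2^30 - 1] actually need a heap number. A stand-alone sketch of that boundary (constants assume the 32-bit smi layout):

    #include <cassert>
    #include <cstdint>

    int main() {
      const int32_t kSmiMin = -(INT32_C(1) << 30);     // smallest 31-bit payload
      const int32_t kSmiMax = (INT32_C(1) << 30) - 1;  // largest 31-bit payload

      int32_t fits = 123456;  // representable as a smi, so the stub never sees it
      assert(fits >= kSmiMin && fits <= kSmiMax);

      int32_t needs_heap_number = kSmiMax + 1;  // valid int32, too big for a smi
      assert(needs_heap_number > kSmiMax);
      return 0;
    }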
| 5264 | |
| 5265 | |
| 5266 // Handle the case where the lhs and rhs are the same object. | |
| 5267 // Equality is almost reflexive (everything but NaN), so this is a test | |
| 5268 // for "identity and not NaN". | |
| 5269 //static void EmitIdenticalObjectComparison(MacroAssembler* masm, | |
| 5270 // Label* slow, | |
| 5271 // Condition cc) { | |
| 5272 // UNIMPLEMENTED_(); | |
| 5273 // Label not_identical; | |
| 5274 //// __ cmp(r0, Operand(r1)); | |
| 5275 //// __ b(ne, &not_identical); | |
| 5276 // __ bcond(ne, &not_identical, a0, Operand(a1)); | |
| 5277 // __ nop(); // NOP_ADDED | |
| 5278 // | |
| 5279 //// Register exp_mask_reg = r5; | |
| 5280 //// __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask)); | |
| 5281 // Register exp_mask_reg = t5; | |
| 5282 // __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); | |
| 5283 // | |
| 5284 // // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | |
| 5285 // // so we do the second best thing - test it ourselves. | |
| 5286 // Label heap_number, return_equal; | |
| 5287 // // They are both equal and they are not both Smis so both of them are not | |
| 5288 // // Smis. If it's not a heap number, then return equal. | |
| 5289 // if (cc == less || cc == greater) { | |
| 5290 //// __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); | |
| 5291 //// __ b(ge, slow); | |
| 5292 // __ GetObjectType(a0, t4, t4); | |
| 5293 // __ bcond(greater, slow, t4, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 5294 // __ nop(); // NOP_ADDED | |
| 5295 // } else { | |
| 5296 //// __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | |
| 5297 //// __ b(eq, &heap_number); | |
| 5298 // __ GetObjectType(a0, t4, t4); | |
| 5299 // __ bcond(eq, &heap_number, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5300 // __ nop(); // NOP_ADDED | |
| 5301 // // Comparing JS objects with <=, >= is complicated. | |
| 5302 // if (cc != eq) { | |
| 5303 //// __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 5304 //// __ b(ge, slow); | |
| 5305 // __ bcond(greater, slow, t4, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 5306 // __ nop(); // NOP_ADDED | |
| 5307 // } | |
| 5308 // } | |
| 5309 // __ bind(&return_equal); | |
| 5310 // if (cc == less) { | |
| 5311 // __ li(v0, Operand(GREATER)); // Things aren't less than themselves. | |
| 5312 // } else if (cc == greater) { | |
| 5313 // __ li(v0, Operand(LESS)); // Things aren't greater than themselves. | |
| 5314 // } else { | |
| 5315 // __ li(v0, Operand(0)); // Things are <=, >=, ==, === themselves. | |
| 5316 // } | |
| 5317 //// __ mov(pc, Operand(lr)); // Return. | |
| 5318 // __ jr(Operand(ra)); | |
| 5319 // __ nop(); // NOP_ADDED | |
| 5320 // | |
| 5321 // // For less and greater we don't have to check for NaN since the result of | |
| 5322 // // x < x is false regardless. For the others here is some code to check | |
| 5323 // // for NaN. | |
| 5324 // if (cc != less && cc != greater) { | |
| 5325 // __ bind(&heap_number); | |
| 5326 // // It is a heap number, so return non-equal if it's NaN and equal if it's | |
| 5327 // // not NaN. | |
| 5328 // __ ldc1(f4, FieldMemOperand(a0, HeapNumber::kValueOffset)); | |
| 5329 // __ ldc1(f6, FieldMemOperand(a0, HeapNumber::kValueOffset)); | |
| 5330 // __ c(UN, D, f4, f6); | |
| 5331 // __ bc1f(&return_equal); | |
| 5332 // __ nop(); // NOP_ADDED | |
| 5333 // if (cc != eq) { | |
| 5334 // __ jcond(Operand(ra), eq, a0, Operand(zero_reg)); | |
| 5335 // if (cc == less_equal) { | |
| 5336 // __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. | |
| 5337 // } else { | |
| 5338 // __ li(v0, Operand(LESS)); // NaN >= NaN should fail. | |
| 5339 // } | |
| 5340 // } | |
| 5341 // __ jr(Operand(ra)); | |
| 5342 // __ nop(); // NOP_ADDED | |
| 5343 // } | |
| 5344 // // No fall through here. | |
| 5345 // | |
| 5346 // __ bind(&not_identical); | |
| 5347 //} | |
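The "almost reflexive" remark above is the one IEEE wrinkle the commented code works around: every value compares equal to itself except NaN, while `x < x` is false for every x, NaN included, which is why the less/greater paths skip the NaN check. A stand-alone check:

    #include <cassert>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(!(nan == nan));  // equality is reflexive for everything but NaN
      assert(nan != nan);

      double x = 1.5;
      assert(!(x < x));      // x < x is false regardless of the value of x,
      assert(!(nan < nan));  // so ordered comparisons need no NaN special case
      return 0;
    }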
| 5348 | |
| 5349 | |
| 5350 // See comment at call site. | |
| 5351 //static void EmitSmiNonsmiComparison(MacroAssembler* masm, | |
| 5352 // Label* both_loaded_as_doubles, | |
| 5353 // Label* slow, | |
| 5354 // bool strict) { | |
| 5355 // UNIMPLEMENTED_(); | |
| 5356 // Label lhs_is_smi; | |
| 5357 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 5358 // __ bcond(eq, &lhs_is_smi, t0, Operand(zero_reg)); | |
| 5359 // __ nop(); // NOP_ADDED | |
| 5360 // | |
| 5361 // // Rhs is a Smi. | |
| 5362 // // Check whether the non-smi is a heap number. | |
| 5363 //// __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | |
| 5364 // __ GetObjectType(a0, t4, t4); | |
| 5365 // if (strict) { | |
| 5366 // // If lhs was not a number and rhs was a Smi then strict equality cannot | |
| 5367 // // succeed. Return non-equal (r0 is already not zero) | |
| 5368 //// __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | |
| 5369 // __ mov(v0, a0); | |
| 5370 // __ jcond(Operand(ra), ne, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5371 // __ nop(); // NOP_ADDED | |
| 5372 // } else { | |
| 5373 // // Smi compared non-strictly with a non-Smi non-heap-number. Call | |
| 5374 // // the runtime. | |
| 5375 //// __ b(ne, slow); | |
| 5376 // __ bcond(ne, slow, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5377 // __ nop(); // NOP_ADDED | |
| 5378 // } | |
| 5379 // | |
| 5380 // // Rhs is a smi, lhs is a number. | |
| 5381 // // Convert a1 to double. | |
| 5382 // __ mtc1(f12, a1); | |
| 5383 // __ cvt_d_s(f12, f12); | |
| 5384 // __ ldc1(f14, FieldMemOperand(a0, HeapNumber::kValueOffset)); | |
| 5385 // | |
| 5386 // // We now have both loaded as doubles. | |
| 5387 //// __ pop(lr); | |
| 5388 //// __ jmp(rhs_not_nan); | |
| 5389 // __ b(both_loaded_as_doubles); | |
| 5390 // __ nop(); // NOP_ADDED | |
| 5391 // | |
| 5392 // __ bind(&lhs_is_smi); | |
| 5393 // // Lhs is a Smi. Check whether the non-smi is a heap number. | |
| 5394 //// __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); | |
| 5395 // __ GetObjectType(a1, t4, t4); | |
| 5396 // if (strict) { | |
| 5397 // // If lhs was not a number and rhs was a Smi then strict equality cannot | |
| 5398 // // succeed. Return non-equal. | |
| 5399 //// __ mov(r0, Operand(1), LeaveCC, ne); // Non-zero indicates not equal. | |
| 5400 //// __ mov(pc, Operand(lr), LeaveCC, ne); // Return. | |
| 5401 // __ li(v0, Operand(1)); | |
| 5402 // __ jcond(Operand(ra), ne, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5403 // __ nop(); // NOP_ADDED | |
| 5404 // } else { | |
| 5405 // // Smi compared non-strictly with a non-Smi non-heap-number. Call | |
| 5406 // // the runtime. | |
| 5407 //// __ b(ne, slow); | |
| 5408 // __ bcond(ne, slow, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5409 // __ nop(); // NOP_ADDED | |
| 5410 // } | |
| 5411 // | |
| 5412 // // Lhs is a smi, rhs is a number. | |
| 5413 // // r0 is Smi and r1 is heap number. | |
| 5414 //// __ push(lr); | |
| 5415 //// __ ldr(r2, FieldMemOperand(r1, HeapNumber::kValueOffset)); | |
| 5416 //// __ ldr(r3, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize)); | |
| 5417 //// __ mov(r7, Operand(r0)); | |
| 5418 //// ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
| 5419 //// __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | |
| 5420 //// __ pop(lr); | |
| 5421 // // Convert a0 to double. | |
| 5422 // __ mtc1(f14, a0); | |
| 5423 // __ cvt_d_w(f14, f14); // Was cvt_d_s: the moved value is an integer word. | |
| 5424 // __ ldc1(f12, FieldMemOperand(a1, HeapNumber::kValueOffset)); | |
| 5425 //// // Fall through to both_loaded_as_doubles. | |
| 5426 //} | |
| 5427 | |
| 5428 | |
| 5429 //void EmitNanCheck(MacroAssembler* masm, Condition cc) { | |
| 5430 // UNIMPLEMENTED_(); | |
| 5431 // // We use the coprocessor c.cond instructions. | |
| 5432 // Label one_is_nan, neither_is_nan; | |
| 5433 // | |
| 5434 // // Test the Unordered condition on the two doubles. The condition is | |
| 5435 // // true if either of them is NaN. | |
| 5436 //// __ break_(0x00071); | |
| 5437 // __ c(UN, D, f12, f14); | |
| 5438 // __ bc1f(&neither_is_nan); | |
| 5439 // __ nop(); | |
| 5440 // __ bc1t(&one_is_nan); | |
| 5441 // __ nop(); | |
| 5442 // | |
| 5443 // // At least one is NaN. | |
| 5444 // __ bind(&one_is_nan); | |
| 5445 // // NaN comparisons always fail. | |
| 5446 // // Load whatever we need in v0 to make the comparison fail. | |
| 5447 // if (cc == less || cc == less_equal) { | |
| 5448 //// __ mov(r0, Operand(GREATER)); | |
| 5449 // __ li(v0, Operand(GREATER)); | |
| 5450 // } else { | |
| 5451 //// __ mov(r0, Operand(LESS)); | |
| 5452 // __ li(v0, Operand(LESS)); | |
| 5453 // } | |
| 5454 //// __ mov(pc, Operand(lr)); // Return. | |
| 5455 // __ jr(Operand(ra)); | |
| 5456 // | |
| 5457 // __ bind(&neither_is_nan); | |
| 5458 //} | |
| 5459 | |
| 5460 | |
| 5461 // See comment at call site. | |
| 5462 //static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { | |
| 5463 // UNIMPLEMENTED_(); | |
| 5464 // // f12 and f14 have the two doubles. Neither is a NaN. | |
| 5465 // // Call a native function to do a comparison between two non-NaNs. | |
| 5466 // // Call C routine that may not cause GC or other trouble. | |
| 5467 // // We use a manual call and return because we need the argument | |
| 5468 // // slots to be freed. | |
| 5469 //// __ mov(r5, Operand(ExternalReference::compare_doubles())); | |
| 5470 //// __ Jump(r5); // Tail call. | |
| 5471 //// __ break_(0x00091); | |
| 5472 // __ addiu(sp, sp, -8); | |
| 5473 // __ sw(s3, MemOperand(sp, 4)); | |
| 5474 // __ sw(ra, MemOperand(sp, 0)); | |
| 5475 // | |
| 5476 // __ li(t9, Operand(ExternalReference::compare_doubles())); | |
| 5477 // __ mov(s3, sp); // Save sp | |
| 5478 // __ li(t0, Operand(~7)); // Load sp mask | |
| 5479 // __ and_(sp, sp, Operand(t0)); // Align sp. | |
| 5480 // __ Call(t9); // Call the code | |
| 5481 // __ addiu(sp, sp, Operand(-StandardFrameConstants::kRArgsSlotsSize)); | |
| 5482 // __ mov(sp, s3); // Restore sp. | |
| 5483 // | |
| 5484 // __ lw(s3, MemOperand(sp, 4)); | |
| 5485 // __ lw(ra, MemOperand(sp, 0)); | |
| 5486 // __ addiu(sp, sp, 8); | |
| 5487 // | |
| 5488 // __ jr(ra); | |
| 5489 // __ nop(); | |
| 5490 //} | |
| 5491 | |
| 5492 | |
| 5493 // See comment at call site. | |
| 5494 //static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) { | |
| 5495 // UNIMPLEMENTED_(); | |
| 5496 // // If either operand is a JSObject or an oddball value, then they are | |
| 5497 // // not equal since their pointers are different. | |
| 5498 // // There is no test for undetectability in strict equality. | |
| 5499 // ASSERT(LAST_TYPE == JS_FUNCTION_TYPE); | |
| 5500 // Label first_non_object; | |
| 5501 // // Get the type of the first operand into t2 and compare it with | |
| 5502 // // FIRST_JS_OBJECT_TYPE. | |
| 5503 //// __ CompareObjectType(r0, r2, r2, FIRST_JS_OBJECT_TYPE); | |
| 5504 //// __ b(lt, &first_non_object); | |
| 5505 // __ GetObjectType(a0, t2, t2); | |
| 5506 // __ bcond(less, &first_non_object, t2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 5507 // __ nop(); // NOP_ADDED | |
| 5508 // | |
| 5509 // // Return non-zero (v0 is set to 1 below). | |
| 5510 // Label return_not_equal; | |
| 5511 // __ bind(&return_not_equal); | |
| 5512 //// __ mov(pc, Operand(lr)); // Return. | |
| 5513 // __ li(v0, Operand(1)); | |
| 5514 // __ jr(ra); | |
| 5515 // | |
| 5516 // __ bind(&first_non_object); | |
| 5517 // // Check for oddballs: true, false, null, undefined. | |
| 5518 //// __ cmp(r2, Operand(ODDBALL_TYPE)); | |
| 5519 //// __ b(eq, &return_not_equal); | |
| 5520 // __ bcond(eq, &return_not_equal, t2, Operand(ODDBALL_TYPE)); | |
| 5521 // __ nop(); | |
| 5522 // | |
| 5523 //// __ CompareObjectType(r1, r3, r3, FIRST_JS_OBJECT_TYPE); | |
| 5524 //// __ b(ge, &return_not_equal); | |
| 5525 // __ GetObjectType(a1, t3, t3); | |
| 5526 // __ bcond(greater_equal, &return_not_equal, t3, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 5527 // | |
| 5528 // // Check for oddballs: true, false, null, undefined. | |
| 5529 //// __ cmp(r3, Operand(ODDBALL_TYPE)); | |
| 5530 //// __ b(eq, &return_not_equal); | |
| 5531 // __ bcond(eq, &return_not_equal, t3, Operand(ODDBALL_TYPE)); | |
| 5532 // __ nop(); | |
| 5533 //} | |
| 5534 | |
| 5535 | |
| 5536 // See comment at call site. | |
| 5537 //static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, | |
| 5538 // Label* both_loaded_as_doubles, | |
| 5539 // Label* not_heap_numbers, | |
| 5540 // Label* slow) { | |
| 5541 // UNIMPLEMENTED_(); | |
| 5542 //// __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); | |
| 5543 //// __ b(ne, not_heap_numbers); | |
| 5544 //// __ CompareObjectType(r1, r3, r3, HEAP_NUMBER_TYPE); | |
| 5545 //// __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | |
| 5546 // __ GetObjectType(a0, t2, t2); | |
| 5547 // __ bcond(ne, not_heap_numbers, t2, Operand(HEAP_NUMBER_TYPE)); | |
| 5548 // __ nop(); // NOP_ADDED | |
| 5549 // __ GetObjectType(a1, t3, t3); | |
| 5550 // // First was a heap number, second wasn't. Go slow case. | |
| 5551 // __ bcond(ne, slow, t3, Operand(HEAP_NUMBER_TYPE)); | |
| 5552 // __ nop(); // NOP_ADDED | |
| 5553 // | |
| 5554 // // Both are heap numbers. Load them up then jump to the code we have | |
| 5555 // // for that. | |
| 5556 // __ ldc1(f12, FieldMemOperand(a0, HeapNumber::kValueOffset)); | |
| 5557 // __ ldc1(f14, FieldMemOperand(a1, HeapNumber::kValueOffset)); | |
| 5558 // __ b(both_loaded_as_doubles); | |
| 5559 // __ nop(); // NOP_ADDED | |
| 5560 //} | |
| 5561 | |
| 5562 | |
| 5563 // Fast negative check for symbol-to-symbol equality. | |
| 5564 //static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) { | |
| 5565 // UNIMPLEMENTED_(); | |
| 5566 // // t2 is the object type of a0. | |
| 5567 //// __ tst(r2, Operand(kIsNotStringMask)); | |
| 5568 //// __ b(ne, slow); | |
| 5569 // __ andi(t4, t2, Operand(kIsNotStringMask)); | |
| 5570 // __ bcond(ne, slow, t4, Operand(zero_reg)); | |
| 5571 // __ nop(); // NOP_ADDED | |
| 5572 //// __ tst(r2, Operand(kIsSymbolMask)); | |
| 5573 //// __ b(eq, slow); | |
| 5574 // __ andi(t4, t2, Operand(kIsSymbolMask)); | |
| 5575 // __ bcond(eq, slow, t4, Operand(zero_reg)); // Not a symbol: go slow. | |
| 5576 // __ nop(); // NOP_ADDED | |
| 5577 //// __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); | |
| 5578 //// __ b(ge, slow); | |
| 5579 // __ GetObjectType(a1, t3, t3); | |
| 5580 // __ bcond(greater_equal, slow, t3, Operand(FIRST_NONSTRING_TYPE)); // Match the ARM original. | |
| 5581 // __ nop(); // NOP_ADDED | |
| 5582 //// __ tst(r3, Operand(kIsSymbolMask)); | |
| 5583 //// __ b(eq, slow); | |
| 5584 // __ andi(t5, t3, Operand(kIsSymbolMask)); | |
| 5585 // __ bcond(eq, slow, t5, Operand(zero_reg)); // Not a symbol: go slow. | |
| 5586 // __ nop(); // NOP_ADDED | |
| 5587 // | |
| 5588 // // Both are symbols. We already checked they weren't the same pointer | |
| 5589 // // so they are not equal. | |
| 5590 //// __ mov(r0, Operand(1)); // Non-zero indicates not equal. | |
| 5591 //// __ mov(pc, Operand(lr)); // Return. | |
| 5592 // __ li(v0, Operand(1)); // Non-zero indicates not equal. | |
| 5593 // __ jr(ra); | |
| 5594 // __ nop(); // NOP_ADDED | |
| 5595 //} | |
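The two mask tests above can be read as one predicate on the instance type. A small host-C++ sketch under assumed placeholder mask values (the real kIsNotStringMask/kIsSymbolMask constants live in the V8 headers):

    #include <cstdint>
    #include <cassert>

    // Placeholder values; the real constants come from objects.h.
    constexpr uint32_t kIsNotStringMask = 0x80;
    constexpr uint32_t kIsSymbolMask    = 0x40;

    bool IsSymbolType(uint32_t instance_type) {
      if ((instance_type & kIsNotStringMask) != 0) return false;  // not a string
      return (instance_type & kIsSymbolMask) != 0;                // symbol bit set
    }

    int main() {
      assert(IsSymbolType(kIsSymbolMask));      // string with the symbol bit
      assert(!IsSymbolType(kIsNotStringMask));  // not a string at all
      assert(!IsSymbolType(0));                 // plain, non-symbol string
    }

If both operands pass this test and their pointers already compared unequal, the objects cannot be equal, which is why the stub can return 1 without calling the runtime.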
| 5596 | |
| 5597 | |
| 5598 // On entry a0 and a1 are the things to be compared. On exit v0 is 0, | |
| 5599 // positive or negative to indicate the result of the comparison. | |
| 5600 void CompareStub::Generate(MacroAssembler* masm) { | |
| 5601 UNIMPLEMENTED_(); | |
| 5602 //// __ break_(0x00174); | |
| 5603 // Label slow; // Call builtin. | |
| 5604 // Label not_smis, both_loaded_as_doubles; | |
| 5605 // | |
| 5606 // // NOTICE! This code is only reached after a smi-fast-case check, so | |
| 5607 // // it is certain that at least one operand isn't a smi. | |
| 5608 // | |
| 5609 // // Handle the case where the objects are identical. Either returns the | |
| 5610 // // answer or goes to slow. Only falls through if the objects were not identical. | |
| 5611 // EmitIdenticalObjectComparison(masm, &slow, cc_); | |
| 5612 // | |
| 5613 // // If either is a Smi (we know that not both are), then they can only | |
| 5614 // // be strictly equal if the other is a HeapNumber. | |
| 5615 // ASSERT_EQ(0, kSmiTag); | |
| 5616 // ASSERT_EQ(0, Smi::FromInt(0)); | |
| 5617 // __ and_(t2, a0, Operand(a1)); | |
| 5618 // __ andi(t2, t2, Operand(kSmiTagMask)); | |
| 5619 // __ bcond(ne, &not_smis, t2, Operand(zero_reg)); | |
| 5620 // __ nop(); // NOP_ADDED | |
| 5621 // // One operand is a smi. EmitSmiNonsmiComparison generates code that can: | |
| 5622 // // 1) Return the answer. | |
| 5623 // // 2) Go to slow. | |
| 5624 // // 3) Fall through to both_loaded_as_doubles. | |
| 5625 // // (The separate rhs_not_nan case of the ARM version no longer exists.) | |
| 5626 // // In case 3 we have found out we were dealing with a number-number | |
| 5627 // // comparison and the numbers have been loaded into f12 and f14 as doubles. | |
| 5628 // EmitSmiNonsmiComparison(masm, &both_loaded_as_doubles, &slow, strict_); | |
| 5629 // | |
| 5630 // __ bind(&both_loaded_as_doubles); | |
| 5631 // // f12, f14 are the double representations of the left hand side | |
| 5632 // // and the right hand side. | |
| 5633 // | |
| 5634 // // Checks for NaN in the doubles we have loaded. Can return the answer or | |
| 5635 // // fall through if neither is a NaN. Also binds rhs_not_nan. | |
| 5636 // EmitNanCheck(masm, cc_); | |
| 5637 // | |
| 5638 // // Compares two doubles in f12 and f14 that are not NaNs. Returns the | |
| 5639 // // answer. Never falls through. | |
| 5640 // EmitTwoNonNanDoubleComparison(masm, cc_); | |
| 5641 // | |
| 5642 // __ bind(&not_smis); | |
| 5643 //// __ break_(0x00099); | |
| 5644 // // At this point we know we are dealing with two different objects, | |
| 5645 // // and neither of them is a Smi. The objects are in a0 and a1. | |
| 5646 // if (strict_) { | |
| 5647 // // This returns non-equal for some object types, or falls through if it | |
| 5648 // // was not lucky. | |
| 5649 // EmitStrictTwoHeapObjectCompare(masm); | |
| 5650 // } | |
| 5651 // | |
| 5652 // Label check_for_symbols; | |
| 5653 // // Check for heap-number-heap-number comparison. Can jump to slow case, | |
| 5654 // // or load both doubles into f12 and f14 and jump to the code that handles | |
| 5655 // // that case. If the inputs are not doubles then jumps to check_for_symbols. | |
| 5656 // // In this case t2 will contain the type of a0. | |
| 5657 // EmitCheckForTwoHeapNumbers(masm, | |
| 5658 // &both_loaded_as_doubles, | |
| 5659 // &check_for_symbols, | |
| 5660 // &slow); | |
| 5661 // | |
| 5662 // __ bind(&check_for_symbols); | |
| 5663 // if (cc_ == eq) { | |
| 5664 // // Either jumps to slow or returns the answer. Assumes that t2 is the | |
| 5665 // // type of a0 on entry. | |
| 5666 // EmitCheckForSymbols(masm, &slow); | |
| 5667 // } | |
| 5668 // | |
| 5669 // __ bind(&slow); | |
| 5670 //#ifdef NO_NATIVES | |
| 5671 // UNIMPLEMENTED(); | |
| 5672 // __ break_(0x5608); // Check below. Return has to be implemented. | |
| 5673 //#else | |
| 5674 //// __ push(lr); | |
| 5675 //// __ push(r1); | |
| 5676 //// __ push(r0); | |
| 5677 // __ multi_push_reversed(a0.bit() | a1.bit() | ra.bit()); | |
| 5678 // // Figure out which native to call and setup the arguments. | |
| 5679 // Builtins::JavaScript native; | |
| 5680 // int arg_count = 1; // Not counting receiver. | |
| 5681 // if (cc_ == eq) { | |
| 5682 // native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | |
| 5683 // } else { | |
| 5684 // native = Builtins::COMPARE; | |
| 5685 // int ncr; // NaN compare result | |
| 5686 // if (cc_ == less || cc_ == less_equal) { | |
| 5687 // ncr = GREATER; | |
| 5688 // } else { | |
| 5689 // ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases | |
| 5690 // ncr = LESS; | |
| 5691 // } | |
| 5692 // arg_count++; | |
| 5693 //// __ mov(r0, Operand(Smi::FromInt(ncr))); | |
| 5694 //// __ push(r0); | |
| 5695 // __ li(a0, Operand(Smi::FromInt(ncr))); | |
| 5696 // __ push(a0); | |
| 5697 // } | |
| 5698 // | |
| 5699 // // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | |
| 5700 // // tagged as a small integer. | |
| 5701 //// __ mov(r0, Operand(arg_count)); | |
| 5702 //// __ InvokeBuiltin(native, CALL_JS); | |
| 5703 //// __ cmp(r0, Operand(0)); | |
| 5704 //// __ pop(pc); | |
| 5705 // __ li(a0, Operand(arg_count)); | |
| 5706 // __ InvokeBuiltin(native, CALL_JS); | |
| 5707 // __ nop(); // NOP_ADDED | |
| 5708 // // Setup comparison | |
| 5709 // __ mov(s5, v0); | |
| 5710 // __ mov(s6, zero_reg); | |
| 5711 // // Return | |
| 5712 // __ pop(ra); | |
| 5713 // __ jr(ra); | |
| 5714 // __ nop(); | |
| 5715 //#endif | |
| 5716 } | |
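One detail worth spelling out from the stub above: the two smi-tag tests are folded into one by OR-ing the operands. A host-C++ sketch, assuming the standard encoding with kSmiTag == 0 and a one-bit kSmiTagMask:

    #include <cstdint>
    #include <cassert>

    constexpr intptr_t kSmiTagMask = 1;  // assumed 1-bit tag, tag value 0

    bool BothSmis(intptr_t a, intptr_t b) {
      // The OR has a tag bit set iff at least one operand does, so a single
      // AND-and-test covers both operands at once.
      return ((a | b) & kSmiTagMask) == 0;
    }

    int main() {
      assert(BothSmis(4, 6));   // two tagged Smis (values 2 and 3)
      assert(!BothSmis(4, 7));  // second word carries the heap-object tag
    }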
| 5717 | |
| 5718 | |
| 5719 // Allocates a heap number or jumps to the label if the young space is full and | |
| 5720 // a scavenge is needed. | |
| 5721 //static void AllocateHeapNumber( | |
| 5722 // MacroAssembler* masm, | |
| 5723 // Label* need_gc, // Jump here if young space is full. | |
| 5724 // Register result, // The tagged address of the new heap number. | |
| 5725 // Register scratch1, // A scratch register. | |
| 5726 // Register scratch2) { // Another scratch register. | |
| 5727 // UNIMPLEMENTED_(); | |
| 5728 | |
| 5729 //// // Allocate an object in the heap for the heap number and tag it as a heap | |
| 5730 //// // object. | |
| 5731 //// __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize, | |
| 5732 //// result, | |
| 5733 //// scratch1, | |
| 5734 //// scratch2, | |
| 5735 //// need_gc, | |
| 5736 //// TAG_OBJECT); | |
| 5737 //// | |
| 5738 // | |
| 5739 // // We ask for four more bytes so that we can align the result as needed. | |
| 5740 // // (HeapNumber::kSize has been made 4 bytes bigger.) | |
| 5741 // __ AllocateInNewSpace((HeapNumber::kSize) / kPointerSize, | |
| 5742 // result, | |
| 5743 // scratch1, | |
| 5744 // scratch2, | |
| 5745 // need_gc, | |
| 5746 // TAG_OBJECT); | |
| 5747 // | |
| 5748 // // Align to 8 bytes | |
| 5749 // __ addiu(result, result, 7-1); // -1 because result is tagged | |
| 5750 // __ andi(result, result, Operand(~7)); | |
| 5751 // __ ori(result, result, Operand(1)); // Tag it back. | |
| 5752 // | |
| 5753 //#ifdef DEBUG | |
| 5754 //////// TODO(MIPS.6) | |
| 5755 //// // Check that the result is 8-byte aligned. | |
| 5756 //// __ andi(scratch2, result, Operand(7)); | |
| 5757 //// __ xori(scratch2, scratch2, Operand(1)); // Fail if the tag is missing. | |
| 5758 //// __ Check(eq, | |
| 5759 //// "Error in HeapNumber allocation (not 8-byte aligned or tag missing )", | |
| 5760 //// scratch2, Operand(zero_reg)); | |
| 5761 //#endif | |
| 5762 // | |
| 5763 // // Get heap number map and store it in the allocated object. | |
| 5764 //// __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); | |
| 5765 //// __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); | |
| 5766 // __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex); | |
| 5767 // __ sw(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); | |
| 5768 //} | |
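The addiu/andi/ori triple above is the usual align-a-tagged-pointer idiom. A host-C++ sketch of the arithmetic, assuming a heap-object tag of 1 and 4 bytes of slack in the allocation:

    #include <cstdint>
    #include <cassert>

    uintptr_t AlignTagged8(uintptr_t tagged) {
      uintptr_t p = tagged + 7 - 1;  // -1 strips the tag, +7 rounds up
      p &= ~uintptr_t{7};            // align the untagged address to 8
      return p | 1;                  // re-apply the tag
    }

    int main() {
      assert(AlignTagged8(0x1001) == 0x1001);  // already 8-byte aligned
      assert(AlignTagged8(0x1005) == 0x1009);  // bumped to the next 8 bytes
    }

Note that the stub's andi with ~7 only works if the assembler macro synthesizes the full 32-bit immediate; a literal 16-bit andi zero-extends and would clear the upper half of the address.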
| 5769 | |
| 5770 | |
| 5771 // We fall into this code if the operands were Smis, but the result was | |
| 5772 // not (e.g. overflow). We branch into this code (to the not_smi label) if | |
| 5773 // the operands were not both Smi. The operands are in a0 and a1. In order | |
| 5774 // to call the C-implemented binary fp operation routines we need to end up | |
| 5775 // with the double precision floating point operands in f12 (for the | |
| 5776 // value in a1) and f14 (for the value in a0). | |
| 5777 // CURRENT: static void HandleBinaryOpSlowCases | |
| 5778 //static void HandleBinaryOpSlowCases(MacroAssembler* masm, | |
| 5779 // Label* not_smi, | |
| 5780 // const Builtins::JavaScript& builtin, | |
| 5781 // Token::Value operation, | |
| 5782 // OverwriteMode mode) { | |
| 5783 // UNIMPLEMENTED_(); | |
| 5784 // // TODO(MIPS.1): Implement overflow cases. | |
| 5785 // Label slow, do_the_call; | |
| 5786 // Label a0_is_smi, a1_is_smi, finished_loading_a0, finished_loading_a1; | |
| 5787 // // Smi-smi case (overflow). | |
| 5788 // // Since both are Smis there is no heap number to overwrite, so allocate. | |
| 5789 // // The new heap number is in t5. t6 and t7 are scratch. | |
| 5790 // // We should not meet this case yet, as we do not check for smi-smi | |
| 5791 // // overflows in GenericBinaryOpStub::Generate. | |
| 5792 //// AllocateHeapNumber(masm, &slow, r5, r6, r7); | |
| 5793 //// // Write Smi from r0 to r3 and r2 in double format. r6 is scratch. | |
| 5794 //// __ mov(r7, Operand(r0)); | |
| 5795 //// ConvertToDoubleStub stub1(r3, r2, r7, r6); | |
| 5796 //// __ push(lr); | |
| 5797 //// __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); | |
| 5798 //// // Write Smi from r1 to r1 and r0 in double format. r6 is scratch. | |
| 5799 //// __ mov(r7, Operand(r1)); | |
| 5800 //// ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
| 5801 //// __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | |
| 5802 //// __ pop(lr); | |
| 5803 //// __ jmp(&do_the_call); // Tail call. No return. | |
| 5804 // | |
| 5805 //// // We jump to here if something goes wrong (one param is not a number of | |
| 5806 //// // any sort or new-space allocation fails). | |
| 5807 // __ bind(&slow); | |
| 5808 //#ifdef NO_NATIVES | |
| 5809 // __ break_(0x00707); // We should not come here yet. | |
| 5810 //#else | |
| 5811 //// __ push(r1); | |
| 5812 //// __ push(r0); | |
| 5813 //// __ mov(r0, Operand(1)); // Set number of arguments. | |
| 5814 // __ push(a1); | |
| 5815 // __ push(a0); | |
| 5816 // __ li(a0, Operand(1)); // Set number of arguments. | |
| 5817 //// __ break_(0x5622); | |
| 5818 // __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | |
| 5819 // __ nop(); // NOP_ADDED | |
| 5820 //#endif | |
| 5821 // | |
| 5822 // // We branch here if at least one of a0 and a1 is not a Smi. | |
| 5823 // // Currently we should always get here. See comment about the smi-smi case above. | |
| 5824 // __ bind(not_smi); | |
| 5825 // if (mode == NO_OVERWRITE) { | |
| 5826 // // In the case where there is no chance of an overwritable float we may as | |
| 5827 // // well do the allocation immediately while a0 and a1 are untouched. | |
| 5828 // AllocateHeapNumber(masm, &slow, t5, t6, t7); | |
| 5829 // } | |
| 5830 // | |
| 5831 // // Check if a0 is smi or not | |
| 5832 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 5833 // __ bcond(eq, &a0_is_smi, t0, Operand(zero_reg)); // It's a Smi so don't check it's a heap number. | |
| 5834 // __ nop(); // NOP_ADDED | |
| 5835 // __ GetObjectType(a0, t0, t0); | |
| 5836 // __ bcond(ne, &slow, t0, Operand(HEAP_NUMBER_TYPE)); | |
| 5837 // __ nop(); // NOP_ADDED | |
| 5838 // if (mode == OVERWRITE_RIGHT) { | |
| 5839 // __ mov(t5, a0); | |
| 5840 // } | |
| 5841 // // As we have only two double arguments, we pass them in the f12 and f14 | |
| 5842 // // coprocessor registers. | |
| 5843 // __ ldc1(f14, FieldMemOperand(a0, HeapNumber::kValueOffset)); | |
| 5844 // __ b(&finished_loading_a0); | |
| 5845 // __ nop(); // NOP_ADDED | |
| 5846 // __ bind(&a0_is_smi); // BIND a0_is_smi | |
| 5847 // if (mode == OVERWRITE_RIGHT) { | |
| 5848 // // We can't overwrite a Smi so get the address of a new heap number into t5. | |
| 5849 // AllocateHeapNumber(masm, &slow, t5, t6, t7); | |
| 5850 // } | |
| 5851 // // We move a0 to coprocessor and convert it to a double. | |
| 5852 // __ mtc1(f14, a0); | |
| 5853 // __ cvt_d_w(f14, f14); | |
| 5854 // __ bind(&finished_loading_a0); // BIND finished_loading_a0 | |
| 5855 // | |
| 5856 // __ andi(t1, a1, Operand(kSmiTagMask)); | |
| 5857 // __ bcond(eq, &a1_is_smi, t1, Operand(zero_reg)); // It's a Smi so don't check it's a heap number. | |
| 5858 // __ nop(); // NOP_ADDED | |
| 5859 // __ GetObjectType(a1, t1, t1); | |
| 5860 // __ bcond(ne, &slow, t1, Operand(HEAP_NUMBER_TYPE)); | |
| 5861 // __ nop(); // NOP_ADDED | |
| 5862 // if (mode == OVERWRITE_LEFT) { | |
| 5863 // __ mov(t5, a1); // Overwrite this heap number. | |
| 5864 // } | |
| 5865 // // Calling convention says that the first double is in f12. | |
| 5866 // __ ldc1(f12, FieldMemOperand(a1, HeapNumber::kValueOffset)); | |
| 5867 // __ b(&finished_loading_a1); | |
| 5868 // __ nop(); // NOP_ADDED | |
| 5869 // __ bind(&a1_is_smi); // BIND a1_is_smi | |
| 5870 // if (mode == OVERWRITE_LEFT) { | |
| 5871 // // We can't overwrite a Smi so get the address of a new heap number into t5. | |
| 5872 // AllocateHeapNumber(masm, &slow, t5, t6, t7); | |
| 5873 // } | |
| 5874 // // We move a1 to the coprocessor and convert it to a double. | |
| 5875 // __ mtc1(f12, a1); // Was a0: this is the a1_is_smi path. | |
| 5876 // __ cvt_d_w(f12, f12); | |
| 5877 // __ bind(&finished_loading_a1); // BIND finished_loading_a1 | |
| 5878 // | |
| 5879 // __ bind(&do_the_call); | |
| 5880 // // f12: left value | |
| 5881 // // f14: right value | |
| 5882 // // t5: Address of heap number for result. | |
| 5883 // __ addiu(sp, sp, -12); | |
| 5884 // __ sw(s3, MemOperand(sp, 8)); | |
| 5885 // __ sw(ra, MemOperand(sp, 4)); // For later | |
| 5886 // __ sw(t5, MemOperand(sp, 0)); // Address of heap number that is answer. | |
| 5887 // // Call C routine that may not cause GC or other trouble. | |
| 5888 // // We need to align sp as we use floating point, so we save it in s3. | |
| 5889 // __ li(t9, Operand(ExternalReference::double_fp_operation(operation))); | |
| 5890 // __ mov(s3, sp); // Save sp | |
| 5891 // __ li(t3, Operand(~7)); // Load sp mask | |
| 5892 // __ and_(sp, sp, Operand(t3)); // Align sp; the addiu below fills the call's delay slot. | |
| 5893 // __ Call(t9); // Call the code | |
| 5894 // __ addiu(sp, sp, Operand(-StandardFrameConstants::kRArgsSlotsSize)); | |
| 5895 // __ mov(sp, s3); // Restore sp. | |
| 5896 // // Store answer in the overwritable heap number. | |
| 5897 // __ lw(t5, MemOperand(sp, 0)); | |
| 5898 // // Store double returned in f0 | |
| 5899 // __ sdc1(f0, MemOperand(t5, HeapNumber::kValueOffset - kHeapObjectTag)); | |
| 5900 // // Copy result address to v0 | |
| 5901 // __ mov(v0, t5); | |
| 5902 //// __ break_(0x00109); | |
| 5903 // // And we are done. | |
| 5904 // __ lw(ra, MemOperand(sp, 4)); | |
| 5905 // __ lw(s3, MemOperand(sp, 8)); | |
| 5906 // __ Jump(ra); | |
| 5907 // __ addiu(sp, sp, 12); // Restore sp | |
| 5908 //} | |
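The save-sp-in-s3/mask/restore sequence above implements the o32 requirement that sp be 8-byte aligned across calls that pass doubles. The direction of the rounding matters; a minimal sketch:

    #include <cstdint>
    #include <cassert>

    uintptr_t AlignStackDown(uintptr_t sp) {
      // Masking with ~7 rounds *down*, the safe direction for a stack that
      // grows toward lower addresses; the caller keeps the original sp
      // (the stub parks it in s3) and restores it after the call.
      return sp & ~uintptr_t{7};
    }

    int main() {
      assert(AlignStackDown(0x7fff0010) == 0x7fff0010);  // already aligned
      assert(AlignStackDown(0x7fff0014) == 0x7fff0010);  // rounded down
    }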
| 5909 | |
| 5910 | |
| 5911 // Tries to get a signed int32 out of a double precision floating point heap | |
| 5912 // number. Rounds towards 0. Fastest for doubles that are in the ranges | |
| 5913 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | |
| 5914 // almost to the range of signed int32 values that are not Smis. Jumps to the | |
| 5915 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 | |
| 5916 // (excluding the endpoints). | |
| 5917 //static void GetInt32(MacroAssembler* masm, | |
| 5918 // Register source, | |
| 5919 // Register dest, | |
| 5920 // Label* slow) { | |
| 5921 // UNIMPLEMENTED_(); | |
| 5922 // | |
| 5923 // // Load the double value. | |
| 5924 // __ ldc1(f12, FieldMemOperand(source, HeapNumber::kValueOffset)); | |
| 5925 // // Convert it. | |
| 5926 // __ cvt_w_d(f4, f12); | |
| 5927 // __ mfc1(f4, dest); | |
| 5928 // // TODO(MIPS.3): Implement case where we could not convert it. | |
| 5929 // | |
| 5930 //} | |
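ES requires this conversion to truncate toward zero. A host-C++ sketch of the semantics the routine needs, assuming the value is already known to be in int32 range (the TODO above covers the out-of-range case):

    #include <cstdint>
    #include <cmath>
    #include <cassert>

    int32_t TruncateToInt32(double d) {
      return static_cast<int32_t>(std::trunc(d));  // rounds toward zero
    }

    int main() {
      assert(TruncateToInt32(2.9) == 2);
      assert(TruncateToInt32(-2.9) == -2);
    }

Worth noting: MIPS cvt.w.d honours the current FCSR rounding mode, while trunc.w.d always truncates, so the latter is usually what this conversion wants.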
| 5931 | |
| 5932 | |
| 5933 // For bitwise ops where the inputs are not both Smis we here try to determine | |
| 5934 // whether both inputs are either Smis or at least heap numbers that can be | |
| 5935 // represented by a 32 bit signed value. We truncate towards zero as required | |
| 5936 // by the ES spec. If this is the case we do the bitwise op and see if the | |
| 5937 // result is a Smi. If so, great, otherwise we try to find a heap number to | |
| 5938 // write the answer into (either by allocating or by overwriting). | |
| 5939 // On entry the operands are in a0 and a1. On exit the answer is in v0. | |
| 5940 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) { | |
| 5941 UNIMPLEMENTED_(); | |
| 5942 // Label slow, result_not_a_smi; | |
| 5943 // Label a0_is_smi, a1_is_smi; | |
| 5944 // Label done_checking_a0, done_checking_a1; | |
| 5945 // | |
| 5946 // | |
| 5947 // __ andi(t1, a1, Operand(kSmiTagMask)); | |
| 5948 // __ bcond(eq, &a1_is_smi, t1, Operand(zero_reg)); | |
| 5949 // __ nop(); // NOP_ADDED | |
| 5950 // __ GetObjectType(a1, t4, t4); | |
| 5951 // __ bcond(ne, &slow, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5952 // __ nop(); // NOP_ADDED | |
| 5953 // GetInt32(masm, a1, a3, &slow); | |
| 5954 // __ b(&done_checking_a1); | |
| 5955 // __ bind(&a1_is_smi); | |
| 5956 // __ sra(a3, a1, kSmiTagSize); | |
| 5957 // __ bind(&done_checking_a1); | |
| 5958 // | |
| 5959 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 5960 // __ bcond(eq, &a0_is_smi, t0, Operand(zero_reg)); | |
| 5961 // __ nop(); // NOP_ADDED | |
| 5962 // __ GetObjectType(a0, t4, t4); | |
| 5963 // __ bcond(ne, &slow, t4, Operand(HEAP_NUMBER_TYPE)); | |
| 5964 // __ nop(); // NOP_ADDED | |
| 5965 // GetInt32(masm, a0, a2, &slow); | |
| 5966 // __ b(&done_checking_a0); | |
| 5967 // __ bind(&a0_is_smi); | |
| 5968 // __ sra(a2, a0, kSmiTagSize); | |
| 5969 // __ bind(&done_checking_a0); | |
| 5970 // | |
| 5971 // // a0 and a1: Original operands (Smi or heap numbers). | |
| 5972 // // a2 and a3: Signed int32 operands. | |
| 5973 // | |
| 5974 // switch (op_) { | |
| 5975 // case Token::BIT_OR: __ or_(v1, a2, Operand(a3)); break; | |
| 5976 // case Token::BIT_XOR: __ xor_(v1, a2, Operand(a3)); break; | |
| 5977 // case Token::BIT_AND: __ and_(v1, a2, Operand(a3)); break; | |
| 5978 // case Token::SAR: | |
| 5979 // __ srav(v1, a2, a3); | |
| 5980 // break; | |
| 5981 // case Token::SHR: | |
| 5982 // __ srlv(v1, a2, a3); | |
| 5983 // // SHR is special because it is required to produce a positive answer. | |
| 5984 // // The code below for writing into heap numbers isn't capable of writing | |
| 5985 // // the register as an unsigned int so we go to slow case if we hit this | |
| 5986 // // case. | |
| 5987 // __ andi(t3, v1, Operand(0x80000000)); | |
| 5988 // __ bcond(ne, &slow, t3, Operand(zero_reg)); | |
| 5989 // __ nop(); // NOP_ADDED | |
| 5990 // break; | |
| 5991 // case Token::SHL: | |
| 5992 // __ sllv(v1, a2, a3); | |
| 5993 // break; | |
| 5994 // default: UNREACHABLE(); | |
| 5995 // } | |
| 5996 // // check that the *signed* result fits in a smi | |
| 5997 // __ add(t3, v1, Operand(0x40000000)); | |
| 5998 // __ andi(t3, t3, Operand(0x80000000)); | |
| 5999 // __ bcond(ne, &slow, t3, Operand(zero_reg)); | |
| 6000 // __ nop(); // NOP_ADDED | |
| 6001 // // Smi tag result. | |
| 6002 // __ sll(v0, v1, kSmiTagSize); | |
| 6003 // __ Ret(); | |
| 6004 // | |
| 6005 // Label have_to_allocate, got_a_heap_number; | |
| 6006 // __ bind(&result_not_a_smi); | |
| 6007 // switch (mode_) { | |
| 6008 // case OVERWRITE_RIGHT: { | |
| 6009 // // t0 has not been changed since __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 6010 // __ bcond(eq, &have_to_allocate, t0, Operand(zero_reg)); | |
| 6011 // __ mov(t5, a0); | |
| 6012 // break; | |
| 6013 // } | |
| 6014 // case OVERWRITE_LEFT: { | |
| 6015 // // t1 has not been changed since __ andi(t1, a1, Operand(kSmiTagMask)); | |
| 6016 // __ bcond(eq, &have_to_allocate, t1, Operand(zero_reg)); | |
| 6017 // __ nop(); // NOP_ADDED | |
| 6018 // __ mov(t5, a1); | |
| 6019 // break; | |
| 6020 // } | |
| 6021 // case NO_OVERWRITE: { | |
| 6022 // // Get a new heap number in t5. t6 and t7 are scratch. | |
| 6023 // AllocateHeapNumber(masm, &slow, t5, t6, t7); | |
| 6024 // } | |
| 6025 // default: break; | |
| 6026 // } | |
| 6027 // | |
| 6028 // __ bind(&got_a_heap_number); | |
| 6029 // // v1: Answer as signed int32. | |
| 6030 // // t5: Heap number to write answer into. | |
| 6031 // | |
| 6032 // // Nothing can go wrong now, so move the heap number to v0, which is the | |
| 6033 // // result. | |
| 6034 // __ mov(v0, t5); | |
| 6035 // | |
| 6036 // // Convert our int32 (v1) to a double and store it in our heap number (t5). | |
| 6037 // __ mtc1(f12, v1); | |
| 6038 // __ cvt_d_w(f4, f12); | |
| 6039 // __ sdc1(f4, MemOperand(t5, HeapNumber::kValueOffset - kHeapObjectTag)); | |
| 6040 // | |
| 6041 // if (mode_ != NO_OVERWRITE) { | |
| 6042 // __ bind(&have_to_allocate); | |
| 6043 // // Get a new heap number in t5. t6 and t7 are scratch. | |
| 6044 // AllocateHeapNumber(masm, &slow, t5, t6, t7); | |
| 6045 // __ jmp(&got_a_heap_number); | |
| 6046 // __ nop(); // NOP_ADDED | |
| 6047 // } | |
| 6048 // | |
| 6049 // // If all else failed then we go to the runtime system. | |
| 6050 // __ bind(&slow); | |
| 6051 // __ push(a1); // restore stack | |
| 6052 // __ push(a0); | |
| 6053 // __ li(a0, Operand(1)); // 1 argument (not counting receiver). | |
| 6054 // switch (op_) { | |
| 6055 // case Token::BIT_OR: | |
| 6056 // __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); | |
| 6057 // break; | |
| 6058 // case Token::BIT_AND: | |
| 6059 // __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); | |
| 6060 // break; | |
| 6061 // case Token::BIT_XOR: | |
| 6062 // __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); | |
| 6063 // break; | |
| 6064 // case Token::SAR: | |
| 6065 // __ InvokeBuiltin(Builtins::SAR, JUMP_JS); | |
| 6066 // break; | |
| 6067 // case Token::SHR: | |
| 6068 // __ InvokeBuiltin(Builtins::SHR, JUMP_JS); | |
| 6069 // break; | |
| 6070 // case Token::SHL: | |
| 6071 // __ InvokeBuiltin(Builtins::SHL, JUMP_JS); | |
| 6072 // break; | |
| 6073 // default: | |
| 6074 // UNREACHABLE(); | |
| 6075 // } | |
| 6076 } | |
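The add-0x40000000-and-test-the-sign-bit check above is the standard fits-in-a-Smi test. A host-C++ sketch, assuming 31-bit Smis (the value must lie in [-2^30, 2^30)):

    #include <cstdint>
    #include <cassert>

    bool FitsInSmi(int32_t v) {
      // Adding 2^30 maps [-2^30, 2^30) onto [0, 2^31), i.e. onto exactly
      // the values whose sign bit is clear.
      return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
    }

    int main() {
      assert(FitsInSmi(0x3fffffff));    // 2^30 - 1, the largest Smi
      assert(!FitsInSmi(0x40000000));   // one too large
      assert(FitsInSmi(-0x40000000));   // -2^30, the smallest Smi
      assert(!FitsInSmi(-0x40000001));  // one too small
    }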
| 6077 | |
| 6078 | |
| 6079 // Can we multiply by x with at most two shifts and an add? | |
| 6080 // This answers yes for all integers from 2 to 10. | |
| 6081 //static bool IsEasyToMultiplyBy(int x) { | |
| 6082 // if (x < 2) return false; // Avoid special cases. | |
| 6083 // if (x > (Smi::kMaxValue + 1) >> 2) return false; // Almost always overflows. | |
| 6084 // if (IsPowerOf2(x)) return true; // Simple shift. | |
| 6085 // if (PopCountLessThanEqual2(x)) return true; // Shift and add and shift . | |
| 6086 // if (IsPowerOf2(x + 1)) return true; // Patterns like 11111. | |
| 6087 // return false; | |
| 6088 //} | |
| 6089 | |
| 6090 | |
| 6091 // Can multiply by anything that IsEasyToMultiplyBy returns true for. | |
| 6092 // Source and destination may be the same register. This routine does | |
| 6093 // not set carry and overflow the way a mul instruction would. | |
| 6094 //static void MultiplyByKnownInt(MacroAssembler* masm, | |
| 6095 // Register source, | |
| 6096 // Register destination, | |
| 6097 // int known_int) { | |
| 6098 // if (IsPowerOf2(known_int)) { | |
| 6099 // __ sll(destination, source, BitPosition(known_int)); | |
| 6100 // } else if (PopCountLessThanEqual2(known_int)) { | |
| 6101 // int first_bit = BitPosition(known_int); | |
| 6102 // int second_bit = BitPosition(known_int ^ (1 << first_bit)); | |
| 6103 // __ sll(t0, source, second_bit - first_bit); | |
| 6104 // __ add(destination, source, Operand(t0)); | |
| 6105 // if (first_bit != 0) { | |
| 6106 // __ sll(destination, destination, first_bit); | |
| 6107 // } | |
| 6108 // } else { | |
| 6109 // ASSERT(IsPowerOf2(known_int + 1)); // Patterns like 1111. | |
| 6110 // int the_bit = BitPosition(known_int + 1); | |
| 6111 // __ sll(t0, source, the_bit); | |
| 6112 // __ sub(destination, t0, Operand(source)); | |
| 6113 // } | |
| 6114 //} | |
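The three decompositions the routine relies on, written out as plain arithmetic in a host-C++ sketch (the constants 8, 10 and 7 are chosen for illustration):

    #include <cassert>

    int MulByShifts(int x, int k) {
      switch (k) {
        case 8:  return x << 3;               // power of two: one shift
        case 10: return (x + (x << 2)) << 1;  // two set bits: shift, add, shift
        case 7:  return (x << 3) - x;         // 2^n - 1 pattern: shift and subtract
        default: return x * k;
      }
    }

    int main() {
      for (int x = 0; x <= 5; x++) {
        assert(MulByShifts(x, 8) == 8 * x);
        assert(MulByShifts(x, 10) == 10 * x);
        assert(MulByShifts(x, 7) == 7 * x);
      }
    }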
| 6115 | |
| 6116 | |
| 6117 //// This function (as opposed to MultiplyByKnownInt) takes the known int in a | |
| 6118 //// a register for the cases where it doesn't know a good trick, and may delive r | |
| 6119 //// a result that needs shifting. | |
| 6120 //static void MultiplyByKnownInt2( | |
| 6121 // MacroAssembler* masm, | |
| 6122 // Register result, | |
| 6123 // Register source, | |
| 6124 // Register known_int_register, // Smi tagged. | |
| 6125 // int known_int, | |
| 6126 // int* required_shift) { // Including Smi tag shift | |
| 6127 // switch (known_int) { | |
| 6128 // case 3: | |
| 6129 // __ add(result, source, Operand(source, LSL, 1)); | |
| 6130 // *required_shift = 1; | |
| 6131 // break; | |
| 6132 // case 5: | |
| 6133 // __ add(result, source, Operand(source, LSL, 2)); | |
| 6134 // *required_shift = 1; | |
| 6135 // break; | |
| 6136 // case 6: | |
| 6137 // __ add(result, source, Operand(source, LSL, 1)); | |
| 6138 // *required_shift = 2; | |
| 6139 // break; | |
| 6140 // case 7: | |
| 6141 // __ rsb(result, source, Operand(source, LSL, 3)); | |
| 6142 // *required_shift = 1; | |
| 6143 // break; | |
| 6144 // case 9: | |
| 6145 // __ add(result, source, Operand(source, LSL, 3)); | |
| 6146 // *required_shift = 1; | |
| 6147 // break; | |
| 6148 // case 10: | |
| 6149 // __ add(result, source, Operand(source, LSL, 2)); | |
| 6150 // *required_shift = 2; | |
| 6151 // break; | |
| 6152 // default: | |
| 6153 // ASSERT(!IsPowerOf2(known_int)); // That would be very inefficient. | |
| 6154 // __ mul(result, source, known_int_register); | |
| 6155 // *required_shift = 0; | |
| 6156 // } | |
| 6157 //} | |
| 6158 | |
| 6159 | |
| 6160 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { | |
| 6161 UNIMPLEMENTED_(); | |
| 6162 // // TODO(MIPS.1): Implement overflow cases. | |
| 6163 // // a1 : x | |
| 6164 // // a0 : y | |
| 6165 // // result : v0 | |
| 6166 // | |
| 6167 // // All ops need to know whether we are dealing with two Smis. Set up t2 to | |
| 6168 // // tell us that. | |
| 6169 // __ or_(t2, a1, Operand(a0)); // t2 = x | y; | |
| 6170 // | |
| 6171 // switch (op_) { | |
| 6172 // case Token::ADD: { | |
| 6173 // Label not_smi; | |
| 6174 // // Fast path. | |
| 6175 // ASSERT(kSmiTag == 0); // Adjust code below. | |
| 6176 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6177 // __ bcond(ne, &not_smi, t3, Operand(zero_reg)); | |
| 6178 // __ nop(); | |
| 6179 // __ Ret(); | |
| 6180 // __ addu(v0, a1, Operand(a0)); // Add y optimistically. | |
| 6181 // | |
| 6182 // HandleBinaryOpSlowCases(masm, | |
| 6183 // &not_smi, | |
| 6184 // Builtins::ADD, | |
| 6185 // Token::ADD, | |
| 6186 // mode_); | |
| 6187 // break; | |
| 6188 // } | |
| 6189 // | |
| 6190 // case Token::SUB: { | |
| 6191 // Label not_smi; | |
| 6192 //// // Fast path. | |
| 6193 // ASSERT(kSmiTag == 0); // Adjust code below. | |
| 6194 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6195 // __ bcond(ne, &not_smi, t3, Operand(zero_reg)); | |
| 6196 // __ nop(); | |
| 6197 // __ Ret(); | |
| 6198 // __ subu(v0, a1, Operand(a0)); // Subtract y optimistically. | |
| 6199 // | |
| 6200 // HandleBinaryOpSlowCases(masm, | |
| 6201 // &not_smi, | |
| 6202 // Builtins::SUB, | |
| 6203 // Token::SUB, | |
| 6204 // mode_); | |
| 6205 // break; | |
| 6206 // } | |
| 6207 // | |
| 6208 // case Token::MUL: { | |
| 6209 // Label not_smi; | |
| 6210 // ASSERT(kSmiTag == 0); // adjust code below | |
| 6211 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6212 // __ bcond(ne, &not_smi, t3, Operand(zero_reg)); | |
| 6213 // __ nop(); | |
| 6214 // // Remove tag from one operand (but keep sign), so that result is Smi. | |
| 6215 // __ sra(t0, a0, kSmiTagSize); | |
| 6216 // // Do multiplication | |
| 6217 // __ Ret(); | |
| 6218 // __ mul(v0, a1, Operand(t0)); | |
| 6219 // | |
| 6220 // HandleBinaryOpSlowCases(masm, | |
| 6221 // &not_smi, | |
| 6222 // Builtins::MUL, | |
| 6223 // Token::MUL, | |
| 6224 // mode_); | |
| 6225 // break; | |
| 6226 // } | |
| 6227 // | |
| 6228 // case Token::DIV: { | |
| 6229 // Label not_smi; | |
| 6230 // ASSERT(kSmiTag == 0); // adjust code below | |
| 6231 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6232 // __ bcond(ne, &not_smi, t3, Operand(zero_reg)); | |
| 6233 // __ nop(); | |
| 6234 // // Remove tags | |
| 6235 // __ sra(t0, a0, kSmiTagSize); | |
| 6236 // __ sra(t1, a1, kSmiTagSize); | |
| 6237 // // Divide | |
| 6238 // __ div(t1, Operand(t0)); | |
| 6239 // __ mflo(v0); | |
| 6240 // __ Ret(); | |
| 6241 // __ sll(v0, v0, 1); | |
| 6242 // | |
| 6243 // HandleBinaryOpSlowCases(masm, | |
| 6244 // &not_smi, | |
| 6245 // op_ == Token::MOD ? Builtins::MOD : Builtins::DIV, | |
| 6246 // op_, | |
| 6247 // mode_); | |
| 6248 // break; | |
| 6249 // } | |
| 6250 // | |
| 6251 // case Token::MOD: { | |
| 6252 // Label not_smi; | |
| 6253 // ASSERT(kSmiTag == 0); // adjust code below | |
| 6254 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6255 // __ bcond(ne, &not_smi, t3, Operand(zero_reg)); | |
| 6256 // __ nop(); | |
| 6257 // // Remove tag from one operand (but keep sign), so that result is Smi. | |
| 6258 // __ sra(t0, a0, kSmiTagSize); | |
| 6259 // __ div(a1, Operand(a0)); | |
| 6260 // __ Ret(); | |
| 6261 // __ mfhi(v0); | |
| 6262 // | |
| 6263 // HandleBinaryOpSlowCases(masm, | |
| 6264 // &not_smi, | |
| 6265 // op_ == Token::MOD ? Builtins::MOD : Builtins::DIV, | |
| 6266 // op_, | |
| 6267 // mode_); | |
| 6268 // break; | |
| 6269 // } | |
| 6270 // | |
| 6271 // case Token::BIT_OR: | |
| 6272 // case Token::BIT_AND: | |
| 6273 // case Token::BIT_XOR: | |
| 6274 // case Token::SAR: | |
| 6275 // case Token::SHR: | |
| 6276 // case Token::SHL: { | |
| 6277 // Label slow; | |
| 6278 // ASSERT(kSmiTag == 0); // adjust code below | |
| 6279 // __ andi(t3, t2, Operand(kSmiTagMask)); | |
| 6280 // __ bcond(ne, &slow, t3, Operand(zero_reg)); | |
| 6281 // __ nop(); | |
| 6282 // switch (op_) { | |
| 6283 // case Token::BIT_OR: __ or_(v0, a0, Operand(a1)); break; | |
| 6284 // case Token::BIT_AND: __ and_(v0, a0, Operand(a1)); break; | |
| 6285 // case Token::BIT_XOR: __ xor_(v0, a0, Operand(a1)); break; | |
| 6286 // case Token::SAR: | |
| 6287 // // Remove tags from operands. | |
| 6288 // __ sra(a2, a0, kSmiTagSize); | |
| 6289 // __ sra(a3, a1, kSmiTagSize); | |
| 6290 // // Shift | |
| 6291 // __ srav(v0, a3, a2); | |
| 6292 // // Smi tag result. | |
| 6293 // __ sll(v0, v0, kSmiTagSize); | |
| 6294 // break; | |
| 6295 // case Token::SHR: | |
| 6296 // // Remove tags from operands. | |
| 6297 // __ sra(a2, a0, kSmiTagSize); | |
| 6298 // __ sra(a3, a1, kSmiTagSize); | |
| 6299 // // Shift | |
| 6300 // __ srlv(v0, a3, a2); | |
| 6301 // // Unsigned shift is not allowed to produce a negative number, so | |
| 6302 // // check the sign bit and the sign bit after Smi tagging. | |
| 6303 // __ andi(t3, v0, Operand(0xc0000000)); | |
| 6304 // __ bcond(ne, &slow, t3, Operand(zero_reg)); | |
| 6305 // __ nop(); // NOP_ADDED | |
| 6306 // // Smi tag result. | |
| 6307 // __ sll(v0, v0, kSmiTagSize); | |
| 6308 // break; | |
| 6309 // case Token::SHL: | |
| 6310 // // Remove tags from operands. | |
| 6311 // __ sra(a2, a0, kSmiTagSize); | |
| 6312 // __ sra(a3, a1, kSmiTagSize); | |
| 6313 // // Shift | |
| 6314 // __ sllv(v0, a3, a2); | |
| 6315 // // Check that the signed result fits in a Smi. | |
| 6316 // __ addiu(t3, v0, Operand(0x40000000)); | |
| 6317 // __ andi(t3, t3, Operand(0x80000000)); | |
| 6318 // __ bcond(ne, &slow, t3, Operand(zero_reg)); | |
| 6319 // __ nop(); // NOP_ADDED | |
| 6320 // // Smi tag result. | |
| 6321 // __ sll(v0, v0, kSmiTagSize); | |
| 6322 // break; | |
| 6323 // default: UNREACHABLE(); | |
| 6324 // } | |
| 6325 // __ Ret(); | |
| 6326 // __ bind(&slow); | |
| 6327 // HandleNonSmiBitwiseOp(masm); | |
| 6328 // break; | |
| 6329 // } | |
| 6330 // | |
| 6331 // default: UNREACHABLE(); | |
| 6332 // } | |
| 6333 // // This code should be unreachable. | |
| 6334 // __ stop("Unreachable"); | |
| 6335 } | |
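A note on why the fast paths above can work directly on tagged values: with a value v stored as v << 1, addition and subtraction commute with tagging, while multiplication needs one operand untagged first. A small host-C++ sketch:

    #include <cassert>

    int Tag(int v)   { return v << 1; }  // assumed 1-bit Smi tag of 0
    int Untag(int s) { return s >> 1; }

    int main() {
      int a = 7, b = 3;
      assert(Tag(a) + Tag(b) == Tag(a + b));        // ADD/SUB: tags cancel out
      assert(Tag(a) * Untag(Tag(b)) == Tag(a * b)); // MUL: untag one side first
    }

This matches the stub: ADD and SUB operate on a0/a1 directly, while MUL first does sra(t0, a0, kSmiTagSize).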
| 6336 | |
| 6337 | |
| 6338 void StackCheckStub::Generate(MacroAssembler* masm) { | |
| 6339 UNIMPLEMENTED_(); | |
| 6340 // // Do tail-call to runtime routine. Runtime routines expect at least one | |
| 6341 // // argument, so give it a Smi. | |
| 6342 //// __ mov(r0, Operand(Smi::FromInt(0))); | |
| 6343 //// __ push(r0); | |
| 6344 // __ li(a0, Operand(Smi::FromInt(0))); | |
| 6345 // __ push(a0); | |
| 6346 // __ addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); | |
| 6347 // __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1); | |
| 6348 // __ nop(); | |
| 6349 // __ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); | |
| 6350 // | |
| 6351 // __ StubReturn(1); | |
| 6352 } | |
| 6353 | |
| 6354 | |
| 6355 void UnarySubStub::Generate(MacroAssembler* masm) { | |
| 6356 UNIMPLEMENTED_(); | |
| 6357 // Label undo; | |
| 6358 // Label slow; | |
| 6359 // Label not_smi; | |
| 6360 // | |
| 6361 // // Enter runtime system if the value is not a smi. | |
| 6362 //// __ tst(r0, Operand(kSmiTagMask)); | |
| 6363 //// __ b(ne, &not_smi); | |
| 6364 // __ andi(t0, a0, Operand(kSmiTagMask)); | |
| 6365 // __ bcond(ne, &not_smi, t0, Operand(zero_reg)); | |
| 6366 // __ nop(); // NOP_ADDED | |
| 6367 // | |
| 6368 // // Enter runtime system if the value of the expression is zero | |
| 6369 // // to make sure that we switch between 0 and -0. | |
| 6370 //// __ cmp(r0, Operand(0)); | |
| 6371 //// __ b(eq, &slow); | |
| 6372 // __ bcond(eq, &slow, a0, Operand(zero_reg)); | |
| 6373 // __ nop(); // NOP_ADDED | |
| 6374 // | |
| 6375 // // The value of the expression is a smi that is not zero. Try | |
| 6376 // // optimistic subtraction '0 - value'. | |
| 6377 //// __ rsb(r1, r0, Operand(0), SetCC); | |
| 6378 //// __ b(vs, &slow); | |
| 6379 // // Check for overflow. Since we subtract from 0 the only overflow case is | |
| 6380 // // when a0 is 0b10...0 = -(2^31). | |
| 6381 // __ li(t0, Operand(1<<31)); | |
| 6382 // __ bcond(eq, &slow, a0, Operand(t0)); | |
| 6383 // __ subu(v0, zero_reg, Operand(a0)); // Optimistic sub in the branch delay slot. | |
| 6384 // | |
| 6385 //// __ mov(r0, Operand(r1)); // Set r0 to result. | |
| 6386 // __ StubReturn(1); | |
| 6387 // | |
| 6388 // //TODO(MIPS.5): Call to InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | |
| 6389 // // Enter runtime system. | |
| 6390 // __ bind(&slow); | |
| 6391 // __ break_(0x6973); | |
| 6392 ////// __ push(r0); | |
| 6393 ////// __ mov(r0, Operand(0)); // Set number of arguments. | |
| 6394 ////// __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | |
| 6395 //// __ push(a0); | |
| 6396 //// __ li(a0, Operand(0)); // Set number of arguments. | |
| 6397 //// __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_JS); | |
| 6398 //// __ nop(); // NOP_ADDED | |
| 6399 // | |
| 6400 // __ bind(&not_smi); | |
| 6401 //// __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE); | |
| 6402 //// __ b(ne, &slow); | |
| 6403 // __ GetObjectType(a0, a1, a1); | |
| 6404 // __ bcond(ne, &slow, a1, Operand(HEAP_NUMBER_TYPE)); | |
| 6405 // __ nop(); // NOP_ADDED | |
| 6406 // // a0 is a heap number. Get a new heap number in a1. | |
| 6407 // if (overwrite_) { | |
| 6408 //// __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 6409 //// __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 6410 //// __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 6411 // __ lw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | |
| 6412 // __ xor_(a2, a2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 6413 // __ sw(a2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | |
| 6414 // __ mov(v0, a0); // Set result | |
| 6415 // } else { | |
| 6416 //// AllocateHeapNumber(masm, &slow, r1, r2, r3); | |
| 6417 //// __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
| 6418 //// __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
| 6419 //// __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | |
| 6420 //// __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 6421 //// __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | |
| 6422 //// __ mov(r0, Operand(r1)); | |
| 6423 // AllocateHeapNumber(masm, &slow, t1, t2, t3); | |
| 6424 // __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); | |
| 6425 // __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); | |
| 6426 // __ sw(t3, FieldMemOperand(t1, HeapNumber::kMantissaOffset)); | |
| 6427 // __ xor_(t2, t2, Operand(HeapNumber::kSignMask)); // Flip sign. | |
| 6428 // __ sw(t2, FieldMemOperand(t1, HeapNumber::kExponentOffset)); | |
| 6429 // __ mov(v0, t1); | |
| 6430 // } | |
| 6431 // __ StubReturn(1); | |
| 6432 } | |
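The overwrite path above negates a heap number by XOR-ing HeapNumber::kSignMask into its exponent word rather than doing FP arithmetic. A host-C++ sketch of the same bit trick applied to a whole IEEE-754 double:

    #include <cstdint>
    #include <cstring>
    #include <cassert>

    double FlipSign(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      bits ^= uint64_t{1} << 63;  // sign bit: top bit of the exponent word
      std::memcpy(&d, &bits, sizeof bits);
      return d;
    }

    int main() {
      assert(FlipSign(1.5) == -1.5);
      assert(FlipSign(-2.0) == 2.0);
    }

This also gets -0.0 right, which is why the Smi fast path has to bail out to the runtime for zero.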
| 6433 | |
| 6434 | |
| 6435 int CEntryStub::MinorKey() { | |
| 6436 UNIMPLEMENTED_(); | |
| 6437 ASSERT(result_size_ <= 2); | |
| 6438 // Result returned in v0 or v0+v1 by default. | |
| 6439 return 0; | |
| 6440 } | |
| 6441 | |
| 6442 | |
| 6443 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | |
| 6444 UNIMPLEMENTED_(); | |
| 6445 // // Called from CEntryStub::GenerateBody. | |
| 6446 // | |
| 6447 // // r0 -> v0 holds the exception. Cf. l.6075, lrdi32(a0, Operand(reinterpret_cast<int32_t>(failure))); in CEntryStub::GenerateBody. | |
| 6448 // | |
| 6449 // | |
| 6450 // // Adjust this code if not the case. | |
| 6451 // ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 6452 // | |
| 6453 // // Drop the sp to the top of the handler. | |
| 6454 //// __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 6455 //// __ ldr(sp, MemOperand(r3)); | |
| 6456 // __ li(a3, Operand(ExternalReference(Top::k_handler_address))); | |
| 6457 // __ lw(sp, MemOperand(a3)); | |
| 6458 // | |
| 6459 // | |
| 6460 // // Restore the next handler and frame pointer, discard handler state. | |
| 6461 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 6462 //// __ pop(r2); | |
| 6463 //// __ str(r2, MemOperand(r3)); | |
| 6464 // __ pop(a2); | |
| 6465 // __ sw(a2, MemOperand(a3)); | |
| 6466 // ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 6467 //// __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. | |
| 6468 // __ multi_pop_reversed(a3.bit() | fp.bit()); | |
| 6469 // | |
| 6470 // // Before returning we restore the context from the frame pointer if | |
| 6471 // // not NULL. The frame pointer is NULL in the exception handler of a | |
| 6472 // // JS entry frame. | |
| 6473 //// __ cmp(fp, Operand(0)); | |
| 6474 //// // Set cp to NULL if fp is NULL. | |
| 6475 //// __ mov(cp, Operand(0), LeaveCC, eq); | |
| 6476 //// // Restore cp otherwise. | |
| 6477 //// __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 6478 // __ beq(fp, zero_reg, 2); | |
| 6479 // __ mov(cp, zero_reg); | |
| 6480 // __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
| 6481 //#ifdef DEBUG | |
| 6482 //// printf("%s --- unimplemented debug\n", __func__); | |
| 6483 //// if (FLAG_debug_code) { | |
| 6484 ////// __ mov(lr, Operand(pc)); | |
| 6485 //// } | |
| 6486 //#endif | |
| 6487 // ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 6488 //// __ pop(pc); | |
| 6489 // __ pop(t9); | |
| 6490 // __ jr(Operand(t9)); | |
| 6491 // __ nop(); // NOP_ADDED | |
| 6492 } | |
| 6493 | |
| 6494 | |
| 6495 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, | |
| 6496 UncatchableExceptionType type) { | |
| 6497 UNIMPLEMENTED_(); | |
| 6498 // // Adjust this code if not the case. | |
| 6499 // ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 6500 // | |
| 6501 // // Drop sp to the top stack handler. | |
| 6502 //// __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 6503 //// __ ldr(sp, MemOperand(r3)); | |
| 6504 // __ li(a3, Operand(ExternalReference(Top::k_handler_address))); | |
| 6505 // __ lw(sp, MemOperand(a3)); // a3 is needed again below to store the next handler. | |
| 6506 // // Unwind the handlers until the ENTRY handler is found. | |
| 6507 // Label loop, done; | |
| 6508 // __ bind(&loop); | |
| 6509 // // Load the type of the current stack handler. | |
| 6510 // const int kStateOffset = StackHandlerConstants::kStateOffset; | |
| 6511 //// __ ldr(r2, MemOperand(sp, kStateOffset)); | |
| 6512 //// __ cmp(r2, Operand(StackHandler::ENTRY)); | |
| 6513 //// __ b(eq, &done); | |
| 6514 // __ lw(a2, MemOperand(sp, kStateOffset)); | |
| 6515 // __ bcond(eq, &done, a2, Operand(StackHandler::ENTRY)); | |
| 6516 // __ nop(); // NOP_ADDED | |
| 6517 // // Fetch the next handler in the list. | |
| 6518 // const int kNextOffset = StackHandlerConstants::kNextOffset; | |
| 6519 //// __ ldr(sp, MemOperand(sp, kNextOffset)); | |
| 6520 //// __ jmp(&loop); | |
| 6521 //// __ bind(&done); | |
| 6522 // __ lw(sp, MemOperand(sp, kNextOffset)); | |
| 6523 // __ jmp(&loop); | |
| 6524 // __ bind(&done); | |
| 6525 // | |
| 6526 // // Set the top handler address to the next handler past the current ENTRY handler. | |
| 6527 // ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 6528 //// __ pop(r2); | |
| 6529 //// __ str(r2, MemOperand(r3)); | |
| 6530 // __ pop(a2); | |
| 6531 // __ sw(a2, MemOperand(a3)); | |
| 6532 // | |
| 6533 // if (type == OUT_OF_MEMORY) { | |
| 6534 // // Set external caught exception to false. | |
| 6535 // ExternalReference external_caught(Top::k_external_caught_exception_address); | |
| 6536 //// __ mov(r0, Operand(false)); | |
| 6537 //// __ mov(r2, Operand(external_caught)); | |
| 6538 //// __ str(r0, MemOperand(r2)); | |
| 6539 // __ li(a0, Operand(false)); | |
| 6540 // __ li(a2, Operand(external_caught)); | |
| 6541 // __ sw(a0, MemOperand(a2)); | |
| 6542 // | |
| 6543 // // Set pending exception and a0 to out of memory exception. | |
| 6544 // Failure* out_of_memory = Failure::OutOfMemoryException(); | |
| 6545 //// __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 6546 //// __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6547 //// __ str(r0, MemOperand(r2)); | |
| 6548 // __ li(a0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 6549 // __ li(a2, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6550 // __ sw(a0, MemOperand(a2)); | |
| 6551 // } | |
| 6552 // | |
| 6553 // // Stack layout at this point. See also StackHandlerConstants. | |
| 6554 // // sp -> state (ENTRY) | |
| 6555 // // fp | |
| 6556 // // lr | |
| 6557 // | |
| 6558 // // Discard handler state (r2 is not used) and restore frame pointer. | |
| 6559 // ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 6560 //// __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. | |
| 6561 // // TOCHECK: Is this correct? | |
| 6562 // __ pop(fp); | |
| 6563 // __ pop(); | |
| 6564 // // Before returning we restore the context from the frame pointer if | |
| 6565 // // not NULL. The frame pointer is NULL in the exception handler of a | |
| 6566 // // JS entry frame. | |
| 6567 //// __ cmp(fp, Operand(0)); | |
| 6568 // // Set cp to NULL if fp is NULL. | |
| 6569 //// __ mov(cp, Operand(0), LeaveCC, eq); | |
| 6570 // // Restore cp otherwise. | |
| 6571 //// __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 6572 // __ beq(fp, zero_reg, 2); | |
| 6573 // __ mov(cp, zero_reg); | |
| 6574 // __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
| 6575 //#ifdef DEBUG | |
| 6576 // if (FLAG_debug_code) { | |
| 6577 //// printf("%s --- unimplemented debug\n", __func__); | |
| 6578 //// __ mov(lr, Operand(pc)); | |
| 6579 // } | |
| 6580 //#endif | |
| 6581 // ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 6582 //// __ pop(pc); | |
| 6583 // __ pop(at); | |
| 6584 // __ jr(Operand(at)); | |
| 6585 } | |
| 6586 | |
| 6587 void CEntryStub::GenerateCore(MacroAssembler* masm, | |
| 6588 Label* throw_normal_exception, | |
| 6589 Label* throw_termination_exception, | |
| 6590 Label* throw_out_of_memory_exception, | |
| 6591 ExitFrame::Mode mode, | |
| 6592 bool do_gc, | |
| 6593 bool always_allocate) { | |
| 6594 UNIMPLEMENTED_(); | |
| 6595 // // r0->a0: result parameter for PerformGC, if any | |
| 6596 // // r4->s0: number of arguments including receiver (C callee-saved) | |
| 6597 // // r5->s1: pointer to builtin function (C callee-saved) | |
| 6598 // // r6->s2: pointer to the first argument (C callee-saved) | |
| 6599 // | |
| 6600 //// __ break_(0x00127); | |
| 6601 // if (do_gc) { | |
| 6602 // // Passing r0. | |
| 6603 // ExternalReference gc_reference = ExternalReference::perform_gc_function(); | |
| 6604 // __ Call(gc_reference.address(), RelocInfo::RUNTIME_ENTRY); | |
| 6605 // __ addiu(sp, sp, -StandardFrameConstants::kCArgsSlotsSize); | |
| 6606 // __ addiu(sp, sp, StandardFrameConstants::kCArgsSlotsSize); | |
| 6607 // __ nop(); // NOP_ADDED | |
| 6608 // } | |
| 6609 // | |
| 6610 // ExternalReference scope_depth = | |
| 6611 // ExternalReference::heap_always_allocate_scope_depth(); | |
| 6612 // if (always_allocate) { | |
| 6613 //// __ mov(r0, Operand(scope_depth)); | |
| 6614 //// __ ldr(r1, MemOperand(r0)); | |
| 6615 //// __ add(r1, r1, Operand(1)); | |
| 6616 //// __ str(r1, MemOperand(r0)); | |
| 6617 // __ li(a0, Operand(scope_depth)); | |
| 6618 // __ lw(a1, MemOperand(a0)); | |
| 6619 // __ addi(a1, a1, 1); | |
| 6620 // __ sw(a1, MemOperand(a0)); | |
| 6621 // } | |
| 6622 // | |
| 6623 // // Call C built-in. | |
| 6624 // // r0 = argc, r1 = argv | |
| 6625 //// __ mov(r0, Operand(r4)); | |
| 6626 //// __ mov(r1, Operand(r6)); | |
| 6627 //// __ break_(0x15642); | |
| 6628 // __ mov(a0, s0); | |
| 6629 // __ mov(a1, s2); | |
| 6630 // | |
| 6631 // // ARM TODO | |
| 6632 // // TODO(1242173): To let the GC traverse the return address of the exit | |
| 6633 // // frames, we need to know where the return address is. Right now, | |
| 6634 // // we push it on the stack to be able to find it again, but we never | |
| 6635 // // restore from it in case of changes, which makes it impossible to | |
| 6636 // // support moving the C entry code stub. This should be fixed, but currently | |
| 6637 // // this is OK because the CEntryStub gets generated so early in the V8 boot | |
| 6638 // // sequence that it is not moving ever. | |
| 6639 // | |
| 6640 // // Current ra has already been saved in EnterExitFrame. | |
| 6641 // // The push and StandardFrameConstants::kMarkerOffset are linked. See | |
| 6642 // // StackFrame::ComputeType(). | |
| 6643 //// masm->add(lr, pc, Operand(4)); // compute return address: (pc + 8) + 4 | |
| 6644 //// masm->push(lr); | |
| 6645 //// masm->Jump(r5); | |
| 6646 // | |
| 6647 // // Get future return address. | |
| 6648 // masm->bal((int16_t)1); | |
| 6649 // masm->addiu(sp, sp, Operand(-4)); | |
| 6650 // masm->addiu(ra, ra, Operand(9*4)); // 9 extra instructions | |
| 6651 // masm->sw(ra, MemOperand(sp)); | |
| 6652 // | |
| 6653 // // We don't use SetupAlignedCall because we need to know exactly how many | |
| 6654 // // instructions we have here. | |
| 6655 // masm->push(s3); // Save s3 on the stack | |
| 6656 // masm->mov(s3, sp); // Save sp | |
| 6657 // masm->li(t0, Operand(~7)); // Load sp mask | |
| 6658 // masm->and_(sp, sp, Operand(t0)); // Align sp. | |
| 6659 // | |
| 6660 // masm->jalr(Operand(s1)); | |
| 6661 // masm->addiu(sp, sp, -StandardFrameConstants::kCArgsSlotsSize); | |
| 6662 // | |
| 6663 // masm->mov(sp, s3); // Restore sp. | |
| 6664 // masm->pop(s3); // Restore s3 | |
| 6665 // | |
| 6666 // | |
| 6667 // if (always_allocate) { | |
| 6668 // // It's okay to clobber r2->a2 and r3->a3 here. Don't mess with r0 and r1 | |
| 6669 // // though (contain the result). | |
| 6670 //// __ mov(r2, Operand(scope_depth)); | |
| 6671 //// __ ldr(r3, MemOperand(r2)); | |
| 6672 //// __ sub(r3, r3, Operand(1)); | |
| 6673 //// __ str(r3, MemOperand(r2)); | |
| 6674 // __ li(a2, Operand(scope_depth)); | |
| 6675 // __ lw(a3, MemOperand(a2)); | |
| 6676 // __ addi(a3, a3, -1); | |
| 6677 // __ sw(a3, MemOperand(a2)); | |
| 6678 // } | |
| 6679 // | |
| 6680 // // check for failure result | |
| 6681 // Label failure_returned; | |
| 6682 // ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | |
| 6683 // // Lower 2 bits of r2 are 0 if r0 has failure tag. | |
| 6684 //// __ add(r2, r0, Operand(1)); | |
| 6685 //// __ tst(r2, Operand(kFailureTagMask)); | |
| 6686 //// __ b(eq, &failure_returned); | |
| 6687 // __ addiu(a2, v0, 1); | |
| 6688 // __ andi(ip, a2, Operand(kFailureTagMask)); | |
| 6689 // __ bcond(eq, &failure_returned, ip, Operand(zero_reg)); | |
| 6690 // __ nop(); | |
| 6691 // | |
| 6692 // // Exit C frame and return. | |
| 6693 // // r0:r1->v0:v1: result | |
| 6694 // // sp: stack pointer | |
| 6695 // // fp: frame pointer | |
| 6696 // __ LeaveExitFrame(mode); | |
| 6697 // | |
| 6698 // // Check whether to retry or throw an exception. | |
| 6699 // Label retry; | |
| 6700 // __ bind(&failure_returned); | |
| 6701 //// __ break_(0x09990); | |
| 6702 // ASSERT(Failure::RETRY_AFTER_GC == 0); | |
| 6703 //// __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | |
| 6704 //// __ b(eq, &retry); | |
| 6705 // __ andi(t0, v0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | |
| 6706 // __ bcond(eq, &retry, t0, Operand(zero_reg)); | |
| 6707 // __ nop(); // NOP_ADDED | |
| 6708 // | |
| 6709 // // Special handling of out-of-memory exceptions. | |
| 6710 // Failure* out_of_memory = Failure::OutOfMemoryException(); | |
| 6711 //// __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 6712 //// __ b(eq, throw_out_of_memory_exception); | |
| 6713 // __ bcond(eq, throw_out_of_memory_exception, | |
| 6714 // v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 6715 // __ nop(); // NOP_ADDED | |
| 6716 // | |
| 6717 // // Retrieve the pending exception and clear the variable. | |
| 6718 //// __ mov(ip, Operand(ExternalReference::the_hole_value_location())); | |
| 6719 //// __ ldr(r3, MemOperand(ip)); | |
| 6720 //// __ mov(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6721 //// __ ldr(r0, MemOperand(ip)); | |
| 6722 //// __ str(r3, MemOperand(ip)); | |
| 6723 // __ li(ip, Operand(ExternalReference::the_hole_value_location())); | |
| 6724 // __ lw(a3, MemOperand(ip)); | |
| 6725 // __ li(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6726 // __ lw(v0, MemOperand(ip)); | |
| 6727 // __ sw(a3, MemOperand(ip)); | |
| 6728 // | |
| 6729 // // Special handling of termination exceptions which are uncatchable | |
| 6730 // // by JavaScript code. | |
| 6731 //// __ cmp(r0, Operand(Factory::termination_exception())); | |
| 6732 //// __ b(eq, throw_termination_exception); | |
| 6733 // __ bcond(eq, throw_termination_exception, | |
| 6734 // v0, Operand(Factory::termination_exception())); | |
| 6735 // __ nop(); // NOP_ADDED | |
| 6736 // | |
| 6737 // // Handle normal exception. | |
| 6738 // __ jmp(throw_normal_exception); | |
| 6739 // __ nop(); // NOP_ADDED | |
| 6740 // | |
| 6741 // __ bind(&retry); // pass last failure (r0) as parameter (r0) when retrying | |
| 6742 } | |
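Two tricks in the commented MIPS sequence above are worth unpacking. First, the stack-alignment dance before the C call: the old sp is parked in s3, then sp is ANDed with ~7 so the callee sees an 8-byte-aligned stack. A minimal standalone sketch of that rounding (plain C++, mask inlined as an assumption from the `li(t0, ~7)` above):

```cpp
#include <cassert>
#include <cstdint>

// Round an address down to an 8-byte boundary, as in
// "li(t0, Operand(~7)); and_(sp, sp, t0)" above. The caller keeps the
// original value (the role s3 plays) so the stack can be restored later.
inline uintptr_t AlignDown8(uintptr_t sp) {
  return sp & ~static_cast<uintptr_t>(7);
}

int main() {
  assert(AlignDown8(0x1004) == 0x1000);
  assert(AlignDown8(0x1000) == 0x1000);  // already aligned: unchanged
  return 0;
}
```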
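Second, the failure check: in V8's tagging scheme of this era, a failure object carries the two-bit tag 0b11, so adding 1 clears the low bits exactly when the tag is present; that is what the addiu/andi/branch triple tests, and what the ASSERT above guarantees. A hedged sketch, with the tag constants written out as assumptions:

```cpp
#include <cassert>
#include <cstdint>

// Assumed tag constants, mirroring the ASSERT in the code above:
// ((kFailureTag + 1) & kFailureTagMask) == 0 must hold.
const int kFailureTagSize = 2;
const uintptr_t kFailureTag = 3;  // low bits 0b11 mark a failure object
const uintptr_t kFailureTagMask = (1u << kFailureTagSize) - 1;

// Equivalent of "addiu(a2, v0, 1); andi(ip, a2, kFailureTagMask); beq ...":
// (value + 1) has zero low bits exactly when value was failure-tagged.
bool HasFailureTag(uintptr_t value) {
  return ((value + 1) & kFailureTagMask) == 0;
}

int main() {
  assert(((kFailureTag + 1) & kFailureTagMask) == 0);
  assert(HasFailureTag((0x1234u << kFailureTagSize) | kFailureTag));
  assert(!HasFailureTag(0x1234u << 1));  // a smi ends in 0: not a failure
  return 0;
}
```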
| 6743 | |
| 6744 void CEntryStub::GenerateBody(MacroAssembler* masm, bool is_debug_break) { | |
| 6745 UNIMPLEMENTED_(); | |
| 6746 // // Called from JavaScript; parameters are on the stack as when calling a JS function | |
| 6747 // // a0: number of arguments including receiver | |
| 6748 // // a1: pointer to builtin function | |
| 6749 // // fp: frame pointer (restored after C call) | |
| 6750 // // sp: stack pointer (restored as callee's sp after C call) | |
| 6751 // // cp: current context (C callee-saved) | |
| 6752 // | |
| 6753 // // NOTE: Invocations of builtins may return failure objects | |
| 6754 // // instead of a proper result. The builtin entry handles | |
| 6755 // // this by performing a garbage collection and retrying the | |
| 6756 // // builtin once. | |
| 6757 // | |
| 6758 // ExitFrame::Mode mode = is_debug_break | |
| 6759 // ? ExitFrame::MODE_DEBUG | |
| 6760 // : ExitFrame::MODE_NORMAL; | |
| 6761 // | |
| 6762 // // Enter the exit frame that transitions from JavaScript to C++. | |
| 6763 // __ EnterExitFrame(mode); | |
| 6764 // | |
| 6765 // // s0: number of arguments (C callee-saved) | |
| 6766 // // s1: pointer to builtin function (C callee-saved) | |
| 6767 // // s2: pointer to first argument (C callee-saved) | |
| 6768 // | |
| 6769 // Label throw_normal_exception; | |
| 6770 // Label throw_termination_exception; | |
| 6771 // Label throw_out_of_memory_exception; | |
| 6772 // | |
| 6773 // // Call into the runtime system. | |
| 6774 // GenerateCore(masm, | |
| 6775 // &throw_normal_exception, | |
| 6776 // &throw_termination_exception, | |
| 6777 // &throw_out_of_memory_exception, | |
| 6778 // mode, | |
| 6779 // false, | |
| 6780 // false); | |
| 6781 // | |
| 6782 // // Do space-specific GC and retry runtime call. | |
| 6783 // GenerateCore(masm, | |
| 6784 // &throw_normal_exception, | |
| 6785 // &throw_termination_exception, | |
| 6786 // &throw_out_of_memory_exception, | |
| 6787 // mode, | |
| 6788 // true, | |
| 6789 // false); | |
| 6790 // | |
| 6791 // // Do full GC and retry runtime call one final time. | |
| 6792 // Failure* failure = Failure::InternalError(); | |
| 6793 // __ li(v0, Operand(reinterpret_cast<int32_t>(failure))); | |
| 6794 // GenerateCore(masm, | |
| 6795 // &throw_normal_exception, | |
| 6796 // &throw_termination_exception, | |
| 6797 // &throw_out_of_memory_exception, | |
| 6798 // mode, | |
| 6799 // true, | |
| 6800 // true); | |
| 6801 // | |
| 6802 // __ bind(&throw_out_of_memory_exception); | |
| 6803 // GenerateThrowUncatchable(masm, OUT_OF_MEMORY); | |
| 6804 // | |
| 6805 // __ bind(&throw_termination_exception); | |
| 6806 // GenerateThrowUncatchable(masm, TERMINATION); | |
| 6807 // | |
| 6808 // __ bind(&throw_normal_exception); | |
| 6809 // GenerateThrowTOS(masm); | |
| 6810 } | |
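The three GenerateCore calls above form an escalation ladder: try the builtin with no GC, retry after a space-specific GC, then retry one final time after a full GC (with v0 pre-loaded with an internal-error failure in case even that attempt bails out). A hypothetical C++ rendering of that control flow; RunBuiltin, CollectGarbage, and CollectAllGarbage are stand-ins, not real V8 entry points:

```cpp
enum Outcome { kSuccess, kRetryAfterGC, kException };

// Stubbed-out stand-ins for the runtime pieces (assumptions, not V8 API).
Outcome RunBuiltin() { return kSuccess; }
void CollectGarbage() {}     // space-specific GC
void CollectAllGarbage() {}  // full GC

Outcome CallRuntimeWithRetries() {
  Outcome result = RunBuiltin();          // first GenerateCore: no GC
  if (result != kRetryAfterGC) return result;
  CollectGarbage();                       // second: GC the failing space
  result = RunBuiltin();
  if (result != kRetryAfterGC) return result;
  CollectAllGarbage();                    // third: full GC, final attempt
  return RunBuiltin();
}

int main() { return CallRuntimeWithRetries() == kSuccess ? 0 : 1; }
```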
| 6811 | |
| 6812 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | |
| 6813 UNIMPLEMENTED_(); | |
| 6814 // Just return for now. | |
| 6815 __ Jump(ra); | |
| 6816 __ nop(); | |
| 6817 | |
| 6818 | |
| 6819 // // a0: code entry | |
| 6820 // // a1: function | |
| 6821 // // a2: receiver | |
| 6822 // // a3: argc | |
| 6823 // // [sp+16]: argv (16 = 4*kPointerSize for arguments slots) | |
| 6824 // | |
| 6825 // Label invoke, exit; | |
| 6826 // | |
| 6827 // // Called from C, so do not pop argc and args on exit (preserve sp) | |
| 6828 // // No need to save register-passed args | |
| 6829 // // Save callee-saved registers (incl. cp and fp), sp, and ra | |
| 6830 // __ multi_push(kCalleeSaved | ra.bit()); | |
| 6831 // | |
| 6832 // // Load argv in s0 register. | |
| 6833 // __ lw(s0, MemOperand(sp, (kNumCalleeSaved + 1) * kPointerSize + | |
| 6834 // StandardFrameConstants::kCArgsSlotsSize)); | |
| 6835 // | |
| 6836 // // Push a frame with special values setup to mark it as an entry frame. | |
| 6837 // // a0: code entry | |
| 6838 // // a1: function | |
| 6839 // // a2: receiver | |
| 6840 // // a3: argc | |
| 6841 // // s0: argv | |
| 6842 // | |
| 6843 // // We build an EntryFrame. | |
| 6844 // // | | | |
| 6845 // // |-----------------------| | |
| 6846 // // | bad fp (0xff...f) | + | |
| 6847 // // |-----------------------| <-- new fp | | |
| 6848 // // | context slot | | | |
| 6849 // // |-----------------------| | | |
| 6850 // // | function slot | v | |
| 6851 // // |-----------------------| - | |
| 6852 // // | caller fp | | |
| 6853 // // |-----------------------| | |
| 6854 // // | | | |
| 6855 // | |
| 6856 // __ li(t0, Operand(-1)); // Push a bad frame pointer to fail if it is used. | |
| 6857 // int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | |
| 6858 // __ li(t1, Operand(Smi::FromInt(marker))); | |
| 6859 // __ li(t2, Operand(Smi::FromInt(marker))); | |
| 6860 // __ li(t3, Operand(ExternalReference(Top::k_c_entry_fp_address))); | |
| 6861 // __ lw(t3, MemOperand(t3)); | |
| 6862 // __ multi_push(t0.bit() | t1.bit() | t2.bit() | t3.bit()); | |
| 6863 // | |
| 6864 // // Setup frame pointer for the frame to be pushed. | |
| 6865 // __ addiu(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | |
| 6866 // | |
| 6867 // // Call a faked try-block that does the invoke. | |
| 6868 // __ bal(&invoke); | |
| 6869 // __ nop(); // NOP_ADDED | |
| 6870 // | |
| 6871 // // Caught exception: Store result (exception) in the pending | |
| 6872 // // exception field in the JSEnv and return a failure sentinel. | |
| 6873 // // Coming in here, fp will be invalid because the PushTryHandler below | |
| 6874 // // sets it to 0 to signal the existence of the JSEntry frame. | |
| 6875 // __ li(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6876 // __ sw(v0, MemOperand(ip)); // We come back from 'invoke'; result is in v0. | |
| 6877 // __ li(v0, Operand(reinterpret_cast<int32_t>(Failure::Exception()))); | |
| 6878 // __ b(&exit); | |
| 6879 // __ nop(); // NOP_ADDED | |
| 6880 // | |
| 6881 // // Invoke: Link this frame into the handler chain. | |
| 6882 // __ bind(&invoke); | |
| 6883 // // Must preserve a0-a3 and s0. | |
| 6884 // __ PushTryHandler(IN_JS_ENTRY, JS_ENTRY_HANDLER); | |
| 6885 // // If an exception not caught by another handler occurs, this handler | |
| 6886 // // returns control to the code after the bal(&invoke) above, which | |
| 6887 // // restores all kCalleeSaved registers (including cp and fp) to their | |
| 6888 // // saved values before returning a failure to C. | |
| 6889 // | |
| 6890 // // Clear any pending exceptions. | |
| 6891 // __ li(ip, Operand(ExternalReference::the_hole_value_location())); | |
| 6892 // __ lw(t0, MemOperand(ip)); | |
| 6893 // __ li(ip, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 6894 // __ sw(t0, MemOperand(ip)); | |
| 6895 // | |
| 6896 // // Invoke the function by calling through JS entry trampoline builtin. | |
| 6897 // // Notice that we cannot store a reference to the trampoline code directly in | |
| 6898 // // this stub, because runtime stubs are not traversed when doing GC. | |
| 6899 // | |
| 6900 // // Expected registers by Builtins::JSEntryTrampoline | |
| 6901 // // a0: code entry | |
| 6902 // // a1: function | |
| 6903 // // a2: receiver | |
| 6904 // // a3: argc | |
| 6905 // // s0: argv | |
| 6906 // if (is_construct) { | |
| 6907 // ExternalReference construct_entry(Builtins::JSConstructEntryTrampoline); | |
| 6908 // __ li(ip, Operand(construct_entry)); | |
| 6909 // } else { | |
| 6910 // ExternalReference entry(Builtins::JSEntryTrampoline); | |
| 6911 // __ li(ip, Operand(entry)); | |
| 6912 // } | |
| 6913 // __ lw(ip, MemOperand(ip)); // deref address | |
| 6914 // | |
| 6915 // // Branch and link to JSEntryTrampoline. | |
| 6916 // __ addiu(t9, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | |
| 6917 // __ jalr(Operand(t9)); | |
| 6918 // __ nop(); // NOP_ADDED | |
| 6919 // | |
| 6920 // // Unlink this frame from the handler chain. When reading the | |
| 6921 // // address of the next handler, there is no need to use the address | |
| 6922 // // displacement since the current stack pointer (sp) points directly | |
| 6923 // // to the stack handler. | |
| 6924 // __ lw(a3, MemOperand(sp, StackHandlerConstants::kNextOffset)); | |
| 6925 // __ li(ip, Operand(ExternalReference(Top::k_handler_address))); | |
| 6926 // __ sw(a3, MemOperand(ip)); | |
| 6927 // | |
| 6928 // // This restores sp to its position before PushTryHandler. | |
| 6929 // __ addiu(sp, sp, Operand(StackHandlerConstants::kSize)); | |
| 6930 // | |
| 6931 // | |
| 6932 // __ bind(&exit); // v0 holds result | |
| 6933 // // Restore the top frame descriptors from the stack. | |
| 6934 // __ pop(a3); | |
| 6935 // __ li(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); | |
| 6936 // __ sw(a3, MemOperand(ip)); | |
| 6937 // | |
| 6938 // // Reset the stack to the callee-saved registers. | |
| 6939 // __ addiu(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | |
| 6940 // | |
| 6941 // // Restore callee-saved registers and return. | |
| 6942 // __ multi_pop(kCalleeSaved | ra.bit()); | |
| 6943 // __ Jump(ra); | |
| 6944 // __ nop(); // NOP_ADDED | |
| 6945 // | |
| 6946 } | |
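For orientation, the four slots pushed above (bad fp, two marker smis, the saved c_entry_fp) can be pictured as a plain struct; this is only a reading aid for the ASCII diagram, not V8's actual frame definitions, and the fields are listed in the diagram's top-to-bottom order:

```cpp
#include <cstdint>

// One field per multi_push slot above. The duplicated marker smi fills
// both the context and function slots so StackFrame::ComputeType() can
// classify the frame as ENTRY or ENTRY_CONSTRUCT.
struct EntryFrameSketch {
  uintptr_t bad_fp;        // -1 (t0): fails fast if ever dereferenced
  uintptr_t context_slot;  // Smi::FromInt(marker) (t1)
  uintptr_t function_slot; // Smi::FromInt(marker) (t2)
  uintptr_t caller_fp;     // saved Top::c_entry_fp (t3)
};

int main() {
  return sizeof(EntryFrameSketch) == 4 * sizeof(uintptr_t) ? 0 : 1;
}
```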
| 6947 | |
| 6948 | |
| 6949 // This stub performs an instanceof, calling the builtin function if | |
| 6950 // necessary. Uses a1 for the object, a0 for the function that it may | |
| 6951 // be an instance of (these are fetched from the stack). | |
| 6952 void InstanceofStub::Generate(MacroAssembler* masm) { | |
| 6953 UNIMPLEMENTED_(); | |
| 6954 // // Get the object - slow case for smis (we may need to throw an exception | |
| 6955 // // depending on the rhs). | |
| 6956 // Label slow, loop, is_instance, is_not_instance; | |
| 6957 //// __ ldr(r0, MemOperand(sp, 1 * kPointerSize)); | |
| 6958 //// __ BranchOnSmi(r0, &slow); | |
| 6959 // __ lw(a0, MemOperand(sp, 1 * kPointerSize)); | |
| 6960 // __ BranchOnSmi(a0, &slow); | |
| 6961 // __ nop(); // NOP_ADDED | |
| 6962 // | |
| 6963 // // Check that the left-hand side is a JS object and put its map in a3. | |
| 6964 //// __ CompareObjectType(r0, r3, r2, FIRST_JS_OBJECT_TYPE); | |
| 6965 //// __ b(lt, &slow); | |
| 6966 //// __ cmp(r2, Operand(LAST_JS_OBJECT_TYPE)); | |
| 6967 //// __ b(gt, &slow); | |
| 6968 // __ GetObjectType(a0, a3, a2); | |
| 6969 // __ bcond(less, &slow, a2, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 6970 // __ nop(); // NOP_ADDED | |
| 6971 // __ bcond(greater, &slow, a2, Operand(LAST_JS_OBJECT_TYPE)); | |
| 6972 // __ nop(); // NOP_ADDED | |
| 6973 // | |
| 6974 // // Get the prototype of the function (t4 is the result, a2 is scratch). | |
| 6975 //// __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | |
| 6976 //// __ TryGetFunctionPrototype(r1, r4, r2, &slow); | |
| 6977 // __ lw(a1, MemOperand(sp, 0 * kPointerSize)); | |
| 6978 // __ TryGetFunctionPrototype(a1, t4, a2, &slow); | |
| 6979 // | |
| 6980 // // Check that the function prototype is a JS object. | |
| 6981 //// __ BranchOnSmi(r4, &slow); | |
| 6982 //// __ CompareObjectType(r4, r5, r5, FIRST_JS_OBJECT_TYPE); | |
| 6983 //// __ b(lt, &slow); | |
| 6984 //// __ cmp(r5, Operand(LAST_JS_OBJECT_TYPE)); | |
| 6985 //// __ b(gt, &slow); | |
| 6986 // __ BranchOnSmi(t4, &slow); | |
| 6987 // __ nop(); // NOP_ADDED | |
| 6988 // __ GetObjectType(t4, t5, t5); | |
| 6989 // __ bcond(less, &slow, t5, Operand(FIRST_JS_OBJECT_TYPE)); | |
| 6990 // __ nop(); // NOP_ADDED | |
| 6991 // __ bcond(greater, &slow, t5, Operand(LAST_JS_OBJECT_TYPE)); | |
| 6992 // __ nop(); // NOP_ADDED | |
| 6993 // | |
| 6994 // // Register mapping: a3 is the object map and t4 is the function prototype. | |
| 6995 // // Get the prototype of the object into a2. | |
| 6996 //// __ ldr(r2, FieldMemOperand(r3, Map::kPrototypeOffset)); | |
| 6997 // __ lw(a2, FieldMemOperand(a3, Map::kPrototypeOffset)); | |
| 6998 // | |
| 6999 // // Loop through the prototype chain looking for the function prototype. | |
| 7000 // __ bind(&loop); | |
| 7001 //// __ cmp(r2, Operand(r4)); | |
| 7002 //// __ b(eq, &is_instance); | |
| 7003 //// __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 7004 //// __ cmp(r2, ip); | |
| 7005 //// __ b(eq, &is_not_instance); | |
| 7006 //// __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); | |
| 7007 //// __ ldr(r2, FieldMemOperand(r2, Map::kPrototypeOffset)); | |
| 7008 //// __ jmp(&loop); | |
| 7009 // __ bcond(eq, &is_instance, a2, Operand(t4)); | |
| 7010 // __ nop(); // NOP_ADDED | |
| 7011 // __ LoadRoot(ip, Heap::kNullValueRootIndex); | |
| 7012 // __ bcond(eq, &is_not_instance, a2, Operand(ip)); | |
| 7013 // __ nop(); // NOP_ADDED | |
| 7014 // __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); | |
| 7015 // __ lw(a2, FieldMemOperand(a2, Map::kPrototypeOffset)); | |
| 7016 // __ b(&loop); | |
| 7017 // __ nop(); // NOP_ADDED | |
| 7018 // | |
| 7019 // __ bind(&is_instance); | |
| 7020 //// __ mov(r0, Operand(Smi::FromInt(0))); | |
| 7021 //// __ pop(); | |
| 7022 //// __ pop(); | |
| 7023 //// __ mov(pc, Operand(lr)); // Return. | |
| 7024 // __ li(v0, Operand(Smi::FromInt(0))); | |
| 7025 // __ pop(); | |
| 7026 // __ pop(); | |
| 7027 // __ jr(ra); | |
| 7028 // __ nop(); // NOP_ADDED | |
| 7029 // | |
| 7030 // __ bind(&is_not_instance); | |
| 7031 // __ li(v0, Operand(Smi::FromInt(1))); | |
| 7032 // __ pop(); | |
| 7033 // __ pop(); | |
| 7034 // __ jr(ra); | |
| 7035 // __ nop(); // NOP_ADDED | |
| 7036 // | |
| 7037 // // Slow-case. Tail call builtin. | |
| 7038 // __ bind(&slow); | |
| 7039 // // TODO(MIPS.5) | |
| 7040 // __ break_(0x00666); | |
| 7041 //// __ li(a0, Operand(1)); // Arg count without receiver. | |
| 7042 //// __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); | |
| 7043 //// __ nop(); // NOP_ADDED | |
| 7044 } | |
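Conceptually, the loop above is an ordinary prototype-chain walk: start from the object's map, follow prototype links until either the function's prototype (instance) or null (not an instance) is reached; the stub then returns Smi 0 or Smi 1 respectively. A sketch with stand-in types (HeapObject and Map here are simplifications, not V8's real classes):

```cpp
#include <iostream>

struct Map;
struct HeapObject { Map* map; };
struct Map { HeapObject* prototype; };

// Walks object->map->prototype->map->prototype... as the bcond loop does,
// stopping at the function prototype (instance) or null (not an instance).
bool IsInstanceOf(HeapObject* object, HeapObject* function_prototype) {
  for (HeapObject* p = object->map->prototype; p != nullptr;
       p = p->map->prototype) {
    if (p == function_prototype) return true;
  }
  return false;
}

int main() {
  Map proto_map = {nullptr};
  HeapObject proto = {&proto_map};  // plays the function's prototype
  Map obj_map = {&proto};
  HeapObject obj = {&obj_map};
  std::cout << IsInstanceOf(&obj, &proto) << "\n";  // prints 1
  return 0;
}
```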
| 7045 | |
| 7046 | |
| 7047 void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) { | |
| 7048 UNIMPLEMENTED_(); | |
| 7049 // // Check if the calling frame is an arguments adaptor frame. | |
| 7050 // Label adaptor; | |
| 7051 //// __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7052 //// __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 7053 //// __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7054 //// __ b(eq, &adaptor); | |
| 7055 // __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7056 // __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | |
| 7057 // __ bcond(eq, &adaptor, a3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7058 // __ nop(); // NOP_ADDED | |
| 7059 // | |
| 7060 // // Nothing to do: the formal parameter count has already been | |
| 7061 // // passed in register a0 by the calling function. Just return it. | |
| 7062 // __ mov(v0, a0); | |
| 7063 //// __ Jump(lr); | |
| 7064 // __ Jump(ra); | |
| 7065 // __ nop(); // NOP_ADDED | |
| 7066 // | |
| 7067 // // Arguments adaptor case: Read the arguments length from the | |
| 7068 // // adaptor frame and return it. | |
| 7069 // __ bind(&adaptor); | |
| 7070 //// __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7071 //// __ Jump(lr); | |
| 7072 // __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7073 // __ Jump(ra); | |
| 7074 // __ nop(); // NOP_ADDED | |
| 7075 } | |
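All three ArgumentsAccessStub paths begin with the same test: load the caller's fp, then check whether that frame's context slot holds the ARGUMENTS_ADAPTOR sentinel smi instead of a real context pointer. A hedged sketch; the slot indices and the marker's numeric value are assumptions standing in for the real StandardFrameConstants and StackFrame enum:

```cpp
#include <cstdint>

// Assumed word indices relative to a frame pointer (stand-ins for
// StandardFrameConstants::kCallerFPOffset / kContextOffset).
const int kCallerFPSlot = 0;
const int kContextSlot = 1;

// Stand-in for Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR); the actual
// enum value is an assumption here.
const intptr_t kAdaptorMarker = 4 << 1;

bool CallerIsArgumentsAdaptor(intptr_t* fp) {
  intptr_t* caller_fp = reinterpret_cast<intptr_t*>(fp[kCallerFPSlot]);
  return caller_fp[kContextSlot] == kAdaptorMarker;
}

int main() {
  intptr_t caller_frame[2] = {0, kAdaptorMarker};  // fake adaptor frame
  intptr_t frame[1] = {reinterpret_cast<intptr_t>(caller_frame)};
  return CallerIsArgumentsAdaptor(frame) ? 0 : 1;
}
```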
| 7076 | |
| 7077 | |
| 7078 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | |
| 7079 UNIMPLEMENTED_(); | |
| 7080 // // The displacement is the offset of the last parameter (if any) | |
| 7081 // // relative to the frame pointer. | |
| 7082 // static const int kDisplacement = | |
| 7083 // StandardFrameConstants::kCallerSPOffset - kPointerSize; | |
| 7084 // | |
| 7085 // // Check that the key is a smi. | |
| 7086 // Label slow; | |
| 7087 //// __ BranchOnNotSmi(r1, &slow); | |
| 7088 // __ BranchOnNotSmi(a1, &slow); | |
| 7089 // | |
| 7090 // // Check if the calling frame is an arguments adaptor frame. | |
| 7091 // Label adaptor; | |
| 7092 //// __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7093 //// __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 7094 //// __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7095 //// __ b(eq, &adaptor); | |
| 7096 // __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7097 // __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | |
| 7098 // __ bcond(eq, &adaptor, a3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7099 // __ nop(); // NOP_ADDED | |
| 7100 // | |
| 7101 // // Check index against formal parameters count limit passed in | |
| 7102 // // through register a0. Use unsigned comparison to get the negative | |
| 7103 // // check for free. | |
| 7104 //// __ cmp(r1, r0); | |
| 7105 //// __ b(cs, &slow); | |
| 7106 // __ bcond(greater_equal, &slow, a1, Operand(a0)); | |
| 7107 // __ nop(); // NOP_ADDED | |
| 7108 // | |
| 7109 // // Read the argument from the stack and return it. | |
| 7110 //// __ sub(r3, r0, r1); | |
| 7111 //// __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 7112 //// __ ldr(r0, MemOperand(r3, kDisplacement)); | |
| 7113 //// __ Jump(lr); | |
| 7114 // __ sub(a3, a0, Operand(a1)); | |
| 7115 // __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | |
| 7116 // __ addu(a3, fp, Operand(t3)); | |
| 7117 // __ lw(v0, MemOperand(a3, kDisplacement)); | |
| 7118 // __ Jump(ra); | |
| 7119 // __ nop(); // NOP_ADDED | |
| 7120 // | |
| 7121 // // Arguments adaptor case: Check index against actual arguments | |
| 7122 // // limit found in the arguments adaptor frame. Use unsigned | |
| 7123 // // comparison to get negative check for free. | |
| 7124 // __ bind(&adaptor); | |
| 7125 //// __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7126 //// __ cmp(r1, r0); | |
| 7127 //// __ b(cs, &slow); | |
| 7128 // __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7129 // __ bcond(greater_equal, &slow, a1, Operand(a0)); | |
| 7130 // __ nop(); // NOP_ADDED | |
| 7131 // | |
| 7132 // // Read the argument from the adaptor frame and return it. | |
| 7133 //// __ sub(r3, r0, r1); | |
| 7134 //// __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 7135 //// __ ldr(r0, MemOperand(r3, kDisplacement)); | |
| 7136 //// __ Jump(lr); | |
| 7137 // __ sub(a3, a0, Operand(a1)); | |
| 7138 // __ sll(t3, a3, kPointerSizeLog2 - kSmiTagSize); | |
| 7139 // __ addu(a3, a2, Operand(t3)); | |
| 7140 // __ lw(v0, MemOperand(a3, kDisplacement)); | |
| 7141 // __ Jump(ra); | |
| 7142 // __ nop(); // NOP_ADDED | |
| 7143 // | |
| 7144 // // Slow-case: Handle non-smi or out-of-bounds access to arguments | |
| 7145 // // by calling the runtime system. | |
| 7146 // __ bind(&slow); | |
| 7147 //// __ push(r1); | |
| 7148 // __ push(a1); | |
| 7149 // __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1); | |
| 7150 // __ nop(); // NOP_ADDED | |
| 7151 } | |
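Two idioms in this stub deserve a note. The ARM original's carry-set branch after an unsigned compare rejects negative keys for free, since a negative value casts to a huge unsigned one (the commented MIPS translation's signed greater_equal branch appears to lose that property). And the shift by kPointerSizeLog2 - kSmiTagSize converts a smi-encoded count directly into a byte offset, because a smi already carries one factor of two in its tag bit. A small sketch of both, with the 32-bit constants assumed:

```cpp
#include <cassert>
#include <cstdint>

// One unsigned compare covers both "key >= argc" and "key < 0".
bool InBounds(int32_t key, int32_t argc) {
  return static_cast<uint32_t>(key) < static_cast<uint32_t>(argc);
}

// A smi is value << 1 on a 32-bit target (kSmiTagSize == 1), so shifting
// by kPointerSizeLog2 - kSmiTagSize == 2 - 1 yields value * 4: a byte
// offset measured in pointers.
int32_t SmiToByteOffset(int32_t smi) {
  const int kPointerSizeLog2 = 2;  // 32-bit MIPS, as in this port
  const int kSmiTagSize = 1;
  return smi << (kPointerSizeLog2 - kSmiTagSize);
}

int main() {
  assert(!InBounds(-1, 10));  // negative rejected by the unsigned trick
  assert(InBounds(3, 10) && !InBounds(10, 10));
  assert(SmiToByteOffset(5 << 1) == 5 * 4);
  return 0;
}
```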
| 7152 | |
| 7153 | |
| 7154 void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) { | |
| 7155 UNIMPLEMENTED_(); | |
| 7156 // // Check if the calling frame is an arguments adaptor frame. | |
| 7157 // Label runtime; | |
| 7158 //// __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7159 //// __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | |
| 7160 //// __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7161 //// __ b(ne, &runtime); | |
| 7162 // __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 7163 // __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); | |
| 7164 // __ bcond(ne, &runtime, a3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 7165 // __ nop(); // NOP_ADDED | |
| 7166 // | |
| 7167 // // Patch the arguments.length and the parameters pointer. | |
| 7168 //// __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7169 //// __ str(r0, MemOperand(sp, 0 * kPointerSize)); | |
| 7170 //// __ add(r3, r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 7171 //// __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | |
| 7172 //// __ str(r3, MemOperand(sp, 1 * kPointerSize)); | |
| 7173 // __ lw(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 7174 // __ sw(a0, MemOperand(sp, 0 * kPointerSize)); | |
| 7175 // __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); | |
| 7176 // __ add(a3, a2, t0); | |
| 7177 // __ add(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset)); | |
| 7178 // __ sw(a3, MemOperand(sp, 1 * kPointerSize)); | |
| 7179 // | |
| 7180 // // Do the runtime call to allocate the arguments object. | |
| 7181 // __ bind(&runtime); | |
| 7182 // __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1); | |
| 7183 // __ nop(); // NOP_ADDED | |
| 7184 } | |
| 7185 | |
| 7186 | |
| 7187 void CallFunctionStub::Generate(MacroAssembler* masm) { | |
| 7188 UNIMPLEMENTED_(); | |
| 7189 // Label slow; | |
| 7190 // __ break_(0x7108); | |
| 7191 // // Get the function to call from the stack. | |
| 7192 // // function, receiver [, arguments] | |
| 7193 //// __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); | |
| 7194 // __ lw(a1, MemOperand(sp, kPointerSize * (argc_))); | |
| 7195 //// + StandardFrameConstants::kRArgsSlotsSize + 1))); | |
| 7196 // | |
| 7197 // // Check that the function is really a JavaScript function. | |
| 7198 // // a1: pushed function (to be verified) | |
| 7199 // __ BranchOnSmi(a1, &slow, t0); | |
| 7200 // __ nop(); // NOP_ADDED | |
| 7201 // // Get the map of the function object. | |
| 7202 // __ GetObjectType(a1, a2, a2); | |
| 7203 // __ bcond(ne, &slow, a2, Operand(JS_FUNCTION_TYPE)); | |
| 7204 // __ nop(); // NOP_ADDED | |
| 7205 // | |
| 7206 // // Fast-case: Invoke the function now. | |
| 7207 // // a1: pushed function | |
| 7208 // ParameterCount actual(argc_); | |
| 7209 // __ InvokeFunction(a1, actual, JUMP_FUNCTION, false); | |
| 7210 //// // Args slots are allocated in InvokeFunction. | |
| 7211 //// __ addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); | |
| 7212 // | |
| 7213 // // Slow-case: Non-function called. | |
| 7214 // __ bind(&slow); | |
| 7215 //// __ mov(r0, Operand(argc_)); // Setup the number of arguments. | |
| 7216 //// __ mov(r2, Operand(0)); | |
| 7217 //// __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); | |
| 7218 //// __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | |
| 7219 //// RelocInfo::CODE_TARGET); | |
| 7220 //#ifdef NO_NATIVES | |
| 7221 // UNIMPLEMENTED(); | |
| 7222 // __ break_(0x7162); | |
| 7223 //#else | |
| 7224 // __ li(a0, Operand(argc_)); // Setup the number of arguments. | |
| 7225 // __ li(a2, Operand(0)); | |
| 7226 // __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION); | |
| 7227 // __ break_(0x7147); | |
| 7228 // __ li(v1, Operand(1)); // Tell ArgumentsAdaptorTrampoline we need args slots | |
| 7229 // __ Jump(Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)), | |
| 7230 // RelocInfo::CODE_TARGET); | |
| 7231 // __ nop(); // NOP_ADDED | |
| 7232 //#endif | |
| 7233 } | |
| 7234 | |
| 7235 | |
| 7236 int CompareStub::MinorKey() { | |
| 7237 // Encode the two parameters in a unique 16-bit value. | |
| 7238 ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15)); | |
| 7239 return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0); | |
| 7240 } | |
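MinorKey() packs the stub's two parameters into one small integer: ARM-style condition codes live in the top four bits of cc_, so shifting right by 27 keeps them (bit 27 of a condition code is zero, which leaves bit 0 free), and the strict flag lands in bit 0. A standalone rendering of the same packing:

```cpp
#include <cassert>
#include <cstdint>

// Same packing as CompareStub::MinorKey() above: condition bits shift
// down from the high end of cc, and the strict flag occupies bit 0.
uint32_t MinorKeyFor(uint32_t cc, bool strict) {
  assert((cc >> 28) < (1u << 15));  // the ASSERT's invariant
  return (cc >> 27) | (strict ? 1u : 0u);
}

int main() {
  const uint32_t kEq = 0u << 28;  // assumed encoding of one condition code
  assert(MinorKeyFor(kEq, true) == 1u);
  assert(MinorKeyFor(kEq, false) == 0u);
  return 0;
}
```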
| 7241 | |
| 7242 | |
| 7243 #undef __ | |
| 7244 | |
| 7245 } } // namespace v8::internal | |