| OLD | NEW |
| (Empty) | |
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are |
| 4 // met: |
| 5 // |
| 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. |
| 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 |
| 28 |
| 29 |
| 30 #include "v8.h" |
| 31 |
| 32 #include "bootstrapper.h" |
| 33 #include "codegen-inl.h" |
| 34 #include "debug.h" |
| 35 #include "runtime.h" |
| 36 |
| 37 namespace v8 { |
| 38 namespace internal { |
| 39 |
| 40 MacroAssembler::MacroAssembler(void* buffer, int size) |
| 41 : Assembler(buffer, size), |
| 42 unresolved_(0), |
| 43 generating_stub_(false), |
| 44 allow_stub_calls_(true), |
| 45 code_object_(Heap::undefined_value()) { |
| 46 } |
| 47 |
| 48 |
| 49 |
| 50 void MacroAssembler::Jump(Register target, Condition cond, Register r1, const Operand& r2) { |
| 51 jcond(Operand(target), cond, r1, r2); |
| 52 } |
| 53 |
| 54 |
| 55 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode, |
| 56 Condition cond, Register r1, const Operand& r2) { |
| 57 if (cond != cc_always) { |
| 58 UNIMPLEMENTED(); |
| 59 } |
| 60 // TO_UPGRADE: Use a JAL instead of JALR if the target is in the pc region and |
| 61 // TO_UPGRADE: if the target does not need RelocInfo. |
| 62 // Currently 'li' handles the cases where the target needs to be relocated. |
| 63 li(t9, Operand(target, rmode)); |
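| // Note: jumping through t9 also fits the o32 PIC convention, which expects a |
| // position-independent callee to find its own address in t9 on entry. |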
| 64 jr(Operand(t9)); |
| 65 ASSERT(kCallTargetAddressOffset == 4 * kInstrSize); |
| 66 } |
| 67 |
| 68 |
| 69 void MacroAssembler::Jump(byte* target, RelocInfo::Mode rmode, |
| 70 Condition cond, Register r1, const Operand& r2) { |
| 71 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 72 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
| 73 } |
| 74 |
| 75 |
| 76 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode, |
| 77 Condition cond, Register r1, const Operand& r2) { |
| 78 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 79 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
| 80 } |
| 81 |
| 82 |
| 83 void MacroAssembler::Call(Register target, |
| 84 Condition cond, Register r1, const Operand& r2) { |
| 85 jalcond(Operand(target), cond, r1, r2); |
| 86 } |
| 87 |
| 88 |
| 89 void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode, |
| 90 Condition cond, Register r1, const Operand& r2) { |
| 91 if (cond != cc_always) { |
| 92 UNIMPLEMENTED(); |
| 93 } |
| 94 // TO_UPGRADE: Use a JAL instead of JALR if the target is in the pc region. |
| 95 // TO_UPGRADE: Use jalcond with always. (not implemented when writing this) |
| 96 // CAREFUL: Currently 'li' handles the cases where the target needs to be relocated. |
| 97 li(t9, Operand(target, rmode)); |
| 98 jalr(Operand(t9)); |
| 99 // We assume the jump is the last instruction generated. Some functions use |
| 100 // the branch delay slot (e.g. VirtualFrame::RawCallCodeObject). |
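| // The assert below documents the size of this call sequence: 'li' expands to |
| // two instructions (lui/ori), plus jalr and its delay slot. |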
| 101 ASSERT(kCallTargetAddressOffset == 4 * kInstrSize); |
| 102 } |
| 103 |
| 104 |
| 105 void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode, |
| 106 Condition cond, Register r1, const Operand& r2) { |
| 107 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 108 Call(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
| 109 } |
| 110 |
| 111 |
| 112 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode, |
| 113 Condition cond, Register r1, const Operand& r2) { |
| 114 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 115 Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); |
| 116 } |
| 117 |
| 118 |
| 119 void MacroAssembler::Jump_was(Register target, Condition cond, Register r1, const Operand& r2) { |
| 120 printf("Using Jump_was. Be sure to update the stack on return.\n"); |
| 121 jcond(Operand(target), cond, r1, r2); |
| 122 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 123 } |
| 124 |
| 125 |
| 126 void MacroAssembler::Jump_was(intptr_t target, RelocInfo::Mode rmode, |
| 127 Condition cond, Register r1, const Operand& r2) { |
| 128 printf("Using Jump_was. Be sure to update the stack on return.\n"); |
| 129 if (cond != cc_always) { |
| 130 UNIMPLEMENTED(); |
| 131 } |
| 132 // TO_UPGRADE: Use a JAL instead of JALR if the target is in the pc region and |
| 133 // TO_UPGRADE: if the target does not need RelocInfo. |
| 134 // Currently 'li' handles the cases where the target needs to be relocated. |
| 135 li(t9, Operand(target, rmode)); |
| 136 jr(Operand(t9)); |
| 137 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 138 ASSERT(kCallTargetAddressOffset == 4 * kInstrSize); |
| 139 } |
| 140 |
| 141 |
| 142 void MacroAssembler::Jump_was(byte* target, RelocInfo::Mode rmode, |
| 143 Condition cond, Register r1, const Operand& r2) { |
| 144 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 145 Jump_was(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
| 146 } |
| 147 |
| 148 |
| 149 void MacroAssembler::Jump_was(Handle<Code> code, RelocInfo::Mode rmode, |
| 150 Condition cond, Register r1, const Operand& r2) { |
| 151 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 152 Jump_was(reinterpret_cast<intptr_t>(code.location()), rmode, cond); |
| 153 } |
| 154 // Call with arguments slots. |
| 155 void MacroAssembler::Call_was(Register target, |
| 156 Condition cond, Register r1, const Operand& r2) { |
| 157 |
| 158 jalcond(Operand(target), cond, r1, r2); |
| 159 // Make space for arguments slots. We use the branch delay slot. |
| 160 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 161 // On return we free the argument slots. We have to make sure that nothing |
| 162 // is passed back on the stack. |
| 163 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
| 164 } |
| 165 |
| 166 |
| 167 void MacroAssembler::Call_was(intptr_t target, RelocInfo::Mode rmode, |
| 168 Condition cond, Register r1, const Operand& r2) { |
| 169 if (cond != cc_always) { |
| 170 UNIMPLEMENTED(); |
| 171 } |
| 172 // TO_UPGRADE: Use a JAL instead of JALR if the target is in the pc region. |
| 173 // CAREFUL: Currently 'li' handles the cases where the target needs to be relocated. |
| 174 li(t9, Operand(target, rmode)); |
| 175 jalr(Operand(t9)); |
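| // The first addiu executes in the jalr delay slot, so the argument slots are |
| // reserved before the callee runs; the second addiu frees them on return. |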
| 176 addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 177 addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
| 178 ASSERT(kCallTargetAddressOffset == 4 * kInstrSize); |
| 179 } |
| 180 |
| 181 |
| 182 void MacroAssembler::Call_was(byte* target, RelocInfo::Mode rmode, |
| 183 Condition cond, Register r1, const Operand& r2) { |
| 184 ASSERT(!RelocInfo::IsCodeTarget(rmode)); |
| 185 Call_was(reinterpret_cast<intptr_t>(target), rmode, cond, r1, r2); |
| 186 } |
| 187 |
| 188 |
| 189 void MacroAssembler::Call_was(Handle<Code> code, RelocInfo::Mode rmode, |
| 190 Condition cond, Register r1, const Operand& r2) { |
| 191 ASSERT(RelocInfo::IsCodeTarget(rmode)); |
| 192 Call_was(reinterpret_cast<intptr_t>(code.location()), rmode, cond, r1, r2); |
| 193 } |
| 194 |
| 195 |
| 196 void MacroAssembler::Ret(Condition cond, Register r1, const Operand& r2) { |
| 197 jcond(Operand(ra), cond, r1, r2); |
| 198 } |
| 199 |
| 200 |
| 201 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { |
| 202 UNIMPLEMENTED(); |
| 203 // // Empty the const pool. |
| 204 // CheckConstPool(true, true); |
| 205 // add(pc, pc, Operand(index, |
| 206 // LSL, |
| 207 // assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); |
| 208 // BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); |
| 209 // nop(); // Jump table alignment. |
| 210 // for (int i = 0; i < targets.length(); i++) { |
| 211 // b(targets[i]); |
| 212 // } |
| 213 } |
| 214 |
| 215 |
| 216 void MacroAssembler::LoadRoot(Register destination, |
| 217 Heap::RootListIndex index) { |
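| // s4 is used as the base of the roots array here, so each root is one word |
| // at 'index << kPointerSizeLog2' (assuming s4 is this port's root register). |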
| 218 lw(destination, MemOperand(s4, index << kPointerSizeLog2)); |
| 219 } |
| 220 |
| 221 void MacroAssembler::LoadRoot(Register destination, |
| 222 Heap::RootListIndex index, |
| 223 Condition cond, Register src1, const Operand& src2) { |
| 224 bcond(NegateCondition(cond), 2, src1, src2); |
| 225 nop(); |
| 226 lw(destination, MemOperand(s4, index << kPointerSizeLog2)); |
| 227 } |
| 228 |
| 229 |
| 230 // Will clobber 4 registers: object, offset, scratch, ip. The |
| 231 // register 'object' contains a heap object pointer. The heap object |
| 232 // tag is shifted away. |
| 233 void MacroAssembler::RecordWrite(Register object, Register offset, |
| 234 Register scratch) { |
| 235 UNIMPLEMENTED_(); |
| 236 // // This is how much we shift the remembered set bit offset to get the |
| 237 // // offset of the word in the remembered set. We divide by kBitsPerInt (32, |
| 238 // // shift right 5) and then multiply by kIntSize (4, shift left 2). |
| 239 // const int kRSetWordShift = 3; |
| 240 // |
| 241 // Label fast, done; |
| 242 // |
| 243 // // First, test that the object is not in the new space. We cannot set |
| 244 // // remembered set bits in the new space. |
| 245 // // object: heap object pointer (with tag) |
| 246 // // offset: offset to store location from the object |
| 247 //// and_(scratch, object, Operand(Heap::NewSpaceMask())); |
| 248 //// cmp(scratch, Operand(ExternalReference::new_space_start())); |
| 249 //// b(eq, &done); |
| 250 // and_(scratch, object, Operand(Heap::NewSpaceMask())); |
| 251 // bcond(eq, &done, scratch, Operand(ExternalReference::new_space_start())); |
| 252 // nop(); // NOP_ADDED |
| 253 // |
| 254 // // Compute the bit offset in the remembered set. |
| 255 // // object: heap object pointer (with tag) |
| 256 // // offset: offset to store location from the object |
| 257 //// mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once |
| 258 //// and_(scratch, object, Operand(ip)); // offset into page of the object |
| 259 //// add(offset, scratch, Operand(offset)); // add offset into the object |
| 260 //// mov(offset, Operand(offset, LSR, kObjectAlignmentBits)); |
| 261 // li(ip, Operand(Page::kPageAlignmentMask)); // load mask only once |
| 262 // and_(scratch, object, Operand(ip)); // offset into page of the object |
| 263 // addu(offset, scratch, Operand(offset)); // add offset into the object |
| 264 // srl(offset, offset, kObjectAlignmentBits); |
| 265 // |
| 266 // // Compute the page address from the heap object pointer. |
| 267 // // object: heap object pointer (with tag) |
| 268 // // offset: bit offset of store position in the remembered set |
| 269 //// bic(object, object, Operand(ip)); |
| 270 // andi(object, object, Operand(~Page::kPageAlignmentMask)); |
| 271 // |
| 272 // // If the bit offset lies beyond the normal remembered set range, it is in |
| 273 // // the extra remembered set area of a large object. |
| 274 // // object: page start |
| 275 // // offset: bit offset of store position in the remembered set |
| 276 //// cmp(offset, Operand(Page::kPageSize / kPointerSize)); |
| 277 //// b(lt, &fast); |
| 278 // bcond(less, &fast, offset, Operand(Page::kPageSize / kPointerSize)); |
| 279 // nop(); // NOP_ADDED |
| 280 // |
| 281 // // Adjust the bit offset to be relative to the start of the extra |
| 282 // // remembered set and the start address to be the address of the extra |
| 283 // // remembered set. |
| 284 //// sub(offset, offset, Operand(Page::kPageSize / kPointerSize)); |
| 285 // addiu(offset, offset, -1* Page::kPageSize / kPointerSize); |
| 286 // // Load the array length into 'scratch' and multiply by four to get the |
| 287 // // size in bytes of the elements. |
| 288 //// ldr(scratch, MemOperand(object, Page::kObjectStartOffset |
| 289 //// + FixedArray::kLengthOffset)); |
| 290 //// mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits)); |
| 291 // lw(scratch, MemOperand(object, Page::kObjectStartOffset |
| 292 // + FixedArray::kLengthOffset)); |
| 293 // sll(scratch, scratch, kObjectAlignmentBits); |
| 294 // // Add the page header (including remembered set), array header, and array |
| 295 // // body size to the page address. |
| 296 // addiu(object, object, Page::kObjectStartOffset + FixedArray::kHeaderSize); |
| 297 // addu(object, object, scratch); |
| 298 // |
| 299 // bind(&fast); |
| 300 // // Get address of the rset word. |
| 301 // // object: start of the remembered set (page start for the fast case) |
| 302 // // offset: bit offset of store position in the remembered set |
| 303 //// bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset |
| 304 //// add(object, object, Operand(scratch, LSR, kRSetWordShift)); |
| 305 // andi(object, object, Operand(~(kBitsPerInt - 1))); |
| 306 // sll(scratch, scratch, kRSetWordShift); |
| 307 // addu(object, object, scratch); |
| 308 // // Get bit offset in the rset word. |
| 309 // // object: address of remembered set word |
| 310 // // offset: bit offset of store position |
| 311 //// and_(offset, offset, Operand(kBitsPerInt - 1)); |
| 312 // and_(offset, offset, Operand(kBitsPerInt - 1)); |
| 313 // |
| 314 //// ldr(scratch, MemOperand(object)); |
| 315 //// mov(ip, Operand(1)); |
| 316 //// orr(scratch, scratch, Operand(ip, LSL, offset)); |
| 317 //// str(scratch, MemOperand(object)); |
| 318 // lw(scratch, MemOperand(object)); |
| 319 // li(ip, Operand(1)); |
| 320 // sllv(ip, ip, offset); |
| 321 // or_(scratch, scratch, Operand(ip)); |
| 322 // sw(scratch, MemOperand(object)); |
| 323 // |
| 324 // bind(&done); |
| 325 } |
| 326 |
| 327 |
| 328 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
| 329 UNIMPLEMENTED_(); |
| 330 // addiu(sp, sp, Operand(-5 * kPointerSize)); |
| 331 // li(t0, Operand(Smi::FromInt(type))); |
| 332 // li(t1, Operand(CodeObject())); |
| 333 // sw(ra, MemOperand(sp, 4 * kPointerSize)); |
| 334 // sw(fp, MemOperand(sp, 3 * kPointerSize)); |
| 335 // sw(cp, MemOperand(sp, 2 * kPointerSize)); |
| 336 // sw(t0, MemOperand(sp, 1 * kPointerSize)); |
| 337 // sw(t1, MemOperand(sp, 0 * kPointerSize)); |
| 338 // addiu(fp, sp, Operand(3 * kPointerSize)); |
| 339 } |
| 340 |
| 341 |
| 342 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
| 343 UNIMPLEMENTED_(); |
| 344 // mov(sp, fp); |
| 345 // lw(fp, MemOperand(sp, 0 * kPointerSize)); |
| 346 // lw(ra, MemOperand(sp, 1 * kPointerSize)); |
| 347 // addiu(sp, Operand(2 * kPointerSize)); |
| 348 |
| 349 } |
| 350 |
| 351 |
| 352 void MacroAssembler::SetupAlignedCall(Register scratch, int arg_count) { |
| 353 UNIMPLEMENTED_(); |
| 354 // push(s3); // Save s3 on the stack |
| 355 // mov(s3, sp); // Save sp |
| 356 // |
| 357 // li(scratch, Operand(~7)); // Load sp mask |
| 358 // and_(sp, sp, Operand(scratch)); // Align sp. |
| 359 // |
| 360 // // We are going to push (arg_count + 0(2))*4 on the stack. We make sure sp will |
| 361 // // be 8 bytes aligned after this. |
| 362 // if( (arg_count % 2) != 0) { |
| 363 // addiu(sp, sp, -4); |
| 364 // } |
| 365 } |
| 366 |
| 367 void MacroAssembler::ReturnFromAlignedCall() { |
| 368 UNIMPLEMENTED_(); |
| 369 // mov(sp, s3); // Restore sp. |
| 370 // pop(s3); // Restore s3 |
| 371 } |
| 372 |
| 373 |
| 374 void MacroAssembler::EnterExitFrame(ExitFrame::Mode mode) { |
| 375 UNIMPLEMENTED_(); |
| 376 // |
| 377 // // Compute the argv pointer and keep it in a callee-saved register. |
| 378 // // a0 is argc. |
| 379 // sll(t0, a0, kPointerSizeLog2); |
| 380 // add(s2, sp, t0); |
| 381 // addi(s2, s2, Operand(-kPointerSize)); |
| 382 // |
| 383 // // Compute callee's stack pointer before making changes and save it as |
| 384 // // ip register so that it is restored as sp register on exit, thereby |
| 385 // // popping the args. |
| 386 // |
| 387 // // ip = sp + kPointerSize * #args; |
| 388 // add(ip, sp, t0); |
| 389 // |
| 390 // // Align the stack at this point. After this point we have 5 pushes, |
| 391 // // so in fact we have to unalign here! See also the assert on the |
| 392 // // alignment immediately below. |
| 393 // AlignStack(1); |
| 394 // |
| 395 // // Save registers. |
| 396 // // We save s3 as we still need it to save sp in CEntryStub::GenerateCore. |
| 397 // addiu(sp, sp, Operand(-16)); |
| 398 // sw(ip, MemOperand(sp, 12)); |
| 399 // sw(s3, MemOperand(sp, 8)); |
| 400 // sw(ra, MemOperand(sp, 4)); |
| 401 // sw(fp, MemOperand(sp, 0)); |
| 402 // mov(fp, sp); // setup new frame pointer |
| 403 // |
| 404 // // Push debug marker. |
| 405 // if (mode == ExitFrame::MODE_DEBUG) { |
| 406 // li(ip, Operand(Smi::FromInt(0))); |
| 407 // } else { |
| 408 // li(ip, Operand(CodeObject())); |
| 409 // } |
| 410 // push(ip); |
| 411 // |
| 412 // // Save the frame pointer and the context in top. |
| 413 // li(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); |
| 414 // sw(fp, MemOperand(ip)); |
| 415 // li(ip, Operand(ExternalReference(Top::k_context_address))); // CURRENT |
| 416 // sw(cp, MemOperand(ip)); |
| 417 // |
| 418 // // Setup argc and the builtin function in callee-saved registers. |
| 419 // mov(s0, a0); |
| 420 // mov(s1, a1); |
| 421 // |
| 422 // |
| 423 //#ifdef ENABLE_DEBUGGER_SUPPORT |
| 424 // // Save the state of all registers to the stack from the memory |
| 425 // // location. This is needed to allow nested break points. |
| 426 //// if (mode == ExitFrame::MODE_DEBUG) { |
| 427 // // Use sp as base to push. |
| 428 // // ia32 seems to have a bug here. (Cf ia32 code TODO(1243899)) |
| 429 //// CopyRegistersFromMemoryToStack(sp, kJSCallerSaved); |
| 430 //// } |
| 431 //#endif |
| 432 } |
| 433 |
| 434 |
| 435 void MacroAssembler::AlignStack(int offset) { |
| 436 UNIMPLEMENTED_(); |
| 437 // int activation_frame_alignment = OS::ActivationFrameAlignment(); |
| 438 // if (activation_frame_alignment != kPointerSize) { |
| 439 // // This code needs to be made more general if this assert doesn't hold. |
| 440 // ASSERT(activation_frame_alignment == 2 * kPointerSize); |
| 441 // li(t3, Operand(Smi::FromInt(0))); |
| 442 // andi(t0, sp, Operand(activation_frame_alignment - 1)); |
| 443 // push(t3, eq, t0, zero_reg ); |
| 444 // } |
| 445 } |
| 446 |
| 447 void MacroAssembler::LeaveExitFrame(ExitFrame::Mode mode) { |
| 448 UNIMPLEMENTED_(); |
| 449 // |
| 450 //#ifdef ENABLE_DEBUGGER_SUPPORT |
| 451 //// // Restore the memory copy of the registers by digging them out from |
| 452 //// // the stack. This is needed to allow nested break points. |
| 453 //// if (mode == ExitFrame::MODE_DEBUG) { |
| 454 //// // This code intentionally clobbers a2 and a3. |
| 455 //// const int kCallerSavedSize = kNumJSCallerSaved * kPointerSize; |
| 456 //// const int kOffset = ExitFrameConstants::kDebugMarkOffset - kCallerSavedSize; |
| 457 //// addi(a3, fp, Operand(kOffset)); |
| 458 //// CopyRegistersFromStackToMemory(a3, a2, kJSCallerSaved); |
| 459 //// } |
| 460 //#endif |
| 461 // |
| 462 // // Clear top frame. |
| 463 // li(a3, Operand(0)); |
| 464 // li(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); |
| 465 // sw(a3, MemOperand(ip)); |
| 466 // |
| 467 // // Restore current context from top and clear it in debug mode. |
| 468 // li(ip, Operand(ExternalReference(Top::k_context_address))); |
| 469 // lw(cp, MemOperand(ip)); |
| 470 //#ifdef DEBUG |
| 471 // sw(a3, MemOperand(ip)); |
| 472 //#endif |
| 473 // |
| 474 // // Pop the arguments, restore registers, and return. |
| 475 // mov(sp, fp); // respect ABI stack constraint |
| 476 // lw(fp, MemOperand(sp, 0)); |
| 477 // lw(ra, MemOperand(sp, 4)); |
| 478 // lw(s3, MemOperand(sp, 8)); |
| 479 // lw(sp, MemOperand(sp, 12)); |
| 480 // jr(ra); |
| 481 // nop(); // NOP_ADDED |
| 482 } |
| 483 |
| 484 |
| 485 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
| 486 const ParameterCount& actual, |
| 487 Handle<Code> code_constant, |
| 488 Register code_reg, |
| 489 Label* done, |
| 490 InvokeFlag flag, |
| 491 bool withArgsSlots) { |
| 492 UNIMPLEMENTED_(); |
| 493 |
| 494 // bool definitely_matches = false; |
| 495 // Label regular_invoke; |
| 496 // |
| 497 // // Check whether the expected and actual arguments count match. If not, |
| 498 // // setup registers according to contract with ArgumentsAdaptorTrampoline: |
| 499 // // r0: actual arguments count |
| 500 // // r1: function (passed through to callee) |
| 501 // // r2: expected arguments count |
| 502 // // r3: callee code entry |
| 503 // |
| 504 // // The code below is made a lot easier because the calling code already sets |
| 505 // // up actual and expected registers according to the contract if values are |
| 506 // // passed in registers. |
| 507 // ASSERT(actual.is_immediate() || actual.reg().is(a0)); |
| 508 // ASSERT(expected.is_immediate() || expected.reg().is(a2)); |
| 509 // ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3)); |
| 510 // |
| 511 // if (expected.is_immediate()) { |
| 512 // ASSERT(actual.is_immediate()); |
| 513 // if (expected.immediate() == actual.immediate()) { |
| 514 // definitely_matches = true; |
| 515 // } else { |
| 516 //// mov(r0, Operand(actual.immediate())); |
| 517 // li(a0, Operand(actual.immediate())); |
| 518 // const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
| 519 // if (expected.immediate() == sentinel) { |
| 520 // // Don't worry about adapting arguments for builtins that |
| 521 // // don't want that done. Skip adaption code by making it look |
| 522 // // like we have a match between expected and actual number of |
| 523 // // arguments. |
| 524 // definitely_matches = true; |
| 525 // } else { |
| 526 //// mov(r2, Operand(expected.immediate())); |
| 527 // li(a2, Operand(expected.immediate())); |
| 528 // } |
| 529 // } |
| 530 // } else { |
| 531 // if (actual.is_immediate()) { |
| 532 //// cmp(expected.reg(), Operand(actual.immediate())); |
| 533 //// b(eq, &regular_invoke); |
| 534 //// mov(r0, Operand(actual.immediate())); |
| 535 // bcond(eq, &regular_invoke, expected.reg(), Operand(actual.immediate())); |
| 536 // nop(); // NOP_ADDED |
| 537 // li(a0, Operand(actual.immediate())); |
| 538 // } else { |
| 539 //// cmp(expected.reg(), Operand(actual.reg())); |
| 540 //// b(eq, &regular_invoke); |
| 541 // bcond(eq, &regular_invoke, expected.reg(), Operand(actual.reg())); |
| 542 // nop(); // NOP_ADDED |
| 543 // } |
| 544 // } |
| 545 // |
| 546 // if (!definitely_matches) { |
| 547 // if (!code_constant.is_null()) { |
| 548 //// mov(r3, Operand(code_constant)); |
| 549 //// add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 550 // li(a3, Operand(code_constant)); |
| 551 // addiu(a3, a3, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 552 // } |
| 553 //// |
| 554 // Handle<Code> adaptor = |
| 555 // Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); |
| 556 // // We use v1 to tell the adaptor if we need args slots. |
| 557 // if(withArgsSlots) { |
| 558 // li(v1, Operand(0)); |
| 559 // } else { |
| 560 // li(v1, Operand(1)); |
| 561 // } |
| 562 // if (flag == CALL_FUNCTION) { |
| 563 // Call(adaptor, RelocInfo::CODE_TARGET); |
| 564 // nop(); // NOP_ADDED |
| 565 // b(done); |
| 566 // nop(); // NOP_ADDED |
| 567 // } else { |
| 568 // Jump(adaptor, RelocInfo::CODE_TARGET); |
| 569 // nop(); // NOP_ADDED |
| 570 // } |
| 571 // bind(&regular_invoke); |
| 572 // } |
| 573 } |
| 574 |
| 575 void MacroAssembler::InvokeCode(Register code, |
| 576 const ParameterCount& expected, |
| 577 const ParameterCount& actual, |
| 578 InvokeFlag flag, |
| 579 bool withArgsSlots) { |
| 580 UNIMPLEMENTED_(); |
| 581 |
| 582 // Label done; |
| 583 // |
| 584 // InvokePrologue(expected, actual, Handle<Code>::null(), code, |
| 585 // &done, flag, withArgsSlots); |
| 586 // nop(); // NOP_ADDED |
| 587 // if (flag == CALL_FUNCTION) { |
| 588 // Call(code); |
| 589 // } else { |
| 590 // ASSERT(flag == JUMP_FUNCTION); |
| 591 // Jump(code); |
| 592 // } |
| 593 // |
| 594 // // Because arguments slots may be needed and we need to ignore them in the |
| 595 // // other case we allocate them here. |
| 596 // if(withArgsSlots) { |
| 597 //// addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 598 // } else { |
| 599 // nop(); |
| 600 // } |
| 601 // |
| 602 // // Continue here if InvokePrologue does handle the invocation due to |
| 603 // // mismatched parameter counts. |
| 604 // bind(&done); |
| 605 } |
| 606 |
| 607 |
| 608 void MacroAssembler::InvokeCode(Handle<Code> code, |
| 609 const ParameterCount& expected, |
| 610 const ParameterCount& actual, |
| 611 RelocInfo::Mode rmode, |
| 612 InvokeFlag flag, |
| 613 bool withArgsSlots) { |
| 614 UNIMPLEMENTED_(); |
| 615 // Label done; |
| 616 // |
| 617 // InvokePrologue(expected, actual, code, no_reg, |
| 618 // &done, flag, withArgsSlots); |
| 619 // nop(); // NOP_ADDED |
| 620 // if (flag == CALL_FUNCTION) { |
| 621 // Call(code, rmode); |
| 622 // } else { |
| 623 // Jump(code, rmode); |
| 624 // } |
| 625 // |
| 626 // // Because arguments slots may be needed and we need to ignore them in the |
| 627 // // other case we allocate them here. |
| 628 // if(withArgsSlots) { |
| 629 //// addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 630 // } else { |
| 631 // nop(); |
| 632 // } |
| 633 // |
| 634 // // Continue here if InvokePrologue does handle the invocation due to |
| 635 // // mismatched parameter counts. |
| 636 // bind(&done); |
| 637 } |
| 638 |
| 639 |
| 640 void MacroAssembler::InvokeFunction(Register fun, |
| 641 const ParameterCount& actual, |
| 642 InvokeFlag flag, |
| 643 bool withArgsSlots) { |
| 644 UNIMPLEMENTED_(); |
| 645 // // Contract with called JS functions requires that function is passed in a1. |
| 646 // ASSERT(fun.is(a1)); |
| 647 // |
| 648 // Register expected_reg = a2; |
| 649 // Register code_reg = a3; // t9 ? |
| 650 // |
| 651 //// ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
| 652 //// ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 653 //// ldr(expected_reg, |
| 654 //// FieldMemOperand(code_reg, |
| 655 //// SharedFunctionInfo::kFormalParameterCountOffset)); |
| 656 //// ldr(code_reg, |
| 657 //// MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); |
| 658 //// add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 659 // lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
| 660 // lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
| 661 // lw(expected_reg, |
| 662 // FieldMemOperand(code_reg, |
| 663 // SharedFunctionInfo::kFormalParameterCountOffset)); |
| 664 // lw(code_reg, |
| 665 // MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag)); |
| 666 // addiu(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 667 // |
| 668 // ParameterCount expected(expected_reg); |
| 669 // InvokeCode(code_reg, expected, actual, flag, withArgsSlots); |
| 670 // // We want the branch delay slot to be free. |
| 671 } |
| 672 |
| 673 |
| 674 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 675 void MacroAssembler::SaveRegistersToMemory(RegList regs) { |
| 676 UNIMPLEMENTED_(); |
| 677 // ASSERT((regs & ~kJSCallerSaved) == 0); |
| 678 // // Copy the content of registers to memory location. |
| 679 // for (int i = 0; i < kNumJSCallerSaved; i++) { |
| 680 // int r = JSCallerSavedCode(i); |
| 681 // if ((regs & (1 << r)) != 0) { |
| 682 // Register reg = { r }; |
| 683 //// mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 684 //// str(reg, MemOperand(ip)); |
| 685 // li(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 686 // sw(reg, MemOperand(ip)); |
| 687 // } |
| 688 // } |
| 689 } |
| 690 |
| 691 |
| 692 void MacroAssembler::RestoreRegistersFromMemory(RegList regs) { |
| 693 UNIMPLEMENTED_(); |
| 694 // ASSERT((regs & ~kJSCallerSaved) == 0); |
| 695 // // Copy the content of memory location to registers. |
| 696 // for (int i = kNumJSCallerSaved; --i >= 0;) { |
| 697 // int r = JSCallerSavedCode(i); |
| 698 // if ((regs & (1 << r)) != 0) { |
| 699 // Register reg = { r }; |
| 700 //// mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 701 //// ldr(reg, MemOperand(ip)); |
| 702 // li(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 703 // lw(reg, MemOperand(ip)); |
| 704 // } |
| 705 // } |
| 706 } |
| 707 |
| 708 |
| 709 void MacroAssembler::CopyRegistersFromMemoryToStack(Register base, |
| 710 RegList regs) { |
| 711 UNIMPLEMENTED_(); |
| 712 // ASSERT((regs & ~kJSCallerSaved) == 0); |
| 713 // int16_t ActualNumSaved = 0; |
| 714 // // Copy the content of the memory location to the stack and adjust base. |
| 715 // for (int i = kNumJSCallerSaved; --i >= 0;) { |
| 716 // int r = JSCallerSavedCode(i); |
| 717 // if ((regs & (1 << r)) != 0) { |
| 718 //// mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 719 //// ldr(ip, MemOperand(ip)); |
| 720 //// str(ip, MemOperand(base, 4, NegPreIndex)); |
| 721 // li(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 722 // lw(ip, MemOperand(ip)); |
| 723 // sw(base, MemOperand(base, -4*(++ActualNumSaved) )); |
| 724 // } |
| 725 // } |
| 726 // addi(base, Operand(-4*ActualNumSaved)); |
| 727 } |
| 728 |
| 729 |
| 730 void MacroAssembler::CopyRegistersFromStackToMemory(Register base, |
| 731 Register scratch, |
| 732 RegList regs) { |
| 733 UNIMPLEMENTED_(); |
| 734 // ASSERT((regs & ~kJSCallerSaved) == 0); |
| 735 // int16_t ActualNumSaved = 0; |
| 736 // // Copy the content of the stack to the memory location and adjust base. |
| 737 // for (int i = 0; i < kNumJSCallerSaved; i++) { |
| 738 // int r = JSCallerSavedCode(i); |
| 739 // if ((regs & (1 << r)) != 0) { |
| 740 //// mov(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 741 //// ldr(scratch, MemOperand(base, 4, PostIndex)); |
| 742 //// str(scratch, MemOperand(ip)); |
| 743 // li(ip, Operand(ExternalReference(Debug_Address::Register(i)))); |
| 744 // lw(scratch, MemOperand(base, 4*(ActualNumSaved++) )); |
| 745 // sw(scratch, MemOperand(ip)); |
| 746 // } |
| 747 // } |
| 748 // addi(base, Operand(4*ActualNumSaved)); |
| 749 } |
| 750 #endif |
| 751 |
| 752 void MacroAssembler::PushTryHandler(CodeLocation try_location, |
| 753 HandlerType type) { |
| 754 UNIMPLEMENTED_(); |
| 755 // // Adjust this code if not the case. |
| 756 // ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); |
| 757 // // The pc (return address) is passed in register lr. |
| 758 // if (try_location == IN_JAVASCRIPT) { |
| 759 // if (type == TRY_CATCH_HANDLER) { |
| 760 // li(t0, Operand(StackHandler::TRY_CATCH)); |
| 761 // } else { |
| 762 // li(t0, Operand(StackHandler::TRY_FINALLY)); |
| 763 // } |
| 764 // ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize |
| 765 // && StackHandlerConstants::kFPOffset == 2 * kPointerSize |
| 766 // && StackHandlerConstants::kPCOffset == 3 * kPointerSize |
| 767 // && StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 768 // |
| 769 // // Save the current handler as the next handler. |
| 770 // li(t2, Operand(ExternalReference(Top::k_handler_address))); |
| 771 // lw(t1, MemOperand(t2)); |
| 772 // |
| 773 // addiu(sp, sp, -StackHandlerConstants::kSize); |
| 774 // sw(ra, MemOperand(sp, 12)); |
| 775 // sw(fp, MemOperand(sp, 8)); |
| 776 // sw(t0, MemOperand(sp, 4)); |
| 777 // sw(t1, MemOperand(sp, 0)); |
| 778 // |
| 779 // // Link this handler as the new current one. |
| 780 // sw(sp, MemOperand(t2)); |
| 781 // |
| 782 // } else { |
| 783 // ASSERT(try_location == IN_JS_ENTRY); |
| 784 // ASSERT(StackHandlerConstants::kStateOffset == 1 * kPointerSize |
| 785 // && StackHandlerConstants::kFPOffset == 2 * kPointerSize |
| 786 // && StackHandlerConstants::kPCOffset == 3 * kPointerSize |
| 787 // && StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
| 788 // |
| 789 // // The frame pointer does not point to a JS frame so we save NULL |
| 790 // // for fp. We expect the code throwing an exception to check fp |
| 791 // // before dereferencing it to restore the context. |
| 792 // li(t0, Operand(StackHandler::ENTRY)); |
| 793 // |
| 794 // // Save the current handler as the next handler. |
| 795 // li(t2, Operand(ExternalReference(Top::k_handler_address))); |
| 796 // lw(t1, MemOperand(t2)); |
| 797 // |
| 798 // // To optimize the code we don't use a multi_push like function. |
| 799 // addiu(sp, sp, -StackHandlerConstants::kSize); |
| 800 // sw(ra, MemOperand(sp, 12)); |
| 801 // sw(zero_reg, MemOperand(sp, 8)); |
| 802 // sw(t0, MemOperand(sp, 4)); |
| 803 // sw(t1, MemOperand(sp, 0)); |
| 804 // |
| 805 // // Link this handler as the new current one. |
| 806 // sw(sp, MemOperand(t2)); |
| 807 // } |
| 808 } |
| 809 |
| 810 |
| 811 Register MacroAssembler::CheckMaps(JSObject* object, Register object_reg, |
| 812 JSObject* holder, Register holder_reg, |
| 813 Register scratch, |
| 814 Label* miss) { |
| 815 UNIMPLEMENTED_(); |
| 816 // // Make sure there's no overlap between scratch and the other |
| 817 // // registers. |
| 818 // ASSERT(!scratch.is(object_reg) && !scratch.is(holder_reg)); |
| 819 // |
| 820 // // Keep track of the current object in register reg. |
| 821 // Register reg = object_reg; |
| 822 // int depth = 1; |
| 823 // |
| 824 // // Check the maps in the prototype chain. |
| 825 // // Traverse the prototype chain from the object and do map checks. |
| 826 // while (object != holder) { |
| 827 // depth++; |
| 828 // |
| 829 // // Only global objects and objects that do not require access |
| 830 // // checks are allowed in stubs. |
| 831 // ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
| 832 // |
| 833 // // Get the map of the current object. |
| 834 //// ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 835 //// cmp(scratch, Operand(Handle<Map>(object->map()))); |
| 836 // lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 837 // |
| 838 // // Branch on the result of the map check. |
| 839 //// b(ne, miss); |
| 840 // bcond(ne, miss, scratch, Operand(Handle<Map>(object->map()))); |
| 841 // nop(); // NOP_ADDED |
| 842 // |
| 843 // // Check access rights to the global object. This has to happen |
| 844 // // after the map check so that we know that the object is |
| 845 // // actually a global object. |
| 846 // if (object->IsJSGlobalProxy()) { |
| 847 // CheckAccessGlobalProxy(reg, scratch, miss); |
| 848 // // Restore scratch register to be the map of the object. In the |
| 849 // // new space case below, we load the prototype from the map in |
| 850 // // the scratch register. |
| 851 //// ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 852 // lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 853 // } |
| 854 // |
| 855 // reg = holder_reg; // from now the object is in holder_reg |
| 856 // JSObject* prototype = JSObject::cast(object->GetPrototype()); |
| 857 // if (Heap::InNewSpace(prototype)) { |
| 858 // // The prototype is in new space; we cannot store a reference |
| 859 // // to it in the code. Load it from the map. |
| 860 //// ldr(reg, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 861 // lw(reg, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 862 // } else { |
| 863 // // The prototype is in old space; load it directly. |
| 864 //// mov(reg, Operand(Handle<JSObject>(prototype))); |
| 865 // li(reg, Operand(Handle<JSObject>(prototype))); |
| 866 // } |
| 867 // |
| 868 // // Go to the next object in the prototype chain. |
| 869 // object = prototype; |
| 870 // } |
| 871 // |
| 872 // // Check the holder map. |
| 873 //// ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 874 //// cmp(scratch, Operand(Handle<Map>(object->map()))); |
| 875 //// b(ne, miss); |
| 876 // lw(scratch, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 877 // bcond(ne, miss, scratch, Operand(Handle<Map>(object->map()))); |
| 878 // nop(); // NOP_ADDED |
| 879 // |
| 880 // // Log the check depth. |
| 881 // LOG(IntEvent("check-maps-depth", depth)); |
| 882 // |
| 883 // // Perform security check for access to the global object and return |
| 884 // // the holder register. |
| 885 // ASSERT(object == holder); |
| 886 // ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
| 887 // if (object->IsJSGlobalProxy()) { |
| 888 // CheckAccessGlobalProxy(reg, scratch, miss); |
| 889 // } |
| 890 // return reg; |
| 891 return at; // UNIMPLEMENTED RETURN |
| 892 } |
| 893 |
| 894 |
| 895 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 896 Register scratch, |
| 897 Label* miss) { |
| 898 UNIMPLEMENTED_(); |
| 899 // Label same_contexts; |
| 900 // |
| 901 // ASSERT(!holder_reg.is(scratch)); |
| 902 // ASSERT(!holder_reg.is(ip)); |
| 903 // ASSERT(!scratch.is(ip)); |
| 904 // |
| 905 // // Load current lexical context from the stack frame. |
| 906 //// ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 907 // lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
| 908 // // In debug mode, make sure the lexical context is set. |
| 909 //#ifdef DEBUG |
| 910 //// cmp(scratch, Operand(0)); |
| 911 // Check(ne, "we should not have an empty lexical context", scratch, Operand(0)); |
| 912 //#endif |
| 913 // |
| 914 // // Load the global context of the current context. |
| 915 // int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; |
| 916 //// ldr(scratch, FieldMemOperand(scratch, offset)); |
| 917 //// ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); |
| 918 // lw(scratch, FieldMemOperand(scratch, offset)); |
| 919 // lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset)); |
| 920 // |
| 921 // // Check the context is a global context. |
| 922 // if (FLAG_debug_code) { |
| 923 // // TODO(119): avoid push(holder_reg)/pop(holder_reg) |
| 924 // // Cannot use ip as a temporary in this verification code. Due to the fact |
| 925 // // that ip is clobbered as part of cmp with an object Operand. |
| 926 // push(holder_reg); // Temporarily save holder on the stack. |
| 927 // // Read the first word and compare to the global_context_map. |
| 928 //// ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 929 //// LoadRoot(ip, Heap::kGlobalContextMapRootIndex); |
| 930 //// cmp(holder_reg, ip); |
| 931 //// Check(eq, "JSGlobalObject::global_context should be a global context."); |
| 932 //// pop(holder_reg); // Restore holder. |
| 933 // lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 934 // LoadRoot(ip, Heap::kGlobalContextMapRootIndex); |
| 935 // Check(eq, "JSGlobalObject::global_context should be a global context.", |
| 936 // holder_reg, Operand(ip)); |
| 937 // pop(holder_reg); // Restore holder. |
| 938 // } |
| 939 // |
| 940 // // Check if both contexts are the same. |
| 941 //// ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 942 //// cmp(scratch, Operand(ip)); |
| 943 //// b(eq, &same_contexts); |
| 944 // lw(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 945 // bcond(eq, &same_contexts, scratch, Operand(ip)); |
| 946 // nop(); // NOP_ADDED |
| 947 // |
| 948 // // Check the context is a global context. |
| 949 // if (FLAG_debug_code) { |
| 950 // // TODO(119): avoid push(holder_reg)/pop(holder_reg) |
| 951 // // Cannot use ip as a temporary in this verification code. Due to the fact |
| 952 // // that ip is clobbered as part of cmp with an object Operand. |
| 953 //// push(holder_reg); // Temporarily save holder on the stack. |
| 954 //// mov(holder_reg, ip); // Move ip to its holding place. |
| 955 //// LoadRoot(ip, Heap::kNullValueRootIndex); |
| 956 //// cmp(holder_reg, ip); |
| 957 //// Check(ne, "JSGlobalProxy::context() should not be null."); |
| 958 // push(holder_reg); // Temporarily save holder on the stack. |
| 959 // mov(holder_reg, ip); // Move ip to its holding place. |
| 960 // LoadRoot(ip, Heap::kNullValueRootIndex); |
| 961 // Check(ne, "JSGlobalProxy::context() should not be null.", |
| 962 // holder_reg, Operand(ip)); |
| 963 // |
| 964 //// ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
| 965 //// LoadRoot(ip, Heap::kGlobalContextMapRootIndex); |
| 966 //// cmp(holder_reg, ip); |
| 967 //// Check(eq, "JSGlobalObject::global_context should be a global context."); |
| 968 // lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); |
| 969 // LoadRoot(ip, Heap::kGlobalContextMapRootIndex); |
| 970 // Check(eq, "JSGlobalObject::global_context should be a global context.", |
| 971 // holder_reg, Operand(ip)); |
| 972 // // Restore ip is not needed. ip is reloaded below. |
| 973 //// pop(holder_reg); // Restore holder. |
| 974 // pop(holder_reg); // Restore holder. |
| 975 // // Restore ip to holder's context. |
| 976 //// ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 977 // lw(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kContextOffset)); |
| 978 // } |
| 979 // |
| 980 // // Check that the security token in the calling global object is |
| 981 // // compatible with the security token in the receiving global |
| 982 // // object. |
| 983 // int token_offset = Context::kHeaderSize + |
| 984 // Context::SECURITY_TOKEN_INDEX * kPointerSize; |
| 985 // |
| 986 //// ldr(scratch, FieldMemOperand(scratch, token_offset)); |
| 987 //// ldr(ip, FieldMemOperand(ip, token_offset)); |
| 988 //// cmp(scratch, Operand(ip)); |
| 989 //// b(ne, miss); |
| 990 // lw(scratch, FieldMemOperand(scratch, token_offset)); |
| 991 // lw(ip, FieldMemOperand(ip, token_offset)); |
| 992 // bcond(ne, miss, scratch, Operand(ip)); |
| 993 // nop(); // NOP_ADDED |
| 994 // |
| 995 // bind(&same_contexts); |
| 996 } |
| 997 |
| 998 |
| 999 void MacroAssembler::AllocateInNewSpace(int object_size, |
| 1000 Register result, |
| 1001 Register scratch1, |
| 1002 Register scratch2, |
| 1003 Label* gc_required, |
| 1004 AllocationFlags flags) { |
| 1005 UNIMPLEMENTED_(); |
| 1006 // ASSERT(!result.is(scratch1)); |
| 1007 // ASSERT(!scratch1.is(scratch2)); |
| 1008 // |
| 1009 // // Load address of new object into result and allocation top address into |
| 1010 // // scratch1. |
| 1011 // ExternalReference new_space_allocation_top = |
| 1012 // ExternalReference::new_space_allocation_top_address(); |
| 1013 // li(scratch1, Operand(new_space_allocation_top)); |
| 1014 // if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1015 // lw(result, MemOperand(scratch1)); |
| 1016 // } else { |
| 1017 //#ifdef DEBUG |
| 1018 // // Assert that result actually contains top on entry. scratch2 is used |
| 1019 // // immediately below so this use of scratch2 does not cause difference with |
| 1020 // // respect to register content between debug and release mode. |
| 1021 // lw(scratch2, MemOperand(scratch1)); |
| 1022 // Check(eq, "Unexpected allocation top", result, Operand(scratch2)); |
| 1023 //#endif |
| 1024 // } |
| 1025 // |
| 1026 // // Calculate new top and bail out if new space is exhausted. Use result |
| 1027 // // to calculate the new top. |
| 1028 // ExternalReference new_space_allocation_limit = |
| 1029 // ExternalReference::new_space_allocation_limit_address(); |
| 1030 //// mov(scratch2, Operand(new_space_allocation_limit)); |
| 1031 //// ldr(scratch2, MemOperand(scratch2)); |
| 1032 //// add(result, result, Operand(object_size * kPointerSize)); |
| 1033 //// cmp(result, Operand(scratch2)); |
| 1034 //// b(hi, gc_required); |
| 1035 // li(scratch2, Operand(new_space_allocation_limit)); |
| 1036 // lw(scratch2, MemOperand(scratch2)); |
| 1037 // addiu(result, result, Operand(object_size * kPointerSize)); |
| 1038 // bcond(Ugreater, gc_required, result, Operand(scratch2)); |
| 1039 // nop(); // NOP_ADDED |
| 1040 // |
| 1041 // // Update allocation top. result temporarily holds the new top, |
| 1042 //// str(result, MemOperand(scratch1)); |
| 1043 // sw(result, MemOperand(scratch1)); |
| 1044 // |
| 1045 // // Tag and adjust back to start of new object. |
| 1046 // if ((flags & TAG_OBJECT) != 0) { |
| 1047 // addiu(result, result, Operand(-(object_size * kPointerSize) + |
| 1048 // kHeapObjectTag)); |
| 1049 // } else { |
| 1050 // addiu(result, result, Operand(-object_size * kPointerSize)); |
| 1051 // } |
| 1052 } |
| 1053 |
| 1054 |
| 1055 void MacroAssembler::AllocateInNewSpace(Register object_size, |
| 1056 Register result, |
| 1057 Register scratch1, |
| 1058 Register scratch2, |
| 1059 Label* gc_required, |
| 1060 AllocationFlags flags) { |
| 1061 UNIMPLEMENTED_(); |
| 1062 // ASSERT(!result.is(scratch1)); |
| 1063 // ASSERT(!scratch1.is(scratch2)); |
| 1064 // |
| 1065 // // Load address of new object into result and allocation top address into |
| 1066 // // scratch1. |
| 1067 // ExternalReference new_space_allocation_top = |
| 1068 // ExternalReference::new_space_allocation_top_address(); |
| 1069 //// mov(scratch1, Operand(new_space_allocation_top)); |
| 1070 // li(scratch1, Operand(new_space_allocation_top)); |
| 1071 // if ((flags & RESULT_CONTAINS_TOP) == 0) { |
| 1072 //// ldr(result, MemOperand(scratch1)); |
| 1073 // lw(result, MemOperand(scratch1)); |
| 1074 // } else { |
| 1075 //#ifdef DEBUG |
| 1076 // // Assert that result actually contains top on entry. scratch2 is used |
| 1077 // // immediately below so this use of scratch2 does not cause difference with |
| 1078 // // respect to register content between debug and release mode. |
| 1079 //// ldr(scratch2, MemOperand(scratch1)); |
| 1080 //// cmp(result, scratch2); |
| 1081 //// Check(eq, "Unexpected allocation top"); |
| 1082 // lw(scratch2, MemOperand(scratch1)); |
| 1083 // Check(eq, "Unexpected allocation top", result, Operand(scratch2)); |
| 1084 //#endif |
| 1085 // } |
| 1086 // |
| 1087 // // Calculate new top and bail out if new space is exhausted. Use result |
| 1088 // // to calculate the new top. Object size is in words so a shift is required to |
| 1089 // // get the number of bytes |
| 1090 // ExternalReference new_space_allocation_limit = |
| 1091 // ExternalReference::new_space_allocation_limit_address(); |
| 1092 //// mov(scratch2, Operand(new_space_allocation_limit)); |
| 1093 //// ldr(scratch2, MemOperand(scratch2)); |
| 1094 //// add(result, result, Operand(object_size, LSL, kPointerSizeLog2)); |
| 1095 //// cmp(result, Operand(scratch2)); |
| 1096 //// b(hi, gc_required); |
| 1097 // li(scratch2, Operand(new_space_allocation_limit)); |
| 1098 // lw(scratch2, MemOperand(scratch2)); |
| 1099 // sll(ip, object_size, kPointerSizeLog2); |
| 1100 // addu(result, result, Operand(ip)); |
| 1101 // bcond(Ugreater, gc_required, result, Operand(scratch2)); |
| 1102 // nop(); // NOP_ADDED |
| 1103 // |
| 1104 // // Update allocation top. result temporarily holds the new top, |
| 1105 //// str(result, MemOperand(scratch1)); |
| 1106 // sw(result, MemOperand(scratch1)); |
| 1107 // |
| 1108 // // Adjust back to start of new object. |
| 1109 //// sub(result, result, Operand(object_size, LSL, kPointerSizeLog2)); |
| 1110 // sub(result, result, Operand(ip)); |
| 1111 // |
| 1112 // // Tag object if requested. |
| 1113 // if ((flags & TAG_OBJECT) != 0) { |
| 1114 //// add(result, result, Operand(kHeapObjectTag)); |
| 1115 // addiu(result, result, Operand(kHeapObjectTag)); |
| 1116 // } |
| 1117 } |
| 1118 |
| 1119 |
| 1120 void MacroAssembler::UndoAllocationInNewSpace(Register object, |
| 1121 Register scratch) { |
| 1122 UNIMPLEMENTED_(); |
| 1123 // ExternalReference new_space_allocation_top = |
| 1124 // ExternalReference::new_space_allocation_top_address(); |
| 1125 // |
| 1126 // // Make sure the object has no tag before resetting top. |
| 1127 // andi(object, object, Operand(~kHeapObjectTagMask)); |
| 1128 //#ifdef DEBUG |
| 1129 // // Check that the object un-allocated is below the current top. |
| 1130 //// mov(scratch, Operand(new_space_allocation_top)); |
| 1131 //// ldr(scratch, MemOperand(scratch)); |
| 1132 //// cmp(object, scratch); |
| 1133 //// Check(lt, "Undo allocation of non allocated memory"); |
| 1134 // li(scratch, Operand(new_space_allocation_top)); |
| 1135 // lw(scratch, MemOperand(scratch)); |
| 1136 // Check(less, "Undo allocation of non allocated memory", object, Operand(scratch)); |
| 1137 //#endif |
| 1138 // // Write the address of the object to un-allocate as the current top. |
| 1139 // li(scratch, Operand(new_space_allocation_top)); |
| 1140 // sw(object, MemOperand(scratch)); |
| 1141 } |
| 1142 |
| 1143 |
| 1144 void MacroAssembler::GetObjectType(Register function, |
| 1145 Register map, |
| 1146 Register type_reg) { |
| 1147 UNIMPLEMENTED_(); |
| 1148 // lw(map, FieldMemOperand(function, HeapObject::kMapOffset)); |
| 1149 // lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1150 } |
| 1151 |
| 1152 // REMOVED: code architecture does not fit MIPS. Use GetObjectType and bcond. |
| 1153 //void MacroAssembler::CompareObjectType(Register function, |
| 1154 // Register map, |
| 1155 // Register type_reg, |
| 1156 // InstanceType type) { |
| 1157 // ldr(map, FieldMemOperand(function, HeapObject::kMapOffset)); |
| 1158 // CompareInstanceType(map, type_reg, type); |
| 1159 //} |
| 1160 |
| 1161 // REMOVED: code architecture does not fit MIPS. Use GetObjectType and bcond. |
| 1162 //void MacroAssembler::CompareInstanceType(Register map, |
| 1163 // Register type_reg, |
| 1164 // InstanceType type) { |
| 1165 // ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 1166 // cmp(type_reg, Operand(type)); |
| 1167 //} |
| 1168 |
| 1169 |
| 1170 void MacroAssembler::TryGetFunctionPrototype(Register function, |
| 1171 Register result, |
| 1172 Register scratch, |
| 1173 Label* miss) { |
| 1174 UNIMPLEMENTED_(); |
| 1175 // // Check that the receiver isn't a smi. |
| 1176 // BranchOnSmi(function, miss); |
| 1177 // nop(); // NOP_ADDED |
| 1178 // |
| 1179 // // Check that the function really is a function. Load map into result reg. |
| 1180 //// CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); |
| 1181 //// b(ne, miss); |
| 1182 // GetObjectType(function, result, scratch); |
| 1183 // bcond(ne, miss, scratch, Operand(JS_FUNCTION_TYPE)); |
| 1184 // nop(); // NOP_ADDED |
| 1185 // |
| 1186 // // Make sure that the function has an instance prototype. |
| 1187 // Label non_instance; |
| 1188 //// ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 1189 //// tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
| 1190 //// b(ne, &non_instance); |
| 1191 // lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); |
| 1192 // andi(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); |
| 1193 // bcond(ne, &non_instance, scratch, Operand(zero_reg)); |
| 1194 // nop(); // NOP_ADDED |
| 1195 // |
| 1196 // // Get the prototype or initial map from the function. |
| 1197 //// ldr(result, |
| 1198 //// FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 1199 // lw(result, |
| 1200 // FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 1201 // |
| 1202 // // If the prototype or initial map is the hole, don't return it and |
| 1203 // // simply miss the cache instead. This will allow us to allocate a |
| 1204 // // prototype object on-demand in the runtime system. |
| 1205 //// LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 1206 //// cmp(result, ip); |
| 1207 //// b(eq, miss); |
| 1208 // LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 1209 // bcond(eq, miss, result, Operand(ip)); |
| 1210 // nop(); // NOP_ADDED |
| 1211 // |
| 1212 // // If the function does not have an initial map, we're done. |
| 1213 // Label done; |
| 1214 //// CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| 1215 //// b(ne, &done); |
| 1216 // GetObjectType(result, scratch, scratch); |
| 1217 // bcond(ne, &done, scratch, Operand(MAP_TYPE)); |
| 1218 // nop(); // NOP_ADDED |
| 1219 // |
| 1220 // // Get the prototype from the initial map. |
| 1221 //// ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 1222 //// jmp(&done); |
| 1223 // lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 1224 // b(&done); |
| 1225 // nop(); // NOP_ADDED |
| 1226 // |
| 1227 // // Non-instance prototype: Fetch prototype from constructor field |
| 1228 // // in initial map. |
| 1229 //// bind(&non_instance); |
| 1230 //// ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 1231 // bind(&non_instance); |
| 1232 // lw(result, FieldMemOperand(result, Map::kConstructorOffset)); |
| 1233 // |
| 1234 // // All done. |
| 1235 // bind(&done); |
| 1236 } |
| 1237 |
| 1238 |
| 1239 void MacroAssembler::CallStub(CodeStub* stub, Condition cond, |
| 1240 Register r1, const Operand& r2) { |
| 1241 UNIMPLEMENTED_(); |
| 1242 // ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs |
| 1243 // Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2); |
| 1244 } |
| 1245 |
| 1246 |
| 1247 void MacroAssembler::StubReturn(int argc) { |
| 1248 UNIMPLEMENTED_(); |
| 1249 // ASSERT(argc >= 1 && generating_stub()); |
| 1250 // if (argc > 1) |
| 1251 // addiu(sp, sp, Operand((argc - 1) * kPointerSize)); |
| 1252 // Ret(); |
| 1253 // nop(); // NOP_ADDED |
| 1254 } |
| 1255 |
| 1256 |
| 1257 void MacroAssembler::IllegalOperation(int num_arguments) { |
| 1258 UNIMPLEMENTED(); |
| 1259 break_(0x1232); |
| 1260 // if (num_arguments > 0) { |
| 1261 //// add(sp, sp, Operand(num_arguments * kPointerSize)); |
| 1262 // addiu(sp, sp, Operand(num_arguments * kPointerSize)); |
| 1263 // } |
| 1264 // LoadRoot(v0, Heap::kUndefinedValueRootIndex); |
| 1265 } |
| 1266 |
| 1267 |
| 1268 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { |
| 1269 UNIMPLEMENTED_(); |
| 1270 // // All parameters are on the stack. r0->v0 has the return value after call. |
| 1271 // |
| 1272 // // If the expected number of arguments of the runtime function is |
| 1273 // // constant, we check that the actual number of arguments match the |
| 1274 // // expectation. |
| 1275 // if (f->nargs >= 0 && f->nargs != num_arguments) { |
| 1276 // IllegalOperation(num_arguments); |
| 1277 // return; |
| 1278 // } |
| 1279 // |
| 1280 // Runtime::FunctionId function_id = |
| 1281 // static_cast<Runtime::FunctionId>(f->stub_id); |
| 1282 // RuntimeStub stub(function_id, num_arguments); |
| 1283 // CallStub(&stub); |
| 1284 } |
| 1285 |
| 1286 |
| 1287 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
| 1288 UNIMPLEMENTED_(); |
| 1289 // CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
| 1290 } |
| 1291 |
| 1292 |
| 1293 void MacroAssembler::TailCallRuntime(const ExternalReference& ext, |
| 1294 int num_arguments, |
| 1295 int result_size) { |
| 1296 UNIMPLEMENTED_(); |
| 1297 // // ARM TODO |
| 1298 // // TODO(1236192): Most runtime routines don't need the number of |
| 1299 // // arguments passed in because it is constant. At some point we |
| 1300 // // should remove this need and make the runtime routine entry code |
| 1301 // // smarter. |
| 1302 //// mov(r0, Operand(num_arguments)); |
| 1303 //// JumpToRuntime(ext); |
| 1304 // li(a0, Operand(num_arguments)); |
| 1305 // JumpToRuntime(ext); |
| 1306 // nop(); // NOP_ADDED |
| 1307 } |
| 1308 |
| 1309 |
| 1310 void MacroAssembler::JumpToRuntime(const ExternalReference& builtin) { |
| 1311 UNIMPLEMENTED_(); |
| 1312 ////#if defined(__thumb__) |
| 1313 //// // Thumb mode builtin. |
| 1314 //// ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); |
| 1315 ////#endif |
| 1316 //// mov(r1, Operand(builtin)); |
| 1317 //// CEntryStub stub(1); |
| 1318 //// Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 1319 // li(a1, Operand(builtin)); |
| 1320 // CEntryStub stub(1); |
| 1321 // Jump(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 1322 } |
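// A minimal sketch, not part of this patch, mirroring the commented-out code
// above: load the external reference into a1 and tail-jump through the
// one-argument CEntryStub.
//
//   li(a1, Operand(builtin));
//   CEntryStub stub(1);
//   Jump(stub.GetCode(), RelocInfo::CODE_TARGET);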
| 1323 |
| 1324 |
| 1325 Handle<Code> MacroAssembler::ResolveBuiltin(Builtins::JavaScript id, |
| 1326 bool* resolved) { |
| 1327 UNIMPLEMENTED_(); |
| 1328 // // Contract with compiled functions is that the function is passed in a1 (r1 on ARM).
| 1329 // int builtins_offset = |
| 1330 // JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize); |
| 1331 //// ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 1332 //// ldr(r1, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset)); |
| 1333 //// ldr(r1, FieldMemOperand(r1, builtins_offset)); |
| 1334 // lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX))); |
| 1335 // lw(a1, FieldMemOperand(a1, GlobalObject::kBuiltinsOffset)); |
| 1336 // lw(a1, FieldMemOperand(a1, builtins_offset)); |
| 1337 // |
| 1338 // return Builtins::GetCode(id, resolved); |
| 1339 return (Handle<Code>)((Code*)NULL); // UNIMPLEMENTED RETURN |
| 1340 } |
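// A minimal sketch, not part of this patch, of the lookup performed by the
// commented-out code above: walk cp -> global object -> builtins object ->
// slot for 'id', leaving the builtins object in a1 as the calling convention
// expects, then hand back the (possibly unresolved) code object.
//
//   int builtins_offset =
//       JSBuiltinsObject::kJSBuiltinsOffset + (id * kPointerSize);
//   lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
//   lw(a1, FieldMemOperand(a1, GlobalObject::kBuiltinsOffset));
//   lw(a1, FieldMemOperand(a1, builtins_offset));
//   return Builtins::GetCode(id, resolved);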
| 1341 |
| 1342 |
| 1343 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 1344 InvokeJSFlags flags) { |
| 1345 UNIMPLEMENTED_(); |
| 1346 // bool resolved; |
| 1347 // Handle<Code> code = ResolveBuiltin(id, &resolved); |
| 1348 // |
| 1349 // if (flags == CALL_JS) { |
| 1350 // Call(code, RelocInfo::CODE_TARGET); |
| 1351 // } else { |
| 1352 // ASSERT(flags == JUMP_JS); |
| 1353 // Jump(code, RelocInfo::CODE_TARGET); |
| 1354 // } |
| 1355 //// addiu(sp, sp, -StandardFrameConstants::kRArgsSlotsSize); |
| 1356 // // Arguments slots are removed in GenCode after frame->Exit(). |
| 1357 //// addiu(sp, sp, StandardFrameConstants::kRArgsSlotsSize); |
| 1358 // |
| 1359 // if (!resolved) { |
| 1360 // const char* name = Builtins::GetName(id); |
| 1361 // int argc = Builtins::GetArgumentsCount(id); |
| 1362 // uint32_t flags = |
| 1363 // Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
| 1364 // Bootstrapper::FixupFlagsUseCodeObject::encode(false); |
| 1365 // Unresolved entry = { pc_offset() - kInstrSize, flags, name }; |
| 1366 // unresolved_.Add(entry); |
| 1367 // } |
| 1368 } |
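// A minimal sketch, not part of this patch, of the control flow in the
// commented-out code above: call or jump to the resolved code object
// depending on 'flags', and, if the builtin is not yet resolved, record the
// call site in unresolved_ so the bootstrapper can fix it up later.
//
//   bool resolved;
//   Handle<Code> code = ResolveBuiltin(id, &resolved);
//   if (flags == CALL_JS) {
//     Call(code, RelocInfo::CODE_TARGET);
//   } else {
//     ASSERT(flags == JUMP_JS);
//     Jump(code, RelocInfo::CODE_TARGET);
//   }
//   if (!resolved) {
//     uint32_t fixup_flags =
//         Bootstrapper::FixupFlagsArgumentsCount::encode(
//             Builtins::GetArgumentsCount(id)) |
//         Bootstrapper::FixupFlagsUseCodeObject::encode(false);
//     Unresolved entry =
//         { pc_offset() - kInstrSize, fixup_flags, Builtins::GetName(id) };
//     unresolved_.Add(entry);
//   }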
| 1369 |
| 1370 |
| 1371 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
| 1372 UNIMPLEMENTED_(); |
| 1373 // bool resolved; |
| 1374 // Handle<Code> code = ResolveBuiltin(id, &resolved); |
| 1375 // |
| 1376 //// mov(target, Operand(code)); |
| 1377 // // We may need to patch this code, so we have li generate 2 instructions. |
| 1378 // li(target, Operand(code), true); |
| 1379 // if (!resolved) { |
| 1380 // const char* name = Builtins::GetName(id); |
| 1381 // int argc = Builtins::GetArgumentsCount(id); |
| 1382 // uint32_t flags = |
| 1383 // Bootstrapper::FixupFlagsArgumentsCount::encode(argc) | |
| 1384 // Bootstrapper::FixupFlagsUseCodeObject::encode(true); |
| 1385 // // li generated 2 instructions, so we need a -2*kInstrSize offset. |
| 1386 // Unresolved entry = { pc_offset() - 2*kInstrSize, flags, name }; |
| 1387 // unresolved_.Add(entry); |
| 1388 // } |
| 1389 // |
| 1390 //// add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1391 // addiu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 1392 } |
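// A minimal sketch, not part of this patch, of the commented-out code above.
// The fixup recorded below points back at the load, so li is forced into its
// full two-instruction form (typically lui/ori on MIPS), which is why the
// offset is -2 * kInstrSize; the entry point is then the code object address
// plus the header size, untagged.
//
//   bool resolved;
//   Handle<Code> code = ResolveBuiltin(id, &resolved);
//   li(target, Operand(code), true);  // Force the two-instruction form.
//   if (!resolved) {
//     uint32_t fixup_flags =
//         Bootstrapper::FixupFlagsArgumentsCount::encode(
//             Builtins::GetArgumentsCount(id)) |
//         Bootstrapper::FixupFlagsUseCodeObject::encode(true);
//     Unresolved entry =
//         { pc_offset() - 2 * kInstrSize, fixup_flags, Builtins::GetName(id) };
//     unresolved_.Add(entry);
//   }
//   addiu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));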
| 1393 |
| 1394 |
| 1395 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
| 1396 Register scratch1, Register scratch2) { |
| 1397 UNIMPLEMENTED_(); |
| 1398 // if (FLAG_native_code_counters && counter->Enabled()) { |
| 1399 //// mov(scratch1, Operand(value)); |
| 1400 //// mov(scratch2, Operand(ExternalReference(counter))); |
| 1401 //// str(scratch1, MemOperand(scratch2)); |
| 1402 // li(scratch1, Operand(value)); |
| 1403 // li(scratch2, Operand(ExternalReference(counter))); |
| 1404 // sw(scratch1, MemOperand(scratch2)); |
| 1405 // } |
| 1406 } |
| 1407 |
| 1408 |
| 1409 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
| 1410 Register scratch1, Register scratch2) { |
| 1411 UNIMPLEMENTED_(); |
| 1412 // ASSERT(value > 0); |
| 1413 // if (FLAG_native_code_counters && counter->Enabled()) { |
| 1414 //// mov(scratch2, Operand(ExternalReference(counter))); |
| 1415 //// ldr(scratch1, MemOperand(scratch2)); |
| 1416 //// add(scratch1, scratch1, Operand(value)); |
| 1417 //// str(scratch1, MemOperand(scratch2)); |
| 1418 // li(scratch2, Operand(ExternalReference(counter))); |
| 1419 // lw(scratch1, MemOperand(scratch2)); |
| 1420 // addiu(scratch1, scratch1, Operand(value)); |
| 1421 // sw(scratch1, MemOperand(scratch2)); |
| 1422 // } |
| 1423 } |
| 1424 |
| 1425 |
| 1426 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
| 1427 Register scratch1, Register scratch2) { |
| 1428 UNIMPLEMENTED_(); |
| 1429 // ASSERT(value > 0); |
| 1430 // if (FLAG_native_code_counters && counter->Enabled()) { |
| 1431 //// mov(scratch2, Operand(ExternalReference(counter))); |
| 1432 //// ldr(scratch1, MemOperand(scratch2)); |
| 1433 //// sub(scratch1, scratch1, Operand(value)); |
| 1434 //// str(scratch1, MemOperand(scratch2)); |
| 1435 // li(scratch2, Operand(ExternalReference(counter))); |
| 1436 // lw(scratch1, MemOperand(scratch2)); |
| 1437 // addiu(scratch1, scratch1, Operand(-value)); |
| 1438 // sw(scratch1, MemOperand(scratch2)); |
| 1439 // } |
| 1440 } |
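// The three counter helpers above share one pattern once enabled, shown here
// as a sketch (not part of this patch): materialise the counter's address
// from an ExternalReference, then either store the new value (SetCounter) or
// load, adjust and store it back (Increment/DecrementCounter).
//
//   li(scratch2, Operand(ExternalReference(counter)));
//   lw(scratch1, MemOperand(scratch2));
//   addiu(scratch1, scratch1, Operand(value));  // Operand(-value) to decrement.
//   sw(scratch1, MemOperand(scratch2));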
| 1441 |
| 1442 |
| 1443 |
| 1444 void MacroAssembler::Assert(Condition cc, const char* msg, Register rs, Operand rt) {
| 1445 UNIMPLEMENTED_(); |
| 1446 // if (FLAG_debug_code) |
| 1447 // Check(cc, msg, rs, rt); |
| 1448 } |
| 1449 |
| 1450 |
| 1451 void MacroAssembler::Check(Condition cc, const char* msg, Register rs, Operand rt) {
| 1452 UNIMPLEMENTED_(); |
| 1453 // Label L; |
| 1454 // bcond(cc, &L, rs, rt); |
| 1455 // nop(); |
| 1456 // Abort(msg); |
| 1457 // // will not return here |
| 1458 // bind(&L); |
| 1459 } |
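// A minimal sketch, not part of this patch, of the commented-out code above:
// branch over the Abort call when the condition holds, so Abort (which does
// not return) is only reached on failure.
//
//   Label L;
//   bcond(cc, &L, rs, rt);
//   nop();  // Branch delay slot.
//   Abort(msg);
//   bind(&L);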
| 1460 |
| 1461 |
| 1462 void MacroAssembler::Abort(const char* msg) { |
| 1463 UNIMPLEMENTED_(); |
| 1464 // // We want to pass the msg string like a smi to avoid GC |
| 1465 // // problems, however msg is not guaranteed to be aligned |
| 1466 // // properly. Instead, we pass an aligned pointer that is |
| 1467 // // a proper v8 smi, but also pass the alignment difference |
| 1468 // // from the real pointer as a smi.
| 1469 // intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
| 1470 // intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
| 1471 // ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
| 1472 //#ifdef DEBUG |
| 1473 // if (msg != NULL) { |
| 1474 // RecordComment("Abort message: "); |
| 1475 // RecordComment(msg); |
| 1476 // } |
| 1477 //#endif |
| 1478 //// mov(r0, Operand(p0)); |
| 1479 //// push(r0); |
| 1480 //// mov(r0, Operand(Smi::FromInt(p1 - p0))); |
| 1481 //// push(r0); |
| 1482 //// CallRuntime(Runtime::kAbort, 2); |
| 1483 // li(a0, Operand(p0)); |
| 1484 // push(a0); |
| 1485 // li(a0, Operand(Smi::FromInt(p1 - p0))); |
| 1486 // push(a0); |
| 1487 // CallRuntime(Runtime::kAbort, 2); |
| 1488 // // will not return here |
| 1489 } |
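// A minimal sketch, not part of this patch, of the smi trick described above:
// msg itself may be unaligned, so an aligned pointer p0 (a valid smi) is
// pushed together with the difference p1 - p0 encoded as a smi, letting the
// runtime reassemble the real pointer from the two.
//
//   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
//   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
//   li(a0, Operand(p0));
//   push(a0);
//   li(a0, Operand(Smi::FromInt(p1 - p0)));
//   push(a0);
//   CallRuntime(Runtime::kAbort, 2);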
| 1490 |
| 1491 |
| 1492 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 1493 CodePatcher::CodePatcher(byte* address, int instructions) |
| 1494 : address_(address), |
| 1495 instructions_(instructions), |
| 1496 size_(instructions * Assembler::kInstrSize), |
| 1497 masm_(address, size_ + Assembler::kGap) { |
| 1498 UNIMPLEMENTED_(); |
| 1499 // Create a new macro assembler pointing to the address of the code to patch. |
| 1500 // The size is adjusted with kGap in order for the assembler to generate size
| 1501 // bytes of instructions without failing with buffer size constraints. |
| 1502 // ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 1503 } |
| 1504 |
| 1505 |
| 1506 CodePatcher::~CodePatcher() { |
| 1507 UNIMPLEMENTED_(); |
| 1508 // // Indicate that code has changed. |
| 1509 // CPU::FlushICache(address_, size_); |
| 1510 // |
| 1511 // // Check that the code was patched as expected. |
| 1512 // ASSERT(masm_.pc_ == address_ + size_); |
| 1513 // ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 1514 } |
| 1515 |
| 1516 |
| 1517 void CodePatcher::Emit(Instr x) { |
| 1518 UNIMPLEMENTED_(); |
| 1519 // masm()->emit(x); |
| 1520 } |
| 1521 |
| 1522 |
| 1523 void CodePatcher::Emit(Address addr) { |
| 1524 UNIMPLEMENTED_(); |
| 1525 // masm()->emit(reinterpret_cast<Instr>(addr)); |
| 1526 } |
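// A hypothetical usage sketch of CodePatcher (the names 'addr' and
// 'new_instr' are illustrative only, not from this patch): it wraps a small
// MacroAssembler over existing code so a fixed number of instructions can be
// overwritten in place, and its destructor flushes the instruction cache for
// the patched range.
//
//   {
//     CodePatcher patcher(addr, 1);  // Patch exactly one instruction.
//     patcher.Emit(new_instr);       // Overwrite it.
//   }                                // Destructor flushes the i-cache.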
| 1527 #endif // ENABLE_DEBUGGER_SUPPORT |
| 1528 |
| 1529 |
| 1530 } } // namespace v8::internal |