| OLD | NEW |
| --- | --- |
| (Empty) |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | |
| 2 // for details. All rights reserved. Use of this source code is governed by a | |
| 3 // BSD-style license that can be found in the LICENSE file. | |
| 4 | |
| 5 #include "vm/globals.h" | |
| 6 #if defined(TARGET_ARCH_MIPS) | |
| 7 | |
| 8 #include "vm/assembler.h" | |
| 9 #include "vm/compiler.h" | |
| 10 #include "vm/dart_entry.h" | |
| 11 #include "vm/flow_graph_compiler.h" | |
| 12 #include "vm/heap.h" | |
| 13 #include "vm/instructions.h" | |
| 14 #include "vm/object_store.h" | |
| 15 #include "vm/runtime_entry.h" | |
| 16 #include "vm/stack_frame.h" | |
| 17 #include "vm/stub_code.h" | |
| 18 #include "vm/tags.h" | |
| 19 | |
| 20 #define __ assembler-> | |
| 21 | |
| 22 namespace dart { | |
| 23 | |
| 24 DEFINE_FLAG(bool, inline_alloc, true, "Inline allocation of objects."); | |
| 25 DEFINE_FLAG(bool, | |
| 26 use_slow_path, | |
| 27 false, | |
| 28 "Set to true for debugging & verifying the slow paths."); | |
| 29 DECLARE_FLAG(bool, trace_optimized_ic_calls); | |
| 30 | |
| 31 // Input parameters: | |
| 32 // RA : return address. | |
| 33 // SP : address of last argument in argument array. | |
| 34 // SP + 4*S4 - 4 : address of first argument in argument array. | |
| 35 // SP + 4*S4 : address of return value. | |
| 36 // S5 : address of the runtime function to call. | |
| 37 // S4 : number of arguments to the call. | |
| 38 void StubCode::GenerateCallToRuntimeStub(Assembler* assembler) { | |
| 39 const intptr_t thread_offset = NativeArguments::thread_offset(); | |
| 40 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | |
| 41 const intptr_t argv_offset = NativeArguments::argv_offset(); | |
| 42 const intptr_t retval_offset = NativeArguments::retval_offset(); | |
| 43 | |
| 44 __ SetPrologueOffset(); | |
| 45 __ Comment("CallToRuntimeStub"); | |
| 46 __ EnterStubFrame(); | |
| 47 | |
| 48 // Save exit frame information to enable stack walking as we are about | |
| 49 // to transition to Dart VM C++ code. | |
| 50 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 51 | |
| 52 #if defined(DEBUG) | |
| 53 { | |
| 54 Label ok; | |
| 55 // Check that we are always entering from Dart code. | |
| 56 __ lw(T0, Assembler::VMTagAddress()); | |
| 57 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | |
| 58 __ Stop("Not coming from Dart code."); | |
| 59 __ Bind(&ok); | |
| 60 } | |
| 61 #endif | |
| 62 | |
| 63 // Mark that the thread is executing VM code. | |
| 64 __ sw(S5, Assembler::VMTagAddress()); | |
| 65 | |
| 66 // Reserve space for arguments and align frame before entering C++ world. | |
| 67 // NativeArguments are passed in registers. | |
| 68 ASSERT(sizeof(NativeArguments) == 4 * kWordSize); | |
| 69 __ ReserveAlignedFrameSpace(4 * kWordSize); // Reserve space for arguments. | |
| 70 | |
| 71 // Pass NativeArguments structure by value and call runtime. | |
| 72 // Registers A0, A1, A2, and A3 are used. | |
| 73 | |
| 74 ASSERT(thread_offset == 0 * kWordSize); | |
| 75 // Set thread in NativeArgs. | |
| 76 __ mov(A0, THR); | |
| 77 | |
| 78 // There are no runtime calls to closures, so we do not need to set the tag | |
| 79 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | |
| 80 ASSERT(argc_tag_offset == 1 * kWordSize); | |
| 81 __ mov(A1, S4); // Set argc in NativeArguments. | |
| 82 | |
| 83 ASSERT(argv_offset == 2 * kWordSize); | |
| 84 __ sll(A2, S4, 2); | |
| 85 __ addu(A2, FP, A2); // Compute argv. | |
| 86 // Set argv in NativeArguments. | |
| 87 __ addiu(A2, A2, Immediate(kParamEndSlotFromFp * kWordSize)); | |
| 88 | |
| 89 | |
| 90 // Call runtime or redirection via simulator. | |
| 91 // We defensively always jalr through T9 because it is sometimes required by | |
| 92 // the MIPS ABI. | |
| 93 __ mov(T9, S5); | |
| 94 __ jalr(T9); | |
| 95 | |
| 96 ASSERT(retval_offset == 3 * kWordSize); | |
| 97 // Retval is next to 1st argument. | |
| 98 __ delay_slot()->addiu(A3, A2, Immediate(kWordSize)); | |
| 99 __ Comment("CallToRuntimeStub return"); | |
| 100 | |
| 101 // Mark that the thread is executing Dart code. | |
| 102 __ LoadImmediate(A2, VMTag::kDartTagId); | |
| 103 __ sw(A2, Assembler::VMTagAddress()); | |
| 104 | |
| 105 // Reset exit frame information in the Thread structure. | |
| 106 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 107 | |
| 108 __ LeaveStubFrameAndReturn(); | |
| 109 } | |
| 110 | |
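
The four offset ASSERTs above pin down the 4-word NativeArguments block that the stub mirrors into A0..A3. A minimal sketch of that layout, assuming 32-bit words; the struct and field names here are illustrative, not the VM's actual declaration:

```cpp
#include <stdint.h>

// Hypothetical mirror of the block checked by the ASSERTs in the stub:
// thread/argc_tag/argv/retval at word offsets 0..3, passed in A0..A3.
struct NativeArgumentsSketch {
  void* thread;       // 0 * kWordSize -> A0
  intptr_t argc_tag;  // 1 * kWordSize -> A1
  void** argv;        // 2 * kWordSize -> A2
  void** retval;      // 3 * kWordSize -> A3 (one word past argv)
};
```
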
| 111 | |
| 112 // Print the stop message. | |
| 113 DEFINE_LEAF_RUNTIME_ENTRY(void, PrintStopMessage, 1, const char* message) { | |
| 114 OS::Print("Stop message: %s\n", message); | |
| 115 } | |
| 116 END_LEAF_RUNTIME_ENTRY | |
| 117 | |
| 118 | |
| 119 // Input parameters: | |
| 120 // A0 : stop message (const char*). | |
| 121 // Must preserve all registers. | |
| 122 void StubCode::GeneratePrintStopMessageStub(Assembler* assembler) { | |
| 123 __ EnterCallRuntimeFrame(0); | |
| 124 // Call the runtime leaf function. A0 already contains the parameter. | |
| 125 __ CallRuntime(kPrintStopMessageRuntimeEntry, 1); | |
| 126 __ LeaveCallRuntimeFrame(); | |
| 127 __ Ret(); | |
| 128 } | |
| 129 | |
| 130 | |
| 131 // Input parameters: | |
| 132 // RA : return address. | |
| 133 // SP : address of return value. | |
| 134 // T5 : address of the native function to call. | |
| 135 // A2 : address of first argument in argument array. | |
| 136 // A1 : argc_tag including number of arguments and function kind. | |
| 137 static void GenerateCallNativeWithWrapperStub(Assembler* assembler, | |
| 138 Address wrapper) { | |
| 139 const intptr_t thread_offset = NativeArguments::thread_offset(); | |
| 140 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | |
| 141 const intptr_t argv_offset = NativeArguments::argv_offset(); | |
| 142 const intptr_t retval_offset = NativeArguments::retval_offset(); | |
| 143 | |
| 144 __ SetPrologueOffset(); | |
| 145 __ Comment("CallNativeCFunctionStub"); | |
| 146 __ EnterStubFrame(); | |
| 147 | |
| 148 // Save exit frame information to enable stack walking as we are about | |
| 149 // to transition to native code. | |
| 150 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 151 | |
| 152 #if defined(DEBUG) | |
| 153 { | |
| 154 Label ok; | |
| 155 // Check that we are always entering from Dart code. | |
| 156 __ lw(T0, Assembler::VMTagAddress()); | |
| 157 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | |
| 158 __ Stop("Not coming from Dart code."); | |
| 159 __ Bind(&ok); | |
| 160 } | |
| 161 #endif | |
| 162 | |
| 163 // Mark that the thread is executing native code. | |
| 164 __ sw(T5, Assembler::VMTagAddress()); | |
| 165 | |
| 166 // Initialize NativeArguments structure and call native function. | |
| 167 // Registers A0, A1, A2, and A3 are used. | |
| 168 | |
| 169 ASSERT(thread_offset == 0 * kWordSize); | |
| 170 // Set thread in NativeArgs. | |
| 171 __ mov(A0, THR); | |
| 172 | |
| 173 // There are no native calls to closures, so we do not need to set the tag | |
| 174 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | |
| 175 ASSERT(argc_tag_offset == 1 * kWordSize); | |
| 176 // Set argc in NativeArguments: A1 already contains argc. | |
| 177 | |
| 178 ASSERT(argv_offset == 2 * kWordSize); | |
| 179 // Set argv in NativeArguments: A2 already contains argv. | |
| 180 | |
| 181 ASSERT(retval_offset == 3 * kWordSize); | |
| 182 // Set retval in NativeArgs. | |
| 183 __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); | |
| 184 | |
| 185 // Passing the structure by value as in runtime calls would require changing | |
| 186 // Dart API for native functions. | |
| 187 // For now, space is reserved on the stack and we pass a pointer to it. | |
| 188 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
| 189 __ sw(A3, Address(SP, 3 * kWordSize)); | |
| 190 __ sw(A2, Address(SP, 2 * kWordSize)); | |
| 191 __ sw(A1, Address(SP, 1 * kWordSize)); | |
| 192 __ sw(A0, Address(SP, 0 * kWordSize)); | |
| 193 __ mov(A0, SP); // Pass the pointer to the NativeArguments. | |
| 194 | |
| 195 | |
| 196 __ mov(A1, T5); // Pass the function entrypoint. | |
| 197 __ ReserveAlignedFrameSpace(2 * kWordSize); // Just passing A0, A1. | |
| 198 | |
| 199 // Call native wrapper function or redirection via simulator. | |
| 200 __ lw(T9, wrapper); | |
| 201 __ jalr(T9); | |
| 202 __ Comment("CallNativeCFunctionStub return"); | |
| 203 | |
| 204 // Mark that the thread is executing Dart code. | |
| 205 __ LoadImmediate(A2, VMTag::kDartTagId); | |
| 206 __ sw(A2, Assembler::VMTagAddress()); | |
| 207 | |
| 208 // Reset exit frame information in the Thread structure. | |
| 209 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 210 | |
| 211 __ LeaveStubFrameAndReturn(); | |
| 212 } | |
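
The wrapper invocation above passes exactly two values: A0, a pointer to the stack-built NativeArguments, and A1, the native function to call. A hedged sketch of the call shape; the typedef names are ours, not the VM's:

```cpp
// Hypothetical signatures mirroring the A0/A1 pair set up above.
typedef void (*NativeTargetSketch)(void* native_arguments);
typedef void (*NativeWrapperSketch)(void* native_arguments,
                                    NativeTargetSketch target);
```
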
| 213 | |
| 214 | |
| 215 void StubCode::GenerateCallNoScopeNativeStub(Assembler* assembler) { | |
| 216 GenerateCallNativeWithWrapperStub( | |
| 217 assembler, | |
| 218 Address(THR, Thread::no_scope_native_wrapper_entry_point_offset())); | |
| 219 } | |
| 220 | |
| 221 | |
| 222 void StubCode::GenerateCallAutoScopeNativeStub(Assembler* assembler) { | |
| 223 GenerateCallNativeWithWrapperStub( | |
| 224 assembler, | |
| 225 Address(THR, Thread::auto_scope_native_wrapper_entry_point_offset())); | |
| 226 } | |
| 227 | |
| 228 | |
| 229 // Input parameters: | |
| 230 // RA : return address. | |
| 231 // SP : address of return value. | |
| 232 // T5 : address of the native function to call. | |
| 233 // A2 : address of first argument in argument array. | |
| 234 // A1 : argc_tag including number of arguments and function kind. | |
| 235 void StubCode::GenerateCallBootstrapNativeStub(Assembler* assembler) { | |
| 236 const intptr_t thread_offset = NativeArguments::thread_offset(); | |
| 237 const intptr_t argc_tag_offset = NativeArguments::argc_tag_offset(); | |
| 238 const intptr_t argv_offset = NativeArguments::argv_offset(); | |
| 239 const intptr_t retval_offset = NativeArguments::retval_offset(); | |
| 240 | |
| 241 __ SetPrologueOffset(); | |
| 242 __ Comment("CallNativeCFunctionStub"); | |
| 243 __ EnterStubFrame(); | |
| 244 | |
| 245 // Save exit frame information to enable stack walking as we are about | |
| 246 // to transition to native code. | |
| 247 __ sw(FP, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 248 | |
| 249 #if defined(DEBUG) | |
| 250 { | |
| 251 Label ok; | |
| 252 // Check that we are always entering from Dart code. | |
| 253 __ lw(T0, Assembler::VMTagAddress()); | |
| 254 __ BranchEqual(T0, Immediate(VMTag::kDartTagId), &ok); | |
| 255 __ Stop("Not coming from Dart code."); | |
| 256 __ Bind(&ok); | |
| 257 } | |
| 258 #endif | |
| 259 | |
| 260 // Mark that the thread is executing native code. | |
| 261 __ sw(T5, Assembler::VMTagAddress()); | |
| 262 | |
| 263 // Initialize NativeArguments structure and call native function. | |
| 264 // Registers A0, A1, A2, and A3 are used. | |
| 265 | |
| 266 ASSERT(thread_offset == 0 * kWordSize); | |
| 267 // Set thread in NativeArgs. | |
| 268 __ mov(A0, THR); | |
| 269 | |
| 270 // There are no native calls to closures, so we do not need to set the tag | |
| 271 // bits kClosureFunctionBit and kInstanceFunctionBit in argc_tag_. | |
| 272 ASSERT(argc_tag_offset == 1 * kWordSize); | |
| 273 // Set argc in NativeArguments: A1 already contains argc. | |
| 274 | |
| 275 ASSERT(argv_offset == 2 * kWordSize); | |
| 276 // Set argv in NativeArguments: A2 already contains argv. | |
| 277 | |
| 278 ASSERT(retval_offset == 3 * kWordSize); | |
| 279 // Set retval in NativeArgs. | |
| 280 __ addiu(A3, FP, Immediate(kCallerSpSlotFromFp * kWordSize)); | |
| 281 | |
| 282 // Passing the structure by value as in runtime calls would require changing | |
| 283 // Dart API for native functions. | |
| 284 // For now, space is reserved on the stack and we pass a pointer to it. | |
| 285 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
| 286 __ sw(A3, Address(SP, 3 * kWordSize)); | |
| 287 __ sw(A2, Address(SP, 2 * kWordSize)); | |
| 288 __ sw(A1, Address(SP, 1 * kWordSize)); | |
| 289 __ sw(A0, Address(SP, 0 * kWordSize)); | |
| 290 __ mov(A0, SP); // Pass the pointer to the NativeArguments. | |
| 291 | |
| 292 __ ReserveAlignedFrameSpace(kWordSize); // Just passing A0. | |
| 293 | |
| 294 // Call native function or redirection via simulator. | |
| 295 | |
| 296 // We defensively always jalr through T9 because it is sometimes required by | |
| 297 // the MIPS ABI. | |
| 298 __ mov(T9, T5); | |
| 299 __ jalr(T9); | |
| 300 __ Comment("CallNativeCFunctionStub return"); | |
| 301 | |
| 302 // Mark that the thread is executing Dart code. | |
| 303 __ LoadImmediate(A2, VMTag::kDartTagId); | |
| 304 __ sw(A2, Assembler::VMTagAddress()); | |
| 305 | |
| 306 // Reset exit frame information in the Thread structure. | |
| 307 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 308 | |
| 309 __ LeaveStubFrameAndReturn(); | |
| 310 } | |
| 311 | |
| 312 | |
| 313 // Input parameters: | |
| 314 // S4: arguments descriptor array. | |
| 315 void StubCode::GenerateCallStaticFunctionStub(Assembler* assembler) { | |
| 316 __ Comment("CallStaticFunctionStub"); | |
| 317 __ EnterStubFrame(); | |
| 318 // Set up space on stack for return value and preserve arguments descriptor. | |
| 319 | |
| 320 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 321 __ sw(S4, Address(SP, 1 * kWordSize)); | |
| 322 __ sw(ZR, Address(SP, 0 * kWordSize)); | |
| 323 | |
| 324 __ CallRuntime(kPatchStaticCallRuntimeEntry, 0); | |
| 325 __ Comment("CallStaticFunctionStub return"); | |
| 326 | |
| 327 // Get Code object result and restore arguments descriptor array. | |
| 328 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); | |
| 329 __ lw(S4, Address(SP, 1 * kWordSize)); | |
| 330 __ addiu(SP, SP, Immediate(2 * kWordSize)); | |
| 331 | |
| 332 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
| 333 | |
| 334 // Remove the stub frame as we are about to jump to the dart function. | |
| 335 __ LeaveStubFrameAndReturn(T0); | |
| 336 } | |
| 337 | |
| 338 | |
| 339 // Called from a static call only when invalid code has been entered | |
| 340 // (invalid because its function was optimized or deoptimized). | |
| 341 // S4: arguments descriptor array. | |
| 342 void StubCode::GenerateFixCallersTargetStub(Assembler* assembler) { | |
| 343 // Load code pointer to this stub from the thread: | |
| 344 // The one that is passed in is not correct: it points to the code object | |
| 345 // that needs to be replaced. | |
| 346 __ lw(CODE_REG, Address(THR, Thread::fix_callers_target_code_offset())); | |
| 347 // Create a stub frame as we are pushing some objects on the stack before | |
| 348 // calling into the runtime. | |
| 349 __ EnterStubFrame(); | |
| 350 // Set up space on stack for return value and preserve arguments descriptor. | |
| 351 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 352 __ sw(S4, Address(SP, 1 * kWordSize)); | |
| 353 __ sw(ZR, Address(SP, 0 * kWordSize)); | |
| 354 __ CallRuntime(kFixCallersTargetRuntimeEntry, 0); | |
| 355 // Get Code object result and restore arguments descriptor array. | |
| 356 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); | |
| 357 __ lw(S4, Address(SP, 1 * kWordSize)); | |
| 358 __ addiu(SP, SP, Immediate(2 * kWordSize)); | |
| 359 | |
| 360 // Jump to the dart function. | |
| 361 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
| 362 | |
| 363 // Remove the stub frame. | |
| 364 __ LeaveStubFrameAndReturn(T0); | |
| 365 } | |
| 366 | |
| 367 | |
| 368 // Called from the object allocation instruction when the allocation stub | |
| 369 // has been disabled. | |
| 370 void StubCode::GenerateFixAllocationStubTargetStub(Assembler* assembler) { | |
| 371 // Load code pointer to this stub from the thread: | |
| 372 // The one that is passed in is not correct: it points to the code object | |
| 373 // that needs to be replaced. | |
| 374 __ lw(CODE_REG, Address(THR, Thread::fix_allocation_stub_code_offset())); | |
| 375 __ EnterStubFrame(); | |
| 376 // Set up space on stack for return value. | |
| 377 __ addiu(SP, SP, Immediate(-1 * kWordSize)); | |
| 378 __ sw(ZR, Address(SP, 0 * kWordSize)); | |
| 379 __ CallRuntime(kFixAllocationStubTargetRuntimeEntry, 0); | |
| 380 // Get Code object result. | |
| 381 __ lw(CODE_REG, Address(SP, 0 * kWordSize)); | |
| 382 __ addiu(SP, SP, Immediate(1 * kWordSize)); | |
| 383 | |
| 384 // Jump to the dart function. | |
| 385 __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
| 386 | |
| 387 // Remove the stub frame. | |
| 388 __ LeaveStubFrameAndReturn(T0); | |
| 389 } | |
| 390 | |
| 391 | |
| 392 // Input parameters: | |
| 393 // A1: Smi-tagged argument count, may be zero. | |
| 394 // FP[kParamEndSlotFromFp + 1]: Last argument. | |
| 395 static void PushArgumentsArray(Assembler* assembler) { | |
| 396 __ Comment("PushArgumentsArray"); | |
| 397 // Allocate array to store arguments of caller. | |
| 398 __ LoadObject(A0, Object::null_object()); | |
| 399 // A0: Null element type for raw Array. | |
| 400 // A1: Smi-tagged argument count, may be zero. | |
| 401 __ BranchLink(*StubCode::AllocateArray_entry()); | |
| 402 __ Comment("PushArgumentsArray return"); | |
| 403 // V0: newly allocated array. | |
| 404 // A1: Smi-tagged argument count, may be zero (was preserved by the stub). | |
| 405 __ Push(V0); // Array is in V0 and on top of stack. | |
| 406 __ sll(T1, A1, 1); | |
| 407 __ addu(T1, FP, T1); | |
| 408 __ AddImmediate(T1, kParamEndSlotFromFp * kWordSize); | |
| 409 // T1: address of first argument on stack. | |
| 410 // T2: address of first argument in array. | |
| 411 | |
| 412 Label loop, loop_exit; | |
| 413 __ blez(A1, &loop_exit); | |
| 414 __ delay_slot()->addiu(T2, V0, | |
| 415 Immediate(Array::data_offset() - kHeapObjectTag)); | |
| 416 __ Bind(&loop); | |
| 417 __ lw(T3, Address(T1)); | |
| 418 __ addiu(A1, A1, Immediate(-Smi::RawValue(1))); | |
| 419 __ addiu(T1, T1, Immediate(-kWordSize)); | |
| 420 __ addiu(T2, T2, Immediate(kWordSize)); | |
| 421 __ bgez(A1, &loop); | |
| 422 __ delay_slot()->sw(T3, Address(T2, -kWordSize)); | |
| 423 __ Bind(&loop_exit); | |
| 424 } | |
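
The loop above reads the caller's arguments from the highest stack address downward while filling the array front to back. A C-level sketch of the same copy, with an untagged count and names of our own choosing:

```cpp
#include <stdint.h>

// first_arg points at the argument with the highest address (the first
// positional argument); the stack grows down, the array grows up.
static void CopyArgumentsSketch(intptr_t* first_arg,
                                intptr_t* array_data,
                                intptr_t argc) {
  for (intptr_t i = 0; i < argc; i++) {
    array_data[i] = first_arg[-i];
  }
}
```
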
| 425 | |
| 426 | |
| 427 // Used by eager and lazy deoptimization. Preserve result in V0 if necessary. | |
| 428 // This stub translates an optimized frame into an unoptimized frame. The | |
| 429 // optimized frame can contain values in registers and on the stack; the | |
| 430 // unoptimized frame keeps all values on the stack. | |
| 431 // Deoptimization occurs in the following steps: | |
| 432 // - Push all registers that can contain values. | |
| 433 // - Call C routine to copy the stack and saved registers into temporary buffer. | |
| 434 // - Adjust caller's frame to correct unoptimized frame size. | |
| 435 // - Fill the unoptimized frame. | |
| 436 // - Materialize objects that require allocation (e.g. Double instances). | |
| 437 // GC can occur only after the frame is fully rewritten. | |
| 438 // Stack after EnterFrame(...) below: | |
| 439 // +------------------+ | |
| 440 // | Saved PP | <- TOS | |
| 441 // +------------------+ | |
| 442 // | Saved CODE_REG | | |
| 443 // +------------------+ | |
| 444 // | Saved FP | <- FP of stub | |
| 445 // +------------------+ | |
| 446 // | Saved LR | (deoptimization point) | |
| 447 // +------------------+ | |
| 448 // | Saved CODE_REG | | |
| 449 // +------------------+ | |
| 450 // | ... | <- SP of optimized frame | |
| 451 // | |
| 452 // Parts of this code cannot trigger GC; other parts can. | |
| 453 static void GenerateDeoptimizationSequence(Assembler* assembler, | |
| 454 DeoptStubKind kind) { | |
| 455 const intptr_t kPushedRegistersSize = | |
| 456 kNumberOfCpuRegisters * kWordSize + kNumberOfFRegisters * kWordSize; | |
| 457 | |
| 458 __ SetPrologueOffset(); | |
| 459 __ Comment("GenerateDeoptimizationSequence"); | |
| 460 // DeoptimizeCopyFrame expects a Dart frame. | |
| 461 __ EnterStubFrame(kPushedRegistersSize); | |
| 462 | |
| 463 // The code in this frame may not cause GC. kDeoptimizeCopyFrameRuntimeEntry | |
| 464 // and kDeoptimizeFillFrameRuntimeEntry are leaf runtime calls. | |
| 465 const intptr_t saved_result_slot_from_fp = | |
| 466 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); | |
| 467 const intptr_t saved_exception_slot_from_fp = | |
| 468 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V0); | |
| 469 const intptr_t saved_stacktrace_slot_from_fp = | |
| 470 kFirstLocalSlotFromFp + 1 - (kNumberOfCpuRegisters - V1); | |
| 471 // Result in V0 is preserved as part of pushing all registers below. | |
| 472 | |
| 473 // Push registers in their enumeration order: lowest register number at | |
| 474 // lowest address. | |
| 475 for (int i = 0; i < kNumberOfCpuRegisters; i++) { | |
| 476 const int slot = kNumberOfCpuRegisters - i; | |
| 477 Register reg = static_cast<Register>(i); | |
| 478 if (reg == CODE_REG) { | |
| 479 // Save the original value of CODE_REG pushed before invoking this stub | |
| 480 // instead of the value used to call this stub. | |
| 481 COMPILE_ASSERT(TMP < CODE_REG); // Assert TMP is pushed first. | |
| 482 __ lw(TMP, Address(FP, kCallerSpSlotFromFp * kWordSize)); | |
| 483 __ sw(TMP, Address(SP, kPushedRegistersSize - slot * kWordSize)); | |
| 484 } else { | |
| 485 __ sw(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | |
| 486 } | |
| 487 } | |
| 488 for (int i = 0; i < kNumberOfFRegisters; i++) { | |
| 489 // These go below the CPU registers. | |
| 490 const int slot = kNumberOfCpuRegisters + kNumberOfFRegisters - i; | |
| 491 FRegister reg = static_cast<FRegister>(i); | |
| 492 __ swc1(reg, Address(SP, kPushedRegistersSize - slot * kWordSize)); | |
| 493 } | |
| 494 | |
| 495 __ mov(A0, SP); // Pass address of saved registers block. | |
| 496 bool is_lazy = | |
| 497 (kind == kLazyDeoptFromReturn) || (kind == kLazyDeoptFromThrow); | |
| 498 __ LoadImmediate(A1, is_lazy ? 1 : 0); | |
| 499 __ ReserveAlignedFrameSpace(1 * kWordSize); | |
| 500 __ CallRuntime(kDeoptimizeCopyFrameRuntimeEntry, 2); | |
| 501 // Result (V0) is stack-size (FP - SP) in bytes, incl. the return address. | |
| 502 | |
| 503 if (kind == kLazyDeoptFromReturn) { | |
| 504 // Restore result into T1 temporarily. | |
| 505 __ lw(T1, Address(FP, saved_result_slot_from_fp * kWordSize)); | |
| 506 } else if (kind == kLazyDeoptFromThrow) { | |
| 507 // Restore exception into T1 and stacktrace into T2 temporarily. | |
| 508 __ lw(T1, Address(FP, saved_exception_slot_from_fp * kWordSize)); | |
| 509 __ lw(T2, Address(FP, saved_stacktrace_slot_from_fp * kWordSize)); | |
| 510 } | |
| 511 | |
| 512 __ RestoreCodePointer(); | |
| 513 __ LeaveDartFrame(); | |
| 514 __ subu(SP, FP, V0); | |
| 515 | |
| 516 // DeoptimizeFillFrame expects a Dart frame, i.e. EnterDartFrame(0), but there | |
| 517 // is no need to set the correct PC marker or load PP, since they get patched. | |
| 518 __ EnterStubFrame(); | |
| 519 | |
| 520 __ mov(A0, FP); // Get last FP address. | |
| 521 if (kind == kLazyDeoptFromReturn) { | |
| 522 __ Push(T1); // Preserve result as first local. | |
| 523 } else if (kind == kLazyDeoptFromThrow) { | |
| 524 __ Push(T1); // Preserve exception as first local. | |
| 525 __ Push(T2); // Preserve stacktrace as second local. | |
| 526 } | |
| 527 __ ReserveAlignedFrameSpace(1 * kWordSize); | |
| 528 __ CallRuntime(kDeoptimizeFillFrameRuntimeEntry, 1); // Pass last FP in A0. | |
| 529 if (kind == kLazyDeoptFromReturn) { | |
| 530 // Restore result into T1. | |
| 531 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | |
| 532 } else if (kind == kLazyDeoptFromThrow) { | |
| 533 // Restore exception into T1 and stacktrace into T2. | |
| 534 __ lw(T1, Address(FP, kFirstLocalSlotFromFp * kWordSize)); | |
| 535 __ lw(T2, Address(FP, (kFirstLocalSlotFromFp - 1) * kWordSize)); | |
| 536 } | |
| 537 // Code above cannot cause GC. | |
| 538 __ RestoreCodePointer(); | |
| 539 __ LeaveStubFrame(); | |
| 540 | |
| 541 // Frame is fully rewritten at this point and it is safe to perform a GC. | |
| 542 // Materialize any objects that were deferred by FillFrame because they | |
| 543 // require allocation. | |
| 544 // Enter a stub frame, loading PP. The caller's PP is not materialized yet. | |
| 545 __ EnterStubFrame(); | |
| 546 if (kind == kLazyDeoptFromReturn) { | |
| 547 __ Push(T1); // Preserve result, it will be GC-d here. | |
| 548 } else if (kind == kLazyDeoptFromThrow) { | |
| 549 __ Push(T1); // Preserve exception, it will be GC-d here. | |
| 550 __ Push(T2); // Preserve stacktrace, it will be GC-d here. | |
| 551 } | |
| 552 __ PushObject(Smi::ZoneHandle()); // Space for the result. | |
| 553 __ CallRuntime(kDeoptimizeMaterializeRuntimeEntry, 0); | |
| 554 // Result tells the stub how many bytes to remove from the expression stack | |
| 555 // of the bottom-most frame. They were used as materialization arguments. | |
| 556 __ Pop(T1); | |
| 557 if (kind == kLazyDeoptFromReturn) { | |
| 558 __ Pop(V0); // Restore result. | |
| 559 } else if (kind == kLazyDeoptFromThrow) { | |
| 560 __ Pop(V1); // Restore stacktrace. | |
| 561 __ Pop(V0); // Restore exception. | |
| 562 } | |
| 563 __ LeaveStubFrame(); | |
| 564 // Remove materialization arguments. | |
| 565 __ SmiUntag(T1); | |
| 566 __ addu(SP, SP, T1); | |
| 567 // The caller is responsible for emitting the return instruction. | |
| 568 } | |
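
The slot arithmetic in the register-save loops above simplifies to a fixed layout: the FPU block sits at the bottom of the save area and the CPU registers above it, lowest register number at the lowest address. A small sketch of the resulting byte offsets from SP; the function names are ours:

```cpp
#include <stdint.h>

// CPU register r: kPushedRegistersSize - (kNumberOfCpuRegisters - r) * w
// simplifies to (num_fpu_regs + r) * w.
static intptr_t SavedCpuRegOffsetSketch(int r, int num_fpu_regs,
                                        int word_size) {
  return (num_fpu_regs + r) * word_size;
}

// FPU register f: kPushedRegistersSize -
// (kNumberOfCpuRegisters + kNumberOfFRegisters - f) * w simplifies to f * w.
static intptr_t SavedFpuRegOffsetSketch(int f, int word_size) {
  return f * word_size;
}
```
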
| 569 | |
| 570 // V0: result, must be preserved | |
| 571 void StubCode::GenerateDeoptimizeLazyFromReturnStub(Assembler* assembler) { | |
| 572 // Push zap value instead of CODE_REG for lazy deopt. | |
| 573 __ LoadImmediate(TMP, kZapCodeReg); | |
| 574 __ Push(TMP); | |
| 575 // Return address for "call" to deopt stub. | |
| 576 __ LoadImmediate(RA, kZapReturnAddress); | |
| 577 __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_return_stub_offset())); | |
| 578 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromReturn); | |
| 579 __ Ret(); | |
| 580 } | |
| 581 | |
| 582 | |
| 583 // V0: exception, must be preserved | |
| 584 // V1: stacktrace, must be preserved | |
| 585 void StubCode::GenerateDeoptimizeLazyFromThrowStub(Assembler* assembler) { | |
| 586 // Push zap value instead of CODE_REG for lazy deopt. | |
| 587 __ LoadImmediate(TMP, kZapCodeReg); | |
| 588 __ Push(TMP); | |
| 589 // Return address for "call" to deopt stub. | |
| 590 __ LoadImmediate(RA, kZapReturnAddress); | |
| 591 __ lw(CODE_REG, Address(THR, Thread::lazy_deopt_from_throw_stub_offset())); | |
| 592 GenerateDeoptimizationSequence(assembler, kLazyDeoptFromThrow); | |
| 593 __ Ret(); | |
| 594 } | |
| 595 | |
| 596 | |
| 597 void StubCode::GenerateDeoptimizeStub(Assembler* assembler) { | |
| 598 GenerateDeoptimizationSequence(assembler, kEagerDeopt); | |
| 599 __ Ret(); | |
| 600 } | |
| 601 | |
| 602 | |
| 603 static void GenerateDispatcherCode(Assembler* assembler, | |
| 604 Label* call_target_function) { | |
| 605 __ Comment("NoSuchMethodDispatch"); | |
| 606 // When lazily generated invocation dispatchers are disabled, the | |
| 607 // miss-handler may return null. | |
| 608 __ BranchNotEqual(T0, Object::null_object(), call_target_function); | |
| 609 __ EnterStubFrame(); | |
| 610 // Load the receiver. | |
| 611 __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); | |
| 612 __ sll(TMP, A1, 1); // A1 is a Smi. | |
| 613 __ addu(TMP, FP, TMP); | |
| 614 __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize)); | |
| 615 | |
| 616 // Push space for the return value. | |
| 617 // Push the receiver. | |
| 618 // Push ICData/MegamorphicCache object. | |
| 619 // Push arguments descriptor array. | |
| 620 // Push original arguments array. | |
| 621 __ addiu(SP, SP, Immediate(-4 * kWordSize)); | |
| 622 __ sw(ZR, Address(SP, 3 * kWordSize)); | |
| 623 __ sw(T6, Address(SP, 2 * kWordSize)); | |
| 624 __ sw(S5, Address(SP, 1 * kWordSize)); | |
| 625 __ sw(S4, Address(SP, 0 * kWordSize)); | |
| 626 | |
| 627 // Adjust arguments count. | |
| 628 __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset())); | |
| 629 Label args_count_ok; | |
| 630 __ BranchEqual(TMP, Immediate(0), &args_count_ok); | |
| 631 __ AddImmediate(A1, A1, Smi::RawValue(1)); // Include the type arguments. | |
| 632 __ Bind(&args_count_ok); | |
| 633 | |
| 634 // A1: Smi-tagged arguments array length. | |
| 635 PushArgumentsArray(assembler); | |
| 636 const intptr_t kNumArgs = 4; | |
| 637 __ CallRuntime(kInvokeNoSuchMethodDispatcherRuntimeEntry, kNumArgs); | |
| 638 __ lw(V0, Address(SP, 4 * kWordSize)); // Return value. | |
| 639 __ addiu(SP, SP, Immediate(5 * kWordSize)); | |
| 640 __ LeaveStubFrame(); | |
| 641 __ Ret(); | |
| 642 } | |
| 643 | |
| 644 | |
| 645 void StubCode::GenerateMegamorphicMissStub(Assembler* assembler) { | |
| 646 __ EnterStubFrame(); | |
| 647 | |
| 648 // Load the receiver. | |
| 649 __ lw(T2, FieldAddress(S4, ArgumentsDescriptor::count_offset())); | |
| 650 __ sll(T2, T2, 1); // T2 is a Smi. | |
| 651 __ addu(TMP, FP, T2); | |
| 652 __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize)); | |
| 653 | |
| 654 // Preserve IC data and arguments descriptor. | |
| 655 __ addiu(SP, SP, Immediate(-6 * kWordSize)); | |
| 656 __ sw(S5, Address(SP, 5 * kWordSize)); | |
| 657 __ sw(S4, Address(SP, 4 * kWordSize)); | |
| 658 | |
| 659 // Push space for the return value. | |
| 660 // Push the receiver. | |
| 661 // Push IC data object. | |
| 662 // Push arguments descriptor array. | |
| 663 __ sw(ZR, Address(SP, 3 * kWordSize)); | |
| 664 __ sw(T6, Address(SP, 2 * kWordSize)); | |
| 665 __ sw(S5, Address(SP, 1 * kWordSize)); | |
| 666 __ sw(S4, Address(SP, 0 * kWordSize)); | |
| 667 | |
| 668 __ CallRuntime(kMegamorphicCacheMissHandlerRuntimeEntry, 3); | |
| 669 | |
| 670 __ lw(T0, Address(SP, 3 * kWordSize)); // Get result function. | |
| 671 __ lw(S4, Address(SP, 4 * kWordSize)); // Restore argument descriptor. | |
| 672 __ lw(S5, Address(SP, 5 * kWordSize)); // Restore IC data. | |
| 673 __ addiu(SP, SP, Immediate(6 * kWordSize)); | |
| 674 | |
| 675 __ RestoreCodePointer(); | |
| 676 __ LeaveStubFrame(); | |
| 677 | |
| 678 if (!FLAG_lazy_dispatchers) { | |
| 679 Label call_target_function; | |
| 680 GenerateDispatcherCode(assembler, &call_target_function); | |
| 681 __ Bind(&call_target_function); | |
| 682 } | |
| 683 | |
| 684 __ lw(CODE_REG, FieldAddress(T0, Function::code_offset())); | |
| 685 __ lw(T2, FieldAddress(T0, Function::entry_point_offset())); | |
| 686 __ jr(T2); | |
| 687 } | |
| 688 | |
| 689 | |
| 690 // Called for inline allocation of arrays. | |
| 691 // Input parameters: | |
| 692 // RA: return address. | |
| 693 // A1: Array length as Smi (must be preserved). | |
| 694 // A0: array element type (either NULL or an instantiated type). | |
| 695 // NOTE: A1 cannot be clobbered here as the caller relies on it being saved. | |
| 696 // The newly allocated object is returned in V0. | |
| 697 void StubCode::GenerateAllocateArrayStub(Assembler* assembler) { | |
| 698 __ Comment("AllocateArrayStub"); | |
| 699 Label slow_case; | |
| 700 // Compute the size to be allocated; it is based on the array length | |
| 701 // and is computed as: | |
| 702 // RoundedAllocationSize((array_length * kWordSize) + sizeof(RawArray)). | |
| 703 __ mov(T3, A1); // Array length. | |
| 704 | |
| 705 // Check that length is a positive Smi. | |
| 706 __ andi(CMPRES1, T3, Immediate(kSmiTagMask)); | |
| 707 if (FLAG_use_slow_path) { | |
| 708 __ b(&slow_case); | |
| 709 } else { | |
| 710 __ bne(CMPRES1, ZR, &slow_case); | |
| 711 } | |
| 712 __ bltz(T3, &slow_case); | |
| 713 | |
| 714 // Check for maximum allowed length. | |
| 715 const intptr_t max_len = | |
| 716 reinterpret_cast<int32_t>(Smi::New(Array::kMaxElements)); | |
| 717 __ BranchUnsignedGreater(T3, Immediate(max_len), &slow_case); | |
| 718 | |
| 719 const intptr_t cid = kArrayCid; | |
| 720 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, T4, &slow_case)); | |
| 721 | |
| 722 const intptr_t fixed_size_plus_alignment_padding = | |
| 723 sizeof(RawArray) + kObjectAlignment - 1; | |
| 724 __ LoadImmediate(T2, fixed_size_plus_alignment_padding); | |
| 725 __ sll(T3, T3, 1); // T3 is a Smi. | |
| 726 __ addu(T2, T2, T3); | |
| 727 ASSERT(kSmiTagShift == 1); | |
| 728 __ LoadImmediate(T3, ~(kObjectAlignment - 1)); | |
| 729 __ and_(T2, T2, T3); | |
| 730 | |
| 731 // T2: Allocation size. | |
| 732 | |
| 733 Heap::Space space = Heap::kNew; | |
| 734 __ lw(T3, Address(THR, Thread::heap_offset())); | |
| 735 // Potential new object start. | |
| 736 __ lw(T0, Address(T3, Heap::TopOffset(space))); | |
| 737 | |
| 738 __ addu(T1, T0, T2); // Potential next object start. | |
| 739 __ BranchUnsignedLess(T1, T0, &slow_case); // Branch on unsigned overflow. | |
| 740 | |
| 741 // Check if the allocation fits into the remaining space. | |
| 742 // T0: potential new object start. | |
| 743 // T1: potential next object start. | |
| 744 // T2: allocation size. | |
| 745 // T3: heap. | |
| 746 __ lw(T4, Address(T3, Heap::EndOffset(space))); | |
| 747 __ BranchUnsignedGreaterEqual(T1, T4, &slow_case); | |
| 748 | |
| 749 // Successfully allocated the object(s); now update top to point to | |
| 750 // next object start and initialize the object. | |
| 751 // T3: heap. | |
| 752 __ sw(T1, Address(T3, Heap::TopOffset(space))); | |
| 753 __ addiu(T0, T0, Immediate(kHeapObjectTag)); | |
| 754 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T4, space)); | |
| 755 | |
| 756 // Initialize the tags. | |
| 757 // T0: new object start as a tagged pointer. | |
| 758 // T1: new object end address. | |
| 759 // T2: allocation size. | |
| 760 { | |
| 761 Label overflow, done; | |
| 762 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | |
| 763 | |
| 764 __ BranchUnsignedGreater(T2, Immediate(RawObject::SizeTag::kMaxSizeTag), | |
| 765 &overflow); | |
| 766 __ b(&done); | |
| 767 __ delay_slot()->sll(T2, T2, shift); | |
| 768 __ Bind(&overflow); | |
| 769 __ mov(T2, ZR); | |
| 770 __ Bind(&done); | |
| 771 | |
| 772 // Get the class index and insert it into the tags. | |
| 773 // T2: size and bit tags. | |
| 774 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | |
| 775 __ or_(T2, T2, TMP); | |
| 776 __ sw(T2, FieldAddress(T0, Array::tags_offset())); // Store tags. | |
| 777 } | |
| 778 | |
| 779 // T0: new object start as a tagged pointer. | |
| 780 // T1: new object end address. | |
| 781 // Store the type argument field. | |
| 782 __ StoreIntoObjectNoBarrier( | |
| 783 T0, FieldAddress(T0, Array::type_arguments_offset()), A0); | |
| 784 | |
| 785 // Set the length field. | |
| 786 __ StoreIntoObjectNoBarrier(T0, FieldAddress(T0, Array::length_offset()), A1); | |
| 787 | |
| 788 __ LoadObject(T7, Object::null_object()); | |
| 789 // Initialize all array elements to raw_null. | |
| 790 // T0: new object start as a tagged pointer. | |
| 791 // T1: new object end address. | |
| 792 // T2: iterator which initially points to the start of the variable | |
| 793 // data area to be initialized. | |
| 794 // T7: null. | |
| 795 __ AddImmediate(T2, T0, sizeof(RawArray) - kHeapObjectTag); | |
| 796 | |
| 797 Label done; | |
| 798 Label init_loop; | |
| 799 __ Bind(&init_loop); | |
| 800 __ BranchUnsignedGreaterEqual(T2, T1, &done); | |
| 801 __ sw(T7, Address(T2, 0)); | |
| 802 __ b(&init_loop); | |
| 803 __ delay_slot()->addiu(T2, T2, Immediate(kWordSize)); | |
| 804 __ Bind(&done); | |
| 805 | |
| 806 __ Ret(); // Returns the newly allocated object in V0. | |
| 807 __ delay_slot()->mov(V0, T0); | |
| 808 | |
| 809 // Unable to allocate the array using the fast inline code, just call | |
| 810 // into the runtime. | |
| 811 __ Bind(&slow_case); | |
| 812 // Create a stub frame as we are pushing some objects on the stack before | |
| 813 // calling into the runtime. | |
| 814 __ EnterStubFrame(); | |
| 815 // Set up space on stack for return value. | |
| 816 // Push array length as Smi and element type. | |
| 817 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | |
| 818 __ sw(ZR, Address(SP, 2 * kWordSize)); | |
| 819 __ sw(A1, Address(SP, 1 * kWordSize)); | |
| 820 __ sw(A0, Address(SP, 0 * kWordSize)); | |
| 821 __ CallRuntime(kAllocateArrayRuntimeEntry, 2); | |
| 822 __ Comment("AllocateArrayStub return"); | |
| 823 // Pop arguments; result is popped into V0. | |
| 824 __ lw(V0, Address(SP, 2 * kWordSize)); | |
| 825 __ lw(A1, Address(SP, 1 * kWordSize)); | |
| 826 __ lw(A0, Address(SP, 0 * kWordSize)); | |
| 827 __ addiu(SP, SP, Immediate(3 * kWordSize)); | |
| 828 | |
| 829 __ LeaveStubFrameAndReturn(); | |
| 830 } | |
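
The addu/and_ sequence at the top of the stub computes the rounding formula named in its comment. A hedged C equivalent, assuming 32-bit words and a power-of-two kObjectAlignment; the names are ours:

```cpp
#include <stdint.h>

// (header + length * kWordSize + alignment - 1) & ~(alignment - 1),
// exactly what T2 holds after the mask above.
static uint32_t RoundedArraySizeSketch(uint32_t length,
                                       uint32_t header_size,
                                       uint32_t alignment) {
  uint32_t size = header_size + length * 4;  // 4 == kWordSize on MIPS32.
  return (size + alignment - 1) & ~(alignment - 1);
}
```
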
| 831 | |
| 832 | |
| 833 // Called when invoking Dart code from C++ (VM code). | |
| 834 // Input parameters: | |
| 835 // RA : points to return address. | |
| 836 // A0 : code object of the Dart function to call. | |
| 837 // A1 : arguments descriptor array. | |
| 838 // A2 : arguments array. | |
| 839 // A3 : current thread. | |
| 840 void StubCode::GenerateInvokeDartCodeStub(Assembler* assembler) { | |
| 841 // Save frame pointer coming in. | |
| 842 __ Comment("InvokeDartCodeStub"); | |
| 843 __ EnterFrame(); | |
| 844 | |
| 845 // Push code object to PC marker slot. | |
| 846 __ lw(TMP, Address(A3, Thread::invoke_dart_code_stub_offset())); | |
| 847 __ Push(TMP); | |
| 848 | |
| 849 // Save new context and C++ ABI callee-saved registers. | |
| 850 | |
| 851 // The saved vm tag, top resource, and top exit frame info. | |
| 852 const intptr_t kPreservedSlots = 3; | |
| 853 const intptr_t kPreservedRegSpace = | |
| 854 kWordSize * | |
| 855 (kAbiPreservedCpuRegCount + kAbiPreservedFpuRegCount + kPreservedSlots); | |
| 856 | |
| 857 __ addiu(SP, SP, Immediate(-kPreservedRegSpace)); | |
| 858 for (int i = S0; i <= S7; i++) { | |
| 859 Register r = static_cast<Register>(i); | |
| 860 const intptr_t slot = i - S0 + kPreservedSlots; | |
| 861 __ sw(r, Address(SP, slot * kWordSize)); | |
| 862 } | |
| 863 | |
| 864 for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; | |
| 865 i++) { | |
| 866 FRegister r = static_cast<FRegister>(i); | |
| 867 const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i - | |
| 868 kAbiFirstPreservedFpuReg; | |
| 869 __ swc1(r, Address(SP, slot * kWordSize)); | |
| 870 } | |
| 871 | |
| 872 // We now load the pool pointer (PP) with a GC-safe value as we are about | |
| 873 // to invoke Dart code. | |
| 874 __ LoadImmediate(PP, 0); | |
| 875 | |
| 876 // Set up THR, which caches the current thread in Dart code. | |
| 877 if (THR != A3) { | |
| 878 __ mov(THR, A3); | |
| 879 } | |
| 880 | |
| 881 // Save the current VMTag on the stack. | |
| 882 __ lw(T1, Assembler::VMTagAddress()); | |
| 883 __ sw(T1, Address(SP, 2 * kWordSize)); | |
| 884 | |
| 885 // Mark that the thread is executing Dart code. | |
| 886 __ LoadImmediate(T0, VMTag::kDartTagId); | |
| 887 __ sw(T0, Assembler::VMTagAddress()); | |
| 888 | |
| 889 // Save top resource and top exit frame info. Use T0 as a temporary register. | |
| 890 // StackFrameIterator reads the top exit frame info saved in this frame. | |
| 891 __ lw(T0, Address(THR, Thread::top_resource_offset())); | |
| 892 __ sw(ZR, Address(THR, Thread::top_resource_offset())); | |
| 893 __ sw(T0, Address(SP, 1 * kWordSize)); | |
| 894 __ lw(T0, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 895 __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 896 // kExitLinkSlotFromEntryFp must be kept in sync with the code below. | |
| 897 ASSERT(kExitLinkSlotFromEntryFp == -24); | |
| 898 __ sw(T0, Address(SP, 0 * kWordSize)); | |
| 899 | |
| 900 // After the call, the stack pointer is restored to this location. | |
| 901 // Pushed: S0-S7, F20-F31, VM tag, top resource, exit frame info = 23 words. | |
| 902 | |
| 903 // Load arguments descriptor array into S4, which is passed to Dart code. | |
| 904 __ lw(S4, Address(A1, VMHandles::kOffsetOfRawPtrInHandle)); | |
| 905 | |
| 906 // No need to check for type args, disallowed by DartEntry::InvokeFunction. | |
| 907 // Load number of arguments into S5. | |
| 908 __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset())); | |
| 909 __ SmiUntag(T1); | |
| 910 | |
| 911 // Compute address of 'arguments array' data area into A2. | |
| 912 __ lw(A2, Address(A2, VMHandles::kOffsetOfRawPtrInHandle)); | |
| 913 | |
| 914 // Set up arguments for the Dart call. | |
| 915 Label push_arguments; | |
| 916 Label done_push_arguments; | |
| 917 __ beq(T1, ZR, &done_push_arguments); // Check if there are arguments. | |
| 918 __ delay_slot()->addiu(A2, A2, | |
| 919 Immediate(Array::data_offset() - kHeapObjectTag)); | |
| 920 __ mov(A1, ZR); | |
| 921 __ Bind(&push_arguments); | |
| 922 __ lw(A3, Address(A2)); | |
| 923 __ Push(A3); | |
| 924 __ addiu(A1, A1, Immediate(1)); | |
| 925 __ BranchSignedLess(A1, T1, &push_arguments); | |
| 926 __ delay_slot()->addiu(A2, A2, Immediate(kWordSize)); | |
| 927 | |
| 928 __ Bind(&done_push_arguments); | |
| 929 | |
| 930 // Call the Dart code entrypoint. | |
| 931 // We are calling into Dart code, here, so there is no need to call through | |
| 932 // T9 to match the ABI. | |
| 933 __ lw(CODE_REG, Address(A0, VMHandles::kOffsetOfRawPtrInHandle)); | |
| 934 __ lw(A0, FieldAddress(CODE_REG, Code::entry_point_offset())); | |
| 935 __ jalr(A0); // S4 is the arguments descriptor array. | |
| 936 __ Comment("InvokeDartCodeStub return"); | |
| 937 | |
| 938 // Get rid of arguments pushed on the stack. | |
| 939 __ AddImmediate(SP, FP, kExitLinkSlotFromEntryFp * kWordSize); | |
| 940 | |
| 941 | |
| 942 // Restore the current VMTag from the stack. | |
| 943 __ lw(T1, Address(SP, 2 * kWordSize)); | |
| 944 __ sw(T1, Assembler::VMTagAddress()); | |
| 945 | |
| 946 // Restore the saved top resource and top exit frame info back into the | |
| 947 // Thread structure. Uses T0 as a temporary register for this. | |
| 948 __ lw(T0, Address(SP, 1 * kWordSize)); | |
| 949 __ sw(T0, Address(THR, Thread::top_resource_offset())); | |
| 950 __ lw(T0, Address(SP, 0 * kWordSize)); | |
| 951 __ sw(T0, Address(THR, Thread::top_exit_frame_info_offset())); | |
| 952 | |
| 953 // Restore C++ ABI callee-saved registers. | |
| 954 for (int i = S0; i <= S7; i++) { | |
| 955 Register r = static_cast<Register>(i); | |
| 956 const intptr_t slot = i - S0 + kPreservedSlots; | |
| 957 __ lw(r, Address(SP, slot * kWordSize)); | |
| 958 } | |
| 959 | |
| 960 for (intptr_t i = kAbiFirstPreservedFpuReg; i <= kAbiLastPreservedFpuReg; | |
| 961 i++) { | |
| 962 FRegister r = static_cast<FRegister>(i); | |
| 963 const intptr_t slot = kAbiPreservedCpuRegCount + kPreservedSlots + i - | |
| 964 kAbiFirstPreservedFpuReg; | |
| 965 __ lwc1(r, Address(SP, slot * kWordSize)); | |
| 966 } | |
| 967 | |
| 968 __ addiu(SP, SP, Immediate(kPreservedRegSpace)); | |
| 969 | |
| 970 // Restore the frame pointer and return. | |
| 971 __ LeaveFrameAndReturn(); | |
| 972 } | |
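
The ASSERT(kExitLinkSlotFromEntryFp == -24) above can be double-checked by counting what the stub pushes below the entry FP, assuming EnterFrame leaves FP just above these pushes; the constant name is ours:

```cpp
// 1 code-object word + 8 CPU regs (S0..S7) + 12 FPU regs (F20..F31)
// + 3 slots (VM tag, top resource, top exit frame info) = 24 words,
// so the saved exit-frame info sits 24 words below the entry FP.
static const int kWordsBelowEntryFpSketch = 1 + 8 + 12 + 3;  // == 24
```
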
| 973 | |
| 974 | |
| 975 // Called for inline allocation of contexts. | |
| 976 // Input: | |
| 977 // T1: number of context variables. | |
| 978 // Output: | |
| 979 // V0: newly allocated RawContext object. | |
| 980 void StubCode::GenerateAllocateContextStub(Assembler* assembler) { | |
| 981 __ Comment("AllocateContext"); | |
| 982 if (FLAG_inline_alloc) { | |
| 983 Label slow_case; | |
| 984 // First compute the rounded instance size. | |
| 985 // T1: number of context variables. | |
| 986 intptr_t fixed_size_plus_alignment_padding = | |
| 987 sizeof(RawContext) + kObjectAlignment - 1; | |
| 988 __ LoadImmediate(T2, fixed_size_plus_alignment_padding); | |
| 989 __ sll(T0, T1, 2); | |
| 990 __ addu(T2, T2, T0); | |
| 991 ASSERT(kSmiTagShift == 1); | |
| 992 __ LoadImmediate(T0, ~(kObjectAlignment - 1)); | |
| 993 __ and_(T2, T2, T0); | |
| 994 | |
| 995 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, T4, &slow_case)); | |
| 996 // Now allocate the object. | |
| 997 // T1: number of context variables. | |
| 998 // T2: object size. | |
| 999 const intptr_t cid = kContextCid; | |
| 1000 Heap::Space space = Heap::kNew; | |
| 1001 __ lw(T5, Address(THR, Thread::heap_offset())); | |
| 1002 __ lw(V0, Address(T5, Heap::TopOffset(space))); | |
| 1003 __ addu(T3, T2, V0); | |
| 1004 | |
| 1005 // Check if the allocation fits into the remaining space. | |
| 1006 // V0: potential new object. | |
| 1007 // T1: number of context variables. | |
| 1008 // T2: object size. | |
| 1009 // T3: potential next object start. | |
| 1010 // T5: heap. | |
| 1011 __ lw(CMPRES1, Address(T5, Heap::EndOffset(space))); | |
| 1012 if (FLAG_use_slow_path) { | |
| 1013 __ b(&slow_case); | |
| 1014 } else { | |
| 1015 __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case); | |
| 1016 } | |
| 1017 | |
| 1018 // Successfully allocated the object; now update top to point to | |
| 1019 // next object start and initialize the object. | |
| 1020 // V0: new object. | |
| 1021 // T1: number of context variables. | |
| 1022 // T2: object size. | |
| 1023 // T3: next object start. | |
| 1024 // T5: heap. | |
| 1025 __ sw(T3, Address(T5, Heap::TopOffset(space))); | |
| 1026 __ addiu(V0, V0, Immediate(kHeapObjectTag)); | |
| 1027 NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, T2, T5, space)); | |
| 1028 | |
| 1029 // Calculate the size tag. | |
| 1030 // V0: new object. | |
| 1031 // T1: number of context variables. | |
| 1032 // T2: object size. | |
| 1033 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | |
| 1034 __ LoadImmediate(TMP, RawObject::SizeTag::kMaxSizeTag); | |
| 1035 __ sltu(CMPRES1, TMP, T2); // CMPRES1 = T2 > TMP ? 1 : 0. | |
| 1036 __ movn(T2, ZR, CMPRES1); // T2 = CMPRES1 != 0 ? 0 : T2. | |
| 1037 __ sll(TMP, T2, shift); // TMP = T2 << shift. | |
| 1038 __ movz(T2, TMP, CMPRES1); // T2 = CMPRES1 == 0 ? TMP : T2. | |
| 1039 | |
| 1040 // Get the class index and insert it into the tags. | |
| 1041 // T2: size and bit tags. | |
| 1042 __ LoadImmediate(TMP, RawObject::ClassIdTag::encode(cid)); | |
| 1043 __ or_(T2, T2, TMP); | |
| 1044 __ sw(T2, FieldAddress(V0, Context::tags_offset())); | |
| 1045 | |
| 1046 // Set up the number of context variables field. | |
| 1047 // V0: new object. | |
| 1048 // T1: number of context variables as integer value (not object). | |
| 1049 __ sw(T1, FieldAddress(V0, Context::num_variables_offset())); | |
| 1050 | |
| 1051 __ LoadObject(T7, Object::null_object()); | |
| 1052 | |
| 1053 // Initialize the context variables. | |
| 1054 // V0: new object. | |
| 1055 // T1: number of context variables. | |
| 1056 Label loop, loop_exit; | |
| 1057 __ blez(T1, &loop_exit); | |
| 1058 // Set up the parent field. | |
| 1059 __ delay_slot()->sw(T7, FieldAddress(V0, Context::parent_offset())); | |
| 1060 __ AddImmediate(T3, V0, Context::variable_offset(0) - kHeapObjectTag); | |
| 1061 __ sll(T1, T1, 2); | |
| 1062 __ Bind(&loop); | |
| 1063 __ addiu(T1, T1, Immediate(-kWordSize)); | |
| 1064 __ addu(T4, T3, T1); | |
| 1065 __ bgtz(T1, &loop); | |
| 1066 __ delay_slot()->sw(T7, Address(T4)); | |
| 1067 __ Bind(&loop_exit); | |
| 1068 | |
| 1069 // Done allocating and initializing the context. | |
| 1070 // V0: new object. | |
| 1071 __ Ret(); | |
| 1072 | |
| 1073 __ Bind(&slow_case); | |
| 1074 } | |
| 1075 // Create a stub frame as we are pushing some objects on the stack before | |
| 1076 // calling into the runtime. | |
| 1077 __ EnterStubFrame(); | |
| 1078 // Set up space on stack for return value. | |
| 1079 __ SmiTag(T1); | |
| 1080 __ addiu(SP, SP, Immediate(-2 * kWordSize)); | |
| 1081 __ LoadObject(TMP, Object::null_object()); | |
| 1082 __ sw(TMP, Address(SP, 1 * kWordSize)); // Store null. | |
| 1083 __ sw(T1, Address(SP, 0 * kWordSize)); | |
| 1084 __ CallRuntime(kAllocateContextRuntimeEntry, 1); // Allocate context. | |
| 1085 __ lw(V0, Address(SP, 1 * kWordSize)); // Get the new context. | |
| 1086 __ addiu(SP, SP, Immediate(2 * kWordSize)); // Pop argument and return. | |
| 1087 | |
| 1088 // V0: new object | |
| 1089 // Restore the frame pointer. | |
| 1090 __ LeaveStubFrameAndReturn(); | |
| 1091 } | |
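
The sltu/movn/movz triple in the size-tag computation above is a branchless select. A C-level restatement under the same inputs; the function name is ours:

```cpp
#include <stdint.h>

// CMPRES1 = (size > max_size_tag); T2 = CMPRES1 ? 0 : (size << shift).
// Objects too large for the inline size tag get a tag of zero.
static uint32_t SizeTagSketch(uint32_t size, uint32_t max_size_tag,
                              int shift) {
  return (size > max_size_tag) ? 0 : (size << shift);
}
```
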
| 1092 | |
| 1093 | |
| 1094 // Helper stub to implement Assembler::StoreIntoObject. | |
| 1095 // Input parameters: | |
| 1096 // T0: Address (i.e. object) being stored into. | |
| 1097 void StubCode::GenerateUpdateStoreBufferStub(Assembler* assembler) { | |
| 1098 // Save values being destroyed. | |
| 1099 __ Comment("UpdateStoreBufferStub"); | |
| 1100 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | |
| 1101 __ sw(T3, Address(SP, 2 * kWordSize)); | |
| 1102 __ sw(T2, Address(SP, 1 * kWordSize)); | |
| 1103 __ sw(T1, Address(SP, 0 * kWordSize)); | |
| 1104 | |
| 1105 Label add_to_buffer; | |
| 1106 // Check whether this object has already been remembered. Skip adding to the | |
| 1107 // store buffer if the object is in the store buffer already. | |
| 1108 // Spilled: T1, T2, T3. | |
| 1109 // T0: Address being stored. | |
| 1110 __ lw(T2, FieldAddress(T0, Object::tags_offset())); | |
| 1111 __ andi(CMPRES1, T2, Immediate(1 << RawObject::kRememberedBit)); | |
| 1112 __ beq(CMPRES1, ZR, &add_to_buffer); | |
| 1113 __ lw(T1, Address(SP, 0 * kWordSize)); | |
| 1114 __ lw(T2, Address(SP, 1 * kWordSize)); | |
| 1115 __ lw(T3, Address(SP, 2 * kWordSize)); | |
| 1116 __ addiu(SP, SP, Immediate(3 * kWordSize)); | |
| 1117 __ Ret(); | |
| 1118 | |
| 1119 __ Bind(&add_to_buffer); | |
| 1120 // Atomically set the remembered bit of the object header. | |
| 1121 Label retry; | |
| 1122 __ Bind(&retry); | |
| 1123 __ ll(T2, FieldAddress(T0, Object::tags_offset())); | |
| 1124 __ ori(T2, T2, Immediate(1 << RawObject::kRememberedBit)); | |
| 1125 __ sc(T2, FieldAddress(T0, Object::tags_offset())); | |
| 1126 // T2 = 1 on success, 0 on failure. | |
| 1127 __ beq(T2, ZR, &retry); | |
| 1128 | |
| 1129 // Load the StoreBuffer block out of the thread. Then load top_ out of the | |
| 1130 // StoreBufferBlock and add the address to the pointers_. | |
| 1131 __ lw(T1, Address(THR, Thread::store_buffer_block_offset())); | |
| 1132 __ lw(T2, Address(T1, StoreBufferBlock::top_offset())); | |
| 1133 __ sll(T3, T2, 2); | |
| 1134 __ addu(T3, T1, T3); | |
| 1135 __ sw(T0, Address(T3, StoreBufferBlock::pointers_offset())); | |
| 1136 | |
| 1137 // Increment top_ and check for overflow. | |
| 1138 // T2: top_ | |
| 1139 // T1: StoreBufferBlock | |
| 1140 Label L; | |
| 1141 __ addiu(T2, T2, Immediate(1)); | |
| 1142 __ sw(T2, Address(T1, StoreBufferBlock::top_offset())); | |
| 1143 __ addiu(CMPRES1, T2, Immediate(-StoreBufferBlock::kSize)); | |
| 1144 // Restore values. | |
| 1145 __ lw(T1, Address(SP, 0 * kWordSize)); | |
| 1146 __ lw(T2, Address(SP, 1 * kWordSize)); | |
| 1147 __ lw(T3, Address(SP, 2 * kWordSize)); | |
| 1148 __ beq(CMPRES1, ZR, &L); | |
| 1149 __ delay_slot()->addiu(SP, SP, Immediate(3 * kWordSize)); | |
| 1150 __ Ret(); | |
| 1151 | |
| 1152 // Handle overflow: Call the runtime leaf function. | |
| 1153 __ Bind(&L); | |
| 1154 // Set up frame, push callee-saved registers. | |
| 1155 | |
| 1156 __ EnterCallRuntimeFrame(1 * kWordSize); | |
| 1157 __ mov(A0, THR); | |
| 1158 __ CallRuntime(kStoreBufferBlockProcessRuntimeEntry, 1); | |
| 1159 __ Comment("UpdateStoreBufferStub return"); | |
| 1160 // Restore callee-saved registers, tear down frame. | |
| 1161 __ LeaveCallRuntimeFrame(); | |
| 1162 __ Ret(); | |
| 1163 } | |
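
The ll/ori/sc loop above is the MIPS load-linked/store-conditional idiom for atomically setting one header bit. A portable sketch of the same retry semantics using the GCC/Clang __atomic builtins instead of raw ll/sc; the function name is ours:

```cpp
#include <stdint.h>

// Retry until the OR of the remembered bit lands without a conflicting
// write, matching the beq-back-to-retry loop in the stub.
static void SetRememberedBitSketch(uint32_t* tags, uint32_t bit_position) {
  uint32_t old_tags = __atomic_load_n(tags, __ATOMIC_RELAXED);
  uint32_t new_tags;
  do {
    new_tags = old_tags | (1u << bit_position);
  } while (!__atomic_compare_exchange_n(tags, &old_tags, new_tags,
                                        /*weak=*/true, __ATOMIC_RELAXED,
                                        __ATOMIC_RELAXED));
}
```
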
| 1164 | |
| 1165 | |
| 1166 // Called for inline allocation of objects. | |
| 1167 // Input parameters: | |
| 1168 // RA : return address. | |
| 1169 // SP + 0 : type arguments object (only if class is parameterized). | |
| 1170 void StubCode::GenerateAllocationStubForClass(Assembler* assembler, | |
| 1171 const Class& cls) { | |
| 1172 __ Comment("AllocationStubForClass"); | |
| 1173 // The generated code is different if the class is parameterized. | |
| 1174 const bool is_cls_parameterized = cls.NumTypeArguments() > 0; | |
| 1175 ASSERT(!is_cls_parameterized || | |
| 1176 (cls.type_arguments_field_offset() != Class::kNoTypeArguments)); | |
| 1177 // kInlineInstanceSize is a constant used as a threshold for determining | |
| 1178 // when the object initialization should be done as a loop or as | |
| 1179 // straight-line code. | |
| 1180 const int kInlineInstanceSize = 12; | |
| 1181 const intptr_t instance_size = cls.instance_size(); | |
| 1182 ASSERT(instance_size > 0); | |
| 1183 if (is_cls_parameterized) { | |
| 1184 __ lw(T1, Address(SP, 0 * kWordSize)); | |
| 1185 // T1: type arguments. | |
| 1186 } | |
| 1187 Isolate* isolate = Isolate::Current(); | |
| 1188 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && | |
| 1189 !cls.TraceAllocation(isolate)) { | |
| 1190 Label slow_case; | |
| 1191 // Allocate the object and update top to point to | |
| 1192 // next object start and initialize the allocated object. | |
| 1193 // T1: instantiated type arguments (if is_cls_parameterized). | |
| 1194 Heap::Space space = Heap::kNew; | |
| 1195 __ lw(T5, Address(THR, Thread::heap_offset())); | |
| 1196 __ lw(T2, Address(T5, Heap::TopOffset(space))); | |
| 1197 __ LoadImmediate(T4, instance_size); | |
| 1198 __ addu(T3, T2, T4); | |
| 1199 // Check if the allocation fits into the remaining space. | |
| 1200 // T2: potential new object start. | |
| 1201 // T3: potential next object start. | |
| 1202 // T5: heap. | |
| 1203 __ lw(CMPRES1, Address(T5, Heap::EndOffset(space))); | |
| 1204 if (FLAG_use_slow_path) { | |
| 1205 __ b(&slow_case); | |
| 1206 } else { | |
| 1207 __ BranchUnsignedGreaterEqual(T3, CMPRES1, &slow_case); | |
| 1208 } | |
| 1209 // Successfully allocated the object(s); now update top to point to | |
| 1210 // next object start and initialize the object. | |
| 1211 __ sw(T3, Address(T5, Heap::TopOffset(space))); | |
| 1212 NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), T5, space)); | |
| 1213 | |
| 1214 // T2: new object start. | |
| 1215 // T3: next object start. | |
| 1216 // T1: new object type arguments (if is_cls_parameterized). | |
| 1217 // Set the tags. | |
| 1218 uint32_t tags = 0; | |
| 1219 tags = RawObject::SizeTag::update(instance_size, tags); | |
| 1220 ASSERT(cls.id() != kIllegalCid); | |
| 1221 tags = RawObject::ClassIdTag::update(cls.id(), tags); | |
| 1222 __ LoadImmediate(T0, tags); | |
| 1223 __ sw(T0, Address(T2, Instance::tags_offset())); | |
| 1224 | |
| 1225 __ LoadObject(T7, Object::null_object()); | |
| 1226 | |
| 1227 // Initialize the remaining words of the object. | |
| 1228 // T2: new object start. | |
| 1229 // T3: next object start. | |
| 1230 // T1: new object type arguments (if is_cls_parameterized). | |
| 1231 // First try inlining the initialization without a loop. | |
| 1232 if (instance_size < (kInlineInstanceSize * kWordSize)) { | |
| 1233 // Check if the object contains any non-header fields. | |
| 1234 // Small objects are initialized using a consecutive set of writes. | |
| 1235 for (intptr_t current_offset = Instance::NextFieldOffset(); | |
| 1236 current_offset < instance_size; current_offset += kWordSize) { | |
| 1237 __ sw(T7, Address(T2, current_offset)); | |
| 1238 } | |
| 1239 } else { | |
| 1240 __ addiu(T4, T2, Immediate(Instance::NextFieldOffset())); | |
| 1241 // Loop until the whole object is initialized. | |
| 1242 // T2: new object. | |
| 1243 // T3: next object start. | |
| 1244 // T4: next word to be initialized. | |
| 1245 // T1: new object type arguments (if is_cls_parameterized). | |
| 1246 Label loop, loop_exit; | |
| 1247 __ BranchUnsignedGreaterEqual(T4, T3, &loop_exit); | |
| 1248 __ Bind(&loop); | |
| 1249 __ addiu(T4, T4, Immediate(kWordSize)); | |
| 1250 __ bne(T4, T3, &loop); | |
| 1251 __ delay_slot()->sw(T7, Address(T4, -kWordSize)); | |
| 1252 __ Bind(&loop_exit); | |
| 1253 } | |
| 1254 if (is_cls_parameterized) { | |
| 1255 // T1: new object type arguments. | |
| 1256 // Set the type arguments in the new object. | |
| 1257 __ sw(T1, Address(T2, cls.type_arguments_field_offset())); | |
| 1258 } | |
| 1259 // Done allocating and initializing the instance. | |
| 1260 // T2: new object still missing its heap tag. | |
| 1261 __ Ret(); | |
| 1262 __ delay_slot()->addiu(V0, T2, Immediate(kHeapObjectTag)); | |
| 1263 | |
| 1264 __ Bind(&slow_case); | |
| 1265 } | |
| 1266 // If is_cls_parameterized: | |
| 1267 // T1: new object type arguments (instantiated or not). | |
| 1268 // Create a stub frame as we are pushing some objects on the stack before | |
| 1269 // calling into the runtime. | |
| 1270 __ EnterStubFrame(); // Uses pool pointer to pass cls to runtime. | |
| 1271 __ LoadObject(TMP, cls); | |
| 1272 | |
| 1273 __ addiu(SP, SP, Immediate(-3 * kWordSize)); | |
| 1274 // Space on stack for return value. | |
| 1275 __ LoadObject(T7, Object::null_object()); | |
| 1276 __ sw(T7, Address(SP, 2 * kWordSize)); | |
| 1277 __ sw(TMP, Address(SP, 1 * kWordSize)); // Class of object to be allocated. | |
| 1278 | |
| 1279 if (is_cls_parameterized) { | |
| 1280 // Push type arguments of the object to be allocated. | |
| 1281 __ sw(T1, Address(SP, 0 * kWordSize)); | |
| 1282 } else { | |
| 1283 // Push null type arguments. | |
| 1284 __ sw(T7, Address(SP, 0 * kWordSize)); | |
| 1285 } | |
| 1286 __ CallRuntime(kAllocateObjectRuntimeEntry, 2); // Allocate object. | |
| 1287 __ Comment("AllocationStubForClass return"); | |
| 1288 // Pop result (newly allocated object). | |
| 1289 __ lw(V0, Address(SP, 2 * kWordSize)); | |
| 1290 __ addiu(SP, SP, Immediate(3 * kWordSize)); // Pop arguments. | |
| 1291 // V0: new object | |
| 1292 // Restore the frame pointer and return. | |
| 1293 __ LeaveStubFrameAndReturn(RA); | |
| 1294 } | |


// Called for invoking "dynamic noSuchMethod(Invocation invocation)" function
// from the entry code of a Dart function after an error in the passed
// argument name or count is detected.
// Input parameters:
// RA : return address.
// SP : address of last argument.
// S4 : arguments descriptor array.
void StubCode::GenerateCallClosureNoSuchMethodStub(Assembler* assembler) {
  __ EnterStubFrame();

  // Load the receiver.
  __ lw(A1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(TMP, A1, 1);  // A1 is a Smi.
  __ addu(TMP, FP, TMP);
  __ lw(T6, Address(TMP, kParamEndSlotFromFp * kWordSize));

  // Push space for the return value.
  // Push the receiver.
  // Push arguments descriptor array.
  const intptr_t kNumArgs = 3;
  __ addiu(SP, SP, Immediate(-kNumArgs * kWordSize));
  __ sw(ZR, Address(SP, 2 * kWordSize));
  __ sw(T6, Address(SP, 1 * kWordSize));
  __ sw(S4, Address(SP, 0 * kWordSize));

  // Adjust arguments count.
  __ lw(TMP, FieldAddress(S4, ArgumentsDescriptor::type_args_len_offset()));
  Label args_count_ok;
  __ BranchEqual(TMP, Immediate(0), &args_count_ok);
  __ AddImmediate(A1, A1, Smi::RawValue(1));  // Include the type arguments.
  __ Bind(&args_count_ok);

  // A1: Smi-tagged arguments array length.
  PushArgumentsArray(assembler);

  __ CallRuntime(kInvokeClosureNoSuchMethodRuntimeEntry, kNumArgs);
  // noSuchMethod on closures always throws an error, so it will never return.
  __ break_(0);
}
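
// Stack layout inside the stub frame above, before PushArgumentsArray
// (offsets relative to SP after the addiu):
//   SP + 2 * kWordSize : ZR  (space for the return value)
//   SP + 1 * kWordSize : T6  (receiver)
//   SP + 0 * kWordSize : S4  (arguments descriptor array)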


// T0: function object.
// S5: inline cache data object.
// Cannot use function object from ICData as it may be the inlined
// function and not the top-scope function.
void StubCode::GenerateOptimizedUsageCounterIncrement(Assembler* assembler) {
  __ Comment("OptimizedUsageCounterIncrement");
  Register ic_reg = S5;
  Register func_reg = T0;
  if (FLAG_trace_optimized_ic_calls) {
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-4 * kWordSize));
    __ sw(T0, Address(SP, 3 * kWordSize));
    __ sw(S5, Address(SP, 2 * kWordSize));
    __ sw(ic_reg, Address(SP, 1 * kWordSize));    // Argument.
    __ sw(func_reg, Address(SP, 0 * kWordSize));  // Argument.
    __ CallRuntime(kTraceICCallRuntimeEntry, 2);
    __ lw(S5, Address(SP, 2 * kWordSize));
    __ lw(T0, Address(SP, 3 * kWordSize));
    __ addiu(SP, SP, Immediate(4 * kWordSize));  // Discard arguments.
    __ LeaveStubFrame();
  }
  __ lw(T7, FieldAddress(func_reg, Function::usage_counter_offset()));
  __ addiu(T7, T7, Immediate(1));
  __ sw(T7, FieldAddress(func_reg, Function::usage_counter_offset()));
}


// Loads function into 'temp_reg'.
void StubCode::GenerateUsageCounterIncrement(Assembler* assembler,
                                             Register temp_reg) {
  if (FLAG_optimization_counter_threshold >= 0) {
    __ Comment("UsageCounterIncrement");
    Register ic_reg = S5;
    Register func_reg = temp_reg;
    ASSERT(temp_reg == T0);
    __ Comment("Increment function counter");
    __ lw(func_reg, FieldAddress(ic_reg, ICData::owner_offset()));
    __ lw(T1, FieldAddress(func_reg, Function::usage_counter_offset()));
    __ addiu(T1, T1, Immediate(1));
    __ sw(T1, FieldAddress(func_reg, Function::usage_counter_offset()));
  }
}
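
// Note that Function::usage_counter is a raw machine integer, hence the
// Immediate(1) increments above, whereas the per-check counts stored in the
// ICData arrays below are Smis and are bumped by Smi::RawValue(1).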


// Note: S5 must be preserved.
// Attempt a quick Smi operation for known operations ('kind'). The ICData
// must have been primed with a Smi/Smi check that will be used for counting
// the invocations.
static void EmitFastSmiOp(Assembler* assembler,
                          Token::Kind kind,
                          intptr_t num_args,
                          Label* not_smi_or_overflow) {
  __ Comment("Fast Smi op");
  ASSERT(num_args == 2);
  __ lw(T0, Address(SP, 0 * kWordSize));  // Left.
  __ lw(T1, Address(SP, 1 * kWordSize));  // Right.
  __ or_(CMPRES1, T0, T1);
  __ andi(CMPRES1, CMPRES1, Immediate(kSmiTagMask));
  __ bne(CMPRES1, ZR, not_smi_or_overflow);
  switch (kind) {
    case Token::kADD: {
      __ AdduDetectOverflow(V0, T1, T0, CMPRES1);  // Add.
      __ bltz(CMPRES1, not_smi_or_overflow);       // Branch on overflow.
      break;
    }
    case Token::kSUB: {
      __ SubuDetectOverflow(V0, T1, T0, CMPRES1);  // Subtract.
      __ bltz(CMPRES1, not_smi_or_overflow);       // Branch on overflow.
      break;
    }
    case Token::kEQ: {
      Label true_label, done;
      __ beq(T1, T0, &true_label);
      __ LoadObject(V0, Bool::False());
      __ b(&done);
      __ Bind(&true_label);
      __ LoadObject(V0, Bool::True());
      __ Bind(&done);
      break;
    }
    default:
      UNIMPLEMENTED();
  }
  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with check entries: classes and target functions.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.
#if defined(DEBUG)
  // Check that the first entry is for Smi/Smi.
  Label error, ok;
  const int32_t imm_smi_cid = reinterpret_cast<int32_t>(Smi::New(kSmiCid));
  __ lw(T4, Address(T0));
  __ BranchNotEqual(T4, Immediate(imm_smi_cid), &error);
  __ lw(T4, Address(T0, kWordSize));
  __ BranchEqual(T4, Immediate(imm_smi_cid), &ok);
  __ Bind(&error);
  __ Stop("Incorrect IC data");
  __ Bind(&ok);
#endif
  if (FLAG_optimization_counter_threshold >= 0) {
    // Update counter, ignore overflow.
    const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  __ Ret();
}
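
// A worked example of the tag check in EmitFastSmiOp: Smis carry a 0 in the
// low tag bit, so for left = Smi 3 (raw 6) and right = Smi 5 (raw 10),
// (left | right) & kSmiTagMask == 0 and the fast path proceeds; a heap
// object operand (low bit 1) forces the branch to not_smi_or_overflow.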


// Generate inline cache check for 'num_args'.
// RA: return address.
// S5: Inline cache data object.
// Control flow:
// - If receiver is null -> jump to IC miss.
// - If receiver is Smi -> load Smi class.
// - If receiver is not-Smi -> load receiver's class.
// - Check if 'num_args' (including receiver) match any IC data group.
// - Match found -> jump to target.
// - Match not found -> jump to IC miss.
void StubCode::GenerateNArgsCheckInlineCacheStub(
    Assembler* assembler,
    intptr_t num_args,
    const RuntimeEntry& handle_ic_miss,
    Token::Kind kind,
    bool optimized) {
  __ Comment("NArgsCheckInlineCacheStub");
  ASSERT(num_args == 1 || num_args == 2);
#if defined(DEBUG)
  {
    Label ok;
    // Check that the IC data array has NumArgsTested() == num_args.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ lw(T0, FieldAddress(S5, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask()));
    __ BranchEqual(T0, Immediate(num_args), &ok);
    __ Stop("Incorrect stub for IC data");
    __ Bind(&ok);
  }
#endif  // DEBUG

  Label stepping, done_stepping;
  if (FLAG_support_debugger && !optimized) {
    __ Comment("Check single stepping");
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  Label not_smi_or_overflow;
  if (kind != Token::kILLEGAL) {
    EmitFastSmiOp(assembler, kind, num_args, &not_smi_or_overflow);
  }
  __ Bind(&not_smi_or_overflow);

  __ Comment("Extract ICData initial values and receiver cid");
  // Load arguments descriptor into S4.
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  // Preserve return address, since RA is needed for subroutine call.
  __ mov(T2, RA);
  // Loop that checks if there is an IC data match.
  Label loop, found, miss;
  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with check entries: classes and target functions.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.

  // Get the receiver's class ID (first read number of arguments from
  // arguments descriptor array and then access the receiver from the stack).
  __ lw(T1, FieldAddress(S4, ArgumentsDescriptor::count_offset()));
  __ sll(T5, T1, 1);  // T1 is a Smi; scale it to a byte offset.
  __ addu(T5, T5, SP);
  __ lw(T3, Address(T5, -kWordSize));
  __ LoadTaggedClassIdMayBeSmi(T3, T3);

  if (num_args == 2) {
    __ lw(T5, Address(T5, -2 * kWordSize));
    __ LoadTaggedClassIdMayBeSmi(T5, T5);
  }

  const intptr_t entry_size = ICData::TestEntryLengthFor(num_args) * kWordSize;
  // T1: argument_count (smi).
  // T3: receiver's class ID (smi).
  // T5: first argument's class ID (smi).

  // The generic version of the stub is generated only once, so we unroll its
  // check loop more than the others.
  const bool optimize = kind == Token::kILLEGAL;

  __ Comment("ICData loop");
  __ Bind(&loop);
  for (int unroll = optimize ? 4 : 2; unroll >= 0; unroll--) {
    __ lw(T4, Address(T0, 0));
    if (num_args == 1) {
      __ beq(T3, T4, &found);  // IC hit.
    } else {
      ASSERT(num_args == 2);
      Label update;
      __ bne(T3, T4, &update);  // Continue.
      __ lw(T4, Address(T0, kWordSize));
      __ beq(T5, T4, &found);  // IC hit.
      __ Bind(&update);
    }

    __ AddImmediate(T0, entry_size);  // Next entry.
    if (unroll == 0) {
      __ BranchNotEqual(T4, Immediate(Smi::RawValue(kIllegalCid)),
                        &loop);  // Done?
    } else {
      __ BranchEqual(T4, Immediate(Smi::RawValue(kIllegalCid)),
                     &miss);  // Done?
    }
  }
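
  // The C++ for-loop above emits unroll + 1 copies of the check sequence
  // into the stub; only the copy with unroll == 0 branches back to &loop,
  // so most cache probes complete without a taken backward branch.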

  __ Bind(&miss);
  __ Comment("IC miss");
  // Restore return address.
  __ mov(RA, T2);

  // Compute address of arguments (first read number of arguments from
  // arguments descriptor array and then compute address on the stack).
  // T1: argument_count (smi).
  __ addiu(T1, T1, Immediate(Smi::RawValue(-1)));
  __ sll(T1, T1, 1);  // T1 is a Smi.
  __ addu(T1, SP, T1);
  // T1: address of receiver.
  // Create a stub frame as we are pushing some objects on the stack before
  // calling into the runtime.
  __ EnterStubFrame();
  // Preserve IC data object and arguments descriptor array and
  // setup space on stack for result (target code object).
  int num_slots = num_args + 4;
  __ addiu(SP, SP, Immediate(-num_slots * kWordSize));
  __ sw(S5, Address(SP, (num_slots - 1) * kWordSize));
  __ sw(S4, Address(SP, (num_slots - 2) * kWordSize));
  __ sw(ZR, Address(SP, (num_slots - 3) * kWordSize));
  // Push call arguments.
  for (intptr_t i = 0; i < num_args; i++) {
    __ lw(TMP, Address(T1, -i * kWordSize));
    __ sw(TMP, Address(SP, (num_slots - i - 4) * kWordSize));
  }
  // Pass IC data object.
  __ sw(S5, Address(SP, (num_slots - num_args - 4) * kWordSize));
  __ CallRuntime(handle_ic_miss, num_args + 1);
  __ Comment("NArgsCheckInlineCacheStub return");
  // Pop returned function object into T3.
  // Restore arguments descriptor array and IC data array.
  __ lw(T3, Address(SP, (num_slots - 3) * kWordSize));
  __ lw(S4, Address(SP, (num_slots - 2) * kWordSize));
  __ lw(S5, Address(SP, (num_slots - 1) * kWordSize));
  // Remove the call arguments pushed earlier, including the IC data object
  // and the arguments descriptor array.
  __ addiu(SP, SP, Immediate(num_slots * kWordSize));
  __ RestoreCodePointer();
  __ LeaveStubFrame();

  Label call_target_function;
  if (!FLAG_lazy_dispatchers) {
    __ mov(T0, T3);
    GenerateDispatcherCode(assembler, &call_target_function);
  } else {
    __ b(&call_target_function);
  }

  __ Bind(&found);
  __ mov(RA, T2);  // Restore return address if found.
  __ Comment("Update caller's counter");
  // T0: pointer to an IC data check group.
  const intptr_t target_offset = ICData::TargetIndexFor(num_args) * kWordSize;
  const intptr_t count_offset = ICData::CountIndexFor(num_args) * kWordSize;
  __ lw(T3, Address(T0, target_offset));

  if (FLAG_optimization_counter_threshold >= 0) {
    // Update counter, ignore overflow.
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  __ Comment("Call target");
  __ Bind(&call_target_function);
  // T0 <- T3: Target function.
  __ mov(T0, T3);
  __ lw(T4, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T4);

  // Call single step callback in debugger.
  if (FLAG_support_debugger && !optimized) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(S5, Address(SP, 1 * kWordSize));  // Preserve IC data.
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ lw(S5, Address(SP, 1 * kWordSize));
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}


// Use inline cache data array to invoke the target or continue in inline
// cache miss handler. Stub for 1-argument check (receiver class).
// RA: Return address.
// S5: Inline cache data object.
// Inline cache data object structure:
// 0: function-name
// 1: N, number of arguments checked.
// 2 .. (length - 1): group of checks, each check containing:
//   - N classes.
//   - 1 target function.
void StubCode::GenerateOneArgCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 1, kInlineCacheMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}
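
// For example (a sketch, assuming the layout documented above plus the count
// slot used by CountIndexFor), a one-argument cache that has seen Smi and
// Double receivers would contain groups like:
//   ..., kSmiCid, target0, count0, kDoubleCid, target1, count1, ...
// with the group list terminated by a kIllegalCid sentinel.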


void StubCode::GenerateTwoArgsCheckInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(assembler, 2,
                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
                                    Token::kILLEGAL);
}


void StubCode::GenerateSmiAddInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kADD);
}


void StubCode::GenerateSmiSubInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kSUB);
}


void StubCode::GenerateSmiEqualInlineCacheStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kInlineCacheMissHandlerTwoArgsRuntimeEntry, Token::kEQ);
}


void StubCode::GenerateOneArgOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
  GenerateNArgsCheckInlineCacheStub(assembler, 1,
                                    kInlineCacheMissHandlerOneArgRuntimeEntry,
                                    Token::kILLEGAL, true /* optimized */);
}


void StubCode::GenerateTwoArgsOptimizedCheckInlineCacheStub(
    Assembler* assembler) {
  GenerateOptimizedUsageCounterIncrement(assembler);
  GenerateNArgsCheckInlineCacheStub(assembler, 2,
                                    kInlineCacheMissHandlerTwoArgsRuntimeEntry,
                                    Token::kILLEGAL, true /* optimized */);
}


// Intermediary stub between a static call and its target. ICData contains
// the target function and the call count.
// S5: ICData
void StubCode::GenerateZeroArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  __ Comment("UnoptimizedStaticCallStub");
#if defined(DEBUG)
  {
    Label ok;
    // Check that the IC data array has NumArgsTested() == 0.
    // 'NumArgsTested' is stored in the least significant bits of 'state_bits'.
    __ lw(T0, FieldAddress(S5, ICData::state_bits_offset()));
    ASSERT(ICData::NumArgsTestedShift() == 0);  // No shift needed.
    __ andi(T0, T0, Immediate(ICData::NumArgsTestedMask()));
    __ beq(T0, ZR, &ok);
    __ Stop("Incorrect IC data for unoptimized static call");
    __ Bind(&ok);
  }
#endif  // DEBUG

  // Check single stepping.
  Label stepping, done_stepping;
  if (FLAG_support_debugger) {
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  // S5: IC data object (preserved).
  __ lw(T0, FieldAddress(S5, ICData::ic_data_offset()));
  // T0: ic_data_array with entries: target functions and count.
  __ AddImmediate(T0, Array::data_offset() - kHeapObjectTag);
  // T0: points directly to the first ic data array element.
  const intptr_t target_offset = ICData::TargetIndexFor(0) * kWordSize;
  const intptr_t count_offset = ICData::CountIndexFor(0) * kWordSize;

  if (FLAG_optimization_counter_threshold >= 0) {
    // Increment count for this call, ignore overflow.
    __ lw(T4, Address(T0, count_offset));
    __ AddImmediate(T4, T4, Smi::RawValue(1));
    __ sw(T4, Address(T0, count_offset));
  }

  // Load arguments descriptor into S4.
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));

  // Get function and call it, if possible.
  __ lw(T0, Address(T0, target_offset));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T4, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T4);

  // Call single step callback in debugger.
  if (FLAG_support_debugger) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-2 * kWordSize));
    __ sw(S5, Address(SP, 1 * kWordSize));  // Preserve IC data.
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ lw(S5, Address(SP, 1 * kWordSize));
    __ addiu(SP, SP, Immediate(2 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}


void StubCode::GenerateOneArgUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 1, kStaticCallMissHandlerOneArgRuntimeEntry, Token::kILLEGAL);
}


void StubCode::GenerateTwoArgsUnoptimizedStaticCallStub(Assembler* assembler) {
  GenerateUsageCounterIncrement(assembler, T0);
  GenerateNArgsCheckInlineCacheStub(
      assembler, 2, kStaticCallMissHandlerTwoArgsRuntimeEntry, Token::kILLEGAL);
}


// Stub for compiling a function and jumping to the compiled code.
// S5: IC-Data (for methods).
// S4: Arguments descriptor.
// T0: Function.
void StubCode::GenerateLazyCompileStub(Assembler* assembler) {
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(S5, Address(SP, 2 * kWordSize));  // Preserve IC data object.
  __ sw(S4, Address(SP, 1 * kWordSize));  // Preserve args descriptor array.
  __ sw(T0, Address(SP, 0 * kWordSize));  // Pass function.
  __ CallRuntime(kCompileFunctionRuntimeEntry, 1);
  __ lw(T0, Address(SP, 0 * kWordSize));  // Restore function.
  __ lw(S4, Address(SP, 1 * kWordSize));  // Restore args descriptor array.
  __ lw(S5, Address(SP, 2 * kWordSize));  // Restore IC data array.
  __ addiu(SP, SP, Immediate(3 * kWordSize));
  __ LeaveStubFrame();

  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T2, FieldAddress(T0, Function::entry_point_offset()));
  __ jr(T2);
}
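
// After kCompileFunctionRuntimeEntry returns, Function::code_offset() refers
// to the freshly compiled Code object, so the tail jump above enters the new
// code exactly as a direct call to the function would have.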


// S5: Contains an ICData.
void StubCode::GenerateICCallBreakpointStub(Assembler* assembler) {
  __ Comment("ICCallBreakpoint stub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-2 * kWordSize));
  __ sw(S5, Address(SP, 1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));

  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

  __ lw(S5, Address(SP, 1 * kWordSize));
  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(2 * kWordSize));
  __ LeaveStubFrame();
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T0);
}
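
// The zero stored above reserves the result slot: the breakpoint handler
// fills it with the original Code object that the breakpoint displaced, and
// the stub resumes execution by jumping to that code's entry point.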


void StubCode::GenerateRuntimeCallBreakpointStub(Assembler* assembler) {
  __ Comment("RuntimeCallBreakpoint stub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-1 * kWordSize));
  __ sw(ZR, Address(SP, 0 * kWordSize));

  __ CallRuntime(kBreakpointRuntimeHandlerRuntimeEntry, 0);

  __ lw(CODE_REG, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(1 * kWordSize));  // Match the one-word reservation above.
  __ LeaveStubFrame();
  __ lw(T0, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T0);
}


// Called only from unoptimized code. All relevant registers have been saved.
// RA: return address.
void StubCode::GenerateDebugStepCheckStub(Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  __ LoadIsolate(T0);
  __ lbu(T0, Address(T0, Isolate::single_step_offset()));
  __ BranchNotEqual(T0, Immediate(0), &stepping);
  __ Bind(&done_stepping);

  __ Ret();

  // Call single step callback in debugger.
  __ Bind(&stepping);
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-1 * kWordSize));
  __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
  __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
  __ lw(RA, Address(SP, 0 * kWordSize));
  __ addiu(SP, SP, Immediate(1 * kWordSize));
  __ LeaveStubFrame();
  __ b(&done_stepping);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: instantiator type arguments (only if n == 4, can be raw_null).
// A2: function type arguments (only if n == 4, can be raw_null).
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
static void GenerateSubtypeNTestCacheStub(Assembler* assembler, int n) {
  __ Comment("SubtypeNTestCacheStub");
  ASSERT((n == 1) || (n == 2) || (n == 4));
  if (n > 1) {
    __ LoadClass(T0, A0);
    // Compute instance type arguments into T1.
    Label has_no_type_arguments;
    __ LoadObject(T1, Object::null_object());
    __ lw(T2, FieldAddress(
                  T0, Class::type_arguments_field_offset_in_words_offset()));
    __ BranchEqual(T2, Immediate(Class::kNoTypeArguments),
                   &has_no_type_arguments);
    __ sll(T2, T2, 2);
    __ addu(T2, A0, T2);  // T2 <- A0 + T2 * 4.
    __ lw(T1, FieldAddress(T2, 0));
    __ Bind(&has_no_type_arguments);
  }
  __ LoadClassId(T0, A0);
  // A0: instance.
  // A1: instantiator type arguments (only if n == 4, can be raw_null).
  // A2: function type arguments (only if n == 4, can be raw_null).
  // A3: SubtypeTestCache.
  // T0: instance class id.
  // T1: instance type arguments (null if none), used only if n > 1.
  __ lw(T2, FieldAddress(A3, SubtypeTestCache::cache_offset()));
  __ AddImmediate(T2, Array::data_offset() - kHeapObjectTag);

  __ LoadObject(T7, Object::null_object());
  Label loop, found, not_found, next_iteration;
  // T0: instance class id.
  // T1: instance type arguments (still null if closure).
  // T2: entry start.
  // T7: null.
  __ SmiTag(T0);
  __ BranchNotEqual(T0, Immediate(Smi::RawValue(kClosureCid)), &loop);
  __ lw(T1, FieldAddress(A0, Closure::function_type_arguments_offset()));
  __ bne(T1, T7, &not_found);  // Cache cannot be used for generic closures.
  __ lw(T1, FieldAddress(A0, Closure::instantiator_type_arguments_offset()));
  __ lw(T0, FieldAddress(A0, Closure::function_offset()));
  // T0: instance class id as Smi or function.
  __ Bind(&loop);
  __ lw(T3,
        Address(T2, kWordSize * SubtypeTestCache::kInstanceClassIdOrFunction));
  __ beq(T3, T7, &not_found);
  if (n == 1) {
    __ beq(T3, T0, &found);
  } else {
    __ bne(T3, T0, &next_iteration);
    __ lw(T3,
          Address(T2, kWordSize * SubtypeTestCache::kInstanceTypeArguments));
    if (n == 2) {
      __ beq(T3, T1, &found);
    } else {
      __ bne(T3, T1, &next_iteration);
      __ lw(T3, Address(T2, kWordSize *
                                SubtypeTestCache::kInstantiatorTypeArguments));
      __ bne(T3, A1, &next_iteration);
      __ lw(T3,
            Address(T2, kWordSize * SubtypeTestCache::kFunctionTypeArguments));
      __ beq(T3, A2, &found);
    }
  }
  __ Bind(&next_iteration);
  __ b(&loop);
  __ delay_slot()->addiu(
      T2, T2, Immediate(kWordSize * SubtypeTestCache::kTestEntryLength));
  // Fall through to not found.
  __ Bind(&not_found);
  __ Ret();
  __ delay_slot()->mov(V0, T7);

  __ Bind(&found);
  __ Ret();
  __ delay_slot()->lw(V0,
                      Address(T2, kWordSize * SubtypeTestCache::kTestResult));
}
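
// Each cache entry scanned above is SubtypeTestCache::kTestEntryLength words
// long: (instance class id or function, instance type arguments,
// instantiator type arguments, function type arguments, test result), and a
// null in the first slot terminates the search.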


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: unused.
// A2: unused.
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype1TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 1);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: unused.
// A2: unused.
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype2TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 2);
}


// Used to check class and type arguments. Arguments passed in registers:
// RA: return address.
// A0: instance (must be preserved).
// A1: instantiator type arguments (can be raw_null).
// A2: function type arguments (can be raw_null).
// A3: SubtypeTestCache.
// Result in V0: null -> not found, otherwise result (true or false).
void StubCode::GenerateSubtype4TestCacheStub(Assembler* assembler) {
  GenerateSubtypeNTestCacheStub(assembler, 4);
}


// Returns the current stack pointer address, used for stack alignment
// checks.
void StubCode::GenerateGetCStackPointerStub(Assembler* assembler) {
  __ Ret();
  __ delay_slot()->mov(V0, SP);
}


// Jump to the exception or error handler.
// RA: return address.
// A0: program_counter.
// A1: stack_pointer.
// A2: frame_pointer.
// A3: thread.
// Does not return.
void StubCode::GenerateJumpToFrameStub(Assembler* assembler) {
  ASSERT(kExceptionObjectReg == V0);
  ASSERT(kStackTraceObjectReg == V1);
  __ mov(FP, A2);   // Frame pointer.
  __ mov(THR, A3);  // Thread.
  // Set tag.
  __ LoadImmediate(A2, VMTag::kDartTagId);
  __ sw(A2, Assembler::VMTagAddress());
  // Clear top exit frame.
  __ sw(ZR, Address(THR, Thread::top_exit_frame_info_offset()));
  // Restore the code and pool pointers.
  __ RestoreCodePointer();
  __ LoadPoolPointer();
  __ jr(A0);  // Jump to the program counter.
  __ delay_slot()->mov(SP, A1);  // Stack pointer.
}


// Run an exception handler. Execution comes from the JumpToFrame
// stub or from the simulator.
//
// The arguments are stored in the Thread object.
// Does not return.
void StubCode::GenerateRunExceptionHandlerStub(Assembler* assembler) {
  __ lw(A0, Address(THR, Thread::resume_pc_offset()));
  __ LoadImmediate(A2, 0);

  // Load the exception from the current thread.
  Address exception_addr(THR, Thread::active_exception_offset());
  __ lw(V0, exception_addr);
  __ sw(A2, exception_addr);

  // Load the stacktrace from the current thread.
  Address stacktrace_addr(THR, Thread::active_stacktrace_offset());
  __ lw(V1, stacktrace_addr);

  __ jr(A0);  // Jump to continuation point.
  __ delay_slot()->sw(A2, stacktrace_addr);
}


// Deoptimize a frame on the call stack before rewinding.
// The arguments are stored in the Thread object.
// No result.
void StubCode::GenerateDeoptForRewindStub(Assembler* assembler) {
  // Push the zap value instead of CODE_REG.
  __ LoadImmediate(TMP, kZapCodeReg);
  __ Push(TMP);

  // Load the deopt pc into RA.
  __ lw(RA, Address(THR, Thread::resume_pc_offset()));
  GenerateDeoptimizationSequence(assembler, kEagerDeopt);

  // After we have deoptimized, jump to the correct frame.
  __ EnterStubFrame();
  __ CallRuntime(kRewindPostDeoptRuntimeEntry, 0);
  __ LeaveStubFrame();
  __ break_(0);
}


// Calls to the runtime to optimize the given function.
// T0: function to be reoptimized.
// S4: argument descriptor (preserved).
void StubCode::GenerateOptimizeFunctionStub(Assembler* assembler) {
  __ Comment("OptimizeFunctionStub");
  __ EnterStubFrame();
  __ addiu(SP, SP, Immediate(-3 * kWordSize));
  __ sw(S4, Address(SP, 2 * kWordSize));
  // Setup space on stack for the return value.
  __ sw(ZR, Address(SP, 1 * kWordSize));
  __ sw(T0, Address(SP, 0 * kWordSize));
  __ CallRuntime(kOptimizeInvokedFunctionRuntimeEntry, 1);
  __ Comment("OptimizeFunctionStub return");
  __ lw(T0, Address(SP, 1 * kWordSize));  // Get Function object.
  __ lw(S4, Address(SP, 2 * kWordSize));  // Restore argument descriptor.
  __ addiu(SP, SP, Immediate(3 * kWordSize));  // Discard arguments.

  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ LeaveStubFrameAndReturn(T1);
  __ break_(0);
}


// Does an identical check (object references are equal or not equal) with
// special checks for boxed numbers.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
// Note: A Mint cannot contain a value that would fit in a Smi, and a Bigint
// cannot contain a value that fits in a Mint or a Smi.
static void GenerateIdenticalWithNumberCheckStub(Assembler* assembler,
                                                 const Register left,
                                                 const Register right,
                                                 const Register temp1,
                                                 const Register temp2) {
  __ Comment("IdenticalWithNumberCheckStub");
  Label reference_compare, done, check_mint, check_bigint;
  // If either of the arguments is a Smi, do a reference compare.
  __ andi(temp1, left, Immediate(kSmiTagMask));
  __ beq(temp1, ZR, &reference_compare);
  __ andi(temp1, right, Immediate(kSmiTagMask));
  __ beq(temp1, ZR, &reference_compare);

  // Value compare for two doubles.
  __ LoadImmediate(temp1, kDoubleCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &check_mint);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  // Double values bitwise compare.
  __ lw(temp1, FieldAddress(left, Double::value_offset() + 0 * kWordSize));
  __ lw(temp2, FieldAddress(right, Double::value_offset() + 0 * kWordSize));
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);
  __ lw(temp1, FieldAddress(left, Double::value_offset() + 1 * kWordSize));
  __ lw(temp2, FieldAddress(right, Double::value_offset() + 1 * kWordSize));
  __ b(&done);
  __ delay_slot()->subu(CMPRES1, temp1, temp2);

  __ Bind(&check_mint);
  __ LoadImmediate(temp1, kMintCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &check_bigint);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  __ lw(temp1, FieldAddress(left, Mint::value_offset() + 0 * kWordSize));
  __ lw(temp2, FieldAddress(right, Mint::value_offset() + 0 * kWordSize));
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);
  __ lw(temp1, FieldAddress(left, Mint::value_offset() + 1 * kWordSize));
  __ lw(temp2, FieldAddress(right, Mint::value_offset() + 1 * kWordSize));
  __ b(&done);
  __ delay_slot()->subu(CMPRES1, temp1, temp2);

  __ Bind(&check_bigint);
  __ LoadImmediate(temp1, kBigintCid);
  __ LoadClassId(temp2, left);
  __ bne(temp1, temp2, &reference_compare);
  __ LoadClassId(temp2, right);
  __ subu(CMPRES1, temp1, temp2);
  __ bne(CMPRES1, ZR, &done);

  __ EnterStubFrame();
  __ ReserveAlignedFrameSpace(2 * kWordSize);
  __ sw(left, Address(SP, 1 * kWordSize));
  __ sw(right, Address(SP, 0 * kWordSize));
  __ mov(A0, left);
  __ mov(A1, right);
  __ CallRuntime(kBigintCompareRuntimeEntry, 2);
  __ Comment("IdenticalWithNumberCheckStub return");
  // Result in V0, 0 means equal.
  __ LeaveStubFrame();
  __ b(&done);
  __ delay_slot()->mov(CMPRES1, V0);

  __ Bind(&reference_compare);
  __ subu(CMPRES1, left, right);
  __ Bind(&done);
  // A branch or test after this comparison will check CMPRES1 == ZR.
}
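
// The Double and Mint paths above compare the 64-bit payload as two 32-bit
// words because MIPS32 registers are one word wide; both subu results must
// be zero for CMPRES1 to signal equality at &done.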


// Called only from unoptimized code. All relevant registers have been saved.
// RA: return address.
// SP + 4: left operand.
// SP + 0: right operand.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
void StubCode::GenerateUnoptimizedIdenticalWithNumberCheckStub(
    Assembler* assembler) {
  // Check single stepping.
  Label stepping, done_stepping;
  if (FLAG_support_debugger) {
    __ LoadIsolate(T0);
    __ lbu(T0, Address(T0, Isolate::single_step_offset()));
    __ BranchNotEqual(T0, Immediate(0), &stepping);
    __ Bind(&done_stepping);
  }

  const Register temp1 = T2;
  const Register temp2 = T3;
  const Register left = T1;
  const Register right = T0;
  __ lw(left, Address(SP, 1 * kWordSize));
  __ lw(right, Address(SP, 0 * kWordSize));
  GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
  __ Ret();

  // Call single step callback in debugger.
  if (FLAG_support_debugger) {
    __ Bind(&stepping);
    __ EnterStubFrame();
    __ addiu(SP, SP, Immediate(-1 * kWordSize));
    __ sw(RA, Address(SP, 0 * kWordSize));  // Return address.
    __ CallRuntime(kSingleStepHandlerRuntimeEntry, 0);
    __ lw(RA, Address(SP, 0 * kWordSize));
    __ addiu(SP, SP, Immediate(1 * kWordSize));
    __ RestoreCodePointer();
    __ LeaveStubFrame();
    __ b(&done_stepping);
  }
}


// Called from optimized code only.
// SP + 4: left operand.
// SP + 0: right operand.
// Returns: CMPRES1 is zero if equal, non-zero otherwise.
void StubCode::GenerateOptimizedIdenticalWithNumberCheckStub(
    Assembler* assembler) {
  const Register temp1 = T2;
  const Register temp2 = T3;
  const Register left = T1;
  const Register right = T0;
  __ lw(left, Address(SP, 1 * kWordSize));
  __ lw(right, Address(SP, 0 * kWordSize));
  GenerateIdenticalWithNumberCheckStub(assembler, left, right, temp1, temp2);
  __ Ret();
}


// Called from megamorphic calls.
// T0: receiver
// S5: MegamorphicCache (preserved)
// Passed to target:
// CODE_REG: target Code object
// S4: arguments descriptor
void StubCode::GenerateMegamorphicCallStub(Assembler* assembler) {
  __ LoadTaggedClassIdMayBeSmi(T0, T0);
  // T0: class ID of the receiver (smi).
  __ lw(S4, FieldAddress(S5, MegamorphicCache::arguments_descriptor_offset()));
  __ lw(T2, FieldAddress(S5, MegamorphicCache::buckets_offset()));
  __ lw(T1, FieldAddress(S5, MegamorphicCache::mask_offset()));
  // T2: cache buckets array.
  // T1: mask.
  __ LoadImmediate(TMP, MegamorphicCache::kSpreadFactor);
  __ mult(TMP, T0);
  __ mflo(T3);
  // T3: probe.

  Label loop, update, call_target_function;
  __ b(&loop);

  __ Bind(&update);
  __ addiu(T3, T3, Immediate(Smi::RawValue(1)));
  __ Bind(&loop);
  __ and_(T3, T3, T1);
  const intptr_t base = Array::data_offset();
  // T3 is Smi-tagged, but table entries are two words, so shift left by 2.
  __ sll(TMP, T3, 2);
  __ addu(TMP, T2, TMP);
  __ lw(T4, FieldAddress(TMP, base));

  ASSERT(kIllegalCid == 0);
  __ beq(T4, ZR, &call_target_function);
  __ bne(T4, T0, &update);

  __ Bind(&call_target_function);
  // Call the target found in the cache. For a class id match, this is a
  // proper target for the given name and arguments descriptor. If the
  // illegal class id was found, the target is a cache miss handler that can
  // be invoked as a normal Dart function.
  __ sll(T1, T3, 2);
  __ addu(T1, T2, T1);
  __ lw(T0, FieldAddress(T1, base + kWordSize));

  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T1);
}
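
// Probe sequence sketch: the initial probe index is
// (receiver cid * kSpreadFactor) & mask, and each miss advances the index by
// one entry (Smi::RawValue(1)) before re-masking, i.e. linear probing in a
// power-of-two sized table of (cid, target) pairs.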


// Called from switchable IC calls.
// T0: receiver
// S5: ICData (preserved)
// Passed to target:
// CODE_REG: target Code object
// S4: arguments descriptor
void StubCode::GenerateICCallThroughFunctionStub(Assembler* assembler) {
  Label loop, found, miss;
  __ lw(T6, FieldAddress(S5, ICData::ic_data_offset()));
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag);
  // T6: first IC entry.
  __ LoadTaggedClassIdMayBeSmi(T1, T0);
  // T1: receiver cid as Smi.

  __ Bind(&loop);
  __ lw(T2, Address(T6, 0));
  __ beq(T1, T2, &found);
  ASSERT(Smi::RawValue(kIllegalCid) == 0);
  __ beq(T2, ZR, &miss);

  const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize;
  __ AddImmediate(T6, entry_length);  // Next entry.
  __ b(&loop);

  __ Bind(&found);
  const intptr_t target_offset = ICData::TargetIndexFor(1) * kWordSize;
  __ lw(T0, Address(T6, target_offset));
  __ lw(T1, FieldAddress(T0, Function::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(T0, Function::code_offset()));
  __ jr(T1);

  __ Bind(&miss);
  __ LoadIsolate(T2);
  __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T1);
}


void StubCode::GenerateICCallThroughCodeStub(Assembler* assembler) {
  Label loop, found, miss;
  __ lw(T6, FieldAddress(S5, ICData::ic_data_offset()));
  __ lw(S4, FieldAddress(S5, ICData::arguments_descriptor_offset()));
  __ AddImmediate(T6, T6, Array::data_offset() - kHeapObjectTag);
  // T6: first IC entry.
  __ LoadTaggedClassIdMayBeSmi(T1, T0);
  // T1: receiver cid as Smi.

  __ Bind(&loop);
  __ lw(T2, Address(T6, 0));
  __ beq(T1, T2, &found);
  ASSERT(Smi::RawValue(kIllegalCid) == 0);
  __ beq(T2, ZR, &miss);

  const intptr_t entry_length = ICData::TestEntryLengthFor(1) * kWordSize;
  __ AddImmediate(T6, entry_length);  // Next entry.
  __ b(&loop);

  __ Bind(&found);
  const intptr_t code_offset = ICData::CodeIndexFor(1) * kWordSize;
  const intptr_t entry_offset = ICData::EntryPointIndexFor(1) * kWordSize;
  __ lw(T1, Address(T6, entry_offset));
  __ lw(CODE_REG, Address(T6, code_offset));
  __ jr(T1);

  __ Bind(&miss);
  __ LoadIsolate(T2);
  __ lw(CODE_REG, Address(T2, Isolate::ic_miss_code_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::entry_point_offset()));
  __ jr(T1);
}
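
// Unlike GenerateICCallThroughFunctionStub above, this variant caches the
// Code object and raw entry point directly in the IC entry
// (CodeIndexFor/EntryPointIndexFor), so a hit avoids the two loads through
// the target Function.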


// Called from switchable IC calls.
// T0: receiver
// S5: UnlinkedCall
void StubCode::GenerateUnlinkedCallStub(Assembler* assembler) {
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: receiver.
  __ Push(S5);  // Arg1: UnlinkedCall.
  __ CallRuntime(kUnlinkedCallRuntimeEntry, 2);
  __ Drop(2);
  __ Pop(S5);  // result = IC.

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}


// Called from switchable IC calls.
// T0: receiver
// S5: SingleTargetCache
// Passed to target:
// CODE_REG: target Code object
void StubCode::GenerateSingleTargetCallStub(Assembler* assembler) {
  Label miss;
  __ LoadClassIdMayBeSmi(T1, T0);
  __ lhu(T2, FieldAddress(S5, SingleTargetCache::lower_limit_offset()));
  __ lhu(T3, FieldAddress(S5, SingleTargetCache::upper_limit_offset()));

  __ BranchUnsignedLess(T1, T2, &miss);
  __ BranchUnsignedGreater(T1, T3, &miss);

  __ lw(T1, FieldAddress(S5, SingleTargetCache::entry_point_offset()));
  __ lw(CODE_REG, FieldAddress(S5, SingleTargetCache::target_offset()));
  __ jr(T1);

  __ Bind(&miss);
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: receiver.
  __ CallRuntime(kSingleTargetMissRuntimeEntry, 1);
  __ Drop(1);
  __ Pop(S5);  // result = IC.

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}


// Called from the monomorphic checked entry.
// T0: receiver
void StubCode::GenerateMonomorphicMissStub(Assembler* assembler) {
  __ lw(CODE_REG, Address(THR, Thread::monomorphic_miss_stub_offset()));
  __ EnterStubFrame();
  __ Push(T0);  // Preserve receiver.

  __ Push(ZR);  // Result slot.
  __ Push(T0);  // Arg0: receiver.
  __ CallRuntime(kMonomorphicMissRuntimeEntry, 1);
  __ Drop(1);
  __ Pop(S5);  // result = IC.

  __ Pop(T0);  // Restore receiver.
  __ LeaveStubFrame();

  __ lw(CODE_REG, Address(THR, Thread::ic_lookup_through_code_stub_offset()));
  __ lw(T1, FieldAddress(CODE_REG, Code::checked_entry_point_offset()));
  __ jr(T1);
}
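
// GenerateUnlinkedCallStub, GenerateSingleTargetCallStub, and this stub all
// share one slow-path shape: call the runtime to produce a fresh IC (popped
// into S5), then tail-jump through the shared ic_lookup_through_code stub,
// which re-dispatches the call with the new cache in place.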


void StubCode::GenerateFrameAwaitingMaterializationStub(Assembler* assembler) {
  __ break_(0);
}


void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
  __ break_(0);
}

}  // namespace dart

#endif  // defined TARGET_ARCH_MIPS