| OLD | NEW |
| --- | --- |
| | (Empty) |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | |
| 2 // Redistribution and use in source and binary forms, with or without | |
| 3 // modification, are permitted provided that the following conditions are | |
| 4 // met: | |
| 5 // | |
| 6 // * Redistributions of source code must retain the above copyright | |
| 7 // notice, this list of conditions and the following disclaimer. | |
| 8 // * Redistributions in binary form must reproduce the above | |
| 9 // copyright notice, this list of conditions and the following | |
| 10 // disclaimer in the documentation and/or other materials provided | |
| 11 // with the distribution. | |
| 12 // * Neither the name of Google Inc. nor the names of its | |
| 13 // contributors may be used to endorse or promote products derived | |
| 14 // from this software without specific prior written permission. | |
| 15 // | |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 27 | |
| 28 #include "v8.h" | |
| 29 | |
| 30 #if V8_TARGET_ARCH_A64 | |
| 31 | |
| 32 #include "bootstrapper.h" | |
| 33 #include "code-stubs.h" | |
| 34 #include "regexp-macro-assembler.h" | |
| 35 #include "stub-cache.h" | |
| 36 | |
| 37 namespace v8 { | |
| 38 namespace internal { | |
| 39 | |
| 40 | |
| 41 void FastNewClosureStub::InitializeInterfaceDescriptor( | |
| 42 Isolate* isolate, | |
| 43 CodeStubInterfaceDescriptor* descriptor) { | |
| 44 // x2: function info | |
| 45 static Register registers[] = { x2 }; | |
| 46 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 47 descriptor->register_params_ = registers; | |
| 48 descriptor->deoptimization_handler_ = | |
| 49 Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry; | |
| 50 } | |
| 51 | |
| 52 | |
| 53 void FastNewContextStub::InitializeInterfaceDescriptor( | |
| 54 Isolate* isolate, | |
| 55 CodeStubInterfaceDescriptor* descriptor) { | |
| 56 // x1: function | |
| 57 static Register registers[] = { x1 }; | |
| 58 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 59 descriptor->register_params_ = registers; | |
| 60 descriptor->deoptimization_handler_ = NULL; | |
| 61 } | |
| 62 | |
| 63 | |
| 64 void ToNumberStub::InitializeInterfaceDescriptor( | |
| 65 Isolate* isolate, | |
| 66 CodeStubInterfaceDescriptor* descriptor) { | |
| 67 // x0: value | |
| 68 static Register registers[] = { x0 }; | |
| 69 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 70 descriptor->register_params_ = registers; | |
| 71 descriptor->deoptimization_handler_ = NULL; | |
| 72 } | |
| 73 | |
| 74 | |
| 75 void NumberToStringStub::InitializeInterfaceDescriptor( | |
| 76 Isolate* isolate, | |
| 77 CodeStubInterfaceDescriptor* descriptor) { | |
| 78 // x0: value | |
| 79 static Register registers[] = { x0 }; | |
| 80 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 81 descriptor->register_params_ = registers; | |
| 82 descriptor->deoptimization_handler_ = | |
| 83 Runtime::FunctionForId(Runtime::kNumberToString)->entry; | |
| 84 } | |
| 85 | |
| 86 | |
| 87 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor( | |
| 88 Isolate* isolate, | |
| 89 CodeStubInterfaceDescriptor* descriptor) { | |
| 90 // x3: array literals array | |
| 91 // x2: array literal index | |
| 92 // x1: constant elements | |
| 93 static Register registers[] = { x3, x2, x1 }; | |
| 94 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 95 descriptor->register_params_ = registers; | |
| 96 descriptor->deoptimization_handler_ = | |
| 97 Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry; | |
| 98 } | |
| 99 | |
| 100 | |
| 101 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( | |
| 102 Isolate* isolate, | |
| 103 CodeStubInterfaceDescriptor* descriptor) { | |
| 104 // x3: object literals array | |
| 105 // x2: object literal index | |
| 106 // x1: constant properties | |
| 107 // x0: object literal flags | |
| 108 static Register registers[] = { x3, x2, x1, x0 }; | |
| 109 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 110 descriptor->register_params_ = registers; | |
| 111 descriptor->deoptimization_handler_ = | |
| 112 Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry; | |
| 113 } | |
| 114 | |
| 115 | |
| 116 void CreateAllocationSiteStub::InitializeInterfaceDescriptor( | |
| 117 Isolate* isolate, | |
| 118 CodeStubInterfaceDescriptor* descriptor) { | |
| 119 // x2: feedback vector | |
| 120 // x3: call feedback slot | |
| 121 static Register registers[] = { x2, x3 }; | |
| 122 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 123 descriptor->register_params_ = registers; | |
| 124 descriptor->deoptimization_handler_ = NULL; | |
| 125 } | |
| 126 | |
| 127 | |
| 128 void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( | |
| 129 Isolate* isolate, | |
| 130 CodeStubInterfaceDescriptor* descriptor) { | |
| 131 // x1: receiver | |
| 132 // x0: key | |
| 133 static Register registers[] = { x1, x0 }; | |
| 134 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 135 descriptor->register_params_ = registers; | |
| 136 descriptor->deoptimization_handler_ = | |
| 137 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); | |
| 138 } | |
| 139 | |
| 140 | |
| 141 void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor( | |
| 142 Isolate* isolate, | |
| 143 CodeStubInterfaceDescriptor* descriptor) { | |
| 144 // x1: receiver | |
| 145 // x0: key | |
| 146 static Register registers[] = { x1, x0 }; | |
| 147 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 148 descriptor->register_params_ = registers; | |
| 149 descriptor->deoptimization_handler_ = | |
| 150 FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); | |
| 151 } | |
| 152 | |
| 153 | |
| 154 void RegExpConstructResultStub::InitializeInterfaceDescriptor( | |
| 155 Isolate* isolate, | |
| 156 CodeStubInterfaceDescriptor* descriptor) { | |
| 157 // x2: length | |
| 158 // x1: index (of last match) | |
| 159 // x0: string | |
| 160 static Register registers[] = { x2, x1, x0 }; | |
| 161 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 162 descriptor->register_params_ = registers; | |
| 163 descriptor->deoptimization_handler_ = | |
| 164 Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry; | |
| 165 } | |
| 166 | |
| 167 | |
| 168 void LoadFieldStub::InitializeInterfaceDescriptor( | |
| 169 Isolate* isolate, | |
| 170 CodeStubInterfaceDescriptor* descriptor) { | |
| 171 // x0: receiver | |
| 172 static Register registers[] = { x0 }; | |
| 173 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 174 descriptor->register_params_ = registers; | |
| 175 descriptor->deoptimization_handler_ = NULL; | |
| 176 } | |
| 177 | |
| 178 | |
| 179 void KeyedLoadFieldStub::InitializeInterfaceDescriptor( | |
| 180 Isolate* isolate, | |
| 181 CodeStubInterfaceDescriptor* descriptor) { | |
| 182 // x1: receiver | |
| 183 static Register registers[] = { x1 }; | |
| 184 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 185 descriptor->register_params_ = registers; | |
| 186 descriptor->deoptimization_handler_ = NULL; | |
| 187 } | |
| 188 | |
| 189 | |
| 190 void KeyedStoreFastElementStub::InitializeInterfaceDescriptor( | |
| 191 Isolate* isolate, | |
| 192 CodeStubInterfaceDescriptor* descriptor) { | |
| 193 // x2: receiver | |
| 194 // x1: key | |
| 195 // x0: value | |
| 196 static Register registers[] = { x2, x1, x0 }; | |
| 197 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 198 descriptor->register_params_ = registers; | |
| 199 descriptor->deoptimization_handler_ = | |
| 200 FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure); | |
| 201 } | |
| 202 | |
| 203 | |
| 204 void TransitionElementsKindStub::InitializeInterfaceDescriptor( | |
| 205 Isolate* isolate, | |
| 206 CodeStubInterfaceDescriptor* descriptor) { | |
| 207 // x0: value (js_array) | |
| 208 // x1: to_map | |
| 209 static Register registers[] = { x0, x1 }; | |
| 210 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 211 descriptor->register_params_ = registers; | |
| 212 Address entry = | |
| 213 Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; | |
| 214 descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); | |
| 215 } | |
| 216 | |
| 217 | |
| 218 void CompareNilICStub::InitializeInterfaceDescriptor( | |
| 219 Isolate* isolate, | |
| 220 CodeStubInterfaceDescriptor* descriptor) { | |
| 221 // x0: value to compare | |
| 222 static Register registers[] = { x0 }; | |
| 223 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 224 descriptor->register_params_ = registers; | |
| 225 descriptor->deoptimization_handler_ = | |
| 226 FUNCTION_ADDR(CompareNilIC_Miss); | |
| 227 descriptor->SetMissHandler( | |
| 228 ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate)); | |
| 229 } | |
| 230 | |
| 231 | |
| 232 static void InitializeArrayConstructorDescriptor( | |
| 233 Isolate* isolate, | |
| 234 CodeStubInterfaceDescriptor* descriptor, | |
| 235 int constant_stack_parameter_count) { | |
| 236 // x1: function | |
| 237 // x2: allocation site with elements kind | |
| 238 // x0: number of arguments to the constructor function | |
| 239 static Register registers_variable_args[] = { x1, x2, x0 }; | |
| 240 static Register registers_no_args[] = { x1, x2 }; | |
| 241 | |
| 242 if (constant_stack_parameter_count == 0) { | |
| 243 descriptor->register_param_count_ = | |
| 244 sizeof(registers_no_args) / sizeof(registers_no_args[0]); | |
| 245 descriptor->register_params_ = registers_no_args; | |
| 246 } else { | |
| 247 // The stack param count must cover the constructor pointer and one argument. | |
| 248 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; | |
| 249 descriptor->stack_parameter_count_ = x0; | |
| 250 descriptor->register_param_count_ = | |
| 251 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]); | |
| 252 descriptor->register_params_ = registers_variable_args; | |
| 253 } | |
| 254 | |
| 255 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; | |
| 256 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; | |
| 257 descriptor->deoptimization_handler_ = | |
| 258 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; | |
| 259 } | |
| 260 | |
| 261 | |
| 262 void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( | |
| 263 Isolate* isolate, | |
| 264 CodeStubInterfaceDescriptor* descriptor) { | |
| 265 InitializeArrayConstructorDescriptor(isolate, descriptor, 0); | |
| 266 } | |
| 267 | |
| 268 | |
| 269 void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( | |
| 270 Isolate* isolate, | |
| 271 CodeStubInterfaceDescriptor* descriptor) { | |
| 272 InitializeArrayConstructorDescriptor(isolate, descriptor, 1); | |
| 273 } | |
| 274 | |
| 275 | |
| 276 void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( | |
| 277 Isolate* isolate, | |
| 278 CodeStubInterfaceDescriptor* descriptor) { | |
| 279 InitializeArrayConstructorDescriptor(isolate, descriptor, -1); | |
| 280 } | |
| 281 | |
| 282 | |
| 283 static void InitializeInternalArrayConstructorDescriptor( | |
| 284 Isolate* isolate, | |
| 285 CodeStubInterfaceDescriptor* descriptor, | |
| 286 int constant_stack_parameter_count) { | |
| 287 // x1: constructor function | |
| 288 // x0: number of arguments to the constructor function | |
| 289 static Register registers_variable_args[] = { x1, x0 }; | |
| 290 static Register registers_no_args[] = { x1 }; | |
| 291 | |
| 292 if (constant_stack_parameter_count == 0) { | |
| 293 descriptor->register_param_count_ = | |
| 294 sizeof(registers_no_args) / sizeof(registers_no_args[0]); | |
| 295 descriptor->register_params_ = registers_no_args; | |
| 296 } else { | |
| 297 // The stack param count must cover the constructor pointer and one argument. | |
| 298 descriptor->handler_arguments_mode_ = PASS_ARGUMENTS; | |
| 299 descriptor->stack_parameter_count_ = x0; | |
| 300 descriptor->register_param_count_ = | |
| 301 sizeof(registers_variable_args) / sizeof(registers_variable_args[0]); | |
| 302 descriptor->register_params_ = registers_variable_args; | |
| 303 } | |
| 304 | |
| 305 descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count; | |
| 306 descriptor->function_mode_ = JS_FUNCTION_STUB_MODE; | |
| 307 descriptor->deoptimization_handler_ = | |
| 308 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; | |
| 309 } | |
| 310 | |
| 311 | |
| 312 void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( | |
| 313 Isolate* isolate, | |
| 314 CodeStubInterfaceDescriptor* descriptor) { | |
| 315 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 0); | |
| 316 } | |
| 317 | |
| 318 | |
| 319 void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( | |
| 320 Isolate* isolate, | |
| 321 CodeStubInterfaceDescriptor* descriptor) { | |
| 322 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, 1); | |
| 323 } | |
| 324 | |
| 325 | |
| 326 void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( | |
| 327 Isolate* isolate, | |
| 328 CodeStubInterfaceDescriptor* descriptor) { | |
| 329 InitializeInternalArrayConstructorDescriptor(isolate, descriptor, -1); | |
| 330 } | |
| 331 | |
| 332 | |
| 333 void ToBooleanStub::InitializeInterfaceDescriptor( | |
| 334 Isolate* isolate, | |
| 335 CodeStubInterfaceDescriptor* descriptor) { | |
| 336 // x0: value | |
| 337 static Register registers[] = { x0 }; | |
| 338 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 339 descriptor->register_params_ = registers; | |
| 340 descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss); | |
| 341 descriptor->SetMissHandler( | |
| 342 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); | |
| 343 } | |
| 344 | |
| 345 | |
| 346 void StoreGlobalStub::InitializeInterfaceDescriptor( | |
| 347 Isolate* isolate, | |
| 348 CodeStubInterfaceDescriptor* descriptor) { | |
| 349 // x1: receiver | |
| 350 // x2: key (unused) | |
| 351 // x0: value | |
| 352 static Register registers[] = { x1, x2, x0 }; | |
| 353 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 354 descriptor->register_params_ = registers; | |
| 355 descriptor->deoptimization_handler_ = | |
| 356 FUNCTION_ADDR(StoreIC_MissFromStubFailure); | |
| 357 } | |
| 358 | |
| 359 | |
| 360 void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor( | |
| 361 Isolate* isolate, | |
| 362 CodeStubInterfaceDescriptor* descriptor) { | |
| 363 // x0: value | |
| 364 // x3: target map | |
| 365 // x1: key | |
| 366 // x2: receiver | |
| 367 static Register registers[] = { x0, x3, x1, x2 }; | |
| 368 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 369 descriptor->register_params_ = registers; | |
| 370 descriptor->deoptimization_handler_ = | |
| 371 FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss); | |
| 372 } | |
| 373 | |
| 374 | |
| 375 void BinaryOpICStub::InitializeInterfaceDescriptor( | |
| 376 Isolate* isolate, | |
| 377 CodeStubInterfaceDescriptor* descriptor) { | |
| 378 // x1: left operand | |
| 379 // x0: right operand | |
| 380 static Register registers[] = { x1, x0 }; | |
| 381 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 382 descriptor->register_params_ = registers; | |
| 383 descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); | |
| 384 descriptor->SetMissHandler( | |
| 385 ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); | |
| 386 } | |
| 387 | |
| 388 | |
| 389 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor( | |
| 390 Isolate* isolate, | |
| 391 CodeStubInterfaceDescriptor* descriptor) { | |
| 392 // x2: allocation site | |
| 393 // x1: left operand | |
| 394 // x0: right operand | |
| 395 static Register registers[] = { x2, x1, x0 }; | |
| 396 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 397 descriptor->register_params_ = registers; | |
| 398 descriptor->deoptimization_handler_ = | |
| 399 FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite); | |
| 400 } | |
| 401 | |
| 402 | |
| 403 void StringAddStub::InitializeInterfaceDescriptor( | |
| 404 Isolate* isolate, | |
| 405 CodeStubInterfaceDescriptor* descriptor) { | |
| 406 // x1: left operand | |
| 407 // x0: right operand | |
| 408 static Register registers[] = { x1, x0 }; | |
| 409 descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]); | |
| 410 descriptor->register_params_ = registers; | |
| 411 descriptor->deoptimization_handler_ = | |
| 412 Runtime::FunctionForId(Runtime::kStringAdd)->entry; | |
| 413 } | |
| 414 | |
| 415 | |
| 416 void CallDescriptors::InitializeForIsolate(Isolate* isolate) { | |
| 417 static PlatformCallInterfaceDescriptor default_descriptor = | |
| 418 PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS); | |
| 419 | |
| 420 static PlatformCallInterfaceDescriptor noInlineDescriptor = | |
| 421 PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS); | |
| 422 | |
| 423 { | |
| 424 CallInterfaceDescriptor* descriptor = | |
| 425 isolate->call_descriptor(Isolate::ArgumentAdaptorCall); | |
| 426 static Register registers[] = { x1, // JSFunction | |
| 427 cp, // context | |
| 428 x0, // actual number of arguments | |
| 429 x2, // expected number of arguments | |
| 430 }; | |
| 431 static Representation representations[] = { | |
| 432 Representation::Tagged(), // JSFunction | |
| 433 Representation::Tagged(), // context | |
| 434 Representation::Integer32(), // actual number of arguments | |
| 435 Representation::Integer32(), // expected number of arguments | |
| 436 }; | |
| 437 descriptor->register_param_count_ = 4; | |
| 438 descriptor->register_params_ = registers; | |
| 439 descriptor->param_representations_ = representations; | |
| 440 descriptor->platform_specific_descriptor_ = &default_descriptor; | |
| 441 } | |
| 442 { | |
| 443 CallInterfaceDescriptor* descriptor = | |
| 444 isolate->call_descriptor(Isolate::KeyedCall); | |
| 445 static Register registers[] = { cp, // context | |
| 446 x2, // key | |
| 447 }; | |
| 448 static Representation representations[] = { | |
| 449 Representation::Tagged(), // context | |
| 450 Representation::Tagged(), // key | |
| 451 }; | |
| 452 descriptor->register_param_count_ = 2; | |
| 453 descriptor->register_params_ = registers; | |
| 454 descriptor->param_representations_ = representations; | |
| 455 descriptor->platform_specific_descriptor_ = &noInlineDescriptor; | |
| 456 } | |
| 457 { | |
| 458 CallInterfaceDescriptor* descriptor = | |
| 459 isolate->call_descriptor(Isolate::NamedCall); | |
| 460 static Register registers[] = { cp, // context | |
| 461 x2, // name | |
| 462 }; | |
| 463 static Representation representations[] = { | |
| 464 Representation::Tagged(), // context | |
| 465 Representation::Tagged(), // name | |
| 466 }; | |
| 467 descriptor->register_param_count_ = 2; | |
| 468 descriptor->register_params_ = registers; | |
| 469 descriptor->param_representations_ = representations; | |
| 470 descriptor->platform_specific_descriptor_ = &noInlineDescriptor; | |
| 471 } | |
| 472 { | |
| 473 CallInterfaceDescriptor* descriptor = | |
| 474 isolate->call_descriptor(Isolate::CallHandler); | |
| 475 static Register registers[] = { cp, // context | |
| 476 x0, // receiver | |
| 477 }; | |
| 478 static Representation representations[] = { | |
| 479 Representation::Tagged(), // context | |
| 480 Representation::Tagged(), // receiver | |
| 481 }; | |
| 482 descriptor->register_param_count_ = 2; | |
| 483 descriptor->register_params_ = registers; | |
| 484 descriptor->param_representations_ = representations; | |
| 485 descriptor->platform_specific_descriptor_ = &default_descriptor; | |
| 486 } | |
| 487 { | |
| 488 CallInterfaceDescriptor* descriptor = | |
| 489 isolate->call_descriptor(Isolate::ApiFunctionCall); | |
| 490 static Register registers[] = { x0, // callee | |
| 491 x4, // call_data | |
| 492 x2, // holder | |
| 493 x1, // api_function_address | |
| 494 cp, // context | |
| 495 }; | |
| 496 static Representation representations[] = { | |
| 497 Representation::Tagged(), // callee | |
| 498 Representation::Tagged(), // call_data | |
| 499 Representation::Tagged(), // holder | |
| 500 Representation::External(), // api_function_address | |
| 501 Representation::Tagged(), // context | |
| 502 }; | |
| 503 descriptor->register_param_count_ = 5; | |
| 504 descriptor->register_params_ = registers; | |
| 505 descriptor->param_representations_ = representations; | |
| 506 descriptor->platform_specific_descriptor_ = &default_descriptor; | |
| 507 } | |
| 508 } | |
| 509 | |
| 510 | |
| 511 #define __ ACCESS_MASM(masm) | |
| 512 | |
| 513 | |
| 514 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | |
| 515 // Update the static counter each time a new code stub is generated. | |
| 516 Isolate* isolate = masm->isolate(); | |
| 517 isolate->counters()->code_stubs()->Increment(); | |
| 518 | |
| 519 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); | |
| 520 int param_count = descriptor->register_param_count_; | |
| 521 { | |
| 522 // Call the runtime system in a fresh internal frame. | |
| 523 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 524 ASSERT((descriptor->register_param_count_ == 0) || | |
| 525 x0.Is(descriptor->register_params_[param_count - 1])); | |
| 526 | |
| 527 // Push arguments. | |
| 528 MacroAssembler::PushPopQueue queue(masm); | |
| 529 for (int i = 0; i < param_count; ++i) { | |
| 530 queue.Queue(descriptor->register_params_[i]); | |
| 531 } | |
| 532 queue.PushQueued(); | |
| 533 | |
| 534 ExternalReference miss = descriptor->miss_handler(); | |
| 535 __ CallExternalReference(miss, descriptor->register_param_count_); | |
| 536 } | |
| 537 | |
| 538 __ Ret(); | |
| 539 } | |
| 540 | |
| 541 | |
| 542 void DoubleToIStub::Generate(MacroAssembler* masm) { | |
| 543 Label done; | |
| 544 Register input = source(); | |
| 545 Register result = destination(); | |
| 546 ASSERT(is_truncating()); | |
| 547 | |
| 548 ASSERT(result.Is64Bits()); | |
| 549 ASSERT(jssp.Is(masm->StackPointer())); | |
| 550 | |
| 551 int double_offset = offset(); | |
| 552 | |
| 553 DoubleRegister double_scratch = d0; // only used if !skip_fastpath() | |
| 554 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result); | |
| 555 Register scratch2 = | |
| 556 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1); | |
| 557 | |
| 558 __ Push(scratch1, scratch2); | |
| 559 // Account for saved regs if input is jssp. | |
| 560 if (input.is(jssp)) double_offset += 2 * kPointerSize; | |
| 561 | |
| 562 if (!skip_fastpath()) { | |
| 563 __ Push(double_scratch); | |
| 564 if (input.is(jssp)) double_offset += 1 * kDoubleSize; | |
| 565 __ Ldr(double_scratch, MemOperand(input, double_offset)); | |
| 566 // Try to convert with a FPU convert instruction. This handles all | |
| 567 // non-saturating cases. | |
| 568 __ TryConvertDoubleToInt64(result, double_scratch, &done); | |
| 569 __ Fmov(result, double_scratch); | |
| 570 } else { | |
| 571 __ Ldr(result, MemOperand(input, double_offset)); | |
| 572 } | |
| 573 | |
| 574 // If we reach here, we need to convert the input to an int32 manually. | |
| 575 | |
| 576 // Extract the exponent. | |
| 577 Register exponent = scratch1; | |
| 578 __ Ubfx(exponent, result, HeapNumber::kMantissaBits, | |
| 579 HeapNumber::kExponentBits); | |
| 580 | |
| 581 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since | |
| 582 // the mantissa gets shifted completely out of the int32_t result. | |
| 583 __ Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32); | |
| 584 __ CzeroX(result, ge); | |
| 585 __ B(ge, &done); | |
| 586 | |
| 587 // The Fcvtzs sequence handles all cases except where the conversion causes | |
| 588 // signed overflow in the int64_t target. Since we've already handled | |
| 589 // exponents >= 84, we can guarantee that 63 <= exponent < 84. | |
| 590 | |
| 591 if (masm->emit_debug_code()) { | |
| 592 __ Cmp(exponent, HeapNumber::kExponentBias + 63); | |
| 593 // Exponents less than this should have been handled by the Fcvtzs case. | |
| 594 __ Check(ge, kUnexpectedValue); | |
| 595 } | |
| 596 | |
| 597 // Isolate the mantissa bits, and set the implicit '1'. | |
| 598 Register mantissa = scratch2; | |
| 599 __ Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits); | |
| 600 __ Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits); | |
| 601 | |
| 602 // Negate the mantissa if necessary. | |
| 603 __ Tst(result, kXSignMask); | |
| 604 __ Cneg(mantissa, mantissa, ne); | |
| 605 | |
| 606 // Shift the mantissa bits into the correct place. We know we have to shift | |
| 607 // it left here, because exponent >= 63 >= kMantissaBits. | |
| 608 __ Sub(exponent, exponent, | |
| 609 HeapNumber::kExponentBias + HeapNumber::kMantissaBits); | |
| 610 __ Lsl(result, mantissa, exponent); | |
| 611 | |
| 612 __ Bind(&done); | |
| 613 if (!skip_fastpath()) { | |
| 614 __ Pop(double_scratch); | |
| 615 } | |
| 616 __ Pop(scratch2, scratch1); | |
| 617 __ Ret(); | |
| 618 } | |
| 619 | |
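| // The slow path above is plain IEEE-754 bit manipulation. A rough portable | |
| // C++ sketch of the same logic (illustrative only; it assumes the fast | |
| // conversion already failed, so the unbiased exponent is at least 63): | |
| // | |
| //   int64_t SlowTruncate(uint64_t bits) {        // raw bits of the double | |
| //     int exponent = (bits >> 52) & 0x7ff;       // biased exponent | |
| //     if (exponent >= 1023 + 52 + 32) return 0;  // mantissa shifted out | |
| //     uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52); | |
| //     if (bits >> 63) mantissa = -mantissa;      // two's-complement negate | |
| //     return (int64_t)(mantissa << (exponent - (1023 + 52)));  // as Lsl does | |
| //   } | |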
| 620 | |
| 621 // See call site for description. | |
| 622 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | |
| 623 Register left, | |
| 624 Register right, | |
| 625 Register scratch, | |
| 626 FPRegister double_scratch, | |
| 627 Label* slow, | |
| 628 Condition cond) { | |
| 629 ASSERT(!AreAliased(left, right, scratch)); | |
| 630 Label not_identical, return_equal, heap_number; | |
| 631 Register result = x0; | |
| 632 | |
| 633 __ Cmp(right, left); | |
| 634 __ B(ne, ¬_identical); | |
| 635 | |
| 636 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), | |
| 637 // so we do the second best thing - test it ourselves. | |
| 638 // They are equal, and the two-smi case has already been handled, so neither | |
| 639 // of them can be a smi. If it's not a heap number, then return equal. | |
| 640 if ((cond == lt) || (cond == gt)) { | |
| 641 __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow, | |
| 642 ge); | |
| 643 } else { | |
| 644 Register right_type = scratch; | |
| 645 __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE, | |
| 646 &heap_number); | |
| 647 // Comparing JS objects with <=, >= is complicated. | |
| 648 if (cond != eq) { | |
| 649 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); | |
| 650 __ B(ge, slow); | |
| 651 // Normally here we fall through to return_equal, but undefined is | |
| 652 // special: (undefined == undefined) == true, but | |
| 653 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | |
| 654 if ((cond == le) || (cond == ge)) { | |
| 655 __ Cmp(right_type, ODDBALL_TYPE); | |
| 656 __ B(ne, &return_equal); | |
| 657 __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal); | |
| 658 if (cond == le) { | |
| 659 // undefined <= undefined should fail. | |
| 660 __ Mov(result, GREATER); | |
| 661 } else { | |
| 662 // undefined >= undefined should fail. | |
| 663 __ Mov(result, LESS); | |
| 664 } | |
| 665 __ Ret(); | |
| 666 } | |
| 667 } | |
| 668 } | |
| 669 | |
| 670 __ Bind(&return_equal); | |
| 671 if (cond == lt) { | |
| 672 __ Mov(result, GREATER); // Things aren't less than themselves. | |
| 673 } else if (cond == gt) { | |
| 674 __ Mov(result, LESS); // Things aren't greater than themselves. | |
| 675 } else { | |
| 676 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves. | |
| 677 } | |
| 678 __ Ret(); | |
| 679 | |
| 680 // Cases lt and gt have been handled earlier, and case ne is never seen, as | |
| 681 // it is handled in the parser (see Parser::ParseBinaryExpression). We are | |
| 682 // only concerned with cases ge, le and eq here. | |
| 683 if ((cond != lt) && (cond != gt)) { | |
| 684 ASSERT((cond == ge) || (cond == le) || (cond == eq)); | |
| 685 __ Bind(&heap_number); | |
| 686 // Left and right are identical pointers to a heap number object. Return | |
| 687 // non-equal if the heap number is a NaN, and equal otherwise. Comparing | |
| 688 // the number to itself will set the overflow flag iff the number is NaN. | |
| 689 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset)); | |
| 690 __ Fcmp(double_scratch, double_scratch); | |
| 691 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number. | |
| 692 | |
| 693 if (cond == le) { | |
| 694 __ Mov(result, GREATER); | |
| 695 } else { | |
| 696 __ Mov(result, LESS); | |
| 697 } | |
| 698 __ Ret(); | |
| 699 } | |
| 700 | |
| 701 // No fall through here. | |
| 702 if (FLAG_debug_code) { | |
| 703 __ Unreachable(); | |
| 704 } | |
| 705 | |
| 706 __ Bind(¬_identical); | |
| 707 } | |
| 708 | |
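| // The Fcmp-with-itself trick relies on IEEE-754 semantics: a comparison is | |
| // unordered (V flag set on A64) iff an operand is NaN, and NaN is the only | |
| // value that does not compare equal to itself. A one-line C++ equivalent: | |
| // | |
| //   bool IsNaN(double x) { return x != x; } | |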
| 709 | |
| 710 // See call site for description. | |
| 711 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | |
| 712 Register left, | |
| 713 Register right, | |
| 714 Register left_type, | |
| 715 Register right_type, | |
| 716 Register scratch) { | |
| 717 ASSERT(!AreAliased(left, right, left_type, right_type, scratch)); | |
| 718 | |
| 719 if (masm->emit_debug_code()) { | |
| 720 // We assume that the arguments are not identical. | |
| 721 __ Cmp(left, right); | |
| 722 __ Assert(ne, kExpectedNonIdenticalObjects); | |
| 723 } | |
| 724 | |
| 725 // If either operand is a JS object or an oddball value, then they are not | |
| 726 // equal since their pointers are different. | |
| 727 // There is no test for undetectability in strict equality. | |
| 728 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | |
| 729 Label right_non_object; | |
| 730 | |
| 731 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); | |
| 732 __ B(lt, &right_non_object); | |
| 733 | |
| 734 // Return non-zero - x0 already contains a non-zero pointer. | |
| 735 ASSERT(left.is(x0) || right.is(x0)); | |
| 736 Label return_not_equal; | |
| 737 __ Bind(&return_not_equal); | |
| 738 __ Ret(); | |
| 739 | |
| 740 __ Bind(&right_non_object); | |
| 741 | |
| 742 // Check for oddballs: true, false, null, undefined. | |
| 743 __ Cmp(right_type, ODDBALL_TYPE); | |
| 744 | |
| 745 // If right is not ODDBALL, test left. Otherwise, set eq condition. | |
| 746 __ Ccmp(left_type, ODDBALL_TYPE, ZFlag, ne); | |
| 747 | |
| 748 // If right or left is not ODDBALL, test left >= FIRST_SPEC_OBJECT_TYPE. | |
| 749 // Otherwise, right or left is ODDBALL, so set a ge condition. | |
| 750 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NVFlag, ne); | |
| 751 | |
| 752 __ B(ge, &return_not_equal); | |
| 753 | |
| 754 // Internalized strings are unique, so they can only be equal if they are the | |
| 755 // same object. We have already tested that case, so if left and right are | |
| 756 // both internalized strings, they cannot be equal. | |
| 757 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
| 758 __ Orr(scratch, left_type, right_type); | |
| 759 __ TestAndBranchIfAllClear( | |
| 760 scratch, kIsNotStringMask | kIsNotInternalizedMask, &return_not_equal); | |
| 761 } | |
| 762 | |
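| // The Cmp/Ccmp/Ccmp/B(ge) sequence after right_non_object evaluates a | |
| // three-way disjunction without branches. A rough C++ equivalent of the | |
| // predicate it computes (illustrative only): | |
| // | |
| //   bool ReturnNotEqual(int left_type, int right_type) { | |
| //     return right_type == ODDBALL_TYPE || | |
| //            left_type == ODDBALL_TYPE || | |
| //            left_type >= FIRST_SPEC_OBJECT_TYPE; | |
| //   } | |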
| 763 | |
| 764 // See call site for description. | |
| 765 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | |
| 766 Register left, | |
| 767 Register right, | |
| 768 FPRegister left_d, | |
| 769 FPRegister right_d, | |
| 770 Register scratch, | |
| 771 Label* slow, | |
| 772 bool strict) { | |
| 773 ASSERT(!AreAliased(left, right, scratch)); | |
| 774 ASSERT(!AreAliased(left_d, right_d)); | |
| 775 ASSERT((left.is(x0) && right.is(x1)) || | |
| 776 (right.is(x0) && left.is(x1))); | |
| 777 Register result = x0; | |
| 778 | |
| 779 Label right_is_smi, done; | |
| 780 __ JumpIfSmi(right, &right_is_smi); | |
| 781 | |
| 782 // Left is the smi. Check whether right is a heap number. | |
| 783 if (strict) { | |
| 784 // If right is not a number and left is a smi, then strict equality cannot | |
| 785 // succeed. Return non-equal. | |
| 786 Label is_heap_number; | |
| 787 __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, | |
| 788 &is_heap_number); | |
| 789 // Register right is a non-zero pointer, which is a valid NOT_EQUAL result. | |
| 790 if (!right.is(result)) { | |
| 791 __ Mov(result, NOT_EQUAL); | |
| 792 } | |
| 793 __ Ret(); | |
| 794 __ Bind(&is_heap_number); | |
| 795 } else { | |
| 796 // Smi compared non-strictly with a non-smi, non-heap-number. Call the | |
| 797 // runtime. | |
| 798 __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow); | |
| 799 } | |
| 800 | |
| 801 // Left is the smi. Right is a heap number. Load right value into right_d, and | |
| 802 // convert left smi into double in left_d. | |
| 803 __ Ldr(right_d, FieldMemOperand(right, HeapNumber::kValueOffset)); | |
| 804 __ SmiUntagToDouble(left_d, left); | |
| 805 __ B(&done); | |
| 806 | |
| 807 __ Bind(&right_is_smi); | |
| 808 // Right is a smi. Check whether the non-smi left is a heap number. | |
| 809 if (strict) { | |
| 810 // If left is not a number and right is a smi then strict equality cannot | |
| 811 // succeed. Return non-equal. | |
| 812 Label is_heap_number; | |
| 813 __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, | |
| 814 &is_heap_number); | |
| 815 // Register left is a non-zero pointer, which is a valid NOT_EQUAL result. | |
| 816 if (!left.is(result)) { | |
| 817 __ Mov(result, NOT_EQUAL); | |
| 818 } | |
| 819 __ Ret(); | |
| 820 __ Bind(&is_heap_number); | |
| 821 } else { | |
| 822 // Smi compared non-strictly with a non-smi, non-heap-number. Call the | |
| 823 // runtime. | |
| 824 __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow); | |
| 825 } | |
| 826 | |
| 827 // Right is the smi. Left is a heap number. Load left value into left_d, and | |
| 828 // convert right smi into double in right_d. | |
| 829 __ Ldr(left_d, FieldMemOperand(left, HeapNumber::kValueOffset)); | |
| 830 __ SmiUntagToDouble(right_d, right); | |
| 831 | |
| 832 // Fall through to both_loaded_as_doubles. | |
| 833 __ Bind(&done); | |
| 834 } | |
| 835 | |
| 836 | |
| 837 // Fast negative check for internalized-to-internalized equality. | |
| 838 // See call site for description. | |
| 839 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | |
| 840 Register left, | |
| 841 Register right, | |
| 842 Register left_map, | |
| 843 Register right_map, | |
| 844 Register left_type, | |
| 845 Register right_type, | |
| 846 Label* possible_strings, | |
| 847 Label* not_both_strings) { | |
| 848 ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type)); | |
| 849 Register result = x0; | |
| 850 | |
| 851 Label object_test; | |
| 852 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
| 853 // TODO(all): reexamine this branch sequence for optimisation wrt branch | |
| 854 // prediction. | |
| 855 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test); | |
| 856 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings); | |
| 857 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings); | |
| 858 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings); | |
| 859 | |
| 860 // Both are internalized. We already checked that they weren't the same | |
| 861 // pointer, so they are not equal. | |
| 862 __ Mov(result, NOT_EQUAL); | |
| 863 __ Ret(); | |
| 864 | |
| 865 __ Bind(&object_test); | |
| 866 | |
| 867 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); | |
| 868 | |
| 869 // If right >= FIRST_SPEC_OBJECT_TYPE, test left. | |
| 870 // Otherwise, right < FIRST_SPEC_OBJECT_TYPE, so set lt condition. | |
| 871 __ Ccmp(left_type, FIRST_SPEC_OBJECT_TYPE, NFlag, ge); | |
| 872 | |
| 873 __ B(lt, not_both_strings); | |
| 874 | |
| 875 // If both objects are undetectable, they are equal. Otherwise, they are not | |
| 876 // equal, since they are different objects and an object is not equal to | |
| 877 // undefined. | |
| 878 | |
| 879 // We return from here, so we are free to corrupt right_type and left_type. | |
| 880 Register right_bitfield = right_type; | |
| 881 Register left_bitfield = left_type; | |
| 882 __ Ldrb(right_bitfield, FieldMemOperand(right_map, Map::kBitFieldOffset)); | |
| 883 __ Ldrb(left_bitfield, FieldMemOperand(left_map, Map::kBitFieldOffset)); | |
| 884 __ And(result, right_bitfield, left_bitfield); | |
| 885 __ And(result, result, 1 << Map::kIsUndetectable); | |
| 886 __ Eor(result, result, 1 << Map::kIsUndetectable); | |
| 887 __ Ret(); | |
| 888 } | |
| 889 | |
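| // The final And/And/Eor sequence is branch-free: it leaves 0 (EQUAL) in the | |
| // result iff both bitfields have Map::kIsUndetectable set. Roughly, in C++ | |
| // (illustrative only): | |
| // | |
| //   int ObjectTestResult(int left_bits, int right_bits) { | |
| //     int mask = 1 << Map::kIsUndetectable; | |
| //     int both = left_bits & right_bits & mask; | |
| //     return both ^ mask;  // 0 (EQUAL) iff both are undetectable | |
| //   } | |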
| 890 | |
| 891 static void ICCompareStub_CheckInputType(MacroAssembler* masm, | |
| 892 Register input, | |
| 893 Register scratch, | |
| 894 CompareIC::State expected, | |
| 895 Label* fail) { | |
| 896 Label ok; | |
| 897 if (expected == CompareIC::SMI) { | |
| 898 __ JumpIfNotSmi(input, fail); | |
| 899 } else if (expected == CompareIC::NUMBER) { | |
| 900 __ JumpIfSmi(input, &ok); | |
| 901 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, | |
| 902 DONT_DO_SMI_CHECK); | |
| 903 } | |
| 904 // We could be strict about internalized/non-internalized here, but as long as | |
| 905 // hydrogen doesn't care, the stub doesn't have to care either. | |
| 906 __ Bind(&ok); | |
| 907 } | |
| 908 | |
| 909 | |
| 910 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { | |
| 911 Register lhs = x1; | |
| 912 Register rhs = x0; | |
| 913 Register result = x0; | |
| 914 Condition cond = GetCondition(); | |
| 915 | |
| 916 Label miss; | |
| 917 ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss); | |
| 918 ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss); | |
| 919 | |
| 920 Label slow; // Call builtin. | |
| 921 Label not_smis, both_loaded_as_doubles; | |
| 922 Label not_two_smis, smi_done; | |
| 923 __ JumpIfEitherNotSmi(lhs, rhs, ¬_two_smis); | |
| 924 __ SmiUntag(lhs); | |
| 925 __ Sub(result, lhs, Operand::UntagSmi(rhs)); | |
| 926 __ Ret(); | |
| 927 | |
| 928 __ Bind(¬_two_smis); | |
| 929 | |
| 930 // NOTICE! This code is only reached after a smi-fast-case check, so it is | |
| 931 // certain that at least one operand isn't a smi. | |
| 932 | |
| 933 // Handle the case where the objects are identical. Either returns the answer | |
| 934 // or goes to slow. Only falls through if the objects were not identical. | |
| 935 EmitIdenticalObjectComparison(masm, lhs, rhs, x10, d0, &slow, cond); | |
| 936 | |
| 937 // If either is a smi (we know that at least one is not a smi), then they can | |
| 938 // only be strictly equal if the other is a HeapNumber. | |
| 939 __ JumpIfBothNotSmi(lhs, rhs, ¬_smis); | |
| 940 | |
| 941 // Exactly one operand is a smi. EmitSmiNonsmiComparison generates code that | |
| 942 // can: | |
| 943 // 1) Return the answer. | |
| 944 // 2) Branch to the slow case. | |
| 945 // 3) Fall through to both_loaded_as_doubles. | |
| 946 // In case 3, we have found out that we were dealing with a number-number | |
| 947 // comparison. The double values of the numbers have been loaded, right into | |
| 948 // rhs_d, left into lhs_d. | |
| 949 FPRegister rhs_d = d0; | |
| 950 FPRegister lhs_d = d1; | |
| 951 EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict()); | |
| 952 | |
| 953 __ Bind(&both_loaded_as_doubles); | |
| 954 // The arguments have been converted to doubles and stored in rhs_d and | |
| 955 // lhs_d. | |
| 956 Label nan; | |
| 957 __ Fcmp(lhs_d, rhs_d); | |
| 958 __ B(vs, &nan); // Overflow flag set if either is NaN. | |
| 959 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); | |
| 960 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). | |
| 961 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. | |
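| // Branch-free three-way compare; in C++ terms: | |
| //   result = (lhs_d >= rhs_d) ? ((lhs_d > rhs_d) ? 1 : 0) : -1; | |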
| 962 __ Ret(); | |
| 963 | |
| 964 __ Bind(&nan); | |
| 965 // Left and/or right is a NaN. Load the result register with whatever makes | |
| 966 // the comparison fail, since comparisons with NaN always fail (except ne, | |
| 967 // which is filtered out at a higher level.) | |
| 968 ASSERT(cond != ne); | |
| 969 if ((cond == lt) || (cond == le)) { | |
| 970 __ Mov(result, GREATER); | |
| 971 } else { | |
| 972 __ Mov(result, LESS); | |
| 973 } | |
| 974 __ Ret(); | |
| 975 | |
| 976 __ Bind(¬_smis); | |
| 977 // At this point we know we are dealing with two different objects, and | |
| 978 // neither of them is a smi. The objects are in rhs_ and lhs_. | |
| 979 | |
| 980 // Load the maps and types of the objects. | |
| 981 Register rhs_map = x10; | |
| 982 Register rhs_type = x11; | |
| 983 Register lhs_map = x12; | |
| 984 Register lhs_type = x13; | |
| 985 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
| 986 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
| 987 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); | |
| 988 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); | |
| 989 | |
| 990 if (strict()) { | |
| 991 // This emits a non-equal return sequence for some object types, or falls | |
| 992 // through if it was not lucky. | |
| 993 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs, lhs_type, rhs_type, x14); | |
| 994 } | |
| 995 | |
| 996 Label check_for_internalized_strings; | |
| 997 Label flat_string_check; | |
| 998 // Check for heap number comparison. Branch to earlier double comparison code | |
| 999 // if they are heap numbers, otherwise, branch to internalized string check. | |
| 1000 __ Cmp(rhs_type, HEAP_NUMBER_TYPE); | |
| 1001 __ B(ne, &check_for_internalized_strings); | |
| 1002 __ Cmp(lhs_map, rhs_map); | |
| 1003 | |
| 1004 // If the maps aren't equal, lhs_ is not a heap number, so the operands are | |
| 1005 // not two heap numbers. Branch to the flat string check. | |
| 1006 __ B(ne, &flat_string_check); | |
| 1007 | |
| 1008 // Both lhs_ and rhs_ are heap numbers. Load them and branch to the double | |
| 1009 // comparison code. | |
| 1010 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 1011 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 1012 __ B(&both_loaded_as_doubles); | |
| 1013 | |
| 1014 __ Bind(&check_for_internalized_strings); | |
| 1015 // In the strict case, the EmitStrictTwoHeapObjectCompare already took care | |
| 1016 // of internalized strings. | |
| 1017 if ((cond == eq) && !strict()) { | |
| 1018 // Returns an answer for two internalized strings or two detectable objects. | |
| 1019 // Otherwise branches to the string case or the not-both-strings case. | |
| 1020 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, lhs_map, rhs_map, | |
| 1021 lhs_type, rhs_type, | |
| 1022 &flat_string_check, &slow); | |
| 1023 } | |
| 1024 | |
| 1025 // Check for both being sequential ASCII strings, and inline if that is the | |
| 1026 // case. | |
| 1027 __ Bind(&flat_string_check); | |
| 1028 __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14, | |
| 1029 x15, &slow); | |
| 1030 | |
| 1031 Isolate* isolate = masm->isolate(); | |
| 1032 __ IncrementCounter(isolate->counters()->string_compare_native(), 1, x10, | |
| 1033 x11); | |
| 1034 if (cond == eq) { | |
| 1035 StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs, | |
| 1036 x10, x11, x12); | |
| 1037 } else { | |
| 1038 StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs, | |
| 1039 x10, x11, x12, x13); | |
| 1040 } | |
| 1041 | |
| 1042 // Never fall through to here. | |
| 1043 if (FLAG_debug_code) { | |
| 1044 __ Unreachable(); | |
| 1045 } | |
| 1046 | |
| 1047 __ Bind(&slow); | |
| 1048 | |
| 1049 __ Push(lhs, rhs); | |
| 1050 // Figure out which native to call and set up the arguments. | |
| 1051 Builtins::JavaScript native; | |
| 1052 if (cond == eq) { | |
| 1053 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | |
| 1054 } else { | |
| 1055 native = Builtins::COMPARE; | |
| 1056 int ncr; // NaN compare result | |
| 1057 if ((cond == lt) || (cond == le)) { | |
| 1058 ncr = GREATER; | |
| 1059 } else { | |
| 1060 ASSERT((cond == gt) || (cond == ge)); // remaining cases | |
| 1061 ncr = LESS; | |
| 1062 } | |
| 1063 __ Mov(x10, Operand(Smi::FromInt(ncr))); | |
| 1064 __ Push(x10); | |
| 1065 } | |
| 1066 | |
| 1067 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | |
| 1068 // tagged as a small integer. | |
| 1069 __ InvokeBuiltin(native, JUMP_FUNCTION); | |
| 1070 | |
| 1071 __ Bind(&miss); | |
| 1072 GenerateMiss(masm); | |
| 1073 } | |
| 1074 | |
| 1075 | |
| 1076 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | |
| 1077 // Preserve caller-saved registers x0-x7 and x10-x15. We don't care if x8, x9, | |
| 1078 // ip0 and ip1 are corrupted by the call into C. | |
| 1079 CPURegList saved_regs = kCallerSaved; | |
| 1080 saved_regs.Remove(ip0); | |
| 1081 saved_regs.Remove(ip1); | |
| 1082 saved_regs.Remove(x8); | |
| 1083 saved_regs.Remove(x9); | |
| 1084 | |
| 1085 // We don't allow a GC during a store buffer overflow so there is no need to | |
| 1086 // store the registers in any particular way, but we do have to store and | |
| 1087 // restore them. | |
| 1088 __ PushCPURegList(saved_regs); | |
| 1089 if (save_doubles_ == kSaveFPRegs) { | |
| 1090 __ PushCPURegList(kCallerSavedFP); | |
| 1091 } | |
| 1092 | |
| 1093 AllowExternalCallThatCantCauseGC scope(masm); | |
| 1094 __ Mov(x0, Operand(ExternalReference::isolate_address(masm->isolate()))); | |
| 1095 __ CallCFunction( | |
| 1096 ExternalReference::store_buffer_overflow_function(masm->isolate()), | |
| 1097 1, 0); | |
| 1098 | |
| 1099 if (save_doubles_ == kSaveFPRegs) { | |
| 1100 __ PopCPURegList(kCallerSavedFP); | |
| 1101 } | |
| 1102 __ PopCPURegList(saved_regs); | |
| 1103 __ Ret(); | |
| 1104 } | |
| 1105 | |
| 1106 | |
| 1107 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | |
| 1108 Isolate* isolate) { | |
| 1109 StoreBufferOverflowStub stub1(kDontSaveFPRegs); | |
| 1110 stub1.GetCode(isolate); | |
| 1111 StoreBufferOverflowStub stub2(kSaveFPRegs); | |
| 1112 stub2.GetCode(isolate); | |
| 1113 } | |
| 1114 | |
| 1115 | |
| 1116 void MathPowStub::Generate(MacroAssembler* masm) { | |
| 1117 // Stack on entry: | |
| 1118 // jssp[0]: Exponent (as a tagged value). | |
| 1119 // jssp[1]: Base (as a tagged value). | |
| 1120 // | |
| 1121 // The (tagged) result will be returned in x0, as a heap number. | |
| 1122 | |
| 1123 Register result_tagged = x0; | |
| 1124 Register base_tagged = x10; | |
| 1125 Register exponent_tagged = x11; | |
| 1126 Register exponent_integer = x12; | |
| 1127 Register scratch1 = x14; | |
| 1128 Register scratch0 = x15; | |
| 1129 Register saved_lr = x19; | |
| 1130 FPRegister result_double = d0; | |
| 1131 FPRegister base_double = d0; | |
| 1132 FPRegister exponent_double = d1; | |
| 1133 FPRegister base_double_copy = d2; | |
| 1134 FPRegister scratch1_double = d6; | |
| 1135 FPRegister scratch0_double = d7; | |
| 1136 | |
| 1137 // A fast-path for integer exponents. | |
| 1138 Label exponent_is_smi, exponent_is_integer; | |
| 1139 // Bail out to runtime. | |
| 1140 Label call_runtime; | |
| 1141 // Allocate a heap number for the result, and return it. | |
| 1142 Label done; | |
| 1143 | |
| 1144 // Unpack the inputs. | |
| 1145 if (exponent_type_ == ON_STACK) { | |
| 1146 Label base_is_smi; | |
| 1147 Label unpack_exponent; | |
| 1148 | |
| 1149 __ Pop(exponent_tagged, base_tagged); | |
| 1150 | |
| 1151 __ JumpIfSmi(base_tagged, &base_is_smi); | |
| 1152 __ JumpIfNotHeapNumber(base_tagged, &call_runtime); | |
| 1153 // base_tagged is a heap number, so load its double value. | |
| 1154 __ Ldr(base_double, FieldMemOperand(base_tagged, HeapNumber::kValueOffset)); | |
| 1155 __ B(&unpack_exponent); | |
| 1156 __ Bind(&base_is_smi); | |
| 1157 // base_tagged is a SMI, so untag it and convert it to a double. | |
| 1158 __ SmiUntagToDouble(base_double, base_tagged); | |
| 1159 | |
| 1160 __ Bind(&unpack_exponent); | |
| 1161 // x10 base_tagged The tagged base (input). | |
| 1162 // x11 exponent_tagged The tagged exponent (input). | |
| 1163 // d0 base_double The base as a double. | |
| 1164 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); | |
| 1165 __ JumpIfNotHeapNumber(exponent_tagged, &call_runtime); | |
| 1166 // exponent_tagged is a heap number, so load its double value. | |
| 1167 __ Ldr(exponent_double, | |
| 1168 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); | |
| 1169 } else if (exponent_type_ == TAGGED) { | |
| 1170 __ JumpIfSmi(exponent_tagged, &exponent_is_smi); | |
| 1171 __ Ldr(exponent_double, | |
| 1172 FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset)); | |
| 1173 } | |
| 1174 | |
| 1175 // Handle double (heap number) exponents. | |
| 1176 if (exponent_type_ != INTEGER) { | |
| 1177 // Detect integer exponents stored as doubles and handle those in the | |
| 1178 // integer fast-path. | |
| 1179 __ TryConvertDoubleToInt64(exponent_integer, exponent_double, | |
| 1180 scratch0_double, &exponent_is_integer); | |
| 1181 | |
| 1182 if (exponent_type_ == ON_STACK) { | |
| 1183 FPRegister half_double = d3; | |
| 1184 FPRegister minus_half_double = d4; | |
| 1185 FPRegister zero_double = d5; | |
| 1186 // Detect square root case. Crankshaft detects constant +/-0.5 at compile | |
| 1187 // time and uses DoMathPowHalf instead. We then skip this check for | |
| 1188 // non-constant cases of +/-0.5, as these rarely occur. | |
| 1189 | |
| 1190 __ Fmov(minus_half_double, -0.5); | |
| 1191 __ Fmov(half_double, 0.5); | |
| 1192 __ Fcmp(minus_half_double, exponent_double); | |
| 1193 __ Fccmp(half_double, exponent_double, NZFlag, ne); | |
| 1194 // Condition flags at this point: | |
| 1195 // 0.5: nZCv // Identified by eq && pl | |
| 1196 // -0.5: NZcv // Identified by eq && mi | |
| 1197 // other: ?z?? // Identified by ne | |
| 1198 __ B(ne, &call_runtime); | |
| 1199 | |
| 1200 // The exponent is 0.5 or -0.5. | |
| 1201 | |
| 1202 // Given that exponent is known to be either 0.5 or -0.5, the following | |
| 1203 // special cases could apply (according to ECMA-262 15.8.2.13): | |
| 1204 // | |
| 1205 // base.isNaN(): The result is NaN. | |
| 1206 // (base == +INFINITY) || (base == -INFINITY) | |
| 1207 // exponent == 0.5: The result is +INFINITY. | |
| 1208 // exponent == -0.5: The result is +0. | |
| 1209 // (base == +0) || (base == -0) | |
| 1210 // exponent == 0.5: The result is +0. | |
| 1211 // exponent == -0.5: The result is +INFINITY. | |
| 1212 // (base < 0) && base.isFinite(): The result is NaN. | |
| 1213 // | |
| 1214 // Fsqrt (and Fdiv for the -0.5 case) can handle all of those except | |
| 1215 // where base is -INFINITY or -0. | |
| 1216 | |
| 1217 // Add +0 to base. This has no effect other than turning -0 into +0. | |
| 1218 __ Fmov(zero_double, 0.0); | |
| 1219 __ Fadd(base_double, base_double, zero_double); | |
| 1220 // The operation -0+0 results in +0 in all cases except where the | |
| 1221 // FPCR rounding mode is 'round towards minus infinity' (RM). The | |
| 1222 // A64 simulator does not currently simulate FPCR (where the rounding | |
| 1223 // mode is set), so test the operation with some debug code. | |
| 1224 if (masm->emit_debug_code()) { | |
| 1225 Register temp = masm->Tmp1(); | |
| 1226 // d5 zero_double The value +0.0 as a double. | |
| 1227 __ Fneg(scratch0_double, zero_double); | |
| 1228 // Verify that we correctly generated +0.0 and -0.0. | |
| 1229 // bits(+0.0) = 0x0000000000000000 | |
| 1230 // bits(-0.0) = 0x8000000000000000 | |
| 1231 __ Fmov(temp, zero_double); | |
| 1232 __ CheckRegisterIsClear(temp, kCouldNotGenerateZero); | |
| 1233 __ Fmov(temp, scratch0_double); | |
| 1234 __ Eor(temp, temp, kDSignMask); | |
| 1235 __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero); | |
| 1236 // Check that -0.0 + 0.0 == +0.0. | |
| 1237 __ Fadd(scratch0_double, scratch0_double, zero_double); | |
| 1238 __ Fmov(temp, scratch0_double); | |
| 1239 __ CheckRegisterIsClear(temp, kExpectedPositiveZero); | |
| 1240 } | |
| 1241 | |
| 1242 // If base is -INFINITY, make it +INFINITY. | |
| 1243 // * Calculate base - base: All infinities will become NaNs since both | |
| 1244 // -INFINITY+INFINITY and +INFINITY-INFINITY are NaN in A64. | |
| 1245 // * If the result is NaN, calculate abs(base). | |
| 1246 __ Fsub(scratch0_double, base_double, base_double); | |
| 1247 __ Fcmp(scratch0_double, 0.0); | |
| 1248 __ Fabs(scratch1_double, base_double); | |
| 1249 __ Fcsel(base_double, scratch1_double, base_double, vs); | |
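| // In C++ terms: if (std::isnan(base - base)) base = std::fabs(base); | |
| // Only +/-infinity (and a NaN base) take the isnan path here, since | |
| // base - base == 0 for every finite base, and fabs is harmless for NaN. | |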
| 1250 | |
| 1251 // Calculate the square root of base. | |
| 1252 __ Fsqrt(result_double, base_double); | |
| 1253 __ Fcmp(exponent_double, 0.0); | |
| 1254 __ B(ge, &done); // Finish now for exponents of 0.5. | |
| 1255 // Find the inverse for exponents of -0.5. | |
| 1256 __ Fmov(scratch0_double, 1.0); | |
| 1257 __ Fdiv(result_double, scratch0_double, result_double); | |
| 1258 __ B(&done); | |
| 1259 } | |
| 1260 | |
| 1261 { | |
| 1262 AllowExternalCallThatCantCauseGC scope(masm); | |
| 1263 __ Mov(saved_lr, lr); | |
| 1264 __ CallCFunction( | |
| 1265 ExternalReference::power_double_double_function(masm->isolate()), | |
| 1266 0, 2); | |
| 1267 __ Mov(lr, saved_lr); | |
| 1268 __ B(&done); | |
| 1269 } | |
| 1270 | |
| 1271 // Handle SMI exponents. | |
| 1272 __ Bind(&exponent_is_smi); | |
| 1273 // x10 base_tagged The tagged base (input). | |
| 1274 // x11 exponent_tagged The tagged exponent (input). | |
| 1275 // d0 base_double The base as a double. | |
| 1276 __ SmiUntag(exponent_integer, exponent_tagged); | |
| 1277 } | |
| 1278 | |
| 1279 __ Bind(&exponent_is_integer); | |
| 1280 // x10 base_tagged The tagged base (input). | |
| 1281 // x11 exponent_tagged The tagged exponent (input). | |
| 1282 // x12 exponent_integer The exponent as an integer. | |
| 1283 // d0 base_double The base as a double. | |
| 1284 | |
| 1285 // Find abs(exponent). For negative exponents, we can find the inverse later. | |
| 1286 Register exponent_abs = x13; | |
| 1287 __ Cmp(exponent_integer, 0); | |
| 1288 __ Cneg(exponent_abs, exponent_integer, mi); | |
| 1289 // x13 exponent_abs The value of abs(exponent_integer). | |
| 1290 | |
| 1291 // Repeatedly multiply to calculate the power. | |
| 1292 // result = 1.0; | |
| 1293 // For each bit n (exponent_integer{n}) { | |
| 1294 // if (exponent_integer{n}) { | |
| 1295 // result *= base; | |
| 1296 // } | |
| 1297 // base *= base; | |
| 1298 // if (remaining bits in exponent_integer are all zero) { | |
| 1299 // break; | |
| 1300 // } | |
| 1301 // } | |
| 1302 Label power_loop, power_loop_entry, power_loop_exit; | |
| 1303 __ Fmov(scratch1_double, base_double); | |
| 1304 __ Fmov(base_double_copy, base_double); | |
| 1305 __ Fmov(result_double, 1.0); | |
| 1306 __ B(&power_loop_entry); | |
| 1307 | |
| 1308 __ Bind(&power_loop); | |
| 1309 __ Fmul(scratch1_double, scratch1_double, scratch1_double); | |
| 1310 __ Lsr(exponent_abs, exponent_abs, 1); | |
| 1311 __ Cbz(exponent_abs, &power_loop_exit); | |
| 1312 | |
| 1313 __ Bind(&power_loop_entry); | |
| 1314 __ Tbz(exponent_abs, 0, &power_loop); | |
| 1315 __ Fmul(result_double, result_double, scratch1_double); | |
| 1316 __ B(&power_loop); | |
| 1317 | |
| 1318 __ Bind(&power_loop_exit); | |
| 1319 | |
| 1320 // If the exponent was positive, result_double holds the result. | |
| 1321 __ Tbz(exponent_integer, kXSignBit, &done); | |
| 1322 | |
| 1323 // The exponent was negative, so find the inverse. | |
| 1324 __ Fmov(scratch0_double, 1.0); | |
| 1325 __ Fdiv(result_double, scratch0_double, result_double); | |
| 1326 // ECMA-262 only requires Math.pow to return an 'implementation-dependent | |
| 1327 // approximation' of base^exponent. However, mjsunit/math-pow uses Math.pow | |
| 1328 // to calculate the subnormal value 2^-1074. This method of calculating | |
| 1329 // negative powers doesn't work because 2^1074 overflows to infinity. To | |
| 1330 // catch this corner-case, we bail out if the result was 0. (This can only | |
| 1331 // occur if the divisor is infinity or the base is zero.) | |
| 1332 __ Fcmp(result_double, 0.0); | |
| 1333 __ B(&done, ne); | |
| 1334 | |
| 1335 if (exponent_type_ == ON_STACK) { | |
| 1336 // Bail out to runtime code. | |
| 1337 __ Bind(&call_runtime); | |
| 1338 // Put the arguments back on the stack. | |
| 1339 __ Push(base_tagged, exponent_tagged); | |
| 1340 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1); | |
| 1341 | |
| 1342 // Return. | |
| 1343 __ Bind(&done); | |
| 1344 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1); | |
| 1345 __ Str(result_double, | |
| 1346 FieldMemOperand(result_tagged, HeapNumber::kValueOffset)); | |
| 1347 ASSERT(result_tagged.is(x0)); | |
| 1348 __ IncrementCounter( | |
| 1349 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); | |
| 1350 __ Ret(); | |
| 1351 } else { | |
| 1352 AllowExternalCallThatCantCauseGC scope(masm); | |
| 1353 __ Mov(saved_lr, lr); | |
| 1354 __ Fmov(base_double, base_double_copy); | |
| 1355 __ Scvtf(exponent_double, exponent_integer); | |
| 1356 __ CallCFunction( | |
| 1357 ExternalReference::power_double_double_function(masm->isolate()), | |
| 1358 0, 2); | |
| 1359 __ Mov(lr, saved_lr); | |
| 1360 __ Bind(&done); | |
| 1361 __ IncrementCounter( | |
| 1362 masm->isolate()->counters()->math_pow(), 1, scratch0, scratch1); | |
| 1363 __ Ret(); | |
| 1364 } | |
| 1365 } | |
| 1366 | |
| 1367 | |
| 1368 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | |
| 1369 // It is important that the following stubs are generated in this order | |
| 1370 // because pregenerated stubs can only call other pregenerated stubs. | |
| 1371 // RecordWriteStub uses StoreBufferOverflowStub, which in turn uses | |
| 1372 // CEntryStub. | |
| 1373 CEntryStub::GenerateAheadOfTime(isolate); | |
| 1374 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | |
| 1375 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | |
| 1376 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | |
| 1377 CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | |
| 1378 BinaryOpICStub::GenerateAheadOfTime(isolate); | |
| 1379 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | |
| 1380 } | |
| 1381 | |
| 1382 | |
| 1383 void CodeStub::GenerateFPStubs(Isolate* isolate) { | |
| 1384 // Floating-point code doesn't get special handling in A64, so there's | |
| 1385 // nothing to do here. | |
| 1386 USE(isolate); | |
| 1387 } | |
| 1388 | |
| 1389 | |
| 1390 static void JumpIfOOM(MacroAssembler* masm, | |
| 1391 Register value, | |
| 1392 Register scratch, | |
| 1393 Label* oom_label) { | |
| 1394 STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); | |
| 1395 STATIC_ASSERT(kFailureTag == 3); | |
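| // A failure is tagged with kFailureTag (0b11) in its low two bits, and an | |
| // out-of-memory failure has type 0b11 in the next two bits, so the low four | |
| // bits of an out-of-memory failure are 0b1111 (0xf). | |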
| 1396 __ And(scratch, value, 0xf); | |
| 1397 __ Cmp(scratch, 0xf); | |
| 1398 __ B(eq, oom_label); | |
| 1399 } | |
| 1400 | |
| 1401 | |
| 1402 bool CEntryStub::NeedsImmovableCode() { | |
| 1403 // CEntryStub stores the return address on the stack before calling into | |
| 1404 // C++ code. In some cases, the VM accesses this address, but it is not used | |
| 1405 // when the C++ code returns to the stub because LR holds the return address | |
| 1406 // in AAPCS64. If the stub is moved (perhaps during a GC), we could end up | |
| 1407 // returning to dead code. | |
| 1408 // TODO(jbramley): Whilst this is the only analysis that makes sense, I can't | |
| 1409 // find any comment to confirm this, and I don't hit any crashes whatever | |
| 1410 // this function returns. The analysis should be properly confirmed. | |
| 1411 return true; | |
| 1412 } | |
| 1413 | |
| 1414 | |
| 1415 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | |
| 1416 CEntryStub stub(1, kDontSaveFPRegs); | |
| 1417 stub.GetCode(isolate); | |
| 1418 CEntryStub stub_fp(1, kSaveFPRegs); | |
| 1419 stub_fp.GetCode(isolate); | |
| 1420 } | |
| 1421 | |
| 1422 | |
| 1423 void CEntryStub::GenerateCore(MacroAssembler* masm, | |
| 1424 Label* throw_normal, | |
| 1425 Label* throw_termination, | |
| 1426 Label* throw_out_of_memory, | |
| 1427 bool do_gc, | |
| 1428 bool always_allocate) { | |
| 1429 // x0 : Result parameter for PerformGC, if do_gc is true. | |
| 1430 // x21 : argv | |
| 1431 // x22 : argc | |
| 1432 // x23 : target | |
| 1433 // | |
| 1434 // The stack (on entry) holds the arguments and the receiver, with the | |
| 1435 // receiver at the highest address: | |
| 1436 // | |
| 1437 // argv[8]: receiver | |
| 1438 // argv -> argv[0]: arg[argc-2] | |
| 1439 // ... ... | |
| 1440 // argv[...]: arg[1] | |
| 1441 // argv[...]: arg[0] | |
| 1442 // | |
| 1443 // Immediately below (after) this is the exit frame, as constructed by | |
| 1444 // EnterExitFrame: | |
| 1445 // fp[8]: CallerPC (lr) | |
| 1446 // fp -> fp[0]: CallerFP (old fp) | |
| 1447 // fp[-8]: Space reserved for SPOffset. | |
| 1448 // fp[-16]: CodeObject() | |
| 1449 // csp[...]: Saved doubles, if save_doubles is true. | |
| 1450 // csp[32]: Alignment padding, if necessary. | |
| 1451 // csp[24]: Preserved x23 (used for target). | |
| 1452 // csp[16]: Preserved x22 (used for argc). | |
| 1453 // csp[8]: Preserved x21 (used for argv). | |
| 1454 // csp -> csp[0]: Space reserved for the return address. | |
| 1455 // | |
| 1456 // After a successful call, the exit frame, preserved registers (x21-x23) and | |
| 1457 // the arguments (including the receiver) are dropped or popped as | |
| 1458 // appropriate. The stub then returns. | |
| 1459 // | |
| 1460 // After an unsuccessful call, the exit frame and suchlike are left | |
| 1461 // untouched, and the stub either throws an exception by jumping to one of | |
| 1462 // the provided throw_ labels, or it falls through. The failure details are | |
| 1463 // passed through in x0. | |
| 1464 ASSERT(csp.Is(__ StackPointer())); | |
| 1465 | |
| 1466 Isolate* isolate = masm->isolate(); | |
| 1467 | |
| 1468 const Register& argv = x21; | |
| 1469 const Register& argc = x22; | |
| 1470 const Register& target = x23; | |
| 1471 | |
| 1472 if (do_gc) { | |
| 1473 // Call Runtime::PerformGC, passing x0 (the result parameter for | |
| 1474 // PerformGC) and x1 (the isolate). | |
| 1475 __ Mov(x1, Operand(ExternalReference::isolate_address(masm->isolate()))); | |
| 1476 __ CallCFunction( | |
| 1477 ExternalReference::perform_gc_function(isolate), 2, 0); | |
| 1478 } | |
| 1479 | |
| 1480 ExternalReference scope_depth = | |
| 1481 ExternalReference::heap_always_allocate_scope_depth(isolate); | |
| 1482 if (always_allocate) { | |
| 1483 __ Mov(x10, Operand(scope_depth)); | |
| 1484 __ Ldr(x11, MemOperand(x10)); | |
| 1485 __ Add(x11, x11, 1); | |
| 1486 __ Str(x11, MemOperand(x10)); | |
| 1487 } | |
| 1488 | |
| 1489 // Prepare AAPCS64 arguments to pass to the builtin. | |
| 1490 __ Mov(x0, argc); | |
| 1491 __ Mov(x1, argv); | |
| 1492 __ Mov(x2, Operand(ExternalReference::isolate_address(isolate))); | |
| 1493 | |
| 1494 // Store the return address on the stack, in the space previously allocated | |
| 1495 // by EnterExitFrame. The return address is queried by | |
| 1496 // ExitFrame::GetStateForFramePointer. | |
| 1497 Label return_location; | |
| 1498 __ Adr(x12, &return_location); | |
| 1499 __ Poke(x12, 0); | |
| 1500 if (__ emit_debug_code()) { | |
| 1501 // Verify that the slot one word below the address stored at fp[kSPOffset] | |
| 1502 // holds the return location (currently in x12). | |
| 1503 Register temp = masm->Tmp1(); | |
| 1504 __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset)); | |
| 1505 __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes))); | |
| 1506 __ Cmp(temp, x12); | |
| 1507 __ Check(eq, kReturnAddressNotFoundInFrame); | |
| 1508 } | |
| 1509 | |
| 1510 // Call the builtin. | |
| 1511 __ Blr(target); | |
| 1512 __ Bind(&return_location); | |
| 1513 const Register& result = x0; | |
| 1514 | |
| 1515 if (always_allocate) { | |
| 1516 __ Mov(x10, Operand(scope_depth)); | |
| 1517 __ Ldr(x11, MemOperand(x10)); | |
| 1518 __ Sub(x11, x11, 1); | |
| 1519 __ Str(x11, MemOperand(x10)); | |
| 1520 } | |
| 1521 | |
| 1522 // x0 result The return code from the call. | |
| 1523 // x21 argv | |
| 1524 // x22 argc | |
| 1525 // x23 target | |
| 1526 // | |
| 1527 // If all of the result bits matching kFailureTagMask are '1', the result is | |
| 1528 // a failure. Otherwise, it's an ordinary tagged object and the call was a | |
| 1529 // success. | |
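| // (Smis end in 0 and heap object pointers end in 01, so only failures can | |
| // have both low bits set.) | |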
| 1530 Label failure; | |
| 1531 __ And(x10, result, kFailureTagMask); | |
| 1532 __ Cmp(x10, kFailureTagMask); | |
| 1533 __ B(&failure, eq); | |
| 1534 | |
| 1535 // The call succeeded, so unwind the stack and return. | |
| 1536 | |
| 1537 // Save argc in x11, then restore the callee-saved registers x21-x23. | |
| 1538 __ Mov(x11, argc); | |
| 1539 | |
| 1540 __ Peek(argv, 1 * kPointerSize); | |
| 1541 __ Peek(argc, 2 * kPointerSize); | |
| 1542 __ Peek(target, 3 * kPointerSize); | |
| 1543 | |
| 1544 __ LeaveExitFrame(save_doubles_, x10, true); | |
| 1545 ASSERT(jssp.Is(__ StackPointer())); | |
| 1546 // Pop or drop the remaining stack slots and return from the stub. | |
| 1547 // jssp[24]: Arguments array (of size argc), including receiver. | |
| 1548 // jssp[16]: Preserved x23 (used for target). | |
| 1549 // jssp[8]: Preserved x22 (used for argc). | |
| 1550 // jssp[0]: Preserved x21 (used for argv). | |
| 1551 __ Drop(x11); | |
| 1552 __ Ret(); | |
| 1553 | |
| 1554 // The stack pointer is still csp if we aren't returning, and the frame | |
| 1555 // hasn't changed (except for the return address). | |
| 1556 __ SetStackPointer(csp); | |
| 1557 | |
| 1558 __ Bind(&failure); | |
| 1559 // The call failed, so check if we need to throw an exception, and fall | |
| 1560 // through (to retry) otherwise. | |
| 1561 | |
| 1562 Label retry; | |
| 1563 // x0 result The return code from the call, including the failure | |
| 1564 // code and details. | |
| 1565 // x21 argv | |
| 1566 // x22 argc | |
| 1567 // x23 target | |
| 1568 // Refer to the Failure class for details of the bit layout. | |
| 1569 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | |
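| // A failure with a zero type field (the bits just above the tag) is | |
| // therefore RETRY_AFTER_GC, so the Tst below sets the Z flag for that case. | |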
| 1570 __ Tst(result, kFailureTypeTagMask << kFailureTagSize); | |
| 1571 __ B(eq, &retry); // RETRY_AFTER_GC | |
| 1572 | |
| 1573 // Special handling of out-of-memory exceptions: Pass the failure result, | |
| 1574 // rather than the exception descriptor. | |
| 1575 JumpIfOOM(masm, result, x10, throw_out_of_memory); | |
| 1576 | |
| 1577 // Retrieve the pending exception. | |
| 1578 const Register& exception = result; | |
| 1579 const Register& exception_address = x11; | |
| 1580 __ Mov(exception_address, | |
| 1581 Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
| 1582 isolate))); | |
| 1583 __ Ldr(exception, MemOperand(exception_address)); | |
| 1584 | |
| 1585 // See if we just retrieved an OOM exception. | |
| 1586 JumpIfOOM(masm, exception, x10, throw_out_of_memory); | |
| 1587 | |
| 1588 // Clear the pending exception. | |
| 1589 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
| 1590 __ Str(x10, MemOperand(exception_address)); | |
| 1591 | |
| 1592 // x0 exception The exception descriptor. | |
| 1593 // x21 argv | |
| 1594 // x22 argc | |
| 1595 // x23 target | |
| 1596 | |
| 1597 // Special handling of termination exceptions, which are uncatchable by | |
| 1598 // JavaScript code. | |
| 1599 __ Cmp(exception, Operand(isolate->factory()->termination_exception())); | |
| 1600 __ B(eq, throw_termination); | |
| 1601 | |
| 1602 // Handle normal exception. | |
| 1603 __ B(throw_normal); | |
| 1604 | |
| 1605 __ Bind(&retry); | |
| 1606 // The result (x0) is passed through as the next PerformGC parameter. | |
| 1607 } | |
| 1608 | |
| 1609 | |
| 1610 void CEntryStub::Generate(MacroAssembler* masm) { | |
| 1611 // The Abort mechanism relies on CallRuntime, which in turn relies on | |
| 1612 // CEntryStub, so until this stub has been generated, we have to use a | |
| 1613 // fall-back Abort mechanism. | |
| 1614 // | |
| 1615 // Note that this stub must be generated before any use of Abort. | |
| 1616 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); | |
| 1617 | |
| 1618 ASM_LOCATION("CEntryStub::Generate entry"); | |
| 1619 ProfileEntryHookStub::MaybeCallEntryHook(masm); | |
| 1620 | |
| 1621 // Register parameters: | |
| 1622 // x0: argc (including receiver, untagged) | |
| 1623 // x1: target | |
| 1624 // | |
| 1625 // The stack on entry holds the arguments and the receiver, with the receiver | |
| 1626 // at the highest address: | |
| 1627 // | |
| 1628 // jssp[argc-1]: receiver | |
| 1629 // jssp[argc-2]: arg[argc-2] | |
| 1630 // ... ... | |
| 1631 // jssp[1]: arg[1] | |
| 1632 // jssp[0]: arg[0] | |
| 1633 // | |
| 1634 // The arguments are in reverse order, so that arg[argc-2] is actually the | |
| 1635 // first argument to the target function and arg[0] is the last. | |
| 1636 ASSERT(jssp.Is(__ StackPointer())); | |
| 1637 const Register& argc_input = x0; | |
| 1638 const Register& target_input = x1; | |
| 1639 | |
| 1640 // Calculate argv, argc and the target address, and store them in | |
| 1641 // callee-saved registers so we can retry the call without having to reload | |
| 1642 // these arguments. | |
| 1643 // TODO(jbramley): If the first call attempt succeeds in the common case (as | |
| 1644 // it should), then we might be better off putting these parameters directly | |
| 1645 // into their argument registers, rather than using callee-saved registers and | |
| 1646 // preserving them on the stack. | |
| 1647 const Register& argv = x21; | |
| 1648 const Register& argc = x22; | |
| 1649 const Register& target = x23; | |
| 1650 | |
| 1651 // Derive argv from the stack pointer so that it points to the first argument | |
| 1652 // (arg[argc-2]), or just below the receiver in case there are no arguments. | |
| 1653 // - Adjust for the arg[] array. | |
| 1654 Register temp_argv = x11; | |
| 1655 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); | |
| 1656 // - Adjust for the receiver. | |
| 1657 __ Sub(temp_argv, temp_argv, 1 * kPointerSize); | |
| 1658 | |
| 1659 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved | |
| 1660 // registers. | |
| 1661 FrameScope scope(masm, StackFrame::MANUAL); | |
| 1662 __ EnterExitFrame(save_doubles_, x10, 3); | |
| 1663 ASSERT(csp.Is(__ StackPointer())); | |
| 1664 | |
| 1665 // Poke callee-saved registers into reserved space. | |
| 1666 __ Poke(argv, 1 * kPointerSize); | |
| 1667 __ Poke(argc, 2 * kPointerSize); | |
| 1668 __ Poke(target, 3 * kPointerSize); | |
| 1669 | |
| 1670 // We normally only keep tagged values in callee-saved registers, as they | |
| 1671 // could be pushed onto the stack by called stubs and functions, and on the | |
| 1672 // stack they can confuse the GC. However, we're only calling C functions | |
| 1673 // which can push arbitrary data onto the stack anyway, and so the GC won't | |
| 1674 // examine that part of the stack. | |
| 1675 __ Mov(argc, argc_input); | |
| 1676 __ Mov(target, target_input); | |
| 1677 __ Mov(argv, temp_argv); | |
| 1678 | |
| 1679 Label throw_normal; | |
| 1680 Label throw_termination; | |
| 1681 Label throw_out_of_memory; | |
| 1682 | |
| 1683 // Call the runtime function. | |
| 1684 GenerateCore(masm, | |
| 1685 &throw_normal, | |
| 1686 &throw_termination, | |
| 1687 &throw_out_of_memory, | |
| 1688 false, | |
| 1689 false); | |
| 1690 | |
| 1691 // If successful, the previous GenerateCore will have returned to the | |
| 1692 // calling code. Otherwise, we fall through into the following. | |
| 1693 | |
| 1694 // Do space-specific GC and retry runtime call. | |
| 1695 GenerateCore(masm, | |
| 1696 &throw_normal, | |
| 1697 &throw_termination, | |
| 1698 &throw_out_of_memory, | |
| 1699 true, | |
| 1700 false); | |
| 1701 | |
| 1702 // Do full GC and retry runtime call one final time. | |
| 1703 __ Mov(x0, reinterpret_cast<uint64_t>(Failure::InternalError())); | |
| 1704 GenerateCore(masm, | |
| 1705 &throw_normal, | |
| 1706 &throw_termination, | |
| 1707 &throw_out_of_memory, | |
| 1708 true, | |
| 1709 true); | |
| 1710 | |
| 1711 // We didn't execute a return case, so the stack frame hasn't been updated | |
| 1712 // (except for the return address slot). However, we don't need to initialize | |
| 1713 // jssp because the throw method will immediately overwrite it when it | |
| 1714 // unwinds the stack. | |
| 1715 if (__ emit_debug_code()) { | |
| 1716 __ Mov(jssp, kDebugZapValue); | |
| 1717 } | |
| 1718 __ SetStackPointer(jssp); | |
| 1719 | |
| 1720 // Throw exceptions. | |
| 1721 // If we throw an exception, we can end up re-entering CEntryStub before we | |
| 1722 // pop the exit frame, so need to ensure that x21-x23 contain GC-safe values | |
| 1723 // here. | |
| 1724 __ Bind(&throw_out_of_memory); | |
| 1725 ASM_LOCATION("Throw out of memory"); | |
| 1726 __ Mov(argv, 0); | |
| 1727 __ Mov(argc, 0); | |
| 1728 __ Mov(target, 0); | |
| 1729 // Set external caught exception to false. | |
| 1730 Isolate* isolate = masm->isolate(); | |
| 1731 __ Mov(x2, Operand(ExternalReference(Isolate::kExternalCaughtExceptionAddress, | |
| 1732 isolate))); | |
| 1733 __ Str(xzr, MemOperand(x2)); | |
| 1734 | |
| 1735 // Set pending exception and x0 to out of memory exception. | |
| 1736 Label already_have_failure; | |
| 1737 JumpIfOOM(masm, x0, x10, &already_have_failure); | |
| 1738 Failure* out_of_memory = Failure::OutOfMemoryException(0x1); | |
| 1739 __ Mov(x0, Operand(reinterpret_cast<uint64_t>(out_of_memory))); | |
| 1740 __ Bind(&already_have_failure); | |
| 1741 __ Mov(x2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
| 1742 isolate))); | |
| 1743 __ Str(x0, MemOperand(x2)); | |
| 1744 // Fall through to the next label. | |
| 1745 | |
| 1746 __ Bind(&throw_termination); | |
| 1747 ASM_LOCATION("Throw termination"); | |
| 1748 __ Mov(argv, 0); | |
| 1749 __ Mov(argc, 0); | |
| 1750 __ Mov(target, 0); | |
| 1751 __ ThrowUncatchable(x0, x10, x11, x12, x13); | |
| 1752 | |
| 1753 __ Bind(&throw_normal); | |
| 1754 ASM_LOCATION("Throw normal"); | |
| 1755 __ Mov(argv, 0); | |
| 1756 __ Mov(argc, 0); | |
| 1757 __ Mov(target, 0); | |
| 1758 __ Throw(x0, x10, x11, x12, x13); | |
| 1759 } | |
| 1760 | |
| 1761 | |
| 1762 // This is the entry point from C++. 5 arguments are provided in x0-x4. | |
| 1763 // See, for example, the use of the CALL_GENERATED_CODE macro in src/execution.cc. | |
| 1764 // Input: | |
| 1765 // x0: code entry. | |
| 1766 // x1: function. | |
| 1767 // x2: receiver. | |
| 1768 // x3: argc. | |
| 1769 // x4: argv. | |
| 1770 // Output: | |
| 1771 // x0: result. | |
| 1772 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | |
| 1773 ASSERT(jssp.Is(__ StackPointer())); | |
| 1774 Register code_entry = x0; | |
| 1775 | |
| 1776 // Enable instruction instrumentation. This only works on the simulator, and | |
| 1777 // will have no effect on the model or real hardware. | |
| 1778 __ EnableInstrumentation(); | |
| 1779 | |
| 1780 Label invoke, handler_entry, exit; | |
| 1781 | |
| 1782 // Push callee-saved registers and synchronize the system stack pointer (csp) | |
| 1783 // and the JavaScript stack pointer (jssp). | |
| 1784 // | |
| 1785 // We must not write to jssp until after the PushCalleeSavedRegisters() | |
| 1786 // call, since jssp is itself a callee-saved register. | |
| 1787 __ SetStackPointer(csp); | |
| 1788 __ PushCalleeSavedRegisters(); | |
| 1789 __ Mov(jssp, csp); | |
| 1790 __ SetStackPointer(jssp); | |
| 1791 | |
| 1792 ProfileEntryHookStub::MaybeCallEntryHook(masm); | |
| 1793 | |
| 1794 // Build an entry frame (see layout below). | |
| 1795 Isolate* isolate = masm->isolate(); | |
| 1796 | |
| 1797 // Build an entry frame. | |
| 1798 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | |
| 1799 int64_t bad_frame_pointer = -1L; // Bad frame pointer to fail if it is used. | |
| 1800 __ Mov(x13, bad_frame_pointer); | |
| 1801 __ Mov(x12, Operand(Smi::FromInt(marker))); | |
| 1802 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | |
| 1803 __ Ldr(x10, MemOperand(x11)); | |
| 1804 | |
| 1805 // TODO(all): Pushing the marker twice seems unnecessary. | |
| 1806 // In this case perhaps we could push xzr in the slot for the context | |
| 1807 // (see MAsm::EnterFrame). | |
| 1808 __ Push(x13, x12, x12, x10); | |
| 1809 // Set up fp. | |
| 1810 __ Sub(fp, jssp, EntryFrameConstants::kCallerFPOffset); | |
| 1811 | |
| 1812 // Push the JS entry frame marker. Also set js_entry_sp if this is the | |
| 1813 // outermost JS call. | |
| 1814 Label non_outermost_js, done; | |
| 1815 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate); | |
| 1816 __ Mov(x10, Operand(ExternalReference(js_entry_sp))); | |
| 1817 __ Ldr(x11, MemOperand(x10)); | |
| 1818 __ Cbnz(x11, &non_outermost_js); | |
| 1819 __ Str(fp, MemOperand(x10)); | |
| 1820 __ Mov(x12, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | |
| 1821 __ Push(x12); | |
| 1822 __ B(&done); | |
| 1823 __ Bind(&non_outermost_js); | |
| 1824 // We spare one instruction by pushing xzr since the marker is 0. | |
| 1825 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); | |
| 1826 __ Push(xzr); | |
| 1827 __ Bind(&done); | |
| 1828 | |
| 1829 // The frame set up looks like this: | |
| 1830 // jssp[0] : JS entry frame marker. | |
| 1831 // jssp[1] : C entry FP. | |
| 1832 // jssp[2] : stack frame marker. | |
| 1833 // jssp[3] : stack frame marker. | |
| 1834 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. | |
| 1835 | |
| 1836 | |
| 1837 // Jump to a faked try block that does the invoke, with a faked catch | |
| 1838 // block that sets the pending exception. | |
| 1839 __ B(&invoke); | |
| 1840 | |
| 1841 // Prevent the constant pool from being emitted between the record of the | |
| 1842 // handler_entry position and the first instruction of the sequence here. | |
| 1843 // There is no risk because Assembler::Emit() emits the instruction before | |
| 1844 // checking for constant pool emission, but we do not want to depend on | |
| 1845 // that. | |
| 1846 { | |
| 1847 Assembler::BlockConstPoolScope block_const_pool(masm); | |
| 1848 __ bind(&handler_entry); | |
| 1849 handler_offset_ = handler_entry.pos(); | |
| 1850 // Caught exception: Store result (exception) in the pending exception | |
| 1851 // field in the JSEnv and return a failure sentinel. Coming in here the | |
| 1852 // fp will be invalid because the PushTryHandler below sets it to 0 to | |
| 1853 // signal the existence of the JSEntry frame. | |
| 1854 // TODO(jbramley): Do this in the Assembler. | |
| 1855 __ Mov(x10, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
| 1856 isolate))); | |
| 1857 } | |
| 1858 __ Str(code_entry, MemOperand(x10)); | |
| 1859 __ Mov(x0, Operand(reinterpret_cast<int64_t>(Failure::Exception()))); | |
| 1860 __ B(&exit); | |
| 1861 | |
| 1862 // Invoke: Link this frame into the handler chain. There's only one | |
| 1863 // handler block in this code object, so its index is 0. | |
| 1864 __ Bind(&invoke); | |
| 1865 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | |
| 1866 // If an exception not caught by another handler occurs, this handler | |
| 1867 // returns control to the code after the B(&invoke) above, which | |
| 1868 // restores all callee-saved registers (including cp and fp) to their | |
| 1869 // saved values before returning a failure to C. | |
| 1870 | |
| 1871 // Clear any pending exceptions. | |
| 1872 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
| 1873 __ Mov(x11, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
| 1874 isolate))); | |
| 1875 __ Str(x10, MemOperand(x11)); | |
| 1876 | |
| 1877 // Invoke the function by calling through the JS entry trampoline builtin. | |
| 1878 // Notice that we cannot store a reference to the trampoline code directly in | |
| 1879 // this stub, because runtime stubs are not traversed when doing GC. | |
| 1880 | |
| 1881 // Expected registers by Builtins::JSEntryTrampoline | |
| 1882 // x0: code entry. | |
| 1883 // x1: function. | |
| 1884 // x2: receiver. | |
| 1885 // x3: argc. | |
| 1886 // x4: argv. | |
| 1887 ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline | |
| 1888 : Builtins::kJSEntryTrampoline, | |
| 1889 isolate); | |
| 1890 __ Mov(x10, Operand(entry)); | |
| 1891 | |
| 1892 // Call the JSEntryTrampoline. | |
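| // (x11 will hold a tagged Code object pointer; adding Code::kHeaderSize and | |
| // subtracting kHeapObjectTag yields the address of its first instruction.) | |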
| 1893 __ Ldr(x11, MemOperand(x10)); // Dereference the address. | |
| 1894 __ Add(x12, x11, Code::kHeaderSize - kHeapObjectTag); | |
| 1895 __ Blr(x12); | |
| 1896 | |
| 1897 // Unlink this frame from the handler chain. | |
| 1898 __ PopTryHandler(); | |
| 1899 | |
| 1900 | |
| 1901 __ Bind(&exit); | |
| 1902 // x0 holds the result. | |
| 1903 // The stack pointer points to the top of the entry frame pushed on entry from | |
| 1904 // C++ (at the beginning of this stub): | |
| 1905 // jssp[0] : JS entry frame marker. | |
| 1906 // jssp[1] : C entry FP. | |
| 1907 // jssp[2] : stack frame marker. | |
| 1908 // jssp[3] : stack frame marker. | |
| 1909 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. | |
| 1910 | |
| 1911 // Check if the current stack frame is marked as the outermost JS frame. | |
| 1912 Label non_outermost_js_2; | |
| 1913 __ Pop(x10); | |
| 1914 __ Cmp(x10, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | |
| 1915 __ B(ne, &non_outermost_js_2); | |
| 1916 __ Mov(x11, Operand(ExternalReference(js_entry_sp))); | |
| 1917 __ Str(xzr, MemOperand(x11)); | |
| 1918 __ Bind(&non_outermost_js_2); | |
| 1919 | |
| 1920 // Restore the top frame descriptors from the stack. | |
| 1921 __ Pop(x10); | |
| 1922 __ Mov(x11, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate))); | |
| 1923 __ Str(x10, MemOperand(x11)); | |
| 1924 | |
| 1925 // Reset the stack pointer to where the callee-saved registers were pushed. | |
| 1926 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); | |
| 1927 // Restore the callee-saved registers and return. | |
| 1928 ASSERT(jssp.Is(__ StackPointer())); | |
| 1929 __ Mov(csp, jssp); | |
| 1930 __ SetStackPointer(csp); | |
| 1931 __ PopCalleeSavedRegisters(); | |
| 1932 // After this point, we must not modify jssp because it is a callee-saved | |
| 1933 // register which we have just restored. | |
| 1934 __ Ret(); | |
| 1935 } | |
| 1936 | |
| 1937 | |
| 1938 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | |
| 1939 Label miss; | |
| 1940 Register receiver; | |
| 1941 if (kind() == Code::KEYED_LOAD_IC) { | |
| 1942 // ----------- S t a t e ------------- | |
| 1943 // -- lr : return address | |
| 1944 // -- x1 : receiver | |
| 1945 // -- x0 : key | |
| 1946 // ----------------------------------- | |
| 1947 Register key = x0; | |
| 1948 receiver = x1; | |
| 1949 __ Cmp(key, Operand(masm->isolate()->factory()->prototype_string())); | |
| 1950 __ B(ne, &miss); | |
| 1951 } else { | |
| 1952 ASSERT(kind() == Code::LOAD_IC); | |
| 1953 // ----------- S t a t e ------------- | |
| 1954 // -- lr : return address | |
| 1955 // -- x2 : name | |
| 1956 // -- x0 : receiver | |
| 1957 // -- sp[0] : receiver | |
| 1958 // ----------------------------------- | |
| 1959 receiver = x0; | |
| 1960 } | |
| 1961 | |
| 1962 StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss); | |
| 1963 | |
| 1964 __ Bind(&miss); | |
| 1965 StubCompiler::TailCallBuiltin(masm, | |
| 1966 BaseLoadStoreStubCompiler::MissBuiltin(kind())); | |
| 1967 } | |
| 1968 | |
| 1969 | |
| 1970 void StringLengthStub::Generate(MacroAssembler* masm) { | |
| 1971 Label miss; | |
| 1972 Register receiver; | |
| 1973 if (kind() == Code::KEYED_LOAD_IC) { | |
| 1974 // ----------- S t a t e ------------- | |
| 1975 // -- lr : return address | |
| 1976 // -- x1 : receiver | |
| 1977 // -- x0 : key | |
| 1978 // ----------------------------------- | |
| 1979 Register key = x0; | |
| 1980 receiver = x1; | |
| 1981 __ Cmp(key, Operand(masm->isolate()->factory()->length_string())); | |
| 1982 __ B(ne, &miss); | |
| 1983 } else { | |
| 1984 ASSERT(kind() == Code::LOAD_IC); | |
| 1985 // ----------- S t a t e ------------- | |
| 1986 // -- lr : return address | |
| 1987 // -- x2 : name | |
| 1988 // -- x0 : receiver | |
| 1989 // -- sp[0] : receiver | |
| 1990 // ----------------------------------- | |
| 1991 receiver = x0; | |
| 1992 } | |
| 1993 | |
| 1994 StubCompiler::GenerateLoadStringLength(masm, receiver, x10, x11, &miss); | |
| 1995 | |
| 1996 __ Bind(&miss); | |
| 1997 StubCompiler::TailCallBuiltin(masm, | |
| 1998 BaseLoadStoreStubCompiler::MissBuiltin(kind())); | |
| 1999 } | |
| 2000 | |
| 2001 | |
| 2002 void StoreArrayLengthStub::Generate(MacroAssembler* masm) { | |
| 2003 ASM_LOCATION("StoreArrayLengthStub::Generate"); | |
| 2004 // This accepts as a receiver anything JSArray::SetElementsLength accepts | |
| 2005 // (currently anything except external arrays, i.e. anything with elements | |
| 2006 // of FixedArray type). The value must be a number, but only smis are | |
| 2007 // accepted as the most common case. | |
| 2008 Label miss; | |
| 2009 | |
| 2010 Register receiver; | |
| 2011 Register value; | |
| 2012 if (kind() == Code::KEYED_STORE_IC) { | |
| 2013 // ----------- S t a t e ------------- | |
| 2014 // -- lr : return address | |
| 2015 // -- x2 : receiver | |
| 2016 // -- x1 : key | |
| 2017 // -- x0 : value | |
| 2018 // ----------------------------------- | |
| 2019 Register key = x1; | |
| 2020 receiver = x2; | |
| 2021 value = x0; | |
| 2022 __ Cmp(key, Operand(masm->isolate()->factory()->length_string())); | |
| 2023 __ B(ne, &miss); | |
| 2024 } else { | |
| 2025 ASSERT(kind() == Code::STORE_IC); | |
| 2026 // ----------- S t a t e ------------- | |
| 2027 // -- lr : return address | |
| 2028 // -- x2 : key | |
| 2029 // -- x1 : receiver | |
| 2030 // -- x0 : value | |
| 2031 // ----------------------------------- | |
| 2032 receiver = x1; | |
| 2033 value = x0; | |
| 2034 } | |
| 2035 | |
| 2036 // Check that the receiver isn't a smi. | |
| 2037 __ JumpIfSmi(receiver, &miss); | |
| 2038 | |
| 2039 // Check that the object is a JS array. | |
| 2040 __ CompareObjectType(receiver, x10, x11, JS_ARRAY_TYPE); | |
| 2041 __ B(ne, &miss); | |
| 2042 | |
| 2043 // Check that elements are FixedArray. | |
| 2044 // We rely on StoreIC_ArrayLength below to deal with all types of | |
| 2045 // fast elements (including COW). | |
| 2046 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kElementsOffset)); | |
| 2047 __ CompareObjectType(x10, x11, x12, FIXED_ARRAY_TYPE); | |
| 2048 __ B(ne, &miss); | |
| 2049 | |
| 2050 // Check that the array has fast properties, otherwise the length | |
| 2051 // property might have been redefined. | |
| 2052 __ Ldr(x10, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); | |
| 2053 __ Ldr(x10, FieldMemOperand(x10, FixedArray::kMapOffset)); | |
| 2054 __ CompareRoot(x10, Heap::kHashTableMapRootIndex); | |
| 2055 __ B(eq, &miss); | |
| 2056 | |
| 2057 // Check that value is a smi. | |
| 2058 __ JumpIfNotSmi(value, &miss); | |
| 2059 | |
| 2060 // Prepare tail call to StoreIC_ArrayLength. | |
| 2061 __ Push(receiver, value); | |
| 2062 | |
| 2063 ExternalReference ref = | |
| 2064 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); | |
| 2065 __ TailCallExternalReference(ref, 2, 1); | |
| 2066 | |
| 2067 __ Bind(&miss); | |
| 2068 StubCompiler::TailCallBuiltin(masm, | |
| 2069 BaseLoadStoreStubCompiler::MissBuiltin(kind())); | |
| 2070 } | |
| 2071 | |
| 2072 | |
| 2073 void InstanceofStub::Generate(MacroAssembler* masm) { | |
| 2074 // Stack on entry: | |
| 2075 // jssp[0]: function. | |
| 2076 // jssp[8]: object. | |
| 2077 // | |
| 2078 // Returns result in x0: zero indicates that the object is an instance, | |
| 2079 // smi 1 that it is not. | |
| 2080 | |
| 2081 Register result = x0; | |
| 2082 Register function = right(); | |
| 2083 Register object = left(); | |
| 2084 Register scratch1 = x6; | |
| 2085 Register scratch2 = x7; | |
| 2086 Register res_true = x8; | |
| 2087 Register res_false = x9; | |
| 2088 // Only used if there was an inline map check site. (See | |
| 2089 // LCodeGen::DoInstanceOfKnownGlobal().) | |
| 2090 Register map_check_site = x4; | |
| 2091 // Delta for the instructions generated between the inline map check and the | |
| 2092 // instruction setting the result. | |
| 2093 const int32_t kDeltaToLoadBoolResult = 4 * kInstructionSize; | |
| 2094 | |
| 2095 Label not_js_object, slow; | |
| 2096 | |
| 2097 if (!HasArgsInRegisters()) { | |
| 2098 __ Pop(function, object); | |
| 2099 } | |
| 2100 | |
| 2101 if (ReturnTrueFalseObject()) { | |
| 2102 __ LoadTrueFalseRoots(res_true, res_false); | |
| 2103 } else { | |
| 2104 // Counter-intuitive, but correct: smi 0 means 'is an instance' (see above). | |
| 2105 __ Mov(res_true, Operand(Smi::FromInt(0))); | |
| 2106 __ Mov(res_false, Operand(Smi::FromInt(1))); | |
| 2107 } | |
| 2108 | |
| 2109 // Check that the left hand side is a JS object and load its map as a side | |
| 2110 // effect. | |
| 2111 Register map = x12; | |
| 2112 __ JumpIfSmi(object, ¬_js_object); | |
| 2113 __ IsObjectJSObjectType(object, map, scratch2, ¬_js_object); | |
| 2114 | |
| 2115 // If there is a call site cache, don't look in the global cache, but do the | |
| 2116 // real lookup and update the call site cache. | |
| 2117 if (!HasCallSiteInlineCheck()) { | |
| 2118 Label miss; | |
| 2119 __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss); | |
| 2120 __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss); | |
| 2121 __ LoadRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | |
| 2122 __ Ret(); | |
| 2123 __ Bind(&miss); | |
| 2124 } | |
| 2125 | |
| 2126 // Get the prototype of the function. | |
| 2127 Register prototype = x13; | |
| 2128 __ TryGetFunctionPrototype(function, prototype, scratch2, &slow, | |
| 2129 MacroAssembler::kMissOnBoundFunction); | |
| 2130 | |
| 2131 // Check that the function prototype is a JS object. | |
| 2132 __ JumpIfSmi(prototype, &slow); | |
| 2133 __ IsObjectJSObjectType(prototype, scratch1, scratch2, &slow); | |
| 2134 | |
| 2135 // Update the global instanceof or call site inlined cache with the current | |
| 2136 // map and function. The cached answer will be set when it is known below. | |
| 2137 if (HasCallSiteInlineCheck()) { | |
| 2138 // Patch the (relocated) inlined map check. | |
| 2139 __ GetRelocatedValueLocation(map_check_site, scratch1); | |
| 2140 // We have a cell, so need another level of dereferencing. | |
| 2141 __ Ldr(scratch1, MemOperand(scratch1)); | |
| 2142 __ Str(map, FieldMemOperand(scratch1, Cell::kValueOffset)); | |
| 2143 } else { | |
| 2144 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | |
| 2145 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | |
| 2146 } | |
| 2147 | |
| 2148 Label return_true, return_result; | |
| 2149 { | |
| 2150 // Loop through the prototype chain looking for the function prototype. | |
| 2151 Register chain_map = x1; | |
| 2152 Register chain_prototype = x14; | |
| 2153 Register null_value = x15; | |
| 2154 Label loop; | |
| 2155 __ Ldr(chain_prototype, FieldMemOperand(map, Map::kPrototypeOffset)); | |
| 2156 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | |
| 2157 // Speculatively set a result. | |
| 2158 __ Mov(result, res_false); | |
| 2159 | |
| 2160 __ Bind(&loop); | |
| 2161 | |
| 2162 // If the chain prototype is the object prototype, return true. | |
| 2163 __ Cmp(chain_prototype, prototype); | |
| 2164 __ B(eq, &return_true); | |
| 2165 | |
| 2166 // If the chain prototype is null, we've reached the end of the chain, so | |
| 2167 // return false. | |
| 2168 __ Cmp(chain_prototype, null_value); | |
| 2169 __ B(eq, &return_result); | |
| 2170 | |
| 2171 // Otherwise, load the next prototype in the chain, and loop. | |
| 2172 __ Ldr(chain_map, FieldMemOperand(chain_prototype, HeapObject::kMapOffset)); | |
| 2173 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset)); | |
| 2174 __ B(&loop); | |
| 2175 } | |
| 2176 | |
| 2177 // Return sequence when no arguments are on the stack. | |
| 2178 // We cannot fall through to here. | |
| 2179 __ Bind(&return_true); | |
| 2180 __ Mov(result, res_true); | |
| 2181 __ Bind(&return_result); | |
| 2182 if (HasCallSiteInlineCheck()) { | |
| 2183 ASSERT(ReturnTrueFalseObject()); | |
| 2184 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult); | |
| 2185 __ GetRelocatedValueLocation(map_check_site, scratch2); | |
| 2186 __ Str(result, MemOperand(scratch2)); | |
| 2187 } else { | |
| 2188 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | |
| 2189 } | |
| 2190 __ Ret(); | |
| 2191 | |
| 2192 Label object_not_null, object_not_null_or_smi; | |
| 2193 | |
| 2194 __ Bind(¬_js_object); | |
| 2195 Register object_type = x14; | |
| 2196 // x0 result result return register (uninit) | |
| 2197 // x10 function pointer to function | |
| 2198 // x11 object pointer to object | |
| 2199 // x14 object_type type of object (uninit) | |
| 2200 | |
| 2201 // Before null, smi and string checks, check that the rhs is a function. | |
| 2202 // For a non-function rhs, an exception must be thrown. | |
| 2203 __ JumpIfSmi(function, &slow); | |
| 2204 __ JumpIfNotObjectType( | |
| 2205 function, scratch1, object_type, JS_FUNCTION_TYPE, &slow); | |
| 2206 | |
| 2207 __ Mov(result, res_false); | |
| 2208 | |
| 2209 // Null is not instance of anything. | |
| 2210 __ Cmp(object, Operand(masm->isolate()->factory()->null_value())); | |
| 2211 __ B(ne, &object_not_null); | |
| 2212 __ Ret(); | |
| 2213 | |
| 2214 __ Bind(&object_not_null); | |
| 2215 // Smi values are not instances of anything. | |
| 2216 __ JumpIfNotSmi(object, &object_not_null_or_smi); | |
| 2217 __ Ret(); | |
| 2218 | |
| 2219 __ Bind(&object_not_null_or_smi); | |
| 2220 // String values are not instances of anything. | |
| 2221 __ IsObjectJSStringType(object, scratch2, &slow); | |
| 2222 __ Ret(); | |
| 2223 | |
| 2224 // Slow-case. Tail call builtin. | |
| 2225 __ Bind(&slow); | |
| 2226 { | |
| 2227 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 2228 // Arguments have either been passed into registers or have been previously | |
| 2229 // popped. We need to push them before calling builtin. | |
| 2230 __ Push(object, function); | |
| 2231 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); | |
| 2232 } | |
| 2233 if (ReturnTrueFalseObject()) { | |
| 2234 // Reload true/false because they were clobbered in the builtin call. | |
| 2235 __ LoadTrueFalseRoots(res_true, res_false); | |
| 2236 __ Cmp(result, 0); | |
| 2237 __ Csel(result, res_true, res_false, eq); | |
| 2238 } | |
| 2239 __ Ret(); | |
| 2240 } | |
| 2241 | |
| 2242 | |
| 2243 Register InstanceofStub::left() { | |
| 2244 // Object to check (instanceof lhs). | |
| 2245 return x11; | |
| 2246 } | |
| 2247 | |
| 2248 | |
| 2249 Register InstanceofStub::right() { | |
| 2250 // Constructor function (instanceof rhs). | |
| 2251 return x10; | |
| 2252 } | |
| 2253 | |
| 2254 | |
| 2255 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | |
| 2256 Register arg_count = x0; | |
| 2257 Register key = x1; | |
| 2258 | |
| 2259 // The displacement is the offset of the last parameter (if any) relative | |
| 2260 // to the frame pointer. | |
| 2261 static const int kDisplacement = | |
| 2262 StandardFrameConstants::kCallerSPOffset - kPointerSize; | |
| 2263 | |
| 2264 // Check that the key is a smi. | |
| 2265 Label slow; | |
| 2266 __ JumpIfNotSmi(key, &slow); | |
| 2267 | |
| 2268 // Check if the calling frame is an arguments adaptor frame. | |
| 2269 Register local_fp = x11; | |
| 2270 Register caller_fp = x11; | |
| 2271 Register caller_ctx = x12; | |
| 2272 Label skip_adaptor; | |
| 2273 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 2274 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
| 2275 StandardFrameConstants::kContextOffset)); | |
| 2276 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 2277 __ Csel(local_fp, fp, caller_fp, ne); | |
| 2278 __ B(ne, &skip_adaptor); | |
| 2279 | |
| 2280 // Load the actual arguments limit found in the arguments adaptor frame. | |
| 2281 __ Ldr(arg_count, MemOperand(caller_fp, | |
| 2282 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 2283 __ Bind(&skip_adaptor); | |
| 2284 | |
| 2285 // Check index against formal parameters count limit. Use unsigned comparison | |
| 2286 // to get negative check for free: branch if key < 0 or key >= arg_count. | |
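| // (A negative smi has its sign bit set, so as an unsigned value it compares | |
| // higher than any valid argument count.) | |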
| 2287 __ Cmp(key, arg_count); | |
| 2288 __ B(hs, &slow); | |
| 2289 | |
| 2290 // Read the argument from the stack and return it. | |
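| // Because the arguments are stored in reverse order, the slot for 'key' is | |
| // (arg_count - key) slots above the frame pointer, plus kDisplacement. | |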
| 2291 __ Sub(x10, arg_count, key); | |
| 2292 __ Add(x10, local_fp, Operand::UntagSmiAndScale(x10, kPointerSizeLog2)); | |
| 2293 __ Ldr(x0, MemOperand(x10, kDisplacement)); | |
| 2294 __ Ret(); | |
| 2295 | |
| 2296 // Slow case: handle non-smi or out-of-bounds access to arguments by calling | |
| 2297 // the runtime system. | |
| 2298 __ Bind(&slow); | |
| 2299 __ Push(key); | |
| 2300 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | |
| 2301 } | |
| 2302 | |
| 2303 | |
| 2304 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { | |
| 2305 // Stack layout on entry. | |
| 2306 // jssp[0]: number of parameters (tagged) | |
| 2307 // jssp[8]: address of receiver argument | |
| 2308 // jssp[16]: function | |
| 2309 | |
| 2310 // Check if the calling frame is an arguments adaptor frame. | |
| 2311 Label runtime; | |
| 2312 Register caller_fp = x10; | |
| 2313 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 2314 // Load and untag the context. | |
| 2315 STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4); | |
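| // A smi keeps its payload in the upper 32 bits, so loading just the high | |
| // word of the field yields the untagged value directly. | |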
| 2316 __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset + | |
| 2317 (kSmiShift / kBitsPerByte))); | |
| 2318 __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR); | |
| 2319 __ B(ne, &runtime); | |
| 2320 | |
| 2321 // Patch the arguments.length and parameters pointer in the current frame. | |
| 2322 __ Ldr(x11, MemOperand(caller_fp, | |
| 2323 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 2324 __ Poke(x11, 0 * kXRegSizeInBytes); | |
| 2325 __ Add(x10, caller_fp, Operand::UntagSmiAndScale(x11, kPointerSizeLog2)); | |
| 2326 __ Add(x10, x10, Operand(StandardFrameConstants::kCallerSPOffset)); | |
| 2327 __ Poke(x10, 1 * kXRegSizeInBytes); | |
| 2328 | |
| 2329 __ Bind(&runtime); | |
| 2330 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
| 2331 } | |
| 2332 | |
| 2333 | |
| 2334 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { | |
| 2335 // Stack layout on entry. | |
| 2336 // jssp[0]: number of parameters (tagged) | |
| 2337 // jssp[8]: address of receiver argument | |
| 2338 // jssp[16]: function | |
| 2339 // | |
| 2340 // Returns pointer to result object in x0. | |
| 2341 | |
| 2342 // Note: arg_count_smi is an alias of param_count_smi. | |
| 2343 Register arg_count_smi = x3; | |
| 2344 Register param_count_smi = x3; | |
| 2345 Register param_count = x7; | |
| 2346 Register recv_arg = x14; | |
| 2347 Register function = x4; | |
| 2348 __ Pop(param_count_smi, recv_arg, function); | |
| 2349 __ SmiUntag(param_count, param_count_smi); | |
| 2350 | |
| 2351 // Check if the calling frame is an arguments adaptor frame. | |
| 2352 Register caller_fp = x11; | |
| 2353 Register caller_ctx = x12; | |
| 2354 Label runtime; | |
| 2355 Label adaptor_frame, try_allocate; | |
| 2356 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 2357 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
| 2358 StandardFrameConstants::kContextOffset)); | |
| 2359 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 2360 __ B(eq, &adaptor_frame); | |
| 2361 | |
| 2362 // No adaptor, parameter count = argument count. | |
| 2363 | |
| 2364 // x1 mapped_params number of mapped params, min(params, args) (uninit) | |
| 2365 // x2 arg_count number of function arguments (uninit) | |
| 2366 // x3 arg_count_smi number of function arguments (smi) | |
| 2367 // x4 function function pointer | |
| 2368 // x7 param_count number of function parameters | |
| 2369 // x11 caller_fp caller's frame pointer | |
| 2370 // x14 recv_arg pointer to receiver arguments | |
| 2371 | |
| 2372 Register arg_count = x2; | |
| 2373 __ Mov(arg_count, param_count); | |
| 2374 __ B(&try_allocate); | |
| 2375 | |
| 2376 // We have an adaptor frame. Patch the parameters pointer. | |
| 2377 __ Bind(&adaptor_frame); | |
| 2378 __ Ldr(arg_count_smi, | |
| 2379 MemOperand(caller_fp, | |
| 2380 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 2381 __ SmiUntag(arg_count, arg_count_smi); | |
| 2382 __ Add(x10, caller_fp, Operand(arg_count, LSL, kPointerSizeLog2)); | |
| 2383 __ Add(recv_arg, x10, StandardFrameConstants::kCallerSPOffset); | |
| 2384 | |
| 2385 // Compute the mapped parameter count = min(param_count, arg_count) | |
| 2386 Register mapped_params = x1; | |
| 2387 __ Cmp(param_count, arg_count); | |
| 2388 __ Csel(mapped_params, param_count, arg_count, lt); | |
| 2389 | |
| 2390 __ Bind(&try_allocate); | |
| 2391 | |
| 2392 // x0 alloc_obj pointer to allocated objects: param map, backing | |
| 2393 // store, arguments (uninit) | |
| 2394 // x1 mapped_params number of mapped parameters, min(params, args) | |
| 2395 // x2 arg_count number of function arguments | |
| 2396 // x3 arg_count_smi number of function arguments (smi) | |
| 2397 // x4 function function pointer | |
| 2398 // x7 param_count number of function parameters | |
| 2399 // x10 size size of objects to allocate (uninit) | |
| 2400 // x14 recv_arg pointer to receiver arguments | |
| 2401 | |
| 2402 // Compute the size of backing store, parameter map, and arguments object. | |
| 2403 // 1. Parameter map, has two extra words containing context and backing | |
| 2404 // store. | |
| 2405 const int kParameterMapHeaderSize = | |
| 2406 FixedArray::kHeaderSize + 2 * kPointerSize; | |
| 2407 | |
| 2408 // Calculate the parameter map size, assuming it exists. | |
| 2409 Register size = x10; | |
| 2410 __ Mov(size, Operand(mapped_params, LSL, kPointerSizeLog2)); | |
| 2411 __ Add(size, size, kParameterMapHeaderSize); | |
| 2412 | |
| 2413 // If there are no mapped parameters, set the running size total to zero. | |
| 2414 // Otherwise, use the parameter map size calculated earlier. | |
| 2415 __ Cmp(mapped_params, 0); | |
| 2416 __ CzeroX(size, eq); | |
| 2417 | |
| 2418 // 2. Add the size of the backing store and arguments object. | |
| 2419 __ Add(size, size, Operand(arg_count, LSL, kPointerSizeLog2)); | |
| 2420 __ Add(size, size, FixedArray::kHeaderSize + Heap::kArgumentsObjectSize); | |
| 2421 | |
| 2422 // Do the allocation of all three objects in one go. Assign this to x0, as it | |
| 2423 // will be returned to the caller. | |
| 2424 Register alloc_obj = x0; | |
| 2425 __ Allocate(size, alloc_obj, x11, x12, &runtime, TAG_OBJECT); | |
| 2426 | |
| 2427 // Get the arguments boilerplate from the current (native) context. | |
| 2428 | |
| 2429 // x0 alloc_obj pointer to allocated objects (param map, backing | |
| 2430 // store, arguments) | |
| 2431 // x1 mapped_params number of mapped parameters, min(params, args) | |
| 2432 // x2 arg_count number of function arguments | |
| 2433 // x3 arg_count_smi number of function arguments (smi) | |
| 2434 // x4 function function pointer | |
| 2435 // x7 param_count number of function parameters | |
| 2436 // x11 args_offset offset to args (or aliased args) boilerplate (uninit) | |
| 2437 // x14 recv_arg pointer to receiver arguments | |
| 2438 | |
| 2439 Register global_object = x10; | |
| 2440 Register global_ctx = x10; | |
| 2441 Register args_offset = x11; | |
| 2442 Register aliased_args_offset = x10; | |
| 2443 __ Ldr(global_object, GlobalObjectMemOperand()); | |
| 2444 __ Ldr(global_ctx, FieldMemOperand(global_object, | |
| 2445 GlobalObject::kNativeContextOffset)); | |
| 2446 | |
| 2447 __ Ldr(args_offset, ContextMemOperand(global_ctx, | |
| 2448 Context::ARGUMENTS_BOILERPLATE_INDEX)); | |
| 2449 __ Ldr(aliased_args_offset, | |
| 2450 ContextMemOperand(global_ctx, | |
| 2451 Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)); | |
| 2452 __ Cmp(mapped_params, 0); | |
| 2453 __ CmovX(args_offset, aliased_args_offset, ne); | |
| 2454 | |
| 2455 // Copy the JS object part. | |
| 2456 __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13), | |
| 2457 JSObject::kHeaderSize / kPointerSize); | |
| 2458 | |
| 2459 // Set up the callee in-object property. | |
| 2460 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | |
| 2461 const int kCalleeOffset = JSObject::kHeaderSize + | |
| 2462 Heap::kArgumentsCalleeIndex * kPointerSize; | |
| 2463 __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset)); | |
| 2464 | |
| 2465 // Use the length and set that as an in-object property. | |
| 2466 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | |
| 2467 const int kLengthOffset = JSObject::kHeaderSize + | |
| 2468 Heap::kArgumentsLengthIndex * kPointerSize; | |
| 2469 __ Str(arg_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); | |
| 2470 | |
| 2471 // Set up the elements pointer in the allocated arguments object. | |
| 2472 // If we allocated a parameter map, "elements" will point there, otherwise | |
| 2473 // it will point to the backing store. | |
| 2474 | |
| 2475 // x0 alloc_obj pointer to allocated objects (param map, backing | |
| 2476 // store, arguments) | |
| 2477 // x1 mapped_params number of mapped parameters, min(params, args) | |
| 2478 // x2 arg_count number of function arguments | |
| 2479 // x3 arg_count_smi number of function arguments (smi) | |
| 2480 // x4 function function pointer | |
| 2481 // x5 elements pointer to parameter map or backing store (uninit) | |
| 2482 // x6 backing_store pointer to backing store (uninit) | |
| 2483 // x7 param_count number of function parameters | |
| 2484 // x14 recv_arg pointer to receiver arguments | |
| 2485 | |
| 2486 Register elements = x5; | |
| 2487 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSize); | |
| 2488 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); | |
| 2489 | |
| 2490 // Initialize parameter map. If there are no mapped arguments, we're done. | |
| 2491 Label skip_parameter_map; | |
| 2492 __ Cmp(mapped_params, 0); | |
| 2493 // Set up backing store address, because it is needed later for filling in | |
| 2494 // the unmapped arguments. | |
| 2495 Register backing_store = x6; | |
| 2496 __ CmovX(backing_store, elements, eq); | |
| 2497 __ B(eq, &skip_parameter_map); | |
| 2498 | |
| 2499 __ LoadRoot(x10, Heap::kNonStrictArgumentsElementsMapRootIndex); | |
| 2500 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); | |
| 2501 __ Add(x10, mapped_params, 2); | |
| 2502 __ SmiTag(x10); | |
| 2503 __ Str(x10, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
| 2504 __ Str(cp, FieldMemOperand(elements, | |
| 2505 FixedArray::kHeaderSize + 0 * kPointerSize)); | |
| 2506 __ Add(x10, elements, Operand(mapped_params, LSL, kPointerSizeLog2)); | |
| 2507 __ Add(x10, x10, kParameterMapHeaderSize); | |
| 2508 __ Str(x10, FieldMemOperand(elements, | |
| 2509 FixedArray::kHeaderSize + 1 * kPointerSize)); | |
| 2510 | |
| 2511 // Copy the parameter slots and the holes in the arguments. | |
| 2512 // We need to fill in mapped_parameter_count slots. Then index the context, | |
| 2513 // where parameters are stored in reverse order, at: | |
| 2514 // | |
| 2515 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS + parameter_count - 1 | |
| 2516 // | |
| 2517 // The mapped parameter thus needs to get indices: | |
| 2518 // | |
| 2519 // MIN_CONTEXT_SLOTS + parameter_count - 1 .. | |
| 2520 // MIN_CONTEXT_SLOTS + parameter_count - mapped_parameter_count | |
| 2521 // | |
| 2522 // We loop from right to left. | |
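| // For example, with parameter_count == 3 and mapped_parameter_count == 2, | |
| // the mapped slots receive the context indices MIN_CONTEXT_SLOTS + 2 and | |
| // MIN_CONTEXT_SLOTS + 1. | |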
| 2523 | |
| 2524 // x0 alloc_obj pointer to allocated objects (param map, backing | |
| 2525 // store, arguments) | |
| 2526 // x1 mapped_params number of mapped parameters, min(params, args) | |
| 2527 // x2 arg_count number of function arguments | |
| 2528 // x3 arg_count_smi number of function arguments (smi) | |
| 2529 // x4 function function pointer | |
| 2530 // x5 elements pointer to parameter map or backing store | |
| 2531 // x6 backing_store pointer to backing store (uninit) | |
| 2532 // x7 param_count number of function parameters | |
| 2533 // x11 loop_count parameter loop counter (uninit) | |
| 2534 // x12 index parameter index (smi, uninit) | |
| 2535 // x13 the_hole hole value (uninit) | |
| 2536 // x14 recv_arg pointer to receiver arguments | |
| 2537 | |
| 2538 Register loop_count = x11; | |
| 2539 Register index = x12; | |
| 2540 Register the_hole = x13; | |
| 2541 Label parameters_loop, parameters_test; | |
| 2542 __ Mov(loop_count, mapped_params); | |
| 2543 __ Add(index, param_count, static_cast<int>(Context::MIN_CONTEXT_SLOTS)); | |
| 2544 __ Sub(index, index, mapped_params); | |
| 2545 __ SmiTag(index); | |
| 2546 __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); | |
| 2547 __ Add(backing_store, elements, Operand(loop_count, LSL, kPointerSizeLog2)); | |
| 2548 __ Add(backing_store, backing_store, kParameterMapHeaderSize); | |
| 2549 | |
| 2550 __ B(¶meters_test); | |
| 2551 | |
| 2552 __ Bind(¶meters_loop); | |
| 2553 __ Sub(loop_count, loop_count, 1); | |
| 2554 __ Mov(x10, Operand(loop_count, LSL, kPointerSizeLog2)); | |
| 2555 __ Add(x10, x10, kParameterMapHeaderSize - kHeapObjectTag); | |
| 2556 __ Str(index, MemOperand(elements, x10)); | |
| 2557 __ Sub(x10, x10, kParameterMapHeaderSize - FixedArray::kHeaderSize); | |
| 2558 __ Str(the_hole, MemOperand(backing_store, x10)); | |
| 2559 __ Add(index, index, Operand(Smi::FromInt(1))); | |
| 2560 __ Bind(¶meters_test); | |
| 2561 __ Cbnz(loop_count, ¶meters_loop); | |
| 2562 | |
| 2563 __ Bind(&skip_parameter_map); | |
| 2564 // Copy the arguments header and remaining slots (if there are any). | |
| 2565 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); | |
| 2566 __ Str(x10, FieldMemOperand(backing_store, FixedArray::kMapOffset)); | |
| 2567 __ Str(arg_count_smi, FieldMemOperand(backing_store, | |
| 2568 FixedArray::kLengthOffset)); | |
| 2569 | |
| 2570 // x0 alloc_obj pointer to allocated objects (param map, backing | |
| 2571 // store, arguments) | |
| 2572 // x1 mapped_params number of mapped parameters, min(params, args) | |
| 2573 // x2 arg_count number of function arguments | |
| 2574 // x3 arg_count_smi number of function arguments (smi) | |
| 2575 // x4 function function pointer | |
| 2576 // x6 backing_store pointer to backing store | |
| 2577 // x14 recv_arg pointer to receiver arguments | |
| 2578 | |
| 2579 Label arguments_loop, arguments_test; | |
| 2580 __ Mov(x10, mapped_params); | |
| 2581 __ Sub(recv_arg, recv_arg, Operand(x10, LSL, kPointerSizeLog2)); | |
| 2582 __ B(&arguments_test); | |
| 2583 | |
| 2584 __ Bind(&arguments_loop); | |
| 2585 __ Sub(recv_arg, recv_arg, kPointerSize); | |
| 2586 __ Ldr(x11, MemOperand(recv_arg)); | |
| 2587 __ Add(x12, backing_store, Operand(x10, LSL, kPointerSizeLog2)); | |
| 2588 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); | |
| 2589 __ Add(x10, x10, 1); | |
| 2590 | |
| 2591 __ Bind(&arguments_test); | |
| 2592 __ Cmp(x10, arg_count); | |
| 2593 __ B(lt, &arguments_loop); | |
| 2594 | |
| 2595 __ Ret(); | |
| 2596 | |
| 2597 // Do the runtime call to allocate the arguments object. | |
| 2598 __ Bind(&runtime); | |
| 2599 __ Push(function, recv_arg, arg_count_smi); | |
| 2600 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | |
| 2601 } | |
| 2602 | |
| 2603 | |
| 2604 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | |
| 2605 // Stack layout on entry. | |
| 2606 // jssp[0]: number of parameters (tagged) | |
| 2607 // jssp[8]: address of receiver argument | |
| 2608 // jssp[16]: function | |
| 2609 // | |
| 2610 // Returns pointer to result object in x0. | |
| 2611 | |
| 2612 // Get the stub arguments from the frame, and make an untagged copy of the | |
| 2613 // parameter count. | |
| 2614 Register param_count_smi = x1; | |
| 2615 Register params = x2; | |
| 2616 Register function = x3; | |
| 2617 Register param_count = x13; | |
| 2618 __ Pop(param_count_smi, params, function); | |
| 2619 __ SmiUntag(param_count, param_count_smi); | |
| 2620 | |
| 2621 // Test if arguments adaptor needed. | |
| 2622 Register caller_fp = x11; | |
| 2623 Register caller_ctx = x12; | |
| 2624 Label try_allocate, runtime; | |
| 2625 __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | |
| 2626 __ Ldr(caller_ctx, MemOperand(caller_fp, | |
| 2627 StandardFrameConstants::kContextOffset)); | |
| 2628 __ Cmp(caller_ctx, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | |
| 2629 __ B(ne, &try_allocate); | |
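| // An arguments adaptor frame is recognised by its context slot: it holds | |
| // the sentinel Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR) instead of a | |
| // real context, which is what the comparison above tests for. | |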
| 2630 | |
| 2631 // x1 param_count_smi number of parameters passed to function (smi) | |
| 2632 // x2 params pointer to parameters | |
| 2633 // x3 function function pointer | |
| 2634 // x11 caller_fp caller's frame pointer | |
| 2635 // x13 param_count number of parameters passed to function | |
| 2636 | |
| 2637 // Patch the argument length and parameters pointer. | |
| 2638 __ Ldr(param_count_smi, | |
| 2639 MemOperand(caller_fp, | |
| 2640 ArgumentsAdaptorFrameConstants::kLengthOffset)); | |
| 2641 __ SmiUntag(param_count, param_count_smi); | |
| 2642 __ Add(x10, caller_fp, Operand(param_count, LSL, kPointerSizeLog2)); | |
| 2643 __ Add(params, x10, StandardFrameConstants::kCallerSPOffset); | |
| 2644 | |
| 2645 // Try the new space allocation. Start by computing the size of the | |
| 2646 // arguments object and the elements array in words. | |
| 2647 Register size = x10; | |
| 2648 __ Bind(&try_allocate); | |
| 2649 __ Add(size, param_count, FixedArray::kHeaderSize / kPointerSize); | |
| 2650 __ Cmp(param_count, 0); | |
| 2651 __ CzeroX(size, eq); | |
| 2652 __ Add(size, size, Heap::kArgumentsObjectSizeStrict / kPointerSize); | |
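| // In effect, size (in words) is Heap::kArgumentsObjectSizeStrict / | |
| // kPointerSize, plus param_count + FixedArray::kHeaderSize / kPointerSize | |
| // when param_count != 0: the elements array (header and slots) is only | |
| // allocated when there are actual arguments. | |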
| 2653 | |
| 2654 // Do the allocation of both objects in one go. Assign this to x0, as it will | |
| 2655 // be returned to the caller. | |
| 2656 Register alloc_obj = x0; | |
| 2657 __ Allocate(size, alloc_obj, x11, x12, &runtime, | |
| 2658 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | |
| 2659 | |
| 2660 // Get the arguments boilerplate from the current (native) context. | |
| 2661 Register global_object = x10; | |
| 2662 Register global_ctx = x10; | |
| 2663 Register args_offset = x4; | |
| 2664 __ Ldr(global_object, GlobalObjectMemOperand()); | |
| 2665 __ Ldr(global_ctx, FieldMemOperand(global_object, | |
| 2666 GlobalObject::kNativeContextOffset)); | |
| 2667 __ Ldr(args_offset, | |
| 2668 ContextMemOperand(global_ctx, | |
| 2669 Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)); | |
| 2670 | |
| 2671 // x0 alloc_obj pointer to allocated objects: parameter array and | |
| 2672 // arguments object | |
| 2673 // x1 param_count_smi number of parameters passed to function (smi) | |
| 2674 // x2 params pointer to parameters | |
| 2675 // x3 function function pointer | |
| 2676 // x4 args_offset offset to arguments boilerplate | |
| 2677 // x13 param_count number of parameters passed to function | |
| 2678 | |
| 2679 // Copy the JS object part. | |
| 2680 __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7), | |
| 2681 JSObject::kHeaderSize / kPointerSize); | |
| 2682 | |
| 2683 // Set the smi-tagged length as an in-object property. | |
| 2684 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | |
| 2685 const int kLengthOffset = JSObject::kHeaderSize + | |
| 2686 Heap::kArgumentsLengthIndex * kPointerSize; | |
| 2687 __ Str(param_count_smi, FieldMemOperand(alloc_obj, kLengthOffset)); | |
| 2688 | |
| 2689 // If there are no actual arguments, we're done. | |
| 2690 Label done; | |
| 2691 __ Cbz(param_count, &done); | |
| 2692 | |
| 2693 // Set up the elements pointer in the allocated arguments object and | |
| 2694 // initialize the header in the elements fixed array. | |
| 2695 Register elements = x5; | |
| 2696 __ Add(elements, alloc_obj, Heap::kArgumentsObjectSizeStrict); | |
| 2697 __ Str(elements, FieldMemOperand(alloc_obj, JSObject::kElementsOffset)); | |
| 2698 __ LoadRoot(x10, Heap::kFixedArrayMapRootIndex); | |
| 2699 __ Str(x10, FieldMemOperand(elements, FixedArray::kMapOffset)); | |
| 2700 __ Str(param_count_smi, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
| 2701 | |
| 2702 // x0 alloc_obj pointer to allocated objects: parameter array and | |
| 2703 // arguments object | |
| 2704 // x1 param_count_smi number of parameters passed to function (smi) | |
| 2705 // x2 params pointer to parameters | |
| 2706 // x3 function function pointer | |
| 2707 // x4 array pointer to array slot (uninit) | |
| 2708 // x5 elements pointer to elements array of alloc_obj | |
| 2709 // x13 param_count number of parameters passed to function | |
| 2710 | |
| 2711 // Copy the fixed array slots. | |
| 2712 Label loop; | |
| 2713 Register array = x4; | |
| 2714 // Set up pointer to first array slot. | |
| 2715 __ Add(array, elements, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 2716 | |
| 2717 __ Bind(&loop); | |
| 2718 // Pre-decrement the parameters pointer by kPointerSize on each iteration. | |
| 2719 // Pre-decrement in order to skip the receiver. | |
| 2720 __ Ldr(x10, MemOperand(params, -kPointerSize, PreIndex)); | |
| 2721 // Post-increment elements by kPointerSize on each iteration. | |
| 2722 __ Str(x10, MemOperand(array, kPointerSize, PostIndex)); | |
| 2723 __ Sub(param_count, param_count, 1); | |
| 2724 __ Cbnz(param_count, &loop); | |
| 2725 | |
| 2726 // Return from stub. | |
| 2727 __ Bind(&done); | |
| 2728 __ Ret(); | |
| 2729 | |
| 2730 // Do the runtime call to allocate the arguments object. | |
| 2731 __ Bind(&runtime); | |
| 2732 __ Push(function, params, param_count_smi); | |
| 2733 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1); | |
| 2734 } | |
| 2735 | |
| 2736 | |
| 2737 void RegExpExecStub::Generate(MacroAssembler* masm) { | |
| 2738 #ifdef V8_INTERPRETED_REGEXP | |
| 2739 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
| 2740 #else // V8_INTERPRETED_REGEXP | |
| 2741 | |
| 2742 // Stack frame on entry. | |
| 2743 // jssp[0]: last_match_info (expected JSArray) | |
| 2744 // jssp[8]: previous index | |
| 2745 // jssp[16]: subject string | |
| 2746 // jssp[24]: JSRegExp object | |
| 2747 Label runtime; | |
| 2748 | |
| 2749 // Use of registers for this function. | |
| 2750 | |
| 2751 // Variable registers: | |
| 2752 // x10-x13 used as scratch registers | |
| 2753 // w0 string_type type of subject string | |
| 2754 // x2 jsstring_length subject string length | |
| 2755 // x3 jsregexp_object JSRegExp object | |
| 2756 // w4 string_encoding ASCII or UC16 | |
| 2757 // w5 sliced_string_offset if the string is a SlicedString | |
| 2758 // offset to the underlying string | |
| 2759 // w6 string_representation groups attributes of the string: | |
| 2760 // - is a string | |
| 2761 // - type of the string | |
| 2762 // - is a short external string | |
| 2763 Register string_type = w0; | |
| 2764 Register jsstring_length = x2; | |
| 2765 Register jsregexp_object = x3; | |
| 2766 Register string_encoding = w4; | |
| 2767 Register sliced_string_offset = w5; | |
| 2768 Register string_representation = w6; | |
| 2769 | |
| 2770 // These are in callee save registers and will be preserved by the call | |
| 2771 // to the native RegExp code, as this code is called using the normal | |
| 2772 // C calling convention. When calling directly from generated code the | |
| 2773 // native RegExp code will not do a GC and therefore the contents of | |
| 2774 // these registers are safe to use after the call. | |
| 2775 | |
| 2776 // x19 subject subject string | |
| 2777 // x20 regexp_data RegExp data (FixedArray) | |
| 2778 // x21 last_match_info_elements info relative to the last match | |
| 2779 // (FixedArray) | |
| 2780 // x22 code_object generated regexp code | |
| 2781 Register subject = x19; | |
| 2782 Register regexp_data = x20; | |
| 2783 Register last_match_info_elements = x21; | |
| 2784 Register code_object = x22; | |
| 2785 | |
| 2786 // TODO(jbramley): Is it necessary to preserve these? I don't think ARM does. | |
| 2787 CPURegList used_callee_saved_registers(subject, | |
| 2788 regexp_data, | |
| 2789 last_match_info_elements, | |
| 2790 code_object); | |
| 2791 __ PushCPURegList(used_callee_saved_registers); | |
| 2792 | |
| 2793 // Stack frame. | |
| 2794 // jssp[0] : x19 | |
| 2795 // jssp[8] : x20 | |
| 2796 // jssp[16]: x21 | |
| 2797 // jssp[24]: x22 | |
| 2798 // jssp[32]: last_match_info (JSArray) | |
| 2799 // jssp[40]: previous index | |
| 2800 // jssp[48]: subject string | |
| 2801 // jssp[56]: JSRegExp object | |
| 2802 | |
| 2803 const int kLastMatchInfoOffset = 4 * kPointerSize; | |
| 2804 const int kPreviousIndexOffset = 5 * kPointerSize; | |
| 2805 const int kSubjectOffset = 6 * kPointerSize; | |
| 2806 const int kJSRegExpOffset = 7 * kPointerSize; | |
| 2807 | |
| 2808 // Ensure that a RegExp stack is allocated. | |
| 2809 Isolate* isolate = masm->isolate(); | |
| 2810 ExternalReference address_of_regexp_stack_memory_address = | |
| 2811 ExternalReference::address_of_regexp_stack_memory_address(isolate); | |
| 2812 ExternalReference address_of_regexp_stack_memory_size = | |
| 2813 ExternalReference::address_of_regexp_stack_memory_size(isolate); | |
| 2814 __ Mov(x10, Operand(address_of_regexp_stack_memory_size)); | |
| 2815 __ Ldr(x10, MemOperand(x10)); | |
| 2816 __ Cbz(x10, &runtime); | |
| 2817 | |
| 2818 // Check that the first argument is a JSRegExp object. | |
| 2819 ASSERT(jssp.Is(__ StackPointer())); | |
| 2820 __ Peek(jsregexp_object, kJSRegExpOffset); | |
| 2821 __ JumpIfSmi(jsregexp_object, &runtime); | |
| 2822 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); | |
| 2823 | |
| 2824 // Check that the RegExp has been compiled (data contains a fixed array). | |
| 2825 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset)); | |
| 2826 if (FLAG_debug_code) { | |
| 2827 STATIC_ASSERT(kSmiTag == 0); | |
| 2828 __ Tst(regexp_data, kSmiTagMask); | |
| 2829 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); | |
| 2830 __ CompareObjectType(regexp_data, x10, x10, FIXED_ARRAY_TYPE); | |
| 2831 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); | |
| 2832 } | |
| 2833 | |
| 2834 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | |
| 2835 __ Ldr(x10, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | |
| 2836 __ Cmp(x10, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | |
| 2837 __ B(ne, &runtime); | |
| 2838 | |
| 2839 // Check that the number of captures fits in the static offsets vector buffer. | |
| 2840 // We always have at least one capture for the whole match, plus additional | |
| 2841 // ones due to capturing parentheses. A capture takes 2 registers. | |
| 2842 // The number of capture registers then is (number_of_captures + 1) * 2. | |
| 2843 __ Ldrsw(x10, | |
| 2844 UntagSmiFieldMemOperand(regexp_data, | |
| 2845 JSRegExp::kIrregexpCaptureCountOffset)); | |
| 2846 // Check (number_of_captures + 1) * 2 <= offsets vector size | |
| 2847 // number_of_captures * 2 <= offsets vector size - 2 | |
| 2848 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | |
| 2849 __ Add(x10, x10, x10); | |
| 2850 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); | |
| 2851 __ B(hi, &runtime); | |
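| // Worked example: with 3 capturing parentheses, number_of_captures == 3, | |
| // so 2 * 3 == 6 registers are needed on top of the 2 for the whole match; | |
| // the check above (6 <= vector size - 2) passes as long as the static | |
| // offsets vector has at least 8 entries. | |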
| 2852 | |
| 2853 // Initialize offset for possibly sliced string. | |
| 2854 __ Mov(sliced_string_offset, 0); | |
| 2855 | |
| 2856 ASSERT(jssp.Is(__ StackPointer())); | |
| 2857 __ Peek(subject, kSubjectOffset); | |
| 2858 __ JumpIfSmi(subject, &runtime); | |
| 2859 | |
| 2860 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
| 2861 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
| 2862 | |
| 2863 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset)); | |
| 2864 | |
| 2865 // Handle subject string according to its encoding and representation: | |
| 2866 // (1) Sequential string? If yes, go to (5). | |
| 2867 // (2) Anything but sequential or cons? If yes, go to (6). | |
| 2868 // (3) Cons string. If the string is flat, replace subject with first string. | |
| 2869 // Otherwise bailout. | |
| 2870 // (4) Is subject external? If yes, go to (7). | |
| 2871 // (5) Sequential string. Load regexp code according to encoding. | |
| 2872 // (E) Carry on. | |
| 2873 /// [...] | |
| 2874 | |
| 2875 // Deferred code at the end of the stub: | |
| 2876 // (6) Not a long external string? If yes, go to (8). | |
| 2877 // (7) External string. Make it, offset-wise, look like a sequential string. | |
| 2878 // Go to (5). | |
| 2879 // (8) Short external string or not a string? If yes, bail out to runtime. | |
| 2880 // (9) Sliced string. Replace subject with parent. Go to (4). | |
| 2881 | |
| 2882 Label check_underlying; // (4) | |
| 2883 Label seq_string; // (5) | |
| 2884 Label not_seq_nor_cons; // (6) | |
| 2885 Label external_string; // (7) | |
| 2886 Label not_long_external; // (8) | |
| 2887 | |
| 2888 // (1) Sequential string? If yes, go to (5). | |
| 2889 __ And(string_representation, | |
| 2890 string_type, | |
| 2891 kIsNotStringMask | | |
| 2892 kStringRepresentationMask | | |
| 2893 kShortExternalStringMask); | |
| 2894 // We rely on the fact that strings which are sequential and are not | |
| 2895 // short external strings have instance types matching | |
| 2896 // the following pattern: | |
| 2897 // string_type: 0XX0 XX00 | |
| 2898 // ^ ^ ^^ | |
| 2899 // | | || | |
| 2900 // | | is a SeqString | |
| 2901 // | is not a short external String | |
| 2902 // is a String | |
| 2903 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | |
| 2904 STATIC_ASSERT(kShortExternalStringTag != 0); | |
| 2905 __ Cbz(string_representation, &seq_string); // Go to (5). | |
| 2906 | |
| 2907 // (2) Anything but sequential or cons? If yes, go to (6). | |
| 2908 STATIC_ASSERT(kConsStringTag < kExternalStringTag); | |
| 2909 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | |
| 2910 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | |
| 2911 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | |
| 2912 __ Cmp(string_representation, kExternalStringTag); | |
| 2913 __ B(ge, &not_seq_nor_cons); // Go to (6). | |
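| // A single comparison dispatches here because the representation tags are | |
| // ordered: cons < external < (sliced, short external, not a string), as | |
| // the STATIC_ASSERTs above check. Anything below kExternalStringTag must | |
| // be a cons string, since sequential strings were handled at (1). | |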
| 2914 | |
| 2915 // (3) Cons string. Check that it's flat. | |
| 2916 __ Ldr(x10, FieldMemOperand(subject, ConsString::kSecondOffset)); | |
| 2917 __ JumpIfNotRoot(x10, Heap::kempty_stringRootIndex, &runtime); | |
| 2918 // Replace subject with first string. | |
| 2919 __ Ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | |
| 2920 | |
| 2921 // (4) Is subject external? If yes, go to (7). | |
| 2922 __ Bind(&check_underlying); | |
| 2923 // Reload the string type. | |
| 2924 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
| 2925 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
| 2926 STATIC_ASSERT(kSeqStringTag == 0); | |
| 2927 // The underlying external string is never a short external string. | |
| 2928 STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); | |
| 2929 STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); | |
| 2930 __ TestAndBranchIfAnySet(string_type.X(), | |
| 2931 kStringRepresentationMask, | |
| 2932 &external_string); // Go to (7). | |
| 2933 | |
| 2934 // (5) Sequential string. Load regexp code according to encoding. | |
| 2935 __ Bind(&seq_string); | |
| 2936 | |
| 2937 // Check that the third argument is a positive smi less than the subject | |
| 2938 // string length. A negative value will be greater (unsigned comparison). | |
| 2939 ASSERT(jssp.Is(__ StackPointer())); | |
| 2940 __ Peek(x10, kPreviousIndexOffset); | |
| 2941 __ JumpIfNotSmi(x10, &runtime); | |
| 2942 __ Cmp(jsstring_length, x10); | |
| 2943 __ B(ls, &runtime); | |
| 2944 | |
| 2945 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1 | |
| 2946 // before entering the exit frame. | |
| 2947 __ SmiUntag(x1, x10); | |
| 2948 | |
| 2949 // The third bit determines the string encoding in string_type. | |
| 2950 STATIC_ASSERT(kOneByteStringTag == 0x04); | |
| 2951 STATIC_ASSERT(kTwoByteStringTag == 0x00); | |
| 2952 STATIC_ASSERT(kStringEncodingMask == 0x04); | |
| 2953 | |
| 2954 // Find the code object based on the assumptions above. | |
| 2955 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent; add an offset | |
| 2956 // of kPointerSize to the former to reach the latter. | |
| 2957 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, | |
| 2958 JSRegExp::kDataUC16CodeOffset); | |
| 2959 __ Mov(x10, kPointerSize); | |
| 2960 // We will need the encoding later: ASCII = 0x04 | |
| 2961 // UC16 = 0x00 | |
| 2962 __ Ands(string_encoding, string_type, kStringEncodingMask); | |
| 2963 __ CzeroX(x10, ne); | |
| 2964 __ Add(x10, regexp_data, x10); | |
| 2965 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset)); | |
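| // This selects the code object without branching: x10 starts as | |
| // kPointerSize, is zeroed when the encoding bits are non-zero (ASCII), | |
| // and is left as kPointerSize for UC16, so the Ldr above reads either the | |
| // ASCII or the UC16 code field. | |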
| 2966 | |
| 2967 // (E) Carry on. String handling is done. | |
| 2968 | |
| 2969 // Check that the irregexp code has been generated for the actual string | |
| 2970 // encoding. If it has, the field contains a code object; otherwise it contains | |
| 2971 // a smi (code flushing support). | |
| 2972 __ JumpIfSmi(code_object, &runtime); | |
| 2973 | |
| 2974 // All checks done. Now push arguments for native regexp code. | |
| 2975 __ IncrementCounter(isolate->counters()->regexp_entry_native(), 1, | |
| 2976 x10, | |
| 2977 x11); | |
| 2978 | |
| 2979 // Isolates: note we add an additional parameter here (isolate pointer). | |
| 2980 __ EnterExitFrame(false, x10, 1); | |
| 2981 ASSERT(csp.Is(__ StackPointer())); | |
| 2982 | |
| 2983 // We have 9 arguments to pass to the regexp code, therefore we have to pass | |
| 2984 // one on the stack and the rest as registers. | |
| 2985 | |
| 2986 // Note that the placement of the argument on the stack isn't standard | |
| 2987 // AAPCS64: | |
| 2988 // csp[0]: Space for the return address placed by DirectCEntryStub. | |
| 2989 // csp[8]: Argument 9, the current isolate address. | |
| 2990 | |
| 2991 __ Mov(x10, Operand(ExternalReference::isolate_address(isolate))); | |
| 2992 __ Poke(x10, kPointerSize); | |
| 2993 | |
| 2994 Register length = w11; | |
| 2995 Register previous_index_in_bytes = w12; | |
| 2996 Register start = x13; | |
| 2997 | |
| 2998 // Load start of the subject string. | |
| 2999 __ Add(start, subject, SeqString::kHeaderSize - kHeapObjectTag); | |
| 3000 // Load the length from the original subject string from the previous stack | |
| 3001 // frame. Therefore we have to use fp, which points exactly two pointer | |
| 3002 // sizes below the previous sp. (Because creating a new stack frame pushes | |
| 3003 // the previous fp onto the stack and decrements sp by 2 * kPointerSize.) | |
| 3004 __ Ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | |
| 3005 __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset)); | |
| 3006 | |
| 3007 // Handle UC16 encoding: two bytes make one character. | |
| 3008 // string_encoding: if ASCII: 0x04 | |
| 3009 // if UC16: 0x00 | |
| 3010 STATIC_ASSERT(kStringEncodingMask == 0x04); | |
| 3011 __ Ubfx(string_encoding, string_encoding, 2, 1); | |
| 3012 __ Eor(string_encoding, string_encoding, 1); | |
| 3013 // string_encoding: if ASCII: 0 | |
| 3014 // if UC16: 1 | |
| 3015 | |
| 3016 // Convert string positions from characters to bytes. | |
| 3017 // Previous index is in x1. | |
| 3018 __ Lsl(previous_index_in_bytes, w1, string_encoding); | |
| 3019 __ Lsl(length, length, string_encoding); | |
| 3020 __ Lsl(sliced_string_offset, sliced_string_offset, string_encoding); | |
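| // Worked example: for a UC16 subject (string_encoding == 1 after the Eor | |
| // above), a previous index of 3 becomes 6 bytes, and the length and | |
| // sliced-string offset are doubled in the same way; for ASCII the shift | |
| // amount is 0 and the values are unchanged. | |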
| 3021 | |
| 3022 // Argument 1 (x0): Subject string. | |
| 3023 __ Mov(x0, subject); | |
| 3024 | |
| 3025 // Argument 2 (x1): Previous index, already there. | |
| 3026 | |
| 3027 // Argument 3 (x2): Get the start of input. | |
| 3028 // Start of input = start of string + previous index + substring offset | |
| 3029 // (the substring offset is 0 if the string is not sliced). | |
| 3031 __ Add(w10, previous_index_in_bytes, sliced_string_offset); | |
| 3032 __ Add(x2, start, Operand(w10, UXTW)); | |
| 3033 | |
| 3034 // Argument 4 (x3): | |
| 3035 // End of input = start of input + (length of input - previous index) | |
| 3036 __ Sub(w10, length, previous_index_in_bytes); | |
| 3037 __ Add(x3, x2, Operand(w10, UXTW)); | |
| 3038 | |
| 3039 // Argument 5 (x4): static offsets vector buffer. | |
| 3040 __ Mov(x4, | |
| 3041 Operand(ExternalReference::address_of_static_offsets_vector(isolate))); | |
| 3042 | |
| 3043 // Argument 6 (x5): Set the number of capture registers to zero to force | |
| 3044 // global regexps to behave as non-global. This stub is not used for global | |
| 3045 // regexps. | |
| 3046 __ Mov(x5, 0); | |
| 3047 | |
| 3048 // Argument 7 (x6): Start (high end) of backtracking stack memory area. | |
| 3049 __ Mov(x10, Operand(address_of_regexp_stack_memory_address)); | |
| 3050 __ Ldr(x10, MemOperand(x10)); | |
| 3051 __ Mov(x11, Operand(address_of_regexp_stack_memory_size)); | |
| 3052 __ Ldr(x11, MemOperand(x11)); | |
| 3053 __ Add(x6, x10, x11); | |
| 3054 | |
| 3055 // Argument 8 (x7): Indicate that this is a direct call from JavaScript. | |
| 3056 __ Mov(x7, 1); | |
| 3057 | |
| 3058 // Locate the code entry and call it. | |
| 3059 __ Add(code_object, code_object, Code::kHeaderSize - kHeapObjectTag); | |
| 3060 DirectCEntryStub stub; | |
| 3061 stub.GenerateCall(masm, code_object); | |
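| // DirectCEntryStub is used (rather than a plain call) so that the return | |
| // address it places on the stack stays GC-safe: it can be fixed up if a | |
| // GC moves the calling code object. | |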
| 3062 | |
| 3063 __ LeaveExitFrame(false, x10, true); | |
| 3064 | |
| 3065 // The generated regexp code returns an int32 in w0. | |
| 3066 Label failure, exception; | |
| 3067 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::FAILURE, eq, &failure); | |
| 3068 __ CompareAndBranch(w0, | |
| 3069 NativeRegExpMacroAssembler::EXCEPTION, | |
| 3070 eq, | |
| 3071 &exception); | |
| 3072 __ CompareAndBranch(w0, NativeRegExpMacroAssembler::RETRY, eq, &runtime); | |
| 3073 | |
| 3074 // Success: process the result from the native regexp code. | |
| 3075 Register number_of_capture_registers = x12; | |
| 3076 | |
| 3077 // Calculate number of capture registers (number_of_captures + 1) * 2 | |
| 3078 // and store it in the last match info. | |
| 3079 __ Ldrsw(x10, | |
| 3080 UntagSmiFieldMemOperand(regexp_data, | |
| 3081 JSRegExp::kIrregexpCaptureCountOffset)); | |
| 3082 __ Add(x10, x10, x10); | |
| 3083 __ Add(number_of_capture_registers, x10, 2); | |
| 3084 | |
| 3085 // Check that the fourth object is a JSArray object. | |
| 3086 ASSERT(jssp.Is(__ StackPointer())); | |
| 3087 __ Peek(x10, kLastMatchInfoOffset); | |
| 3088 __ JumpIfSmi(x10, &runtime); | |
| 3089 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); | |
| 3090 | |
| 3091 // Check that the JSArray is the fast case. | |
| 3092 __ Ldr(last_match_info_elements, | |
| 3093 FieldMemOperand(x10, JSArray::kElementsOffset)); | |
| 3094 __ Ldr(x10, | |
| 3095 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | |
| 3096 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime); | |
| 3097 | |
| 3098 // Check that the last match info has space for the capture registers and the | |
| 3099 // additional information (overhead). | |
| 3100 // (number_of_captures + 1) * 2 + overhead <= last match info size | |
| 3101 // (number_of_captures * 2) + 2 + overhead <= last match info size | |
| 3102 // number_of_capture_registers + overhead <= last match info size | |
| 3103 __ Ldrsw(x10, | |
| 3104 UntagSmiFieldMemOperand(last_match_info_elements, | |
| 3105 FixedArray::kLengthOffset)); | |
| 3106 __ Add(x11, number_of_capture_registers, RegExpImpl::kLastMatchOverhead); | |
| 3107 __ Cmp(x11, x10); | |
| 3108 __ B(gt, &runtime); | |
| 3109 | |
| 3110 // Store the capture count. | |
| 3111 __ SmiTag(x10, number_of_capture_registers); | |
| 3112 __ Str(x10, | |
| 3113 FieldMemOperand(last_match_info_elements, | |
| 3114 RegExpImpl::kLastCaptureCountOffset)); | |
| 3115 // Store last subject and last input. | |
| 3116 __ Str(subject, | |
| 3117 FieldMemOperand(last_match_info_elements, | |
| 3118 RegExpImpl::kLastSubjectOffset)); | |
| 3119 // Use x10 as the subject string in order to only need | |
| 3120 // one RecordWriteStub. | |
| 3121 __ Mov(x10, subject); | |
| 3122 __ RecordWriteField(last_match_info_elements, | |
| 3123 RegExpImpl::kLastSubjectOffset, | |
| 3124 x10, | |
| 3125 x11, | |
| 3126 kLRHasNotBeenSaved, | |
| 3127 kDontSaveFPRegs); | |
| 3128 __ Str(subject, | |
| 3129 FieldMemOperand(last_match_info_elements, | |
| 3130 RegExpImpl::kLastInputOffset)); | |
| 3131 __ Mov(x10, subject); | |
| 3132 __ RecordWriteField(last_match_info_elements, | |
| 3133 RegExpImpl::kLastInputOffset, | |
| 3134 x10, | |
| 3135 x11, | |
| 3136 kLRHasNotBeenSaved, | |
| 3137 kDontSaveFPRegs); | |
| 3138 | |
| 3139 Register last_match_offsets = x13; | |
| 3140 Register offsets_vector_index = x14; | |
| 3141 Register current_offset = x15; | |
| 3142 | |
| 3143 // Get the static offsets vector filled by the native regexp code | |
| 3144 // and fill the last match info. | |
| 3145 ExternalReference address_of_static_offsets_vector = | |
| 3146 ExternalReference::address_of_static_offsets_vector(isolate); | |
| 3147 __ Mov(offsets_vector_index, Operand(address_of_static_offsets_vector)); | |
| 3148 | |
| 3149 Label next_capture, done; | |
| 3150 // Capture register counter starts from number of capture registers and | |
| 3151 // iterates down to zero (inclusive). | |
| 3152 __ Add(last_match_offsets, | |
| 3153 last_match_info_elements, | |
| 3154 RegExpImpl::kFirstCaptureOffset - kHeapObjectTag); | |
| 3155 __ Bind(&next_capture); | |
| 3156 __ Subs(number_of_capture_registers, number_of_capture_registers, 2); | |
| 3157 __ B(mi, &done); | |
| 3158 // Read two 32-bit values from the static offsets vector buffer into | |
| 3159 // an X register. | |
| 3160 __ Ldr(current_offset, | |
| 3161 MemOperand(offsets_vector_index, kWRegSizeInBytes * 2, PostIndex)); | |
| 3162 // Store the smi values in the last match info. | |
| 3163 __ SmiTag(x10, current_offset); | |
| 3164 // Clearing the bottom 32 bits gives us a Smi. | |
| 3165 STATIC_ASSERT(kSmiShift == 32); | |
| 3166 __ And(x11, current_offset, ~kWRegMask); | |
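| // current_offset holds two packed 32-bit offsets. SmiTag is a left shift | |
| // by 32 (see the STATIC_ASSERT above), which tags the low word, while | |
| // masking out the low 32 bits leaves the high word already in smi | |
| // position, so both smis are formed without extracting the halves. | |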
| 3167 __ Stp(x10, | |
| 3168 x11, | |
| 3169 MemOperand(last_match_offsets, kXRegSizeInBytes * 2, PostIndex)); | |
| 3170 __ B(&next_capture); | |
| 3171 __ Bind(&done); | |
| 3172 | |
| 3173 // Return last match info. | |
| 3174 __ Peek(x0, kLastMatchInfoOffset); | |
| 3175 __ PopCPURegList(used_callee_saved_registers); | |
| 3176 // Drop the 4 arguments of the stub from the stack. | |
| 3177 __ Drop(4); | |
| 3178 __ Ret(); | |
| 3179 | |
| 3180 __ Bind(&exception); | |
| 3181 Register exception_value = x0; | |
| 3182 // A stack overflow (on the backtrack stack) may have occurred | |
| 3183 // in the RegExp code but no exception has been created yet. | |
| 3184 // If there is no pending exception, handle that in the runtime system. | |
| 3185 __ Mov(x10, Operand(isolate->factory()->the_hole_value())); | |
| 3186 __ Mov(x11, | |
| 3187 Operand(ExternalReference(Isolate::kPendingExceptionAddress, | |
| 3188 isolate))); | |
| 3189 __ Ldr(exception_value, MemOperand(x11)); | |
| 3190 __ Cmp(x10, exception_value); | |
| 3191 __ B(eq, &runtime); | |
| 3192 | |
| 3193 __ Str(x10, MemOperand(x11)); // Clear pending exception. | |
| 3194 | |
| 3195 // Check if the exception is a termination. If so, throw as uncatchable. | |
| 3196 Label termination_exception; | |
| 3197 __ JumpIfRoot(exception_value, | |
| 3198 Heap::kTerminationExceptionRootIndex, | |
| 3199 &termination_exception); | |
| 3200 | |
| 3201 __ Throw(exception_value, x10, x11, x12, x13); | |
| 3202 | |
| 3203 __ Bind(&termination_exception); | |
| 3204 __ ThrowUncatchable(exception_value, x10, x11, x12, x13); | |
| 3205 | |
| 3206 __ Bind(&failure); | |
| 3207 __ Mov(x0, Operand(masm->isolate()->factory()->null_value())); | |
| 3208 __ PopCPURegList(used_callee_saved_registers); | |
| 3209 // Drop the 4 arguments of the stub from the stack. | |
| 3210 __ Drop(4); | |
| 3211 __ Ret(); | |
| 3212 | |
| 3213 __ Bind(&runtime); | |
| 3214 __ PopCPURegList(used_callee_saved_registers); | |
| 3215 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | |
| 3216 | |
| 3217 // Deferred code for string handling. | |
| 3218 // (6) Not a long external string? If yes, go to (8). | |
| 3219 __ Bind(&not_seq_nor_cons); | |
| 3220 // The condition flags from the comparison at (2) are still set. | |
| 3221 __ B(ne, &not_long_external); // Go to (8). | |
| 3222 | |
| 3223 // (7) External string. Make it, offset-wise, look like a sequential string. | |
| 3224 __ Bind(&external_string); | |
| 3225 if (masm->emit_debug_code()) { | |
| 3226 // Assert that we do not have a cons or slice (indirect strings) here. | |
| 3227 // Sequential strings have already been ruled out. | |
| 3228 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | |
| 3229 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
| 3230 __ Tst(x10, kIsIndirectStringMask); | |
| 3231 __ Check(eq, kExternalStringExpectedButNotFound); | |
| 3232 __ And(x10, x10, kStringRepresentationMask); | |
| 3233 __ Cmp(x10, 0); | |
| 3234 __ Check(ne, kExternalStringExpectedButNotFound); | |
| 3235 } | |
| 3236 __ Ldr(subject, | |
| 3237 FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | |
| 3238 // Move the pointer so that offset-wise, it looks like a sequential string. | |
| 3239 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | |
| 3240 __ Sub(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag); | |
| 3241 __ B(&seq_string); // Go to (5). | |
| 3242 | |
| 3243 // (8) If this is a short external string or not a string, bail out to | |
| 3244 // runtime. | |
| 3245 __ Bind(&not_long_external); | |
| 3246 STATIC_ASSERT(kShortExternalStringTag != 0); | |
| 3247 __ TestAndBranchIfAnySet(string_representation, | |
| 3248 kShortExternalStringMask | kIsNotStringMask, | |
| 3249 &runtime); | |
| 3250 | |
| 3251 // (9) Sliced string. Replace subject with parent. | |
| 3252 __ Ldr(sliced_string_offset, | |
| 3253 UntagSmiFieldMemOperand(subject, SlicedString::kOffsetOffset)); | |
| 3254 __ Ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | |
| 3255 __ B(&check_underlying); // Go to (4). | |
| 3256 #endif | |
| 3257 } | |
| 3258 | |
| 3259 | |
| 3260 // TODO(jbramley): Don't use static registers here, but take them as arguments. | |
| 3261 static void GenerateRecordCallTarget(MacroAssembler* masm) { | |
| 3262 ASM_LOCATION("GenerateRecordCallTarget"); | |
| 3263 // Cache the called function in a feedback vector slot. Cache states are | |
| 3264 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. | |
| 3265 // x0 : number of arguments to the construct function | |
| 3266 // x1 : the function to call | |
| 3267 // x2 : feedback vector | |
| 3268 // x3 : slot in feedback vector (smi) | |
| 3269 Label initialize, done, miss, megamorphic, not_array_function; | |
| 3270 | |
| 3271 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | |
| 3272 masm->isolate()->heap()->undefined_value()); | |
| 3273 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | |
| 3274 masm->isolate()->heap()->the_hole_value()); | |
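| // The feedback slot moves through three states, never backwards: | |
| // uninitialized (the hole sentinel) -> monomorphic (the JSFunction, or an | |
| // AllocationSite for the Array function) -> megamorphic (the undefined | |
| // sentinel). | |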
| 3275 | |
| 3276 // Load the cache state. | |
| 3277 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); | |
| 3278 __ Ldr(x4, FieldMemOperand(x4, FixedArray::kHeaderSize)); | |
| 3279 | |
| 3280 // A monomorphic cache hit or an already megamorphic state: invoke the | |
| 3281 // function without changing the state. | |
| 3282 __ Cmp(x4, x1); | |
| 3283 __ B(eq, &done); | |
| 3284 | |
| 3285 // If we came here, we need to see if we are the array function. | |
| 3286 // If we didn't have a matching function, and we didn't find the megamorphic | |
| 3287 // sentinel, then the slot contains either some other function or an | |
| 3288 // AllocationSite. Do a map check on the object in x4. | |
| 3289 __ Ldr(x5, FieldMemOperand(x4, AllocationSite::kMapOffset)); | |
| 3290 __ JumpIfNotRoot(x5, Heap::kAllocationSiteMapRootIndex, &miss); | |
| 3291 | |
| 3292 // Make sure the function is the Array() function. | |
| 3293 __ LoadArrayFunction(x4); | |
| 3294 __ Cmp(x1, x4); | |
| 3295 __ B(ne, &megamorphic); | |
| 3296 __ B(&done); | |
| 3297 | |
| 3298 __ Bind(&miss); | |
| 3299 | |
| 3300 // A monomorphic miss (i.e. the cache is not uninitialized) goes | |
| 3301 // megamorphic. | |
| 3302 __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &initialize); | |
| 3303 // MegamorphicSentinel is an immortal immovable object (undefined) so no | |
| 3304 // write-barrier is needed. | |
| 3305 __ Bind(&megamorphic); | |
| 3306 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); | |
| 3307 __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); | |
| 3308 __ Str(x10, FieldMemOperand(x4, FixedArray::kHeaderSize)); | |
| 3309 __ B(&done); | |
| 3310 | |
| 3311 // An uninitialized cache is patched with the function, or with an | |
| 3312 // AllocationSite if the function is the Array constructor. | |
| 3313 __ Bind(&initialize); | |
| 3314 // Make sure the function is the Array() function. | |
| 3315 __ LoadArrayFunction(x4); | |
| 3316 __ Cmp(x1, x4); | |
| 3317 __ B(ne, &not_array_function); | |
| 3318 | |
| 3319 // The target function is the Array constructor. Create an AllocationSite | |
| 3320 // if we don't already have one, and store it in the slot. | |
| 3321 { | |
| 3322 FrameScope scope(masm, StackFrame::INTERNAL); | |
| 3323 CreateAllocationSiteStub create_stub; | |
| 3324 | |
| 3325 // Arguments register must be smi-tagged to call out. | |
| 3326 __ SmiTag(x0); | |
| 3327 __ Push(x0, x1, x2, x3); | |
| 3328 | |
| 3329 __ CallStub(&create_stub); | |
| 3330 | |
| 3331 __ Pop(x3, x2, x1, x0); | |
| 3332 __ SmiUntag(x0); | |
| 3333 } | |
| 3334 __ B(&done); | |
| 3335 | |
| 3336 __ Bind(&not_array_function); | |
| 3337 // An uninitialized cache is patched with the function. | |
| 3338 | |
| 3339 __ Add(x4, x2, Operand::UntagSmiAndScale(x3, kPointerSizeLog2)); | |
| 3340 // TODO(all): Does the value need to be left in x4? If not, FieldMemOperand | |
| 3341 // could be used to avoid this add. | |
| 3342 __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag); | |
| 3343 __ Str(x1, MemOperand(x4, 0)); | |
| 3344 | |
| 3345 __ Push(x4, x2, x1); | |
| 3346 __ RecordWrite(x2, x4, x1, kLRHasNotBeenSaved, kDontSaveFPRegs, | |
| 3347 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | |
| 3348 __ Pop(x1, x2, x4); | |
| 3349 | |
| 3350 // TODO(all): Are x4, x2 and x1 outputs? This isn't clear. | |
| 3351 | |
| 3352 __ Bind(&done); | |
| 3353 } | |
| 3354 | |
| 3355 | |
| 3356 void CallFunctionStub::Generate(MacroAssembler* masm) { | |
| 3357 ASM_LOCATION("CallFunctionStub::Generate"); | |
| 3358 // x1 : the function to call | |
| 3359 // x2 : feedback vector | |
| 3360 // x3 : slot in feedback vector (smi) (if x2 is not undefined) | |
| 3361 Register function = x1; | |
| 3362 Register cache_cell = x2; | |
| 3363 Register slot = x3; | |
| 3364 Register type = x4; | |
| 3365 Label slow, non_function, wrap, cont; | |
| 3366 | |
| 3367 // TODO(jbramley): This function has a lot of unnamed registers. Name them, | |
| 3368 // and tidy things up a bit. | |
| 3369 | |
| 3370 if (NeedsChecks()) { | |
| 3371 // Check that the function is really a JavaScript function. | |
| 3372 __ JumpIfSmi(function, &non_function); | |
| 3373 | |
| 3374 // Go to the slow case if we do not have a function. | |
| 3375 __ JumpIfNotObjectType(function, x10, type, JS_FUNCTION_TYPE, &slow); | |
| 3376 | |
| 3377 if (RecordCallTarget()) { | |
| 3378 GenerateRecordCallTarget(masm); | |
| 3379 } | |
| 3380 } | |
| 3381 | |
| 3382 // Fast-case: Invoke the function now. | |
| 3383 // x1 function pushed function | |
| 3384 ParameterCount actual(argc_); | |
| 3385 | |
| 3386 if (CallAsMethod()) { | |
| 3387 if (NeedsChecks()) { | |
| 3388 // Do not transform the receiver for strict mode functions. | |
| 3389 __ Ldr(x3, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset)); | |
| 3390 __ Ldr(w4, FieldMemOperand(x3, SharedFunctionInfo::kCompilerHintsOffset)); | |
| 3391 __ Tbnz(w4, SharedFunctionInfo::kStrictModeFunction, &cont); | |
| 3392 | |
| 3393 // Do not transform the receiver for natives (compiler hints already in w4). | |
| 3394 __ Tbnz(w4, SharedFunctionInfo::kNative, &cont); | |
| 3395 } | |
| 3396 | |
| 3397 // Compute the receiver in non-strict mode. | |
| 3398 __ Peek(x3, argc_ * kPointerSize); | |
| 3399 | |
| 3400 if (NeedsChecks()) { | |
| 3401 __ JumpIfSmi(x3, &wrap); | |
| 3402 __ JumpIfObjectType(x3, x10, type, FIRST_SPEC_OBJECT_TYPE, &wrap, lt); | |
| 3403 } else { | |
| 3404 __ B(&wrap); | |
| 3405 } | |
| 3406 | |
| 3407 __ Bind(&cont); | |
| 3408 } | |
| 3409 __ InvokeFunction(function, | |
| 3410 actual, | |
| 3411 JUMP_FUNCTION, | |
| 3412 NullCallWrapper()); | |
| 3413 | |
| 3414 if (NeedsChecks()) { | |
| 3415 // Slow-case: Non-function called. | |
| 3416 __ Bind(&slow); | |
| 3417 if (RecordCallTarget()) { | |
| 3418 // If there is a call target cache, mark it megamorphic in the | |
| 3419 // non-function case. MegamorphicSentinel is an immortal immovable object | |
| 3420 // (undefined) so no write barrier is needed. | |
| 3421 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | |
| 3422 masm->isolate()->heap()->undefined_value()); | |
| 3423 __ Add(x12, cache_cell, Operand::UntagSmiAndScale(slot, | |
| 3424 kPointerSizeLog2)); | |
| 3425 __ LoadRoot(x11, Heap::kUndefinedValueRootIndex); | |
| 3426 __ Str(x11, FieldMemOperand(x12, FixedArray::kHeaderSize)); | |
| 3427 } | |
| 3428 // Check for function proxy. | |
| 3429 // x10 : function type. | |
| 3430 __ CompareAndBranch(type, JS_FUNCTION_PROXY_TYPE, ne, &non_function); | |
| 3431 __ Push(function); // Put the proxy as an additional argument. | |
| 3432 __ Mov(x0, argc_ + 1); | |
| 3433 __ Mov(x2, 0); | |
| 3434 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY); | |
| 3435 { | |
| 3436 Handle<Code> adaptor = | |
| 3437 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | |
| 3438 __ Jump(adaptor, RelocInfo::CODE_TARGET); | |
| 3439 } | |
| 3440 | |
| 3441 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | |
| 3442 // of the original receiver from the call site). | |
| 3443 __ Bind(&non_function); | |
| 3444 __ Poke(function, argc_ * kXRegSizeInBytes); | |
| 3445 __ Mov(x0, argc_); // Set up the number of arguments. | |
| 3446 __ Mov(x2, 0); | |
| 3447 __ GetBuiltinFunction(function, Builtins::CALL_NON_FUNCTION); | |
| 3448 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | |
| 3449 RelocInfo::CODE_TARGET); | |
| 3450 } | |
| 3451 | |
| 3452 if (CallAsMethod()) { | |
| 3453 __ Bind(&wrap); | |
| 3454 // Wrap the receiver and patch it back onto the stack. | |
| 3455 { FrameScope frame_scope(masm, StackFrame::INTERNAL); | |
| 3456 __ Push(x1, x3); | |
| 3457 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | |
| 3458 __ Pop(x1); | |
| 3459 } | |
| 3460 __ Poke(x0, argc_ * kPointerSize); | |
| 3461 __ B(&cont); | |
| 3462 } | |
| 3463 } | |
| 3464 | |
| 3465 | |
| 3466 void CallConstructStub::Generate(MacroAssembler* masm) { | |
| 3467 ASM_LOCATION("CallConstructStub::Generate"); | |
| 3468 // x0 : number of arguments | |
| 3469 // x1 : the function to call | |
| 3470 // x2 : feedback vector | |
| 3471 // x3 : slot in feedback vector (smi) (if x2 is not undefined) | |
| 3472 Register function = x1; | |
| 3473 Label slow, non_function_call; | |
| 3474 | |
| 3475 // Check that the function is not a smi. | |
| 3476 __ JumpIfSmi(function, &non_function_call); | |
| 3477 // Check that the function is a JSFunction. | |
| 3478 Register object_type = x10; | |
| 3479 __ JumpIfNotObjectType(function, object_type, object_type, JS_FUNCTION_TYPE, | |
| 3480 &slow); | |
| 3481 | |
| 3482 if (RecordCallTarget()) { | |
| 3483 GenerateRecordCallTarget(masm); | |
| 3484 } | |
| 3485 | |
| 3486 // Jump to the function-specific construct stub. | |
| 3487 Register jump_reg = x4; | |
| 3488 Register shared_func_info = jump_reg; | |
| 3489 Register cons_stub = jump_reg; | |
| 3490 Register cons_stub_code = jump_reg; | |
| 3491 __ Ldr(shared_func_info, | |
| 3492 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | |
| 3493 __ Ldr(cons_stub, | |
| 3494 FieldMemOperand(shared_func_info, | |
| 3495 SharedFunctionInfo::kConstructStubOffset)); | |
| 3496 __ Add(cons_stub_code, cons_stub, Code::kHeaderSize - kHeapObjectTag); | |
| 3497 __ Br(cons_stub_code); | |
| 3498 | |
| 3499 Label do_call; | |
| 3500 __ Bind(&slow); | |
| 3501 __ Cmp(object_type, JS_FUNCTION_PROXY_TYPE); | |
| 3502 __ B(ne, &non_function_call); | |
| 3503 __ GetBuiltinFunction(x1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | |
| 3504 __ B(&do_call); | |
| 3505 | |
| 3506 __ Bind(&non_function_call); | |
| 3507 __ GetBuiltinFunction(x1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | |
| 3508 | |
| 3509 __ Bind(&do_call); | |
| 3510 // Set expected number of arguments to zero (not changing x0). | |
| 3511 __ Mov(x2, 0); | |
| 3512 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | |
| 3513 RelocInfo::CODE_TARGET); | |
| 3514 } | |
| 3515 | |
| 3516 | |
| 3517 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | |
| 3518 // If the receiver is a smi, trigger the non-string case. | |
| 3519 __ JumpIfSmi(object_, receiver_not_string_); | |
| 3520 | |
| 3521 // Fetch the instance type of the receiver into result register. | |
| 3522 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
| 3523 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
| 3524 | |
| 3525 // If the receiver is not a string, trigger the non-string case. | |
| 3526 __ TestAndBranchIfAnySet(result_, kIsNotStringMask, receiver_not_string_); | |
| 3527 | |
| 3528 // If the index is not a smi, trigger the non-smi case. | |
| 3529 __ JumpIfNotSmi(index_, &index_not_smi_); | |
| 3530 | |
| 3531 __ Bind(&got_smi_index_); | |
| 3532 // Check for index out of range. | |
| 3533 __ Ldrsw(result_, UntagSmiFieldMemOperand(object_, String::kLengthOffset)); | |
| 3534 __ Cmp(result_, Operand::UntagSmi(index_)); | |
| 3535 __ B(ls, index_out_of_range_); | |
| 3536 | |
| 3537 __ SmiUntag(index_); | |
| 3538 | |
| 3539 StringCharLoadGenerator::Generate(masm, | |
| 3540 object_, | |
| 3541 index_.W(), | |
| 3542 result_, | |
| 3543 &call_runtime_); | |
| 3544 __ SmiTag(result_); | |
| 3545 __ Bind(&exit_); | |
| 3546 } | |
| 3547 | |
| 3548 | |
| 3549 void StringCharCodeAtGenerator::GenerateSlow( | |
| 3550 MacroAssembler* masm, | |
| 3551 const RuntimeCallHelper& call_helper) { | |
| 3552 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); | |
| 3553 | |
| 3554 __ Bind(&index_not_smi_); | |
| 3555 // If index is a heap number, try converting it to an integer. | |
| 3556 __ CheckMap(index_, | |
| 3557 result_, | |
| 3558 Heap::kHeapNumberMapRootIndex, | |
| 3559 index_not_number_, | |
| 3560 DONT_DO_SMI_CHECK); | |
| 3561 call_helper.BeforeCall(masm); | |
| 3562 // Save object_ on the stack and pass index_ as argument for runtime call. | |
| 3563 __ Push(object_, index_); | |
| 3564 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | |
| 3565 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | |
| 3566 } else { | |
| 3567 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | |
| 3568 // NumberToSmi discards numbers that are not exact integers. | |
| 3569 __ CallRuntime(Runtime::kNumberToSmi, 1); | |
| 3570 } | |
| 3571 // Save the conversion result before the pop instructions below | |
| 3572 // have a chance to overwrite it. | |
| 3573 __ Mov(index_, x0); | |
| 3574 __ Pop(object_); | |
| 3575 // Reload the instance type. | |
| 3576 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | |
| 3577 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | |
| 3578 call_helper.AfterCall(masm); | |
| 3579 | |
| 3580 // If index is still not a smi, it must be out of range. | |
| 3581 __ JumpIfNotSmi(index_, index_out_of_range_); | |
| 3582 // Otherwise, return to the fast path. | |
| 3583 __ B(&got_smi_index_); | |
| 3584 | |
| 3585 // Call runtime. We get here when the receiver is a string and the | |
| 3586 // index is a number, but the code for getting the actual character | |
| 3587 // is too complex (e.g., when the string needs to be flattened). | |
| 3588 __ Bind(&call_runtime_); | |
| 3589 call_helper.BeforeCall(masm); | |
| 3590 __ SmiTag(index_); | |
| 3591 __ Push(object_, index_); | |
| 3592 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | |
| 3593 __ Mov(result_, x0); | |
| 3594 call_helper.AfterCall(masm); | |
| 3595 __ B(&exit_); | |
| 3596 | |
| 3597 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | |
| 3598 } | |
| 3599 | |
| 3600 | |
| 3601 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | |
| 3602 __ JumpIfNotSmi(code_, &slow_case_); | |
| 3603 __ Cmp(code_, Operand(Smi::FromInt(String::kMaxOneByteCharCode))); | |
| 3604 __ B(hi, &slow_case_); | |
| 3605 | |
| 3606 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | |
| 3607 // At this point the code register contains a smi-tagged ASCII char code. | |
| 3608 STATIC_ASSERT(kSmiShift > kPointerSizeLog2); | |
| 3609 __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2)); | |
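| // The Add above converts the smi-tagged char code straight into a byte | |
| // offset: shifting right by (kSmiShift - kPointerSizeLog2) yields the | |
| // untagged value multiplied by kPointerSize, saving a separate | |
| // untag-then-scale sequence. | |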
| 3610 __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | |
| 3611 __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_); | |
| 3612 __ Bind(&exit_); | |
| 3613 } | |
| 3614 | |
| 3615 | |
| 3616 void StringCharFromCodeGenerator::GenerateSlow( | |
| 3617 MacroAssembler* masm, | |
| 3618 const RuntimeCallHelper& call_helper) { | |
| 3619 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); | |
| 3620 | |
| 3621 __ Bind(&slow_case_); | |
| 3622 call_helper.BeforeCall(masm); | |
| 3623 __ Push(code_); | |
| 3624 __ CallRuntime(Runtime::kCharFromCode, 1); | |
| 3625 __ Mov(result_, x0); | |
| 3626 call_helper.AfterCall(masm); | |
| 3627 __ B(&exit_); | |
| 3628 | |
| 3629 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | |
| 3630 } | |
| 3631 | |
| 3632 | |
| 3633 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | |
| 3634 // Inputs are in x1 (lhs) and x0 (rhs). | |
| 3635 ASSERT(state_ == CompareIC::SMI); | |
| 3636 ASM_LOCATION("ICCompareStub[Smis]"); | |
| 3637 Label miss; | |
| 3638 // Bail out (to 'miss') unless both x0 and x1 are smis. | |
| 3639 __ JumpIfEitherNotSmi(x0, x1, &miss); | |
| 3640 | |
| 3641 if (GetCondition() == eq) { | |
| 3642 // For equality we do not care about the sign of the result. | |
| 3643 __ Sub(x0, x0, x1); | |
| 3644 } else { | |
| 3645 // Untag before subtracting to avoid handling overflow. | |
| 3646 __ SmiUntag(x1); | |
| 3647 __ Sub(x0, x1, Operand::UntagSmi(x0)); | |
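| // Worked example (kSmiShift == 32): for lhs == Smi(5) and rhs == Smi(7) | |
| // the result is 5 - 7 == -2, whose sign encodes LESS. Subtracting the | |
| // untagged values cannot overflow, since smi payloads fit in 32 bits. | |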
| 3648 } | |
| 3649 __ Ret(); | |
| 3650 | |
| 3651 __ Bind(&miss); | |
| 3652 GenerateMiss(masm); | |
| 3653 } | |
| 3654 | |
| 3655 | |
| 3656 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | |
| 3657 ASSERT(state_ == CompareIC::NUMBER); | |
| 3658 ASM_LOCATION("ICCompareStub[HeapNumbers]"); | |
| 3659 | |
| 3660 Label unordered, maybe_undefined1, maybe_undefined2; | |
| 3661 Label miss, handle_lhs, values_in_d_regs; | |
| 3662 Label untag_rhs, untag_lhs; | |
| 3663 | |
| 3664 Register result = x0; | |
| 3665 Register rhs = x0; | |
| 3666 Register lhs = x1; | |
| 3667 FPRegister rhs_d = d0; | |
| 3668 FPRegister lhs_d = d1; | |
| 3669 | |
| 3670 if (left_ == CompareIC::SMI) { | |
| 3671 __ JumpIfNotSmi(lhs, &miss); | |
| 3672 } | |
| 3673 if (right_ == CompareIC::SMI) { | |
| 3674 __ JumpIfNotSmi(rhs, &miss); | |
| 3675 } | |
| 3676 | |
| 3677 __ SmiUntagToDouble(rhs_d, rhs, kSpeculativeUntag); | |
| 3678 __ SmiUntagToDouble(lhs_d, lhs, kSpeculativeUntag); | |
| 3679 | |
| 3680 // Load rhs if it's a heap number. | |
| 3681 __ JumpIfSmi(rhs, &handle_lhs); | |
| 3682 __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | |
| 3683 DONT_DO_SMI_CHECK); | |
| 3684 __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
| 3685 | |
| 3686 // Load lhs if it's a heap number. | |
| 3687 __ Bind(&handle_lhs); | |
| 3688 __ JumpIfSmi(lhs, &values_in_d_regs); | |
| 3689 __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | |
| 3690 DONT_DO_SMI_CHECK); | |
| 3691 __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
| 3692 | |
| 3693 __ Bind(&values_in_d_regs); | |
| 3694 __ Fcmp(lhs_d, rhs_d); | |
| 3695 __ B(vs, &unordered); // Overflow flag set if either is NaN. | |
| 3696 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); | |
| 3697 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). | |
| 3698 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. | |
| 3699 __ Ret(); | |
| 3700 | |
| 3701 __ Bind(&unordered); | |
| 3702 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | |
| 3703 CompareIC::GENERIC); | |
| 3704 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
| 3705 | |
| 3706 __ Bind(&maybe_undefined1); | |
| 3707 if (Token::IsOrderedRelationalCompareOp(op_)) { | |
| 3708 __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss); | |
| 3709 __ JumpIfSmi(lhs, &unordered); | |
| 3710 __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2); | |
| 3711 __ B(&unordered); | |
| 3712 } | |
| 3713 | |
| 3714 __ Bind(&maybe_undefined2); | |
| 3715 if (Token::IsOrderedRelationalCompareOp(op_)) { | |
| 3716 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered); | |
| 3717 } | |
| 3718 | |
| 3719 __ Bind(&miss); | |
| 3720 GenerateMiss(masm); | |
| 3721 } | |
| 3722 | |
| 3723 | |
| 3724 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | |
| 3725 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | |
| 3726 ASM_LOCATION("ICCompareStub[InternalizedStrings]"); | |
| 3727 Label miss; | |
| 3728 | |
| 3729 Register result = x0; | |
| 3730 Register rhs = x0; | |
| 3731 Register lhs = x1; | |
| 3732 | |
| 3733 // Check that both operands are heap objects. | |
| 3734 __ JumpIfEitherSmi(lhs, rhs, &miss); | |
| 3735 | |
| 3736 // Check that both operands are internalized strings. | |
| 3737 Register rhs_map = x10; | |
| 3738 Register lhs_map = x11; | |
| 3739 Register rhs_type = x10; | |
| 3740 Register lhs_type = x11; | |
| 3741 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
| 3742 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
| 3743 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); | |
| 3744 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); | |
| 3745 | |
| 3746 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
| 3747 __ Orr(x12, lhs_type, rhs_type); | |
| 3748 __ TestAndBranchIfAnySet( | |
| 3749 x12, kIsNotStringMask | kIsNotInternalizedMask, &miss); | |
| 3750 | |
| 3751 // Internalized strings are compared by identity. | |
| 3752 STATIC_ASSERT(EQUAL == 0); | |
| 3753 __ Cmp(lhs, rhs); | |
| 3754 __ Cset(result, ne); | |
| 3755 __ Ret(); | |
| 3756 | |
| 3757 __ Bind(&miss); | |
| 3758 GenerateMiss(masm); | |
| 3759 } | |
| 3760 | |
| 3761 | |
| 3762 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | |
| 3763 ASSERT(state_ == CompareIC::UNIQUE_NAME); | |
| 3764 ASM_LOCATION("ICCompareStub[UniqueNames]"); | |
| 3765 ASSERT(GetCondition() == eq); | |
| 3766 Label miss; | |
| 3767 | |
| 3768 Register result = x0; | |
| 3769 Register rhs = x0; | |
| 3770 Register lhs = x1; | |
| 3771 | |
| 3772 Register lhs_instance_type = w2; | |
| 3773 Register rhs_instance_type = w3; | |
| 3774 | |
| 3775 // Check that both operands are heap objects. | |
| 3776 __ JumpIfEitherSmi(lhs, rhs, &miss); | |
| 3777 | |
| 3778 // Check that both operands are unique names. This leaves the instance | |
| 3779 // types loaded in lhs_instance_type and rhs_instance_type. | |
| 3780 __ Ldr(x10, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
| 3781 __ Ldr(x11, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
| 3782 __ Ldrb(lhs_instance_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | |
| 3783 __ Ldrb(rhs_instance_type, FieldMemOperand(x11, Map::kInstanceTypeOffset)); | |
| 3784 | |
| 3785 // To avoid a miss, each operand must be a unique name: either a symbol | |
| 3786 // (SYMBOL_TYPE) or an internalized string. | |
| 3787 __ JumpIfNotUniqueName(lhs_instance_type, &miss); | |
| 3788 __ JumpIfNotUniqueName(rhs_instance_type, &miss); | |
| 3789 | |
| 3790 // Unique names are compared by identity. | |
| 3791 STATIC_ASSERT(EQUAL == 0); | |
| 3792 __ Cmp(lhs, rhs); | |
| 3793 __ Cset(result, ne); | |
| 3794 __ Ret(); | |
| 3795 | |
| 3796 __ Bind(&miss); | |
| 3797 GenerateMiss(masm); | |
| 3798 } | |
| 3799 | |
| 3800 | |
| 3801 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | |
| 3802 ASSERT(state_ == CompareIC::STRING); | |
| 3803 ASM_LOCATION("ICCompareStub[Strings]"); | |
| 3804 | |
| 3805 Label miss; | |
| 3806 | |
| 3807 bool equality = Token::IsEqualityOp(op_); | |
| 3808 | |
| 3809 Register result = x0; | |
| 3810 Register rhs = x0; | |
| 3811 Register lhs = x1; | |
| 3812 | |
| 3813 // Check that both operands are heap objects. | |
| 3814 __ JumpIfEitherSmi(rhs, lhs, &miss); | |
| 3815 | |
| 3816 // Check that both operands are strings. | |
| 3817 Register rhs_map = x10; | |
| 3818 Register lhs_map = x11; | |
| 3819 Register rhs_type = x10; | |
| 3820 Register lhs_type = x11; | |
| 3821 __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset)); | |
| 3822 __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset)); | |
| 3823 __ Ldrb(lhs_type, FieldMemOperand(lhs_map, Map::kInstanceTypeOffset)); | |
| 3824 __ Ldrb(rhs_type, FieldMemOperand(rhs_map, Map::kInstanceTypeOffset)); | |
| 3825 STATIC_ASSERT(kNotStringTag != 0); | |
| 3826 __ Orr(x12, lhs_type, rhs_type); | |
| 3827 __ Tbnz(x12, MaskToBit(kIsNotStringMask), &miss); | |
| 3828 | |
| 3829 // Fast check for identical strings. | |
| 3830 Label not_equal; | |
| 3831 __ Cmp(lhs, rhs); | |
| 3832 __ B(ne, &not_equal); | |
| 3833 __ Mov(result, EQUAL); | |
| 3834 __ Ret(); | |
| 3835 | |
| 3836 __ Bind(&not_equal); | |
| 3837 // Handle non-identical strings. | |
| 3838 | |
| 3839 // Check that both strings are internalized strings. If they are, we're done | |
| 3840 // because we already know they are not identical. We know they are both | |
| 3841 // strings. | |
| 3842 if (equality) { | |
| 3843 ASSERT(GetCondition() == eq); | |
| 3844 STATIC_ASSERT(kInternalizedTag == 0); | |
| 3845 Label not_internalized_strings; | |
| 3846 __ Orr(x12, lhs_type, rhs_type); | |
| 3847 __ TestAndBranchIfAnySet( | |
| 3848 x12, kIsNotInternalizedMask, &not_internalized_strings); | |
| 3849 // Result is in rhs (x0), and not EQUAL, as rhs is not a smi. | |
| 3850 __ Ret(); | |
| 3851 __ Bind(&not_internalized_strings); | |
| 3852 } | |
| 3853 | |
| 3854 // Check that both strings are sequential ASCII. | |
| 3855 Label runtime; | |
| 3856 __ JumpIfBothInstanceTypesAreNotSequentialAscii( | |
| 3857 lhs_type, rhs_type, x12, x13, &runtime); | |
| 3858 | |
| 3859 // Compare flat ASCII strings. Returns when done. | |
| 3860 if (equality) { | |
| 3861 StringCompareStub::GenerateFlatAsciiStringEquals( | |
| 3862 masm, lhs, rhs, x10, x11, x12); | |
| 3863 } else { | |
| 3864 StringCompareStub::GenerateCompareFlatAsciiStrings( | |
| 3865 masm, lhs, rhs, x10, x11, x12, x13); | |
| 3866 } | |
| 3867 | |
| 3868 // Handle more complex cases in runtime. | |
| 3869 __ Bind(&runtime); | |
| 3870 __ Push(lhs, rhs); | |
| 3871 if (equality) { | |
| 3872 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); | |
| 3873 } else { | |
| 3874 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | |
| 3875 } | |
| 3876 | |
| 3877 __ Bind(&miss); | |
| 3878 GenerateMiss(masm); | |
| 3879 } | |
| 3880 | |
| 3881 | |
| 3882 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | |
| 3883 ASSERT(state_ == CompareIC::OBJECT); | |
| 3884 ASM_LOCATION("ICCompareStub[Objects]"); | |
| 3885 | |
| 3886 Label miss; | |
| 3887 | |
| 3888 Register result = x0; | |
| 3889 Register rhs = x0; | |
| 3890 Register lhs = x1; | |
| 3891 | |
| 3892 __ JumpIfEitherSmi(rhs, lhs, &miss); | |
| 3893 | |
| 3894 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); | |
| 3895 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); | |
| 3896 | |
| 3897 ASSERT(GetCondition() == eq); | |
| 3898 __ Sub(result, rhs, lhs); | |
| 3899 __ Ret(); | |
| 3900 | |
| 3901 __ Bind(&miss); | |
| 3902 GenerateMiss(masm); | |
| 3903 } | |


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  ASM_LOCATION("ICCompareStub[KnownObjects]");

  Label miss;

  Register result = x0;
  Register rhs = x0;
  Register lhs = x1;

  __ JumpIfEitherSmi(rhs, lhs, &miss);

  Register rhs_map = x10;
  Register lhs_map = x11;
  __ Ldr(rhs_map, FieldMemOperand(rhs, HeapObject::kMapOffset));
  __ Ldr(lhs_map, FieldMemOperand(lhs, HeapObject::kMapOffset));
  __ Cmp(rhs_map, Operand(known_map_));
  __ B(ne, &miss);
  __ Cmp(lhs_map, Operand(known_map_));
  __ B(ne, &miss);

  __ Sub(result, rhs, lhs);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


// This method handles the case where a compare stub had the wrong
// implementation. It calls a miss handler, which re-writes the stub. All other
// ICCompareStub::Generate* methods should fall back into this one if their
// operands were not the expected types.
void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("ICCompareStub[Miss]");

  Register stub_entry = x11;
  {
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());

    FrameScope scope(masm, StackFrame::INTERNAL);
    Register op = x10;
    Register left = x1;
    Register right = x0;
    // Preserve some caller-saved registers.
    __ Push(x1, x0, lr);
    // Push the arguments.
    __ Mov(op, Operand(Smi::FromInt(op_)));
    __ Push(left, right, op);

    // Call the miss handler. This also pops the arguments.
    __ CallExternalReference(miss, 3);

    // Compute the entry point of the rewritten stub.
    __ Add(stub_entry, x0, Code::kHeaderSize - kHeapObjectTag);
    // Restore caller-saved registers.
    __ Pop(lr, x0, x1);
  }

  // Tail-call to the new stub.
  __ Jump(stub_entry);
}
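
// The miss protocol above, in C-like terms: the runtime's CompareIC_Miss
// handler returns the freshly rewritten stub as a (tagged) Code object in x0,
// and the stub's entry point is that pointer, untagged, past the Code header.
// Illustrative sketch only:
//
//   Address entry = reinterpret_cast<Address>(code_object)
//                   + Code::kHeaderSize - kHeapObjectTag;
//   jump(entry);  // With the original x0/x1 operands restored first.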


void StringHelper::GenerateHashInit(MacroAssembler* masm,
                                    Register hash,
                                    Register character) {
  ASSERT(!AreAliased(hash, character));

  // hash = character + (character << 10);
  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
  // Untag smi seed and add the character.
  __ Add(hash, character, Operand(hash, LSR, kSmiShift));

  // Compute hashes modulo 2^32 using a 32-bit W register.
  Register hash_w = hash.W();

  // hash += hash << 10;
  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
  // hash ^= hash >> 6;
  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
}


void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
                                            Register hash,
                                            Register character) {
  ASSERT(!AreAliased(hash, character));

  // hash += character;
  __ Add(hash, hash, character);

  // Compute hashes modulo 2^32 using a 32-bit W register.
  Register hash_w = hash.W();

  // hash += hash << 10;
  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
  // hash ^= hash >> 6;
  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
}


void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
                                       Register hash,
                                       Register scratch) {
  // Compute hashes modulo 2^32 using a 32-bit W register.
  Register hash_w = hash.W();
  Register scratch_w = scratch.W();
  ASSERT(!AreAliased(hash_w, scratch_w));

  // hash += hash << 3;
  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
  // hash ^= hash >> 11;
  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
  // hash += hash << 15;
  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));

  __ Ands(hash_w, hash_w, String::kHashBitMask);

  // if (hash == 0) hash = 27;
  __ Mov(scratch_w, StringHasher::kZeroHash);
  __ Csel(hash_w, scratch_w, hash_w, eq);
}
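
// For reference, the three helpers above compose into the following C-like
// sketch of the string hash (a Jenkins one-at-a-time variant), assembled from
// the per-step comments; treat it as an illustration, not the canonical
// definition:
//
//   uint32_t hash = seed;           // Untagged Heap::kHashSeedRootIndex.
//   for (each character c) {        // GenerateHashInit handles the first c,
//     hash += c;                    // GenerateHashAddCharacter the rest.
//     hash += hash << 10;
//     hash ^= hash >> 6;
//   }
//   hash += hash << 3;              // GenerateHashGetHash finalizes.
//   hash ^= hash >> 11;
//   hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;  // 27, per comment above.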


void SubStringStub::Generate(MacroAssembler* masm) {
  ASM_LOCATION("SubStringStub::Generate");
  Label runtime;

  // Stack frame on entry.
  //  lr: return address
  //  jssp[0]:  substring "to" offset
  //  jssp[8]:  substring "from" offset
  //  jssp[16]: pointer to string object

  // This stub is called from the native-call %_SubString(...), so
  // nothing can be assumed about the arguments. It is tested that:
  //  "string" is a sequential string,
  //  both "from" and "to" are smis, and
  //  0 <= from <= to <= string.length (in debug mode.)
  // If any of these assumptions fail, we call the runtime system.

  static const int kToOffset = 0 * kPointerSize;
  static const int kFromOffset = 1 * kPointerSize;
  static const int kStringOffset = 2 * kPointerSize;

  Register to = x0;
  Register from = x15;
  Register input_string = x10;
  Register input_length = x11;
  Register input_type = x12;
  Register result_string = x0;
  Register result_length = x1;
  Register temp = x3;

  __ Peek(to, kToOffset);
  __ Peek(from, kFromOffset);

  // Check that both from and to are smis. If not, jump to runtime.
  __ JumpIfEitherNotSmi(from, to, &runtime);
  __ SmiUntag(from);
  __ SmiUntag(to);

  // Calculate difference between from and to. If to < from, branch to runtime.
  __ Subs(result_length, to, from);
  __ B(mi, &runtime);

  // Check from is positive.
  __ Tbnz(from, kWSignBit, &runtime);

  // Make sure first argument is a string.
  __ Peek(input_string, kStringOffset);
  __ JumpIfSmi(input_string, &runtime);
  __ IsObjectJSStringType(input_string, input_type, &runtime);

  Label single_char;
  __ Cmp(result_length, 1);
  __ B(eq, &single_char);

  // Short-cut for the case of trivial substring.
  Label return_x0;
  __ Ldrsw(input_length,
           UntagSmiFieldMemOperand(input_string, String::kLengthOffset));

  __ Cmp(result_length, input_length);
  __ CmovX(x0, input_string, eq);
  // Return original string.
  __ B(eq, &return_x0);

  // Longer than original string's length or negative: unsafe arguments.
  __ B(hi, &runtime);

  // Shorter than original string's length: an actual substring.

  //   x0   to               substring end character offset
  //   x1   result_length    length of substring result
  //   x10  input_string     pointer to input string object
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x15  from             substring start character offset

  // Deal with different string types: update the index if necessary and put
  // the underlying string into register unpacked_string.
  Label underlying_unpacked, sliced_string, seq_or_external_string;
  Label update_instance_type;
  // If the string is not indirect, it can only be sequential or external.
  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
  STATIC_ASSERT(kIsIndirectStringMask != 0);

  // Test for string types, and branch/fall through to appropriate unpacking
  // code.
  __ Tst(input_type, kIsIndirectStringMask);
  __ B(eq, &seq_or_external_string);
  __ Tst(input_type, kSlicedNotConsMask);
  __ B(ne, &sliced_string);

  Register unpacked_string = input_string;

  // Cons string. Check whether it is flat, then fetch first part.
  __ Ldr(temp, FieldMemOperand(input_string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(temp, Heap::kempty_stringRootIndex, &runtime);
  __ Ldr(unpacked_string,
         FieldMemOperand(input_string, ConsString::kFirstOffset));
  __ B(&update_instance_type);

  __ Bind(&sliced_string);
  // Sliced string. Fetch parent and correct start index by offset.
  __ Ldrsw(temp,
           UntagSmiFieldMemOperand(input_string, SlicedString::kOffsetOffset));
  __ Add(from, from, temp);
  __ Ldr(unpacked_string,
         FieldMemOperand(input_string, SlicedString::kParentOffset));

  __ Bind(&update_instance_type);
  __ Ldr(temp, FieldMemOperand(unpacked_string, HeapObject::kMapOffset));
  __ Ldrb(input_type, FieldMemOperand(temp, Map::kInstanceTypeOffset));
  // TODO(all): This generates "b #+0x4". Can these be optimised out?
  __ B(&underlying_unpacked);

  __ Bind(&seq_or_external_string);
  // Sequential or external string. Registers unpacked_string and input_string
  // alias, so there's nothing to do here.

  //   x0   result_string    pointer to result string object (uninit)
  //   x1   result_length    length of substring result
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x15  from             substring start character offset
  __ Bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    __ Cmp(result_length, SlicedString::kMinLength);
    // Short slice. Copy instead of slicing.
    __ B(lt, &copy_routine);
    // Allocate new sliced string. At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string. It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyway due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
    __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
                                 &runtime);
    __ B(&set_slice_header);

    __ Bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(result_string, result_length, x3, x4,
                                   &runtime);

    __ Bind(&set_slice_header);
    __ SmiTag(from);
    __ Str(from, FieldMemOperand(result_string, SlicedString::kOffsetOffset));
    __ Str(unpacked_string,
           FieldMemOperand(result_string, SlicedString::kParentOffset));
    __ B(&return_x0);

    __ Bind(&copy_routine);
  }

  //   x0   result_string    pointer to result string object (uninit)
  //   x1   result_length    length of substring result
  //   x10  unpacked_string  pointer to unpacked string object
  //   x11  input_length     length of input string
  //   x12  input_type       instance type of input string
  //   x13  unpacked_char0   pointer to first char of unpacked string (uninit)
  //   x13  substring_char0  pointer to first char of substring (uninit)
  //   x14  result_char0     pointer to first char of result (uninit)
  //   x15  from             substring start character offset
  Register unpacked_char0 = x13;
  Register substring_char0 = x13;
  Register result_char0 = x14;
  Label two_byte_sequential, sequential_string, allocate_result;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);

  __ Tst(input_type, kExternalStringTag);
  __ B(eq, &sequential_string);

  __ Tst(input_type, kShortExternalStringTag);
  __ B(ne, &runtime);
  __ Ldr(unpacked_char0,
         FieldMemOperand(unpacked_string, ExternalString::kResourceDataOffset));
  // unpacked_char0 points to the first character of the underlying string.
  __ B(&allocate_result);

  __ Bind(&sequential_string);
  // Locate first character of underlying subject string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(unpacked_char0, unpacked_string,
         SeqOneByteString::kHeaderSize - kHeapObjectTag);

  __ Bind(&allocate_result);
  // Sequential ASCII string. Allocate the result.
  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
  __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);

  // Allocate and copy the resulting ASCII string.
  __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);

  // Locate first character of substring to copy.
  __ Add(substring_char0, unpacked_char0, from);

  // Locate first character of result.
  __ Add(result_char0, result_string,
         SeqOneByteString::kHeaderSize - kHeapObjectTag);

  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);
  __ B(&return_x0);

  // Allocate and copy the resulting two-byte string.
  __ Bind(&two_byte_sequential);
  __ AllocateTwoByteString(result_string, result_length, x3, x4, x5, &runtime);

  // Locate first character of substring to copy.
  __ Add(substring_char0, unpacked_char0, Operand(from, LSL, 1));

  // Locate first character of result.
  __ Add(result_char0, result_string,
         SeqTwoByteString::kHeaderSize - kHeapObjectTag);

  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
  __ Add(result_length, result_length, result_length);
  __ CopyBytes(result_char0, substring_char0, result_length, x3, kCopyLong);

  __ Bind(&return_x0);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->sub_string_native(), 1, x3, x4);
  __ Drop(3);
  __ Ret();

  __ Bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);

  __ bind(&single_char);
  //   x1   result_length
  //   x10  input_string
  //   x12  input_type
  //   x15  from (untagged)
  __ SmiTag(from);
  StringCharAtGenerator generator(
      input_string, from, result_length, x0,
      &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
  generator.GenerateFast(masm);
  // TODO(jbramley): Why doesn't this jump to return_x0?
  __ Drop(3);
  __ Ret();
  generator.SkipSlow(masm, &runtime);
}
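
// Note on the strategy above: with FLAG_string_slices on and a result of at
// least SlicedString::kMinLength characters, the stub allocates only a
// SlicedString header pointing into the (unpacked) parent; shorter results,
// and all results when slices are disabled, fall through to a sequential
// allocation plus a character copy. A rough C-like sketch of the decision
// (NewSlicedString/AllocateSequential/CopyChars/char_size are illustrative
// names, not real helpers):
//
//   if (FLAG_string_slices && result_length >= SlicedString::kMinLength) {
//     result = NewSlicedString(unpacked_string, from, result_length);
//   } else {
//     result = AllocateSequential(encoding, result_length);
//     CopyChars(result, unpacked_char0 + from * char_size, result_length);
//   }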


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2,
                                                      Register scratch3) {
  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
  Register result = x0;
  Register left_length = scratch1;
  Register right_length = scratch2;

  // Compare lengths. If lengths differ, strings can't be equal. Lengths are
  // smis, and don't need to be untagged.
  Label strings_not_equal, check_zero_length;
  __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset));
  __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset));
  __ Cmp(left_length, right_length);
  __ B(eq, &check_zero_length);

  __ Bind(&strings_not_equal);
  __ Mov(result, Operand(Smi::FromInt(NOT_EQUAL)));
  __ Ret();

  // Check if the length is zero. If so, the strings must be equal (and empty.)
  Label compare_chars;
  __ Bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ Cbnz(left_length, &compare_chars);
  __ Mov(result, Operand(Smi::FromInt(EQUAL)));
  __ Ret();

  // Compare characters. Falls through if all characters are equal.
  __ Bind(&compare_chars);
  GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
                                scratch3, &strings_not_equal);

  // Characters in strings are equal.
  __ Mov(result, Operand(Smi::FromInt(EQUAL)));
  __ Ret();
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
  Label result_not_equal, compare_lengths;

  // Find minimum length and length difference.
  Register length_delta = scratch3;
  __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
  __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset));
  __ Subs(length_delta, scratch1, scratch2);

  Register min_length = scratch1;
  __ Csel(min_length, scratch2, scratch1, gt);
  __ Cbz(min_length, &compare_lengths);

  // Compare loop.
  GenerateAsciiCharsCompareLoop(masm,
                                left, right, min_length, scratch2, scratch4,
                                &result_not_equal);

  // Compare lengths - strings up to min-length are equal.
  __ Bind(&compare_lengths);

  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));

  // Use length_delta as result if it's zero.
  Register result = x0;
  __ Subs(result, length_delta, 0);

  __ Bind(&result_not_equal);
  Register greater = x10;
  Register less = x11;
  __ Mov(greater, Operand(Smi::FromInt(GREATER)));
  __ Mov(less, Operand(Smi::FromInt(LESS)));
  __ CmovX(result, greater, gt);
  __ CmovX(result, less, lt);
  __ Ret();
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch1,
    Register scratch2,
    Label* chars_not_equal) {
  ASSERT(!AreAliased(left, right, length, scratch1, scratch2));

  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiUntag(length);
  __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag);
  __ Add(left, left, scratch1);
  __ Add(right, right, scratch1);

  Register index = length;
  __ Neg(index, length);  // index = -length;

  // Compare loop
  Label loop;
  __ Bind(&loop);
  __ Ldrb(scratch1, MemOperand(left, index));
  __ Ldrb(scratch2, MemOperand(right, index));
  __ Cmp(scratch1, scratch2);
  __ B(ne, chars_not_equal);
  __ Add(index, index, 1);
  __ Cbnz(index, &loop);
}
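
// The loop above uses a common trick: instead of comparing an ascending index
// against the length on every iteration, it biases both base pointers past
// the last character and counts a negative index up towards zero, so the
// flags set by the final Add/Cbnz end the loop for free. An illustrative C
// sketch of the same idea (the real code works on raw heap pointers):
//
//   const char* l = left_chars + length;
//   const char* r = right_chars + length;
//   for (ptrdiff_t i = -length; i != 0; i++) {
//     if (l[i] != r[i]) return false;  // chars_not_equal
//   }
//   return true;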


void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  Counters* counters = masm->isolate()->counters();

  // Stack frame on entry.
  //  sp[0]: right string
  //  sp[8]: left string
  Register right = x10;
  Register left = x11;
  Register result = x0;
  __ Pop(right, left);

  Label not_same;
  __ Subs(result, right, left);
  __ B(ne, &not_same);
  STATIC_ASSERT(EQUAL == 0);
  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
  __ Ret();

  __ Bind(&not_same);

  // Check that both objects are sequential ASCII strings.
  __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);

  // Compare flat ASCII strings natively. Remove arguments from stack first,
  // as this function will generate a return.
  __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
  GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);

  __ Bind(&runtime);

  // Push arguments back on to the stack.
  //  sp[0] = right string
  //  sp[8] = left string.
  __ Push(left, right);

  // Call the runtime.
  // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void ArrayPushStub::Generate(MacroAssembler* masm) {
  Register receiver = x0;

  int argc = arguments_count();

  if (argc == 0) {
    // Nothing to do, just return the length.
    __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
    __ Drop(argc + 1);
    __ Ret();
    return;
  }

  Isolate* isolate = masm->isolate();

  if (argc != 1) {
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
    return;
  }

  Label call_builtin, attempt_to_grow_elements, with_write_barrier;

  Register elements_length = x8;
  Register length = x7;
  Register elements = x6;
  Register end_elements = x5;
  Register value = x4;
  // Get the elements array of the object.
  __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));

  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
    // Check that the elements are in fast mode and writable.
    __ CheckMap(elements,
                x10,
                Heap::kFixedArrayMapRootIndex,
                &call_builtin,
                DONT_DO_SMI_CHECK);
  }

  // Get the array's length and calculate new length.
  __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
  STATIC_ASSERT(kSmiTag == 0);
  __ Add(length, length, Operand(Smi::FromInt(argc)));

  // Check if we could survive without allocation.
  __ Ldr(elements_length,
         FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(length, elements_length);

  const int kEndElementsOffset =
      FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;

  if (IsFastSmiOrObjectElementsKind(elements_kind())) {
    __ B(gt, &attempt_to_grow_elements);

    // Check if value is a smi.
    __ Peek(value, (argc - 1) * kPointerSize);
    __ JumpIfNotSmi(value, &with_write_barrier);

    // Store the value.
    // We may need a register containing the address end_elements below,
    // so write back the value in end_elements.
    __ Add(end_elements, elements,
           Operand::UntagSmiAndScale(length, kPointerSizeLog2));
    __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
  } else {
    // TODO(all): ARM has a redundant cmp here.
    __ B(gt, &call_builtin);

    __ Peek(value, (argc - 1) * kPointerSize);
    __ StoreNumberToDoubleElements(value, length, elements, x10, d0, d1,
                                   &call_builtin, argc * kDoubleSize);
  }

  // Save new length.
  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));

  // Return length.
  __ Drop(argc + 1);
  __ Mov(x0, length);
  __ Ret();

  if (IsFastDoubleElementsKind(elements_kind())) {
    __ Bind(&call_builtin);
    __ TailCallExternalReference(
        ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
    return;
  }

  __ Bind(&with_write_barrier);

  if (IsFastSmiElementsKind(elements_kind())) {
    if (FLAG_trace_elements_transitions) {
      __ B(&call_builtin);
    }

    __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
    __ JumpIfHeapNumber(x10, &call_builtin);

    ElementsKind target_kind = IsHoleyElementsKind(elements_kind())
        ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS;
    __ Ldr(x10, GlobalObjectMemOperand());
    __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kNativeContextOffset));
    __ Ldr(x10, ContextMemOperand(x10, Context::JS_ARRAY_MAPS_INDEX));
    const int header_size = FixedArrayBase::kHeaderSize;
    // Verify that the object can be transitioned in place.
    const int origin_offset = header_size + elements_kind() * kPointerSize;
    __ ldr(x11, FieldMemOperand(receiver, origin_offset));
    __ ldr(x12, FieldMemOperand(x10, HeapObject::kMapOffset));
    __ cmp(x11, x12);
    __ B(ne, &call_builtin);

    const int target_offset = header_size + target_kind * kPointerSize;
    __ Ldr(x10, FieldMemOperand(x10, target_offset));
    __ Mov(x11, receiver);
    ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
        masm, DONT_TRACK_ALLOCATION_SITE, NULL);
  }

  // Save new length.
  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));

  // Store the value.
  // We may need a register containing the address end_elements below,
  // so write back the value in end_elements.
  __ Add(end_elements, elements,
         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
  __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));

  __ RecordWrite(elements,
                 end_elements,
                 value,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Drop(argc + 1);
  __ Mov(x0, length);
  __ Ret();

  __ Bind(&attempt_to_grow_elements);

  if (!FLAG_inline_new) {
    __ B(&call_builtin);
  }

  Register argument = x2;
  __ Peek(argument, (argc - 1) * kPointerSize);
  // Growing elements that are SMI-only requires special handling in case
  // the new element is non-Smi. For now, delegate to the builtin.
  if (IsFastSmiElementsKind(elements_kind())) {
    __ JumpIfNotSmi(argument, &call_builtin);
  }

  // We could be lucky and the elements array could be at the top of new-space.
  // In this case we can just grow it in place by moving the allocation pointer
  // up.
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate);
  ExternalReference new_space_allocation_limit =
      ExternalReference::new_space_allocation_limit_address(isolate);

  const int kAllocationDelta = 4;
  ASSERT(kAllocationDelta >= argc);
  Register allocation_top_addr = x5;
  Register allocation_top = x9;
  // Load top and check if it is the end of elements.
  __ Add(end_elements, elements,
         Operand::UntagSmiAndScale(length, kPointerSizeLog2));
  __ Add(end_elements, end_elements, kEndElementsOffset);
  __ Mov(allocation_top_addr, Operand(new_space_allocation_top));
  __ Ldr(allocation_top, MemOperand(allocation_top_addr));
  __ Cmp(end_elements, allocation_top);
  __ B(ne, &call_builtin);

  __ Mov(x10, Operand(new_space_allocation_limit));
  __ Ldr(x10, MemOperand(x10));
  __ Add(allocation_top, allocation_top, kAllocationDelta * kPointerSize);
  __ Cmp(allocation_top, x10);
  __ B(hi, &call_builtin);

  // We fit and could grow elements.
  // Update new_space_allocation_top.
  __ Str(allocation_top, MemOperand(allocation_top_addr));
  // Push the argument.
  __ Str(argument, MemOperand(end_elements));
  // Fill the rest with holes.
  __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
  for (int i = 1; i < kAllocationDelta; i++) {
    // TODO(all): Try to use stp here.
    __ Str(x10, MemOperand(end_elements, i * kPointerSize));
  }

  // Update elements' and array's sizes.
  __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Add(elements_length,
         elements_length,
         Operand(Smi::FromInt(kAllocationDelta)));
  __ Str(elements_length,
         FieldMemOperand(elements, FixedArray::kLengthOffset));

  // Elements are in new space, so write barrier is not required.
  __ Drop(argc + 1);
  __ Mov(x0, length);
  __ Ret();

  __ Bind(&call_builtin);
  __ TailCallExternalReference(
      ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1);
}
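
// The grow-in-place path above relies on new-space bump allocation: if the
// elements backing store happens to end exactly at the current allocation
// top, the stub can extend it by kAllocationDelta slots just by bumping the
// top pointer, provided the limit is not crossed. A hedged C-like sketch of
// that check (top and limit stand for the new-space words read above):
//
//   if (elements_end == top &&
//       top + kAllocationDelta * kPointerSize <= limit) {
//     top += kAllocationDelta * kPointerSize;  // Extend in place.
//   } else {
//     // Fall back to the ArrayPush builtin, which reallocates.
//   }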


void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x1    : left
  //  -- x0    : right
  //  -- lr    : return address
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  // Load x2 with the allocation site. We stick an undefined dummy value here
  // and replace it with the real allocation site later when we instantiate
  // this stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
  __ LoadObject(x2, handle(isolate->heap()->undefined_value()));

  // Make sure that we actually patched the allocation site.
  if (FLAG_debug_code) {
    __ AssertNotSmi(x2, kExpectedAllocationSite);
    __ Ldr(x10, FieldMemOperand(x2, HeapObject::kMapOffset));
    __ AssertRegisterIsRoot(x10, Heap::kAllocationSiteMapRootIndex,
                            kExpectedAllocationSite);
  }

  // Tail call into the stub that handles binary operations with allocation
  // sites.
  BinaryOpWithAllocationSiteStub stub(state_);
  __ TailCallStub(&stub);
}


bool CodeStub::CanUseFPRegisters() {
  // FP registers always available on A64.
  return true;
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  // We need some extra registers for this stub, they have been allocated
  // but we need to save them before using them.
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    Register value = regs_.scratch0();
    __ Ldr(value, MemOperand(regs_.address()));
    __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);

    __ CheckPageFlagSet(regs_.object(),
                        value,
                        1 << MemoryChunk::SCAN_ON_SCAVENGE,
                        &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);  // Restore the extra scratch registers we used.
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ Bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  __ Ret();
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
  Register address =
      x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
  ASSERT(!address.Is(regs_.object()));
  ASSERT(!address.Is(x0));
  __ Mov(address, regs_.address());
  __ Mov(x0, regs_.object());
  __ Mov(x1, address);
  __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));

  AllowExternalCallThatCantCauseGC scope(masm);
  ExternalReference function = (mode == INCREMENTAL_COMPACTION)
      ? ExternalReference::incremental_evacuation_record_write_function(
          masm->isolate())
      : ExternalReference::incremental_marking_record_write_function(
          masm->isolate());
  __ CallCFunction(function, 3, 0);

  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_scratch;

  Register mem_chunk = regs_.scratch0();
  Register counter = regs_.scratch1();
  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
  __ Ldr(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ Subs(counter, counter, 1);
  __ Str(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ B(mi, &need_incremental);

  // If the object is not black we don't have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&on_black);
  // Get the value from the slot.
  Register value = regs_.scratch0();
  __ Ldr(value, MemOperand(regs_.address()));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlagClear(value,
                          regs_.scratch1(),
                          MemoryChunk::kEvacuationCandidateMask,
                          &ensure_not_white);

    __ CheckPageFlagClear(regs_.object(),
                          regs_.scratch1(),
                          MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                          &need_incremental);

    __ Bind(&ensure_not_white);
  }

  // We need extra registers for this, so we push the object and the address
  // register temporarily.
  __ Push(regs_.address(), regs_.object());
  __ EnsureNotWhite(value,
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),    // Scratch.
                    regs_.address(),   // Scratch.
                    regs_.scratch2(),  // Scratch.
                    &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ Bind(&need_incremental);
  // Fall through when we need to inform the incremental marker.
}


void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // We patch these two first instructions back and forth between a nop and
  // real branch when we start and stop incremental heap marking.
  // Initially the stub is expected to be in STORE_BUFFER_ONLY mode, so 2 nops
  // are generated.
  // See RecordWriteStub::Patch for details.
  {
    InstructionAccurateScope scope(masm, 2);
    __ adr(xzr, &skip_to_incremental_noncompacting);
    __ adr(xzr, &skip_to_incremental_compacting);
  }

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ Bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ Bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
}
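
// Note on the patchable prologue above: "adr xzr, label" computes a
// PC-relative address into the zero register, so it executes as a nop while
// still encoding the offset of its target label. When incremental marking
// starts, the patching code (see RecordWriteStub::Patch) can rewrite each of
// these into a real branch to that label, and back into the nop form when
// marking stops, without needing to stash the targets anywhere else.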


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // TODO(all): Possible optimisations in this function:
  // 1. Merge CheckFastElements and CheckFastSmiElements, so that the map
  //    bitfield is loaded only once.
  // 2. Refactor the Ldr/Add sequence at the start of fast_elements and
  //    smi_element.

  //  x0     value            element value to store
  //  x3     index_smi        element index as smi
  //  sp[0]  array_index_smi  array literal index in function as smi
  //  sp[1]  array            array literal

  Register value = x0;
  Register index_smi = x3;

  Register array = x1;
  Register array_map = x2;
  Register array_index_smi = x4;
  __ PeekPair(array_index_smi, array, 0);
  __ Ldr(array_map, FieldMemOperand(array, JSObject::kMapOffset));

  Label double_elements, smi_element, fast_elements, slow_elements;
  __ CheckFastElements(array_map, x10, &double_elements);
  __ JumpIfSmi(value, &smi_element);
  __ CheckFastSmiElements(array_map, x10, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.
  __ Bind(&slow_elements);
  __ Push(array, index_smi, value);
  __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ Ldr(x11, FieldMemOperand(x10, JSFunction::kLiteralsOffset));
  __ Push(x11, array_index_smi);
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
  __ Bind(&fast_elements);
  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
  __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
  __ Add(x11, x11, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Str(value, MemOperand(x11));
  // Update the write barrier for the array store.
  __ RecordWrite(x10, x11, value, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
  // and value is Smi.
  __ Bind(&smi_element);
  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
  __ Add(x11, x10, Operand::UntagSmiAndScale(index_smi, kPointerSizeLog2));
  __ Str(value, FieldMemOperand(x11, FixedArray::kHeaderSize));
  __ Ret();

  __ Bind(&double_elements);
  __ Ldr(x10, FieldMemOperand(array, JSObject::kElementsOffset));
  __ StoreNumberToDoubleElements(value, index_smi, x10, x11, d0, d1,
                                 &slow_elements);
  __ Ret();
}


void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
  // TODO(jbramley): The ARM code leaves the (shifted) offset in r1. Why?
  CEntryStub ces(1, kSaveFPRegs);
  __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
  int parameter_count_offset =
      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
  __ Ldr(x1, MemOperand(fp, parameter_count_offset));
  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
    __ Add(x1, x1, 1);
  }
  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
  __ Drop(x1);
  // Return to IC Miss stub, continuation still on stack.
  __ Ret();
}


void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
  if (masm->isolate()->function_entry_hook() != NULL) {
    // TODO(all): This needs to be reliably consistent with
    // kReturnAddressDistanceFromFunctionStart in ::Generate.
    Assembler::BlockConstPoolScope no_const_pools(masm);
    ProfileEntryHookStub stub;
    __ Push(lr);
    __ CallStub(&stub);
    __ Pop(lr);
  }
}


void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
  MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm);
  // The entry hook is a "BumpSystemStackPointer" instruction (sub), followed
  // by a "Push lr" instruction, followed by a call.
  // TODO(jbramley): Verify that this call is always made with relocation.
  static const int kReturnAddressDistanceFromFunctionStart =
      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);

  // Save all kCallerSaved registers (including lr), since this can be called
  // from anywhere.
  // TODO(jbramley): What about FP registers?
  __ PushCPURegList(kCallerSaved);
  ASSERT(kCallerSaved.IncludesAliasOf(lr));
  const int kNumSavedRegs = kCallerSaved.Count();

  // Compute the function's address as the first argument.
  __ Sub(x0, lr, kReturnAddressDistanceFromFunctionStart);

#if V8_HOST_ARCH_A64
  uintptr_t entry_hook =
      reinterpret_cast<uintptr_t>(masm->isolate()->function_entry_hook());
  __ Mov(x10, entry_hook);
#else
  // Under the simulator we need to indirect the entry hook through a
  // trampoline function at a known address.
  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
  __ Mov(x10, Operand(ExternalReference(&dispatcher,
                                        ExternalReference::BUILTIN_CALL,
                                        masm->isolate())));
  // It additionally takes an isolate as a third parameter.
  __ Mov(x2, Operand(ExternalReference::isolate_address(masm->isolate())));
#endif

  // The caller's return address is above the saved temporaries.
  // Grab its location for the second argument to the hook.
  __ Add(x1, __ StackPointer(), kNumSavedRegs * kPointerSize);

  {
    // Create a dummy frame, as CallCFunction requires this.
    FrameScope frame(masm, StackFrame::MANUAL);
    __ CallCFunction(x10, 2, 0);
  }

  __ PopCPURegList(kCallerSaved);
  __ Ret();
}


void DirectCEntryStub::Generate(MacroAssembler* masm) {
  // When calling into C++ code the stack pointer must be csp.
  // Therefore this code must use csp for peek/poke operations when the
  // stub is generated. When the stub is called
  // (via DirectCEntryStub::GenerateCall), the caller must setup an ExitFrame
  // and configure the stack pointer *before* doing the call.
  const Register old_stack_pointer = __ StackPointer();
  __ SetStackPointer(csp);

  // Put return address on the stack (accessible to GC through exit frame pc).
  __ Poke(lr, 0);
  // Call the C++ function.
  __ Blr(x10);
  // Return to calling code.
  __ Peek(lr, 0);
  __ Ret();

  __ SetStackPointer(old_stack_pointer);
}


void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
                                    Register target) {
  // Make sure the caller configured the stack pointer (see comment in
  // DirectCEntryStub::Generate).
  ASSERT(csp.Is(__ StackPointer()));

  intptr_t code =
      reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
  __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET));
  __ Mov(x10, target);
  // Branch to the stub.
  __ Blr(lr);
}


// Probe the name dictionary in the 'elements' register.
// Jump to the 'done' label if a property with the given name is found.
// Jump to the 'miss' label otherwise.
//
// If lookup was successful 'scratch2' will be equal to elements + 4 * index.
// 'elements' and 'name' registers are preserved on miss.
void NameDictionaryLookupStub::GeneratePositiveLookup(
    MacroAssembler* masm,
    Label* miss,
    Label* done,
    Register elements,
    Register name,
    Register scratch1,
    Register scratch2) {
  ASSERT(!AreAliased(elements, name, scratch1, scratch2));

  // Assert that name contains a string.
  __ AssertName(name);

  // Compute the capacity mask.
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset));
  __ Sub(scratch1, scratch1, 1);

  // Generate an unrolled loop that performs a few probes before giving up.
  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
    if (i > 0) {
      // Add the probe offset (i + i * i) left shifted to avoid right shifting
      // the hash in a separate instruction. The value hash + i + i * i is
      // right shifted in the following and instruction.
      ASSERT(NameDictionary::GetProbeOffset(i) <
             1 << (32 - Name::kHashFieldOffset));
      __ Add(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
    __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
    // TODO(jbramley): We need another scratch here, but some callers can't
    // provide a scratch3 so we have to use Tmp1(). We should find a clean way
    // to make it unavailable to the MacroAssembler for a short time.
    __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
    __ Cmp(name, __ Tmp1());
    __ B(eq, done);
  }

  // The inlined probes didn't find the entry.
  // Call the complete stub to scan the whole dictionary.

  CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
  spill_list.Combine(lr);
  spill_list.Remove(scratch1);
  spill_list.Remove(scratch2);

  __ PushCPURegList(spill_list);

  if (name.is(x0)) {
    ASSERT(!elements.is(x1));
    __ Mov(x1, name);
    __ Mov(x0, elements);
  } else {
    __ Mov(x0, elements);
    __ Mov(x1, name);
  }

  Label not_found;
  NameDictionaryLookupStub stub(POSITIVE_LOOKUP);
  __ CallStub(&stub);
  __ Cbz(x0, &not_found);
  __ Mov(scratch2, x2);  // Move entry index into scratch2.
  __ PopCPURegList(spill_list);
  __ B(done);

  __ Bind(&not_found);
  __ PopCPURegList(spill_list);
  __ B(miss);
}
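
// The probe sequence above (and in the stubs below) is quadratic probing over
// a power-of-two table: probe i examines index (hash + i + i*i) & mask, per
// the comments, with the offset supplied by NameDictionary::GetProbeOffset(i),
// and each index is then scaled by kEntrySize (3) words to reach its entry.
// A hedged C-like sketch of one lookup; 'entries', 'found', 'not_found' and
// the element-start index are illustrative names, not real helpers:
//
//   uint32_t mask = capacity - 1;  // Capacity is a power of two.
//   for (int i = 0; i < kTotalProbes; i++) {
//     uint32_t index = (hash + NameDictionary::GetProbeOffset(i)) & mask;
//     Object* key = entries[elements_start + index * kEntrySize];
//     if (key == name) return found(index);
//     if (key == undefined) return not_found();  // Empty slot ends the probe.
//   }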
| 5128 | |
| 5129 | |
| 5130 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | |
| 5131 Label* miss, | |
| 5132 Label* done, | |
| 5133 Register receiver, | |
| 5134 Register properties, | |
| 5135 Handle<Name> name, | |
| 5136 Register scratch0) { | |
| 5137 ASSERT(!AreAliased(receiver, properties, scratch0)); | |
| 5138 ASSERT(name->IsUniqueName()); | |
| 5139 // If names of slots in range from 1 to kProbes - 1 for the hash value are | |
| 5140 // not equal to the name and kProbes-th slot is not used (its name is the | |
| 5141 // undefined value), it guarantees the hash table doesn't contain the | |
| 5142 // property. It's true even if some slots represent deleted properties | |
| 5143 // (their names are the hole value). | |
| 5144 for (int i = 0; i < kInlinedProbes; i++) { | |
| 5145 // scratch0 points to properties hash. | |
| 5146 // Compute the masked index: (hash + i + i * i) & mask. | |
| 5147 Register index = scratch0; | |
| 5148 // Capacity is smi 2^n. | |
| 5149 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset)); | |
| 5150 __ Sub(index, index, 1); | |
| 5151 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); | |
| 5152 | |
| 5153 // Scale the index by multiplying by the entry size. | |
| 5154 ASSERT(NameDictionary::kEntrySize == 3); | |
| 5155 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | |
| 5156 | |
| 5157 Register entity_name = scratch0; | |
| 5158 // Having undefined at this place means the name is not contained. | |
| 5159 Register tmp = index; | |
| 5160 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2)); | |
| 5161 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | |
| 5162 | |
| 5163 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done); | |
| 5164 | |
| 5165 // Stop if found the property. | |
| 5166 __ Cmp(entity_name, Operand(name)); | |
| 5167 __ B(eq, miss); | |
| 5168 | |
| 5169 Label good; | |
| 5170 __ JumpIfRoot(entity_name, Heap::kTheHoleValueRootIndex, &good); | |
| 5171 | |
| 5172 // Check if the entry name is not a unique name. | |
| 5173 __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | |
| 5174 __ Ldrb(entity_name, | |
| 5175 FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | |
| 5176 __ JumpIfNotUniqueName(entity_name, miss); | |
| 5177 __ Bind(&good); | |
| 5178 } | |
| 5179 | |
| 5180 CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6); | |
| 5181 spill_list.Combine(lr); | |
| 5182 spill_list.Remove(scratch0); // Scratch registers don't need to be preserved. | |
| 5183 | |
| 5184 __ PushCPURegList(spill_list); | |
| 5185 | |
| 5186 __ Ldr(x0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
| 5187 __ Mov(x1, Operand(name)); | |
| 5188 NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); | |
| 5189 __ CallStub(&stub); | |
| 5190 // Move stub return value to scratch0. Note that scratch0 is not included in | |
| 5191 // spill_list and won't be clobbered by PopCPURegList. | |
| 5192 __ Mov(scratch0, x0); | |
| 5193 __ PopCPURegList(spill_list); | |
| 5194 | |
| 5195 __ Cbz(scratch0, done); | |
| 5196 __ B(miss); | |
| 5197 } | |
| 5198 | |
| 5199 | |
| 5200 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | |
| 5201 // This stub overrides SometimesSetsUpAFrame() to return false. That means | |
| 5202 // we cannot call anything that could cause a GC from this stub. | |
| 5203 // | |
| 5204 // Arguments are in x0 and x1: | |
| 5205 // x0: property dictionary. | |
| 5206 // x1: the name of the property we are looking for. | |
| 5207 // | |
| 5208 // Return value is in x0 and is zero if lookup failed, non zero otherwise. | |
| 5209 // If the lookup is successful, x2 will contains the index of the entry. | |
| 5210 | |
| 5211 Register result = x0; | |
| 5212 Register dictionary = x0; | |
| 5213 Register key = x1; | |
| 5214 Register index = x2; | |
| 5215 Register mask = x3; | |
| 5216 Register hash = x4; | |
| 5217 Register undefined = x5; | |
| 5218 Register entry_key = x6; | |
| 5219 | |
| 5220 Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | |
| 5221 | |
| 5222 __ Ldrsw(mask, UntagSmiFieldMemOperand(dictionary, kCapacityOffset)); | |
| 5223 __ Sub(mask, mask, 1); | |
| 5224 | |
| 5225 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | |
| 5226 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | |
| 5227 | |
| 5228 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | |
| 5229 // Compute the masked index: (hash + i + i * i) & mask. | |
| 5230 // Capacity is smi 2^n. | |
| 5231 if (i > 0) { | |
| 5232 // Add the probe offset (i + i * i) left shifted to avoid right shifting | |
| 5233 // the hash in a separate instruction. The value hash + i + i * i is right | |
| 5234 // shifted in the following and instruction. | |
| 5235 ASSERT(NameDictionary::GetProbeOffset(i) < | |
| 5236 1 << (32 - Name::kHashFieldOffset)); | |
| 5237 __ Add(index, hash, | |
| 5238 NameDictionary::GetProbeOffset(i) << Name::kHashShift); | |
| 5239 } else { | |
| 5240 __ Mov(index, hash); | |
| 5241 } | |
| 5242 __ And(index, mask, Operand(index, LSR, Name::kHashShift)); | |
| 5243 | |
| 5244 // Scale the index by multiplying by the entry size. | |
| 5245 ASSERT(NameDictionary::kEntrySize == 3); | |
| 5246 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | |
| 5247 | |
| 5248 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); | |
| 5249 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | |
| 5250 | |
| 5251 // Having undefined at this place means the name is not contained. | |
| 5252 __ Cmp(entry_key, undefined); | |
| 5253 __ B(eq, ¬_in_dictionary); | |
| 5254 | |
| 5255 // Stop if found the property. | |
| 5256 __ Cmp(entry_key, key); | |
| 5257 __ B(eq, &in_dictionary); | |
| 5258 | |
| 5259 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | |
| 5260 // Check if the entry name is not a unique name. | |
| 5261 __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | |
| 5262 __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | |
| 5263 __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); | |
| 5264 } | |
| 5265 } | |
| 5266 | |
| 5267 __ Bind(&maybe_in_dictionary); | |
| 5268 // If we are doing a negative lookup, then a probing failure should be | |
| 5269 // treated as a lookup success. For a positive lookup, a probing failure | |
| 5270 // should be treated as a lookup failure. | |
| 5271 if (mode_ == POSITIVE_LOOKUP) { | |
| 5272 __ Mov(result, 0); | |
| 5273 __ Ret(); | |
| 5274 } | |
| 5275 | |
| 5276 __ Bind(&in_dictionary); | |
| 5277 __ Mov(result, 1); | |
| 5278 __ Ret(); | |
| 5279 | |
| 5280 __ Bind(¬_in_dictionary); | |
| 5281 __ Mov(result, 0); | |
| 5282 __ Ret(); | |
| 5283 } | |
| 5284 | |
| 5285 | |
| 5286 template<class T> | |
| 5287 static void CreateArrayDispatch(MacroAssembler* masm, | |
| 5288 AllocationSiteOverrideMode mode) { | |
| 5289 ASM_LOCATION("CreateArrayDispatch"); | |
| 5290 if (mode == DISABLE_ALLOCATION_SITES) { | |
| 5291 T stub(GetInitialFastElementsKind(), mode); | |
| 5292 __ TailCallStub(&stub); | |
| 5293 | |
| 5294 } else if (mode == DONT_OVERRIDE) { | |
| 5295 Register kind = x3; | |
| 5296 int last_index = | |
| 5297 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | |
| 5298 for (int i = 0; i <= last_index; ++i) { | |
| 5299 Label next; | |
| 5300 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); | |
| 5301 // TODO(jbramley): Is this the best way to handle this? Can we make the | |
| 5302 // tail calls conditional, rather than hopping over each one? | |
| 5303 __ CompareAndBranch(kind, candidate_kind, ne, &next); | |
| 5304 T stub(candidate_kind); | |
| 5305 __ TailCallStub(&stub); | |
| 5306 __ Bind(&next); | |
| 5307 } | |
| 5308 | |
| 5309 // If we reached this point there is a problem. | |
| 5310 __ Abort(kUnexpectedElementsKindInArrayConstructor); | |
| 5311 | |
| 5312 } else { | |
| 5313 UNREACHABLE(); | |
| 5314 } | |
| 5315 } | |
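| // For DONT_OVERRIDE, the loop above expands into a chain of | |
| // compare-and-branch dispatches, roughly (a sketch): | |
| //   if (kind == FAST_SMI_ELEMENTS) TailCall(T<FAST_SMI_ELEMENTS>); | |
| //   if (kind == FAST_HOLEY_SMI_ELEMENTS) TailCall(T<FAST_HOLEY_SMI_ELEMENTS>); | |
| //   ... one case per fast elements kind ... | |
| //   Abort(kUnexpectedElementsKindInArrayConstructor); | |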
| 5316 | |
| 5317 | |
| 5318 // TODO(jbramley): If this needs to be a special case, make it a proper template | |
| 5319 // specialization, and not a separate function. | |
| 5320 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | |
| 5321 AllocationSiteOverrideMode mode) { | |
| 5322 ASM_LOCATION("CreateArrayDispatchOneArgument"); | |
| 5323 // x0 - argc | |
| 5324 // x1 - constructor? | |
| 5325 // x2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | |
| 5326 // x3 - kind (if mode != DISABLE_ALLOCATION_SITES) | |
| 5327 // sp[0] - last argument | |
| 5328 | |
| 5329 Register allocation_site = x2; | |
| 5330 Register kind = x3; | |
| 5331 | |
| 5332 Label normal_sequence; | |
| 5333 if (mode == DONT_OVERRIDE) { | |
| 5334 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
| 5335 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
| 5336 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
| 5337 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
| 5338 STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); | |
| 5339 STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | |
| 5340 | |
| 5341 // Is the low bit set? If so, the array is holey. | |
| 5342 __ Tbnz(kind, 0, &normal_sequence); | |
| 5343 } | |
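| // Per the STATIC_ASSERTs above, packed kinds are even and their holey | |
| // counterparts are the next odd value, e.g. FAST_ELEMENTS (2) vs. | |
| // FAST_HOLEY_ELEMENTS (3); testing bit 0 (the Tbnz above) and setting it | |
| // (the Orr below) detect and perform the packed-to-holey transition. | |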
| 5344 | |
| 5345 // Look at the last argument. | |
| 5346 // TODO(jbramley): What does a 0 argument represent? | |
| 5347 __ Peek(x10, 0); | |
| 5348 __ Cbz(x10, &normal_sequence); | |
| 5349 | |
| 5350 if (mode == DISABLE_ALLOCATION_SITES) { | |
| 5351 ElementsKind initial = GetInitialFastElementsKind(); | |
| 5352 ElementsKind holey_initial = GetHoleyElementsKind(initial); | |
| 5353 | |
| 5354 ArraySingleArgumentConstructorStub stub_holey(holey_initial, | |
| 5355 DISABLE_ALLOCATION_SITES); | |
| 5356 __ TailCallStub(&stub_holey); | |
| 5357 | |
| 5358 __ Bind(&normal_sequence); | |
| 5359 ArraySingleArgumentConstructorStub stub(initial, | |
| 5360 DISABLE_ALLOCATION_SITES); | |
| 5361 __ TailCallStub(&stub); | |
| 5362 } else if (mode == DONT_OVERRIDE) { | |
| 5363 // We are going to create a holey array, but our kind is non-holey. | |
| 5364 // Fix kind and retry (only if we have an allocation site in the slot). | |
| 5365 __ Orr(kind, kind, 1); | |
| 5366 | |
| 5367 if (FLAG_debug_code) { | |
| 5368 __ Ldr(x10, FieldMemOperand(allocation_site, 0)); | |
| 5369 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, | |
| 5370 &normal_sequence); | |
| 5371 __ Assert(eq, kExpectedAllocationSite); | |
| 5372 } | |
| 5373 | |
| 5374 // Save the resulting elements kind in type info. We can't just store 'kind' | |
| 5375 // in the AllocationSite::transition_info field because elements kind is | |
| 5376 // restricted to a portion of the field; upper bits need to be left alone. | |
| 5377 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | |
| 5378 __ Ldr(x11, FieldMemOperand(allocation_site, | |
| 5379 AllocationSite::kTransitionInfoOffset)); | |
| 5380 __ Add(x11, x11, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); | |
| 5381 __ Str(x11, FieldMemOperand(allocation_site, | |
| 5382 AllocationSite::kTransitionInfoOffset)); | |
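| // For example, a transition_info holding FAST_SMI_ELEMENTS (0) in its | |
| // ElementsKindBits now holds FAST_HOLEY_SMI_ELEMENTS (1). This assumes | |
| // kFastElementsKindPackedToHoley is the packed-to-holey delta (1, per the | |
| // STATIC_ASSERTs above); adding it as a smi leaves the upper feedback | |
| // bits of the field untouched. | |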
| 5383 | |
| 5384 __ Bind(&normal_sequence); | |
| 5385 int last_index = | |
| 5386 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | |
| 5387 for (int i = 0; i <= last_index; ++i) { | |
| 5388 Label next; | |
| 5389 ElementsKind candidate_kind = GetFastElementsKindFromSequenceIndex(i); | |
| 5390 // TODO(jbramley): Is this the best way to handle this? Can we make the | |
| 5391 // tail calls conditional, rather than hopping over each one? | |
| 5392 __ CompareAndBranch(kind, candidate_kind, ne, &next); | |
| 5393 ArraySingleArgumentConstructorStub stub(candidate_kind); | |
| 5394 __ TailCallStub(&stub); | |
| 5395 __ Bind(&next); | |
| 5396 } | |
| 5397 | |
| 5398 // If we reached this point there is a problem. | |
| 5399 __ Abort(kUnexpectedElementsKindInArrayConstructor); | |
| 5400 } else { | |
| 5401 UNREACHABLE(); | |
| 5402 } | |
| 5403 } | |
| 5404 | |
| 5405 | |
| 5406 template<class T> | |
| 5407 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { | |
| 5408 int to_index = GetSequenceIndexFromFastElementsKind( | |
| 5409 TERMINAL_FAST_ELEMENTS_KIND); | |
| 5410 for (int i = 0; i <= to_index; ++i) { | |
| 5411 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | |
| 5412 T stub(kind); | |
| 5413 stub.GetCode(isolate); | |
| 5414 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { | |
| 5415 T stub1(kind, DISABLE_ALLOCATION_SITES); | |
| 5416 stub1.GetCode(isolate); | |
| 5417 } | |
| 5418 } | |
| 5419 } | |
| 5420 | |
| 5421 | |
| 5422 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { | |
| 5423 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( | |
| 5424 isolate); | |
| 5425 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( | |
| 5426 isolate); | |
| 5427 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( | |
| 5428 isolate); | |
| 5429 } | |
| 5430 | |
| 5431 | |
| 5432 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( | |
| 5433 Isolate* isolate) { | |
| 5434 ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS }; | |
| 5435 for (int i = 0; i < 2; i++) { | |
| 5436 // For internal arrays we only need a few stubs. | |
| 5437 InternalArrayNoArgumentConstructorStub stubh1(kinds[i]); | |
| 5438 stubh1.GetCode(isolate); | |
| 5439 InternalArraySingleArgumentConstructorStub stubh2(kinds[i]); | |
| 5440 stubh2.GetCode(isolate); | |
| 5441 InternalArrayNArgumentsConstructorStub stubh3(kinds[i]); | |
| 5442 stubh3.GetCode(isolate); | |
| 5443 } | |
| 5444 } | |
| 5445 | |
| 5446 | |
| 5447 void ArrayConstructorStub::GenerateDispatchToArrayStub( | |
| 5448 MacroAssembler* masm, | |
| 5449 AllocationSiteOverrideMode mode) { | |
| 5450 Register argc = x0; | |
| 5451 if (argument_count_ == ANY) { | |
| 5452 Label zero_case, n_case; | |
| 5453 __ Cbz(argc, &zero_case); | |
| 5454 __ Cmp(argc, 1); | |
| 5455 __ B(ne, &n_case); | |
| 5456 | |
| 5457 // One argument. | |
| 5458 CreateArrayDispatchOneArgument(masm, mode); | |
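| // Note: CreateArrayDispatchOneArgument never falls through; every path | |
| // either tail-calls a stub or aborts, so binding zero_case here is safe. | |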
| 5459 | |
| 5460 __ Bind(&zero_case); | |
| 5461 // No arguments. | |
| 5462 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | |
| 5463 | |
| 5464 __ Bind(&n_case); | |
| 5465 // N arguments. | |
| 5466 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | |
| 5467 | |
| 5468 } else if (argument_count_ == NONE) { | |
| 5469 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | |
| 5470 } else if (argument_count_ == ONE) { | |
| 5471 CreateArrayDispatchOneArgument(masm, mode); | |
| 5472 } else if (argument_count_ == MORE_THAN_ONE) { | |
| 5473 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | |
| 5474 } else { | |
| 5475 UNREACHABLE(); | |
| 5476 } | |
| 5477 } | |
| 5478 | |
| 5479 | |
| 5480 void ArrayConstructorStub::Generate(MacroAssembler* masm) { | |
| 5481 ASM_LOCATION("ArrayConstructorStub::Generate"); | |
| 5482 // ----------- S t a t e ------------- | |
| 5483 // -- x0 : argc (only if argument_count_ == ANY) | |
| 5484 // -- x1 : constructor | |
| 5485 // -- x2 : feedback vector (fixed array or undefined) | |
| 5486 // -- x3 : slot index (if x2 is fixed array) | |
| 5487 // -- sp[0] : return address | |
| 5488 // -- sp[4] : last argument | |
| 5489 // ----------------------------------- | |
| 5490 Register constructor = x1; | |
| 5491 Register feedback_vector = x2; | |
| 5492 Register slot_index = x3; | |
| 5493 | |
| 5494 if (FLAG_debug_code) { | |
| 5495 // The array construct code is only set for the global and natives | |
| 5496 // builtin Array functions, which always have maps. | |
| 5497 | |
| 5498 Label unexpected_map, map_ok; | |
| 5499 // Initial map for the builtin Array function should be a map. | |
| 5500 __ Ldr(x10, FieldMemOperand(constructor, | |
| 5501 JSFunction::kPrototypeOrInitialMapOffset)); | |
| 5502 // The Smi check below catches both a NULL pointer and a Smi. | |
| 5503 __ JumpIfSmi(x10, &unexpected_map); | |
| 5504 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); | |
| 5505 __ Bind(&unexpected_map); | |
| 5506 __ Abort(kUnexpectedInitialMapForArrayFunction); | |
| 5507 __ Bind(&map_ok); | |
| 5508 | |
| 5509 // In feedback_vector, we expect either undefined or a valid fixed array. | |
| 5510 Label okay_here; | |
| 5511 Handle<Map> fixed_array_map = masm->isolate()->factory()->fixed_array_map(); | |
| 5512 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &okay_here); | |
| 5513 __ Ldr(x10, FieldMemOperand(feedback_vector, FixedArray::kMapOffset)); | |
| 5514 __ Cmp(x10, Operand(fixed_array_map)); | |
| 5515 __ Assert(eq, kExpectedFixedArrayInFeedbackVector); | |
| 5516 | |
| 5517 // slot_index should be a smi if we don't have undefined in feedback_vector. | |
| 5518 __ AssertSmi(slot_index); | |
| 5519 | |
| 5520 __ Bind(&okay_here); | |
| 5521 } | |
| 5522 | |
| 5523 Register allocation_site = x2; // Overwrites feedback_vector. | |
| 5524 Register kind = x3; | |
| 5525 Label no_info; | |
| 5526 // Get the elements kind and case on that. | |
| 5527 __ JumpIfRoot(feedback_vector, Heap::kUndefinedValueRootIndex, &no_info); | |
| 5528 __ Add(feedback_vector, feedback_vector, | |
| 5529 Operand::UntagSmiAndScale(slot_index, kPointerSizeLog2)); | |
| 5530 __ Ldr(allocation_site, FieldMemOperand(feedback_vector, | |
| 5531 FixedArray::kHeaderSize)); | |
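| // The two instructions above load feedback_vector[slot_index]: | |
| // UntagSmiAndScale converts the smi slot index into a byte offset into | |
| // the fixed array's elements, and FixedArray::kHeaderSize skips past the | |
| // array header. | |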
| 5532 | |
| 5533 // If the feedback vector is undefined, or contains anything other than an | |
| 5534 // AllocationSite, call an array constructor that doesn't use AllocationSites. | |
| 5535 __ Ldr(x10, FieldMemOperand(allocation_site, AllocationSite::kMapOffset)); | |
| 5536 __ JumpIfNotRoot(x10, Heap::kAllocationSiteMapRootIndex, &no_info); | |
| 5537 | |
| 5538 __ Ldrsw(kind, | |
| 5539 UntagSmiFieldMemOperand(allocation_site, | |
| 5540 AllocationSite::kTransitionInfoOffset)); | |
| 5541 __ And(kind, kind, AllocationSite::ElementsKindBits::kMask); | |
| 5542 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | |
| 5543 | |
| 5544 __ Bind(&no_info); | |
| 5545 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | |
| 5546 } | |
| 5547 | |
| 5548 | |
| 5549 void InternalArrayConstructorStub::GenerateCase( | |
| 5550 MacroAssembler* masm, ElementsKind kind) { | |
| 5551 Label zero_case, n_case; | |
| 5552 Register argc = x0; | |
| 5553 | |
| 5554 __ Cbz(argc, &zero_case); | |
| 5555 __ CompareAndBranch(argc, 1, ne, &n_case); | |
| 5556 | |
| 5557 // One argument. | |
| 5558 if (IsFastPackedElementsKind(kind)) { | |
| 5559 Label packed_case; | |
| 5560 | |
| 5561 // We might need to create a holey array; look at the first argument. | |
| 5562 __ Peek(x10, 0); | |
| 5563 __ Cbz(x10, &packed_case); | |
| 5564 | |
| 5565 InternalArraySingleArgumentConstructorStub | |
| 5566 stub1_holey(GetHoleyElementsKind(kind)); | |
| 5567 __ TailCallStub(&stub1_holey); | |
| 5568 | |
| 5569 __ Bind(&packed_case); | |
| 5570 } | |
| 5571 InternalArraySingleArgumentConstructorStub stub1(kind); | |
| 5572 __ TailCallStub(&stub1); | |
| 5573 | |
| 5574 __ Bind(&zero_case); | |
| 5575 // No arguments. | |
| 5576 InternalArrayNoArgumentConstructorStub stub0(kind); | |
| 5577 __ TailCallStub(&stub0); | |
| 5578 | |
| 5579 __ Bind(&n_case); | |
| 5580 // N arguments. | |
| 5581 InternalArrayNArgumentsConstructorStub stubN(kind); | |
| 5582 __ TailCallStub(&stubN); | |
| 5583 } | |
| 5584 | |
| 5585 | |
| 5586 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | |
| 5587 // ----------- S t a t e ------------- | |
| 5588 // -- x0 : argc | |
| 5589 // -- x1 : constructor | |
| 5590 // -- sp[0] : return address | |
| 5591 // -- sp[4] : last argument | |
| 5592 // ----------------------------------- | |
| 5593 Handle<Object> undefined_sentinel( | |
| 5594 masm->isolate()->heap()->undefined_value(), masm->isolate()); | |
| 5595 | |
| 5596 Register constructor = x1; | |
| 5597 | |
| 5598 if (FLAG_debug_code) { | |
| 5599 // The array construct code is only set for the global and natives | |
| 5600 // builtin Array functions, which always have maps. | |
| 5601 | |
| 5602 Label unexpected_map, map_ok; | |
| 5603 // Initial map for the builtin Array function should be a map. | |
| 5604 __ Ldr(x10, FieldMemOperand(constructor, | |
| 5605 JSFunction::kPrototypeOrInitialMapOffset)); | |
| 5606 // The Smi check below catches both a NULL pointer and a Smi. | |
| 5607 __ JumpIfSmi(x10, &unexpected_map); | |
| 5608 __ JumpIfObjectType(x10, x10, x11, MAP_TYPE, &map_ok); | |
| 5609 __ Bind(&unexpected_map); | |
| 5610 __ Abort(kUnexpectedInitialMapForArrayFunction); | |
| 5611 __ Bind(&map_ok); | |
| 5612 } | |
| 5613 | |
| 5614 Register kind = w3; | |
| 5615 // Figure out the right elements kind. | |
| 5616 __ Ldr(x10, FieldMemOperand(constructor, | |
| 5617 JSFunction::kPrototypeOrInitialMapOffset)); | |
| 5618 | |
| 5619 // TODO(jbramley): Add a helper function to read elements kind from an | |
| 5620 // existing map. | |
| 5621 // Load the map's "bit field 2" into kind. | |
| 5622 __ Ldr(kind, FieldMemOperand(x10, Map::kBitField2Offset)); | |
| 5623 // Retrieve elements_kind from bit field 2. | |
| 5624 __ Ubfx(kind, kind, Map::kElementsKindShift, Map::kElementsKindBitCount); | |
| 5625 | |
| 5626 if (FLAG_debug_code) { | |
| 5627 // Check that kind is FAST_ELEMENTS or FAST_HOLEY_ELEMENTS. | |
| 5628 __ Cmp(kind, FAST_ELEMENTS); | |
| 5629 __ Ccmp(kind, FAST_HOLEY_ELEMENTS, ZFlag, ne); | |
| 5630 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray); | |
| 5631 } | |
| 5632 | |
| 5633 Label fast_elements_case; | |
| 5634 __ CompareAndBranch(kind, FAST_ELEMENTS, eq, &fast_elements_case); | |
| 5635 GenerateCase(masm, FAST_HOLEY_ELEMENTS); | |
| 5636 | |
| 5637 __ Bind(&fast_elements_case); | |
| 5638 GenerateCase(masm, FAST_ELEMENTS); | |
| 5639 } | |
| 5640 | |
| 5641 | |
| 5642 void CallApiFunctionStub::Generate(MacroAssembler* masm) { | |
| 5643 // ----------- S t a t e ------------- | |
| 5644 // -- x0 : callee | |
| 5645 // -- x4 : call_data | |
| 5646 // -- x2 : holder | |
| 5647 // -- x1 : api_function_address | |
| 5648 // -- cp : context | |
| 5649 // -- | |
| 5650 // -- sp[0] : last argument | |
| 5651 // -- ... | |
| 5652 // -- sp[(argc - 1) * 8] : first argument | |
| 5653 // -- sp[argc * 8] : receiver | |
| 5654 // ----------------------------------- | |
| 5655 | |
| 5656 Register callee = x0; | |
| 5657 Register call_data = x4; | |
| 5658 Register holder = x2; | |
| 5659 Register api_function_address = x1; | |
| 5660 Register context = cp; | |
| 5661 | |
| 5662 int argc = ArgumentBits::decode(bit_field_); | |
| 5663 bool is_store = IsStoreBits::decode(bit_field_); | |
| 5664 bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_); | |
| 5665 | |
| 5666 typedef FunctionCallbackArguments FCA; | |
| 5667 | |
| 5668 STATIC_ASSERT(FCA::kContextSaveIndex == 6); | |
| 5669 STATIC_ASSERT(FCA::kCalleeIndex == 5); | |
| 5670 STATIC_ASSERT(FCA::kDataIndex == 4); | |
| 5671 STATIC_ASSERT(FCA::kReturnValueOffset == 3); | |
| 5672 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); | |
| 5673 STATIC_ASSERT(FCA::kIsolateIndex == 1); | |
| 5674 STATIC_ASSERT(FCA::kHolderIndex == 0); | |
| 5675 STATIC_ASSERT(FCA::kArgsLength == 7); | |
| 5676 | |
| 5677 Isolate* isolate = masm->isolate(); | |
| 5678 | |
| 5679 // FunctionCallbackArguments: context, callee and call data. | |
| 5680 __ Push(context, callee, call_data); | |
| 5681 | |
| 5682 // Load the context from the callee. | |
| 5683 __ Ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | |
| 5684 | |
| 5685 if (call_data_undefined) { | |
| 5686 __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); | |
| 5687 } | |
| 5688 Register isolate_reg = x5; | |
| 5689 __ Mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate))); | |
| 5690 | |
| 5691 // FunctionCallbackArguments: | |
| 5692 // return value, return value default, isolate, holder. | |
| 5693 __ Push(call_data, call_data, isolate_reg, holder); | |
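| // All seven FCA slots are now on the stack, matching the indices asserted | |
| // above: holder at sp[0], isolate at sp[1], return value default at | |
| // sp[2], return value at sp[3], call data at sp[4], callee at sp[5], and | |
| // the saved context at sp[6]. | |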
| 5694 | |
| 5695 // Prepare arguments. | |
| 5696 Register args = x6; | |
| 5697 __ Mov(args, masm->StackPointer()); | |
| 5698 | |
| 5699 // Allocate the FunctionCallbackInfo structure in the arguments' space, | |
| 5700 // since it's not controlled by GC. | |
| 5701 const int kApiStackSpace = 4; | |
| 5702 | |
| 5703 // Allocate space so that CallApiFunctionAndReturn can store some scratch | |
| 5704 // registers on the stack. | |
| 5705 const int kCallApiFunctionSpillSpace = 4; | |
| 5706 | |
| 5707 FrameScope frame_scope(masm, StackFrame::MANUAL); | |
| 5708 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); | |
| 5709 | |
| 5710 // TODO(all): Optimize this with stp and suchlike. | |
| 5711 ASSERT(!AreAliased(x0, api_function_address)); | |
| 5712 // x0 = FunctionCallbackInfo& | |
| 5713 // The FunctionCallbackInfo is located after the return address. | |
| 5714 __ Add(x0, masm->StackPointer(), 1 * kPointerSize); | |
| 5715 // FunctionCallbackInfo::implicit_args_ | |
| 5716 __ Str(args, MemOperand(x0, 0 * kPointerSize)); | |
| 5717 // FunctionCallbackInfo::values_ | |
| 5718 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | |
| 5719 __ Str(x10, MemOperand(x0, 1 * kPointerSize)); | |
| 5720 // FunctionCallbackInfo::length_ = argc | |
| 5721 __ Mov(x10, argc); | |
| 5722 __ Str(x10, MemOperand(x0, 2 * kPointerSize)); | |
| 5723 // FunctionCallbackInfo::is_construct_call = 0 | |
| 5724 __ Str(xzr, MemOperand(x0, 3 * kPointerSize)); | |
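| // The stores above fill in a structure of this shape (a sketch; the real | |
| // definition lives with FunctionCallbackInfo): | |
| //   implicit_args_ -> the FCA slots pushed earlier | |
| //   values_ -> the first JS argument (highest address) | |
| //   length_ = argc | |
| //   is_construct_call_ = 0 | |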
| 5725 | |
| 5726 const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; | |
| 5727 Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback); | |
| 5728 ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL; | |
| 5729 ApiFunction thunk_fun(thunk_address); | |
| 5730 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, | |
| 5731 masm->isolate()); | |
| 5732 | |
| 5733 AllowExternalCallThatCantCauseGC scope(masm); | |
| 5734 MemOperand context_restore_operand( | |
| 5735 fp, (2 + FCA::kContextSaveIndex) * kPointerSize); | |
| 5736 // Stores return the first JS argument. | |
| 5737 int return_value_offset = 0; | |
| 5738 if (is_store) { | |
| 5739 return_value_offset = 2 + FCA::kArgsLength; | |
| 5740 } else { | |
| 5741 return_value_offset = 2 + FCA::kReturnValueOffset; | |
| 5742 } | |
| 5743 MemOperand return_value_operand(fp, return_value_offset * kPointerSize); | |
| 5744 | |
| 5745 const int spill_offset = 1 + kApiStackSpace; | |
| 5746 __ CallApiFunctionAndReturn(api_function_address, | |
| 5747 thunk_ref, | |
| 5748 kStackUnwindSpace, | |
| 5749 spill_offset, | |
| 5750 return_value_operand, | |
| 5751 &context_restore_operand); | |
| 5752 } | |
| 5753 | |
| 5754 | |
| 5755 void CallApiGetterStub::Generate(MacroAssembler* masm) { | |
| 5756 // ----------- S t a t e ------------- | |
| 5757 // -- sp[0] : name | |
| 5758 // -- sp[8 .. kArgsLength * 8] : PropertyCallbackArguments object | |
| 5759 // -- ... | |
| 5760 // -- x2 : api_function_address | |
| 5761 // ----------------------------------- | |
| 5762 | |
| 5763 Register api_function_address = x2; | |
| 5764 | |
| 5765 __ Mov(x0, masm->StackPointer()); // x0 = Handle<Name> | |
| 5766 __ Add(x1, x0, 1 * kPointerSize); // x1 = PCA | |
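| // From the state comment above: sp[0] holds the property name and the | |
| // PropertyCallbackArguments slots start one word higher, so x0 serves as | |
| // a Handle<Name> and x1 as the PCA's args_ pointer. | |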
| 5767 | |
| 5768 const int kApiStackSpace = 1; | |
| 5769 | |
| 5770 // Allocate space so that CallApiFunctionAndReturn can store some scratch | |
| 5771 // registers on the stack. | |
| 5772 const int kCallApiFunctionSpillSpace = 4; | |
| 5773 | |
| 5774 FrameScope frame_scope(masm, StackFrame::MANUAL); | |
| 5775 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); | |
| 5776 | |
| 5777 // Create a PropertyAccessorInfo instance on the stack above the exit frame, | |
| 5778 // with x1 (internal::Object** args_) as the data. | |
| 5779 __ Poke(x1, 1 * kPointerSize); | |
| 5780 __ Add(x1, masm->StackPointer(), 1 * kPointerSize); // x1 = AccessorInfo& | |
| 5781 | |
| 5782 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | |
| 5783 | |
| 5784 Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback); | |
| 5785 ExternalReference::Type thunk_type = | |
| 5786 ExternalReference::PROFILING_GETTER_CALL; | |
| 5787 ApiFunction thunk_fun(thunk_address); | |
| 5788 ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type, | |
| 5789 masm->isolate()); | |
| 5790 | |
| 5791 const int spill_offset = 1 + kApiStackSpace; | |
| 5792 __ CallApiFunctionAndReturn(api_function_address, | |
| 5793 thunk_ref, | |
| 5794 kStackUnwindSpace, | |
| 5795 spill_offset, | |
| 5796 MemOperand(fp, 6 * kPointerSize), | |
| 5797 NULL); | |
| 5798 } | |
| 5799 | |
| 5800 | |
| 5801 #undef __ | |
| 5802 | |
| 5803 } } // namespace v8::internal | |
| 5804 | |
| 5805 #endif // V8_TARGET_ARCH_A64 | |
| OLD | NEW |