OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 4314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4325 // Compare flat ASCII strings natively. Remove arguments from stack first. | 4325 // Compare flat ASCII strings natively. Remove arguments from stack first. |
4326 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); | 4326 __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); |
4327 __ Addu(sp, sp, Operand(2 * kPointerSize)); | 4327 __ Addu(sp, sp, Operand(2 * kPointerSize)); |
4328 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); | 4328 GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); |
4329 | 4329 |
4330 __ bind(&runtime); | 4330 __ bind(&runtime); |
4331 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 4331 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
4332 } | 4332 } |
4333 | 4333 |
4334 | 4334 |
| 4335 void ArrayPushStub::Generate(MacroAssembler* masm) { // Inline fast path for Array.prototype.push; every uncommon case tail-calls the C builtin. |
| 4336 Register receiver = a0; |
| 4337 Register scratch = a1; |
| 4338 |
| 4339 int argc = arguments_count(); // Argument count this stub instance was specialized for. |
| 4340 |
| 4341 if (argc == 0) { |
| 4342 // Nothing to do, just return the length. |
| 4343 __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 4344 __ DropAndRet(argc + 1); // Pop receiver (+ args) and return the length in v0. |
| 4345 return; |
| 4346 } |
| 4347 |
| 4348 Isolate* isolate = masm->isolate(); |
| 4349 |
| 4350 if (argc != 1) { |
| 4351 __ TailCallExternalReference( // Only the single-argument push is inlined below. |
| 4352 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |
| 4353 return; |
| 4354 } |
| 4355 |
| 4356 Label call_builtin, attempt_to_grow_elements, with_write_barrier; |
| 4357 |
| 4358 Register elements = t2; |
| 4359 Register end_elements = t1; |
| 4360 // Get the elements array of the object. |
| 4361 __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); |
| 4362 |
| 4363 if (IsFastSmiOrObjectElementsKind(elements_kind())) { |
| 4364 // Check that the elements are in fast mode and writable. |
| 4365 __ CheckMap(elements, // Must be the plain FixedArray map; any other map (e.g. COW or dictionary backing) bails out. |
| 4366 scratch, |
| 4367 Heap::kFixedArrayMapRootIndex, |
| 4368 &call_builtin, |
| 4369 DONT_DO_SMI_CHECK); |
| 4370 } |
| 4371 |
| 4372 // Get the array's length into scratch and calculate new length. |
| 4373 __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 4374 __ Addu(scratch, scratch, Operand(Smi::FromInt(argc))); // Smi arithmetic: scratch = old length + argc, still tagged. |
| 4375 |
| 4376 // Get the elements' length. |
| 4377 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); // t0 = backing-store capacity (as smi). |
| 4378 |
| 4379 const int kEndElementsOffset = // Offset from elements + new_length*kPointerSize back to the slot of the first pushed value. |
| 4380 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; |
| 4381 |
| 4382 if (IsFastSmiOrObjectElementsKind(elements_kind())) { |
| 4383 // Check if we could survive without allocation. |
| 4384 __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0)); // New length exceeds capacity: try growing in place. |
| 4385 |
| 4386 // Check if value is a smi. |
| 4387 __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); // t0 = the pushed value (topmost stack argument). |
| 4388 __ JumpIfNotSmi(t0, &with_write_barrier); |
| 4389 |
| 4390 // Store the value. |
| 4391 // We may need a register containing the address end_elements below, |
| 4392 // so write back the value in end_elements. |
| 4393 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); // Untag the smi length and scale to bytes in one shift. |
| 4394 __ Addu(end_elements, elements, end_elements); |
| 4395 __ Addu(end_elements, end_elements, kEndElementsOffset); |
| 4396 __ sw(t0, MemOperand(end_elements)); // Smi store: no write barrier needed. |
| 4397 } else { |
| 4398 // Check if we could survive without allocation. |
| 4399 __ Branch(&call_builtin, gt, scratch, Operand(t0)); // Double arrays are never grown inline. |
| 4400 |
| 4401 __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); |
| 4402 __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2, // Converts and stores the number; non-numbers bail out. |
| 4403 &call_builtin, argc * kDoubleSize); |
| 4404 } |
| 4405 |
| 4406 // Save new length. |
| 4407 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 4408 __ mov(v0, scratch); // push() returns the new length. |
| 4409 __ DropAndRet(argc + 1); |
| 4410 |
| 4411 if (IsFastDoubleElementsKind(elements_kind())) { // Double kinds never reach the write-barrier/grow paths below, so finish here. |
| 4412 __ bind(&call_builtin); |
| 4413 __ TailCallExternalReference( |
| 4414 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |
| 4415 return; |
| 4416 } |
| 4417 |
| 4418 __ bind(&with_write_barrier); // Reached with t0 = pushed heap object, scratch = new length (smi). |
| 4419 |
| 4420 if (IsFastSmiElementsKind(elements_kind())) { |
| 4421 if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); // Let the runtime observe (and trace) the transition. |
| 4422 |
| 4423 __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset)); |
| 4424 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
| 4425 __ Branch(&call_builtin, eq, t3, Operand(at)); // A heap number would need a transition to a double kind; not handled inline. |
| 4426 |
| 4427 ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) |
| 4428 ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; |
| 4429 __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
| 4430 __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); |
| 4431 __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX)); // a3 = native context's per-kind JSArray map array. |
| 4432 const int header_size = FixedArrayBase::kHeaderSize; |
| 4433 // Verify that the object can be transitioned in place. |
| 4434 const int origin_offset = header_size + elements_kind() * kPointerSize; |
| 4435 __ lw(a2, FieldMemOperand(receiver, origin_offset)); // NOTE(review): this reads receiver+origin_offset and compares it to a3's own map below; the expected pattern is receiver's map vs. js_array_maps[origin_offset] -- confirm the operands/offsets are not swapped. |
| 4436 __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset)); |
| 4437 __ Branch(&call_builtin, ne, a2, Operand(at)); |
| 4438 |
| 4439 |
| 4440 const int target_offset = header_size + target_kind * kPointerSize; |
| 4441 __ lw(a3, FieldMemOperand(a3, target_offset)); // a3 = map for the transitioned elements kind. |
| 4442 __ mov(a2, receiver); // Transition generator presumably takes receiver in a2 and new map in a3 -- verify against its contract. |
| 4443 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
| 4444 masm, DONT_TRACK_ALLOCATION_SITE, NULL); |
| 4445 } |
| 4446 |
| 4447 // Save new length. |
| 4448 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 4449 |
| 4450 // Store the value. |
| 4451 // We may need a register containing the address end_elements below, so write |
| 4452 // back the value in end_elements. |
| 4453 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); |
| 4454 __ Addu(end_elements, elements, end_elements); |
| 4455 __ Addu(end_elements, end_elements, kEndElementsOffset); |
| 4456 __ sw(t0, MemOperand(end_elements)); |
| 4457 |
| 4458 __ RecordWrite(elements, // Heap-object store: record the slot so the GC sees the new reference. |
| 4459 end_elements, |
| 4460 t0, |
| 4461 kRAHasNotBeenSaved, |
| 4462 kDontSaveFPRegs, |
| 4463 EMIT_REMEMBERED_SET, |
| 4464 OMIT_SMI_CHECK); |
| 4465 __ mov(v0, scratch); |
| 4466 __ DropAndRet(argc + 1); |
| 4467 |
| 4468 __ bind(&attempt_to_grow_elements); |
| 4469 // scratch: array's length + 1. |
| 4470 |
| 4471 if (!FLAG_inline_new) { |
| 4472 __ bind(&call_builtin); |
| 4473 __ TailCallExternalReference( |
| 4474 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |
| 4475 return; |
| 4476 } |
| 4477 |
| 4478 __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize)); // a2 = the pushed value. |
| 4479 // Growing elements that are SMI-only requires special handling in case the |
| 4480 // new element is non-Smi. For now, delegate to the builtin. |
| 4481 if (IsFastSmiElementsKind(elements_kind())) { |
| 4482 __ JumpIfNotSmi(a2, &call_builtin); |
| 4483 } |
| 4484 |
| 4485 // We could be lucky and the elements array could be at the top of new-space. |
| 4486 // In this case we can just grow it in place by moving the allocation pointer |
| 4487 // up. |
| 4488 ExternalReference new_space_allocation_top = |
| 4489 ExternalReference::new_space_allocation_top_address(isolate); |
| 4490 ExternalReference new_space_allocation_limit = |
| 4491 ExternalReference::new_space_allocation_limit_address(isolate); |
| 4492 |
| 4493 const int kAllocationDelta = 4; // Over-allocate a few slots so consecutive pushes don't re-grow every time. |
| 4494 ASSERT(kAllocationDelta >= argc); |
| 4495 // Load top and check if it is the end of elements. |
| 4496 __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); |
| 4497 __ Addu(end_elements, elements, end_elements); |
| 4498 __ Addu(end_elements, end_elements, Operand(kEndElementsOffset)); |
| 4499 __ li(t0, Operand(new_space_allocation_top)); |
| 4500 __ lw(a3, MemOperand(t0)); |
| 4501 __ Branch(&call_builtin, ne, a3, Operand(end_elements)); // Only possible when elements is the newest new-space allocation. |
| 4502 |
| 4503 __ li(t3, Operand(new_space_allocation_limit)); |
| 4504 __ lw(t3, MemOperand(t3)); |
| 4505 __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize)); |
| 4506 __ Branch(&call_builtin, hi, a3, Operand(t3)); // Unsigned compare: the bumped top must stay within the new-space limit. |
| 4507 |
| 4508 // We fit and could grow elements. |
| 4509 // Update new_space_allocation_top. |
| 4510 __ sw(a3, MemOperand(t0)); |
| 4511 // Push the argument. |
| 4512 __ sw(a2, MemOperand(end_elements)); |
| 4513 // Fill the rest with holes. |
| 4514 __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); |
| 4515 for (int i = 1; i < kAllocationDelta; i++) { // Slot 0 holds the pushed value; the remaining new slots become holes. |
| 4516 __ sw(a3, MemOperand(end_elements, i * kPointerSize)); |
| 4517 } |
| 4518 |
| 4519 // Update elements' and array's sizes. |
| 4520 __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 4521 __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 4522 __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta))); |
| 4523 __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 4524 |
| 4525 // Elements are in new space, so write barrier is not required. |
| 4526 __ mov(v0, scratch); |
| 4527 __ DropAndRet(argc + 1); |
| 4528 |
| 4529 __ bind(&call_builtin); |
| 4530 __ TailCallExternalReference( |
| 4531 ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |
| 4532 } |
| 4533 |
| 4534 |
4335 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 4535 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { |
4336 // ----------- S t a t e ------------- | 4536 // ----------- S t a t e ------------- |
4337 // -- a1 : left | 4537 // -- a1 : left |
4338 // -- a0 : right | 4538 // -- a0 : right |
4339 // -- ra : return address | 4539 // -- ra : return address |
4340 // ----------------------------------- | 4540 // ----------------------------------- |
4341 Isolate* isolate = masm->isolate(); | 4541 Isolate* isolate = masm->isolate(); |
4342 | 4542 |
4343 // Load a2 with the allocation site. We stick an undefined dummy value here | 4543 // Load a2 with the allocation site. We stick an undefined dummy value here |
4344 // and replace it with the real allocation site later when we instantiate this | 4544 // and replace it with the real allocation site later when we instantiate this |
(...skipping 1675 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
6020 __ bind(&fast_elements_case); | 6220 __ bind(&fast_elements_case); |
6021 GenerateCase(masm, FAST_ELEMENTS); | 6221 GenerateCase(masm, FAST_ELEMENTS); |
6022 } | 6222 } |
6023 | 6223 |
6024 | 6224 |
6025 #undef __ | 6225 #undef __ |
6026 | 6226 |
6027 } } // namespace v8::internal | 6227 } } // namespace v8::internal |
6028 | 6228 |
6029 #endif // V8_TARGET_ARCH_MIPS | 6229 #endif // V8_TARGET_ARCH_MIPS |
OLD | NEW |