Index: src/arm64/codegen-arm64.cc
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index 00e88b1d8a629edc9247da854a2b7994a649f2eb..9a9abbdc7673223ff3be63b43c8ece266f4ae22f 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -62,7 +62,7 @@ UnaryMathFunction CreateExpFunction() {
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
   CpuFeatures::FlushICache(buffer, actual_size);
   base::OS::ProtectCode(buffer, actual_size);
@@ -86,14 +86,14 @@ UnaryMathFunction CreateSqrtFunction() {
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -111,10 +111,10 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
     Label* allocation_memento_found) {
   ASM_LOCATION(
       "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
-  ASSERT(!AreAliased(receiver, key, value, target_map));
+  DCHECK(!AreAliased(receiver, key, value, target_map));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
+    DCHECK(allocation_memento_found != NULL);
     __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                          allocation_memento_found);
   }
@@ -150,7 +150,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
   Register scratch = x6;
 
   // Verify input registers don't conflict with locals.
-  ASSERT(!AreAliased(receiver, key, value, target_map,
+  DCHECK(!AreAliased(receiver, key, value, target_map,
                      elements, length, array_size, array));
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -253,7 +253,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
   Register length = x5;
 
   // Verify input registers don't conflict with locals.
-  ASSERT(!AreAliased(receiver, key, value, target_map,
+  DCHECK(!AreAliased(receiver, key, value, target_map,
                      elements, array_size, array, length));
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -356,7 +356,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   // The sequence of instructions that is patched out for aging code is the
   // following boilerplate stack-building prologue that is found both in
   // FUNCTION and OPTIMIZED_FUNCTION code:
@@ -368,7 +368,7 @@ CodeAgingHelper::CodeAgingHelper() {
 
 #ifdef DEBUG
   const int length = kCodeAgeStubEntryOffset / kInstructionSize;
-  ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
+  DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
   PatchingAssembler patcher_old(old_sequence_.start(), length);
   MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
 #endif
@@ -420,7 +420,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
-  ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+  DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
   // Fetch the instance type of the receiver into result register.
   __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -516,10 +516,10 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   // instead of fmul and fsub. Doing this changes the result, but since this is
   // an estimation anyway, does it matter?
 
-  ASSERT(!AreAliased(input, result,
+  DCHECK(!AreAliased(input, result,
                      double_temp1, double_temp2,
                      temp1, temp2, temp3));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
 
   Label done;
   DoubleRegister double_temp3 = result;
@@ -539,7 +539,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   Label result_is_finite_non_zero;
   // Assert that we can load offset 0 (the small input threshold) and offset 1
   // (the large input threshold) with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
                        ExpConstant(constants, 0).offset()));
   __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
 
@@ -569,7 +569,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ Bind(&result_is_finite_non_zero);
 
   // Assert that we can load offset 3 and offset 4 with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
                        ExpConstant(constants, 3).offset()));
   __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
   __ Fmadd(double_temp1, double_temp1, input, double_temp3);
@@ -577,7 +577,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   __ Fsub(double_temp1, double_temp1, double_temp3);
 
   // Assert that we can load offset 5 and offset 6 with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
                        ExpConstant(constants, 5).offset()));
   __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
   // TODO(jbramley): Consider using Fnmsub here.
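Note on the rename: this patch mechanically replaces V8's homegrown ASSERT
macro with DCHECK, following the Chromium convention in which DCHECK-style
checks are compiled and evaluated only in debug builds and become no-ops in
release builds. For illustration only, a minimal sketch of such a macro is
shown below; this is an assumption for clarity, not V8's actual definition
from base/logging.h.

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for a DCHECK-style macro. In debug builds the
    // condition is evaluated and a failure aborts with a diagnostic; in
    // release builds the macro expands to a no-op and the condition is not
    // evaluated at all, so it must be free of required side effects.
    #ifdef DEBUG
    #define DCHECK(condition)                                       \
      do {                                                          \
        if (!(condition)) {                                         \
          std::fprintf(stderr, "Debug check failed: %s (%s:%d)\n",  \
                       #condition, __FILE__, __LINE__);             \
          std::abort();                                             \
        }                                                           \
      } while (false)
    #else
    #define DCHECK(condition) ((void)0)
    #endif

Because ASSERT and DCHECK are the same length, every continuation line that
aligned its arguments under the opening parenthesis stays put, which is why
the multi-line AreAliased and kDRegSize checks above change only on their
first line.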