| Index: src/mips64/macro-assembler-mips64.cc
|
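| This patch mechanically renames V8's ASSERT* assertion macros to the
| Chromium-style DCHECK* names throughout the MIPS64 macro assembler. As a
| minimal sketch of the semantics being adopted (illustrative only, assuming
| the conventional DEBUG-gated definition; the real macros live in V8's base
| logging header and additionally report file and line):
|
| #include <cstdio>   // std::fprintf
| #include <cstdlib>  // std::abort
|
| #ifdef DEBUG
| #define DCHECK(condition)                                            \
|   do {                                                               \
|     if (!(condition)) {                                              \
|       std::fprintf(stderr, "Debug check failed: %s.\n", #condition); \
|       std::abort();                                                  \
|     }                                                                \
|   } while (false)
| #else
| #define DCHECK(condition) ((void)0)
| #endif
| #define DCHECK_EQ(v1, v2) DCHECK((v1) == (v2))
|
| Unlike CHECK, which stays enabled in release builds, DCHECK compiles away
| outside DEBUG builds, so the renamed assertions below cost nothing in
| shipping code.
|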
| diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
|
| index e332e070f01c7fcd834c8007af985bb65590b7db..87124dca14688ae64191e631396c48b249c2b082 100644
|
| --- a/src/mips64/macro-assembler-mips64.cc
|
| +++ b/src/mips64/macro-assembler-mips64.cc
|
| @@ -32,7 +32,7 @@ MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
|
| void MacroAssembler::Load(Register dst,
|
| const MemOperand& src,
|
| Representation r) {
|
| - ASSERT(!r.IsDouble());
|
| + DCHECK(!r.IsDouble());
|
| if (r.IsInteger8()) {
|
| lb(dst, src);
|
| } else if (r.IsUInteger8()) {
|
| @@ -52,7 +52,7 @@ void MacroAssembler::Load(Register dst,
|
| void MacroAssembler::Store(Register src,
|
| const MemOperand& dst,
|
| Representation r) {
|
| - ASSERT(!r.IsDouble());
|
| + DCHECK(!r.IsDouble());
|
| if (r.IsInteger8() || r.IsUInteger8()) {
|
| sb(src, dst);
|
| } else if (r.IsInteger16() || r.IsUInteger16()) {
|
| @@ -105,7 +105,7 @@ void MacroAssembler::PushSafepointRegisters() {
|
| // Safepoints expect a block of kNumSafepointRegisters values on the
|
| // stack, so adjust the stack for unsaved registers.
|
| const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
|
| - ASSERT(num_unsaved >= 0);
|
| + DCHECK(num_unsaved >= 0);
|
| if (num_unsaved > 0) {
|
| Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
|
| }
|
| @@ -157,7 +157,7 @@ void MacroAssembler::InNewSpace(Register object,
|
| Register scratch,
|
| Condition cc,
|
| Label* branch) {
|
| - ASSERT(cc == eq || cc == ne);
|
| + DCHECK(cc == eq || cc == ne);
|
| And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
|
| Branch(branch, cc, scratch,
|
| Operand(ExternalReference::new_space_start(isolate())));
|
| @@ -174,7 +174,7 @@ void MacroAssembler::RecordWriteField(
|
| RememberedSetAction remembered_set_action,
|
| SmiCheck smi_check,
|
| PointersToHereCheck pointers_to_here_check_for_value) {
|
| - ASSERT(!AreAliased(value, dst, t8, object));
|
| + DCHECK(!AreAliased(value, dst, t8, object));
|
| // First, check if a write barrier is even needed. The tests below
|
| // catch stores of Smis.
|
| Label done;
|
| @@ -186,7 +186,7 @@ void MacroAssembler::RecordWriteField(
|
|
|
| // Although the object register is tagged, the offset is relative to the start
|
| // of the object, so the offset must be a multiple of kPointerSize.
|
| - ASSERT(IsAligned(offset, kPointerSize));
|
| + DCHECK(IsAligned(offset, kPointerSize));
|
|
|
| Daddu(dst, object, Operand(offset - kHeapObjectTag));
|
| if (emit_debug_code()) {
|
| @@ -225,7 +225,7 @@ void MacroAssembler::RecordWriteForMap(Register object,
|
| RAStatus ra_status,
|
| SaveFPRegsMode fp_mode) {
|
| if (emit_debug_code()) {
|
| - ASSERT(!dst.is(at));
|
| + DCHECK(!dst.is(at));
|
| ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
|
| Check(eq,
|
| kWrongAddressOrValuePassedToRecordWrite,
|
| @@ -304,8 +304,8 @@ void MacroAssembler::RecordWrite(
|
| RememberedSetAction remembered_set_action,
|
| SmiCheck smi_check,
|
| PointersToHereCheck pointers_to_here_check_for_value) {
|
| - ASSERT(!AreAliased(object, address, value, t8));
|
| - ASSERT(!AreAliased(object, address, value, t9));
|
| + DCHECK(!AreAliased(object, address, value, t8));
|
| + DCHECK(!AreAliased(object, address, value, t9));
|
|
|
| if (emit_debug_code()) {
|
| ld(at, MemOperand(address));
|
| @@ -323,7 +323,7 @@ void MacroAssembler::RecordWrite(
|
| Label done;
|
|
|
| if (smi_check == INLINE_SMI_CHECK) {
|
| - ASSERT_EQ(0, kSmiTag);
|
| + DCHECK_EQ(0, kSmiTag);
|
| JumpIfSmi(value, &done);
|
| }
|
|
|
| @@ -392,11 +392,11 @@ void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
|
| // Call stub on end of buffer.
|
| // Check for end of buffer.
|
| And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
|
| - ASSERT(!scratch.is(t8));
|
| + DCHECK(!scratch.is(t8));
|
| if (and_then == kFallThroughAtEnd) {
|
| Branch(&done, eq, t8, Operand(zero_reg));
|
| } else {
|
| - ASSERT(and_then == kReturnAtEnd);
|
| + DCHECK(and_then == kReturnAtEnd);
|
| Ret(eq, t8, Operand(zero_reg));
|
| }
|
| push(ra);
|
| @@ -420,9 +420,9 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
|
| Label* miss) {
|
| Label same_contexts;
|
|
|
| - ASSERT(!holder_reg.is(scratch));
|
| - ASSERT(!holder_reg.is(at));
|
| - ASSERT(!scratch.is(at));
|
| + DCHECK(!holder_reg.is(scratch));
|
| + DCHECK(!holder_reg.is(at));
|
| + DCHECK(!scratch.is(at));
|
|
|
| // Load current lexical context from the stack frame.
|
| ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| @@ -578,7 +578,7 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
|
| and_(reg2, reg2, reg1);
|
|
|
| // Scale the index by multiplying by the element size.
|
| - ASSERT(SeededNumberDictionary::kEntrySize == 3);
|
| + DCHECK(SeededNumberDictionary::kEntrySize == 3);
|
| dsll(at, reg2, 1); // 2x.
|
| daddu(reg2, reg2, at); // reg2 = reg2 * 3.
|
|
|
| @@ -621,7 +621,7 @@ void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
|
| addiu(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| addu(rd, rs, at);
|
| }
|
| @@ -637,7 +637,7 @@ void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
|
| daddiu(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| daddu(rd, rs, at);
|
| }
|
| @@ -653,7 +653,7 @@ void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
|
| addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| subu(rd, rs, at);
|
| }
|
| @@ -669,7 +669,7 @@ void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
|
| daddiu(rd, rs, -rt.imm64_); // No dsubiu instr, use daddiu(x, y, -imm).
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| dsubu(rd, rs, at);
|
| }
|
| @@ -682,7 +682,7 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
|
| mul(rd, rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| mul(rd, rs, at);
|
| }
|
| @@ -699,7 +699,7 @@ void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
|
| }
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| if (kArchVariant != kMips64r6) {
|
| mult(rs, at);
|
| @@ -721,7 +721,7 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
|
| }
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| if (kArchVariant == kMips64r6) {
|
| dmul(rd, rs, at);
|
| @@ -743,7 +743,7 @@ void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
|
| }
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| if (kArchVariant == kMips64r6) {
|
| dmuh(rd, rs, at);
|
| @@ -760,7 +760,7 @@ void MacroAssembler::Mult(Register rs, const Operand& rt) {
|
| mult(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| mult(rs, at);
|
| }
|
| @@ -772,7 +772,7 @@ void MacroAssembler::Dmult(Register rs, const Operand& rt) {
|
| dmult(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| dmult(rs, at);
|
| }
|
| @@ -784,7 +784,7 @@ void MacroAssembler::Multu(Register rs, const Operand& rt) {
|
| multu(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| multu(rs, at);
|
| }
|
| @@ -796,7 +796,7 @@ void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
|
| dmultu(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| dmultu(rs, at);
|
| }
|
| @@ -808,7 +808,7 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
|
| div(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| div(rs, at);
|
| }
|
| @@ -820,7 +820,7 @@ void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
|
| ddiv(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| ddiv(rs, at);
|
| }
|
| @@ -834,7 +834,7 @@ void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
|
| mflo(rd);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| ddiv(rs, at);
|
| mflo(rd);
|
| @@ -844,7 +844,7 @@ void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
|
| ddiv(rd, rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| ddiv(rd, rs, at);
|
| }
|
| @@ -857,7 +857,7 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
|
| divu(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| divu(rs, at);
|
| }
|
| @@ -869,7 +869,7 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
|
| ddivu(rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| ddivu(rs, at);
|
| }
|
| @@ -883,7 +883,7 @@ void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
|
| mfhi(rd);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| ddiv(rs, at);
|
| mfhi(rd);
|
| @@ -893,7 +893,7 @@ void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
|
| dmod(rd, rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| dmod(rd, rs, at);
|
| }
|
| @@ -909,7 +909,7 @@ void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
|
| andi(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| and_(rd, rs, at);
|
| }
|
| @@ -925,7 +925,7 @@ void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
|
| ori(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| or_(rd, rs, at);
|
| }
|
| @@ -941,7 +941,7 @@ void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
|
| xori(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| xor_(rd, rs, at);
|
| }
|
| @@ -954,7 +954,7 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
|
| nor(rd, rs, rt.rm());
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| nor(rd, rs, at);
|
| }
|
| @@ -962,9 +962,9 @@ void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
|
|
|
|
|
| void MacroAssembler::Neg(Register rs, const Operand& rt) {
|
| - ASSERT(rt.is_reg());
|
| - ASSERT(!at.is(rs));
|
| - ASSERT(!at.is(rt.rm()));
|
| + DCHECK(rt.is_reg());
|
| + DCHECK(!at.is(rs));
|
| + DCHECK(!at.is(rt.rm()));
|
| li(at, -1);
|
| xor_(rs, rt.rm(), at);
|
| }
|
| @@ -978,7 +978,7 @@ void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
|
| slti(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| slt(rd, rs, at);
|
| }
|
| @@ -994,7 +994,7 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
|
| sltiu(rd, rs, rt.imm64_);
|
| } else {
|
| // li handles the relocation.
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!rs.is(at));
|
| li(at, rt);
|
| sltu(rd, rs, at);
|
| }
|
| @@ -1061,7 +1061,7 @@ void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
|
| void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
|
| // Assert fails if the offset from the start of the object IS actually aligned.
|
| // ONLY use with known misalignment, since there is a performance cost.
|
| - ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
|
| + DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
|
| // TODO(plind): endian dependency.
|
| lwu(rd, rs);
|
| lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
|
| @@ -1075,7 +1075,7 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
|
| void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
|
| // Assert fails if the offset from the start of the object IS actually aligned.
|
| // ONLY use with known misalignment, since there is a performance cost.
|
| - ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
|
| + DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
|
| // TODO(plind): endian dependency.
|
| sw(rd, rs);
|
| dsrl32(scratch, rd, 0);
|
| @@ -1088,7 +1088,7 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
|
| if (value->IsSmi()) {
|
| li(dst, Operand(value), mode);
|
| } else {
|
| - ASSERT(value->IsHeapObject());
|
| + DCHECK(value->IsHeapObject());
|
| if (isolate()->heap()->InNewSpace(*value)) {
|
| Handle<Cell> cell = isolate()->factory()->NewCell(value);
|
| li(dst, Operand(cell));
|
| @@ -1101,7 +1101,7 @@ void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
|
|
|
|
|
| void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
|
| - ASSERT(!j.is_reg());
|
| + DCHECK(!j.is_reg());
|
| BlockTrampolinePoolScope block_trampoline_pool(this);
|
| if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
|
| // Normal load of an immediate value which does not need Relocation Info.
|
| @@ -1275,8 +1275,8 @@ void MacroAssembler::Ext(Register rt,
|
| Register rs,
|
| uint16_t pos,
|
| uint16_t size) {
|
| - ASSERT(pos < 32);
|
| - ASSERT(pos + size < 33);
|
| + DCHECK(pos < 32);
|
| + DCHECK(pos + size < 33);
|
| ext_(rt, rs, pos, size);
|
| }
|
|
|
| @@ -1285,9 +1285,9 @@ void MacroAssembler::Ins(Register rt,
|
| Register rs,
|
| uint16_t pos,
|
| uint16_t size) {
|
| - ASSERT(pos < 32);
|
| - ASSERT(pos + size <= 32);
|
| - ASSERT(size != 0);
|
| + DCHECK(pos < 32);
|
| + DCHECK(pos + size <= 32);
|
| + DCHECK(size != 0);
|
| ins_(rt, rs, pos, size);
|
| }
|
|
|
| @@ -1308,9 +1308,9 @@ void MacroAssembler::Cvt_d_uw(FPURegister fd,
|
| // We do this by converting rs minus the MSB to avoid sign conversion,
|
| // then adding 2^31 to the result (if needed).
|
|
|
| - ASSERT(!fd.is(scratch));
|
| - ASSERT(!rs.is(t9));
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!fd.is(scratch));
|
| + DCHECK(!rs.is(t9));
|
| + DCHECK(!rs.is(at));
|
|
|
| // Save rs's MSB to t9.
|
| Ext(t9, rs, 31, 1);
|
| @@ -1404,8 +1404,8 @@ void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
|
| void MacroAssembler::Trunc_uw_d(FPURegister fd,
|
| Register rs,
|
| FPURegister scratch) {
|
| - ASSERT(!fd.is(scratch));
|
| - ASSERT(!rs.is(at));
|
| + DCHECK(!fd.is(scratch));
|
| + DCHECK(!rs.is(at));
|
|
|
| // Load 2^31 into scratch as its float representation.
|
| li(at, 0x41E00000);
|
| @@ -1440,7 +1440,7 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
|
| madd_d(fd, fr, fs, ft);
|
| } else {
|
| // Cannot change the source registers' values.
|
| - ASSERT(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
|
| + DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
|
| mul_d(scratch, fs, ft);
|
| add_d(fd, fr, scratch);
|
| }
|
| @@ -1459,7 +1459,7 @@ void MacroAssembler::BranchF(Label* target,
|
| return;
|
| }
|
|
|
| - ASSERT(nan || target);
|
| + DCHECK(nan || target);
|
| // Check for unordered (NaN) cases.
|
| if (nan) {
|
| if (kArchVariant != kMips64r6) {
|
| @@ -1468,7 +1468,7 @@ void MacroAssembler::BranchF(Label* target,
|
| } else {
|
| // Use f31 for comparison result. It has to be unavailable to lithium
|
| // register allocator.
|
| - ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
|
| + DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
|
| cmp(UN, L, f31, cmp1, cmp2);
|
| bc1nez(nan, f31);
|
| }
|
| @@ -1521,7 +1521,7 @@ void MacroAssembler::BranchF(Label* target,
|
| // have been handled by the caller.
|
| // Unsigned conditions are treated as their signed counterpart.
|
| // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
|
| - ASSERT(!cmp1.is(f31) && !cmp2.is(f31));
|
| + DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
|
| switch (cc) {
|
| case lt:
|
| cmp(OLT, L, f31, cmp1, cmp2);
|
| @@ -1646,9 +1646,9 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
|
| DoubleRegister double_scratch,
|
| Register except_flag,
|
| CheckForInexactConversion check_inexact) {
|
| - ASSERT(!result.is(scratch));
|
| - ASSERT(!double_input.is(double_scratch));
|
| - ASSERT(!except_flag.is(scratch));
|
| + DCHECK(!result.is(scratch));
|
| + DCHECK(!double_input.is(double_scratch));
|
| + DCHECK(!except_flag.is(scratch));
|
|
|
| Label done;
|
|
|
| @@ -1752,7 +1752,7 @@ void MacroAssembler::TruncateDoubleToI(Register result,
|
| void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
|
| Label done;
|
| DoubleRegister double_scratch = f12;
|
| - ASSERT(!result.is(object));
|
| + DCHECK(!result.is(object));
|
|
|
| ldc1(double_scratch,
|
| MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
|
| @@ -1779,7 +1779,7 @@ void MacroAssembler::TruncateNumberToI(Register object,
|
| Register scratch,
|
| Label* not_number) {
|
| Label done;
|
| - ASSERT(!result.is(object));
|
| + DCHECK(!result.is(object));
|
|
|
| UntagAndJumpIfSmi(result, object, &done);
|
| JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
|
| @@ -1801,7 +1801,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
|
| void MacroAssembler::GetLeastBitsFromInt32(Register dst,
|
| Register src,
|
| int num_least_bits) {
|
| - ASSERT(!src.is(dst));
|
| + DCHECK(!src.is(dst));
|
| And(dst, src, Operand((1 << num_least_bits) - 1));
|
| }
|
|
|
| @@ -1809,7 +1809,7 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
|
| // Emulated conditional branches do not emit a nop in the branch delay slot.
|
| //
|
| // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
|
| -#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
|
| +#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK( \
|
| (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
|
| (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
|
|
|
| @@ -1901,7 +1901,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bdslot) {
|
| BRANCH_ARGS_CHECK(cond, rs, rt);
|
| - ASSERT(!rs.is(zero_reg));
|
| + DCHECK(!rs.is(zero_reg));
|
| Register r2 = no_reg;
|
| Register scratch = at;
|
|
|
| @@ -2001,14 +2001,14 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
|
| break;
|
| case eq:
|
| // We don't want any other register but scratch clobbered.
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| beq(rs, r2, offset);
|
| break;
|
| case ne:
|
| // We don't want any other register but scratch clobbered.
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| bne(rs, r2, offset);
|
| @@ -2253,14 +2253,14 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| b(offset);
|
| break;
|
| case eq:
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| offset = shifted_branch_offset(L, false);
|
| beq(rs, r2, offset);
|
| break;
|
| case ne:
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| offset = shifted_branch_offset(L, false);
|
| @@ -2272,7 +2272,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| bgtz(rs, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| slt(scratch, r2, rs);
|
| @@ -2289,7 +2289,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| beq(scratch, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| slt(scratch, rs, r2);
|
| @@ -2306,7 +2306,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| bne(scratch, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| slt(scratch, rs, r2);
|
| @@ -2319,7 +2319,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| blez(rs, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| slt(scratch, r2, rs);
|
| @@ -2333,7 +2333,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| bne(rs, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| sltu(scratch, r2, rs);
|
| @@ -2350,7 +2350,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| beq(scratch, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| sltu(scratch, rs, r2);
|
| @@ -2367,7 +2367,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| bne(scratch, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| sltu(scratch, rs, r2);
|
| @@ -2380,7 +2380,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| offset = shifted_branch_offset(L, false);
|
| beq(rs, zero_reg, offset);
|
| } else {
|
| - ASSERT(!scratch.is(rs));
|
| + DCHECK(!scratch.is(rs));
|
| r2 = scratch;
|
| li(r2, rt);
|
| sltu(scratch, r2, rs);
|
| @@ -2393,7 +2393,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| }
|
| }
|
| // Check that the offset actually fits in an int16_t.
|
| - ASSERT(is_int16(offset));
|
| + DCHECK(is_int16(offset));
|
| // Emit a nop in the branch delay slot if required.
|
| if (bdslot == PROTECT)
|
| nop();
|
| @@ -2688,7 +2688,7 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
|
| }
|
| }
|
| // Check that the offset actually fits in an int16_t.
|
| - ASSERT(is_int16(offset));
|
| + DCHECK(is_int16(offset));
|
|
|
| // Emit a nop in the branch delay slot if required.
|
| if (bdslot == PROTECT)
|
| @@ -2739,7 +2739,7 @@ void MacroAssembler::Jump(Address target,
|
| Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bd) {
|
| - ASSERT(!RelocInfo::IsCodeTarget(rmode));
|
| + DCHECK(!RelocInfo::IsCodeTarget(rmode));
|
| Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
|
| }
|
|
|
| @@ -2750,7 +2750,7 @@ void MacroAssembler::Jump(Handle<Code> code,
|
| Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bd) {
|
| - ASSERT(RelocInfo::IsCodeTarget(rmode));
|
| + DCHECK(RelocInfo::IsCodeTarget(rmode));
|
| AllowDeferredHandleDereference embedding_raw_address;
|
| Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
|
| }
|
| @@ -2796,7 +2796,7 @@ void MacroAssembler::Call(Register target,
|
| if (bd == PROTECT)
|
| nop();
|
|
|
| - ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
|
| + DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
|
| SizeOfCodeGeneratedSince(&start));
|
| }
|
|
|
| @@ -2827,7 +2827,7 @@ void MacroAssembler::Call(Address target,
|
| positions_recorder()->WriteRecordedPositions();
|
| li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
|
| Call(t9, cond, rs, rt, bd);
|
| - ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
|
| + DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
|
| SizeOfCodeGeneratedSince(&start));
|
| }
|
|
|
| @@ -2855,14 +2855,14 @@ void MacroAssembler::Call(Handle<Code> code,
|
| BlockTrampolinePoolScope block_trampoline_pool(this);
|
| Label start;
|
| bind(&start);
|
| - ASSERT(RelocInfo::IsCodeTarget(rmode));
|
| + DCHECK(RelocInfo::IsCodeTarget(rmode));
|
| if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
|
| SetRecordedAstId(ast_id);
|
| rmode = RelocInfo::CODE_TARGET_WITH_ID;
|
| }
|
| AllowDeferredHandleDereference embedding_raw_address;
|
| Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
|
| - ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
|
| + DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
|
| SizeOfCodeGeneratedSince(&start));
|
| }
|
|
|
| @@ -3005,7 +3005,7 @@ void MacroAssembler::Push(Handle<Object> handle) {
|
|
|
|
|
| void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
|
| - ASSERT(!src.is(scratch));
|
| + DCHECK(!src.is(scratch));
|
| mov(scratch, src);
|
| dsrl32(src, src, 0);
|
| dsll32(src, src, 0);
|
| @@ -3016,7 +3016,7 @@ void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
|
|
|
|
|
| void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
|
| - ASSERT(!dst.is(scratch));
|
| + DCHECK(!dst.is(scratch));
|
| pop(scratch);
|
| dsrl32(scratch, scratch, 0);
|
| pop(dst);
|
| @@ -3030,7 +3030,7 @@ void MacroAssembler::DebugBreak() {
|
| PrepareCEntryArgs(0);
|
| PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
|
| CEntryStub ces(isolate(), 1);
|
| - ASSERT(AllowThisStubCall(&ces));
|
| + DCHECK(AllowThisStubCall(&ces));
|
| Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
|
| }
|
|
|
| @@ -3060,7 +3060,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
|
|
|
| // Push the frame pointer, context, state, and code object.
|
| if (kind == StackHandler::JS_ENTRY) {
|
| - ASSERT_EQ(Smi::FromInt(0), 0);
|
| + DCHECK_EQ(Smi::FromInt(0), 0);
|
| // The second zero_reg indicates no context.
|
| // The first zero_reg is the NULL frame pointer.
|
| // The operands are reversed to match the order of MultiPush/Pop.
|
| @@ -3188,7 +3188,7 @@ void MacroAssembler::Allocate(int object_size,
|
| Register scratch2,
|
| Label* gc_required,
|
| AllocationFlags flags) {
|
| - ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
|
| + DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
|
| if (!FLAG_inline_new) {
|
| if (emit_debug_code()) {
|
| // Trash the registers to simulate an allocation failure.
|
| @@ -3200,18 +3200,18 @@ void MacroAssembler::Allocate(int object_size,
|
| return;
|
| }
|
|
|
| - ASSERT(!result.is(scratch1));
|
| - ASSERT(!result.is(scratch2));
|
| - ASSERT(!scratch1.is(scratch2));
|
| - ASSERT(!scratch1.is(t9));
|
| - ASSERT(!scratch2.is(t9));
|
| - ASSERT(!result.is(t9));
|
| + DCHECK(!result.is(scratch1));
|
| + DCHECK(!result.is(scratch2));
|
| + DCHECK(!scratch1.is(scratch2));
|
| + DCHECK(!scratch1.is(t9));
|
| + DCHECK(!scratch2.is(t9));
|
| + DCHECK(!result.is(t9));
|
|
|
| // Make object size into bytes.
|
| if ((flags & SIZE_IN_WORDS) != 0) {
|
| object_size *= kPointerSize;
|
| }
|
| - ASSERT(0 == (object_size & kObjectAlignmentMask));
|
| + DCHECK(0 == (object_size & kObjectAlignmentMask));
|
|
|
| // Check relative positions of allocation top and limit addresses.
|
| // ARM adds additional checks to make sure the ldm instruction can be
|
| @@ -3225,7 +3225,7 @@ void MacroAssembler::Allocate(int object_size,
|
| reinterpret_cast<intptr_t>(allocation_top.address());
|
| intptr_t limit =
|
| reinterpret_cast<intptr_t>(allocation_limit.address());
|
| - ASSERT((limit - top) == kPointerSize);
|
| + DCHECK((limit - top) == kPointerSize);
|
|
|
| // Set up allocation top address and object size registers.
|
| Register topaddr = scratch1;
|
| @@ -3248,7 +3248,7 @@ void MacroAssembler::Allocate(int object_size,
|
| ld(t9, MemOperand(topaddr, limit - top));
|
| }
|
|
|
| - ASSERT(kPointerSize == kDoubleSize);
|
| + DCHECK(kPointerSize == kDoubleSize);
|
| if (emit_debug_code()) {
|
| And(at, result, Operand(kDoubleAlignmentMask));
|
| Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
|
| @@ -3284,11 +3284,11 @@ void MacroAssembler::Allocate(Register object_size,
|
| return;
|
| }
|
|
|
| - ASSERT(!result.is(scratch1));
|
| - ASSERT(!result.is(scratch2));
|
| - ASSERT(!scratch1.is(scratch2));
|
| - ASSERT(!object_size.is(t9));
|
| - ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
|
| + DCHECK(!result.is(scratch1));
|
| + DCHECK(!result.is(scratch2));
|
| + DCHECK(!scratch1.is(scratch2));
|
| + DCHECK(!object_size.is(t9));
|
| + DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
|
|
|
| // Check relative positions of allocation top and limit addresses.
|
| // ARM adds additional checks to make sure the ldm instruction can be
|
| @@ -3301,7 +3301,7 @@ void MacroAssembler::Allocate(Register object_size,
|
| reinterpret_cast<intptr_t>(allocation_top.address());
|
| intptr_t limit =
|
| reinterpret_cast<intptr_t>(allocation_limit.address());
|
| - ASSERT((limit - top) == kPointerSize);
|
| + DCHECK((limit - top) == kPointerSize);
|
|
|
| // Set up allocation top address and object size registers.
|
| Register topaddr = scratch1;
|
| @@ -3324,7 +3324,7 @@ void MacroAssembler::Allocate(Register object_size,
|
| ld(t9, MemOperand(topaddr, limit - top));
|
| }
|
|
|
| - ASSERT(kPointerSize == kDoubleSize);
|
| + DCHECK(kPointerSize == kDoubleSize);
|
| if (emit_debug_code()) {
|
| And(at, result, Operand(kDoubleAlignmentMask));
|
| Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
|
| @@ -3383,7 +3383,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
|
| Label* gc_required) {
|
| // Calculate the number of bytes needed for the characters in the string while
|
| // observing object alignment.
|
| - ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| + DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| dsll(scratch1, length, 1); // Length in bytes, not chars.
|
| daddiu(scratch1, scratch1,
|
| kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
|
| @@ -3414,8 +3414,8 @@ void MacroAssembler::AllocateAsciiString(Register result,
|
| Label* gc_required) {
|
| // Calculate the number of bytes needed for the characters in the string
|
| // while observing object alignment.
|
| - ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| - ASSERT(kCharSize == 1);
|
| + DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
|
| + DCHECK(kCharSize == 1);
|
| daddiu(scratch1, length,
|
| kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
|
| And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
|
| @@ -3560,8 +3560,8 @@ void MacroAssembler::CopyFields(Register dst,
|
| Register src,
|
| RegList temps,
|
| int field_count) {
|
| - ASSERT((temps & dst.bit()) == 0);
|
| - ASSERT((temps & src.bit()) == 0);
|
| + DCHECK((temps & dst.bit()) == 0);
|
| + DCHECK((temps & src.bit()) == 0);
|
| // Primitive implementation using only one temporary register.
|
|
|
| Register tmp = no_reg;
|
| @@ -3572,7 +3572,7 @@ void MacroAssembler::CopyFields(Register dst,
|
| break;
|
| }
|
| }
|
| - ASSERT(!tmp.is(no_reg));
|
| + DCHECK(!tmp.is(no_reg));
|
|
|
| for (int i = 0; i < field_count; i++) {
|
| ld(tmp, FieldMemOperand(src, i * kPointerSize));
|
| @@ -3873,7 +3873,7 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
|
| if (!IsMipsSoftFloatABI) {
|
| const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
|
| if (src2.is(f12)) {
|
| - ASSERT(!src1.is(fparg2));
|
| + DCHECK(!src1.is(fparg2));
|
| Move(fparg2, src2);
|
| Move(f12, src1);
|
| } else {
|
| @@ -3911,12 +3911,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
|
| // The code below is made a lot easier because the calling code already sets
|
| // up actual and expected registers according to the contract if values are
|
| // passed in registers.
|
| - ASSERT(actual.is_immediate() || actual.reg().is(a0));
|
| - ASSERT(expected.is_immediate() || expected.reg().is(a2));
|
| - ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
|
| + DCHECK(actual.is_immediate() || actual.reg().is(a0));
|
| + DCHECK(expected.is_immediate() || expected.reg().is(a2));
|
| + DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
|
|
|
| if (expected.is_immediate()) {
|
| - ASSERT(actual.is_immediate());
|
| + DCHECK(actual.is_immediate());
|
| if (expected.immediate() == actual.immediate()) {
|
| definitely_matches = true;
|
| } else {
|
| @@ -3969,7 +3969,7 @@ void MacroAssembler::InvokeCode(Register code,
|
| InvokeFlag flag,
|
| const CallWrapper& call_wrapper) {
|
| // You can't call a function without a valid frame.
|
| - ASSERT(flag == JUMP_FUNCTION || has_frame());
|
| + DCHECK(flag == JUMP_FUNCTION || has_frame());
|
|
|
| Label done;
|
|
|
| @@ -3983,7 +3983,7 @@ void MacroAssembler::InvokeCode(Register code,
|
| Call(code);
|
| call_wrapper.AfterCall();
|
| } else {
|
| - ASSERT(flag == JUMP_FUNCTION);
|
| + DCHECK(flag == JUMP_FUNCTION);
|
| Jump(code);
|
| }
|
| // Continue here if InvokePrologue does handle the invocation due to
|
| @@ -3998,10 +3998,10 @@ void MacroAssembler::InvokeFunction(Register function,
|
| InvokeFlag flag,
|
| const CallWrapper& call_wrapper) {
|
| // You can't call a function without a valid frame.
|
| - ASSERT(flag == JUMP_FUNCTION || has_frame());
|
| + DCHECK(flag == JUMP_FUNCTION || has_frame());
|
|
|
| // Contract with called JS functions requires that function is passed in a1.
|
| - ASSERT(function.is(a1));
|
| + DCHECK(function.is(a1));
|
| Register expected_reg = a2;
|
| Register code_reg = a3;
|
| ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
|
| @@ -4023,10 +4023,10 @@ void MacroAssembler::InvokeFunction(Register function,
|
| InvokeFlag flag,
|
| const CallWrapper& call_wrapper) {
|
| // You can't call a function without a valid frame.
|
| - ASSERT(flag == JUMP_FUNCTION || has_frame());
|
| + DCHECK(flag == JUMP_FUNCTION || has_frame());
|
|
|
| // Contract with called JS functions requires that function is passed in a1.
|
| - ASSERT(function.is(a1));
|
| + DCHECK(function.is(a1));
|
|
|
| // Get the function and set up the context.
|
| ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
|
| @@ -4070,7 +4070,7 @@ void MacroAssembler::IsInstanceJSObjectType(Register map,
|
| void MacroAssembler::IsObjectJSStringType(Register object,
|
| Register scratch,
|
| Label* fail) {
|
| - ASSERT(kNotStringTag != 0);
|
| + DCHECK(kNotStringTag != 0);
|
|
|
| ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
|
| lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
| @@ -4169,7 +4169,7 @@ void MacroAssembler::CallStub(CodeStub* stub,
|
| Register r1,
|
| const Operand& r2,
|
| BranchDelaySlot bd) {
|
| - ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
|
| + DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
|
| Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
|
| cond, r1, r2, bd);
|
| }
|
| @@ -4186,7 +4186,7 @@ void MacroAssembler::TailCallStub(CodeStub* stub,
|
|
|
| static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
|
| int64_t offset = (ref0.address() - ref1.address());
|
| - ASSERT(static_cast<int>(offset) == offset);
|
| + DCHECK(static_cast<int>(offset) == offset);
|
| return static_cast<int>(offset);
|
| }
|
|
|
| @@ -4207,7 +4207,7 @@ void MacroAssembler::CallApiFunctionAndReturn(
|
| ExternalReference::handle_scope_level_address(isolate()),
|
| next_address);
|
|
|
| - ASSERT(function_address.is(a1) || function_address.is(a2));
|
| + DCHECK(function_address.is(a1) || function_address.is(a2));
|
|
|
| Label profiler_disabled;
|
| Label end_profiler_check;
|
| @@ -4325,7 +4325,7 @@ void MacroAssembler::IndexFromHash(Register hash, Register index) {
|
| // that the constants for the maximum number of digits for an array index
|
| // cached in the hash field and the number of bits reserved for it do not
|
| // conflict.
|
| - ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
|
| + DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
|
| (1 << String::kArrayIndexValueBits));
|
| DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
|
| }
|
| @@ -4384,18 +4384,18 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
|
| Register right,
|
| Register overflow_dst,
|
| Register scratch) {
|
| - ASSERT(!dst.is(overflow_dst));
|
| - ASSERT(!dst.is(scratch));
|
| - ASSERT(!overflow_dst.is(scratch));
|
| - ASSERT(!overflow_dst.is(left));
|
| - ASSERT(!overflow_dst.is(right));
|
| + DCHECK(!dst.is(overflow_dst));
|
| + DCHECK(!dst.is(scratch));
|
| + DCHECK(!overflow_dst.is(scratch));
|
| + DCHECK(!overflow_dst.is(left));
|
| + DCHECK(!overflow_dst.is(right));
|
|
|
| if (left.is(right) && dst.is(left)) {
|
| - ASSERT(!dst.is(t9));
|
| - ASSERT(!scratch.is(t9));
|
| - ASSERT(!left.is(t9));
|
| - ASSERT(!right.is(t9));
|
| - ASSERT(!overflow_dst.is(t9));
|
| + DCHECK(!dst.is(t9));
|
| + DCHECK(!scratch.is(t9));
|
| + DCHECK(!left.is(t9));
|
| + DCHECK(!right.is(t9));
|
| + DCHECK(!overflow_dst.is(t9));
|
| mov(t9, right);
|
| right = t9;
|
| }
|
| @@ -4426,13 +4426,13 @@ void MacroAssembler::SubuAndCheckForOverflow(Register dst,
|
| Register right,
|
| Register overflow_dst,
|
| Register scratch) {
|
| - ASSERT(!dst.is(overflow_dst));
|
| - ASSERT(!dst.is(scratch));
|
| - ASSERT(!overflow_dst.is(scratch));
|
| - ASSERT(!overflow_dst.is(left));
|
| - ASSERT(!overflow_dst.is(right));
|
| - ASSERT(!scratch.is(left));
|
| - ASSERT(!scratch.is(right));
|
| + DCHECK(!dst.is(overflow_dst));
|
| + DCHECK(!dst.is(scratch));
|
| + DCHECK(!overflow_dst.is(scratch));
|
| + DCHECK(!overflow_dst.is(left));
|
| + DCHECK(!overflow_dst.is(right));
|
| + DCHECK(!scratch.is(left));
|
| + DCHECK(!scratch.is(right));
|
|
|
| // This happens with some crankshaft code. Since Subu works fine if
|
| // left == right, let's not make that restriction here.
|
| @@ -4533,7 +4533,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
|
| InvokeFlag flag,
|
| const CallWrapper& call_wrapper) {
|
| // You can't call a builtin without a valid frame.
|
| - ASSERT(flag == JUMP_FUNCTION || has_frame());
|
| + DCHECK(flag == JUMP_FUNCTION || has_frame());
|
|
|
| GetBuiltinEntry(t9, id);
|
| if (flag == CALL_FUNCTION) {
|
| @@ -4541,7 +4541,7 @@ void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
|
| Call(t9);
|
| call_wrapper.AfterCall();
|
| } else {
|
| - ASSERT(flag == JUMP_FUNCTION);
|
| + DCHECK(flag == JUMP_FUNCTION);
|
| Jump(t9);
|
| }
|
| }
|
| @@ -4559,7 +4559,7 @@ void MacroAssembler::GetBuiltinFunction(Register target,
|
|
|
|
|
| void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
|
| - ASSERT(!target.is(a1));
|
| + DCHECK(!target.is(a1));
|
| GetBuiltinFunction(a1, id);
|
| // Load the code entry point from the builtins object.
|
| ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
|
| @@ -4578,7 +4578,7 @@ void MacroAssembler::SetCounter(StatsCounter* counter, int value,
|
|
|
| void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
|
| Register scratch1, Register scratch2) {
|
| - ASSERT(value > 0);
|
| + DCHECK(value > 0);
|
| if (FLAG_native_code_counters && counter->Enabled()) {
|
| li(scratch2, Operand(ExternalReference(counter)));
|
| ld(scratch1, MemOperand(scratch2));
|
| @@ -4590,7 +4590,7 @@ void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
|
|
|
| void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
|
| Register scratch1, Register scratch2) {
|
| - ASSERT(value > 0);
|
| + DCHECK(value > 0);
|
| if (FLAG_native_code_counters && counter->Enabled()) {
|
| li(scratch2, Operand(ExternalReference(counter)));
|
| ld(scratch1, MemOperand(scratch2));
|
| @@ -4612,7 +4612,7 @@ void MacroAssembler::Assert(Condition cc, BailoutReason reason,
|
|
|
| void MacroAssembler::AssertFastElements(Register elements) {
|
| if (emit_debug_code()) {
|
| - ASSERT(!elements.is(at));
|
| + DCHECK(!elements.is(at));
|
| Label ok;
|
| push(elements);
|
| ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
|
| @@ -4675,7 +4675,7 @@ void MacroAssembler::Abort(BailoutReason reason) {
|
| // generated instructions is 10, so we use this as a maximum value.
|
| static const int kExpectedAbortInstructions = 10;
|
| int abort_instructions = InstructionsGeneratedSince(&abort_start);
|
| - ASSERT(abort_instructions <= kExpectedAbortInstructions);
|
| + DCHECK(abort_instructions <= kExpectedAbortInstructions);
|
| while (abort_instructions++ < kExpectedAbortInstructions) {
|
| nop();
|
| }
|
| @@ -4866,10 +4866,10 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
|
| // Reserve room for the return address, stack space, and an optional slot
|
| // (used by the DirectCEntryStub to hold the return value if a struct is
|
| // returned) and align the frame in preparation for calling the runtime function.
|
| - ASSERT(stack_space >= 0);
|
| + DCHECK(stack_space >= 0);
|
| Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
|
| if (frame_alignment > 0) {
|
| - ASSERT(IsPowerOf2(frame_alignment));
|
| + DCHECK(IsPowerOf2(frame_alignment));
|
| And(sp, sp, Operand(-frame_alignment)); // Align stack.
|
| }
|
|
|
| @@ -4967,7 +4967,7 @@ void MacroAssembler::AssertStackIsAligned() {
|
|
|
| if (frame_alignment > kPointerSize) {
|
| Label alignment_as_expected;
|
| - ASSERT(IsPowerOf2(frame_alignment));
|
| + DCHECK(IsPowerOf2(frame_alignment));
|
| andi(at, sp, frame_alignment_mask);
|
| Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
|
| // Don't use Check here, as it will call Runtime_Abort re-entering here.
|
| @@ -4991,7 +4991,7 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
|
|
|
|
|
| void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
|
| - ASSERT(!reg.is(overflow));
|
| + DCHECK(!reg.is(overflow));
|
| mov(overflow, reg); // Save original value.
|
| SmiTag(reg);
|
| xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
|
| @@ -5005,9 +5005,9 @@ void MacroAssembler::SmiTagCheckOverflow(Register dst,
|
| // Fall back to slower case.
|
| SmiTagCheckOverflow(dst, overflow);
|
| } else {
|
| - ASSERT(!dst.is(src));
|
| - ASSERT(!dst.is(overflow));
|
| - ASSERT(!src.is(overflow));
|
| + DCHECK(!dst.is(src));
|
| + DCHECK(!dst.is(overflow));
|
| + DCHECK(!src.is(overflow));
|
| SmiTag(dst, src);
|
| xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
|
| }
|
| @@ -5031,7 +5031,7 @@ void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
|
| dsll(dst, dst, scale);
|
| } else {
|
| lw(dst, src);
|
| - ASSERT(scale >= kSmiTagSize);
|
| + DCHECK(scale >= kSmiTagSize);
|
| sll(dst, dst, scale - kSmiTagSize);
|
| }
|
| }
|
| @@ -5047,7 +5047,7 @@ void MacroAssembler::SmiLoadWithScale(Register d_smi,
|
| dsra(d_scaled, d_smi, kSmiShift - scale);
|
| } else {
|
| lw(d_smi, src);
|
| - ASSERT(scale >= kSmiTagSize);
|
| + DCHECK(scale >= kSmiTagSize);
|
| sll(d_scaled, d_smi, scale - kSmiTagSize);
|
| }
|
| }
|
| @@ -5073,7 +5073,7 @@ void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
|
| void MacroAssembler::UntagAndJumpIfSmi(Register dst,
|
| Register src,
|
| Label* smi_case) {
|
| - // ASSERT(!dst.is(src));
|
| + // DCHECK(!dst.is(src));
|
| JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
|
| SmiUntag(dst, src);
|
| }
|
| @@ -5082,7 +5082,7 @@ void MacroAssembler::UntagAndJumpIfSmi(Register dst,
|
| void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
|
| Register src,
|
| Label* non_smi_case) {
|
| - // ASSERT(!dst.is(src));
|
| + // DCHECK(!dst.is(src));
|
| JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
|
| SmiUntag(dst, src);
|
| }
|
| @@ -5091,7 +5091,7 @@ void MacroAssembler::JumpIfSmi(Register value,
|
| Label* smi_label,
|
| Register scratch,
|
| BranchDelaySlot bd) {
|
| - ASSERT_EQ(0, kSmiTag);
|
| + DCHECK_EQ(0, kSmiTag);
|
| andi(scratch, value, kSmiTagMask);
|
| Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
|
| }
|
| @@ -5100,7 +5100,7 @@ void MacroAssembler::JumpIfNotSmi(Register value,
|
| Label* not_smi_label,
|
| Register scratch,
|
| BranchDelaySlot bd) {
|
| - ASSERT_EQ(0, kSmiTag);
|
| + DCHECK_EQ(0, kSmiTag);
|
| andi(scratch, value, kSmiTagMask);
|
| Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
|
| }
|
| @@ -5112,9 +5112,9 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
|
| STATIC_ASSERT(kSmiTag == 0);
|
| // TODO(plind): Find some better way to fix this assert issue.
|
| #if defined(__APPLE__)
|
| - ASSERT_EQ(1, kSmiTagMask);
|
| + DCHECK_EQ(1, kSmiTagMask);
|
| #else
|
| - ASSERT_EQ((uint64_t)1, kSmiTagMask);
|
| + DCHECK_EQ((uint64_t)1, kSmiTagMask);
|
| #endif
|
| or_(at, reg1, reg2);
|
| JumpIfNotSmi(at, on_not_both_smi);
|
| @@ -5127,9 +5127,9 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
|
| STATIC_ASSERT(kSmiTag == 0);
|
| // TODO(plind): Find some better way to fix this assert issue.
|
| #if defined(__APPLE__)
|
| - ASSERT_EQ(1, kSmiTagMask);
|
| + DCHECK_EQ(1, kSmiTagMask);
|
| #else
|
| - ASSERT_EQ((uint64_t)1, kSmiTagMask);
|
| + DCHECK_EQ((uint64_t)1, kSmiTagMask);
|
| #endif
|
| // Both Smi tags must be 1 (not Smi).
|
| and_(at, reg1, reg2);
|
| @@ -5202,7 +5202,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
|
|
|
| void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
|
| if (emit_debug_code()) {
|
| - ASSERT(!reg.is(at));
|
| + DCHECK(!reg.is(at));
|
| LoadRoot(at, index);
|
| Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
|
| }
|
| @@ -5349,7 +5349,7 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
|
| kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
|
| const int kFlatAsciiStringTag =
|
| kStringTag | kOneByteStringTag | kSeqStringTag;
|
| - ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
|
| + DCHECK(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
|
| andi(scratch1, first, kFlatAsciiStringMask);
|
| Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
|
| andi(scratch2, second, kFlatAsciiStringMask);
|
| @@ -5407,7 +5407,7 @@ void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
|
| ld(at, FieldMemOperand(string, String::kLengthOffset));
|
| Check(lt, kIndexIsTooLarge, index, Operand(at));
|
|
|
| - ASSERT(Smi::FromInt(0) == 0);
|
| + DCHECK(Smi::FromInt(0) == 0);
|
| Check(ge, kIndexIsNegative, index, Operand(zero_reg));
|
| }
|
|
|
| @@ -5431,7 +5431,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
|
| // and the original value of sp.
|
| mov(scratch, sp);
|
| Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
|
| - ASSERT(IsPowerOf2(frame_alignment));
|
| + DCHECK(IsPowerOf2(frame_alignment));
|
| And(sp, sp, Operand(-frame_alignment));
|
| sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
|
| } else {
|
| @@ -5476,7 +5476,7 @@ void MacroAssembler::CallCFunction(Register function,
|
| void MacroAssembler::CallCFunctionHelper(Register function,
|
| int num_reg_arguments,
|
| int num_double_arguments) {
|
| - ASSERT(has_frame());
|
| + DCHECK(has_frame());
|
| // Make sure that the stack is aligned before calling a C function unless
|
| // running in the simulator. The simulator has its own alignment check which
|
| // provides more information.
|
| @@ -5488,7 +5488,7 @@ void MacroAssembler::CallCFunctionHelper(Register function,
|
| int frame_alignment = base::OS::ActivationFrameAlignment();
|
| int frame_alignment_mask = frame_alignment - 1;
|
| if (frame_alignment > kPointerSize) {
|
| - ASSERT(IsPowerOf2(frame_alignment));
|
| + DCHECK(IsPowerOf2(frame_alignment));
|
| Label alignment_as_expected;
|
| And(at, sp, Operand(frame_alignment_mask));
|
| Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
|
| @@ -5643,7 +5643,7 @@ void MacroAssembler::JumpIfBlack(Register object,
|
| Register scratch1,
|
| Label* on_black) {
|
| HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
|
| - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
| + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
| }
|
|
|
|
|
| @@ -5653,8 +5653,8 @@ void MacroAssembler::HasColor(Register object,
|
| Label* has_color,
|
| int first_bit,
|
| int second_bit) {
|
| - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
|
| - ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
|
| + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
|
| + DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
|
|
|
| GetMarkBits(object, bitmap_scratch, mask_scratch);
|
|
|
| @@ -5678,13 +5678,13 @@ void MacroAssembler::HasColor(Register object,
|
| void MacroAssembler::JumpIfDataObject(Register value,
|
| Register scratch,
|
| Label* not_data_object) {
|
| - ASSERT(!AreAliased(value, scratch, t8, no_reg));
|
| + DCHECK(!AreAliased(value, scratch, t8, no_reg));
|
| Label is_data_object;
|
| ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
|
| LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
|
| Branch(&is_data_object, eq, t8, Operand(scratch));
|
| - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
| - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
| + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
| + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
| // If it's a string and it's not a cons string then it's an object containing
|
| // no GC pointers.
|
| lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
| @@ -5697,7 +5697,7 @@ void MacroAssembler::JumpIfDataObject(Register value,
|
| void MacroAssembler::GetMarkBits(Register addr_reg,
|
| Register bitmap_reg,
|
| Register mask_reg) {
|
| - ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
|
| + DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
|
| // addr_reg is divided into fields:
|
| // |63 page base 20|19 high 8|7 shift 3|2 0|
|
| // 'high' gives the index of the cell holding color bits for the object.
|
| @@ -5719,14 +5719,14 @@ void MacroAssembler::EnsureNotWhite(
|
| Register mask_scratch,
|
| Register load_scratch,
|
| Label* value_is_white_and_not_data) {
|
| - ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
|
| + DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
|
| GetMarkBits(value, bitmap_scratch, mask_scratch);
|
|
|
| // If the value is black or grey we don't need to do anything.
|
| - ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
|
| - ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
| - ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
|
| - ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
|
| + DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
|
| + DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
|
| + DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
|
| + DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
|
|
|
| Label done;
|
|
|
| @@ -5766,8 +5766,8 @@ void MacroAssembler::EnsureNotWhite(
|
| }
|
|
|
| // Check for strings.
|
| - ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
| - ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
| + DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
|
| + DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
|
| // If it's a string and it's not a cons string then it's an object containing
|
| // no GC pointers.
|
| Register instance_type = load_scratch;
|
| @@ -5779,8 +5779,8 @@ void MacroAssembler::EnsureNotWhite(
|
| // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
|
| // External strings are the only ones with the kExternalStringTag bit
|
| // set.
|
| - ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
|
| - ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
|
| + DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
|
| + DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
|
| And(t8, instance_type, Operand(kExternalStringTag));
|
| {
|
| Label skip;
|
| @@ -5794,8 +5794,8 @@ void MacroAssembler::EnsureNotWhite(
|
| // For ASCII (char-size of 1) we shift the smi tag away to get the length.
|
| // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
|
| // getting the length multiplied by 2.
|
| - ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
|
| - ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
|
| + DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
|
| + DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
|
| lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
|
| And(t8, instance_type, Operand(kStringEncodingMask));
|
| {
|
| @@ -5806,7 +5806,7 @@ void MacroAssembler::EnsureNotWhite(
|
| bind(&skip);
|
| }
|
| Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
|
| - ASSERT(!length.is(t8));
|
| + DCHECK(!length.is(t8));
|
| And(length, length, Operand(~kObjectAlignmentMask));
|
|
|
| bind(&is_data_object);
|
| @@ -5887,7 +5887,7 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
|
|
|
|
|
| void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
|
| - ASSERT(!output_reg.is(input_reg));
|
| + DCHECK(!output_reg.is(input_reg));
|
| Label done;
|
| li(output_reg, Operand(255));
|
| // Normal branch: nop in delay slot.
|
| @@ -5982,7 +5982,7 @@ void MacroAssembler::JumpIfDictionaryInPrototypeChain(
|
| Register scratch0,
|
| Register scratch1,
|
| Label* found) {
|
| - ASSERT(!scratch1.is(scratch0));
|
| + DCHECK(!scratch1.is(scratch0));
|
| Factory* factory = isolate()->factory();
|
| Register current = scratch0;
|
| Label loop_again;
|
| @@ -6038,7 +6038,7 @@ CodePatcher::CodePatcher(byte* address,
|
| // Create a new macro assembler pointing to the address of the code to patch.
|
| // The size is adjusted with kGap in order for the assembler to generate size
|
| // bytes of instructions without failing with buffer size constraints.
|
| - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
| + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
| }
|
|
|
|
|
| @@ -6048,8 +6048,8 @@ CodePatcher::~CodePatcher() {
|
| CpuFeatures::FlushICache(address_, size_);
|
| }
|
| // Check that the code was patched as expected.
|
| - ASSERT(masm_.pc_ == address_ + size_);
|
| - ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
| + DCHECK(masm_.pc_ == address_ + size_);
|
| + DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
|
| }
|
|
|
|
|
| @@ -6065,13 +6065,13 @@ void CodePatcher::Emit(Address addr) {
|
|
|
| void CodePatcher::ChangeBranchCondition(Condition cond) {
|
| Instr instr = Assembler::instr_at(masm_.pc_);
|
| - ASSERT(Assembler::IsBranch(instr));
|
| + DCHECK(Assembler::IsBranch(instr));
|
| uint32_t opcode = Assembler::GetOpcodeField(instr);
|
| // Currently only the 'eq' and 'ne' cond values are supported and the simple
|
| // branch instructions (with opcode being the branch type).
|
| // There are some special cases (see Assembler::IsBranch()) so extending this
|
| // would be tricky.
|
| - ASSERT(opcode == BEQ ||
|
| + DCHECK(opcode == BEQ ||
|
| opcode == BNE ||
|
| opcode == BLEZ ||
|
| opcode == BGTZ ||
|
| @@ -6088,9 +6088,9 @@ void CodePatcher::ChangeBranchCondition(Condition cond) {
|
| void MacroAssembler::TruncatingDiv(Register result,
|
| Register dividend,
|
| int32_t divisor) {
|
| - ASSERT(!dividend.is(result));
|
| - ASSERT(!dividend.is(at));
|
| - ASSERT(!result.is(at));
|
| + DCHECK(!dividend.is(result));
|
| + DCHECK(!dividend.is(at));
|
| + DCHECK(!result.is(at));
|
| MultiplierAndShift ms(divisor);
|
| li(at, Operand(ms.multiplier()));
|
| Mulh(result, dividend, Operand(at));
|
|
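| A note on the recurring DCHECK(!rs.is(at)) pattern converted above: when an
| Operand is not a register (or its immediate does not fit the instruction's
| 16-bit field), these helpers materialize it with li(at, rt), clobbering the
| assembler temporary 'at'. The assertion documents that callers must not pass
| 'at' as the other source. A hedged caller-side sketch, using V8's usual
| '__ masm->' shorthand (register names per the MIPS64 port):
|
| // Fine: the 32-bit immediate is loaded into 'at' internally, and a0 != at.
| __ Daddu(v0, a0, Operand(0x12345678));
|
| // Would trip DCHECK(!rs.is(at)) in debug builds: rs aliases the scratch
| // register that li() is about to overwrite.
| // __ Daddu(v0, at, Operand(0x12345678));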
|