| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 3422 matching lines...) |
| 3433 | 3433 |
| 3434 bind(&maybe_nan); | 3434 bind(&maybe_nan); |
| 3435 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise | 3435 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise |
| 3436 // it's an Infinity, and the non-NaN code path applies. | 3436 // it's an Infinity, and the non-NaN code path applies. |
| 3437 Branch(&is_nan, gt, exponent_reg, Operand(scratch1)); | 3437 Branch(&is_nan, gt, exponent_reg, Operand(scratch1)); |
| 3438 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); | 3438 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); |
| 3439 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); | 3439 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); |
| 3440 bind(&is_nan); | 3440 bind(&is_nan); |
| 3441 // Load canonical NaN for storing into the double array. | 3441 // Load canonical NaN for storing into the double array. |
| 3442 LoadRoot(at, Heap::kNanValueRootIndex); | 3442 LoadRoot(at, Heap::kNanValueRootIndex); |
| 3443 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset)); | 3443 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kValueOffset)); |
| 3444 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset)); | 3444 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kValueOffset + 4)); |
| 3445 jmp(&have_double_value); | 3445 jmp(&have_double_value); |
| 3446 | 3446 |
| 3447 bind(&smi_value); | 3447 bind(&smi_value); |
| 3448 Addu(scratch1, elements_reg, | 3448 Addu(scratch1, elements_reg, |
| 3449 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - | 3449 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - |
| 3450 elements_offset)); | 3450 elements_offset)); |
| 3451 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); | 3451 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); |
| 3452 Addu(scratch1, scratch1, scratch2); | 3452 Addu(scratch1, scratch1, scratch2); |
| 3453 // scratch1 is now effective address of the double element | 3453 // scratch1 is now effective address of the double element |
| 3454 | 3454 |
| (...skipping 884 matching lines...) |
| 4339 Branch(&L, cc, rs, rt); | 4339 Branch(&L, cc, rs, rt); |
| 4340 Abort(reason); | 4340 Abort(reason); |
| 4341 // Will not return here. | 4341 // Will not return here. |
| 4342 bind(&L); | 4342 bind(&L); |
| 4343 } | 4343 } |
| 4344 | 4344 |
| 4345 | 4345 |
| 4346 void MacroAssembler::Abort(BailoutReason reason) { | 4346 void MacroAssembler::Abort(BailoutReason reason) { |
| 4347 Label abort_start; | 4347 Label abort_start; |
| 4348 bind(&abort_start); | 4348 bind(&abort_start); |
| 4349 // We want to pass the msg string like a smi to avoid GC |
| 4350 // problems, however msg is not guaranteed to be aligned |
| 4351 // properly. Instead, we pass an aligned pointer that is |
| 4352 // a proper v8 smi, but also pass the alignment difference |
| 4353 // from the real pointer as a smi. |
| 4354 const char* msg = GetBailoutReason(reason); |
| 4355 intptr_t p1 = reinterpret_cast<intptr_t>(msg); |
| 4356 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; |
| 4357 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); |
| 4349 #ifdef DEBUG | 4358 #ifdef DEBUG |
| 4350 const char* msg = GetBailoutReason(reason); | |
| 4351 if (msg != NULL) { | 4359 if (msg != NULL) { |
| 4352 RecordComment("Abort message: "); | 4360 RecordComment("Abort message: "); |
| 4353 RecordComment(msg); | 4361 RecordComment(msg); |
| 4354 } | 4362 } |
| 4355 | 4363 |
| 4356 if (FLAG_trap_on_abort) { | 4364 if (FLAG_trap_on_abort) { |
| 4357 stop(msg); | 4365 stop(msg); |
| 4358 return; | 4366 return; |
| 4359 } | 4367 } |
| 4360 #endif | 4368 #endif |
| 4361 | 4369 |
| 4362 li(a0, Operand(Smi::FromInt(reason))); | 4370 li(a0, Operand(p0)); |
| 4371 push(a0); |
| 4372 li(a0, Operand(Smi::FromInt(p1 - p0))); |
| 4363 push(a0); | 4373 push(a0); |
| 4364 // Disable stub call restrictions to always allow calls to abort. | 4374 // Disable stub call restrictions to always allow calls to abort. |
| 4365 if (!has_frame_) { | 4375 if (!has_frame_) { |
| 4366 // We don't actually want to generate a pile of code for this, so just | 4376 // We don't actually want to generate a pile of code for this, so just |
| 4367 // claim there is a stack frame, without generating one. | 4377 // claim there is a stack frame, without generating one. |
| 4368 FrameScope scope(this, StackFrame::NONE); | 4378 FrameScope scope(this, StackFrame::NONE); |
| 4369 CallRuntime(Runtime::kAbort, 1); | 4379 CallRuntime(Runtime::kAbort, 2); |
| 4370 } else { | 4380 } else { |
| 4371 CallRuntime(Runtime::kAbort, 1); | 4381 CallRuntime(Runtime::kAbort, 2); |
| 4372 } | 4382 } |
| 4373 // Will not return here. | 4383 // Will not return here. |
| 4374 if (is_trampoline_pool_blocked()) { | 4384 if (is_trampoline_pool_blocked()) { |
| 4375 // If the calling code cares about the exact number of | 4385 // If the calling code cares about the exact number of |
| 4376 // instructions generated, we insert padding here to keep the size | 4386 // instructions generated, we insert padding here to keep the size |
| 4377 // of the Abort macro constant. | 4387 // of the Abort macro constant. |
| 4378 // Currently in debug mode with debug_code enabled the number of | 4388 // Currently in debug mode with debug_code enabled the number of |
| 4379 // generated instructions is 10, so we use this as a maximum value. | 4389 // generated instructions is 14, so we use this as a maximum value. |
| 4380 static const int kExpectedAbortInstructions = 10; | 4390 static const int kExpectedAbortInstructions = 14; |
| 4381 int abort_instructions = InstructionsGeneratedSince(&abort_start); | 4391 int abort_instructions = InstructionsGeneratedSince(&abort_start); |
| 4382 ASSERT(abort_instructions <= kExpectedAbortInstructions); | 4392 ASSERT(abort_instructions <= kExpectedAbortInstructions); |
| 4383 while (abort_instructions++ < kExpectedAbortInstructions) { | 4393 while (abort_instructions++ < kExpectedAbortInstructions) { |
| 4384 nop(); | 4394 nop(); |
| 4385 } | 4395 } |
| 4386 } | 4396 } |
| 4387 } | 4397 } |
| 4388 | 4398 |
| 4389 | 4399 |
| 4390 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 4400 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
| (...skipping 32 matching lines...) |
| 4423 lw(at, FieldMemOperand(scratch, offset)); | 4433 lw(at, FieldMemOperand(scratch, offset)); |
| 4424 Branch(no_map_match, ne, map_in_out, Operand(at)); | 4434 Branch(no_map_match, ne, map_in_out, Operand(at)); |
| 4425 | 4435 |
| 4426 // Use the transitioned cached map. | 4436 // Use the transitioned cached map. |
| 4427 offset = transitioned_kind * kPointerSize + | 4437 offset = transitioned_kind * kPointerSize + |
| 4428 FixedArrayBase::kHeaderSize; | 4438 FixedArrayBase::kHeaderSize; |
| 4429 lw(map_in_out, FieldMemOperand(scratch, offset)); | 4439 lw(map_in_out, FieldMemOperand(scratch, offset)); |
| 4430 } | 4440 } |
| 4431 | 4441 |
| 4432 | 4442 |
| 4443 void MacroAssembler::LoadInitialArrayMap( |
| 4444 Register function_in, Register scratch, |
| 4445 Register map_out, bool can_have_holes) { |
| 4446 ASSERT(!function_in.is(map_out)); |
| 4447 Label done; |
| 4448 lw(map_out, FieldMemOperand(function_in, |
| 4449 JSFunction::kPrototypeOrInitialMapOffset)); |
| 4450 if (!FLAG_smi_only_arrays) { |
| 4451 ElementsKind kind = can_have_holes ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; |
| 4452 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
| 4453 kind, |
| 4454 map_out, |
| 4455 scratch, |
| 4456 &done); |
| 4457 } else if (can_have_holes) { |
| 4458 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
| 4459 FAST_HOLEY_SMI_ELEMENTS, |
| 4460 map_out, |
| 4461 scratch, |
| 4462 &done); |
| 4463 } |
| 4464 bind(&done); |
| 4465 } |
| 4466 |
| 4467 |
| 4433 void MacroAssembler::LoadGlobalFunction(int index, Register function) { | 4468 void MacroAssembler::LoadGlobalFunction(int index, Register function) { |
| 4434 // Load the global or builtins object from the current context. | 4469 // Load the global or builtins object from the current context. |
| 4435 lw(function, | 4470 lw(function, |
| 4436 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 4471 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 4437 // Load the native context from the global or builtins object. | 4472 // Load the native context from the global or builtins object. |
| 4438 lw(function, FieldMemOperand(function, | 4473 lw(function, FieldMemOperand(function, |
| 4439 GlobalObject::kNativeContextOffset)); | 4474 GlobalObject::kNativeContextOffset)); |
| 4440 // Load the function from the native context. | 4475 // Load the function from the native context. |
| 4441 lw(function, MemOperand(function, Context::SlotOffset(index))); | 4476 lw(function, MemOperand(function, Context::SlotOffset(index))); |
| 4442 } | 4477 } |
| (...skipping 1272 matching lines...) |
| 5715 opcode == BGTZL); | 5750 opcode == BGTZL); |
| 5716 opcode = (cond == eq) ? BEQ : BNE; | 5751 opcode = (cond == eq) ? BEQ : BNE; |
| 5717 instr = (instr & ~kOpcodeMask) | opcode; | 5752 instr = (instr & ~kOpcodeMask) | opcode; |
| 5718 masm_.emit(instr); | 5753 masm_.emit(instr); |
| 5719 } | 5754 } |
| 5720 | 5755 |
| 5721 | 5756 |
| 5722 } } // namespace v8::internal | 5757 } } // namespace v8::internal |
| 5723 | 5758 |
| 5724 #endif // V8_TARGET_ARCH_MIPS | 5759 #endif // V8_TARGET_ARCH_MIPS |
| OLD | NEW |
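
For context on the new Abort calling convention added on the right-hand side (new lines 4349-4357 and 4370-4373): the comment there describes rounding the message pointer down to smi alignment so the GC treats it as a tagged integer, and passing the lost alignment bits separately. Below is a minimal standalone C++ sketch of that round trip, under the assumption of 32-bit smi tagging (kSmiTag == 0, kSmiTagMask == 1); the helper names are invented for illustration, and the real code pushes the delta wrapped in Smi::FromInt rather than as a raw integer.

```cpp
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the smi tagging constants on a 32-bit target;
// these are assumptions for the sketch, not the definitions from v8globals.h.
const intptr_t kSmiTag = 0;
const intptr_t kSmiTagMask = 1;

// Split an arbitrary char* into (p0, delta): p0 has its tag bit cleared, so it
// passes a smi check and is safe to hand to the GC; delta = p1 - p0 is 0 or 1.
void EncodeMessagePointer(const char* msg, intptr_t* p0, intptr_t* delta) {
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  *p0 = (p1 & ~kSmiTagMask) + kSmiTag;     // aligned value that looks like a smi
  *delta = p1 - *p0;                       // alignment difference, itself smi-safe
  assert((*p0 & kSmiTagMask) == kSmiTag);  // p0 would satisfy IsSmi()
}

// The runtime side (Runtime::kAbort with 2 arguments) reverses the split
// to recover the original, possibly unaligned message pointer.
const char* DecodeMessagePointer(intptr_t p0, intptr_t delta) {
  return reinterpret_cast<const char*>(p0 + delta);
}

int main() {
  const char* msg = "unexpected value in register";
  intptr_t p0 = 0;
  intptr_t delta = 0;
  EncodeMessagePointer(msg, &p0, &delta);
  // Round trip: the reconstructed pointer is the original message string.
  std::printf("%s\n", DecodeMessagePointer(p0, delta));
  return 0;
}
```

The point of the split is that neither pushed value can be mistaken for a heap pointer during a GC that happens while the abort call is on the stack, while the two together still identify the exact C string to print.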