Index: src/mips/assembler-mips.h |
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h |
index 5a6e271556955c247d9bb6dd4cefc940a19b1deb..1a49148b8892cb75e5150d8a94f38668f8cb63dd 100644 |
--- a/src/mips/assembler-mips.h |
+++ b/src/mips/assembler-mips.h |
@@ -30,7 +30,7 @@ |
// The original source code covered by the above license has been |
// modified significantly by Google Inc. |
-// Copyright 2010 the V8 project authors. All rights reserved. |
+// Copyright 2011 the V8 project authors. All rights reserved. |
#ifndef V8_MIPS_ASSEMBLER_MIPS_H_ |
@@ -67,12 +67,13 @@ namespace internal { |
// ----------------------------------------------------------------------------- |
-// Implementation of Register and FPURegister |
+// Implementation of Register and FPURegister. |
// Core register. |
struct Register { |
static const int kNumRegisters = v8::internal::kNumRegisters; |
- static const int kNumAllocatableRegisters = 14; // v0 through t7 |
+ static const int kNumAllocatableRegisters = 14; // v0 through t7. |
+ static const int kSizeInBytes = 4; |
static int ToAllocationIndex(Register reg) { |
return reg.code() - 2; // zero_reg and 'at' are skipped. |
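
For reference, a minimal standalone sketch (not V8 code) of the allocation-index arithmetic above: zero_reg (code 0) and 'at' (code 1) are never allocatable, so v0 (code 2) through t7 (code 15) map onto indices 0 through 13.

#include <cassert>

const int kNumAllocatableRegisters = 14;  // v0 through t7.

// Same mapping as Register::ToAllocationIndex in the hunk above.
int ToAllocationIndex(int reg_code) {
  return reg_code - 2;  // zero_reg and 'at' are skipped.
}

int main() {
  assert(ToAllocationIndex(2) == 0);                              // v0.
  assert(ToAllocationIndex(15) == kNumAllocatableRegisters - 1);  // t7.
  return 0;
}
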
@@ -267,9 +268,6 @@ const FPURegister f31 = { 31 }; |
// FPU (coprocessor 1) control registers. |
// Currently only FCSR (#31) is implemented. |
struct FPUControlRegister { |
- static const int kFCSRRegister = 31; |
- static const int kInvalidFPUControlRegister = -1; |
- |
bool is_valid() const { return code_ == kFCSRRegister; } |
bool is(FPUControlRegister creg) const { return code_ == creg.code_; } |
int code() const { |
@@ -288,7 +286,7 @@ struct FPUControlRegister { |
int code_; |
}; |
-const FPUControlRegister no_fpucreg = { -1 }; |
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; |
const FPUControlRegister FCSR = { kFCSRRegister }; |
@@ -318,7 +316,7 @@ class Operand BASE_EMBEDDED { |
private: |
Register rm_; |
- int32_t imm32_; // Valid if rm_ == no_reg |
+ int32_t imm32_; // Valid if rm_ == no_reg. |
RelocInfo::Mode rmode_; |
friend class Assembler; |
@@ -342,58 +340,98 @@ class MemOperand : public Operand { |
// CpuFeatures keeps track of which features are supported by the target CPU. |
// Supported features must be enabled by a Scope before use. |
-class CpuFeatures { |
+class CpuFeatures : public AllStatic { |
public: |
// Detect features of the target CPU. Set safe defaults if the serializer |
// is enabled (snapshots must be portable). |
- void Probe(bool portable); |
+ static void Probe(); |
// Check whether a feature is supported by the target CPU. |
- bool IsSupported(CpuFeature f) const { |
+ static bool IsSupported(CpuFeature f) { |
+ ASSERT(initialized_); |
if (f == FPU && !FLAG_enable_fpu) return false; |
return (supported_ & (1u << f)) != 0; |
} |
+ |
+#ifdef DEBUG |
// Check whether a feature is currently enabled. |
- bool IsEnabled(CpuFeature f) const { |
- return (enabled_ & (1u << f)) != 0; |
+ static bool IsEnabled(CpuFeature f) { |
+ ASSERT(initialized_); |
+ Isolate* isolate = Isolate::UncheckedCurrent(); |
+ if (isolate == NULL) { |
+ // When no isolate is available, work as if we're running in |
+ // release mode. |
+ return IsSupported(f); |
+ } |
+ unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); |
+ return (enabled & (1u << f)) != 0; |
} |
+#endif |
// Enable a specified feature within a scope. |
class Scope BASE_EMBEDDED { |
#ifdef DEBUG |
public: |
- explicit Scope(CpuFeature f) |
- : cpu_features_(Isolate::Current()->cpu_features()), |
- isolate_(Isolate::Current()) { |
- ASSERT(cpu_features_->IsSupported(f)); |
+ explicit Scope(CpuFeature f) { |
+ unsigned mask = 1u << f; |
+ ASSERT(CpuFeatures::IsSupported(f)); |
ASSERT(!Serializer::enabled() || |
- (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0); |
- old_enabled_ = cpu_features_->enabled_; |
- cpu_features_->enabled_ |= 1u << f; |
+ (CpuFeatures::found_by_runtime_probing_ & mask) == 0); |
+ isolate_ = Isolate::UncheckedCurrent(); |
+ old_enabled_ = 0; |
+ if (isolate_ != NULL) { |
+ old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); |
+ isolate_->set_enabled_cpu_features(old_enabled_ | mask); |
+ } |
} |
~Scope() { |
- ASSERT_EQ(Isolate::Current(), isolate_); |
- cpu_features_->enabled_ = old_enabled_; |
- } |
+ ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); |
+ if (isolate_ != NULL) { |
+ isolate_->set_enabled_cpu_features(old_enabled_); |
+ } |
+ } |
private: |
- unsigned old_enabled_; |
- CpuFeatures* cpu_features_; |
Isolate* isolate_; |
+ unsigned old_enabled_; |
#else |
public: |
explicit Scope(CpuFeature f) {} |
#endif |
}; |
- private: |
- CpuFeatures(); |
+ class TryForceFeatureScope BASE_EMBEDDED { |
+ public: |
+ explicit TryForceFeatureScope(CpuFeature f) |
+ : old_supported_(CpuFeatures::supported_) { |
+ if (CanForce()) { |
+ CpuFeatures::supported_ |= (1u << f); |
+ } |
+ } |
- unsigned supported_; |
- unsigned enabled_; |
- unsigned found_by_runtime_probing_; |
+ ~TryForceFeatureScope() { |
+ if (CanForce()) { |
+ CpuFeatures::supported_ = old_supported_; |
+ } |
+ } |
- friend class Isolate; |
+ private: |
+ static bool CanForce() { |
+ // It's only safe to temporarily force support of CPU features |
+ // when there's only a single isolate, which is guaranteed when |
+ // the serializer is enabled. |
+ return Serializer::enabled(); |
+ } |
+ |
+ const unsigned old_supported_; |
+ }; |
+ |
+ private: |
+#ifdef DEBUG |
+ static bool initialized_; |
+#endif |
+ static unsigned supported_; |
+ static unsigned found_by_runtime_probing_; |
DISALLOW_COPY_AND_ASSIGN(CpuFeatures); |
}; |
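
A minimal standalone sketch (not V8 code) of the pattern the hunk above moves to: an all-static feature bitmask probed once, queried with IsSupported(), and temporarily enabled through an RAII Scope. The per-isolate storage used by the real IsEnabled()/Scope (Isolate::enabled_cpu_features()) is simplified here to a plain static.

#include <cassert>

enum Feature { kFPU = 0 };

class Features {
 public:
  static void Probe() { supported_ |= 1u << kFPU; }  // Pretend FPU was found.
  static bool IsSupported(Feature f) { return (supported_ & (1u << f)) != 0; }
  static bool IsEnabled(Feature f) { return (enabled_ & (1u << f)) != 0; }

  // RAII scope: the feature is enabled only while the Scope is alive.
  class Scope {
   public:
    explicit Scope(Feature f) : old_enabled_(enabled_) {
      assert(IsSupported(f));
      enabled_ |= 1u << f;
    }
    ~Scope() { enabled_ = old_enabled_; }
   private:
    unsigned old_enabled_;
  };

 private:
  static unsigned supported_;
  static unsigned enabled_;
};

unsigned Features::supported_ = 0;
unsigned Features::enabled_ = 0;

int main() {
  Features::Probe();
  if (Features::IsSupported(kFPU)) {
    Features::Scope scope(kFPU);
    assert(Features::IsEnabled(kFPU));   // Enabled inside the scope...
  }
  assert(!Features::IsEnabled(kFPU));    // ...and restored afterwards.
  return 0;
}

TryForceFeatureScope in the hunk applies the same save/restore idea to supported_ itself, guarded by Serializer::enabled() so it only happens when a single isolate is guaranteed.
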
@@ -414,7 +452,7 @@ class Assembler : public AssemblerBase { |
// for code generation and assumes its size to be buffer_size. If the buffer |
// is too small, a fatal error occurs. No deallocation of the buffer is done |
// upon destruction of the assembler. |
- Assembler(void* buffer, int buffer_size); |
+ Assembler(Isolate* isolate, void* buffer, int buffer_size); |
~Assembler(); |
// Overrides the default provided by FLAG_debug_code. |
@@ -439,10 +477,10 @@ class Assembler : public AssemblerBase { |
// |
// Note: The same Label can be used for forward and backward branches |
// but it may be bound only once. |
- void bind(Label* L); // binds an unbound label L to the current code position |
+ void bind(Label* L); // Binds an unbound label L to current code position. |
- // Returns the branch offset to the given label from the current code position |
- // Links the label to the current position if it is still unbound |
+ // Returns the branch offset to the given label from the current code |
+ // position. Links the label to the current position if it is still unbound. |
// Manages the jump elimination optimization if the second parameter is true. |
int32_t branch_offset(Label* L, bool jump_elimination_allowed); |
int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { |
@@ -541,14 +579,14 @@ class Assembler : public AssemblerBase { |
FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED |
}; |
- // type == 0 is the default non-marking type. |
+ // Type == 0 is the default non-marking type. |
void nop(unsigned int type = 0) { |
ASSERT(type < 32); |
sll(zero_reg, zero_reg, type, true); |
} |
- //------- Branch and jump instructions -------- |
+ // ------- Branch and jump instructions -------- |
// We don't use likely variant of instructions. |
void b(int16_t offset); |
void b(Label* L) { b(branch_offset(L, false)>>2); } |
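
A standalone sketch (not V8 code) of the marked-nop encoding used by nop(type) above: sll zero_reg, zero_reg, type leaves every field zero except the 5-bit shift amount (sa), so type == 0 yields the canonical all-zero nop and a marker type can be recovered from the instruction word.

#include <cassert>
#include <cstdint>

const int kSaShift = 6;  // sa occupies bits 10..6 of an R-type instruction.

// sll zero_reg, zero_reg, type: opcode, rs, rt, rd and funct are all zero.
uint32_t MarkedNop(unsigned type) {
  assert(type < 32);      // sa is a 5-bit field.
  return type << kSaShift;
}

bool IsNop(uint32_t instr, unsigned type) { return instr == MarkedNop(type); }

int main() {
  assert(MarkedNop(0) == 0u);       // Plain nop.
  assert(IsNop(MarkedNop(7), 7));   // Marker type 7.
  return 0;
}
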
@@ -571,7 +609,7 @@ class Assembler : public AssemblerBase { |
} |
// Never use the int16_t b(l)cond version with a branch offset |
- // instead of using the Label* version. See Twiki for infos. |
+ // instead of using the Label* version. |
// Jump targets must be in the current 256 MB-aligned region, i.e., 28 bits. |
void j(int32_t target); |
@@ -761,6 +799,10 @@ class Assembler : public AssemblerBase { |
// Mark address of a debug break slot. |
void RecordDebugBreakSlot(); |
+ // Record the AST id of the CallIC being compiled, so that it can be placed |
+ // in the relocation information. |
+ void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; } |
+ |
// Record a comment relocation entry that can be used by a disassembler. |
// Use --code-comments to enable. |
void RecordComment(const char* msg); |
@@ -804,6 +846,8 @@ class Assembler : public AssemblerBase { |
// Check if an instruction is a branch of some kind. |
static bool IsBranch(Instr instr); |
+ static bool IsBeq(Instr instr); |
+ static bool IsBne(Instr instr); |
static bool IsNop(Instr instr, unsigned int type); |
static bool IsPop(Instr instr); |
@@ -813,7 +857,21 @@ class Assembler : public AssemblerBase { |
static bool IsLwRegFpNegOffset(Instr instr); |
static bool IsSwRegFpNegOffset(Instr instr); |
- static Register GetRt(Instr instr); |
+ static Register GetRtReg(Instr instr); |
+ static Register GetRsReg(Instr instr); |
+ static Register GetRdReg(Instr instr); |
+ |
+ static uint32_t GetRt(Instr instr); |
+ static uint32_t GetRtField(Instr instr); |
+ static uint32_t GetRs(Instr instr); |
+ static uint32_t GetRsField(Instr instr); |
+ static uint32_t GetRd(Instr instr); |
+ static uint32_t GetRdField(Instr instr); |
+ static uint32_t GetSa(Instr instr); |
+ static uint32_t GetSaField(Instr instr); |
+ static uint32_t GetOpcodeField(Instr instr); |
+ static uint32_t GetImmediate16(Instr instr); |
+ static uint32_t GetLabelConst(Instr instr); |
static int32_t GetBranchOffset(Instr instr); |
static bool IsLw(Instr instr); |
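
The split between GetRt()/GetRtField() and friends above presumably mirrors the usual convention: the plain accessor returns the value shifted down to bits 0..4, while the *Field variant returns the bits left in place in the instruction word. A standalone sketch (not V8 code) using the MIPS32 I-type layout (opcode [31:26], rs [25:21], rt [20:16], immediate [15:0]), including an IsBeq-style opcode check:

#include <cassert>
#include <cstdint>

typedef uint32_t Instr;

const int kOpcodeShift = 26;
const uint32_t kOpcodeMask = 0x3fu << kOpcodeShift;
const int kRtShift = 16;
const uint32_t kRtFieldMask = 0x1fu << kRtShift;
const uint32_t kImm16Mask = 0xffffu;
const uint32_t BEQ = 0x04u << kOpcodeShift;  // beq opcode, in field position.

uint32_t GetOpcodeField(Instr instr) { return instr & kOpcodeMask; }
uint32_t GetRt(Instr instr) { return (instr & kRtFieldMask) >> kRtShift; }
uint32_t GetRtField(Instr instr) { return instr & kRtFieldMask; }
uint32_t GetImmediate16(Instr instr) { return instr & kImm16Mask; }
bool IsBeq(Instr instr) { return GetOpcodeField(instr) == BEQ; }

int main() {
  // beq a0, a1, <one instruction forward>:
  // opcode 0x04, rs = 4 (a0), rt = 5 (a1), immediate = 1.
  Instr beq = BEQ | (4u << 21) | (5u << kRtShift) | 1u;
  assert(IsBeq(beq));
  assert(GetRt(beq) == 5u);                    // Register number, bits 0..4.
  assert(GetRtField(beq) == (5u << kRtShift)); // Raw bits, still in place.
  assert(GetImmediate16(beq) == 1u);
  return 0;
}
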
@@ -825,9 +883,16 @@ class Assembler : public AssemblerBase { |
static bool IsAddImmediate(Instr instr); |
static Instr SetAddImmediateOffset(Instr instr, int16_t offset); |
+ static bool IsAndImmediate(Instr instr); |
+ |
void CheckTrampolinePool(bool force_emit = false); |
protected: |
+ // Relocation for a type-recording IC has the AST id added to it. This |
+ // member variable is a way to pass the information from the call site to |
+ // the relocation info. |
+ unsigned ast_id_for_reloc_info_; |
+ |
bool emit_debug_code() const { return emit_debug_code_; } |
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } |
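
A standalone sketch (not V8 code) of the hand-off that RecordAstId() and ast_id_for_reloc_info_ implement above: the call site stashes the AST id in a member, and the next emitted call attaches it to its relocation entry. The names below (MiniAssembler, EmitCall, kNoAstId) are illustrative only.

#include <cstdio>
#include <vector>

const unsigned kNoAstId = 0xffffffffu;  // Sentinel: no id pending.

class MiniAssembler {
 public:
  MiniAssembler() : pending_ast_id_(kNoAstId) {}

  // Call site records the id of the type-recording IC being compiled.
  void RecordAstId(unsigned ast_id) { pending_ast_id_ = ast_id; }

  // Emitting the call consumes the pending id and attaches it to the
  // relocation entry (a plain vector stands in for the reloc stream).
  void EmitCall() {
    reloc_ast_ids_.push_back(pending_ast_id_);
    pending_ast_id_ = kNoAstId;
  }

  void Print() const {
    for (size_t i = 0; i < reloc_ast_ids_.size(); ++i)
      std::printf("call %u -> ast id %u\n",
                  static_cast<unsigned>(i), reloc_ast_ids_[i]);
  }

 private:
  unsigned pending_ast_id_;
  std::vector<unsigned> reloc_ast_ids_;
};

int main() {
  MiniAssembler masm;
  masm.RecordAstId(42);  // CallIC with AST id 42.
  masm.EmitCall();       // Gets id 42 in its reloc info.
  masm.EmitCall();       // No id recorded: gets the sentinel.
  masm.Print();
  return 0;
}
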
@@ -861,6 +926,10 @@ class Assembler : public AssemblerBase { |
return trampoline_pool_blocked_nesting_ > 0; |
} |
+ bool has_exception() const { |
+ return internal_trampoline_exception_; |
+ } |
+ |
private: |
// Code buffer: |
// The buffer into which code and relocation info are generated. |
@@ -1005,10 +1074,18 @@ class Assembler : public AssemblerBase { |
return end_; |
} |
int take_slot() { |
- int trampoline_slot = next_slot_; |
- ASSERT(free_slot_count_ > 0); |
- free_slot_count_--; |
- next_slot_ += 2 * kInstrSize; |
+ int trampoline_slot = kInvalidSlotPos; |
+ if (free_slot_count_ <= 0) { |
+ // We have run out of trampoline slots. |
+ // Fail in debug mode so that we become aware of each case where this |
+ // happens. |
+ ASSERT(0); |
+ // Internal exception will be caught. |
+ } else { |
+ trampoline_slot = next_slot_; |
+ free_slot_count_--; |
+ next_slot_ += 2 * kInstrSize; |
+ } |
return trampoline_slot; |
} |
int take_label() { |
@@ -1038,8 +1115,10 @@ class Assembler : public AssemblerBase { |
static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; |
static const int kMaxDistBetweenPools = |
kMaxBranchOffset - 2 * kTrampolineSize; |
+ static const int kInvalidSlotPos = -1; |
List<Trampoline> trampolines_; |
+ bool internal_trampoline_exception_; |
friend class RegExpMacroAssemblerMIPS; |
friend class RelocInfo; |
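
Finally, a standalone sketch (not V8 code) of the bounded slot allocation that take_slot() switches to above: slots are handed out two instructions at a time until the pool is exhausted, after which the sentinel kInvalidSlotPos is returned and the caller records an internal trampoline exception instead of writing past the pool.

#include <cassert>

const int kInstrSize = 4;
const int kInvalidSlotPos = -1;

class SlotPool {
 public:
  SlotPool(int start, int slot_count)
      : next_slot_(start), free_slot_count_(slot_count) {}

  // Returns the next slot position, or kInvalidSlotPos when the pool is full.
  int take_slot() {
    if (free_slot_count_ <= 0) return kInvalidSlotPos;
    int slot = next_slot_;
    free_slot_count_--;
    next_slot_ += 2 * kInstrSize;  // Two instructions per slot, as above.
    return slot;
  }

 private:
  int next_slot_;
  int free_slot_count_;
};

int main() {
  SlotPool pool(0, 2);
  bool exception = false;
  assert(pool.take_slot() == 0);
  assert(pool.take_slot() == 2 * kInstrSize);
  if (pool.take_slot() == kInvalidSlotPos) exception = true;  // Caller flags it.
  assert(exception);
  return 0;
}
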