| OLD | NEW |
| 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| 2 // All Rights Reserved. | 2 // All Rights Reserved. |
| 3 // | 3 // |
| 4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
| 6 // met: | 6 // met: |
| 7 // | 7 // |
| 8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
| 9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
| 10 // | 10 // |
| (...skipping 12 matching lines...) |
| 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | 23 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
| 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | 24 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | 25 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF | 26 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
| 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING | 27 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
| 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS | 28 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
| 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 29 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 30 | 30 |
| 31 // The original source code covered by the above license has been | 31 // The original source code covered by the above license has been |
| 32 // modified significantly by Google Inc. | 32 // modified significantly by Google Inc. |
| 33 // Copyright 2010 the V8 project authors. All rights reserved. | 33 // Copyright 2011 the V8 project authors. All rights reserved. |
| 34 | 34 |
| 35 | 35 |
| 36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_ | 36 #ifndef V8_MIPS_ASSEMBLER_MIPS_H_ |
| 37 #define V8_MIPS_ASSEMBLER_MIPS_H_ | 37 #define V8_MIPS_ASSEMBLER_MIPS_H_ |
| 38 | 38 |
| 39 #include <stdio.h> | 39 #include <stdio.h> |
| 40 #include "assembler.h" | 40 #include "assembler.h" |
| 41 #include "constants-mips.h" | 41 #include "constants-mips.h" |
| 42 #include "serialize.h" | 42 #include "serialize.h" |
| 43 | 43 |
| (...skipping 16 matching lines...) |
| 60 // 3) By not using an enum, we are possibly preventing the compiler from | 60 // 3) By not using an enum, we are possibly preventing the compiler from |
| 61 // doing certain constant folds, which may significantly reduce the | 61 // doing certain constant folds, which may significantly reduce the |
| 62 // code generated for some assembly instructions (because they boil down | 62 // code generated for some assembly instructions (because they boil down |
| 63 // to a few constants). If this is a problem, we could change the code | 63 // to a few constants). If this is a problem, we could change the code |
| 64 // such that we use an enum in optimized mode, and the struct in debug | 64 // such that we use an enum in optimized mode, and the struct in debug |
| 65 // mode. This way we get the compile-time error checking in debug mode | 65 // mode. This way we get the compile-time error checking in debug mode |
| 66 // and best performance in optimized code. | 66 // and best performance in optimized code. |
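As a quick illustration of the compile-time checking argument above (hypothetical snippet, not part of the file; it assumes the usual addu declaration in the elided data-processing section):

    Register r = { 4 };        // brace-initialized, code 4 is a0 on MIPS
    // masm->addu(r, r, r);    // fine: every operand is a Register
    // masm->addu(4, r, r);    // rejected by the compiler: int is not a Register,
    //                         // whereas a plain enum would convert silently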
| 67 | 67 |
| 68 | 68 |
| 69 // ----------------------------------------------------------------------------- | 69 // ----------------------------------------------------------------------------- |
| 70 // Implementation of Register and FPURegister | 70 // Implementation of Register and FPURegister. |
| 71 | 71 |
| 72 // Core register. | 72 // Core register. |
| 73 struct Register { | 73 struct Register { |
| 74 static const int kNumRegisters = v8::internal::kNumRegisters; | 74 static const int kNumRegisters = v8::internal::kNumRegisters; |
| 75 static const int kNumAllocatableRegisters = 14; // v0 through t7 | 75 static const int kNumAllocatableRegisters = 14; // v0 through t7. |
| 76 static const int kSizeInBytes = 4; |
| 76 | 77 |
| 77 static int ToAllocationIndex(Register reg) { | 78 static int ToAllocationIndex(Register reg) { |
| 78 return reg.code() - 2; // zero_reg and 'at' are skipped. | 79 return reg.code() - 2; // zero_reg and 'at' are skipped. |
| 79 } | 80 } |
| 80 | 81 |
| 81 static Register FromAllocationIndex(int index) { | 82 static Register FromAllocationIndex(int index) { |
| 82 ASSERT(index >= 0 && index < kNumAllocatableRegisters); | 83 ASSERT(index >= 0 && index < kNumAllocatableRegisters); |
| 83 return from_code(index + 2); // zero_reg and 'at' are skipped. | 84 return from_code(index + 2); // zero_reg and 'at' are skipped. |
| 84 } | 85 } |
| 85 | 86 |
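Sanity check of the index arithmetic (hedged example; v0 and t7 are the register constants with codes 2 and 15 defined in the elided block below):

    Register r = Register::FromAllocationIndex(0);   // code 2, i.e. v0
    int index = Register::ToAllocationIndex(r);      // back to 0
    // Index 13, the last allocatable one, maps to code 15, i.e. t7.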
| (...skipping 174 matching lines...) |
| 260 const FPURegister f26 = { 26 }; | 261 const FPURegister f26 = { 26 }; |
| 261 const FPURegister f27 = { 27 }; | 262 const FPURegister f27 = { 27 }; |
| 262 const FPURegister f28 = { 28 }; | 263 const FPURegister f28 = { 28 }; |
| 263 const FPURegister f29 = { 29 }; | 264 const FPURegister f29 = { 29 }; |
| 264 const FPURegister f30 = { 30 }; | 265 const FPURegister f30 = { 30 }; |
| 265 const FPURegister f31 = { 31 }; | 266 const FPURegister f31 = { 31 }; |
| 266 | 267 |
| 267 // FPU (coprocessor 1) control registers. | 268 // FPU (coprocessor 1) control registers. |
| 268 // Currently only FCSR (#31) is implemented. | 269 // Currently only FCSR (#31) is implemented. |
| 269 struct FPUControlRegister { | 270 struct FPUControlRegister { |
| 270 static const int kFCSRRegister = 31; | |
| 271 static const int kInvalidFPUControlRegister = -1; | |
| 272 | |
| 273 bool is_valid() const { return code_ == kFCSRRegister; } | 271 bool is_valid() const { return code_ == kFCSRRegister; } |
| 274 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } | 272 bool is(FPUControlRegister creg) const { return code_ == creg.code_; } |
| 275 int code() const { | 273 int code() const { |
| 276 ASSERT(is_valid()); | 274 ASSERT(is_valid()); |
| 277 return code_; | 275 return code_; |
| 278 } | 276 } |
| 279 int bit() const { | 277 int bit() const { |
| 280 ASSERT(is_valid()); | 278 ASSERT(is_valid()); |
| 281 return 1 << code_; | 279 return 1 << code_; |
| 282 } | 280 } |
| 283 void setcode(int f) { | 281 void setcode(int f) { |
| 284 code_ = f; | 282 code_ = f; |
| 285 ASSERT(is_valid()); | 283 ASSERT(is_valid()); |
| 286 } | 284 } |
| 287 // Unfortunately we can't make this private in a struct. | 285 // Unfortunately we can't make this private in a struct. |
| 288 int code_; | 286 int code_; |
| 289 }; | 287 }; |
| 290 | 288 |
| 291 const FPUControlRegister no_fpucreg = { -1 }; | 289 const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister }; |
| 292 const FPUControlRegister FCSR = { kFCSRRegister }; | 290 const FPUControlRegister FCSR = { kFCSRRegister }; |
| 293 | 291 |
| 294 | 292 |
| 295 // ----------------------------------------------------------------------------- | 293 // ----------------------------------------------------------------------------- |
| 296 // Machine instruction Operands. | 294 // Machine instruction Operands. |
| 297 | 295 |
| 298 // Class Operand represents a shifter operand in data processing instructions. | 296 // Class Operand represents a shifter operand in data processing instructions. |
| 299 class Operand BASE_EMBEDDED { | 297 class Operand BASE_EMBEDDED { |
| 300 public: | 298 public: |
| 301 // Immediate. | 299 // Immediate. |
| 302 INLINE(explicit Operand(int32_t immediate, | 300 INLINE(explicit Operand(int32_t immediate, |
| 303 RelocInfo::Mode rmode = RelocInfo::NONE)); | 301 RelocInfo::Mode rmode = RelocInfo::NONE)); |
| 304 INLINE(explicit Operand(const ExternalReference& f)); | 302 INLINE(explicit Operand(const ExternalReference& f)); |
| 305 INLINE(explicit Operand(const char* s)); | 303 INLINE(explicit Operand(const char* s)); |
| 306 INLINE(explicit Operand(Object** opp)); | 304 INLINE(explicit Operand(Object** opp)); |
| 307 INLINE(explicit Operand(Context** cpp)); | 305 INLINE(explicit Operand(Context** cpp)); |
| 308 explicit Operand(Handle<Object> handle); | 306 explicit Operand(Handle<Object> handle); |
| 309 INLINE(explicit Operand(Smi* value)); | 307 INLINE(explicit Operand(Smi* value)); |
| 310 | 308 |
| 311 // Register. | 309 // Register. |
| 312 INLINE(explicit Operand(Register rm)); | 310 INLINE(explicit Operand(Register rm)); |
| 313 | 311 |
| 314 // Return true if this is a register operand. | 312 // Return true if this is a register operand. |
| 315 INLINE(bool is_reg() const); | 313 INLINE(bool is_reg() const); |
| 316 | 314 |
| 317 Register rm() const { return rm_; } | 315 Register rm() const { return rm_; } |
| 318 | 316 |
| 319 private: | 317 private: |
| 320 Register rm_; | 318 Register rm_; |
| 321 int32_t imm32_; // Valid if rm_ == no_reg | 319 int32_t imm32_; // Valid if rm_ == no_reg. |
| 322 RelocInfo::Mode rmode_; | 320 RelocInfo::Mode rmode_; |
| 323 | 321 |
| 324 friend class Assembler; | 322 friend class Assembler; |
| 325 friend class MacroAssembler; | 323 friend class MacroAssembler; |
| 326 }; | 324 }; |
| 327 | 325 |
| 328 | 326 |
| 329 // On MIPS we have only one addressing mode with base_reg + offset. | 327 // On MIPS we have only one addressing mode with base_reg + offset. |
| 330 // Class MemOperand represents a memory operand in load and store instructions. | 328 // Class MemOperand represents a memory operand in load and store instructions. |
| 331 class MemOperand : public Operand { | 329 class MemOperand : public Operand { |
| 332 public: | 330 public: |
| 333 | 331 |
| 334 explicit MemOperand(Register rn, int32_t offset = 0); | 332 explicit MemOperand(Register rn, int32_t offset = 0); |
| 335 | 333 |
| 336 private: | 334 private: |
| 337 int32_t offset_; | 335 int32_t offset_; |
| 338 | 336 |
| 339 friend class Assembler; | 337 friend class Assembler; |
| 340 }; | 338 }; |
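To make the two operand kinds concrete, a hedged sketch (assumes the usual __ shorthand; lw/sw come from the elided load/store section and li is the macro-assembler helper that materializes an immediate Operand):

    __ lw(t0, MemOperand(sp, 4));     // base register + offset, the only MIPS mode
    __ addiu(t0, t0, 1);
    __ sw(t0, MemOperand(sp, 4));
    __ li(t1, Operand(0x12345678));   // immediate Operand, expanded by the macro assembler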
| 341 | 339 |
| 342 | 340 |
| 343 // CpuFeatures keeps track of which features are supported by the target CPU. | 341 // CpuFeatures keeps track of which features are supported by the target CPU. |
| 344 // Supported features must be enabled by a Scope before use. | 342 // Supported features must be enabled by a Scope before use. |
| 345 class CpuFeatures { | 343 class CpuFeatures : public AllStatic { |
| 346 public: | 344 public: |
| 347 // Detect features of the target CPU. Set safe defaults if the serializer | 345 // Detect features of the target CPU. Set safe defaults if the serializer |
| 348 // is enabled (snapshots must be portable). | 346 // is enabled (snapshots must be portable). |
| 349 void Probe(bool portable); | 347 static void Probe(); |
| 350 | 348 |
| 351 // Check whether a feature is supported by the target CPU. | 349 // Check whether a feature is supported by the target CPU. |
| 352 bool IsSupported(CpuFeature f) const { | 350 static bool IsSupported(CpuFeature f) { |
| 351 ASSERT(initialized_); |
| 353 if (f == FPU && !FLAG_enable_fpu) return false; | 352 if (f == FPU && !FLAG_enable_fpu) return false; |
| 354 return (supported_ & (1u << f)) != 0; | 353 return (supported_ & (1u << f)) != 0; |
| 355 } | 354 } |
| 356 | 355 |
| 356 |
| 357 #ifdef DEBUG |
| 357 // Check whether a feature is currently enabled. | 358 // Check whether a feature is currently enabled. |
| 358 bool IsEnabled(CpuFeature f) const { | 359 static bool IsEnabled(CpuFeature f) { |
| 359 return (enabled_ & (1u << f)) != 0; | 360 ASSERT(initialized_); |
| 361 Isolate* isolate = Isolate::UncheckedCurrent(); |
| 362 if (isolate == NULL) { |
| 363 // When no isolate is available, work as if we're running in |
| 364 // release mode. |
| 365 return IsSupported(f); |
| 366 } |
| 367 unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); |
| 368 return (enabled & (1u << f)) != 0; |
| 360 } | 369 } |
| 370 #endif |
| 361 | 371 |
| 362 // Enable a specified feature within a scope. | 372 // Enable a specified feature within a scope. |
| 363 class Scope BASE_EMBEDDED { | 373 class Scope BASE_EMBEDDED { |
| 364 #ifdef DEBUG | 374 #ifdef DEBUG |
| 365 public: | 375 public: |
| 366 explicit Scope(CpuFeature f) | 376 explicit Scope(CpuFeature f) { |
| 367 : cpu_features_(Isolate::Current()->cpu_features()), | 377 unsigned mask = 1u << f; |
| 368 isolate_(Isolate::Current()) { | 378 ASSERT(CpuFeatures::IsSupported(f)); |
| 369 ASSERT(cpu_features_->IsSupported(f)); | |
| 370 ASSERT(!Serializer::enabled() || | 379 ASSERT(!Serializer::enabled() || |
| 371 (cpu_features_->found_by_runtime_probing_ & (1u << f)) == 0); | 380 (CpuFeatures::found_by_runtime_probing_ & mask) == 0); |
| 372 old_enabled_ = cpu_features_->enabled_; | 381 isolate_ = Isolate::UncheckedCurrent(); |
| 373 cpu_features_->enabled_ |= 1u << f; | 382 old_enabled_ = 0; |
| 383 if (isolate_ != NULL) { |
| 384 old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); |
| 385 isolate_->set_enabled_cpu_features(old_enabled_ | mask); |
| 386 } |
| 374 } | 387 } |
| 375 ~Scope() { | 388 ~Scope() { |
| 376 ASSERT_EQ(Isolate::Current(), isolate_); | 389 ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); |
| 377 cpu_features_->enabled_ = old_enabled_; | 390 if (isolate_ != NULL) { |
| 378 } | 391 isolate_->set_enabled_cpu_features(old_enabled_); |
| 392 } |
| 393 } |
| 379 private: | 394 private: |
| 395 Isolate* isolate_; |
| 380 unsigned old_enabled_; | 396 unsigned old_enabled_; |
| 381 CpuFeatures* cpu_features_; | |
| 382 Isolate* isolate_; | |
| 383 #else | 397 #else |
| 384 public: | 398 public: |
| 385 explicit Scope(CpuFeature f) {} | 399 explicit Scope(CpuFeature f) {} |
| 386 #endif | 400 #endif |
| 387 }; | 401 }; |
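For readers new to this class, the Probe/IsSupported/Scope trio is used roughly like this (sketch; FPU is the CpuFeature checked above):

    CpuFeatures::Probe();                 // done once during VM setup
    if (CpuFeatures::IsSupported(FPU)) {
      CpuFeatures::Scope scope(FPU);      // marks FPU as enabled for this scope
      // ... emit coprocessor 1 instructions here ...
    }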
| 388 | 402 |
| 403 class TryForceFeatureScope BASE_EMBEDDED { |
| 404 public: |
| 405 explicit TryForceFeatureScope(CpuFeature f) |
| 406 : old_supported_(CpuFeatures::supported_) { |
| 407 if (CanForce()) { |
| 408 CpuFeatures::supported_ |= (1u << f); |
| 409 } |
| 410 } |
| 411 |
| 412 ~TryForceFeatureScope() { |
| 413 if (CanForce()) { |
| 414 CpuFeatures::supported_ = old_supported_; |
| 415 } |
| 416 } |
| 417 |
| 418 private: |
| 419 static bool CanForce() { |
| 420 // It's only safe to temporarily force support of CPU features |
| 421 // when there's only a single isolate, which is guaranteed when |
| 422 // the serializer is enabled. |
| 423 return Serializer::enabled(); |
| 424 } |
| 425 |
| 426 const unsigned old_supported_; |
| 427 }; |
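And a sketch of the new TryForceFeatureScope (hypothetical call site; it only changes supported_ when the serializer is enabled, per CanForce()):

    {
      CpuFeatures::TryForceFeatureScope force_fpu(FPU);
      // While building the snapshot, code generated here may assume FPU
      // support even if the host CPU lacks it.
    }   // old_supported_ is restored on scope exit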
| 428 |
| 389 private: | 429 private: |
| 390 CpuFeatures(); | 430 #ifdef DEBUG |
| 391 | 431 static bool initialized_; |
| 392 unsigned supported_; | 432 #endif |
| 393 unsigned enabled_; | 433 static unsigned supported_; |
| 394 unsigned found_by_runtime_probing_; | 434 static unsigned found_by_runtime_probing_; |
| 395 | |
| 396 friend class Isolate; | |
| 397 | 435 |
| 398 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); | 436 DISALLOW_COPY_AND_ASSIGN(CpuFeatures); |
| 399 }; | 437 }; |
| 400 | 438 |
| 401 | 439 |
| 402 class Assembler : public AssemblerBase { | 440 class Assembler : public AssemblerBase { |
| 403 public: | 441 public: |
| 404 // Create an assembler. Instructions and relocation information are emitted | 442 // Create an assembler. Instructions and relocation information are emitted |
| 405 // into a buffer, with the instructions starting from the beginning and the | 443 // into a buffer, with the instructions starting from the beginning and the |
| 406 // relocation information starting from the end of the buffer. See CodeDesc | 444 // relocation information starting from the end of the buffer. See CodeDesc |
| 407 // for a detailed comment on the layout (globals.h). | 445 // for a detailed comment on the layout (globals.h). |
| 408 // | 446 // |
| 409 // If the provided buffer is NULL, the assembler allocates and grows its own | 447 // If the provided buffer is NULL, the assembler allocates and grows its own |
| 410 // buffer, and buffer_size determines the initial buffer size. The buffer is | 448 // buffer, and buffer_size determines the initial buffer size. The buffer is |
| 411 // owned by the assembler and deallocated upon destruction of the assembler. | 449 // owned by the assembler and deallocated upon destruction of the assembler. |
| 412 // | 450 // |
| 413 // If the provided buffer is not NULL, the assembler uses the provided buffer | 451 // If the provided buffer is not NULL, the assembler uses the provided buffer |
| 414 // for code generation and assumes its size to be buffer_size. If the buffer | 452 // for code generation and assumes its size to be buffer_size. If the buffer |
| 415 // is too small, a fatal error occurs. No deallocation of the buffer is done | 453 // is too small, a fatal error occurs. No deallocation of the buffer is done |
| 416 // upon destruction of the assembler. | 454 // upon destruction of the assembler. |
| 417 Assembler(void* buffer, int buffer_size); | 455 Assembler(Isolate* isolate, void* buffer, int buffer_size); |
| 418 ~Assembler(); | 456 ~Assembler(); |
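A minimal usage sketch of the new constructor signature (assumes an Isolate* named isolate is in scope and a caller-owned buffer):

    static byte buffer[4096];
    Assembler assm(isolate, buffer, sizeof(buffer));
    // ... emit instructions ...
    CodeDesc desc;
    assm.GetCode(&desc);     // fills desc with the emitted code and reloc info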
| 419 | 457 |
| 420 // Overrides the default provided by FLAG_debug_code. | 458 // Overrides the default provided by FLAG_debug_code. |
| 421 void set_emit_debug_code(bool value) { emit_debug_code_ = value; } | 459 void set_emit_debug_code(bool value) { emit_debug_code_ = value; } |
| 422 | 460 |
| 423 // GetCode emits any pending (non-emitted) code and fills the descriptor | 461 // GetCode emits any pending (non-emitted) code and fills the descriptor |
| 424 // desc. GetCode() is idempotent; it returns the same result if no other | 462 // desc. GetCode() is idempotent; it returns the same result if no other |
| 425 // Assembler functions are invoked in between GetCode() calls. | 463 // Assembler functions are invoked in between GetCode() calls. |
| 426 void GetCode(CodeDesc* desc); | 464 void GetCode(CodeDesc* desc); |
| 427 | 465 |
| 428 // Label operations & relative jumps (PPUM Appendix D). | 466 // Label operations & relative jumps (PPUM Appendix D). |
| 429 // | 467 // |
| 430 // Takes a branch opcode (cc) and a label (L) and generates | 468 // Takes a branch opcode (cc) and a label (L) and generates |
| 431 // either a backward branch or a forward branch and links it | 469 // either a backward branch or a forward branch and links it |
| 432 // to the label fixup chain. Usage: | 470 // to the label fixup chain. Usage: |
| 433 // | 471 // |
| 434 // Label L; // unbound label | 472 // Label L; // unbound label |
| 435 // j(cc, &L); // forward branch to unbound label | 473 // j(cc, &L); // forward branch to unbound label |
| 436 // bind(&L); // bind label to the current pc | 474 // bind(&L); // bind label to the current pc |
| 437 // j(cc, &L); // backward branch to bound label | 475 // j(cc, &L); // backward branch to bound label |
| 438 // bind(&L); // illegal: a label may be bound only once | 476 // bind(&L); // illegal: a label may be bound only once |
| 439 // | 477 // |
| 440 // Note: The same Label can be used for forward and backward branches | 478 // Note: The same Label can be used for forward and backward branches |
| 441 // but it may be bound only once. | 479 // but it may be bound only once. |
| 442 void bind(Label* L); // binds an unbound label L to the current code position | 480 void bind(Label* L); // Binds an unbound label L to current code position. |
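A MIPS-flavored version of the usage comment above (sketch; assumes the usual __ shorthand and that addiu/nop are declared as elsewhere in this header):

    Label skip;
    __ beq(a0, zero_reg, &skip);   // forward branch to the unbound label
    __ nop();                      // branch delay slot
    __ addiu(v0, v0, 1);           // executed only when a0 != 0
    __ bind(&skip);                // bind the label to the current pc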
| 443 | 481 |
| 444 // Returns the branch offset to the given label from the current code position | 482 // Returns the branch offset to the given label from the current code |
| 445 // Links the label to the current position if it is still unbound | 483 // position. Links the label to the current position if it is still unbound. |
| 446 // Manages the jump elimination optimization if the second parameter is true. | 484 // Manages the jump elimination optimization if the second parameter is true. |
| 447 int32_t branch_offset(Label* L, bool jump_elimination_allowed); | 485 int32_t branch_offset(Label* L, bool jump_elimination_allowed); |
| 448 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { | 486 int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) { |
| 449 int32_t o = branch_offset(L, jump_elimination_allowed); | 487 int32_t o = branch_offset(L, jump_elimination_allowed); |
| 450 ASSERT((o & 3) == 0); // Assert the offset is aligned. | 488 ASSERT((o & 3) == 0); // Assert the offset is aligned. |
| 451 return o >> 2; | 489 return o >> 2; |
| 452 } | 490 } |
| 453 | 491 |
| 454 // Puts a label's target address at the given position. | 492 // Puts a label's target address at the given position. |
| 455 // The high 8 bits are set to zero. | 493 // The high 8 bits are set to zero. |
| (...skipping 78 matching lines...) |
| 534 DEBUG_BREAK_NOP, | 572 DEBUG_BREAK_NOP, |
| 535 // IC markers. | 573 // IC markers. |
| 536 PROPERTY_ACCESS_INLINED, | 574 PROPERTY_ACCESS_INLINED, |
| 537 PROPERTY_ACCESS_INLINED_CONTEXT, | 575 PROPERTY_ACCESS_INLINED_CONTEXT, |
| 538 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, | 576 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, |
| 539 // Helper values. | 577 // Helper values. |
| 540 LAST_CODE_MARKER, | 578 LAST_CODE_MARKER, |
| 541 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED | 579 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED |
| 542 }; | 580 }; |
| 543 | 581 |
| 544 // type == 0 is the default non-marking type. | 582 // Type == 0 is the default non-marking type. |
| 545 void nop(unsigned int type = 0) { | 583 void nop(unsigned int type = 0) { |
| 546 ASSERT(type < 32); | 584 ASSERT(type < 32); |
| 547 sll(zero_reg, zero_reg, type, true); | 585 sll(zero_reg, zero_reg, type, true); |
| 548 } | 586 } |
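Put differently, a marked nop is just an sll of zero_reg by a distinctive shift amount. A hedged illustration (DEBUG_BREAK_NOP is one of the marker values listed above):

    __ nop();                            // sll zero_reg, zero_reg, 0: the canonical nop
    __ nop(Assembler::DEBUG_BREAK_NOP);  // still a no-op, but IsNop(instr, DEBUG_BREAK_NOP)
                                         // can recognize it as a marker later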
| 549 | 587 |
| 550 | 588 |
| 551 //------- Branch and jump instructions -------- | 589 // --------Branch-and-jump-instructions---------- |
| 552 // We don't use likely variant of instructions. | 590 // We don't use likely variant of instructions. |
| 553 void b(int16_t offset); | 591 void b(int16_t offset); |
| 554 void b(Label* L) { b(branch_offset(L, false)>>2); } | 592 void b(Label* L) { b(branch_offset(L, false)>>2); } |
| 555 void bal(int16_t offset); | 593 void bal(int16_t offset); |
| 556 void bal(Label* L) { bal(branch_offset(L, false)>>2); } | 594 void bal(Label* L) { bal(branch_offset(L, false)>>2); } |
| 557 | 595 |
| 558 void beq(Register rs, Register rt, int16_t offset); | 596 void beq(Register rs, Register rt, int16_t offset); |
| 559 void beq(Register rs, Register rt, Label* L) { | 597 void beq(Register rs, Register rt, Label* L) { |
| 560 beq(rs, rt, branch_offset(L, false) >> 2); | 598 beq(rs, rt, branch_offset(L, false) >> 2); |
| 561 } | 599 } |
| 562 void bgez(Register rs, int16_t offset); | 600 void bgez(Register rs, int16_t offset); |
| 563 void bgezal(Register rs, int16_t offset); | 601 void bgezal(Register rs, int16_t offset); |
| 564 void bgtz(Register rs, int16_t offset); | 602 void bgtz(Register rs, int16_t offset); |
| 565 void blez(Register rs, int16_t offset); | 603 void blez(Register rs, int16_t offset); |
| 566 void bltz(Register rs, int16_t offset); | 604 void bltz(Register rs, int16_t offset); |
| 567 void bltzal(Register rs, int16_t offset); | 605 void bltzal(Register rs, int16_t offset); |
| 568 void bne(Register rs, Register rt, int16_t offset); | 606 void bne(Register rs, Register rt, int16_t offset); |
| 569 void bne(Register rs, Register rt, Label* L) { | 607 void bne(Register rs, Register rt, Label* L) { |
| 570 bne(rs, rt, branch_offset(L, false)>>2); | 608 bne(rs, rt, branch_offset(L, false)>>2); |
| 571 } | 609 } |
| 572 | 610 |
| 573 // Never use the int16_t b(l)cond version with a branch offset | 611 // Never use the int16_t b(l)cond version with a branch offset |
| 574 // instead of using the Label* version. See Twiki for infos. | 612 // instead of using the Label* version. |
| 575 | 613 |
| 576 // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits. | 614 // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits. |
| 577 void j(int32_t target); | 615 void j(int32_t target); |
| 578 void jal(int32_t target); | 616 void jal(int32_t target); |
| 579 void jalr(Register rs, Register rd = ra); | 617 void jalr(Register rs, Register rd = ra); |
| 580 void jr(Register target); | 618 void jr(Register target); |
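The 28-bit figure follows from the J-type encoding: the instruction carries a 26-bit index of instruction words.

    // 2^26 instruction slots * 4 bytes each = 2^28 bytes = 256 MB;
    // the upper 4 address bits come from the PC, hence the
    // "current 256 MB-aligned region" restriction above.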
| 581 | 619 |
| 582 | 620 |
| 583 //-------Data-processing-instructions--------- | 621 //-------Data-processing-instructions--------- |
| 584 | 622 |
| (...skipping 169 matching lines...) |
| 754 }; | 792 }; |
| 755 | 793 |
| 756 // Debugging. | 794 // Debugging. |
| 757 | 795 |
| 758 // Mark address of the ExitJSFrame code. | 796 // Mark address of the ExitJSFrame code. |
| 759 void RecordJSReturn(); | 797 void RecordJSReturn(); |
| 760 | 798 |
| 761 // Mark address of a debug break slot. | 799 // Mark address of a debug break slot. |
| 762 void RecordDebugBreakSlot(); | 800 void RecordDebugBreakSlot(); |
| 763 | 801 |
| 802 // Record the AST id of the CallIC being compiled, so that it can be placed |
| 803 // in the relocation information. |
| 804 void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; } |
| 805 |
| 764 // Record a comment relocation entry that can be used by a disassembler. | 806 // Record a comment relocation entry that can be used by a disassembler. |
| 765 // Use --code-comments to enable. | 807 // Use --code-comments to enable. |
| 766 void RecordComment(const char* msg); | 808 void RecordComment(const char* msg); |
| 767 | 809 |
| 768 // Writes a single byte or word of data in the code stream. Used for | 810 // Writes a single byte or word of data in the code stream. Used for |
| 769 // inline tables, e.g., jump-tables. | 811 // inline tables, e.g., jump-tables. |
| 770 void db(uint8_t data); | 812 void db(uint8_t data); |
| 771 void dd(uint32_t data); | 813 void dd(uint32_t data); |
| 772 | 814 |
| 773 int32_t pc_offset() const { return pc_ - buffer_; } | 815 int32_t pc_offset() const { return pc_ - buffer_; } |
| (...skipping 23 matching lines...) |
| 797 static void instr_at_put(byte* pc, Instr instr) { | 839 static void instr_at_put(byte* pc, Instr instr) { |
| 798 *reinterpret_cast<Instr*>(pc) = instr; | 840 *reinterpret_cast<Instr*>(pc) = instr; |
| 799 } | 841 } |
| 800 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } | 842 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); } |
| 801 void instr_at_put(int pos, Instr instr) { | 843 void instr_at_put(int pos, Instr instr) { |
| 802 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; | 844 *reinterpret_cast<Instr*>(buffer_ + pos) = instr; |
| 803 } | 845 } |
| 804 | 846 |
| 805 // Check if an instruction is a branch of some kind. | 847 // Check if an instruction is a branch of some kind. |
| 806 static bool IsBranch(Instr instr); | 848 static bool IsBranch(Instr instr); |
| 849 static bool IsBeq(Instr instr); |
| 850 static bool IsBne(Instr instr); |
| 807 | 851 |
| 808 static bool IsNop(Instr instr, unsigned int type); | 852 static bool IsNop(Instr instr, unsigned int type); |
| 809 static bool IsPop(Instr instr); | 853 static bool IsPop(Instr instr); |
| 810 static bool IsPush(Instr instr); | 854 static bool IsPush(Instr instr); |
| 811 static bool IsLwRegFpOffset(Instr instr); | 855 static bool IsLwRegFpOffset(Instr instr); |
| 812 static bool IsSwRegFpOffset(Instr instr); | 856 static bool IsSwRegFpOffset(Instr instr); |
| 813 static bool IsLwRegFpNegOffset(Instr instr); | 857 static bool IsLwRegFpNegOffset(Instr instr); |
| 814 static bool IsSwRegFpNegOffset(Instr instr); | 858 static bool IsSwRegFpNegOffset(Instr instr); |
| 815 | 859 |
| 816 static Register GetRt(Instr instr); | 860 static Register GetRtReg(Instr instr); |
| 861 static Register GetRsReg(Instr instr); |
| 862 static Register GetRdReg(Instr instr); |
| 863 |
| 864 static uint32_t GetRt(Instr instr); |
| 865 static uint32_t GetRtField(Instr instr); |
| 866 static uint32_t GetRs(Instr instr); |
| 867 static uint32_t GetRsField(Instr instr); |
| 868 static uint32_t GetRd(Instr instr); |
| 869 static uint32_t GetRdField(Instr instr); |
| 870 static uint32_t GetSa(Instr instr); |
| 871 static uint32_t GetSaField(Instr instr); |
| 872 static uint32_t GetOpcodeField(Instr instr); |
| 873 static uint32_t GetImmediate16(Instr instr); |
| 874 static uint32_t GetLabelConst(Instr instr); |
| 817 | 875 |
| 818 static int32_t GetBranchOffset(Instr instr); | 876 static int32_t GetBranchOffset(Instr instr); |
| 819 static bool IsLw(Instr instr); | 877 static bool IsLw(Instr instr); |
| 820 static int16_t GetLwOffset(Instr instr); | 878 static int16_t GetLwOffset(Instr instr); |
| 821 static Instr SetLwOffset(Instr instr, int16_t offset); | 879 static Instr SetLwOffset(Instr instr, int16_t offset); |
| 822 | 880 |
| 823 static bool IsSw(Instr instr); | 881 static bool IsSw(Instr instr); |
| 824 static Instr SetSwOffset(Instr instr, int16_t offset); | 882 static Instr SetSwOffset(Instr instr, int16_t offset); |
| 825 static bool IsAddImmediate(Instr instr); | 883 static bool IsAddImmediate(Instr instr); |
| 826 static Instr SetAddImmediateOffset(Instr instr, int16_t offset); | 884 static Instr SetAddImmediateOffset(Instr instr, int16_t offset); |
| 827 | 885 |
| 886 static bool IsAndImmediate(Instr instr); |
| 887 |
| 828 void CheckTrampolinePool(bool force_emit = false); | 888 void CheckTrampolinePool(bool force_emit = false); |
| 829 | 889 |
| 830 protected: | 890 protected: |
| 891 // Relocation for a type-recording IC has the AST id added to it. This |
| 892 // member variable is a way to pass the information from the call site to |
| 893 // the relocation info. |
| 894 unsigned ast_id_for_reloc_info_; |
| 895 |
| 831 bool emit_debug_code() const { return emit_debug_code_; } | 896 bool emit_debug_code() const { return emit_debug_code_; } |
| 832 | 897 |
| 833 int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } | 898 int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } |
| 834 | 899 |
| 835 // Decode branch instruction at pos and return branch target pos. | 900 // Decode branch instruction at pos and return branch target pos. |
| 836 int target_at(int32_t pos); | 901 int target_at(int32_t pos); |
| 837 | 902 |
| 838 // Patch branch instruction at pos to branch to given branch target pos. | 903 // Patch branch instruction at pos to branch to given branch target pos. |
| 839 void target_at_put(int32_t pos, int32_t target_pos); | 904 void target_at_put(int32_t pos, int32_t target_pos); |
| 840 | 905 |
| (...skipping 13 matching lines...) |
| 854 trampoline_pool_blocked_nesting_++; | 919 trampoline_pool_blocked_nesting_++; |
| 855 } | 920 } |
| 856 void EndBlockTrampolinePool() { | 921 void EndBlockTrampolinePool() { |
| 857 trampoline_pool_blocked_nesting_--; | 922 trampoline_pool_blocked_nesting_--; |
| 858 } | 923 } |
| 859 | 924 |
| 860 bool is_trampoline_pool_blocked() const { | 925 bool is_trampoline_pool_blocked() const { |
| 861 return trampoline_pool_blocked_nesting_ > 0; | 926 return trampoline_pool_blocked_nesting_ > 0; |
| 862 } | 927 } |
| 863 | 928 |
| 929 bool has_exception() const { |
| 930 return internal_trampoline_exception_; |
| 931 } |
| 932 |
| 864 private: | 933 private: |
| 865 // Code buffer: | 934 // Code buffer: |
| 866 // The buffer into which code and relocation info are generated. | 935 // The buffer into which code and relocation info are generated. |
| 867 byte* buffer_; | 936 byte* buffer_; |
| 868 int buffer_size_; | 937 int buffer_size_; |
| 869 // True if the assembler owns the buffer, false if buffer is external. | 938 // True if the assembler owns the buffer, false if buffer is external. |
| 870 bool own_buffer_; | 939 bool own_buffer_; |
| 871 | 940 |
| 872 // Buffer size and constant pool distance are checked together at regular | 941 // Buffer size and constant pool distance are checked together at regular |
| 873 // intervals of kBufferCheckInterval emitted bytes. | 942 // intervals of kBufferCheckInterval emitted bytes. |
| (...skipping 124 matching lines...) |
| 998 free_label_count_ = label_count; | 1067 free_label_count_ = label_count; |
| 999 end_ = next_label_ + (label_count - 1) * kInstrSize; | 1068 end_ = next_label_ + (label_count - 1) * kInstrSize; |
| 1000 } | 1069 } |
| 1001 int start() { | 1070 int start() { |
| 1002 return start_; | 1071 return start_; |
| 1003 } | 1072 } |
| 1004 int end() { | 1073 int end() { |
| 1005 return end_; | 1074 return end_; |
| 1006 } | 1075 } |
| 1007 int take_slot() { | 1076 int take_slot() { |
| 1008 int trampoline_slot = next_slot_; | 1077 int trampoline_slot = kInvalidSlotPos; |
| 1009 ASSERT(free_slot_count_ > 0); | 1078 if (free_slot_count_ <= 0) { |
| 1010 free_slot_count_--; | 1079 // We have run out of space on trampolines. |
| 1011 next_slot_ += 2 * kInstrSize; | 1080 // Make sure we fail in debug mode, so we become aware of each case |
| 1081 // when this happens. |
| 1082 ASSERT(0); |
| 1083 // Internal exception will be caught. |
| 1084 } else { |
| 1085 trampoline_slot = next_slot_; |
| 1086 free_slot_count_--; |
| 1087 next_slot_ += 2 * kInstrSize; |
| 1088 } |
| 1012 return trampoline_slot; | 1089 return trampoline_slot; |
| 1013 } | 1090 } |
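A sketch of how a caller can react to the new kInvalidSlotPos result (hypothetical call site; the real caller, get_trampoline_entry, is not shown in this diff):

    int entry = trampolines_[i].take_slot();   // i chosen by the caller
    if (entry == kInvalidSlotPos) {
      // Out of trampoline slots: record the failure so has_exception()
      // reports it instead of silently emitting a bad branch.
      internal_trampoline_exception_ = true;
    }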
| 1014 int take_label() { | 1091 int take_label() { |
| 1015 int label_pos = next_label_; | 1092 int label_pos = next_label_; |
| 1016 ASSERT(free_label_count_ > 0); | 1093 ASSERT(free_label_count_ > 0); |
| 1017 free_label_count_--; | 1094 free_label_count_--; |
| 1018 next_label_ += kInstrSize; | 1095 next_label_ += kInstrSize; |
| 1019 return label_pos; | 1096 return label_pos; |
| 1020 } | 1097 } |
| 1021 private: | 1098 private: |
| 1022 int start_; | 1099 int start_; |
| 1023 int end_; | 1100 int end_; |
| 1024 int next_slot_; | 1101 int next_slot_; |
| 1025 int free_slot_count_; | 1102 int free_slot_count_; |
| 1026 int next_label_; | 1103 int next_label_; |
| 1027 int free_label_count_; | 1104 int free_label_count_; |
| 1028 }; | 1105 }; |
| 1029 | 1106 |
| 1030 int32_t get_label_entry(int32_t pos, bool next_pool = true); | 1107 int32_t get_label_entry(int32_t pos, bool next_pool = true); |
| 1031 int32_t get_trampoline_entry(int32_t pos, bool next_pool = true); | 1108 int32_t get_trampoline_entry(int32_t pos, bool next_pool = true); |
| 1032 | 1109 |
| 1033 static const int kSlotsPerTrampoline = 2304; | 1110 static const int kSlotsPerTrampoline = 2304; |
| 1034 static const int kLabelsPerTrampoline = 8; | 1111 static const int kLabelsPerTrampoline = 8; |
| 1035 static const int kTrampolineInst = | 1112 static const int kTrampolineInst = |
| 1036 2 * kSlotsPerTrampoline + kLabelsPerTrampoline; | 1113 2 * kSlotsPerTrampoline + kLabelsPerTrampoline; |
| 1037 static const int kTrampolineSize = kTrampolineInst * kInstrSize; | 1114 static const int kTrampolineSize = kTrampolineInst * kInstrSize; |
| 1038 static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; | 1115 static const int kMaxBranchOffset = (1 << (18 - 1)) - 1; |
| 1039 static const int kMaxDistBetweenPools = | 1116 static const int kMaxDistBetweenPools = |
| 1040 kMaxBranchOffset - 2 * kTrampolineSize; | 1117 kMaxBranchOffset - 2 * kTrampolineSize; |
| 1118 static const int kInvalidSlotPos = -1; |
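The 18 in kMaxBranchOffset is worth spelling out (standard MIPS branch encoding, not something introduced by this change):

    // Branches carry a signed 16-bit offset counted in instruction words;
    // in bytes that is 16 + 2 = 18 bits, so the farthest forward reach is
    // (1 << 17) - 1 = 131071 bytes.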
| 1041 | 1119 |
| 1042 List<Trampoline> trampolines_; | 1120 List<Trampoline> trampolines_; |
| 1121 bool internal_trampoline_exception_; |
| 1043 | 1122 |
| 1044 friend class RegExpMacroAssemblerMIPS; | 1123 friend class RegExpMacroAssemblerMIPS; |
| 1045 friend class RelocInfo; | 1124 friend class RelocInfo; |
| 1046 friend class CodePatcher; | 1125 friend class CodePatcher; |
| 1047 friend class BlockTrampolinePoolScope; | 1126 friend class BlockTrampolinePoolScope; |
| 1048 | 1127 |
| 1049 PositionsRecorder positions_recorder_; | 1128 PositionsRecorder positions_recorder_; |
| 1050 bool allow_peephole_optimization_; | 1129 bool allow_peephole_optimization_; |
| 1051 bool emit_debug_code_; | 1130 bool emit_debug_code_; |
| 1052 friend class PositionsRecorder; | 1131 friend class PositionsRecorder; |
| 1053 friend class EnsureSpace; | 1132 friend class EnsureSpace; |
| 1054 }; | 1133 }; |
| 1055 | 1134 |
| 1056 | 1135 |
| 1057 class EnsureSpace BASE_EMBEDDED { | 1136 class EnsureSpace BASE_EMBEDDED { |
| 1058 public: | 1137 public: |
| 1059 explicit EnsureSpace(Assembler* assembler) { | 1138 explicit EnsureSpace(Assembler* assembler) { |
| 1060 assembler->CheckBuffer(); | 1139 assembler->CheckBuffer(); |
| 1061 } | 1140 } |
| 1062 }; | 1141 }; |
| 1063 | 1142 |
| 1064 } } // namespace v8::internal | 1143 } } // namespace v8::internal |
| 1065 | 1144 |
| 1066 #endif // V8_MIPS_ASSEMBLER_MIPS_H_ | 1145 #endif // V8_MIPS_ASSEMBLER_MIPS_H_ |