OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <stdlib.h> | 5 #include <stdlib.h> |
6 #include <cmath> | 6 #include <cmath> |
7 #include <cstdarg> | 7 #include <cstdarg> |
8 | 8 |
9 #if V8_TARGET_ARCH_ARM64 | 9 #if V8_TARGET_ARCH_ARM64 |
10 | 10 |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; | 48 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; |
49 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; | 49 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; |
50 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; | 50 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; |
51 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; | 51 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; |
52 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; | 52 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; |
53 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; | 53 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; |
54 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; | 54 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; |
55 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; | 55 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; |
56 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; | 56 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; |
57 | 57 |
// static
// Lazily-initialized monitor shared by every Simulator instance in the
// process. It serializes exclusive-access (ldaxr/stlxr) bookkeeping across
// simulated processors via its mutex.
base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
    LAZY_INSTANCE_INITIALIZER;
58 | 61 |
59 // This is basically the same as PrintF, with a guard for FLAG_trace_sim. | 62 // This is basically the same as PrintF, with a guard for FLAG_trace_sim. |
60 void Simulator::TraceSim(const char* format, ...) { | 63 void Simulator::TraceSim(const char* format, ...) { |
61 if (FLAG_trace_sim) { | 64 if (FLAG_trace_sim) { |
62 va_list arguments; | 65 va_list arguments; |
63 va_start(arguments, format); | 66 va_start(arguments, format); |
64 base::OS::VFPrint(stream_, format, arguments); | 67 base::OS::VFPrint(stream_, format, arguments); |
65 va_end(arguments); | 68 va_end(arguments); |
66 } | 69 } |
67 } | 70 } |
(...skipping 354 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
422 // Returning to address 0 exits the Simulator. | 425 // Returning to address 0 exits the Simulator. |
423 set_lr(kEndOfSimAddress); | 426 set_lr(kEndOfSimAddress); |
424 | 427 |
425 // Reset debug helpers. | 428 // Reset debug helpers. |
426 breakpoints_.empty(); | 429 breakpoints_.empty(); |
427 break_on_next_ = false; | 430 break_on_next_ = false; |
428 } | 431 } |
429 | 432 |
430 | 433 |
Simulator::~Simulator() {
  // Deregister this simulator's processor record from the shared global
  // monitor before tearing down any other state, so no other thread can
  // observe a dangling Processor node.
  global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
  delete[] reinterpret_cast<byte*>(stack_);
  // instrument_ is only meaningful when instruction stats were enabled;
  // presumably it is only allocated under the same flag — matches the guard.
  if (FLAG_log_instruction_stats) {
    delete instrument_;
  }
  delete disassembler_decoder_;
  delete print_disasm_;
  DeleteArray(last_debugger_input_);
  delete decoder_;
}
441 | 445 |
(...skipping 1179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1621 | 1625 |
1622 | 1626 |
1623 void Simulator::LoadStoreHelper(Instruction* instr, | 1627 void Simulator::LoadStoreHelper(Instruction* instr, |
1624 int64_t offset, | 1628 int64_t offset, |
1625 AddrMode addrmode) { | 1629 AddrMode addrmode) { |
1626 unsigned srcdst = instr->Rt(); | 1630 unsigned srcdst = instr->Rt(); |
1627 unsigned addr_reg = instr->Rn(); | 1631 unsigned addr_reg = instr->Rn(); |
1628 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); | 1632 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); |
1629 uintptr_t stack = 0; | 1633 uintptr_t stack = 0; |
1630 | 1634 |
| 1635 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1636 if (instr->IsLoad()) { |
| 1637 local_monitor_.NotifyLoad(address); |
| 1638 } else { |
| 1639 local_monitor_.NotifyStore(address); |
| 1640 global_monitor_.Pointer()->NotifyStore_Locked(address, |
| 1641 &global_monitor_processor_); |
| 1642 } |
| 1643 |
1631 // Handle the writeback for stores before the store. On a CPU the writeback | 1644 // Handle the writeback for stores before the store. On a CPU the writeback |
1632 // and the store are atomic, but when running on the simulator it is possible | 1645 // and the store are atomic, but when running on the simulator it is possible |
1633 // to be interrupted in between. The simulator is not thread safe and V8 does | 1646 // to be interrupted in between. The simulator is not thread safe and V8 does |
1634 // not require it to be to run JavaScript therefore the profiler may sample | 1647 // not require it to be to run JavaScript therefore the profiler may sample |
1635 // the "simulated" CPU in the middle of load/store with writeback. The code | 1648 // the "simulated" CPU in the middle of load/store with writeback. The code |
1636 // below ensures that push operations are safe even when interrupted: the | 1649 // below ensures that push operations are safe even when interrupted: the |
1637 // stack pointer will be decremented before adding an element to the stack. | 1650 // stack pointer will be decremented before adding an element to the stack. |
1638 if (instr->IsStore()) { | 1651 if (instr->IsStore()) { |
1639 LoadStoreWriteBack(addr_reg, offset, addrmode); | 1652 LoadStoreWriteBack(addr_reg, offset, addrmode); |
1640 | 1653 |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1723 AddrMode addrmode) { | 1736 AddrMode addrmode) { |
1724 unsigned rt = instr->Rt(); | 1737 unsigned rt = instr->Rt(); |
1725 unsigned rt2 = instr->Rt2(); | 1738 unsigned rt2 = instr->Rt2(); |
1726 unsigned addr_reg = instr->Rn(); | 1739 unsigned addr_reg = instr->Rn(); |
1727 size_t access_size = 1 << instr->SizeLSPair(); | 1740 size_t access_size = 1 << instr->SizeLSPair(); |
1728 int64_t offset = instr->ImmLSPair() * access_size; | 1741 int64_t offset = instr->ImmLSPair() * access_size; |
1729 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); | 1742 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); |
1730 uintptr_t address2 = address + access_size; | 1743 uintptr_t address2 = address + access_size; |
1731 uintptr_t stack = 0; | 1744 uintptr_t stack = 0; |
1732 | 1745 |
| 1746 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1747 if (instr->IsLoad()) { |
| 1748 local_monitor_.NotifyLoad(address); |
| 1749 local_monitor_.NotifyLoad(address2); |
| 1750 } else { |
| 1751 local_monitor_.NotifyStore(address); |
| 1752 local_monitor_.NotifyStore(address2); |
| 1753 global_monitor_.Pointer()->NotifyStore_Locked(address, |
| 1754 &global_monitor_processor_); |
| 1755 global_monitor_.Pointer()->NotifyStore_Locked(address2, |
| 1756 &global_monitor_processor_); |
| 1757 } |
| 1758 |
1733 // Handle the writeback for stores before the store. On a CPU the writeback | 1759 // Handle the writeback for stores before the store. On a CPU the writeback |
1734 // and the store are atomic, but when running on the simulator it is possible | 1760 // and the store are atomic, but when running on the simulator it is possible |
1735 // to be interrupted in between. The simulator is not thread safe and V8 does | 1761 // to be interrupted in between. The simulator is not thread safe and V8 does |
1736 // not require it to be to run JavaScript therefore the profiler may sample | 1762 // not require it to be to run JavaScript therefore the profiler may sample |
1737 // the "simulated" CPU in the middle of load/store with writeback. The code | 1763 // the "simulated" CPU in the middle of load/store with writeback. The code |
1738 // below ensures that push operations are safe even when interrupted: the | 1764 // below ensures that push operations are safe even when interrupted: the |
1739 // stack pointer will be decremented before adding an element to the stack. | 1765 // stack pointer will be decremented before adding an element to the stack. |
1740 if (instr->IsStore()) { | 1766 if (instr->IsStore()) { |
1741 LoadStoreWriteBack(addr_reg, offset, addrmode); | 1767 LoadStoreWriteBack(addr_reg, offset, addrmode); |
1742 | 1768 |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1846 // Accesses below the stack pointer (but above the platform stack limit) are | 1872 // Accesses below the stack pointer (but above the platform stack limit) are |
1847 // not allowed in the ABI. | 1873 // not allowed in the ABI. |
1848 CheckMemoryAccess(address, stack); | 1874 CheckMemoryAccess(address, stack); |
1849 } | 1875 } |
1850 | 1876 |
1851 | 1877 |
1852 void Simulator::VisitLoadLiteral(Instruction* instr) { | 1878 void Simulator::VisitLoadLiteral(Instruction* instr) { |
1853 uintptr_t address = instr->LiteralAddress(); | 1879 uintptr_t address = instr->LiteralAddress(); |
1854 unsigned rt = instr->Rt(); | 1880 unsigned rt = instr->Rt(); |
1855 | 1881 |
| 1882 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1883 local_monitor_.NotifyLoad(address); |
| 1884 |
1856 switch (instr->Mask(LoadLiteralMask)) { | 1885 switch (instr->Mask(LoadLiteralMask)) { |
1857 // Use _no_log variants to suppress the register trace (LOG_REGS, | 1886 // Use _no_log variants to suppress the register trace (LOG_REGS, |
1858 // LOG_FP_REGS), then print a more detailed log. | 1887 // LOG_FP_REGS), then print a more detailed log. |
1859 case LDR_w_lit: | 1888 case LDR_w_lit: |
1860 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); | 1889 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); |
1861 LogRead(address, kWRegSize, rt); | 1890 LogRead(address, kWRegSize, rt); |
1862 break; | 1891 break; |
1863 case LDR_x_lit: | 1892 case LDR_x_lit: |
1864 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); | 1893 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); |
1865 LogRead(address, kXRegSize, rt); | 1894 LogRead(address, kXRegSize, rt); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1899 void Simulator::LoadStoreWriteBack(unsigned addr_reg, | 1928 void Simulator::LoadStoreWriteBack(unsigned addr_reg, |
1900 int64_t offset, | 1929 int64_t offset, |
1901 AddrMode addrmode) { | 1930 AddrMode addrmode) { |
1902 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { | 1931 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { |
1903 DCHECK(offset != 0); | 1932 DCHECK(offset != 0); |
1904 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); | 1933 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); |
1905 set_reg(addr_reg, address + offset, Reg31IsStackPointer); | 1934 set_reg(addr_reg, address + offset, Reg31IsStackPointer); |
1906 } | 1935 } |
1907 } | 1936 } |
1908 | 1937 |
| 1938 Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) { |
| 1939 switch (size) { |
| 1940 case 0: |
| 1941 return TransactionSize::None; |
| 1942 case 1: |
| 1943 return TransactionSize::Byte; |
| 1944 case 2: |
| 1945 return TransactionSize::HalfWord; |
| 1946 case 4: |
| 1947 return TransactionSize::Word; |
| 1948 default: |
| 1949 UNREACHABLE(); |
| 1950 } |
| 1951 return TransactionSize::None; |
| 1952 } |
| 1953 |
// Simulates the exclusive acquire/release load-store family (LDAXR/STLXR).
// Only exclusive, non-pair, byte/half/word forms are handled; the DCHECKs
// below reject the unimplemented encodings. Both the local monitor and the
// process-wide global monitor are updated under the global monitor's mutex
// so exclusive state stays consistent across simulated processors.
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
  unsigned rs = instr->Rs();  // Status register for the store-exclusive result.
  unsigned rt = instr->Rt();  // Data register.
  unsigned rn = instr->Rn();  // Base address register.
  LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
      instr->Mask(LoadStoreAcquireReleaseMask));
  int32_t is_acquire_release = instr->LoadStoreXAcquireRelease();
  int32_t is_not_exclusive = instr->LoadStoreXNotExclusive();
  int32_t is_load = instr->LoadStoreXLoad();
  int32_t is_pair = instr->LoadStoreXPair();
  DCHECK_NE(is_acquire_release, 0);
  DCHECK_EQ(is_not_exclusive, 0);  // Non exclusive unimplemented.
  DCHECK_EQ(is_pair, 0);  // Pair unimplemented.
  unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
  // Exclusive accesses use plain register addressing with no offset.
  uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
  // Exclusive accesses must be naturally aligned.
  DCHECK(address % access_size == 0);
  base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
  if (is_load != 0) {
    // Load-exclusive: tag a reservation on both monitors, then read.
    local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
    global_monitor_.Pointer()->NotifyLoadExcl_Locked(
        address, &global_monitor_processor_);
    switch (op) {
      case LDAXR_b:
        set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
        break;
      case LDAXR_h:
        set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
        break;
      case LDAXR_w:
        set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
        break;
      default:
        UNIMPLEMENTED();
    }
    LogRead(address, access_size, rt);
  } else {
    // Store-exclusive: memory is written only if both the local and the
    // global monitor still hold a matching reservation.
    if (local_monitor_.NotifyStoreExcl(address,
                                       get_transaction_size(access_size)) &&
        global_monitor_.Pointer()->NotifyStoreExcl_Locked(
            address, &global_monitor_processor_)) {
      switch (op) {
        case STLXR_b:
          MemoryWrite<uint8_t>(address, wreg(rt));
          break;
        case STLXR_h:
          MemoryWrite<uint16_t>(address, wreg(rt));
          break;
        case STLXR_w:
          MemoryWrite<uint32_t>(address, wreg(rt));
          break;
        default:
          UNIMPLEMENTED();
      }
      LogWrite(address, access_size, rt);
      // Success is reported as 0 in the status register.
      set_wreg(rs, 0);
    } else {
      // Failure: status register reads 1 and memory is left untouched.
      set_wreg(rs, 1);
    }
  }
}
1912 | 2014 |
1913 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) { | 2015 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) { |
1914 if ((address >= stack_limit_) && (address < stack)) { | 2016 if ((address >= stack_limit_) && (address < stack)) { |
1915 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); | 2017 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); |
1916 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n", | 2018 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n", |
1917 static_cast<uint64_t>(stack)); | 2019 static_cast<uint64_t>(stack)); |
1918 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n", | 2020 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n", |
1919 static_cast<uint64_t>(address)); | 2021 static_cast<uint64_t>(address)); |
1920 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n", | 2022 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n", |
(...skipping 1949 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3870 | 3972 |
3871 // The printf parameters are inlined in the code, so skip them. | 3973 // The printf parameters are inlined in the code, so skip them. |
3872 set_pc(instr->InstructionAtOffset(kPrintfLength)); | 3974 set_pc(instr->InstructionAtOffset(kPrintfLength)); |
3873 | 3975 |
3874 // Set LR as if we'd just called a native printf function. | 3976 // Set LR as if we'd just called a native printf function. |
3875 set_lr(pc()); | 3977 set_lr(pc()); |
3876 | 3978 |
3877 delete[] format; | 3979 delete[] format; |
3878 } | 3980 } |
3879 | 3981 |
// The local monitor models this simulated processor's own exclusive-access
// reservation. It starts open, i.e. with no reservation outstanding.
Simulator::LocalMonitor::LocalMonitor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      size_(TransactionSize::None) {}
| 3986 |
| 3987 void Simulator::LocalMonitor::Clear() { |
| 3988 access_state_ = MonitorAccess::Open; |
| 3989 tagged_addr_ = 0; |
| 3990 size_ = TransactionSize::None; |
| 3991 } |
| 3992 |
| 3993 void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) { |
| 3994 if (access_state_ == MonitorAccess::Exclusive) { |
| 3995 // A non exclusive load could clear the local monitor. As a result, it's |
| 3996 // most strict to unconditionally clear the local monitor on load. |
| 3997 Clear(); |
| 3998 } |
| 3999 } |
| 4000 |
| 4001 void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr, |
| 4002 TransactionSize size) { |
| 4003 access_state_ = MonitorAccess::Exclusive; |
| 4004 tagged_addr_ = addr; |
| 4005 size_ = size; |
| 4006 } |
| 4007 |
| 4008 void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) { |
| 4009 if (access_state_ == MonitorAccess::Exclusive) { |
| 4010 // A non exclusive store could clear the local monitor. As a result, it's |
| 4011 // most strict to unconditionally clear the local monitor on store. |
| 4012 Clear(); |
| 4013 } |
| 4014 } |
| 4015 |
| 4016 bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr, |
| 4017 TransactionSize size) { |
| 4018 if (access_state_ == MonitorAccess::Exclusive) { |
| 4019 // It is allowed for a processor to require that the address matches |
| 4020 // exactly (B2.10.1), so this comparison does not mask addr. |
| 4021 if (addr == tagged_addr_ && size_ == size) { |
| 4022 Clear(); |
| 4023 return true; |
| 4024 } else { |
| 4025 // It is implementation-defined whether an exclusive store to a |
| 4026 // non-tagged address will update memory. As a result, it's most strict |
| 4027 // to unconditionally clear the local monitor. |
| 4028 Clear(); |
| 4029 return false; |
| 4030 } |
| 4031 } else { |
| 4032 DCHECK(access_state_ == MonitorAccess::Open); |
| 4033 return false; |
| 4034 } |
| 4035 } |
| 4036 |
// One node per simulated processor in the global monitor's intrusive
// doubly-linked list. Starts unlinked (next_/prev_ null), with no
// reservation and the stxr failure counter at zero.
Simulator::GlobalMonitor::Processor::Processor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      next_(nullptr),
      prev_(nullptr),
      failure_counter_(0) {}
| 4043 |
| 4044 void Simulator::GlobalMonitor::Processor::Clear_Locked() { |
| 4045 access_state_ = MonitorAccess::Open; |
| 4046 tagged_addr_ = 0; |
| 4047 } |
| 4048 |
| 4049 void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked( |
| 4050 uintptr_t addr) { |
| 4051 access_state_ = MonitorAccess::Exclusive; |
| 4052 tagged_addr_ = addr; |
| 4053 } |
| 4054 |
| 4055 void Simulator::GlobalMonitor::Processor::NotifyStore_Locked( |
| 4056 uintptr_t addr, bool is_requesting_processor) { |
| 4057 if (access_state_ == MonitorAccess::Exclusive) { |
| 4058 // A non exclusive store could clear the global monitor. As a result, it's |
| 4059 // most strict to unconditionally clear global monitors on store. |
| 4060 Clear_Locked(); |
| 4061 } |
| 4062 } |
| 4063 |
// Reacts to a store-exclusive. For the requesting processor, returns whether
// its stxr may succeed; for other processors it only invalidates matching
// reservations and always returns false. Caller holds the monitor mutex.
// NOTE(review): when the requesting processor's address does not match the
// tag, control falls through to the final return false WITHOUT clearing the
// reservation — presumably intentional (the local monitor already failed the
// store); confirm against the local-monitor path.
bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
    uintptr_t addr, bool is_requesting_processor) {
  if (access_state_ == MonitorAccess::Exclusive) {
    if (is_requesting_processor) {
      // It is allowed for a processor to require that the address matches
      // exactly (B2.10.2), so this comparison does not mask addr.
      if (addr == tagged_addr_) {
        Clear_Locked();
        // Introduce occasional stxr failures. This is to simulate the
        // behavior of hardware, which can randomly fail due to background
        // cache evictions.
        if (failure_counter_++ >= kMaxFailureCounter) {
          failure_counter_ = 0;
          return false;
        } else {
          return true;
        }
      }
    } else if ((addr & kExclusiveTaggedAddrMask) ==
               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
      // Check the masked addresses when responding to a successful lock by
      // another processor so the implementation is more conservative (i.e. the
      // granularity of locking is as large as possible.)
      Clear_Locked();
      return false;
    }
  }
  return false;
}
| 4093 |
| 4094 Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {} |
| 4095 |
| 4096 void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr, |
| 4097 Processor* processor) { |
| 4098 processor->NotifyLoadExcl_Locked(addr); |
| 4099 PrependProcessor_Locked(processor); |
| 4100 } |
| 4101 |
| 4102 void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr, |
| 4103 Processor* processor) { |
| 4104 // Notify each processor of the store operation. |
| 4105 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 4106 bool is_requesting_processor = iter == processor; |
| 4107 iter->NotifyStore_Locked(addr, is_requesting_processor); |
| 4108 } |
| 4109 } |
| 4110 |
| 4111 bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr, |
| 4112 Processor* processor) { |
| 4113 DCHECK(IsProcessorInLinkedList_Locked(processor)); |
| 4114 if (processor->NotifyStoreExcl_Locked(addr, true)) { |
| 4115 // Notify the other processors that this StoreExcl succeeded. |
| 4116 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 4117 if (iter != processor) { |
| 4118 iter->NotifyStoreExcl_Locked(addr, false); |
| 4119 } |
| 4120 } |
| 4121 return true; |
| 4122 } else { |
| 4123 return false; |
| 4124 } |
| 4125 } |
| 4126 |
| 4127 bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked( |
| 4128 Processor* processor) const { |
| 4129 return head_ == processor || processor->next_ || processor->prev_; |
| 4130 } |
| 4131 |
| 4132 void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) { |
| 4133 if (IsProcessorInLinkedList_Locked(processor)) { |
| 4134 return; |
| 4135 } |
| 4136 |
| 4137 if (head_) { |
| 4138 head_->prev_ = processor; |
| 4139 } |
| 4140 processor->prev_ = nullptr; |
| 4141 processor->next_ = head_; |
| 4142 head_ = processor; |
| 4143 } |
| 4144 |
| 4145 void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) { |
| 4146 base::LockGuard<base::Mutex> lock_guard(&mutex); |
| 4147 if (!IsProcessorInLinkedList_Locked(processor)) { |
| 4148 return; |
| 4149 } |
| 4150 |
| 4151 if (processor->prev_) { |
| 4152 processor->prev_->next_ = processor->next_; |
| 4153 } else { |
| 4154 head_ = processor->next_; |
| 4155 } |
| 4156 if (processor->next_) { |
| 4157 processor->next_->prev_ = processor->prev_; |
| 4158 } |
| 4159 processor->prev_ = nullptr; |
| 4160 processor->next_ = nullptr; |
| 4161 } |
3880 | 4162 |
3881 #endif // USE_SIMULATOR | 4163 #endif // USE_SIMULATOR |
3882 | 4164 |
3883 } // namespace internal | 4165 } // namespace internal |
3884 } // namespace v8 | 4166 } // namespace v8 |
3885 | 4167 |
3886 #endif // V8_TARGET_ARCH_ARM64 | 4168 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |