OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <stdlib.h> | 5 #include <stdlib.h> |
6 #include <cmath> | 6 #include <cmath> |
7 #include <cstdarg> | 7 #include <cstdarg> |
8 | 8 |
9 #if V8_TARGET_ARCH_ARM64 | 9 #if V8_TARGET_ARCH_ARM64 |
10 | 10 |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
48 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; | 48 TEXT_COLOUR clr_flag_value = FLAG_log_colour ? COLOUR(NORMAL) : ""; |
49 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; | 49 TEXT_COLOUR clr_reg_name = FLAG_log_colour ? COLOUR_BOLD(CYAN) : ""; |
50 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; | 50 TEXT_COLOUR clr_reg_value = FLAG_log_colour ? COLOUR(CYAN) : ""; |
51 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; | 51 TEXT_COLOUR clr_fpreg_name = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : ""; |
52 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; | 52 TEXT_COLOUR clr_fpreg_value = FLAG_log_colour ? COLOUR(MAGENTA) : ""; |
53 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; | 53 TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE) : ""; |
54 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; | 54 TEXT_COLOUR clr_debug_number = FLAG_log_colour ? COLOUR_BOLD(YELLOW) : ""; |
55 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; | 55 TEXT_COLOUR clr_debug_message = FLAG_log_colour ? COLOUR(YELLOW) : ""; |
56 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; | 56 TEXT_COLOUR clr_printf = FLAG_log_colour ? COLOUR(GREEN) : ""; |
57 | 57 |
// static
// Lazily-initialized process-wide exclusive-access monitor. It appears to be
// shared by all Simulator instances: each simulator registers its
// global_monitor_processor_ on first exclusive load and unregisters it in the
// Simulator destructor.
base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
    LAZY_INSTANCE_INITIALIZER;
58 | 61 |
59 // This is basically the same as PrintF, with a guard for FLAG_trace_sim. | 62 // This is basically the same as PrintF, with a guard for FLAG_trace_sim. |
60 void Simulator::TraceSim(const char* format, ...) { | 63 void Simulator::TraceSim(const char* format, ...) { |
61 if (FLAG_trace_sim) { | 64 if (FLAG_trace_sim) { |
62 va_list arguments; | 65 va_list arguments; |
63 va_start(arguments, format); | 66 va_start(arguments, format); |
64 base::OS::VFPrint(stream_, format, arguments); | 67 base::OS::VFPrint(stream_, format, arguments); |
65 va_end(arguments); | 68 va_end(arguments); |
66 } | 69 } |
67 } | 70 } |
(...skipping 354 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
422 // Returning to address 0 exits the Simulator. | 425 // Returning to address 0 exits the Simulator. |
423 set_lr(kEndOfSimAddress); | 426 set_lr(kEndOfSimAddress); |
424 | 427 |
425 // Reset debug helpers. | 428 // Reset debug helpers. |
426 breakpoints_.empty(); | 429 breakpoints_.empty(); |
427 break_on_next_ = false; | 430 break_on_next_ = false; |
428 } | 431 } |
429 | 432 |
430 | 433 |
Simulator::~Simulator() {
  // Unregister this simulator's entry from the process-wide exclusive
  // monitor first, so the global monitor never touches a dead Processor.
  global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
  delete[] reinterpret_cast<byte*>(stack_);
  // instrument_ is only allocated when instruction stats are enabled.
  if (FLAG_log_instruction_stats) {
    delete instrument_;
  }
  delete disassembler_decoder_;
  delete print_disasm_;
  DeleteArray(last_debugger_input_);
  delete decoder_;
}
441 | 445 |
(...skipping 1179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1621 | 1625 |
1622 | 1626 |
1623 void Simulator::LoadStoreHelper(Instruction* instr, | 1627 void Simulator::LoadStoreHelper(Instruction* instr, |
1624 int64_t offset, | 1628 int64_t offset, |
1625 AddrMode addrmode) { | 1629 AddrMode addrmode) { |
1626 unsigned srcdst = instr->Rt(); | 1630 unsigned srcdst = instr->Rt(); |
1627 unsigned addr_reg = instr->Rn(); | 1631 unsigned addr_reg = instr->Rn(); |
1628 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); | 1632 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); |
1629 uintptr_t stack = 0; | 1633 uintptr_t stack = 0; |
1630 | 1634 |
| 1635 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1636 if (instr->IsLoad()) { |
| 1637 local_monitor_.NotifyLoad(address); |
| 1638 } else { |
| 1639 local_monitor_.NotifyStore(address); |
| 1640 global_monitor_.Pointer()->NotifyStore_Locked(address, |
| 1641 &global_monitor_processor_); |
| 1642 } |
| 1643 |
1631 // Handle the writeback for stores before the store. On a CPU the writeback | 1644 // Handle the writeback for stores before the store. On a CPU the writeback |
1632 // and the store are atomic, but when running on the simulator it is possible | 1645 // and the store are atomic, but when running on the simulator it is possible |
1633 // to be interrupted in between. The simulator is not thread safe and V8 does | 1646 // to be interrupted in between. The simulator is not thread safe and V8 does |
1634 // not require it to be to run JavaScript therefore the profiler may sample | 1647 // not require it to be to run JavaScript therefore the profiler may sample |
1635 // the "simulated" CPU in the middle of load/store with writeback. The code | 1648 // the "simulated" CPU in the middle of load/store with writeback. The code |
1636 // below ensures that push operations are safe even when interrupted: the | 1649 // below ensures that push operations are safe even when interrupted: the |
1637 // stack pointer will be decremented before adding an element to the stack. | 1650 // stack pointer will be decremented before adding an element to the stack. |
1638 if (instr->IsStore()) { | 1651 if (instr->IsStore()) { |
1639 LoadStoreWriteBack(addr_reg, offset, addrmode); | 1652 LoadStoreWriteBack(addr_reg, offset, addrmode); |
1640 | 1653 |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1723 AddrMode addrmode) { | 1736 AddrMode addrmode) { |
1724 unsigned rt = instr->Rt(); | 1737 unsigned rt = instr->Rt(); |
1725 unsigned rt2 = instr->Rt2(); | 1738 unsigned rt2 = instr->Rt2(); |
1726 unsigned addr_reg = instr->Rn(); | 1739 unsigned addr_reg = instr->Rn(); |
1727 size_t access_size = 1 << instr->SizeLSPair(); | 1740 size_t access_size = 1 << instr->SizeLSPair(); |
1728 int64_t offset = instr->ImmLSPair() * access_size; | 1741 int64_t offset = instr->ImmLSPair() * access_size; |
1729 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); | 1742 uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode); |
1730 uintptr_t address2 = address + access_size; | 1743 uintptr_t address2 = address + access_size; |
1731 uintptr_t stack = 0; | 1744 uintptr_t stack = 0; |
1732 | 1745 |
| 1746 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1747 if (instr->IsLoad()) { |
| 1748 local_monitor_.NotifyLoad(address); |
| 1749 local_monitor_.NotifyLoad(address2); |
| 1750 } else { |
| 1751 local_monitor_.NotifyStore(address); |
| 1752 local_monitor_.NotifyStore(address2); |
| 1753 global_monitor_.Pointer()->NotifyStore_Locked(address, |
| 1754 &global_monitor_processor_); |
| 1755 global_monitor_.Pointer()->NotifyStore_Locked(address2, |
| 1756 &global_monitor_processor_); |
| 1757 } |
| 1758 |
1733 // Handle the writeback for stores before the store. On a CPU the writeback | 1759 // Handle the writeback for stores before the store. On a CPU the writeback |
1734 // and the store are atomic, but when running on the simulator it is possible | 1760 // and the store are atomic, but when running on the simulator it is possible |
1735 // to be interrupted in between. The simulator is not thread safe and V8 does | 1761 // to be interrupted in between. The simulator is not thread safe and V8 does |
1736 // not require it to be to run JavaScript therefore the profiler may sample | 1762 // not require it to be to run JavaScript therefore the profiler may sample |
1737 // the "simulated" CPU in the middle of load/store with writeback. The code | 1763 // the "simulated" CPU in the middle of load/store with writeback. The code |
1738 // below ensures that push operations are safe even when interrupted: the | 1764 // below ensures that push operations are safe even when interrupted: the |
1739 // stack pointer will be decremented before adding an element to the stack. | 1765 // stack pointer will be decremented before adding an element to the stack. |
1740 if (instr->IsStore()) { | 1766 if (instr->IsStore()) { |
1741 LoadStoreWriteBack(addr_reg, offset, addrmode); | 1767 LoadStoreWriteBack(addr_reg, offset, addrmode); |
1742 | 1768 |
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1846 // Accesses below the stack pointer (but above the platform stack limit) are | 1872 // Accesses below the stack pointer (but above the platform stack limit) are |
1847 // not allowed in the ABI. | 1873 // not allowed in the ABI. |
1848 CheckMemoryAccess(address, stack); | 1874 CheckMemoryAccess(address, stack); |
1849 } | 1875 } |
1850 | 1876 |
1851 | 1877 |
1852 void Simulator::VisitLoadLiteral(Instruction* instr) { | 1878 void Simulator::VisitLoadLiteral(Instruction* instr) { |
1853 uintptr_t address = instr->LiteralAddress(); | 1879 uintptr_t address = instr->LiteralAddress(); |
1854 unsigned rt = instr->Rt(); | 1880 unsigned rt = instr->Rt(); |
1855 | 1881 |
| 1882 base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex); |
| 1883 local_monitor_.NotifyLoad(address); |
| 1884 |
1856 switch (instr->Mask(LoadLiteralMask)) { | 1885 switch (instr->Mask(LoadLiteralMask)) { |
1857 // Use _no_log variants to suppress the register trace (LOG_REGS, | 1886 // Use _no_log variants to suppress the register trace (LOG_REGS, |
1858 // LOG_FP_REGS), then print a more detailed log. | 1887 // LOG_FP_REGS), then print a more detailed log. |
1859 case LDR_w_lit: | 1888 case LDR_w_lit: |
1860 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); | 1889 set_wreg_no_log(rt, MemoryRead<uint32_t>(address)); |
1861 LogRead(address, kWRegSize, rt); | 1890 LogRead(address, kWRegSize, rt); |
1862 break; | 1891 break; |
1863 case LDR_x_lit: | 1892 case LDR_x_lit: |
1864 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); | 1893 set_xreg_no_log(rt, MemoryRead<uint64_t>(address)); |
1865 LogRead(address, kXRegSize, rt); | 1894 LogRead(address, kXRegSize, rt); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1899 void Simulator::LoadStoreWriteBack(unsigned addr_reg, | 1928 void Simulator::LoadStoreWriteBack(unsigned addr_reg, |
1900 int64_t offset, | 1929 int64_t offset, |
1901 AddrMode addrmode) { | 1930 AddrMode addrmode) { |
1902 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { | 1931 if ((addrmode == PreIndex) || (addrmode == PostIndex)) { |
1903 DCHECK(offset != 0); | 1932 DCHECK(offset != 0); |
1904 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); | 1933 uint64_t address = xreg(addr_reg, Reg31IsStackPointer); |
1905 set_reg(addr_reg, address + offset, Reg31IsStackPointer); | 1934 set_reg(addr_reg, address + offset, Reg31IsStackPointer); |
1906 } | 1935 } |
1907 } | 1936 } |
1908 | 1937 |
| 1938 Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) { |
| 1939 switch (size) { |
| 1940 case 0: |
| 1941 return TransactionSize::None; |
| 1942 case 1: |
| 1943 return TransactionSize::Byte; |
| 1944 case 2: |
| 1945 return TransactionSize::HalfWord; |
| 1946 case 4: |
| 1947 return TransactionSize::Word; |
| 1948 default: |
| 1949 UNREACHABLE(); |
| 1950 } |
| 1951 return TransactionSize::None; |
| 1952 } |
| 1953 |
// Simulate the exclusive acquire/release accesses (LDAXR / STLXR at
// byte/half/word widths). An exclusive load arms both the thread-local and
// the global exclusive monitors; an exclusive store writes memory and sets
// Rs to 0 only when both monitors still hold a matching reservation,
// otherwise it sets Rs to 1 and leaves memory untouched.
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
  unsigned rs = instr->Rs();  // Status register for a store-exclusive.
  unsigned rt = instr->Rt();  // Data register.
  unsigned rn = instr->Rn();  // Base address register.
  LoadStoreAcquireReleaseOp op = static_cast<LoadStoreAcquireReleaseOp>(
      instr->Mask(LoadStoreAcquireReleaseMask));
  bool is_acquire_release = instr->LoadStoreXAcquireRelease();
  bool is_exclusive = !instr->LoadStoreXNotExclusive();
  bool is_load = instr->LoadStoreXLoad();
  bool is_pair = instr->LoadStoreXPair();
  // Only the exclusive acquire/release forms are modelled here.
  DCHECK(is_acquire_release);
  DCHECK(is_exclusive);  // non exclusive unimplemented
  DCHECK(!is_pair);      // pair unimplemented
  unsigned access_size = 1 << instr->LoadStoreXSizeLog2();
  uintptr_t address = LoadStoreAddress(rn, 0, AddrMode::Offset);
  // Exclusive accesses must be naturally aligned.
  DCHECK(address % access_size == 0);
  if (is_load) {
    // Arm both monitors under the global monitor's mutex, then read.
    base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
    local_monitor_.NotifyLoadExcl(address, get_transaction_size(access_size));
    global_monitor_.Pointer()->NotifyLoadExcl_Locked(
        address, &global_monitor_processor_);
    switch (op) {
      case LDAXR_b:
        set_wreg_no_log(rt, MemoryRead<uint8_t>(address));
        break;
      case LDAXR_h:
        set_wreg_no_log(rt, MemoryRead<uint16_t>(address));
        break;
      case LDAXR_w:
        set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
        break;
      default:
        UNIMPLEMENTED();
    }
    LogRead(address, access_size, rt);
  } else {
    base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
    // The store takes effect only if BOTH monitors report a valid
    // reservation; note the short-circuit order: the local monitor is
    // consulted (and consumed) before the global one.
    if (local_monitor_.NotifyStoreExcl(address,
                                       get_transaction_size(access_size)) &&
        global_monitor_.Pointer()->NotifyStoreExcl_Locked(
            address, &global_monitor_processor_)) {
      switch (op) {
        case STLXR_b:
          MemoryWrite<uint8_t>(address, wreg(rt));
          break;
        case STLXR_h:
          MemoryWrite<uint16_t>(address, wreg(rt));
          break;
        case STLXR_w:
          MemoryWrite<uint32_t>(address, wreg(rt));
          break;
        default:
          UNIMPLEMENTED();
      }
      LogWrite(address, access_size, rt);
      set_wreg(rs, 0);  // 0 == store-exclusive succeeded.
    } else {
      set_wreg(rs, 1);  // 1 == store-exclusive failed; memory unchanged.
    }
  }
}
1912 | 2015 |
1913 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) { | 2016 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) { |
1914 if ((address >= stack_limit_) && (address < stack)) { | 2017 if ((address >= stack_limit_) && (address < stack)) { |
1915 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); | 2018 fprintf(stream_, "ACCESS BELOW STACK POINTER:\n"); |
1916 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n", | 2019 fprintf(stream_, " sp is here: 0x%016" PRIx64 "\n", |
1917 static_cast<uint64_t>(stack)); | 2020 static_cast<uint64_t>(stack)); |
1918 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n", | 2021 fprintf(stream_, " access was here: 0x%016" PRIx64 "\n", |
1919 static_cast<uint64_t>(address)); | 2022 static_cast<uint64_t>(address)); |
1920 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n", | 2023 fprintf(stream_, " stack limit is here: 0x%016" PRIx64 "\n", |
(...skipping 1949 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3870 | 3973 |
3871 // The printf parameters are inlined in the code, so skip them. | 3974 // The printf parameters are inlined in the code, so skip them. |
3872 set_pc(instr->InstructionAtOffset(kPrintfLength)); | 3975 set_pc(instr->InstructionAtOffset(kPrintfLength)); |
3873 | 3976 |
3874 // Set LR as if we'd just called a native printf function. | 3977 // Set LR as if we'd just called a native printf function. |
3875 set_lr(pc()); | 3978 set_lr(pc()); |
3876 | 3979 |
3877 delete[] format; | 3980 delete[] format; |
3878 } | 3981 } |
3879 | 3982 |
// A freshly-constructed local monitor starts in the open (no reservation)
// state with no tagged address.
Simulator::LocalMonitor::LocalMonitor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      size_(TransactionSize::None) {}
| 3987 |
// Drop any exclusive reservation and return to the open state.
void Simulator::LocalMonitor::Clear() {
  access_state_ = MonitorAccess::Open;
  tagged_addr_ = 0;
  size_ = TransactionSize::None;
}
| 3993 |
// An ordinary (non-exclusive) load observed by this processor.
void Simulator::LocalMonitor::NotifyLoad(uintptr_t addr) {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive load could clear the local monitor. As a result, it's
    // most strict to unconditionally clear the local monitor on load.
    Clear();
  }
}
| 4001 |
// An exclusive load (LDAXR) arms the monitor for |addr| with the given
// transaction width; any previous reservation is simply overwritten.
void Simulator::LocalMonitor::NotifyLoadExcl(uintptr_t addr,
                                             TransactionSize size) {
  access_state_ = MonitorAccess::Exclusive;
  tagged_addr_ = addr;
  size_ = size;
}
| 4008 |
// An ordinary (non-exclusive) store observed by this processor.
void Simulator::LocalMonitor::NotifyStore(uintptr_t addr) {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive store could clear the local monitor. As a result, it's
    // most strict to unconditionally clear the local monitor on store.
    Clear();
  }
}
| 4016 |
| 4017 bool Simulator::LocalMonitor::NotifyStoreExcl(uintptr_t addr, |
| 4018 TransactionSize size) { |
| 4019 if (access_state_ == MonitorAccess::Exclusive) { |
| 4020 // It is allowed for a processor to require that the address matches |
| 4021 // exactly (B2.10.1), so this comparison does not mask addr. |
| 4022 if (addr == tagged_addr_ && size_ == size) { |
| 4023 Clear(); |
| 4024 return true; |
| 4025 } else { |
| 4026 // It is implementation-defined whether an exclusive store to a |
| 4027 // non-tagged address will update memory. As a result, it's most strict |
| 4028 // to unconditionally clear the local monitor. |
| 4029 Clear(); |
| 4030 return false; |
| 4031 } |
| 4032 } else { |
| 4033 DCHECK(access_state_ == MonitorAccess::Open); |
| 4034 return false; |
| 4035 } |
| 4036 } |
| 4037 |
// Per-simulator node in the global monitor's intrusive doubly-linked list.
// Starts open, unlinked (null neighbours), with a zeroed stxr failure
// counter.
Simulator::GlobalMonitor::Processor::Processor()
    : access_state_(MonitorAccess::Open),
      tagged_addr_(0),
      next_(nullptr),
      prev_(nullptr),
      failure_counter_(0) {}
| 4044 |
// Drop this processor's reservation. Caller must hold the monitor's mutex
// (the _Locked suffix convention).
void Simulator::GlobalMonitor::Processor::Clear_Locked() {
  access_state_ = MonitorAccess::Open;
  tagged_addr_ = 0;
}
| 4049 |
// Arm this processor's reservation for |addr|. Caller must hold the
// monitor's mutex.
void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(
    uintptr_t addr) {
  access_state_ = MonitorAccess::Exclusive;
  tagged_addr_ = addr;
}
| 4055 |
// An ordinary store by any processor invalidates an armed reservation;
// |is_requesting_processor| is deliberately unused — the clear is
// unconditional regardless of who stored.
void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
    uintptr_t addr, bool is_requesting_processor) {
  if (access_state_ == MonitorAccess::Exclusive) {
    // A non exclusive store could clear the global monitor. As a result, it's
    // most strict to unconditionally clear global monitors on store.
    Clear_Locked();
  }
}
| 4064 |
// Attempt an exclusive store. For the requesting processor, success requires
// an armed reservation at exactly this address (modulo the injected random
// failures); for other processors the call only knocks out conflicting
// reservations and always returns false. Caller must hold the monitor's
// mutex.
bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
    uintptr_t addr, bool is_requesting_processor) {
  if (access_state_ == MonitorAccess::Exclusive) {
    if (is_requesting_processor) {
      // It is allowed for a processor to require that the address matches
      // exactly (B2.10.2), so this comparison does not mask addr.
      if (addr == tagged_addr_) {
        Clear_Locked();
        // Introduce occasional stxr failures. This is to simulate the
        // behavior of hardware, which can randomly fail due to background
        // cache evictions.
        if (failure_counter_++ >= kMaxFailureCounter) {
          failure_counter_ = 0;
          return false;
        } else {
          return true;
        }
      }
    } else if ((addr & kExclusiveTaggedAddrMask) ==
               (tagged_addr_ & kExclusiveTaggedAddrMask)) {
      // Check the masked addresses when responding to a successful lock by
      // another processor so the implementation is more conservative (i.e. the
      // granularity of locking is as large as possible.)
      Clear_Locked();
      return false;
    }
  }
  return false;
}
| 4094 |
| 4095 Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {} |
| 4096 |
// Record an exclusive load for |processor|, linking it into the list if this
// is the first time it is seen. Caller must hold the monitor's mutex.
void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(uintptr_t addr,
                                                     Processor* processor) {
  processor->NotifyLoadExcl_Locked(addr);
  PrependProcessor_Locked(processor);
}
| 4102 |
| 4103 void Simulator::GlobalMonitor::NotifyStore_Locked(uintptr_t addr, |
| 4104 Processor* processor) { |
| 4105 // Notify each processor of the store operation. |
| 4106 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 4107 bool is_requesting_processor = iter == processor; |
| 4108 iter->NotifyStore_Locked(addr, is_requesting_processor); |
| 4109 } |
| 4110 } |
| 4111 |
| 4112 bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(uintptr_t addr, |
| 4113 Processor* processor) { |
| 4114 DCHECK(IsProcessorInLinkedList_Locked(processor)); |
| 4115 if (processor->NotifyStoreExcl_Locked(addr, true)) { |
| 4116 // Notify the other processors that this StoreExcl succeeded. |
| 4117 for (Processor* iter = head_; iter; iter = iter->next_) { |
| 4118 if (iter != processor) { |
| 4119 iter->NotifyStoreExcl_Locked(addr, false); |
| 4120 } |
| 4121 } |
| 4122 return true; |
| 4123 } else { |
| 4124 return false; |
| 4125 } |
| 4126 } |
| 4127 |
// A processor is linked iff it is the head or has a neighbour; this relies
// on prev_/next_ being reset to null whenever a node is unlinked. Caller
// must hold the monitor's mutex.
bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
    Processor* processor) const {
  return head_ == processor || processor->next_ || processor->prev_;
}
| 4132 |
| 4133 void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) { |
| 4134 if (IsProcessorInLinkedList_Locked(processor)) { |
| 4135 return; |
| 4136 } |
| 4137 |
| 4138 if (head_) { |
| 4139 head_->prev_ = processor; |
| 4140 } |
| 4141 processor->prev_ = nullptr; |
| 4142 processor->next_ = head_; |
| 4143 head_ = processor; |
| 4144 } |
| 4145 |
// Unlink |processor| from the list (no-op when absent). Unlike the _Locked
// helpers, this takes the monitor's mutex itself — it is called from the
// Simulator destructor, outside any existing lock.
void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
  base::LockGuard<base::Mutex> lock_guard(&mutex);
  if (!IsProcessorInLinkedList_Locked(processor)) {
    return;
  }

  // Standard doubly-linked unlink; null out the node's own links so
  // IsProcessorInLinkedList_Locked() reports it as absent afterwards.
  if (processor->prev_) {
    processor->prev_->next_ = processor->next_;
  } else {
    head_ = processor->next_;
  }
  if (processor->next_) {
    processor->next_->prev_ = processor->prev_;
  }
  processor->prev_ = nullptr;
  processor->next_ = nullptr;
}
3880 | 4163 |
3881 #endif // USE_SIMULATOR | 4164 #endif // USE_SIMULATOR |
3882 | 4165 |
3883 } // namespace internal | 4166 } // namespace internal |
3884 } // namespace v8 | 4167 } // namespace v8 |
3885 | 4168 |
3886 #endif // V8_TARGET_ARCH_ARM64 | 4169 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |