| Index: src/arm/simulator-arm.cc
|
| diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
|
| index 39e7a8e837536443266c43075e3daaa5f2733c47..e22a8b95eb3e6c77a55d48ffbc6b613d414ab6d8 100644
|
| --- a/src/arm/simulator-arm.cc
|
| +++ b/src/arm/simulator-arm.cc
|
| @@ -22,6 +22,10 @@
|
| namespace v8 {
|
| namespace internal {
|
|
|
| +// static
|
| +base::LazyInstance<Simulator::GlobalMonitor>::type Simulator::global_monitor_ =
|
| + LAZY_INSTANCE_INITIALIZER;
|
| +
|
| // This macro provides a platform independent use of sscanf. The reason for
|
| // SScanF not being implemented in a platform independent way through
|
| // ::v8::internal::OS in the same way as SNPrintF is that the
|
| @@ -710,9 +714,10 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
|
| last_debugger_input_ = NULL;
|
| }
|
|
|
| -
|
| -Simulator::~Simulator() { free(stack_); }
|
| -
|
| +Simulator::~Simulator() {
|
| + global_monitor_.Pointer()->RemoveProcessor(&global_monitor_processor_);
|
| + free(stack_);
|
| +}
|
|
|
| // When the generated code calls an external reference we need to catch that in
|
| // the simulator. The external reference will be a function compiled for the
|
| @@ -1040,78 +1045,166 @@ void Simulator::TrashCallerSaveRegisters() {
|
| int Simulator::ReadW(int32_t addr, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
|
| return *ptr;
|
| }
|
|
|
| +int Simulator::ReadExW(int32_t addr, Instruction* instr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoadExcl(addr, TransactionSize::Word);
|
| + global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
| + &global_monitor_processor_);
|
| + intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
|
| + return *ptr;
|
| +}
|
|
|
| void Simulator::WriteW(int32_t addr, int value, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
|
| *ptr = value;
|
| }
|
|
|
| +int Simulator::WriteExW(int32_t addr, int value, Instruction* instr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Word) &&
|
| + global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
| + addr, &global_monitor_processor_)) {
|
| + intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
|
| + *ptr = value;
|
| + return 0;
|
| + } else {
|
| + return 1;
|
| + }
|
| +}
|
|
|
| uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
|
| return *ptr;
|
| }
|
|
|
| -
|
| int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| int16_t* ptr = reinterpret_cast<int16_t*>(addr);
|
| return *ptr;
|
| }
|
|
|
| +uint16_t Simulator::ReadExHU(int32_t addr, Instruction* instr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoadExcl(addr, TransactionSize::HalfWord);
|
| + global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
| + &global_monitor_processor_);
|
| + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
|
| + return *ptr;
|
| +}
|
|
|
| void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
|
| *ptr = value;
|
| }
|
|
|
| -
|
| void Simulator::WriteH(int32_t addr, int16_t value, Instruction* instr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| int16_t* ptr = reinterpret_cast<int16_t*>(addr);
|
| *ptr = value;
|
| }
|
|
|
| +int Simulator::WriteExH(int32_t addr, uint16_t value, Instruction* instr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::HalfWord) &&
|
| + global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
| + addr, &global_monitor_processor_)) {
|
| + uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
|
| + *ptr = value;
|
| + return 0;
|
| + } else {
|
| + return 1;
|
| + }
|
| +}
|
|
|
| uint8_t Simulator::ReadBU(int32_t addr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
|
| return *ptr;
|
| }
|
|
|
| -
|
| int8_t Simulator::ReadB(int32_t addr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| int8_t* ptr = reinterpret_cast<int8_t*>(addr);
|
| return *ptr;
|
| }
|
|
|
| +uint8_t Simulator::ReadExBU(int32_t addr) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoadExcl(addr, TransactionSize::Byte);
|
| + global_monitor_.Pointer()->NotifyLoadExcl_Locked(addr,
|
| + &global_monitor_processor_);
|
| + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
|
| + return *ptr;
|
| +}
|
|
|
| void Simulator::WriteB(int32_t addr, uint8_t value) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
|
| *ptr = value;
|
| }
|
|
|
| -
|
| void Simulator::WriteB(int32_t addr, int8_t value) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| int8_t* ptr = reinterpret_cast<int8_t*>(addr);
|
| *ptr = value;
|
| }
|
|
|
| +int Simulator::WriteExB(int32_t addr, uint8_t value) {
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + if (local_monitor_.NotifyStoreExcl(addr, TransactionSize::Byte) &&
|
| + global_monitor_.Pointer()->NotifyStoreExcl_Locked(
|
| + addr, &global_monitor_processor_)) {
|
| + uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
|
| + *ptr = value;
|
| + return 0;
|
| + } else {
|
| + return 1;
|
| + }
|
| +}
|
|
|
| int32_t* Simulator::ReadDW(int32_t addr) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyLoad(addr);
|
| int32_t* ptr = reinterpret_cast<int32_t*>(addr);
|
| return ptr;
|
| }
|
| @@ -1120,6 +1213,10 @@ int32_t* Simulator::ReadDW(int32_t addr) {
|
| void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
|
| // All supported ARM targets allow unaligned accesses, so we don't need to
|
| // check the alignment here.
|
| + base::LockGuard<base::Mutex> lock_guard(&global_monitor_.Pointer()->mutex);
|
| + local_monitor_.NotifyStore(addr);
|
| + global_monitor_.Pointer()->NotifyStore_Locked(addr,
|
| + &global_monitor_processor_);
|
| int32_t* ptr = reinterpret_cast<int32_t*>(addr);
|
| *ptr++ = value1;
|
| *ptr = value2;
|
| @@ -2073,7 +2170,72 @@ void Simulator::DecodeType01(Instruction* instr) {
|
| }
|
| }
|
| } else {
|
| - UNIMPLEMENTED(); // Not used by V8.
|
| + if (instr->Bits(24, 23) == 3) {
|
| + if (instr->Bit(20) == 1) {
|
| + // ldrex
|
| + int rt = instr->RtValue();
|
| + int rn = instr->RnValue();
|
| + int32_t addr = get_register(rn);
|
| + switch (instr->Bits(22, 21)) {
|
| + case 0: {
|
| + // Format(instr, "ldrex'cond 'rt, ['rn]");
|
| + int value = ReadExW(addr, instr);
|
| + set_register(rt, value);
|
| + break;
|
| + }
|
| + case 2: {
|
| + // Format(instr, "ldrexb'cond 'rt, ['rn]");
|
| + uint8_t value = ReadExBU(addr);
|
| + set_register(rt, value);
|
| + break;
|
| + }
|
| + case 3: {
|
| + // Format(instr, "ldrexh'cond 'rt, ['rn]");
|
| + uint16_t value = ReadExHU(addr, instr);
|
| + set_register(rt, value);
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| + } else {
|
| + // The instruction is documented as strex rd, rt, [rn], but the
|
| + // "rt" register is using the rm bits.
|
| + int rd = instr->RdValue();
|
| + int rt = instr->RmValue();
|
| + int rn = instr->RnValue();
|
| + int32_t addr = get_register(rn);
|
| + switch (instr->Bits(22, 21)) {
|
| + case 0: {
|
| + // Format(instr, "strex'cond 'rd, 'rm, ['rn]");
|
| + int value = get_register(rt);
|
| + int status = WriteExW(addr, value, instr);
|
| + set_register(rd, status);
|
| + break;
|
| + }
|
| + case 2: {
|
| + // Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
|
| + uint8_t value = get_register(rt);
|
| + int status = WriteExB(addr, value);
|
| + set_register(rd, status);
|
| + break;
|
| + }
|
| + case 3: {
|
| + // Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
|
| + uint16_t value = get_register(rt);
|
| + int status = WriteExH(addr, value, instr);
|
| + set_register(rd, status);
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| + }
|
| + } else {
|
| + UNIMPLEMENTED(); // Not used by V8.
|
| + }
|
| }
|
| } else {
|
| // extra load/store instructions
|
| @@ -5345,6 +5507,207 @@ uintptr_t Simulator::PopAddress() {
|
| return address;
|
| }
|
|
|
| +Simulator::LocalMonitor::LocalMonitor()
|
| + : access_state_(MonitorAccess::Open),
|
| + tagged_addr_(0),
|
| + size_(TransactionSize::None) {}
|
| +
|
| +void Simulator::LocalMonitor::Clear() {
|
| + access_state_ = MonitorAccess::Open;
|
| + tagged_addr_ = 0;
|
| + size_ = TransactionSize::None;
|
| +}
|
| +
|
| +void Simulator::LocalMonitor::NotifyLoad(int32_t addr) {
|
| + if (access_state_ == MonitorAccess::Exclusive) {
|
| + // A load could cause a cache eviction which will affect the monitor. As a
|
| + // result, it's most strict to unconditionally clear the local monitor on
|
| + // load.
|
| + Clear();
|
| + }
|
| +}
|
| +
|
| +void Simulator::LocalMonitor::NotifyLoadExcl(int32_t addr,
|
| + TransactionSize size) {
|
| + access_state_ = MonitorAccess::Exclusive;
|
| + tagged_addr_ = addr;
|
| + size_ = size;
|
| +}
|
| +
|
| +void Simulator::LocalMonitor::NotifyStore(int32_t addr) {
|
| + if (access_state_ == MonitorAccess::Exclusive) {
|
| + // It is implementation-defined whether a non-exclusive store to an address
|
| + // covered by the local monitor during exclusive access transitions to open
|
| + // or exclusive access. See ARM DDI 0406C.b, A3.4.1.
|
| + //
|
| + // However, a store could cause a cache eviction which will affect the
|
| + // monitor. As a result, it's most strict to unconditionally clear the
|
| + // local monitor on store.
|
| + Clear();
|
| + }
|
| +}
|
| +
|
| +bool Simulator::LocalMonitor::NotifyStoreExcl(int32_t addr,
|
| + TransactionSize size) {
|
| + if (access_state_ == MonitorAccess::Exclusive) {
|
| + // It is allowed for a processor to require that the address matches
|
| + // exactly (A3.4.5), so this comparison does not mask addr.
|
| + if (addr == tagged_addr_ && size_ == size) {
|
| + Clear();
|
| + return true;
|
| + } else {
|
| + // It is implementation-defined whether an exclusive store to a
|
| + // non-tagged address will update memory. Behavior is unpredictable if
|
| + // the transaction size of the exclusive store differs from that of the
|
| + // exclusive load. See ARM DDI 0406C.b, A3.4.5.
|
| + Clear();
|
| + return false;
|
| + }
|
| + } else {
|
| + DCHECK(access_state_ == MonitorAccess::Open);
|
| + return false;
|
| + }
|
| +}
|
| +
|
| +Simulator::GlobalMonitor::Processor::Processor()
|
| + : access_state_(MonitorAccess::Open),
|
| + tagged_addr_(0),
|
| + next_(nullptr),
|
| + prev_(nullptr),
|
| + failure_counter_(0) {}
|
| +
|
| +void Simulator::GlobalMonitor::Processor::Clear_Locked() {
|
| + access_state_ = MonitorAccess::Open;
|
| + tagged_addr_ = 0;
|
| +}
|
| +
|
| +void Simulator::GlobalMonitor::Processor::NotifyLoadExcl_Locked(int32_t addr) {
|
| + access_state_ = MonitorAccess::Exclusive;
|
| + tagged_addr_ = addr;
|
| +}
|
| +
|
| +void Simulator::GlobalMonitor::Processor::NotifyStore_Locked(
|
| + int32_t addr, bool is_requesting_processor) {
|
| + if (access_state_ == MonitorAccess::Exclusive) {
|
| + // It is implementation-defined whether a non-exclusive store by the
|
| + // requesting processor to an address covered by the global monitor
|
| + // during exclusive access transitions to open or exclusive access.
|
| + //
|
| + // For any other processor, the access state always transitions to open
|
| + // access.
|
| + //
|
| + // See ARM DDI 0406C.b, A3.4.2.
|
| + //
|
| + // However, similar to the local monitor, it is possible that a store
|
| + // caused a cache eviction, which can affect the monitor, so
|
| + // conservatively, we always clear the monitor.
|
| + Clear_Locked();
|
| + }
|
| +}
|
| +
|
| +bool Simulator::GlobalMonitor::Processor::NotifyStoreExcl_Locked(
|
| + int32_t addr, bool is_requesting_processor) {
|
| + if (access_state_ == MonitorAccess::Exclusive) {
|
| + if (is_requesting_processor) {
|
| + // It is allowed for a processor to require that the address matches
|
| + // exactly (A3.4.5), so this comparison does not mask addr.
|
| + if (addr == tagged_addr_) {
|
| + // The access state for the requesting processor after a successful
|
| + // exclusive store is implementation-defined, but according to the ARM
|
| + // DDI, this has no effect on the subsequent operation of the global
|
| + // monitor.
|
| + Clear_Locked();
|
| + // Introduce occasional strex failures. This is to simulate the
|
| + // behavior of hardware, which can randomly fail due to background
|
| + // cache evictions.
|
| + if (failure_counter_++ >= kMaxFailureCounter) {
|
| + failure_counter_ = 0;
|
| + return false;
|
| + } else {
|
| + return true;
|
| + }
|
| + }
|
| + } else if ((addr & kExclusiveTaggedAddrMask) ==
|
| + (tagged_addr_ & kExclusiveTaggedAddrMask)) {
|
| + // Check the masked addresses when responding to a successful lock by
|
| + // another processor so the implementation is more conservative (i.e. the
|
| + // granularity of locking is as large as possible.)
|
| + Clear_Locked();
|
| + return false;
|
| + }
|
| + }
|
| + return false;
|
| +}
|
| +
|
| +Simulator::GlobalMonitor::GlobalMonitor() : head_(nullptr) {}
|
| +
|
| +void Simulator::GlobalMonitor::NotifyLoadExcl_Locked(int32_t addr,
|
| + Processor* processor) {
|
| + processor->NotifyLoadExcl_Locked(addr);
|
| + PrependProcessor_Locked(processor);
|
| +}
|
| +
|
| +void Simulator::GlobalMonitor::NotifyStore_Locked(int32_t addr,
|
| + Processor* processor) {
|
| + // Notify each processor of the store operation.
|
| + for (Processor* iter = head_; iter; iter = iter->next_) {
|
| + bool is_requesting_processor = iter == processor;
|
| + iter->NotifyStore_Locked(addr, is_requesting_processor);
|
| + }
|
| +}
|
| +
|
| +bool Simulator::GlobalMonitor::NotifyStoreExcl_Locked(int32_t addr,
|
| + Processor* processor) {
|
| + DCHECK(IsProcessorInLinkedList_Locked(processor));
|
| + if (processor->NotifyStoreExcl_Locked(addr, true)) {
|
| + // Notify the other processors that this StoreExcl succeeded.
|
| + for (Processor* iter = head_; iter; iter = iter->next_) {
|
| + if (iter != processor) {
|
| + iter->NotifyStoreExcl_Locked(addr, false);
|
| + }
|
| + }
|
| + return true;
|
| + } else {
|
| + return false;
|
| + }
|
| +}
|
| +
|
| +bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
|
| + Processor* processor) const {
|
| + return head_ == processor || processor->next_ || processor->prev_;
|
| +}
|
| +
|
| +void Simulator::GlobalMonitor::PrependProcessor_Locked(Processor* processor) {
|
| + if (IsProcessorInLinkedList_Locked(processor)) {
|
| + return;
|
| + }
|
| +
|
| + if (head_) {
|
| + head_->prev_ = processor;
|
| + }
|
| + processor->prev_ = nullptr;
|
| + processor->next_ = head_;
|
| + head_ = processor;
|
| +}
|
| +
|
| +void Simulator::GlobalMonitor::RemoveProcessor(Processor* processor) {
|
| + base::LockGuard<base::Mutex> lock_guard(&mutex);
|
| + if (!IsProcessorInLinkedList_Locked(processor)) {
|
| + return;
|
| + }
|
| +
|
| + if (processor->prev_) {
|
| + processor->prev_->next_ = processor->next_;
|
| + } else {
|
| + head_ = processor->next_;
|
| + }
|
| + if (processor->next_) {
|
| + processor->next_->prev_ = processor->prev_;
|
| + }
|
| + processor->prev_ = nullptr;
|
| + processor->next_ = nullptr;
|
| +}
|
| +
|
| } // namespace internal
|
| } // namespace v8
|
|
|
|
|