// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ipc/ipc_perftest_support.h"

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <memory>
#include <string>

#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/pickle.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/test/perf_time_logger.h"
#include "base/test/test_io_thread.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "ipc/ipc_channel.h"
#include "ipc/ipc_channel_proxy.h"
#include "ipc/ipc_descriptors.h"
#include "ipc/ipc_message_utils.h"
#include "ipc/ipc_sender.h"
#include "mojo/edk/test/scoped_ipc_support.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace IPC {
namespace test {

// Avoid core 0 due to conflicts with Intel's Power Gadget.
// Setting thread affinity will fail harmlessly on single/dual core machines.
const int kSharedCore = 2;

// This class simply collects stats about abstract "events" (each of which has
// a start time and an end time).
class EventTimeTracker {
 public:
  explicit EventTimeTracker(const char* name)
      : name_(name),
        count_(0) {
  }

  void AddEvent(const base::TimeTicks& start, const base::TimeTicks& end) {
    DCHECK(end >= start);
    count_++;
    base::TimeDelta duration = end - start;
    total_duration_ += duration;
    max_duration_ = std::max(max_duration_, duration);
  }

  void ShowResults() const {
    VLOG(1) << name_ << " count: " << count_;
    VLOG(1) << name_ << " total duration: "
            << total_duration_.InMillisecondsF() << " ms";
    VLOG(1) << name_ << " average duration: "
            << (total_duration_.InMillisecondsF() / static_cast<double>(count_))
            << " ms";
    VLOG(1) << name_ << " maximum duration: "
            << max_duration_.InMillisecondsF() << " ms";
  }

  void Reset() {
    count_ = 0;
    total_duration_ = base::TimeDelta();
    max_duration_ = base::TimeDelta();
  }

 private:
  const std::string name_;

  uint64_t count_;
  base::TimeDelta total_duration_;
  base::TimeDelta max_duration_;

  DISALLOW_COPY_AND_ASSIGN(EventTimeTracker);
};

// This channel listener echoes every message back to its sender with a fresh
// timestamp. Each message carries a timestamp, a message id, and a string
// payload; when the payload "quit" arrives, the listener quits its message
// loop.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener()
      : channel_(nullptr),
        latency_tracker_("Client messages") {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override {
    VLOG(1) << "Client listener down";
    latency_tracker_.ShowResults();
  }

  void Init(Channel* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);

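    // Messages are pickled as <send time (int64), message id (int), string
    // payload>; unpack the fields in that order before echoing them back.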
    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    base::StringPiece payload;
    EXPECT_TRUE(iter.ReadStringPiece(&payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (payload == "hello") {
      latency_tracker_.Reset();
    } else if (payload == "quit") {
      latency_tracker_.ShowResults();
      base::MessageLoop::current()->QuitWhenIdle();
      return true;
    } else {
      // Don't track hello and quit messages.
      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);
    }

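    // Echo the message back with the current time so the server can measure
    // the return leg of the round trip.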
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(msgid);
    msg->WriteString(payload);
    channel_->Send(msg);
    return true;
  }

 private:
  Channel* channel_;
  EventTimeTracker latency_tracker_;
};

class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(nullptr),
        msg_count_(0),
        msg_size_(0),
        count_down_(0),
        latency_tracker_("Server messages") {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override {
    VLOG(1) << "Server listener down";
  }

  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

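    // The client reflects messages in the same pickled format: its send
    // timestamp, the echoed message id, and the echoed payload.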
    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    std::string reflected_payload;
    EXPECT_TRUE(iter.ReadString(&reflected_payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (reflected_payload == "hello") {
      // Start timing on hello.
      latency_tracker_.Reset();
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%u",
                             label_.c_str(),
                             msg_count_,
                             static_cast<unsigned>(msg_size_));
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), reflected_payload.size());

      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        latency_tracker_.ShowResults();
        base::MessageLoop::current()->QuitWhenIdle();
        return true;
      }
    }

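    // Send the next ping: a fresh timestamp, the remaining count as the id,
    // and the full payload.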
    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(count_down_);
    msg->WriteString(payload_);
    sender_->Send(msg);
    return true;
  }

 private:
  std::string label_;
  Sender* sender_;
  int msg_count_;
  size_t msg_size_;

  int count_down_;
  std::string payload_;
  EventTimeTracker latency_tracker_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;
};

IPCChannelPerfTestBase::IPCChannelPerfTestBase() = default;
IPCChannelPerfTestBase::~IPCChannelPerfTestBase() = default;

std::vector<PingPongTestParams>
IPCChannelPerfTestBase::GetDefaultTestParams() {
  // Test several sizes. We use 12^N for message size, and limit the message
  // count to keep the test duration reasonable.
#ifdef NDEBUG
  const int kMultiplier = 100;
#else
  // Debug builds on Windows run these tests orders of magnitude more slowly.
  const int kMultiplier = 1;
#endif
  std::vector<PingPongTestParams> list;
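  // Sizes grow as 12^N; counts shrink so that larger payloads run
  // proportionally fewer iterations.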
  list.push_back(PingPongTestParams(12, 500 * kMultiplier));
  list.push_back(PingPongTestParams(144, 500 * kMultiplier));
  list.push_back(PingPongTestParams(1728, 500 * kMultiplier));
  list.push_back(PingPongTestParams(20736, 120 * kMultiplier));
  list.push_back(PingPongTestParams(248832, 10 * kMultiplier));
  return list;
}

void IPCChannelPerfTestBase::RunTestChannelPingPong(
    const std::vector<PingPongTestParams>& params) {
  auto message_loop = base::MakeUnique<base::MessageLoopForIO>();
  mojo::edk::test::ScopedIPCSupport ipc_support(message_loop->task_runner());
  InitWithCustomMessageLoop("MojoPerfTestClient", std::move(message_loop));

  // Set up IPC channel and start client.
  PerformanceChannelListener listener("Channel");
  CreateChannel(&listener);
  listener.Init(channel());
  ASSERT_TRUE(ConnectChannel());

  LockThreadAffinity thread_locker(kSharedCore);
  for (size_t i = 0; i < params.size(); i++) {
    listener.SetTestParams(params[i].message_count(),
                           params[i].message_size());

    // This initial message will kick-start the ping-pong of messages.
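    // Message(routing_id, type, priority): routing id 0 and message type 2
    // are placeholder values for this test; the listeners never inspect them.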
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("hello");
    sender()->Send(message);

    // Run message loop.
    base::RunLoop().Run();
  }

  // Send quit message.
  Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
  message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
  message->WriteInt(-1);
  message->WriteString("quit");
  sender()->Send(message);

  EXPECT_TRUE(WaitForClientShutdown());
  DestroyChannel();
}

void IPCChannelPerfTestBase::RunTestChannelProxyPingPong(
    const std::vector<PingPongTestParams>& params) {
  io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));
  {
    auto message_loop = base::MakeUnique<base::MessageLoopForIO>();
    mojo::edk::test::ScopedIPCSupport ipc_support(io_thread_->task_runner());
    InitWithCustomMessageLoop("MojoPerfTestClient", std::move(message_loop));

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("ChannelProxy");
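    // The proxy performs channel I/O on the dedicated IO thread while
    // dispatching listener callbacks back on this thread.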
    auto channel_proxy = IPC::ChannelProxy::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner());
    listener.Init(channel_proxy.get());

    LockThreadAffinity thread_locker(kSharedCore);
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size());

      // This initial message will kick-start the ping-pong of messages.
      Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
      message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
      message->WriteInt(-1);
      message->WriteString("hello");
      channel_proxy->Send(message);

      // Run message loop.
      base::RunLoop().Run();
    }

    // Send quit message.
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("quit");
    channel_proxy->Send(message);

    EXPECT_TRUE(WaitForClientShutdown());
    channel_proxy.reset();
  }

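  // The proxy and ScopedIPCSupport above must be destroyed (end of scope)
  // before shutting down the IO thread they use.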
  io_thread_.reset();
}

PingPongTestClient::PingPongTestClient()
    : listener_(new ChannelReflectorListener()) {
}

PingPongTestClient::~PingPongTestClient() {
}

int PingPongTestClient::RunMain() {
  LockThreadAffinity thread_locker(kSharedCore);
  std::unique_ptr<Channel> channel = CreateChannel(listener_.get());
  listener_->Init(channel.get());
  CHECK(channel->Connect());

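  // ChannelReflectorListener quits this loop once the "quit" payload arrives.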
  base::RunLoop().Run();
  return 0;
}

scoped_refptr<base::TaskRunner> PingPongTestClient::task_runner() {
  return main_message_loop_.task_runner();
}

LockThreadAffinity::LockThreadAffinity(int cpu_number)
    : affinity_set_ok_(false) {
#if defined(OS_WIN)
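  // SetThreadAffinityMask returns the previous mask (0 on failure); keep it
  // so the destructor can restore the original affinity.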
  const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
  old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
  affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
  cpu_set_t cpuset;
  CPU_ZERO(&cpuset);
  CPU_SET(cpu_number, &cpuset);
  auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, get_result);
  auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
  // Check for get_result failure, even though it should always succeed.
  affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
  if (!affinity_set_ok_)
    LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
}

LockThreadAffinity::~LockThreadAffinity() {
  if (!affinity_set_ok_)
    return;
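  // Restore the affinity mask saved in the constructor.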
#if defined(OS_WIN)
  auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
  DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
  auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
  DCHECK_EQ(0, set_result);
#endif
}

}  // namespace test
}  // namespace IPC