Chromium Code Reviews

Diff: ipc/ipc_mojo_perftest.cc (patched file)

Issue 2608403002: Add some perf tests to compare Mojo overhead. (Closed)
Patch Set: review comment (created 3 years, 11 months ago)
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <stddef.h>
#include <memory>

#include "base/memory/ptr_util.h"
#include "base/process/process_metrics.h"
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/test/perf_time_logger.h"
#include "base/test/test_io_thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "ipc/ipc_channel_mojo.h"
#include "ipc/ipc_test.mojom.h"
#include "ipc/ipc_test_base.h"
#include "mojo/edk/embedder/embedder.h"
#include "mojo/edk/embedder/platform_channel_pair.h"
#include "mojo/edk/test/mojo_test_base.h"
#include "mojo/edk/test/multiprocess_test_helper.h"
#include "mojo/edk/test/scoped_ipc_support.h"
#include "mojo/public/cpp/bindings/binding.h"
#include "mojo/public/cpp/system/message_pipe.h"

namespace IPC {
namespace {

// This class simply collects stats about abstract "events" (each of which
// has a start time and an end time).
class EventTimeTracker {
 public:
  explicit EventTimeTracker(const char* name)
      : name_(name),
        count_(0) {
  }

  void AddEvent(const base::TimeTicks& start, const base::TimeTicks& end) {
    DCHECK(end >= start);
    count_++;
    base::TimeDelta duration = end - start;
    total_duration_ += duration;
    max_duration_ = std::max(max_duration_, duration);
  }

  void ShowResults() const {
    VLOG(1) << name_ << " count: " << count_;
    VLOG(1) << name_ << " total duration: "
            << total_duration_.InMillisecondsF() << " ms";
    VLOG(1) << name_ << " average duration: "
            << (total_duration_.InMillisecondsF() / static_cast<double>(count_))
            << " ms";
    VLOG(1) << name_ << " maximum duration: "
            << max_duration_.InMillisecondsF() << " ms";
  }

  void Reset() {
    count_ = 0;
    total_duration_ = base::TimeDelta();
    max_duration_ = base::TimeDelta();
  }

 private:
  const std::string name_;

  uint64_t count_;
  base::TimeDelta total_duration_;
  base::TimeDelta max_duration_;

  DISALLOW_COPY_AND_ASSIGN(EventTimeTracker);
};
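
// Illustrative usage of EventTimeTracker (a sketch, not part of the patch):
//
//   EventTimeTracker tracker("Round trip");
//   base::TimeTicks start = base::TimeTicks::Now();
//   // ... the event being measured ...
//   tracker.AddEvent(start, base::TimeTicks::Now());
//   tracker.ShowResults();  // Logs count, total, average, and max at VLOG(1).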

class PerformanceChannelListener : public Listener {
 public:
  explicit PerformanceChannelListener(const std::string& label)
      : label_(label),
        sender_(NULL),
        msg_count_(0),
        msg_size_(0),
        count_down_(0),
        latency_tracker_("Server messages") {
    VLOG(1) << "Server listener up";
  }

  ~PerformanceChannelListener() override {
    VLOG(1) << "Server listener down";
  }

  void Init(Sender* sender) {
    DCHECK(!sender_);
    sender_ = sender;
  }

  // Call this before running the message loop.
  void SetTestParams(int msg_count, size_t msg_size) {
    DCHECK_EQ(0, count_down_);
    msg_count_ = msg_count;
    msg_size_ = msg_size;
    count_down_ = msg_count_;
    payload_ = std::string(msg_size_, 'a');
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(sender_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    std::string reflected_payload;
    EXPECT_TRUE(iter.ReadString(&reflected_payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (reflected_payload == "hello") {
      // Start timing on hello.
      latency_tracker_.Reset();
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%u",
                             label_.c_str(),
                             msg_count_,
                             static_cast<unsigned>(msg_size_));
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), reflected_payload.size());

      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();  // Stop the perf timer now.
        latency_tracker_.ShowResults();
        base::MessageLoop::current()->QuitWhenIdle();
        return true;
      }
    }

    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(count_down_);
    msg->WriteString(payload_);
    sender_->Send(msg);
    return true;
  }

 private:
  std::string label_;
  Sender* sender_;
  int msg_count_;
  size_t msg_size_;

  int count_down_;
  std::string payload_;
  EventTimeTracker latency_tracker_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;
};
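
// Both PerformanceChannelListener (above) and ChannelReflectorListener
// (below) exchange raw IPC::Messages with the same ad-hoc layout: an int64
// send timestamp (base::TimeTicks internal value), an int message id, and a
// string payload, in that order.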

// This channel listener just replies to all messages with the exact same
// message. It assumes each message has one string parameter. When the string
// "quit" is sent, it will exit.
class ChannelReflectorListener : public Listener {
 public:
  ChannelReflectorListener()
      : channel_(NULL),
        latency_tracker_("Client messages") {
    VLOG(1) << "Client listener up";
  }

  ~ChannelReflectorListener() override {
    VLOG(1) << "Client listener down";
    latency_tracker_.ShowResults();
  }

  void Init(Channel* channel) {
    DCHECK(!channel_);
    channel_ = channel;
  }

  bool OnMessageReceived(const Message& message) override {
    CHECK(channel_);

    base::PickleIterator iter(message);
    int64_t time_internal;
    EXPECT_TRUE(iter.ReadInt64(&time_internal));
    int msgid;
    EXPECT_TRUE(iter.ReadInt(&msgid));
    base::StringPiece payload;
    EXPECT_TRUE(iter.ReadStringPiece(&payload));

    // Include message deserialization in latency.
    base::TimeTicks now = base::TimeTicks::Now();

    if (payload == "hello") {
      latency_tracker_.Reset();
    } else if (payload == "quit") {
      latency_tracker_.ShowResults();
      base::MessageLoop::current()->QuitWhenIdle();
      return true;
    } else {
      // Don't track hello and quit messages.
      latency_tracker_.AddEvent(
          base::TimeTicks::FromInternalValue(time_internal), now);
    }

    Message* msg = new Message(0, 2, Message::PRIORITY_NORMAL);
    msg->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    msg->WriteInt(msgid);
    msg->WriteString(payload);
    channel_->Send(msg);
    return true;
  }

 private:
  Channel* channel_;
  EventTimeTracker latency_tracker_;
};

// This class locks the current thread to a particular CPU core. This is
// important because otherwise the different threads and processes of these
// tests end up on different CPU cores which means that all of the cores are
// lightly loaded so the OS (Windows and Linux) fails to ramp up the CPU
// frequency, leading to unpredictable and often poor performance.
class LockThreadAffinity {
 public:
  explicit LockThreadAffinity(int cpu_number) : affinity_set_ok_(false) {
#if defined(OS_WIN)
    const DWORD_PTR thread_mask = static_cast<DWORD_PTR>(1) << cpu_number;
    old_affinity_ = SetThreadAffinityMask(GetCurrentThread(), thread_mask);
    affinity_set_ok_ = old_affinity_ != 0;
#elif defined(OS_LINUX)
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu_number, &cpuset);
    auto get_result = sched_getaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, get_result);
    auto set_result = sched_setaffinity(0, sizeof(cpuset), &cpuset);
    // Check for get_result failure, even though it should always succeed.
    affinity_set_ok_ = (set_result == 0) && (get_result == 0);
#endif
    if (!affinity_set_ok_)
      LOG(WARNING) << "Failed to set thread affinity to CPU " << cpu_number;
  }

  ~LockThreadAffinity() {
    if (!affinity_set_ok_)
      return;
#if defined(OS_WIN)
    auto set_result = SetThreadAffinityMask(GetCurrentThread(), old_affinity_);
    DCHECK_NE(0u, set_result);
#elif defined(OS_LINUX)
    auto set_result = sched_setaffinity(0, sizeof(old_cpuset_), &old_cpuset_);
    DCHECK_EQ(0, set_result);
#endif
  }

 private:
  bool affinity_set_ok_;
#if defined(OS_WIN)
  DWORD_PTR old_affinity_;
#elif defined(OS_LINUX)
  cpu_set_t old_cpuset_;
#endif

  DISALLOW_COPY_AND_ASSIGN(LockThreadAffinity);
};
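
// LockThreadAffinity is scoped: the pin takes effect for the object's
// lifetime and the previous affinity is restored in the destructor, so the
// tests below simply declare one on the stack around the timed region.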

class PingPongTestParams {
 public:
  PingPongTestParams(size_t size, int count)
      : message_size_(size), message_count_(count) {
  }

  size_t message_size() const { return message_size_; }
  int message_count() const { return message_count_; }

 private:
  size_t message_size_;
  int message_count_;
};

std::vector<PingPongTestParams> GetDefaultTestParams() {
  // Test several sizes. We use 12^N for message size, and limit the message
  // count to keep the test duration reasonable.
#ifdef NDEBUG
  const int kMultiplier = 100;
#else
  // Debug builds on Windows run these tests orders of magnitude more slowly.
  const int kMultiplier = 1;
#endif
  std::vector<PingPongTestParams> list;
  list.push_back(PingPongTestParams(12, 500 * kMultiplier));
  list.push_back(PingPongTestParams(144, 500 * kMultiplier));
  list.push_back(PingPongTestParams(1728, 500 * kMultiplier));
  list.push_back(PingPongTestParams(20736, 120 * kMultiplier));
  list.push_back(PingPongTestParams(248832, 10 * kMultiplier));
  return list;
}
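
// The payload sizes above are 12^1 through 12^5 bytes (12, 144, 1728, 20736,
// 248832); each step multiplies the payload by 12, while the message count
// drops for the two largest sizes to bound the total test time.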

// Avoid core 0 due to conflicts with Intel's Power Gadget.
// Setting thread affinity will fail harmlessly on single/dual core machines.
const int kSharedCore = 2;

class MojoChannelPerfTest : public IPCChannelMojoTestBase {
 public:
  MojoChannelPerfTest() = default;
  ~MojoChannelPerfTest() override = default;

  void RunTestChannelPingPong() {
    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("Channel");
    CreateChannel(&listener);
    listener.Init(channel());
    ASSERT_TRUE(ConnectChannel());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size());

      // This initial message will kick-start the ping-pong of messages.
      Message* message =
          new Message(0, 2, Message::PRIORITY_NORMAL);
      message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
      message->WriteInt(-1);
      message->WriteString("hello");
      sender()->Send(message);

      // Run message loop.
      base::RunLoop().Run();
    }

    // Send quit message.
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("quit");
    sender()->Send(message);

    EXPECT_TRUE(WaitForClientShutdown());
    DestroyChannel();
  }

  void RunTestChannelProxyPingPong() {
    io_thread_.reset(new base::TestIOThread(base::TestIOThread::kAutoStart));

    Init("MojoPerfTestClient");

    // Set up IPC channel and start client.
    PerformanceChannelListener listener("ChannelProxy");
    auto channel_proxy = IPC::ChannelProxy::Create(
        TakeHandle().release(), IPC::Channel::MODE_SERVER, &listener,
        io_thread_->task_runner());
    listener.Init(channel_proxy.get());

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      listener.SetTestParams(params[i].message_count(),
                             params[i].message_size());

      // This initial message will kick-start the ping-pong of messages.
      Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
      message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
      message->WriteInt(-1);
      message->WriteString("hello");
      channel_proxy->Send(message);

      // Run message loop.
      base::RunLoop().Run();
    }

    // Send quit message.
    Message* message = new Message(0, 2, Message::PRIORITY_NORMAL);
    message->WriteInt64(base::TimeTicks::Now().ToInternalValue());
    message->WriteInt(-1);
    message->WriteString("quit");
    channel_proxy->Send(message);

    EXPECT_TRUE(WaitForClientShutdown());
    channel_proxy.reset();

    io_thread_.reset();
  }

  scoped_refptr<base::TaskRunner> io_task_runner() {
    if (io_thread_)
      return io_thread_->task_runner();
    return base::ThreadTaskRunnerHandle::Get();
  }

 private:
  std::unique_ptr<base::TestIOThread> io_thread_;
};
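
// Protocol walkthrough: the server sends "hello"; the client resets its
// latency tracker and echoes it back, and when the echo arrives the server
// resets its own tracker and starts the PerfTimeLogger. Each subsequent
// message carries a fresh send timestamp so the receiving side can record
// per-message latency. After message_count() round trips the server quits
// its run loop and moves to the next payload size; a final "quit" message
// tears the client down.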

TEST_F(MojoChannelPerfTest, ChannelPingPong) {
  RunTestChannelPingPong();

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}

TEST_F(MojoChannelPerfTest, ChannelProxyPingPong) {
  RunTestChannelProxyPingPong();

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();
}

// Test to see how many channels we can create.
TEST_F(MojoChannelPerfTest, DISABLED_MaxChannelCount) {
#if defined(OS_POSIX)
  LOG(INFO) << "base::GetMaxFds " << base::GetMaxFds();
  base::SetFdLimit(20000);
#endif

  std::vector<mojo::edk::PlatformChannelPair*> channels;
  for (size_t i = 0; i < 10000; ++i) {
    LOG(INFO) << "channels size: " << channels.size();
    channels.push_back(new mojo::edk::PlatformChannelPair());
  }
}
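
// The loop above deliberately never frees the pairs: the point of this
// (disabled) test is to observe how many platform channel pairs can be
// created before handle or fd exhaustion, hence the raised fd limit on
// POSIX.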

class MojoPerfTestClient {
 public:
  MojoPerfTestClient()
      : listener_(new ChannelReflectorListener()) {
    mojo::edk::test::MultiprocessTestHelper::ChildSetup();
  }

  ~MojoPerfTestClient() = default;

  int Run(MojoHandle handle) {
    handle_ = mojo::MakeScopedHandle(mojo::MessagePipeHandle(handle));
    LockThreadAffinity thread_locker(kSharedCore);
    std::unique_ptr<Channel> channel = ChannelMojo::Create(
        std::move(handle_), Channel::MODE_CLIENT, listener_.get());
    listener_->Init(channel.get());
    CHECK(channel->Connect());

    base::RunLoop().Run();
    return 0;
  }

 private:
  base::MessageLoopForIO main_message_loop_;
  std::unique_ptr<ChannelReflectorListener> listener_;
  std::unique_ptr<Channel> channel_;
  mojo::ScopedMessagePipeHandle handle_;
};
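
// Note that the client above and the server-side tests both pin themselves
// to kSharedCore, so the two processes intentionally contend for one CPU;
// see the LockThreadAffinity comment for the rationale.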

MULTIPROCESS_TEST_MAIN(MojoPerfTestClientTestChildMain) {
  MojoPerfTestClient client;
  int rv = mojo::edk::test::MultiprocessTestHelper::RunClientMain(
      base::Bind(&MojoPerfTestClient::Run, base::Unretained(&client)));

  base::RunLoop run_loop;
  run_loop.RunUntilIdle();

  return rv;
}

class ReflectorImpl : public IPC::mojom::Reflector {
 public:
  explicit ReflectorImpl(mojo::ScopedMessagePipeHandle handle)
      : binding_(this, std::move(handle)) {}
  ~ReflectorImpl() override {
    ignore_result(binding_.Unbind().PassMessagePipe().release());
  }

 private:
  // IPC::mojom::Reflector:
  void Ping(const std::string& value, const PingCallback& callback) override {
    callback.Run(value);
  }

  void Quit() override {
    base::MessageLoop::current()->QuitWhenIdle();
  }

  mojo::Binding<IPC::mojom::Reflector> binding_;
};
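
// Note: the destructor above unbinds and then release()s the raw pipe handle
// instead of letting it close, so the endpoint is deliberately left open
// until process teardown; RunPingPongServer below does the same with its end
// after sending Quit().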

class MojoInterfacePerfTest : public mojo::edk::test::MojoTestBase {
 public:
  MojoInterfacePerfTest() : message_count_(0), count_down_(0) {}

 protected:
  void RunPingPongServer(MojoHandle mp, const std::string& label) {
    base::MessageLoop main_message_loop;
    label_ = label;

    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);
    ping_receiver_.Bind(IPC::mojom::ReflectorPtrInfo(
        std::move(scoped_mp), 0u));

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      ping_receiver_->Ping(
          "hello",
          base::Bind(&MojoInterfacePerfTest::OnPong, base::Unretained(this)));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }

    ping_receiver_->Quit();

    ignore_result(ping_receiver_.PassInterface().PassHandle().release());
  }

  void OnPong(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("IPC_%s_Perf_%dx_%zu",
                             label_.c_str(),
                             message_count_,
                             payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    ping_receiver_->Ping(
        payload_,
        base::Bind(&MojoInterfacePerfTest::OnPong, base::Unretained(this)));
  }

  static int RunPingPongClient(MojoHandle mp) {
    mojo::MessagePipeHandle mp_handle(mp);
    mojo::ScopedMessagePipeHandle scoped_mp(mp_handle);

    // In single process mode, this is running in a task and by default other
    // tasks (in particular, the binding) won't run. To keep the single process
    // and multi-process code paths the same, enable nestable tasks.
    base::MessageLoop::ScopedNestableTaskAllower nest_loop(
        base::MessageLoop::current());

    LockThreadAffinity thread_locker(kSharedCore);
    ReflectorImpl impl(std::move(scoped_mp));
    base::RunLoop().Run();
    return 0;
  }

 private:
  int message_count_;
  int count_down_;
  std::string label_;
  std::string payload_;
  IPC::mojom::ReflectorPtr ping_receiver_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(MojoInterfacePerfTest);
};
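
// For reference, the Reflector interface bound above lives in
// ipc/ipc_test.mojom; judging from the generated C++ used here, it is
// roughly (a sketch, the .mojom file is not part of this diff):
//
//   interface Reflector {
//     Ping(string value) => (string value);
//     Quit();
//   };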

DEFINE_TEST_CLIENT_WITH_PIPE(PingPongClient, MojoInterfacePerfTest, h) {
  base::MessageLoop main_message_loop;
  return RunPingPongClient(h);
}

// Similar to MojoChannelPerfTest above, but uses a Mojo interface instead of
// raw IPC::Messages.
TEST_F(MojoInterfacePerfTest, MultiprocessPingPong) {
  RUN_CHILD_ON_PIPE(PingPongClient, h)
    RunPingPongServer(h, "MultiProcess");
  END_CHILD()
}

// A single process version of the above test.
TEST_F(MojoInterfacePerfTest, SingleProcessPingPong) {
  MojoHandle server_handle, client_handle;
  CreateMessagePipe(&server_handle, &client_handle);

  base::Thread client_thread("PingPongClient");
  client_thread.Start();
  client_thread.task_runner()->PostTask(
      FROM_HERE,
      base::Bind(base::IgnoreResult(&RunPingPongClient), client_handle));

  RunPingPongServer(server_handle, "SingleProcess");
}

class CallbackPerfTest : public testing::Test {
 public:
  CallbackPerfTest()
      : client_thread_("PingPongClient"), message_count_(0), count_down_(0) {}

 protected:
  void RunPingPongServer() {
    client_thread_.Start();

    LockThreadAffinity thread_locker(kSharedCore);
    std::vector<PingPongTestParams> params = GetDefaultTestParams();
    for (size_t i = 0; i < params.size(); i++) {
      std::string hello("hello");
      client_thread_.task_runner()->PostTask(
          FROM_HERE,
          base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), hello));
      message_count_ = count_down_ = params[i].message_count();
      payload_ = std::string(params[i].message_size(), 'a');

      base::RunLoop().Run();
    }
  }

  void Ping(const std::string& value) {
    main_message_loop.task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&CallbackPerfTest::OnPong, base::Unretained(this),
                   value));
  }

  void OnPong(const std::string& value) {
    if (value == "hello") {
      DCHECK(!perf_logger_.get());
      std::string test_name =
          base::StringPrintf("Callback_Perf_%dx_%zu",
                             message_count_,
                             payload_.size());
      perf_logger_.reset(new base::PerfTimeLogger(test_name.c_str()));
    } else {
      DCHECK_EQ(payload_.size(), value.size());

      CHECK(count_down_ > 0);
      count_down_--;
      if (count_down_ == 0) {
        perf_logger_.reset();
        base::MessageLoop::current()->QuitWhenIdle();
        return;
      }
    }

    client_thread_.task_runner()->PostTask(
        FROM_HERE,
        base::Bind(&CallbackPerfTest::Ping, base::Unretained(this), payload_));
  }

 private:
  base::Thread client_thread_;
  base::MessageLoop main_message_loop;
  int message_count_;
  int count_down_;
  std::string payload_;
  std::unique_ptr<base::PerfTimeLogger> perf_logger_;

  DISALLOW_COPY_AND_ASSIGN(CallbackPerfTest);
};

// Sends the same data as above using PostTask instead of IPCs for comparison.
TEST_F(CallbackPerfTest, PingPong) {
  RunPingPongServer();
}
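
// Taken together, the ping-pong variants in this file bracket the cost of
// each layer: CallbackPerfTest measures bare cross-thread PostTask round
// trips, MojoInterfacePerfTest adds Mojo message pipes plus generated
// bindings, and MojoChannelPerfTest adds the IPC::Channel/ChannelProxy
// layer on top.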

}  // namespace
}  // namespace IPC
