// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This tests the performance of message pipes via the C API.
#include <mojo/system/message_pipe.h>

#include <assert.h>
#include <mojo/macros.h>
#include <mojo/result.h>
#include <mojo/system/handle.h>
#include <mojo/system/time.h>
#include <mojo/system/wait.h>
#include <stdint.h>
#include <stdio.h>

#include <thread>
#include <vector>

#include "gtest/gtest.h"
#include "mojo/public/c/tests/system/perftest_utils.h"
#include "mojo/public/cpp/test_support/test_support.h"
| 23 | |
namespace {

| 26 TEST(MessagePipePerftest, CreateAndClose) { | |
| 27 mojo::test::IterateAndReportPerf("MessagePipe_CreateAndClose", nullptr, []() { | |
| 28 MojoHandle h0; | |
| 29 MojoHandle h1; | |
| 30 MojoResult result = MojoCreateMessagePipe(nullptr, &h0, &h1); | |
| 31 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 32 assert(result == MOJO_RESULT_OK); | |
| 33 result = MojoClose(h0); | |
| 34 assert(result == MOJO_RESULT_OK); | |
| 35 result = MojoClose(h1); | |
| 36 assert(result == MOJO_RESULT_OK); | |
| 37 }); | |
| 38 } | |
| 39 | |
| 40 TEST(MessagePipePerftest, WriteAndRead) { | |
| 41 MojoHandle h0; | |
| 42 MojoHandle h1; | |
| 43 MojoResult result = MojoCreateMessagePipe(nullptr, &h0, &h1); | |
| 44 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 45 assert(result == MOJO_RESULT_OK); | |
| 46 char buffer[10000] = {}; | |
| 47 uint32_t num_bytes = 0u; | |
| 48 auto single_iteration = [&h0, &h1, &buffer, &num_bytes]() { | |
| 49 MojoResult result = MojoWriteMessage(h0, buffer, num_bytes, nullptr, 0, | |
| 50 MOJO_WRITE_MESSAGE_FLAG_NONE); | |
| 51 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 52 assert(result == MOJO_RESULT_OK); | |
| 53 uint32_t read_bytes = num_bytes; | |
| 54 result = MojoReadMessage(h1, buffer, &read_bytes, nullptr, nullptr, | |
| 55 MOJO_READ_MESSAGE_FLAG_NONE); | |
| 56 assert(result == MOJO_RESULT_OK); | |
| 57 }; | |
| 58 num_bytes = 10u; | |
| 59 mojo::test::IterateAndReportPerf("MessagePipe_WriteAndRead", "10bytes", | |
| 60 single_iteration); | |
| 61 num_bytes = 100u; | |
| 62 mojo::test::IterateAndReportPerf("MessagePipe_WriteAndRead", "100bytes", | |
| 63 single_iteration); | |
| 64 num_bytes = 1000u; | |
| 65 mojo::test::IterateAndReportPerf("MessagePipe_WriteAndRead", "1000bytes", | |
| 66 single_iteration); | |
| 67 num_bytes = 10000u; | |
| 68 mojo::test::IterateAndReportPerf("MessagePipe_WriteAndRead", "10000bytes", | |
| 69 single_iteration); | |
| 70 result = MojoClose(h0); | |
| 71 assert(result == MOJO_RESULT_OK); | |
| 72 result = MojoClose(h1); | |
| 73 assert(result == MOJO_RESULT_OK); | |
| 74 } | |
| 75 | |
| 76 TEST(MessagePipePerftest, EmptyRead) { | |
| 77 MojoHandle h0; | |
| 78 MojoHandle h1; | |
| 79 MojoResult result = MojoCreateMessagePipe(nullptr, &h0, &h1); | |
| 80 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 81 assert(result == MOJO_RESULT_OK); | |
| 82 mojo::test::IterateAndReportPerf("MessagePipe_EmptyRead", nullptr, [&h0]() { | |
| 83 MojoResult result = MojoReadMessage(h0, nullptr, nullptr, nullptr, nullptr, | |
| 84 MOJO_READ_MESSAGE_FLAG_MAY_DISCARD); | |
| 85 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 86 assert(result == MOJO_RESULT_SHOULD_WAIT); | |
| 87 }); | |
| 88 result = MojoClose(h0); | |
| 89 assert(result == MOJO_RESULT_OK); | |
| 90 result = MojoClose(h1); | |
| 91 assert(result == MOJO_RESULT_OK); | |
| 92 } | |
| 93 | |
| 94 void DoMessagePipeThreadedTest(unsigned num_writers, | |
| 95 unsigned num_readers, | |
| 96 uint32_t num_bytes) { | |
| 97 assert(num_writers > 0u); | |
| 98 assert(num_readers > 0u); | |
| 99 | |
| 100 MojoHandle h0; | |
| 101 MojoHandle h1; | |
| 102 MojoResult result = MojoCreateMessagePipe(nullptr, &h0, &h1); | |
| 103 MOJO_ALLOW_UNUSED_LOCAL(result); | |
| 104 assert(result == MOJO_RESULT_OK); | |
| 105 | |
| 106 // Each |writers[i]| will write its final result to |num_writes[i]|. | |
| 107 std::vector<std::thread> writers(num_writers); | |
| 108 std::vector<int64_t> num_writes(num_writers, 0); | |
| 109 | |
| 110 // Similarly for |readers[i]| and |num_reads[i]|. | |
| 111 std::vector<std::thread> readers(num_readers); | |
| 112 std::vector<int64_t> num_reads(num_readers, 0); | |
| 113 | |
| 114 // Start time here, just before we fire off the threads. | |
| 115 const MojoTimeTicks start_time = MojoGetTimeTicksNow(); | |
| 116 | |
| 117 // Interleave the starts. | |
| 118 for (unsigned i = 0u; i < num_writers || i < num_readers; i++) { | |
| 119 if (i < num_writers) { | |
| 120 int64_t* final_num_writes = &num_writes[i]; | |
| 121 writers[i] = std::thread([num_bytes, h0, final_num_writes]() { | |
| 122 int64_t num_writes = 0; | |
| 123 char buffer[10000]; | |
| 124 assert(num_bytes <= sizeof(buffer)); | |
| 125 | |
| 126 // TODO(vtl): Should I throttle somehow? | |
| 127 for (;;) { | |
| 128 MojoResult result = MojoWriteMessage( | |
| 129 h0, buffer, num_bytes, nullptr, 0u, MOJO_WRITE_MESSAGE_FLAG_NONE); | |
| 130 if (result == MOJO_RESULT_OK) { | |
| 131 num_writes++; | |
| 132 continue; | |
| 133 } | |
| 134 | |
| 135 // We failed to write. | |
| 136 // Either |h0| or its peer was closed. | |
| 137 assert(result == MOJO_RESULT_INVALID_ARGUMENT || | |
| 138 result == MOJO_RESULT_FAILED_PRECONDITION); | |
| 139 break; | |
| 140 } | |
| 141 *final_num_writes = num_writes; | |
| 142 }); | |
| 143 } | |
| 144 if (i < num_readers) { | |
| 145 int64_t* final_num_reads = &num_reads[i]; | |
| 146 readers[i] = std::thread([h1, final_num_reads]() { | |
| 147 int64_t num_reads = 0; | |
| 148 char buffer[10000]; | |
| 149 | |
| 150 for (;;) { | |
| 151 uint32_t num_bytes = static_cast<uint32_t>(sizeof(buffer)); | |
| 152 MojoResult result = | |
| 153 MojoReadMessage(h1, buffer, &num_bytes, nullptr, nullptr, | |
| 154 MOJO_READ_MESSAGE_FLAG_NONE); | |
| 155 if (result == MOJO_RESULT_OK) { | |
| 156 num_reads++; | |
| 157 continue; | |
| 158 } | |
| 159 | |
| 160 if (result == MOJO_RESULT_SHOULD_WAIT) { | |
| 161 result = MojoWait(h1, MOJO_HANDLE_SIGNAL_READABLE, | |
| 162 MOJO_DEADLINE_INDEFINITE, nullptr); | |
| 163 if (result == MOJO_RESULT_OK) { | |
| 164 // Go to the top of the loop to read again. | |
| 165 continue; | |
| 166 } | |
| 167 } | |
| 168 | |
| 169 // We failed to read and possibly failed to wait. | |
| 170 // Either |h1| or its peer was closed. | |
| 171 assert(result == MOJO_RESULT_INVALID_ARGUMENT || | |
| 172 result == MOJO_RESULT_FAILED_PRECONDITION); | |
| 173 break; | |
| 174 } | |
| 175 *final_num_reads = num_reads; | |
| 176 }); | |
| 177 } | |
| 178 } | |
| 179 | |
| 180 mojo::test::Sleep(mojo::test::kPerftestTimeMicroseconds); | |
| 181 | |
| 182 // Close both handles to make writers and readers stop immediately. | |
| 183 result = MojoClose(h0); | |
| 184 assert(result == MOJO_RESULT_OK); | |
| 185 result = MojoClose(h1); | |
| 186 assert(result == MOJO_RESULT_OK); | |
| 187 | |
| 188 // Join everything. | |
| 189 for (auto& writer : writers) | |
| 190 writer.join(); | |
| 191 for (auto& reader : readers) | |
| 192 reader.join(); | |
| 193 | |
| 194 // Stop time here. | |
| 195 MojoTimeTicks end_time = MojoGetTimeTicksNow(); | |
| 196 | |
| 197 // Add up write and read counts, and destroy the threads. | |
| 198 int64_t total_num_writes = 0; | |
| 199 for (auto n : num_writes) | |
| 200 total_num_writes += n; | |
| 201 int64_t total_num_reads = 0; | |
| 202 for (auto n : num_reads) | |
| 203 total_num_reads += n; | |
| 204 | |
| 205 char sub_test_name[200]; | |
| 206 sprintf(sub_test_name, "%uw_%ur_%ubytes", num_writers, num_readers, | |
| 207 static_cast<unsigned>(num_bytes)); | |
| 208 mojo::test::LogPerfResult("MessagePipe_Threaded_Writes", sub_test_name, | |
| 209 1000000.0 * static_cast<double>(total_num_writes) / | |
| 210 (end_time - start_time), | |
| 211 "writes/second"); | |
| 212 mojo::test::LogPerfResult("MessagePipe_Threaded_Reads", sub_test_name, | |
| 213 1000000.0 * static_cast<double>(total_num_reads) / | |
| 214 (end_time - start_time), | |
| 215 "reads/second"); | |
| 216 } | |
| 217 | |
| 218 TEST(MessagePipePerftest, Threaded) { | |
| 219 DoMessagePipeThreadedTest(1u, 1u, 100u); | |
| 220 DoMessagePipeThreadedTest(2u, 2u, 100u); | |
| 221 DoMessagePipeThreadedTest(3u, 3u, 100u); | |
| 222 DoMessagePipeThreadedTest(10u, 10u, 100u); | |
| 223 DoMessagePipeThreadedTest(10u, 1u, 100u); | |
| 224 DoMessagePipeThreadedTest(1u, 10u, 100u); | |
| 225 | |
| 226 // For comparison of overhead: | |
| 227 DoMessagePipeThreadedTest(1u, 1u, 10u); | |
| 228 // 100 was done above. | |
| 229 DoMessagePipeThreadedTest(1u, 1u, 1000u); | |
| 230 DoMessagePipeThreadedTest(1u, 1u, 10000u); | |
| 231 | |
| 232 DoMessagePipeThreadedTest(3u, 3u, 10u); | |
| 233 // 100 was done above. | |
| 234 DoMessagePipeThreadedTest(3u, 3u, 1000u); | |
| 235 DoMessagePipeThreadedTest(3u, 3u, 10000u); | |
| 236 } | |

}  // namespace