| OLD | NEW |
| 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/basictypes.h" | 5 #include "base/basictypes.h" |
| 6 #include "base/multiprocess_test.h" |
| 6 #include "base/platform_thread.h" | 7 #include "base/platform_thread.h" |
| 7 #include "base/scoped_nsautorelease_pool.h" | 8 #include "base/scoped_nsautorelease_pool.h" |
| 8 #include "base/shared_memory.h" | 9 #include "base/shared_memory.h" |
| 10 #include "base/scoped_ptr.h" |
| 9 #include "testing/gtest/include/gtest/gtest.h" | 11 #include "testing/gtest/include/gtest/gtest.h" |
| 10 | 12 |
| 11 static const int kNumThreads = 5; | 13 static const int kNumThreads = 5; |
| 14 static const int kNumTasks = 5; |
| 12 | 15 |
| 13 namespace base { | 16 namespace base { |
| 14 | 17 |
| 15 namespace { | 18 namespace { |
| 16 | 19 |
| 17 // Each thread will open the shared memory. Each thread will take a different 4 | 20 // Each thread will open the shared memory. Each thread will take a different 4 |
| 18 // byte int pointer, and keep changing it, with some small pauses in between. | 21 // byte int pointer, and keep changing it, with some small pauses in between. |
| 19 // Verify that each thread's value in the shared memory is always correct. | 22 // Verify that each thread's value in the shared memory is always correct. |
| 20 class MultipleThreadMain : public PlatformThread::Delegate { | 23 class MultipleThreadMain : public PlatformThread::Delegate { |
| 21 public: | 24 public: |
| (...skipping 129 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 151 | 154 |
| 152 // Close the second memory segment. | 155 // Close the second memory segment. |
| 153 memory2.Close(); | 156 memory2.Close(); |
| 154 | 157 |
| 155 rv = memory1.Delete(test_name); | 158 rv = memory1.Delete(test_name); |
| 156 EXPECT_TRUE(rv); | 159 EXPECT_TRUE(rv); |
| 157 rv = memory2.Delete(test_name); | 160 rv = memory2.Delete(test_name); |
| 158 EXPECT_TRUE(rv); | 161 EXPECT_TRUE(rv); |
| 159 } | 162 } |
| 160 | 163 |
| 161 // Create a set of 5 threads to each open a shared memory segment and write to | 164 // Create a set of N threads to each open a shared memory segment and write to |
| 162 // it. Verify that they are always reading/writing consistent data. | 165 // it. Verify that they are always reading/writing consistent data. |
| 163 TEST(SharedMemoryTest, MultipleThreads) { | 166 TEST(SharedMemoryTest, MultipleThreads) { |
| 164 MultipleThreadMain::CleanUp(); | 167 MultipleThreadMain::CleanUp(); |
| 165 PlatformThreadHandle thread_handles[kNumThreads]; | 168 // On POSIX we have a problem when 2 threads try to create the shmem |
| 166 MultipleThreadMain* thread_delegates[kNumThreads]; | 169 // (a file) at exactly the same time, since create both creates the |
| 170 // file and zerofills it. We solve the problem for this unit test |
| 171 // (make it not flaky) by starting with 1 thread, then |
| 172 // intentionally don't clean up its shmem before running with |
| 173 // kNumThreads. |
| 167 | 174 |
| 168 // Spawn the threads. | 175 int threadcounts[] = { 1, kNumThreads }; |
| 169 for (int16 index = 0; index < kNumThreads; index++) { | 176 for (size_t i = 0; i < sizeof(threadcounts) / sizeof(threadcounts[0]); i++) { |
| 170 PlatformThreadHandle pth; | 177 int numthreads = threadcounts[i]; |
| 171 thread_delegates[index] = new MultipleThreadMain(index); | 178 scoped_array<PlatformThreadHandle> thread_handles; |
| 172 EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth)); | 179 scoped_array<MultipleThreadMain*> thread_delegates; |
| 173 thread_handles[index] = pth; | 180 |
| 181 thread_handles.reset(new PlatformThreadHandle[numthreads]); |
| 182 thread_delegates.reset(new MultipleThreadMain*[numthreads]); |
| 183 |
| 184 // Spawn the threads. |
| 185 for (int16 index = 0; index < numthreads; index++) { |
| 186 PlatformThreadHandle pth; |
| 187 thread_delegates[index] = new MultipleThreadMain(index); |
| 188 EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth)); |
| 189 thread_handles[index] = pth; |
| 190 } |
| 191 |
| 192 // Wait for the threads to finish. |
| 193 for (int index = 0; index < numthreads; index++) { |
| 194 PlatformThread::Join(thread_handles[index]); |
| 195 delete thread_delegates[index]; |
| 196 } |
| 174 } | 197 } |
| 175 | |
| 176 // Wait for the threads to finish. | |
| 177 for (int index = 0; index < kNumThreads; index++) { | |
| 178 PlatformThread::Join(thread_handles[index]); | |
| 179 delete thread_delegates[index]; | |
| 180 } | |
| 181 | |
| 182 MultipleThreadMain::CleanUp(); | 198 MultipleThreadMain::CleanUp(); |
| 183 } | 199 } |
| 184 | 200 |
| 185 // TODO(port): this test requires the MultipleLockThread class | 201 // TODO(port): this test requires the MultipleLockThread class |
| 186 // (defined above), which requires the ability to pass file | 202 // (defined above), which requires the ability to pass file |
| 187 // descriptors between processes. We haven't done that yet in Chrome | 203 // descriptors between processes. We haven't done that yet in Chrome |
| 188 // for POSIX. | 204 // for POSIX. |
| 189 #if defined(OS_WIN) | 205 #if defined(OS_WIN) |
| 190 // Create a set of threads to each open a shared memory segment and write to it | 206 // Create a set of threads to each open a shared memory segment and write to it |
| 191 // with the lock held. Verify that they are always reading/writing consistent | 207 // with the lock held. Verify that they are always reading/writing consistent |
| (...skipping 20 matching lines...) Expand all Loading... |
| 212 | 228 |
| 213 // Allocate private (unique) shared memory with an empty string for a | 229 // Allocate private (unique) shared memory with an empty string for a |
| 214 // name. Make sure several of them don't point to the same thing as | 230 // name. Make sure several of them don't point to the same thing as |
| 215 // we might expect if the names are equal. | 231 // we might expect if the names are equal. |
| 216 TEST(SharedMemoryTest, AnonymousPrivate) { | 232 TEST(SharedMemoryTest, AnonymousPrivate) { |
| 217 int i, j; | 233 int i, j; |
| 218 int count = 4; | 234 int count = 4; |
| 219 bool rv; | 235 bool rv; |
| 220 const int kDataSize = 8192; | 236 const int kDataSize = 8192; |
| 221 | 237 |
| 222 SharedMemory* memories = new SharedMemory[count]; | 238 scoped_array<SharedMemory> memories(new SharedMemory[count]); |
| 223 int **pointers = new int*[count]; | 239 scoped_array<int*> pointers(new int*[count]); |
| 224 ASSERT_TRUE(memories); | 240 ASSERT_TRUE(memories.get()); |
| 225 ASSERT_TRUE(pointers); | 241 ASSERT_TRUE(pointers.get()); |
| 226 | 242 |
| 227 for (i = 0; i < count; i++) { | 243 for (i = 0; i < count; i++) { |
| 228 rv = memories[i].Create(L"", false, true, kDataSize); | 244 rv = memories[i].Create(L"", false, true, kDataSize); |
| 229 EXPECT_TRUE(rv); | 245 EXPECT_TRUE(rv); |
| 230 rv = memories[i].Map(kDataSize); | 246 rv = memories[i].Map(kDataSize); |
| 231 EXPECT_TRUE(rv); | 247 EXPECT_TRUE(rv); |
| 232 int *ptr = static_cast<int*>(memories[i].memory()); | 248 int *ptr = static_cast<int*>(memories[i].memory()); |
| 233 EXPECT_TRUE(ptr); | 249 EXPECT_TRUE(ptr); |
| 234 pointers[i] = ptr; | 250 pointers[i] = ptr; |
| 235 } | 251 } |
| (...skipping 11 matching lines...) Expand all Loading... |
| 247 if (i == j) | 263 if (i == j) |
| 248 EXPECT_EQ(100, pointers[j][0]); | 264 EXPECT_EQ(100, pointers[j][0]); |
| 249 else | 265 else |
| 250 EXPECT_EQ(0, pointers[j][0]); | 266 EXPECT_EQ(0, pointers[j][0]); |
| 251 } | 267 } |
| 252 } | 268 } |
| 253 | 269 |
| 254 for (int i = 0; i < count; i++) { | 270 for (int i = 0; i < count; i++) { |
| 255 memories[i].Close(); | 271 memories[i].Close(); |
| 256 } | 272 } |
| 273 |
| 274 } |
| 275 |
| 276 |
| 277 // On POSIX it is especially important we test shmem across processes, |
| 278 // not just across threads. But the test is enabled on all platforms. |
| 279 class SharedMemoryProcessTest : public MultiProcessTest { |
| 280 public: |
| 281 |
| 282 static void CleanUp() { |
| 283 SharedMemory memory; |
| 284 memory.Delete(test_name_); |
| 285 } |
| 286 |
| 287 static int TaskTestMain() { |
| 288 int errors = 0; |
| 289 ScopedNSAutoreleasePool pool; // noop if not OSX |
| 290 const int kDataSize = 1024; |
| 291 SharedMemory memory; |
| 292 bool rv = memory.Create(test_name_, false, true, kDataSize); |
| 293 EXPECT_TRUE(rv); |
| 294 if (rv != true) |
| 295 errors++; |
| 296 rv = memory.Map(kDataSize); |
| 297 EXPECT_TRUE(rv); |
| 298 if (rv != true) |
| 299 errors++; |
| 300 int *ptr = static_cast<int*>(memory.memory()); |
| 301 |
| 302 for (int idx = 0; idx < 20; idx++) { |
| 303 memory.Lock(); |
| 304 int i = (1 << 16) + idx; |
| 305 *ptr = i; |
| 306 PlatformThread::Sleep(10); // Short wait. |
| 307 if (*ptr != i) |
| 308 errors++; |
| 309 memory.Unlock(); |
| 310 } |
| 311 |
| 312 memory.Close(); |
| 313 return errors; |
| 314 } |
| 315 |
| 316 private: |
| 317 static const std::wstring test_name_; |
| 318 }; |
| 319 |
| 320 const std::wstring SharedMemoryProcessTest::test_name_ = L"MPMem"; |
| 321 |
| 322 |
| 323 TEST_F(SharedMemoryProcessTest, Tasks) { |
| 324 SharedMemoryProcessTest::CleanUp(); |
| 325 |
| 326 base::ProcessHandle handles[kNumTasks]; |
| 327 for (int index = 0; index < kNumTasks; ++index) { |
| 328 handles[index] = SpawnChild(L"SharedMemoryTestMain"); |
| 329 } |
| 330 |
| 331 int exit_code = 0; |
| 332 for (int index = 0; index < kNumTasks; ++index) { |
| 333 EXPECT_TRUE(base::WaitForExitCode(handles[index], &exit_code)); |
| 334 EXPECT_TRUE(exit_code == 0); |
| 335 } |
| 336 |
| 337 SharedMemoryProcessTest::CleanUp(); |
| 338 } |
| 339 |
| 340 MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) { |
| 341 return SharedMemoryProcessTest::TaskTestMain(); |
| 257 } | 342 } |
| 258 | 343 |
| 259 | 344 |
| 260 } // namespace base | 345 } // namespace base |
| OLD | NEW |