Chromium Code Reviews

Diff: mojo/system/raw_channel.cc

Issue 597413002: Mojo: NULL -> nullptr in mojo/system and mojo/embedder. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: EXPECT_TRUE Created 6 years, 3 months ago
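
The change itself is a straightforward substitution of the C++11 nullptr keyword for the NULL macro throughout mojo/system and mojo/embedder. The sketch below (illustrative only, not part of this CL) shows the practical difference: NULL is an integer constant, so it participates in integer overload resolution, whereas nullptr has its own type (std::nullptr_t) and converts only to pointer types.

    // Illustrative sketch, not from this CL: why nullptr is preferred to NULL.
    #include <cstdio>  // also provides NULL

    void Log(int fd) { std::printf("int overload\n"); }
    void Log(const char* msg) { std::printf("pointer overload\n"); }

    int main() {
      Log(NULL);     // Depending on how NULL is defined, this picks the int
                     // overload (usually with a compiler warning) or is
                     // rejected as ambiguous -- rarely what was intended.
      Log(nullptr);  // Unambiguously calls the const char* overload.
      return 0;
    }

Conversions like this one can also be automated with clang-tidy's modernize-use-nullptr check, though whether that tooling was used for this CL is not stated in the review.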
  1   // Copyright 2014 The Chromium Authors. All rights reserved.
  2   // Use of this source code is governed by a BSD-style license that can be
  3   // found in the LICENSE file.
  4
  5   #include "mojo/system/raw_channel.h"
  6
  7   #include <string.h>
  8
  9   #include <algorithm>
 10
(...skipping 131 matching lines...)
142     buffers->push_back(buffer1);
143     Buffer buffer2 = {
144         static_cast<const char*>(message->transport_data()->buffer()),
145         transport_data_buffer_size};
146     buffers->push_back(buffer2);
147   }
148
149   // RawChannel ----------------------------------------------------------------
150
151   RawChannel::RawChannel()
152 -     : message_loop_for_io_(NULL),
152 +     : message_loop_for_io_(nullptr),
153 -       delegate_(NULL),
153 +       delegate_(nullptr),
154         read_stopped_(false),
155         write_stopped_(false),
156         weak_ptr_factory_(this) {
157   }
158
159   RawChannel::~RawChannel() {
160     DCHECK(!read_buffer_);
161     DCHECK(!write_buffer_);
162
163     // No need to take the |write_lock_| here -- if there are still weak pointers
(...skipping 13 matching lines...)
177     message_loop_for_io_ =
178         static_cast<base::MessageLoopForIO*>(base::MessageLoop::current());
179
180     // No need to take the lock. No one should be using us yet.
181     DCHECK(!read_buffer_);
182     read_buffer_.reset(new ReadBuffer);
183     DCHECK(!write_buffer_);
184     write_buffer_.reset(new WriteBuffer(GetSerializedPlatformHandleSize()));
185
186     if (!OnInit()) {
187 -     delegate_ = NULL;
187 +     delegate_ = nullptr;
188 -     message_loop_for_io_ = NULL;
188 +     message_loop_for_io_ = nullptr;
189       read_buffer_.reset();
190       write_buffer_.reset();
191       return false;
192     }
193
194     IOResult io_result = ScheduleRead();
195     if (io_result != IO_PENDING) {
196       // This will notify the delegate about the read failure. Although we're on
197       // the I/O thread, don't call it in the nested context.
198       message_loop_for_io_->PostTask(FROM_HERE,
(...skipping 10 matching lines...)
209
210   void RawChannel::Shutdown() {
211     DCHECK_EQ(base::MessageLoop::current(), message_loop_for_io_);
212
213     base::AutoLock locker(write_lock_);
214
215     LOG_IF(WARNING, !write_buffer_->message_queue_.empty())
216         << "Shutting down RawChannel with write buffer nonempty";
217
218     // Reset the delegate so that it won't receive further calls.
219 -   delegate_ = NULL;
219 +   delegate_ = nullptr;
220     read_stopped_ = true;
221     write_stopped_ = true;
222     weak_ptr_factory_.InvalidateWeakPtrs();
223
224     OnShutdownNoLock(read_buffer_.Pass(), write_buffer_.Pass());
225   }
226
227   // Reminder: This must be thread-safe.
228   bool RawChannel::WriteMessage(scoped_ptr<MessageInTransit> message) {
229     DCHECK(message);
(...skipping 82 matching lines...)
312     // TODO(vtl): Validate that |message_size| is sane.
313     while (remaining_bytes > 0 && MessageInTransit::GetNextMessageSize(
314                                       &read_buffer_->buffer_[read_buffer_start],
315                                       remaining_bytes,
316                                       &message_size) &&
317            remaining_bytes >= message_size) {
318       MessageInTransit::View message_view(
319           message_size, &read_buffer_->buffer_[read_buffer_start]);
320       DCHECK_EQ(message_view.total_size(), message_size);
321
322 -     const char* error_message = NULL;
322 +     const char* error_message = nullptr;
323       if (!message_view.IsValid(GetSerializedPlatformHandleSize(),
324                                 &error_message)) {
325         DCHECK(error_message);
326         LOG(ERROR) << "Received invalid message: " << error_message;
327         read_stopped_ = true;
328         CallOnError(Delegate::ERROR_READ_BAD_MESSAGE);
329         return;
330       }
331
332       if (message_view.type() == MessageInTransit::kTypeRawChannel) {
(...skipping 180 matching lines...)
513
514     write_stopped_ = true;
515     STLDeleteElements(&write_buffer_->message_queue_);
516     write_buffer_->platform_handles_offset_ = 0;
517     write_buffer_->data_offset_ = 0;
518     return false;
519   }
520
521   }  // namespace system
522   }  // namespace mojo
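
Beyond the NULL -> nullptr substitutions, the diff also shows the weak-pointer shutdown idiom that the destructor comment alludes to: Shutdown() calls weak_ptr_factory_.InvalidateWeakPtrs() so that any I/O-thread tasks still bound to the channel are dropped instead of running against a torn-down object. Below is a minimal sketch of that idiom, assuming the Chromium base library of this era; the class and method names are hypothetical, only the base:: APIs are real.

    // Illustrative sketch, not from this CL: the base::WeakPtrFactory idiom.
    #include "base/bind.h"
    #include "base/location.h"
    #include "base/memory/weak_ptr.h"
    #include "base/message_loop/message_loop.h"

    class Worker {
     public:
      Worker() : weak_ptr_factory_(this) {}

      void Start() {
        // Bind the task to a weak pointer. If the pointer has been
        // invalidated by the time the task runs, base::Bind's weak-pointer
        // support silently skips the call.
        base::MessageLoop::current()->PostTask(
            FROM_HERE,
            base::Bind(&Worker::DoWork, weak_ptr_factory_.GetWeakPtr()));
      }

      void Shutdown() { weak_ptr_factory_.InvalidateWeakPtrs(); }

     private:
      void DoWork() {}

      // Kept as the last member so weak pointers are invalidated before the
      // rest of the object is destroyed.
      base::WeakPtrFactory<Worker> weak_ptr_factory_;
    };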