Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/media/dxva_video_decode_accelerator.h" | |
| 6 | |
| 7 #if !defined(OS_WIN) | |
| 8 #error This file should only be built on Windows. | |
| 9 #endif // !defined(OS_WIN) | |
| 10 | |
| 11 #include <ks.h> | |
| 12 #include <codecapi.h> | |
| 13 #include <d3dx9tex.h> | |
| 14 #include <mfapi.h> | |
| 15 #include <mferror.h> | |
| 16 #include <wmcodecdsp.h> | |
| 17 | |
| 18 #include "base/bind.h" | |
| 19 #include "base/callback.h" | |
| 20 #include "base/debug/trace_event.h" | |
| 21 #include "base/logging.h" | |
| 22 #include "base/memory/scoped_handle.h" | |
| 23 #include "base/memory/scoped_ptr.h" | |
| 24 #include "base/message_loop.h" | |
| 25 #include "base/shared_memory.h" | |
| 26 #include "media/video/video_decode_accelerator.h" | |
| 27 #include "third_party/angle/include/GLES2/gl2.h" | |
| 28 #include "third_party/angle/include/GLES2/gl2ext.h" | |
| 29 | |
| 30 static const int kNumPictureBuffers = 5; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Doco origin of this magic 5?
ananta
2011/12/17 00:40:25
Done.
| |
| 31 | |
| 32 #define RETURN_ON_HR_FAILURE(result, log, ret_val) \ | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/result/hr/
(it's funny that it works anyway; yay
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I'm surprised this doesn't NotifyError.
ananta
2011/12/17 00:40:25
Done.
| |
| 33 do { \ | |
| 34 if (FAILED(hr)) { \ | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
You also have lots of occurrences of error-checking
ananta
2011/12/17 00:40:25
Added two variants of these macros. One which noti
| |
| 35 DVLOG(1) << log; \ | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Not worth emitting result itself?
ananta
2011/12/17 00:40:25
The callers now pass in the formatted string if ne
| |
| 36 return ret_val; \ | |
| 37 } \ | |
| 38 } while (0) | |
| 39 | |
| 40 static IMFSample* CreateEmptySample() { | |
| 41 HRESULT hr = E_FAIL; | |
| 42 base::win::ScopedComPtr<IMFSample> sample; | |
| 43 hr = MFCreateSample(sample.Receive()); | |
| 44 RETURN_ON_HR_FAILURE(hr, "Unable to create an empty sample", NULL); | |
| 45 return sample.Detach(); | |
| 46 } | |
| 47 | |
| 48 // Creates a Media Foundation sample with one buffer of length |buffer_length| | |
| 49 // on a |align|-byte boundary. Alignment must be a perfect power of 2 or 0. | |
| 50 static IMFSample* CreateEmptySampleWithBuffer(int buffer_length, int align) { | |
| 51 CHECK_GT(buffer_length, 0); | |
| 52 base::win::ScopedComPtr<IMFSample> sample; | |
| 53 sample.Attach(CreateEmptySample()); | |
| 54 if (!sample.get()) | |
| 55 return NULL; | |
| 56 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
| 57 HRESULT hr = E_FAIL; | |
| 58 if (align == 0) { | |
| 59 // Note that MFCreateMemoryBuffer is same as MFCreateAlignedMemoryBuffer | |
| 60 // with the align argument being 0. | |
| 61 hr = MFCreateMemoryBuffer(buffer_length, buffer.Receive()); | |
| 62 } else { | |
| 63 hr = MFCreateAlignedMemoryBuffer(buffer_length, | |
| 64 align - 1, | |
| 65 buffer.Receive()); | |
| 66 } | |
| 67 RETURN_ON_HR_FAILURE(hr, "Unable to create an empty buffer", NULL); | |
| 68 | |
| 69 hr = sample->AddBuffer(buffer.get()); | |
| 70 RETURN_ON_HR_FAILURE(hr, "Failed to add empty buffer to sample", NULL); | |
| 71 | |
| 72 return sample.Detach(); | |
| 73 } | |
| 74 | |
| 75 // Creates a Media Foundation sample with one buffer containing a copy of the | |
| 76 // given Annex B stream data. | |
| 77 // If duration and sample time are not known, provide 0. | |
| 78 // |min_size| specifies the minimum size of the buffer (might be required by | |
| 79 // the decoder for input). If no alignment is required, provide 0. | |
| 80 static IMFSample* CreateInputSample(const uint8* stream, int size, | |
| 81 int min_size, int alignment) { | |
| 82 CHECK(stream); | |
| 83 CHECK_GT(size, 0); | |
| 84 base::win::ScopedComPtr<IMFSample> sample; | |
| 85 sample.Attach(CreateEmptySampleWithBuffer(std::max(min_size, size), | |
| 86 alignment)); | |
| 87 if (!sample.get()) { | |
| 88 NOTREACHED() << "Failed to create empty buffer for input"; | |
| 89 return NULL; | |
| 90 } | |
| 91 HRESULT hr = E_FAIL; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Why not declare & initialize to the real result yo
ananta
2011/12/17 00:40:25
Done.
| |
| 92 | |
| 93 base::win::ScopedComPtr<IMFMediaBuffer> buffer; | |
| 94 hr = sample->GetBufferByIndex(0, buffer.Receive()); | |
| 95 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer in sample", NULL); | |
| 96 | |
| 97 DWORD max_length = 0; | |
| 98 DWORD current_length = 0; | |
| 99 uint8* destination = NULL; | |
| 100 | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
remove newline
ananta
2011/12/17 00:40:25
Done.
| |
| 101 hr = buffer->Lock(&destination, &max_length, &current_length); | |
| 102 RETURN_ON_HR_FAILURE(hr, "Failed to lock buffer", NULL); | |
| 103 | |
| 104 CHECK_EQ(current_length, 0u); | |
| 105 CHECK_GE(static_cast<int>(max_length), size); | |
| 106 memcpy(destination, stream, size); | |
| 107 CHECK(SUCCEEDED(buffer->Unlock())); | |
| 108 | |
| 109 hr = buffer->SetCurrentLength(size); | |
| 110 RETURN_ON_HR_FAILURE(hr, "Failed to set current length", NULL); | |
| 111 | |
| 112 hr = sample->SetUINT32(MFSampleExtension_CleanPoint, TRUE); | |
| 113 RETURN_ON_HR_FAILURE(hr, "Failed to mark sample as key sample", NULL); | |
| 114 | |
| 115 return sample.Detach(); | |
| 116 } | |
| 117 | |
| 118 static IMFSample* CreateSampleFromInputBuffer( | |
| 119 const media::BitstreamBuffer& bitstream_buffer, | |
| 120 base::ProcessHandle renderer_process, | |
| 121 DWORD stream_size, | |
| 122 DWORD alignment) { | |
| 123 HANDLE shared_memory_handle = NULL; | |
| 124 if (!::DuplicateHandle(renderer_process, | |
| 125 bitstream_buffer.handle(), | |
| 126 ::GetCurrentProcess(), | |
| 127 &shared_memory_handle, | |
| 128 0, | |
| 129 FALSE, | |
| 130 DUPLICATE_SAME_ACCESS)) { | |
| 131 NOTREACHED() << "Failed to open duplicate shared mem handle"; | |
| 132 return NULL; | |
| 133 } | |
| 134 | |
| 135 base::SharedMemory shm(shared_memory_handle, true); | |
| 136 if (!shm.Map(bitstream_buffer.size())) { | |
| 137 NOTREACHED() << "Failed in SharedMemory::Map()"; | |
| 138 return NULL; | |
| 139 } | |
| 140 | |
| 141 return CreateInputSample(reinterpret_cast<const uint8*>(shm.memory()), | |
| 142 bitstream_buffer.size(), | |
| 143 stream_size, | |
| 144 alignment); | |
| 145 } | |
| 146 | |
| 147 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer( | |
| 148 bool is_available, const media::PictureBuffer& buffer) | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Does the ctor ever get called with an is_available
ananta
2011/12/17 00:40:25
It does with your suggestion of using insert to ch
Ami GONE FROM CHROMIUM
2011/12/19 22:53:44
Where? All I'm seeing is "true", never "false".
| |
| 149 : available(is_available), | |
| 150 picture_buffer(buffer) { | |
| 151 } | |
| 152 | |
| 153 DXVAVideoDecodeAccelerator::DXVAPictureBuffer::DXVAPictureBuffer() | |
| 154 : available(false), | |
| 155 picture_buffer(0, gfx::Size(), 0) { | |
| 156 } | |
| 157 | |
| 158 DXVAVideoDecodeAccelerator::PendingSampleInfo::PendingSampleInfo( | |
| 159 int32 buffer_id, IDirect3DSurface9* surface) | |
| 160 : input_buffer_id(buffer_id), | |
| 161 dest_surface(surface) { | |
| 162 } | |
| 163 | |
| 164 DXVAVideoDecodeAccelerator::DXVAVideoDecodeAccelerator( | |
| 165 media::VideoDecodeAccelerator::Client* client, | |
| 166 base::ProcessHandle renderer_process) | |
| 167 : client_(client), | |
| 168 state_(kUninitialized), | |
| 169 pictures_requested_(false), | |
| 170 renderer_process_(renderer_process), | |
| 171 dev_manager_reset_token_(0), | |
| 172 last_input_buffer_id_(-1), | |
| 173 inputs_before_decode_(0) { | |
| 174 method_factory_.reset( | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Unused, which is unfortunate (see my comment about
ananta
2011/12/17 01:29:07
Removed
| |
| 175 new ScopedRunnableMethodFactory<media::VideoDecodeAccelerator::Client>( | |
| 176 client_)); | |
| 177 } | |
| 178 | |
| 179 DXVAVideoDecodeAccelerator::~DXVAVideoDecodeAccelerator() { | |
| 180 client_ = NULL; | |
| 181 } | |
| 182 | |
| 183 bool DXVAVideoDecodeAccelerator::Initialize(Profile profile) { | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/ profile// if unused
ananta
2011/12/17 00:40:25
Done.
| |
| 184 DCHECK(CalledOnValidThread()); | |
| 185 | |
| 186 if (state_ != kUninitialized) { | |
| 187 NOTREACHED() << "Initialize: invalid state: " << state_; | |
| 188 return false; | |
| 189 } | |
| 190 | |
| 191 HRESULT hr = MFStartup(MF_VERSION, MFSTARTUP_FULL); | |
| 192 if (FAILED(hr)) { | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
use macro?
(here and elsewhere; please do a sweep)
ananta
2011/12/17 00:40:25
Done.
| |
| 193 NOTREACHED() << "MFStartup failed. Error:" << std::hex << std::showbase | |
| 194 << hr; | |
| 195 return false; | |
| 196 } | |
| 197 if (!CreateD3DDevManager()) | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
l.197-208 can be:
if (!CreateD3DDevManager() || !I
ananta
2011/12/17 00:40:25
Changed with the new macros.
| |
| 198 return false; | |
| 199 | |
| 200 if (!InitDecoder()) | |
| 201 return false; | |
| 202 | |
| 203 if (!GetStreamsInfoAndBufferReqs()) | |
| 204 return false; | |
| 205 | |
| 206 if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, 0)) | |
| 207 return false; | |
| 208 | |
| 209 state_ = kNormal; | |
| 210 MessageLoop::current()->PostTask(FROM_HERE, | |
| 211 base::Bind(&media::VideoDecodeAccelerator::Client::NotifyInitializeDone, | |
| 212 base::Unretained(client_))); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
base::Unretained() should make you wonder about li
ananta
2011/12/17 01:29:07
Done.
| |
| 213 return true; | |
| 214 } | |
| 215 | |
| 216 void DXVAVideoDecodeAccelerator::Decode( | |
| 217 const media::BitstreamBuffer& bitstream_buffer) { | |
| 218 DCHECK(CalledOnValidThread()); | |
| 219 if (state_ != kNormal && state_ != kStopped) { | |
| 220 NOTREACHED() << "ConsumeVideoSample: invalid state"; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
NOTREACHED() should be reserved for detecting prog
ananta
2011/12/17 00:40:25
Done.
| |
| 221 return; | |
| 222 } | |
| 223 | |
| 224 base::win::ScopedComPtr<IMFSample> sample; | |
| 225 sample.Attach(CreateSampleFromInputBuffer(bitstream_buffer, | |
| 226 renderer_process_, | |
| 227 input_stream_info_.cbSize, | |
| 228 input_stream_info_.cbAlignment)); | |
| 229 if (!sample.get()) { | |
| 230 NOTREACHED() << "Failed to create an input sample"; | |
| 231 NotifyError(PLATFORM_FAILURE); | |
| 232 return; | |
| 233 } | |
| 234 | |
| 235 if (!inputs_before_decode_) { | |
| 236 TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); | |
| 237 } | |
| 238 inputs_before_decode_++; | |
| 239 | |
| 240 if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, 0)) { | |
| 241 NOTREACHED() << "Failed to send start of stream message to MFT"; | |
| 242 NotifyError(PLATFORM_FAILURE); | |
| 243 return; | |
| 244 } | |
| 245 | |
| 246 if (FAILED(decoder_->ProcessInput(0, sample.get(), 0))) { | |
| 247 NOTREACHED() << "Failed to process input"; | |
| 248 NotifyError(PLATFORM_FAILURE); | |
| 249 return; | |
| 250 } | |
| 251 | |
| 252 if (state_ != DXVAVideoDecodeAccelerator::kEosDrain) { | |
| 253 if (!SendMFTMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, 0)) { | |
| 254 NOTREACHED() << "Failed to send eos message to MFT"; | |
| 255 NotifyError(PLATFORM_FAILURE); | |
| 256 return; | |
| 257 } else { | |
| 258 state_ = kEosDrain; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I still don't understand this.
What happens if th
ananta
2011/12/17 01:29:07
Every decode call is associated with a ProcessInpu
| |
| 259 } | |
| 260 } | |
| 261 | |
| 262 last_input_buffer_id_ = bitstream_buffer.id(); | |
| 263 DoDecode(); | |
| 264 if (state_ != kStopped && state_ != kNormal) { | |
| 265 NOTREACHED() << "Failed to process output. Unexpected decoder state: " | |
| 266 << state_; | |
| 267 NotifyError(PLATFORM_FAILURE); | |
| 268 return; | |
| 269 } | |
| 270 // The Microsoft Media foundation decoder internally buffers upto 30 fps | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/upto/up to/
s/30fps/30 frames/
ananta
2011/12/17 00:40:25
Done.
| |
| 271 // before returning a decoded frame. We need to inform the client that this | |
| 272 // input buffer is processed as it may stop sending us further input. | |
| 273 // Note: This may break clients which expect every input buffer to be | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Wouldn't it be better to not do this (retain the s
ananta
2011/12/17 00:40:25
As per our discussion leaving this as is for now.
Ami GONE FROM CHROMIUM
2011/12/19 22:53:44
Please add a TODO for this and point to a crbug.
ananta
2011/12/20 02:27:22
Done.
| |
| 274 // associated with a decoded output buffer. | |
| 275 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 276 &media::VideoDecodeAccelerator::Client::NotifyEndOfBitstreamBuffer, | |
| 277 base::Unretained(client_), bitstream_buffer.id())); | |
| 278 } | |
| 279 | |
| 280 void DXVAVideoDecodeAccelerator::AssignPictureBuffers( | |
| 281 const std::vector<media::PictureBuffer>& buffers) { | |
| 282 DCHECK(CalledOnValidThread()); | |
| 283 // Copy the picture buffers provided by the client to the available list, | |
| 284 // and mark these buffers as available for use. | |
| 285 for (size_t buffer_index = 0; buffer_index < buffers.size(); | |
| 286 ++buffer_index) { | |
| 287 DXVAPictureBuffer picture_buffer(true, buffers[buffer_index]); | |
| 288 | |
| 289 DCHECK(output_picture_buffers_.find(buffers[buffer_index].id()) == | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you missed my point. By calling insert()
ananta
2011/12/17 01:29:07
Done.
| |
| 290 output_picture_buffers_.end()); | |
| 291 output_picture_buffers_[buffers[buffer_index].id()] = picture_buffer; | |
| 292 } | |
| 293 ProcessPendingSamples(); | |
| 294 } | |
| 295 | |
| 296 void DXVAVideoDecodeAccelerator::ReusePictureBuffer( | |
| 297 int32 picture_buffer_id) { | |
| 298 DCHECK(CalledOnValidThread()); | |
| 299 | |
| 300 OutputBuffers::iterator it = output_picture_buffers_.find(picture_buffer_id); | |
| 301 if (it == output_picture_buffers_.end()) { | |
| 302 DVLOG(1) << "Invalid picture id"; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
NotifyError
ananta
2011/12/17 00:40:25
Done.
| |
| 303 return; | |
| 304 } | |
| 305 it->second.available = true; | |
| 306 ProcessPendingSamples(); | |
| 307 } | |
| 308 | |
| 309 void DXVAVideoDecodeAccelerator::Flush() { | |
| 310 DCHECK(CalledOnValidThread()); | |
| 311 | |
| 312 VLOG(1) << "DXVAVideoDecodeAccelerator::Flush"; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you missed my comment to s/VLOG/DVLOG/ eve
ananta
2011/12/17 00:40:25
Done.
| |
| 313 | |
| 314 if (state_ != kNormal && state_ != kStopped) { | |
| 315 NOTREACHED() << "ConsumeVideoSample: invalid state:" << state_; | |
| 316 NotifyError(ILLEGAL_STATE); | |
| 317 return; | |
| 318 } | |
| 319 | |
| 320 DCHECK(state_ != kEosDrain); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Redundant to the if above.
ananta
2011/12/17 00:40:25
Done.
| |
| 321 state_ = kEosDrain; | |
| 322 | |
| 323 if (!SendMFTMessage(MFT_MESSAGE_COMMAND_DRAIN, 0)) { | |
| 324 VLOG(1) << "Failed to send drain message"; | |
| 325 state_ = kStopped; | |
| 326 NotifyError(PLATFORM_FAILURE); | |
| 327 return; | |
| 328 } | |
| 329 | |
| 330 // As per MSDN docs after the client sends this message, it calls | |
| 331 // IMFTransform::ProcessOutput in a loop, until ProcessOutput returns the | |
| 332 // error code MF_E_TRANSFORM_NEED_MORE_INPUT. The DoDecode function sets | |
| 333 // the state to kStopped when the decoder returns | |
| 334 // MF_E_TRANSFORM_NEED_MORE_INPUT. | |
| 335 while (state_ != kStopped) { | |
| 336 DoDecode(); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Given DoDecode() is synchronous does this loop bod
ananta
2011/12/17 00:40:25
What we are doing is as per msdn documentation. I
| |
| 337 } | |
| 338 | |
| 339 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 340 &media::VideoDecodeAccelerator::Client::NotifyFlushDone, | |
| 341 base::Unretained(client_))); | |
| 342 | |
| 343 state_ = kNormal;} | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
missing newline before brace.
ananta
2011/12/17 00:40:25
Done.
| |
| 344 | |
| 345 void DXVAVideoDecodeAccelerator::Reset() { | |
| 346 DCHECK(CalledOnValidThread()); | |
| 347 | |
| 348 VLOG(1) << "DXVAVideoDecodeAccelerator::Reset"; | |
| 349 | |
| 350 bool success = false; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
unused?
ananta
2011/12/17 00:40:25
Done.
| |
| 351 | |
| 352 if (state_ != kNormal && state_ != kStopped) { | |
| 353 NOTREACHED() << "Reset: invalid state"; | |
| 354 NotifyError(ILLEGAL_STATE); | |
| 355 return; | |
| 356 } | |
| 357 | |
| 358 DCHECK(state_ != kResetting); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
redundant
ananta
2011/12/17 00:40:25
Done.
| |
| 359 state_ = kResetting; | |
| 360 | |
| 361 if (!SendMFTMessage(MFT_MESSAGE_COMMAND_FLUSH, 0)) { | |
| 362 VLOG(1) << "DXVAVideoDecodeAccelerator::Flush failed to send message"; | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
s/Flush/Reset/
ananta
2011/12/17 00:40:25
Done.
| |
| 363 NotifyError(PLATFORM_FAILURE); | |
| 364 return; | |
| 365 } | |
| 366 | |
| 367 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 368 &media::VideoDecodeAccelerator::Client::NotifyResetDone, | |
| 369 base::Unretained(client_))); | |
| 370 | |
| 371 ClearState(); | |
| 372 state_ = DXVAVideoDecodeAccelerator::kNormal; | |
| 373 } | |
| 374 | |
| 375 void DXVAVideoDecodeAccelerator::Destroy() { | |
| 376 DCHECK(CalledOnValidThread()); | |
| 377 | |
| 378 OutputBuffers::iterator index; | |
| 379 for (index = output_picture_buffers_.begin(); | |
| 380 index != output_picture_buffers_.end(); | |
| 381 ++index) { | |
| 382 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 383 &media::VideoDecodeAccelerator::Client::DismissPictureBuffer, | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
This violates Destroy's API http://codesearch.goog
ananta
2011/12/17 00:40:25
Done.
| |
| 384 base::Unretained(client_), index->second.picture_buffer.id())); | |
| 385 } | |
| 386 ClearState(); | |
| 387 decoder_.Release(); | |
| 388 device_.Release(); | |
| 389 device_manager_.Release(); | |
| 390 MFShutdown(); | |
| 391 state_ = kUninitialized; | |
| 392 } | |
| 393 | |
| 394 bool DXVAVideoDecodeAccelerator::CreateD3DDevManager() { | |
| 395 base::win::ScopedComPtr<IDirect3D9Ex> d3d9; | |
| 396 | |
| 397 Direct3DCreate9Ex(D3D_SDK_VERSION, d3d9.Receive()); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Just to make sure you didn't miss them, apatrick m
ananta
2011/12/17 00:40:25
Yeah. We use the Ex versions of the APIs for that
| |
| 398 if (d3d9.get() == NULL) { | |
| 399 NOTREACHED() << "Failed to create D3D9"; | |
| 400 return false; | |
| 401 } | |
| 402 | |
| 403 D3DPRESENT_PARAMETERS present_params = {0}; | |
| 404 present_params.BackBufferWidth = 1; | |
| 405 present_params.BackBufferHeight = 1; | |
| 406 present_params.BackBufferFormat = D3DFMT_UNKNOWN; | |
| 407 present_params.BackBufferCount = 1; | |
| 408 present_params.SwapEffect = D3DSWAPEFFECT_DISCARD; | |
| 409 present_params.hDeviceWindow = GetShellWindow(); | |
| 410 present_params.Windowed = TRUE; | |
| 411 present_params.Flags = D3DPRESENTFLAG_VIDEO; | |
| 412 present_params.FullScreen_RefreshRateInHz = 0; | |
| 413 present_params.PresentationInterval = 0; | |
| 414 | |
| 415 HRESULT hr = d3d9->CreateDeviceEx(D3DADAPTER_DEFAULT, | |
| 416 D3DDEVTYPE_HAL, | |
| 417 GetShellWindow(), | |
| 418 D3DCREATE_SOFTWARE_VERTEXPROCESSING, | |
| 419 &present_params, | |
| 420 NULL, | |
| 421 device_.Receive()); | |
| 422 RETURN_ON_HR_FAILURE(hr, "Failed to create D3D Device", false); | |
| 423 | |
| 424 hr = DXVA2CreateDirect3DDeviceManager9(&dev_manager_reset_token_, | |
| 425 device_manager_.Receive()); | |
| 426 RETURN_ON_HR_FAILURE(hr, "Couldn't create D3D Device manager", false); | |
| 427 | |
| 428 hr = device_manager_->ResetDevice(device_.get(), | |
| 429 dev_manager_reset_token_); | |
| 430 RETURN_ON_HR_FAILURE(hr, "Failed to set device to device manager", false); | |
| 431 return true; | |
| 432 } | |
| 433 | |
| 434 bool DXVAVideoDecodeAccelerator::InitDecoder() { | |
| 435 HRESULT hr = CoCreateInstance(__uuidof(CMSH264DecoderMFT), | |
| 436 NULL, | |
| 437 CLSCTX_INPROC_SERVER, | |
| 438 __uuidof(IMFTransform), | |
| 439 reinterpret_cast<void**>(decoder_.Receive())); | |
| 440 RETURN_ON_HR_FAILURE(hr, "Failed to CoCreate decoder.", false); | |
| 441 | |
| 442 if (!CheckDecoderDxvaSupport()) | |
| 443 return false; | |
| 444 | |
| 445 hr = decoder_->ProcessMessage( | |
| 446 MFT_MESSAGE_SET_D3D_MANAGER, | |
| 447 reinterpret_cast<ULONG_PTR>(device_manager_.get())); | |
| 448 RETURN_ON_HR_FAILURE(hr, "Failed to pass D3D9 device to decoder.", false); | |
| 449 | |
| 450 return SetDecoderMediaTypes(); | |
| 451 } | |
| 452 | |
| 453 bool DXVAVideoDecodeAccelerator::CheckDecoderDxvaSupport() { | |
| 454 base::win::ScopedComPtr<IMFAttributes> attributes; | |
| 455 HRESULT hr = decoder_->GetAttributes(attributes.Receive()); | |
| 456 RETURN_ON_HR_FAILURE(hr, "Failed to pass decoder attributes.", false); | |
| 457 | |
| 458 UINT32 dxva = 0; | |
| 459 hr = attributes->GetUINT32(MF_SA_D3D_AWARE, &dxva); | |
| 460 RETURN_ON_HR_FAILURE(hr, "Failed to get DXVA aware attribute.", false); | |
| 461 | |
| 462 hr = attributes->SetUINT32(CODECAPI_AVDecVideoAcceleration_H264, TRUE); | |
| 463 RETURN_ON_HR_FAILURE(hr, "Failed to force codec to use H/W DXVA.", false); | |
| 464 return true; | |
| 465 } | |
| 466 | |
| 467 bool DXVAVideoDecodeAccelerator::SetDecoderMediaTypes() { | |
| 468 if (!SetDecoderInputMediaType()) | |
| 469 return false; | |
| 470 return SetDecoderOutputMediaType(MFVideoFormat_NV12); | |
| 471 } | |
| 472 | |
| 473 bool DXVAVideoDecodeAccelerator::SetDecoderInputMediaType() { | |
| 474 base::win::ScopedComPtr<IMFMediaType> media_type; | |
| 475 HRESULT hr = MFCreateMediaType(media_type.Receive()); | |
| 476 RETURN_ON_HR_FAILURE(hr, "Failed to create empty media type object", false); | |
| 477 | |
| 478 hr = media_type->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video); | |
| 479 RETURN_ON_HR_FAILURE(hr, "SetGUID for major type failed", false); | |
| 480 | |
| 481 hr = media_type->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264); | |
| 482 RETURN_ON_HR_FAILURE(hr, "SetGUID for subtype failed", false); | |
| 483 | |
| 484 hr = decoder_->SetInputType(0, media_type.get(), 0); // No flags | |
| 485 RETURN_ON_HR_FAILURE(hr, "Failed to set decoder's input type", false); | |
| 486 return true; | |
| 487 } | |
| 488 | |
| 489 bool DXVAVideoDecodeAccelerator::SetDecoderOutputMediaType( | |
| 490 const GUID& subtype) { | |
| 491 base::win::ScopedComPtr<IMFMediaType> out_media_type; | |
| 492 | |
| 493 for (uint32 i = 0; | |
| 494 SUCCEEDED(decoder_->GetOutputAvailableType(0, i, | |
| 495 out_media_type.Receive())); | |
| 496 ++i) { | |
| 497 GUID out_subtype = {0}; | |
| 498 HRESULT hr = out_media_type->GetGUID(MF_MT_SUBTYPE, &out_subtype); | |
| 499 RETURN_ON_HR_FAILURE(hr, "Failed to get output media type guid", false); | |
| 500 | |
| 501 if (out_subtype == subtype) { | |
| 502 hr = decoder_->SetOutputType(0, out_media_type, 0); // No flags | |
| 503 RETURN_ON_HR_FAILURE(hr, "Failed to set output media type", false); | |
| 504 return true; | |
| 505 } | |
| 506 out_media_type.Release(); | |
| 507 } | |
| 508 return false; | |
| 509 } | |
| 510 | |
| 511 bool DXVAVideoDecodeAccelerator::SendMFTMessage(MFT_MESSAGE_TYPE msg, | |
| 512 int32 param) { | |
| 513 HRESULT hr = decoder_->ProcessMessage(msg, param); | |
| 514 return SUCCEEDED(hr); | |
| 515 } | |
| 516 | |
| 517 // Gets the minimum buffer sizes for input and output samples. The MFT will not | |
| 518 // allocate buffer for input nor output, so we have to do it ourselves and make | |
| 519 // sure they're the correct size. We only provide decoding if DXVA is enabled. | |
| 520 bool DXVAVideoDecodeAccelerator::GetStreamsInfoAndBufferReqs() { | |
| 521 HRESULT hr = decoder_->GetInputStreamInfo(0, &input_stream_info_); | |
| 522 RETURN_ON_HR_FAILURE(hr, "Failed to get input stream info", false); | |
| 523 | |
| 524 hr = decoder_->GetOutputStreamInfo(0, &output_stream_info_); | |
| 525 RETURN_ON_HR_FAILURE(hr, "Failed to get output stream info", false); | |
| 526 | |
| 527 VLOG(1) << "Input stream info: "; | |
| 528 VLOG(1) << "Max latency: " << input_stream_info_.hnsMaxLatency; | |
| 529 // There should be three flags, one for requiring a whole frame be in a | |
| 530 // single sample, one for requiring there be one buffer only in a single | |
| 531 // sample, and one that specifies a fixed sample size. (as in cbSize) | |
| 532 CHECK_EQ(input_stream_info_.dwFlags, 0x7u); | |
| 533 | |
| 534 VLOG(1) << "Min buffer size: " << input_stream_info_.cbSize; | |
| 535 VLOG(1) << "Max lookahead: " << input_stream_info_.cbMaxLookahead; | |
| 536 VLOG(1) << "Alignment: " << input_stream_info_.cbAlignment; | |
| 537 | |
| 538 VLOG(1) << "Output stream info: "; | |
| 539 // The flags here should be the same and mean the same thing, except when | |
| 540 // DXVA is enabled, there is an extra 0x100 flag meaning decoder will | |
| 541 // allocate its own sample. | |
| 542 VLOG(1) << "Flags: " | |
| 543 << std::hex << std::showbase << output_stream_info_.dwFlags; | |
| 544 CHECK_EQ(output_stream_info_.dwFlags, 0x107u); | |
| 545 VLOG(1) << "Min buffer size: " << output_stream_info_.cbSize; | |
| 546 VLOG(1) << "Alignment: " << output_stream_info_.cbAlignment; | |
| 547 return true; | |
| 548 } | |
| 549 | |
| 550 void DXVAVideoDecodeAccelerator::DoDecode() { | |
| 551 if (state_ != kNormal && state_ != kEosDrain) { | |
| 552 NOTREACHED() << "DoDecode: not in normal or drain state"; | |
| 553 return; | |
| 554 } | |
| 555 | |
| 556 MFT_OUTPUT_DATA_BUFFER output_data_buffer = {0}; | |
| 557 DWORD status = 0; | |
| 558 | |
| 559 HRESULT hr = decoder_->ProcessOutput(0, // No flags | |
| 560 1, // # of out streams to pull from | |
| 561 &output_data_buffer, | |
| 562 &status); | |
| 563 IMFCollection* events = output_data_buffer.pEvents; | |
| 564 if (events != NULL) { | |
| 565 VLOG(1) << "Got events from ProcessOuput, but discarding"; | |
| 566 events->Release(); | |
| 567 } | |
| 568 if (FAILED(hr)) { | |
| 569 if (hr == MF_E_TRANSFORM_STREAM_CHANGE) { | |
| 570 if (!SetDecoderOutputMediaType(MFVideoFormat_NV12)) { | |
| 571 // No more output from the decoder. Notify EOS and stop playback. | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Comment is at odds with the following NOTREACHED.
ananta
2011/12/17 00:40:25
Not sure what we can do here. It is probably corre
| |
| 572 NOTREACHED() << "Failed to set decoder output media type"; | |
| 573 } | |
| 574 // No more output from the decoder. Stop playback. | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
copy/paste from the NEED_MORE_INPUT case below? I
ananta
2011/12/17 00:40:25
A stream change needs further process input calls
| |
| 575 state_ = kStopped; | |
| 576 return; | |
| 577 } else if (hr == MF_E_TRANSFORM_NEED_MORE_INPUT) { | |
| 578 // No more output from the decoder. Stop playback. | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Add a DCHECK_EQ(state_, kEosDrain); i.e. that we w
ananta
2011/12/17 00:40:25
Done.
| |
| 579 state_ = kStopped; | |
| 580 return; | |
| 581 } else { | |
| 582 NOTREACHED() << "Unhandled error in DoDecode()"; | |
| 583 return; | |
| 584 } | |
| 585 } | |
| 586 | |
| 587 TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.Decoding", this, ""); | |
| 588 | |
| 589 TRACE_COUNTER1("DXVA Decoding", "TotalPacketsBeforeDecode", | |
| 590 inputs_before_decode_); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
Can this ever *not* be 1, given the synchronous pa
ananta
2011/12/17 00:40:25
Covered with the decoder buffering behavior.
| |
| 591 | |
| 592 inputs_before_decode_ = 0; | |
| 593 | |
| 594 if (!ProcessOutputSample(output_data_buffer.pSample)) { | |
| 595 NotifyError(PLATFORM_FAILURE); | |
| 596 return; | |
| 597 } | |
| 598 state_ = kNormal; | |
| 599 } | |
| 600 | |
| 601 bool DXVAVideoDecodeAccelerator::ProcessOutputSample(IMFSample* sample) { | |
| 602 if (!sample) { | |
| 603 NOTREACHED() << "ProcessOutput succeeded, but did not get a sample back"; | |
| 604 return false; | |
| 605 } | |
| 606 base::win::ScopedComPtr<IMFSample> output_sample; | |
| 607 output_sample.Attach(sample); | |
| 608 | |
| 609 base::win::ScopedComPtr<IMFMediaBuffer> output_buffer; | |
| 610 HRESULT hr = sample->GetBufferByIndex(0, output_buffer.Receive()); | |
| 611 RETURN_ON_HR_FAILURE(hr, "Failed to get buffer from sample.", false); | |
| 612 | |
| 613 base::win::ScopedComPtr<IDirect3DSurface9> surface; | |
| 614 hr = MFGetService(output_buffer, MR_BUFFER_SERVICE, | |
| 615 IID_PPV_ARGS(surface.Receive())); | |
| 616 RETURN_ON_HR_FAILURE(hr, "Failed to get surface from buffer.", false); | |
| 617 | |
| 618 D3DSURFACE_DESC surface_desc; | |
| 619 hr = surface->GetDesc(&surface_desc); | |
| 620 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); | |
| 621 | |
| 622 TRACE_EVENT_BEGIN_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, | |
| 623 ""); | |
| 624 // TODO(ananta) | |
| 625 // The code below may not be necessary once we have an ANGLE extension which | |
| 626 // allows us to pass the Direct 3D surface directly for rendering. | |
| 627 | |
| 628 // The decoded bits in the source direct 3d surface are in the YUV | |
| 629 // format. Angle does not support that. As a workaround we create an | |
| 630 // offscreen surface in the RGB format and copy the source surface | |
| 631 // to this surface. | |
| 632 base::win::ScopedComPtr<IDirect3DSurface9> dest_surface; | |
| 633 hr = device_->CreateOffscreenPlainSurface(surface_desc.Width, | |
| 634 surface_desc.Height, | |
| 635 D3DFMT_A8R8G8B8, | |
| 636 D3DPOOL_DEFAULT, | |
| 637 dest_surface.Receive(), | |
| 638 NULL); | |
| 639 RETURN_ON_HR_FAILURE(hr, "Failed to create offscreen surface.", false); | |
| 640 | |
| 641 hr = D3DXLoadSurfaceFromSurface(dest_surface, NULL, NULL, surface, NULL, | |
| 642 NULL, 0, 0); | |
| 643 RETURN_ON_HR_FAILURE(hr, "Failed to copy source surface to dest.", false); | |
| 644 | |
| 645 TRACE_EVENT_END_ETW("DXVAVideoDecodeAccelerator.SurfaceCreation", this, ""); | |
| 646 | |
| 647 scoped_array<char> bits; | |
| 648 if (!GetBitmapFromSurface(dest_surface, &bits)) { | |
| 649 NOTREACHED() << "Failed to get bitmap from dest surface"; | |
| 650 return false; | |
| 651 } | |
| 652 | |
| 653 pending_output_samples_.push_back( | |
| 654 PendingSampleInfo(last_input_buffer_id_, dest_surface)); | |
| 655 | |
| 656 // If we have available picture buffers to copy the output data then use the | |
| 657 // first one and then flag it as not being available for use. | |
| 658 if (output_picture_buffers_.size()) { | |
| 659 ProcessPendingSamples(); | |
| 660 return true; | |
| 661 } | |
| 662 if (pictures_requested_) { | |
| 663 VLOG(1) << "Waiting for picture slots from the client."; | |
| 664 return true; | |
| 665 } | |
| 666 // Go ahead and request picture buffers. | |
| 667 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 668 &media::VideoDecodeAccelerator::Client::ProvidePictureBuffers, | |
| 669 base::Unretained(client_), kNumPictureBuffers, | |
| 670 gfx::Size(surface_desc.Width, surface_desc.Height))); | |
| 671 | |
| 672 pictures_requested_ = true; | |
| 673 return true; | |
| 674 } | |
| 675 | |
| 676 bool DXVAVideoDecodeAccelerator::CopyOutputSampleDataToPictureBuffer( | |
| 677 IDirect3DSurface9* dest_surface, media::PictureBuffer picture_buffer, | |
| 678 int input_buffer_id) { | |
| 679 DCHECK(dest_surface); | |
| 680 | |
| 681 D3DSURFACE_DESC surface_desc; | |
| 682 HRESULT hr = dest_surface->GetDesc(&surface_desc); | |
| 683 RETURN_ON_HR_FAILURE(hr, "Failed to get surface description.", false); | |
| 684 | |
| 685 scoped_array<char> bits; | |
| 686 if (!GetBitmapFromSurface(dest_surface, &bits)) { | |
| 687 NOTREACHED() << "Failed to read bits from D3D surface"; | |
| 688 return false; | |
| 689 } | |
| 690 | |
| 691 GLint current_texture = 0; | |
| 692 glGetIntegerv(GL_TEXTURE_BINDING_2D, ¤t_texture); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I'm surprised to see these (global-scope) gl* call
ananta
2011/12/17 00:40:25
When i step into these calls i always get a contex
ananta
2011/12/17 00:40:25
Based on a discussion with Al Patrick an open gl c
| |
| 693 | |
| 694 glBindTexture(GL_TEXTURE_2D, picture_buffer.texture_id()); | |
| 695 glTexImage2D(GL_TEXTURE_2D, 0, GL_BGRA_EXT, surface_desc.Width, | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
How is this safe without a glFinish?
ananta
2011/12/17 00:40:25
Again based on a discussion with Al Patrick not ne
| |
| 696 surface_desc.Height, 0, GL_BGRA_EXT, GL_UNSIGNED_BYTE, | |
| 697 reinterpret_cast<GLvoid*>(bits.get())); | |
| 698 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); | |
| 699 | |
| 700 glBindTexture(GL_TEXTURE_2D, current_texture); | |
| 701 | |
| 702 media::Picture output_picture(picture_buffer.id(), input_buffer_id); | |
| 703 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 704 &media::VideoDecodeAccelerator::Client::PictureReady, | |
| 705 base::Unretained(client_), output_picture)); | |
| 706 return true; | |
| 707 } | |
| 708 | |
| 709 void DXVAVideoDecodeAccelerator::ProcessPendingSamples() { | |
| 710 if (pending_output_samples_.empty()) | |
| 711 return; | |
| 712 | |
| 713 OutputBuffers::iterator index; | |
| 714 | |
| 715 for (index = output_picture_buffers_.begin(); | |
| 716 index != output_picture_buffers_.end(); | |
| 717 ++index) { | |
| 718 if (index->second.available) { | |
| 719 PendingSampleInfo sample_info = pending_output_samples_.front(); | |
| 720 | |
| 721 CopyOutputSampleDataToPictureBuffer(sample_info.dest_surface, | |
| 722 index->second.picture_buffer, | |
| 723 sample_info.input_buffer_id); | |
| 724 index->second.available = false; | |
| 725 pending_output_samples_.erase(pending_output_samples_.begin()); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
pop_front()
ananta
2011/12/17 00:40:25
Done.
| |
| 726 | |
| 727 if (pending_output_samples_.empty()) | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
If you move this to the for-loop test you can drop
ananta
2011/12/17 00:40:25
Done.
| |
| 728 break; | |
| 729 } | |
| 730 } | |
| 731 } | |
| 732 | |
| 733 void DXVAVideoDecodeAccelerator::ClearState() { | |
| 734 last_input_buffer_id_ = -1; | |
| 735 output_picture_buffers_.clear(); | |
| 736 pending_output_samples_.clear(); | |
| 737 } | |
| 738 | |
| 739 void DXVAVideoDecodeAccelerator::NotifyError( | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
I think you actually want a StopOnError-equivalent
ananta
2011/12/17 00:40:25
Done.
| |
| 740 media::VideoDecodeAccelerator::Error error) { | |
| 741 MessageLoop::current()->PostTask(FROM_HERE, base::Bind( | |
| 742 &media::VideoDecodeAccelerator::Client::NotifyError, | |
| 743 base::Unretained(client_), error)); | |
| 744 } | |
| 745 | |
| 746 bool DXVAVideoDecodeAccelerator::GetBitmapFromSurface( | |
| 747 IDirect3DSurface9* surface, | |
| 748 scoped_array<char>* bits) { | |
| 749 // Get the currently loaded bitmap from the DC. | |
| 750 HDC hdc = NULL; | |
| 751 HRESULT hr = surface->GetDC(&hdc); | |
| 752 RETURN_ON_HR_FAILURE(hr, "Failed to get HDC for dest offscreen surface.", | |
| 753 false); | |
| 754 HBITMAP bitmap = | |
| 755 reinterpret_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP)); | |
| 756 if (!bitmap) { | |
| 757 NOTREACHED() << "Failed to get bitmap from DC"; | |
| 758 surface->ReleaseDC(hdc); | |
| 759 return false; | |
| 760 } | |
| 761 // TODO(ananta) | |
| 762 // The code below may not be necessary once we have an ANGLE extension which | |
| 763 // allows us to pass the Direct 3D surface directly for rendering. | |
| 764 // The Device dependent bitmap is upside down for OpenGL. We convert the | |
| 765 // bitmap to a DIB and render it on the texture instead. | |
| 766 BITMAP bitmap_basic_info = {0}; | |
| 767 if (!GetObject(bitmap, sizeof(BITMAP), &bitmap_basic_info)) { | |
| 768 NOTREACHED() << "Failed to read bitmap info"; | |
| 769 surface->ReleaseDC(hdc); | |
| 770 return false; | |
| 771 } | |
| 772 BITMAPINFO bitmap_info = {0}; | |
| 773 bitmap_info.bmiHeader.biSize = sizeof(BITMAPINFOHEADER); | |
| 774 bitmap_info.bmiHeader.biWidth = bitmap_basic_info.bmWidth; | |
| 775 bitmap_info.bmiHeader.biHeight = bitmap_basic_info.bmHeight; | |
| 776 bitmap_info.bmiHeader.biPlanes = 1; | |
| 777 bitmap_info.bmiHeader.biBitCount = bitmap_basic_info.bmBitsPixel; | |
| 778 bitmap_info.bmiHeader.biCompression = BI_RGB; | |
| 779 bitmap_info.bmiHeader.biSizeImage = 0; | |
| 780 bitmap_info.bmiHeader.biClrUsed = 0; | |
| 781 | |
| 782 int ret = GetDIBits(hdc, bitmap, 0, 0, NULL, &bitmap_info, DIB_RGB_COLORS); | |
| 783 if (!ret || bitmap_info.bmiHeader.biSizeImage <= 0) { | |
| 784 NOTREACHED() << "Failed to read bitmap size"; | |
| 785 surface->ReleaseDC(hdc); | |
| 786 return false; | |
| 787 } | |
| 788 | |
| 789 bits->reset(new char[bitmap_info.bmiHeader.biSizeImage]); | |
| 790 ret = GetDIBits(hdc, bitmap, 0, bitmap_basic_info.bmHeight, bits->get(), | |
| 791 &bitmap_info, DIB_RGB_COLORS); | |
| 792 | |
| 793 surface->ReleaseDC(hdc); | |
|
Ami GONE FROM CHROMIUM
2011/12/16 07:38:54
combine into if below, or error-check otherwise?
ananta
2011/12/17 00:40:25
Done.
| |
| 794 if (!ret) { | |
| 795 NOTREACHED() << "Failed to retrieve bitmap bits."; | |
| 796 return false; | |
| 797 } | |
| 798 return true; | |
| 799 } | |
| OLD | NEW |