Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(72)

Side by Side Diff: content/common/gpu/media/vt_video_decode_accelerator_mac.cc

Issue 1882373004: Migrate content/common/gpu/media code to media/gpu (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix several more bot-identified build issues Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "content/common/gpu/media/vt_video_decode_accelerator_mac.h"
6
7 #include <CoreVideo/CoreVideo.h>
8 #include <OpenGL/CGLIOSurface.h>
9 #include <OpenGL/gl.h>
10 #include <stddef.h>
11
12 #include <algorithm>
13
14 #include "base/bind.h"
15 #include "base/logging.h"
16 #include "base/mac/mac_logging.h"
17 #include "base/macros.h"
18 #include "base/memory/ptr_util.h"
19 #include "base/metrics/histogram_macros.h"
20 #include "base/sys_byteorder.h"
21 #include "base/sys_info.h"
22 #include "base/thread_task_runner_handle.h"
23 #include "base/version.h"
24 #include "media/base/limits.h"
25 #include "ui/gl/gl_context.h"
26 #include "ui/gl/gl_image_io_surface.h"
27 #include "ui/gl/gl_implementation.h"
28 #include "ui/gl/scoped_binders.h"
29
30 using content_common_gpu_media::kModuleVt;
31 using content_common_gpu_media::InitializeStubs;
32 using content_common_gpu_media::IsVtInitialized;
33 using content_common_gpu_media::StubPathMap;
34
// Logs an OSStatus failure for |name| and transitions the decoder into the
// error state. |session_failure| is the SessionFailureType bucket that
// NotifyError() reports to UMA.
#define NOTIFY_STATUS(name, status, session_failure) \
  do { \
    OSSTATUS_DLOG(ERROR, status) << name; \
    NotifyError(PLATFORM_FAILURE, session_failure); \
  } while (0)
40
41 namespace content {
42
// Only H.264 with 4:2:0 chroma sampling is supported.
static const media::VideoCodecProfile kSupportedProfiles[] = {
  media::H264PROFILE_BASELINE,
  media::H264PROFILE_MAIN,
  media::H264PROFILE_EXTENDED,
  media::H264PROFILE_HIGH,
  // TODO(hubbe): Try to re-enable this again somehow. Currently it seems
  // that some codecs fail to check the profile during initialization and
  // then fail on the first frame decode, which currently results in a
  // pipeline failure.
  // media::H264PROFILE_HIGH10PROFILE,
  media::H264PROFILE_SCALABLEBASELINE,
  media::H264PROFILE_SCALABLEHIGH,
  media::H264PROFILE_STEREOHIGH,
  media::H264PROFILE_MULTIVIEWHIGH,
};

// Size to use for NALU length headers in AVC format (can be 1, 2, or 4).
static const int kNALUHeaderLength = 4;

// We request 5 picture buffers from the client, each of which has a texture ID
// that we can bind decoded frames to. We need enough to satisfy preroll, and
// enough to avoid unnecessary stalling, but no more than that. The resource
// requirements are low, as we don't need the textures to be backed by storage.
static const int kNumPictureBuffers = media::limits::kMaxVideoFrames + 1;

// Maximum number of frames to queue for reordering before we stop asking for
// more. (NotifyEndOfBitstreamBuffer() is called when frames are moved into the
// reorder queue.)
static const int kMaxReorderQueueSize = 16;
73
// Build an |image_config| dictionary for VideoToolbox initialization.
// Returns a null ScopedCFTypeRef if any CF allocation fails; callers must
// check the result with get() before use.
static base::ScopedCFTypeRef<CFMutableDictionaryRef>
BuildImageConfig(CMVideoDimensions coded_dimensions) {
  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config;

  // Note that 4:2:0 textures cannot be used directly as RGBA in OpenGL, but are
  // lower power than 4:2:2 when composited directly by CoreAnimation.
  int32_t pixel_format = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
// Local helper macro: wrap an int32_t lvalue in a CFNumber. Undefined
// immediately after use so it cannot leak into the rest of the file.
#define CFINT(i) CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &i)
  base::ScopedCFTypeRef<CFNumberRef> cf_pixel_format(CFINT(pixel_format));
  base::ScopedCFTypeRef<CFNumberRef> cf_width(CFINT(coded_dimensions.width));
  base::ScopedCFTypeRef<CFNumberRef> cf_height(CFINT(coded_dimensions.height));
#undef CFINT
  if (!cf_pixel_format.get() || !cf_width.get() || !cf_height.get())
    return image_config;

  image_config.reset(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          3,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!image_config.get())
    return image_config;

  CFDictionarySetValue(image_config, kCVPixelBufferPixelFormatTypeKey,
                       cf_pixel_format);
  CFDictionarySetValue(image_config, kCVPixelBufferWidthKey, cf_width);
  CFDictionarySetValue(image_config, kCVPixelBufferHeightKey, cf_height);

  return image_config;
}
106
// Create a VTDecompressionSession using the provided |pps| and |sps|. If
// |require_hardware| is true, the session must uses real hardware decoding
// (as opposed to software decoding inside of VideoToolbox) to be considered
// successful.
//
// The created session is discarded on return; this exists only to force
// VideoToolbox to load its (hardware-specific) libraries before sandboxing.
//
// TODO(sandersd): Merge with ConfigureDecoder(), as the code is very similar.
static bool CreateVideoToolboxSession(const uint8_t* sps, size_t sps_size,
                                      const uint8_t* pps, size_t pps_size,
                                      bool require_hardware) {
  const uint8_t* data_ptrs[] = {sps, pps};
  const size_t data_sizes[] = {sps_size, pps_size};

  base::ScopedCFTypeRef<CMFormatDescriptionRef> format;
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      2,                  // parameter_set_count
      data_ptrs,          // &parameter_set_pointers
      data_sizes,         // &parameter_set_sizes
      kNALUHeaderLength,  // nal_unit_header_length
      format.InitializeInto());
  if (status) {
    OSSTATUS_DLOG(WARNING, status)
        << "Failed to create CMVideoFormatDescription";
    return false;
  }

  base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          1,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!decoder_config.get())
    return false;

  if (require_hardware) {
    CFDictionarySetValue(
        decoder_config,
        // kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
        CFSTR("RequireHardwareAcceleratedVideoDecoder"),
        kCFBooleanTrue);
  }

  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
      BuildImageConfig(CMVideoFormatDescriptionGetDimensions(format)));
  if (!image_config.get())
    return false;

  // A zeroed callback record is sufficient: no frames are ever decoded on
  // this warm-up session, so no output callback will fire.
  VTDecompressionOutputCallbackRecord callback = {0};

  base::ScopedCFTypeRef<VTDecompressionSessionRef> session;
  status = VTDecompressionSessionCreate(
      kCFAllocatorDefault,
      format,          // video_format_description
      decoder_config,  // video_decoder_specification
      image_config,    // destination_image_buffer_attributes
      &callback,       // output_callback
      session.InitializeInto());
  if (status) {
    OSSTATUS_DLOG(WARNING, status)
        << "Failed to create VTDecompressionSession";
    return false;
  }

  return true;
}
173
// The purpose of this function is to preload the generic and hardware-specific
// libraries required by VideoToolbox before the GPU sandbox is enabled.
// VideoToolbox normally loads the hardware-specific libraries lazily, so we
// must actually create a decompression session. If creating a decompression
// session fails, hardware decoding will be disabled (Initialize() will always
// return false).
static bool InitializeVideoToolboxInternal() {
  if (!IsVtInitialized()) {
    // CoreVideo is also required, but the loader stops after the first path is
    // loaded. Instead we rely on the transitive dependency from VideoToolbox to
    // CoreVideo.
    StubPathMap paths;
    paths[kModuleVt].push_back(FILE_PATH_LITERAL(
        "/System/Library/Frameworks/VideoToolbox.framework/VideoToolbox"));
    if (!InitializeStubs(paths)) {
      DLOG(WARNING) << "Failed to initialize VideoToolbox framework";
      return false;
    }
  }

  // Create a hardware decoding session.
  // SPS and PPS data are taken from a 480p sample (buck2.mp4).
  const uint8_t sps_normal[] = {0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x80, 0xd4,
                                0x3d, 0xa1, 0x00, 0x00, 0x03, 0x00, 0x01, 0x00,
                                0x00, 0x03, 0x00, 0x30, 0x8f, 0x16, 0x2d, 0x9a};
  const uint8_t pps_normal[] = {0x68, 0xe9, 0x7b, 0xcb};
  if (!CreateVideoToolboxSession(sps_normal, arraysize(sps_normal), pps_normal,
                                 arraysize(pps_normal), true)) {
    DLOG(WARNING) << "Failed to create hardware VideoToolbox session";
    return false;
  }

  // Create a software decoding session.
  // SPS and PPS data are taken from an 18p sample (small2.mp4).
  const uint8_t sps_small[] = {0x67, 0x64, 0x00, 0x0a, 0xac, 0xd9, 0x89, 0x7e,
                               0x22, 0x10, 0x00, 0x00, 0x3e, 0x90, 0x00, 0x0e,
                               0xa6, 0x08, 0xf1, 0x22, 0x59, 0xa0};
  const uint8_t pps_small[] = {0x68, 0xe9, 0x79, 0x72, 0xc0};
  if (!CreateVideoToolboxSession(sps_small, arraysize(sps_small), pps_small,
                                 arraysize(pps_small), false)) {
    DLOG(WARNING) << "Failed to create software VideoToolbox session";
    return false;
  }

  return true;
}
220
221 bool InitializeVideoToolbox() {
222 // InitializeVideoToolbox() is called only from the GPU process main thread;
223 // once for sandbox warmup, and then once each time a VTVideoDecodeAccelerator
224 // is initialized.
225 static bool attempted = false;
226 static bool succeeded = false;
227
228 if (!attempted) {
229 attempted = true;
230 succeeded = InitializeVideoToolboxInternal();
231 }
232
233 return succeeded;
234 }
235
236 // Route decoded frame callbacks back into the VTVideoDecodeAccelerator.
237 static void OutputThunk(
238 void* decompression_output_refcon,
239 void* source_frame_refcon,
240 OSStatus status,
241 VTDecodeInfoFlags info_flags,
242 CVImageBufferRef image_buffer,
243 CMTime presentation_time_stamp,
244 CMTime presentation_duration) {
245 VTVideoDecodeAccelerator* vda =
246 reinterpret_cast<VTVideoDecodeAccelerator*>(decompression_output_refcon);
247 vda->Output(source_frame_refcon, status, image_buffer);
248 }
249
// A Task carries one unit of GPU-thread work (frame, flush, reset, destroy).
VTVideoDecodeAccelerator::Task::Task(TaskType type) : type(type) {
}

VTVideoDecodeAccelerator::Task::Task(const Task& other) = default;

VTVideoDecodeAccelerator::Task::~Task() {
}
257
// A Frame tracks one bitstream buffer through decode; reordering fields are
// filled in by DecodeTask() once the first slice header has been parsed.
VTVideoDecodeAccelerator::Frame::Frame(int32_t bitstream_id)
    : bitstream_id(bitstream_id),
      pic_order_cnt(0),
      is_idr(false),
      reorder_window(0) {
}

VTVideoDecodeAccelerator::Frame::~Frame() {
}
267
// PictureInfo pairs the client-visible texture ID with the service texture ID
// that decoded IOSurfaces are bound to.
VTVideoDecodeAccelerator::PictureInfo::PictureInfo(uint32_t client_texture_id,
                                                   uint32_t service_texture_id)
    : client_texture_id(client_texture_id),
      service_texture_id(service_texture_id) {}

VTVideoDecodeAccelerator::PictureInfo::~PictureInfo() {
  // |gl_image| may never have been bound; Destroy(false) skips releasing the
  // underlying GL resources (the context may already be gone).
  if (gl_image)
    gl_image->Destroy(false);
}
277
278 bool VTVideoDecodeAccelerator::FrameOrder::operator()(
279 const linked_ptr<Frame>& lhs,
280 const linked_ptr<Frame>& rhs) const {
281 if (lhs->pic_order_cnt != rhs->pic_order_cnt)
282 return lhs->pic_order_cnt > rhs->pic_order_cnt;
283 // If |pic_order_cnt| is the same, fall back on using the bitstream order.
284 // TODO(sandersd): Assign a sequence number in Decode() and use that instead.
285 // TODO(sandersd): Using the sequence number, ensure that frames older than
286 // |kMaxReorderQueueSize| are ordered first, regardless of |pic_order_cnt|.
287 return lhs->bitstream_id > rhs->bitstream_id;
288 }
289
// Constructed on the GPU thread; |gpu_task_runner_| is captured here so that
// decoder-thread and VideoToolbox-thread callbacks can post back to it.
VTVideoDecodeAccelerator::VTVideoDecodeAccelerator(
    const MakeGLContextCurrentCallback& make_context_current_cb,
    const BindGLImageCallback& bind_image_cb)
    : make_context_current_cb_(make_context_current_cb),
      bind_image_cb_(bind_image_cb),
      client_(nullptr),
      state_(STATE_DECODING),
      format_(nullptr),
      session_(nullptr),
      last_sps_id_(-1),
      last_pps_id_(-1),
      config_changed_(false),
      waiting_for_idr_(true),
      missing_idr_logged_(false),
      gpu_task_runner_(base::ThreadTaskRunnerHandle::Get()),
      decoder_thread_("VTDecoderThread"),
      weak_this_factory_(this) {
  // |callback_| is handed to VTDecompressionSessionCreate() in
  // ConfigureDecoder(); OutputThunk() casts the refcon back to |this|.
  callback_.decompressionOutputCallback = OutputThunk;
  callback_.decompressionOutputRefCon = this;
  weak_this_ = weak_this_factory_.GetWeakPtr();
}
311
// Destruction must happen on the GPU thread (see ProcessWorkQueues(), which
// performs |delete this| during STATE_DESTROYING).
VTVideoDecodeAccelerator::~VTVideoDecodeAccelerator() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
}
315
// Validates the configuration, warms up VideoToolbox, and starts the decoder
// thread. Returns false (without reporting an error to |client|) when the
// configuration is unsupported.
bool VTVideoDecodeAccelerator::Initialize(const Config& config,
                                          Client* client) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());

  if (make_context_current_cb_.is_null() || bind_image_cb_.is_null()) {
    NOTREACHED() << "GL callbacks are required for this VDA";
    return false;
  }

  if (config.is_encrypted) {
    NOTREACHED() << "Encrypted streams are not supported for this VDA";
    return false;
  }

  client_ = client;

  // No-op after the first call; returns the cached warmup result.
  if (!InitializeVideoToolbox())
    return false;

  bool profile_supported = false;
  for (const auto& supported_profile : kSupportedProfiles) {
    if (config.profile == supported_profile) {
      profile_supported = true;
      break;
    }
  }
  if (!profile_supported)
    return false;

  // Spawn a thread to handle parsing and calling VideoToolbox.
  if (!decoder_thread_.Start())
    return false;

  // Count the session as successfully initialized.
  UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
                            SFT_SUCCESSFULLY_INITIALIZED,
                            SFT_MAX + 1);
  return true;
}
355
// Blocks the decoder thread until VideoToolbox has emitted all pending frames.
// Returns false (after NotifyError()) if the wait fails; trivially true when
// no session exists yet.
bool VTVideoDecodeAccelerator::FinishDelayedFrames() {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  if (session_) {
    OSStatus status = VTDecompressionSessionWaitForAsynchronousFrames(session_);
    if (status) {
      NOTIFY_STATUS("VTDecompressionSessionWaitForAsynchronousFrames()",
                    status, SFT_PLATFORM_ERROR);
      return false;
    }
  }
  return true;
}
368
// (Re)creates |session_| from the most recent SPS/SPSExt/PPS. Called on the
// decoder thread whenever the stream configuration changed at an IDR.
// On failure, NotifyError() has already been called and false is returned.
bool VTVideoDecodeAccelerator::ConfigureDecoder() {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  DCHECK(!last_sps_.empty());
  DCHECK(!last_pps_.empty());

  // Build the configuration records.
  std::vector<const uint8_t*> nalu_data_ptrs;
  std::vector<size_t> nalu_data_sizes;
  nalu_data_ptrs.reserve(3);
  nalu_data_sizes.reserve(3);
  nalu_data_ptrs.push_back(&last_sps_.front());
  nalu_data_sizes.push_back(last_sps_.size());
  if (!last_spsext_.empty()) {
    nalu_data_ptrs.push_back(&last_spsext_.front());
    nalu_data_sizes.push_back(last_spsext_.size());
  }
  nalu_data_ptrs.push_back(&last_pps_.front());
  nalu_data_sizes.push_back(last_pps_.size());

  // Construct a new format description from the parameter sets.
  format_.reset();
  OSStatus status = CMVideoFormatDescriptionCreateFromH264ParameterSets(
      kCFAllocatorDefault,
      nalu_data_ptrs.size(),     // parameter_set_count
      &nalu_data_ptrs.front(),   // &parameter_set_pointers
      &nalu_data_sizes.front(),  // &parameter_set_sizes
      kNALUHeaderLength,         // nal_unit_header_length
      format_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("CMVideoFormatDescriptionCreateFromH264ParameterSets()",
                  status, SFT_PLATFORM_ERROR);
    return false;
  }

  // Store the new configuration data.
  // TODO(sandersd): Despite the documentation, this seems to return the visible
  // size. However, the output always appears to be top-left aligned, so it
  // makes no difference. Re-verify this and update the variable name.
  CMVideoDimensions coded_dimensions =
      CMVideoFormatDescriptionGetDimensions(format_);
  coded_size_.SetSize(coded_dimensions.width, coded_dimensions.height);

  // Prepare VideoToolbox configuration dictionaries.
  base::ScopedCFTypeRef<CFMutableDictionaryRef> decoder_config(
      CFDictionaryCreateMutable(
          kCFAllocatorDefault,
          1,  // capacity
          &kCFTypeDictionaryKeyCallBacks,
          &kCFTypeDictionaryValueCallBacks));
  if (!decoder_config.get()) {
    DLOG(ERROR) << "Failed to create CFMutableDictionary";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // "Enable" (not "Require"): allow a software fallback so decoding still
  // works on machines without hardware H.264 support.
  CFDictionarySetValue(
      decoder_config,
      // kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
      CFSTR("EnableHardwareAcceleratedVideoDecoder"),
      kCFBooleanTrue);

  base::ScopedCFTypeRef<CFMutableDictionaryRef> image_config(
      BuildImageConfig(coded_dimensions));
  if (!image_config.get()) {
    DLOG(ERROR) << "Failed to create decoder image configuration";
    NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
    return false;
  }

  // Ensure that the old decoder emits all frames before the new decoder can
  // emit any.
  if (!FinishDelayedFrames())
    return false;

  session_.reset();
  status = VTDecompressionSessionCreate(
      kCFAllocatorDefault,
      format_,         // video_format_description
      decoder_config,  // video_decoder_specification
      image_config,    // destination_image_buffer_attributes
      &callback_,      // output_callback
      session_.InitializeInto());
  if (status) {
    NOTIFY_STATUS("VTDecompressionSessionCreate()", status,
                  SFT_UNSUPPORTED_STREAM_PARAMETERS);
    return false;
  }

  // Report whether hardware decode is being used.
  bool using_hardware = false;
  base::ScopedCFTypeRef<CFBooleanRef> cf_using_hardware;
  if (VTSessionCopyProperty(
          session_,
          // kVTDecompressionPropertyKey_UsingHardwareAcceleratedVideoDecoder
          CFSTR("UsingHardwareAcceleratedVideoDecoder"),
          kCFAllocatorDefault,
          cf_using_hardware.InitializeInto()) == 0) {
    using_hardware = CFBooleanGetValue(cf_using_hardware);
  }
  UMA_HISTOGRAM_BOOLEAN("Media.VTVDA.HardwareAccelerated", using_hardware);

  return true;
}
472
473 void VTVideoDecodeAccelerator::DecodeTask(
474 const media::BitstreamBuffer& bitstream,
475 Frame* frame) {
476 DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
477
478 // Map the bitstream buffer.
479 base::SharedMemory memory(bitstream.handle(), true);
480 size_t size = bitstream.size();
481 if (!memory.Map(size)) {
482 DLOG(ERROR) << "Failed to map bitstream buffer";
483 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
484 return;
485 }
486 const uint8_t* buf = static_cast<uint8_t*>(memory.memory());
487
488 // NALUs are stored with Annex B format in the bitstream buffer (start codes),
489 // but VideoToolbox expects AVC format (length headers), so we must rewrite
490 // the data.
491 //
492 // Locate relevant NALUs and compute the size of the rewritten data. Also
493 // record any parameter sets for VideoToolbox initialization.
494 std::vector<uint8_t> sps;
495 std::vector<uint8_t> spsext;
496 std::vector<uint8_t> pps;
497 bool has_slice = false;
498 size_t data_size = 0;
499 std::vector<media::H264NALU> nalus;
500 parser_.SetStream(buf, size);
501 media::H264NALU nalu;
502 while (true) {
503 media::H264Parser::Result result = parser_.AdvanceToNextNALU(&nalu);
504 if (result == media::H264Parser::kEOStream)
505 break;
506 if (result == media::H264Parser::kUnsupportedStream) {
507 DLOG(ERROR) << "Unsupported H.264 stream";
508 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
509 return;
510 }
511 if (result != media::H264Parser::kOk) {
512 DLOG(ERROR) << "Failed to parse H.264 stream";
513 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
514 return;
515 }
516 switch (nalu.nal_unit_type) {
517 case media::H264NALU::kSPS:
518 result = parser_.ParseSPS(&last_sps_id_);
519 if (result == media::H264Parser::kUnsupportedStream) {
520 DLOG(ERROR) << "Unsupported SPS";
521 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
522 return;
523 }
524 if (result != media::H264Parser::kOk) {
525 DLOG(ERROR) << "Could not parse SPS";
526 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
527 return;
528 }
529 sps.assign(nalu.data, nalu.data + nalu.size);
530 spsext.clear();
531 break;
532
533 case media::H264NALU::kSPSExt:
534 // TODO(sandersd): Check that the previous NALU was an SPS.
535 spsext.assign(nalu.data, nalu.data + nalu.size);
536 break;
537
538 case media::H264NALU::kPPS:
539 result = parser_.ParsePPS(&last_pps_id_);
540 if (result == media::H264Parser::kUnsupportedStream) {
541 DLOG(ERROR) << "Unsupported PPS";
542 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
543 return;
544 }
545 if (result != media::H264Parser::kOk) {
546 DLOG(ERROR) << "Could not parse PPS";
547 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
548 return;
549 }
550 pps.assign(nalu.data, nalu.data + nalu.size);
551 break;
552
553 case media::H264NALU::kSliceDataA:
554 case media::H264NALU::kSliceDataB:
555 case media::H264NALU::kSliceDataC:
556 case media::H264NALU::kNonIDRSlice:
557 case media::H264NALU::kIDRSlice:
558 // Compute the |pic_order_cnt| for the picture from the first slice.
559 if (!has_slice) {
560 // Verify that we are not trying to decode a slice without an IDR.
561 if (waiting_for_idr_) {
562 if (nalu.nal_unit_type == media::H264NALU::kIDRSlice) {
563 waiting_for_idr_ = false;
564 } else {
565 // We can't compute anything yet, bail on this frame.
566 has_slice = true;
567 break;
568 }
569 }
570
571 media::H264SliceHeader slice_hdr;
572 result = parser_.ParseSliceHeader(nalu, &slice_hdr);
573 if (result == media::H264Parser::kUnsupportedStream) {
574 DLOG(ERROR) << "Unsupported slice header";
575 NotifyError(PLATFORM_FAILURE, SFT_UNSUPPORTED_STREAM);
576 return;
577 }
578 if (result != media::H264Parser::kOk) {
579 DLOG(ERROR) << "Could not parse slice header";
580 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
581 return;
582 }
583
584 // TODO(sandersd): Maintain a cache of configurations and reconfigure
585 // when a slice references a new config.
586 DCHECK_EQ(slice_hdr.pic_parameter_set_id, last_pps_id_);
587 const media::H264PPS* pps =
588 parser_.GetPPS(slice_hdr.pic_parameter_set_id);
589 if (!pps) {
590 DLOG(ERROR) << "Mising PPS referenced by slice";
591 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
592 return;
593 }
594
595 DCHECK_EQ(pps->seq_parameter_set_id, last_sps_id_);
596 const media::H264SPS* sps = parser_.GetSPS(pps->seq_parameter_set_id);
597 if (!sps) {
598 DLOG(ERROR) << "Mising SPS referenced by PPS";
599 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
600 return;
601 }
602
603 if (!poc_.ComputePicOrderCnt(sps, slice_hdr, &frame->pic_order_cnt)) {
604 DLOG(ERROR) << "Unable to compute POC";
605 NotifyError(UNREADABLE_INPUT, SFT_INVALID_STREAM);
606 return;
607 }
608
609 if (nalu.nal_unit_type == media::H264NALU::kIDRSlice)
610 frame->is_idr = true;
611
612 if (sps->vui_parameters_present_flag &&
613 sps->bitstream_restriction_flag) {
614 frame->reorder_window = std::min(sps->max_num_reorder_frames,
615 kMaxReorderQueueSize - 1);
616 }
617 }
618 has_slice = true;
619 default:
620 nalus.push_back(nalu);
621 data_size += kNALUHeaderLength + nalu.size;
622 break;
623 }
624 }
625
626 // Initialize VideoToolbox.
627 if (!sps.empty() && sps != last_sps_) {
628 last_sps_.swap(sps);
629 last_spsext_.swap(spsext);
630 config_changed_ = true;
631 }
632 if (!pps.empty() && pps != last_pps_) {
633 last_pps_.swap(pps);
634 config_changed_ = true;
635 }
636 if (config_changed_) {
637 // Only reconfigure at IDRs to avoid corruption.
638 if (frame->is_idr) {
639 config_changed_ = false;
640
641 if (last_sps_.empty()) {
642 DLOG(ERROR) << "Invalid configuration; no SPS";
643 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
644 return;
645 }
646 if (last_pps_.empty()) {
647 DLOG(ERROR) << "Invalid configuration; no PPS";
648 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
649 return;
650 }
651
652 // ConfigureDecoder() calls NotifyError() on failure.
653 if (!ConfigureDecoder())
654 return;
655 }
656 }
657
658 // If no IDR has been seen yet, skip decoding.
659 if (has_slice && (!session_ || waiting_for_idr_) && config_changed_) {
660 if (!missing_idr_logged_) {
661 LOG(ERROR) << "Illegal attempt to decode without IDR. "
662 << "Discarding decode requests until next IDR.";
663 missing_idr_logged_ = true;
664 }
665 has_slice = false;
666 }
667
668 // If there is nothing to decode, drop the bitstream buffer by returning an
669 // empty frame.
670 if (!has_slice) {
671 // Keep everything in order by flushing first.
672 if (!FinishDelayedFrames())
673 return;
674 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
675 &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
676 return;
677 }
678
679 // If the session is not configured by this point, fail.
680 if (!session_) {
681 DLOG(ERROR) << "Cannot decode without configuration";
682 NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
683 return;
684 }
685
686 // Update the frame metadata with configuration data.
687 frame->coded_size = coded_size_;
688
689 // Create a memory-backed CMBlockBuffer for the translated data.
690 // TODO(sandersd): Pool of memory blocks.
691 base::ScopedCFTypeRef<CMBlockBufferRef> data;
692 OSStatus status = CMBlockBufferCreateWithMemoryBlock(
693 kCFAllocatorDefault,
694 nullptr, // &memory_block
695 data_size, // block_length
696 kCFAllocatorDefault, // block_allocator
697 nullptr, // &custom_block_source
698 0, // offset_to_data
699 data_size, // data_length
700 0, // flags
701 data.InitializeInto());
702 if (status) {
703 NOTIFY_STATUS("CMBlockBufferCreateWithMemoryBlock()", status,
704 SFT_PLATFORM_ERROR);
705 return;
706 }
707
708 // Make sure that the memory is actually allocated.
709 // CMBlockBufferReplaceDataBytes() is documented to do this, but prints a
710 // message each time starting in Mac OS X 10.10.
711 status = CMBlockBufferAssureBlockMemory(data);
712 if (status) {
713 NOTIFY_STATUS("CMBlockBufferAssureBlockMemory()", status,
714 SFT_PLATFORM_ERROR);
715 return;
716 }
717
718 // Copy NALU data into the CMBlockBuffer, inserting length headers.
719 size_t offset = 0;
720 for (size_t i = 0; i < nalus.size(); i++) {
721 media::H264NALU& nalu = nalus[i];
722 uint32_t header = base::HostToNet32(static_cast<uint32_t>(nalu.size));
723 status = CMBlockBufferReplaceDataBytes(
724 &header, data, offset, kNALUHeaderLength);
725 if (status) {
726 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status,
727 SFT_PLATFORM_ERROR);
728 return;
729 }
730 offset += kNALUHeaderLength;
731 status = CMBlockBufferReplaceDataBytes(nalu.data, data, offset, nalu.size);
732 if (status) {
733 NOTIFY_STATUS("CMBlockBufferReplaceDataBytes()", status,
734 SFT_PLATFORM_ERROR);
735 return;
736 }
737 offset += nalu.size;
738 }
739
740 // Package the data in a CMSampleBuffer.
741 base::ScopedCFTypeRef<CMSampleBufferRef> sample;
742 status = CMSampleBufferCreate(
743 kCFAllocatorDefault,
744 data, // data_buffer
745 true, // data_ready
746 nullptr, // make_data_ready_callback
747 nullptr, // make_data_ready_refcon
748 format_, // format_description
749 1, // num_samples
750 0, // num_sample_timing_entries
751 nullptr, // &sample_timing_array
752 1, // num_sample_size_entries
753 &data_size, // &sample_size_array
754 sample.InitializeInto());
755 if (status) {
756 NOTIFY_STATUS("CMSampleBufferCreate()", status, SFT_PLATFORM_ERROR);
757 return;
758 }
759
760 // Send the frame for decoding.
761 // Asynchronous Decompression allows for parallel submission of frames
762 // (without it, DecodeFrame() does not return until the frame has been
763 // decoded). We don't enable Temporal Processing so that frames are always
764 // returned in decode order; this makes it easier to avoid deadlock.
765 VTDecodeFrameFlags decode_flags =
766 kVTDecodeFrame_EnableAsynchronousDecompression;
767 status = VTDecompressionSessionDecodeFrame(
768 session_,
769 sample, // sample_buffer
770 decode_flags, // decode_flags
771 reinterpret_cast<void*>(frame), // source_frame_refcon
772 nullptr); // &info_flags_out
773 if (status) {
774 NOTIFY_STATUS("VTDecompressionSessionDecodeFrame()", status,
775 SFT_DECODE_ERROR);
776 return;
777 }
778 }
779
// This method may be called on any VideoToolbox thread.
// Retains |image_buffer| into the frame and posts DecodeDone() back to the
// GPU thread; no GPU-thread state may be touched here directly.
void VTVideoDecodeAccelerator::Output(
    void* source_frame_refcon,
    OSStatus status,
    CVImageBufferRef image_buffer) {
  if (status) {
    NOTIFY_STATUS("Decoding", status, SFT_DECODE_ERROR);
    return;
  }

  // The type of |image_buffer| is CVImageBuffer, but we only handle
  // CVPixelBuffers. This should be guaranteed as we set
  // kCVPixelBufferOpenGLCompatibilityKey in |image_config|.
  //
  // Sometimes, for unknown reasons (http://crbug.com/453050), |image_buffer| is
  // NULL, which causes CFGetTypeID() to crash. While the rest of the code would
  // smoothly handle NULL as a dropped frame, we choose to fail permanently here
  // until the issue is better understood.
  if (!image_buffer || CFGetTypeID(image_buffer) != CVPixelBufferGetTypeID()) {
    DLOG(ERROR) << "Decoded frame is not a CVPixelBuffer";
    NotifyError(PLATFORM_FAILURE, SFT_DECODE_ERROR);
    return;
  }

  Frame* frame = reinterpret_cast<Frame*>(source_frame_refcon);
  // RETAIN: |image_buffer| is owned by VideoToolbox; keep a reference for the
  // lifetime of |frame|.
  frame->image.reset(image_buffer, base::scoped_policy::RETAIN);
  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
      &VTVideoDecodeAccelerator::DecodeDone, weak_this_, frame));
}
809
// GPU-thread completion for one frame: moves ownership of |frame| from
// |pending_frames_| into a TASK_FRAME task and pumps the work queues.
void VTVideoDecodeAccelerator::DecodeDone(Frame* frame) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(1u, pending_frames_.count(frame->bitstream_id));
  Task task(TASK_FRAME);
  // |pending_frames_| holds the owning linked_ptr; copy it into the task
  // before erasing so |frame| stays alive.
  task.frame = pending_frames_[frame->bitstream_id];
  pending_frames_.erase(frame->bitstream_id);
  task_queue_.push(task);
  ProcessWorkQueues();
}
819
// Decoder-thread half of a flush/reset/destroy: drains VideoToolbox, then
// notifies the GPU thread.
void VTVideoDecodeAccelerator::FlushTask(TaskType type) {
  DCHECK(decoder_thread_.task_runner()->BelongsToCurrentThread());
  FinishDelayedFrames();

  // Always queue a task, even if FinishDelayedFrames() fails, so that
  // destruction always completes.
  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
      &VTVideoDecodeAccelerator::FlushDone, weak_this_, type));
}
829
// GPU-thread half of a flush/reset/destroy: enqueues the task and pumps the
// work queues.
void VTVideoDecodeAccelerator::FlushDone(TaskType type) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  task_queue_.push(Task(type));
  ProcessWorkQueues();
}
835
// Client entry point: registers the bitstream buffer and hands it to the
// decoder thread. |frame| is owned by |pending_frames_| (linked_ptr) and the
// raw pointer passed to DecodeTask stays valid until DecodeDone() erases it.
void VTVideoDecodeAccelerator::Decode(const media::BitstreamBuffer& bitstream) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  if (bitstream.id() < 0) {
    DLOG(ERROR) << "Invalid bitstream, id: " << bitstream.id();
    // Close the handle ourselves since the buffer will never reach DecodeTask.
    if (base::SharedMemory::IsHandleValid(bitstream.handle()))
      base::SharedMemory::CloseHandle(bitstream.handle());
    NotifyError(INVALID_ARGUMENT, SFT_INVALID_STREAM);
    return;
  }
  DCHECK_EQ(0u, assigned_bitstream_ids_.count(bitstream.id()));
  assigned_bitstream_ids_.insert(bitstream.id());
  Frame* frame = new Frame(bitstream.id());
  pending_frames_[frame->bitstream_id] = make_linked_ptr(frame);
  decoder_thread_.task_runner()->PostTask(
      FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::DecodeTask,
                            base::Unretained(this), bitstream, frame));
}
853
// Records the picture buffers provided by the client and schedules the work
// queues to run once the buffers become usable.
void VTVideoDecodeAccelerator::AssignPictureBuffers(
    const std::vector<media::PictureBuffer>& pictures) {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());

  for (const media::PictureBuffer& picture : pictures) {
    DCHECK(!picture_info_map_.count(picture.id()));
    assigned_picture_ids_.insert(picture.id());
    available_picture_ids_.push_back(picture.id());
    DCHECK_LE(1u, picture.internal_texture_ids().size());
    DCHECK_LE(1u, picture.texture_ids().size());
    // Only the first texture of each buffer is used (single-plane output).
    picture_info_map_.insert(std::make_pair(
        picture.id(),
        base::WrapUnique(new PictureInfo(picture.internal_texture_ids()[0],
                                         picture.texture_ids()[0]))));
  }

  // Pictures are not marked as uncleared until after this method returns, and
  // they will be broken if they are used before that happens. So, schedule
  // future work after that happens.
  gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
      &VTVideoDecodeAccelerator::ProcessWorkQueues, weak_this_));
}
876
877 void VTVideoDecodeAccelerator::ReusePictureBuffer(int32_t picture_id) {
878 DCHECK(gpu_thread_checker_.CalledOnValidThread());
879 DCHECK(picture_info_map_.count(picture_id));
880 PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
881 picture_info->cv_image.reset();
882 picture_info->gl_image->Destroy(false);
883 picture_info->gl_image = nullptr;
884
885 if (assigned_picture_ids_.count(picture_id) != 0) {
886 available_picture_ids_.push_back(picture_id);
887 ProcessWorkQueues();
888 } else {
889 client_->DismissPictureBuffer(picture_id);
890 }
891 }
892
// Drives decoding forward on the GPU thread. Behavior depends on |state_|:
// while decoding, repeatedly drains the reorder and task queues; in the error
// state it is a no-op; while destroying, it discards tasks until the
// TASK_DESTROY sentinel arrives and then deletes |this|.
void VTVideoDecodeAccelerator::ProcessWorkQueues() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  switch (state_) {
    case STATE_DECODING:
      // TODO(sandersd): Batch where possible.
      while (state_ == STATE_DECODING) {
        // Stop once neither queue can make progress (e.g. waiting for new
        // picture buffers or for the reorder window to fill).
        if (!ProcessReorderQueue() && !ProcessTaskQueue())
          break;
      }
      return;

    case STATE_ERROR:
      // Do nothing until Destroy() is called.
      return;

    case STATE_DESTROYING:
      // Drop tasks until we are ready to destruct.
      while (!task_queue_.empty()) {
        // TASK_DESTROY is queued by Destroy(); once it surfaces here no more
        // work is pending and it is safe to delete |this|.
        if (task_queue_.front().type == TASK_DESTROY) {
          delete this;
          return;
        }
        task_queue_.pop();
      }
      return;
  }
}
920
921 bool VTVideoDecodeAccelerator::ProcessTaskQueue() {
922 DCHECK(gpu_thread_checker_.CalledOnValidThread());
923 DCHECK_EQ(state_, STATE_DECODING);
924
925 if (task_queue_.empty())
926 return false;
927
928 const Task& task = task_queue_.front();
929 switch (task.type) {
930 case TASK_FRAME:
931 if (reorder_queue_.size() < kMaxReorderQueueSize &&
932 (!task.frame->is_idr || reorder_queue_.empty())) {
933 assigned_bitstream_ids_.erase(task.frame->bitstream_id);
934 client_->NotifyEndOfBitstreamBuffer(task.frame->bitstream_id);
935 reorder_queue_.push(task.frame);
936 task_queue_.pop();
937 return true;
938 }
939 return false;
940
941 case TASK_FLUSH:
942 DCHECK_EQ(task.type, pending_flush_tasks_.front());
943 if (reorder_queue_.size() == 0) {
944 pending_flush_tasks_.pop();
945 client_->NotifyFlushDone();
946 task_queue_.pop();
947 return true;
948 }
949 return false;
950
951 case TASK_RESET:
952 DCHECK_EQ(task.type, pending_flush_tasks_.front());
953 if (reorder_queue_.size() == 0) {
954 waiting_for_idr_ = true;
955 pending_flush_tasks_.pop();
956 client_->NotifyResetDone();
957 task_queue_.pop();
958 return true;
959 }
960 return false;
961
962 case TASK_DESTROY:
963 NOTREACHED() << "Can't destroy while in STATE_DECODING";
964 NotifyError(ILLEGAL_STATE, SFT_PLATFORM_ERROR);
965 return false;
966 }
967 }
968
969 bool VTVideoDecodeAccelerator::ProcessReorderQueue() {
970 DCHECK(gpu_thread_checker_.CalledOnValidThread());
971 DCHECK_EQ(state_, STATE_DECODING);
972
973 if (reorder_queue_.empty())
974 return false;
975
976 // If the next task is a flush (because there is a pending flush or becuase
977 // the next frame is an IDR), then we don't need a full reorder buffer to send
978 // the next frame.
979 bool flushing = !task_queue_.empty() &&
980 (task_queue_.front().type != TASK_FRAME ||
981 task_queue_.front().frame->is_idr);
982
983 size_t reorder_window = std::max(0, reorder_queue_.top()->reorder_window);
984 if (flushing || reorder_queue_.size() > reorder_window) {
985 if (ProcessFrame(*reorder_queue_.top())) {
986 reorder_queue_.pop();
987 return true;
988 }
989 }
990
991 return false;
992 }
993
994 bool VTVideoDecodeAccelerator::ProcessFrame(const Frame& frame) {
995 DCHECK(gpu_thread_checker_.CalledOnValidThread());
996 DCHECK_EQ(state_, STATE_DECODING);
997
998 // If the next pending flush is for a reset, then the frame will be dropped.
999 bool resetting = !pending_flush_tasks_.empty() &&
1000 pending_flush_tasks_.front() == TASK_RESET;
1001
1002 if (!resetting && frame.image.get()) {
1003 // If the |coded_size| has changed, request new picture buffers and then
1004 // wait for them.
1005 // TODO(sandersd): If GpuVideoDecoder didn't specifically check the size of
1006 // textures, this would be unnecessary, as the size is actually a property
1007 // of the texture binding, not the texture. We rebind every frame, so the
1008 // size passed to ProvidePictureBuffers() is meaningless.
1009 if (picture_size_ != frame.coded_size) {
1010 // Dismiss current pictures.
1011 for (int32_t picture_id : assigned_picture_ids_)
1012 client_->DismissPictureBuffer(picture_id);
1013 assigned_picture_ids_.clear();
1014 available_picture_ids_.clear();
1015
1016 // Request new pictures.
1017 picture_size_ = frame.coded_size;
1018 client_->ProvidePictureBuffers(kNumPictureBuffers, 1, coded_size_,
1019 GL_TEXTURE_RECTANGLE_ARB);
1020 return false;
1021 }
1022 if (!SendFrame(frame))
1023 return false;
1024 }
1025
1026 return true;
1027 }
1028
1029 bool VTVideoDecodeAccelerator::SendFrame(const Frame& frame) {
1030 DCHECK(gpu_thread_checker_.CalledOnValidThread());
1031 DCHECK_EQ(state_, STATE_DECODING);
1032
1033 if (available_picture_ids_.empty())
1034 return false;
1035
1036 int32_t picture_id = available_picture_ids_.back();
1037 DCHECK(picture_info_map_.count(picture_id));
1038 PictureInfo* picture_info = picture_info_map_.find(picture_id)->second.get();
1039 DCHECK(!picture_info->cv_image);
1040 DCHECK(!picture_info->gl_image);
1041
1042 if (!make_context_current_cb_.Run()) {
1043 DLOG(ERROR) << "Failed to make GL context current";
1044 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
1045 return false;
1046 }
1047
1048 scoped_refptr<gl::GLImageIOSurface> gl_image(
1049 new gl::GLImageIOSurface(frame.coded_size, GL_BGRA_EXT));
1050 if (!gl_image->InitializeWithCVPixelBuffer(
1051 frame.image.get(), gfx::GenericSharedMemoryId(),
1052 gfx::BufferFormat::YUV_420_BIPLANAR)) {
1053 NOTIFY_STATUS("Failed to initialize GLImageIOSurface", PLATFORM_FAILURE,
1054 SFT_PLATFORM_ERROR);
1055 }
1056
1057 if (!bind_image_cb_.Run(picture_info->client_texture_id,
1058 GL_TEXTURE_RECTANGLE_ARB, gl_image, false)) {
1059 DLOG(ERROR) << "Failed to bind image";
1060 NotifyError(PLATFORM_FAILURE, SFT_PLATFORM_ERROR);
1061 return false;
1062 }
1063
1064 // Assign the new image(s) to the the picture info.
1065 picture_info->gl_image = gl_image;
1066 picture_info->cv_image = frame.image;
1067 available_picture_ids_.pop_back();
1068
1069 // TODO(sandersd): Currently, the size got from
1070 // CMVideoFormatDescriptionGetDimensions is visible size. We pass it to
1071 // GpuVideoDecoder so that GpuVideoDecoder can use correct visible size in
1072 // resolution changed. We should find the correct API to get the real
1073 // coded size and fix it.
1074 client_->PictureReady(media::Picture(picture_id, frame.bitstream_id,
1075 gfx::Rect(frame.coded_size),
1076 true));
1077 return true;
1078 }
1079
1080 void VTVideoDecodeAccelerator::NotifyError(
1081 Error vda_error_type,
1082 VTVDASessionFailureType session_failure_type) {
1083 DCHECK_LT(session_failure_type, SFT_MAX + 1);
1084 if (!gpu_thread_checker_.CalledOnValidThread()) {
1085 gpu_task_runner_->PostTask(FROM_HERE, base::Bind(
1086 &VTVideoDecodeAccelerator::NotifyError, weak_this_, vda_error_type,
1087 session_failure_type));
1088 } else if (state_ == STATE_DECODING) {
1089 state_ = STATE_ERROR;
1090 UMA_HISTOGRAM_ENUMERATION("Media.VTVDA.SessionFailureReason",
1091 session_failure_type,
1092 SFT_MAX + 1);
1093 client_->NotifyError(vda_error_type);
1094 }
1095 }
1096
1097 void VTVideoDecodeAccelerator::QueueFlush(TaskType type) {
1098 DCHECK(gpu_thread_checker_.CalledOnValidThread());
1099 pending_flush_tasks_.push(type);
1100 decoder_thread_.task_runner()->PostTask(
1101 FROM_HERE, base::Bind(&VTVideoDecodeAccelerator::FlushTask,
1102 base::Unretained(this), type));
1103
1104 // If this is a new flush request, see if we can make progress.
1105 if (pending_flush_tasks_.size() == 1)
1106 ProcessWorkQueues();
1107 }
1108
// media::VideoDecodeAccelerator: requests that all pending decodes complete;
// the client is notified later via NotifyFlushDone() (see ProcessTaskQueue).
void VTVideoDecodeAccelerator::Flush() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  QueueFlush(TASK_FLUSH);
}
1113
// media::VideoDecodeAccelerator: drops pending output and resets the decoder;
// the client is notified later via NotifyResetDone() (see ProcessTaskQueue).
void VTVideoDecodeAccelerator::Reset() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());
  QueueFlush(TASK_RESET);
}
1118
// media::VideoDecodeAccelerator: begins teardown. |this| is deleted either
// immediately (decoder thread already gone) or later by ProcessWorkQueues()
// once the TASK_DESTROY sentinel drains through the decoder thread.
void VTVideoDecodeAccelerator::Destroy() {
  DCHECK(gpu_thread_checker_.CalledOnValidThread());

  // In a forceful shutdown, the decoder thread may be dead already.
  if (!decoder_thread_.IsRunning()) {
    delete this;
    return;
  }

  // For a graceful shutdown, return assigned buffers and flush before
  // destructing |this|.
  // TODO(sandersd): Prevent the decoder from reading buffers before discarding
  // them.
  for (int32_t bitstream_id : assigned_bitstream_ids_)
    client_->NotifyEndOfBitstreamBuffer(bitstream_id);
  assigned_bitstream_ids_.clear();
  state_ = STATE_DESTROYING;
  QueueFlush(TASK_DESTROY);
}
1138
// Decoding on a separate (non-GPU) client thread is not supported by this
// implementation; always declines.
bool VTVideoDecodeAccelerator::TryToSetupDecodeOnSeparateThread(
    const base::WeakPtr<Client>& decode_client,
    const scoped_refptr<base::SingleThreadTaskRunner>& decode_task_runner) {
  return false;
}
1144
1145 // static
1146 media::VideoDecodeAccelerator::SupportedProfiles
1147 VTVideoDecodeAccelerator::GetSupportedProfiles() {
1148 SupportedProfiles profiles;
1149 for (const auto& supported_profile : kSupportedProfiles) {
1150 SupportedProfile profile;
1151 profile.profile = supported_profile;
1152 profile.min_resolution.SetSize(16, 16);
1153 profile.max_resolution.SetSize(4096, 2160);
1154 profiles.push_back(profile);
1155 }
1156 return profiles;
1157 }
1158
1159 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698