Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "content/common/gpu/media/vt_video_encode_accelerator_mac.h" | |
| 6 | |
| 7 #include "base/thread_task_runner_handle.h" | |
| 8 #include "media/base/mac/coremedia_glue.h" | |
| 9 #include "media/base/mac/corevideo_glue.h" | |
| 10 #include "media/base/mac/video_frame_mac.h" | |
| 11 | |
| 12 namespace content { | |
| 13 | |
namespace {

// Subjectively chosen.
// TODO(emircan): Check if we can find the actual system capabilities via
// creating VTCompressionSessions with varying requirements.
// See crbug.com/584784.
// NOTE(review): the input-buffer count may be configurable through a
// kVTCompressionPropertyKey_* session property — confirm before raising it.
const size_t kNumInputBuffers = 1;
const size_t kMaxFrameRateNumerator = 30;
const size_t kMaxFrameRateDenominator = 1;
const size_t kMaxResolutionWidth = 4096;
const size_t kMaxResolutionHeight = 2160;
// The ratio of |input_visible_size| area to the max expected output
// BitstreamBuffer size in bytes. VideoToolbox returns variable sized encoded
// data whereas media::VideoEncodeAccelerator provides a uniform BitstreamBuffer
// size to fill this data into. This ratio is used to determine a size that
// would ideally be big enough to fit all frames.
const size_t kOutputBufferSizeRatio = 10;
const size_t kBitsPerByte = 8;

}  // namespace
| 34 | |
| 35 struct VTVideoEncodeAccelerator::InProgressFrameEncode { | |
| 36 const base::TimeDelta timestamp; | |
| 37 const base::TimeTicks reference_time; | |
| 38 | |
| 39 InProgressFrameEncode(base::TimeDelta rtp_timestamp, base::TimeTicks ref_time) | |
| 40 : timestamp(rtp_timestamp), reference_time(ref_time) {} | |
| 41 | |
| 42 private: | |
| 43 DISALLOW_IMPLICIT_CONSTRUCTORS(InProgressFrameEncode); | |
| 44 }; | |
| 45 | |
| 46 struct VTVideoEncodeAccelerator::BitstreamBufferRef { | |
| 47 BitstreamBufferRef(int32_t id, | |
| 48 scoped_ptr<base::SharedMemory> shm, | |
| 49 size_t size) | |
| 50 : id(id), shm(std::move(shm)), size(size) {} | |
| 51 const int32_t id; | |
| 52 const scoped_ptr<base::SharedMemory> shm; | |
| 53 const size_t size; | |
| 54 | |
| 55 private: | |
| 56 DISALLOW_IMPLICIT_CONSTRUCTORS(BitstreamBufferRef); | |
| 57 }; | |
| 58 | |
| 59 VTVideoEncodeAccelerator::VTVideoEncodeAccelerator() | |
| 60 : client_task_runner_(base::ThreadTaskRunnerHandle::Get()) { | |
| 61 } | |
| 62 | |
| 63 VTVideoEncodeAccelerator::~VTVideoEncodeAccelerator() { | |
| 64 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 65 } | |
| 66 | |
| 67 media::VideoEncodeAccelerator::SupportedProfiles | |
| 68 VTVideoEncodeAccelerator::GetSupportedProfiles() { | |
| 69 DVLOG(3) << __FUNCTION__; | |
| 70 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 71 | |
| 72 SupportedProfiles profiles; | |
| 73 SupportedProfile profile; | |
| 74 profile.profile = media::H264PROFILE_BASELINE; | |
| 75 profile.max_framerate_numerator = kMaxFrameRateNumerator; | |
| 76 profile.max_framerate_denominator = kMaxFrameRateDenominator; | |
| 77 profile.max_resolution = gfx::Size(kMaxResolutionWidth, kMaxResolutionHeight); | |
| 78 profiles.push_back(profile); | |
| 79 return profiles; | |
| 80 } | |
| 81 | |
| 82 bool VTVideoEncodeAccelerator::Initialize( | |
| 83 media::VideoPixelFormat format, | |
| 84 const gfx::Size& input_visible_size, | |
| 85 media::VideoCodecProfile output_profile, | |
| 86 uint32_t initial_bitrate, | |
| 87 Client* client) { | |
| 88 DVLOG(3) << __FUNCTION__ | |
| 89 << ": input_format=" << media::VideoPixelFormatToString(format) | |
| 90 << ", input_visible_size=" << input_visible_size.ToString() | |
| 91 << ", output_profile=" << output_profile | |
| 92 << ", initial_bitrate=" << initial_bitrate; | |
| 93 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 94 DCHECK(client); | |
| 95 | |
| 96 if (media::PIXEL_FORMAT_I420 != format) { | |
|
jfroy
2016/02/10 18:56:27
You can also support NV12 pretty easily. I don't k
emircan
2016/02/11 09:22:09
I420 is the common format in Chrome HW encode.
| |
| 97 DLOG(ERROR) << "Input format not supported= " | |
| 98 << media::VideoPixelFormatToString(format); | |
| 99 return false; | |
| 100 } | |
| 101 if (media::H264PROFILE_BASELINE != output_profile) { | |
| 102 DLOG(ERROR) << "Output profile not supported= " | |
| 103 << output_profile; | |
| 104 return false; | |
| 105 } | |
| 106 | |
| 107 videotoolbox_glue_ = VideoToolboxGlue::Get(); | |
| 108 if (!videotoolbox_glue_) { | |
| 109 DLOG(ERROR) << "Failed creating VideoToolbox glue"; | |
| 110 return false; | |
| 111 } | |
| 112 | |
| 113 client_ptr_factory_.reset(new base::WeakPtrFactory<Client>(client)); | |
| 114 client_ = client_ptr_factory_->GetWeakPtr(); | |
| 115 bitrate_ = initial_bitrate; | |
| 116 input_visible_size_ = input_visible_size; | |
| 117 | |
| 118 if (!ResetCompressionSession()) { | |
| 119 DLOG(ERROR) << "Failed creating compression session"; | |
| 120 return false; | |
| 121 } | |
| 122 | |
| 123 client_->RequireBitstreamBuffers( | |
| 124 kNumInputBuffers, input_visible_size_, | |
| 125 std::max(input_visible_size_.GetArea() / kOutputBufferSizeRatio, | |
| 126 bitrate_ / kBitsPerByte)); | |
|
miu
2016/02/10 21:04:56
Sanity-check: Is the bitrate_ setting a "max bitra
jfroy
2016/02/10 23:15:34
Myself and lite@ did extensive tests of the iOS en
emircan
2016/02/11 09:22:09
I will go with jfroy@ suggestions. I will remove t
jfroy
2016/02/11 18:27:40
I would really run a few experiments to see how yo
miu
2016/02/11 21:09:40
I can't remember whether the VEA interface treats
emircan
2016/02/12 03:40:55
I did some further investigation as jfroy@ suggest
Pawel Osciak
2016/02/18 11:16:14
It's not clearly specified in the docs, but it's e
| |
| 127 return true; | |
| 128 } | |
| 129 | |
| 130 void VTVideoEncodeAccelerator::Encode( | |
| 131 const scoped_refptr<media::VideoFrame>& frame, | |
| 132 bool force_keyframe) { | |
| 133 DVLOG(3) << __FUNCTION__; | |
| 134 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 135 DCHECK(compression_session_); | |
| 136 DCHECK(frame); | |
| 137 | |
| 138 base::TimeTicks ref_time; | |
| 139 if (!frame->metadata()->GetTimeTicks( | |
| 140 media::VideoFrameMetadata::REFERENCE_TIME, &ref_time)) { | |
| 141 ref_time = base::TimeTicks::Now(); | |
| 142 } | |
| 143 auto timestamp_cm = CoreMediaGlue::CMTimeMake( | |
| 144 frame->timestamp().InMicroseconds(), USEC_PER_SEC); | |
| 145 // Wrap information we'll need after the frame is encoded in a heap object. | |
| 146 // We'll get the pointer back from the VideoToolbox completion callback. | |
| 147 scoped_ptr<InProgressFrameEncode> request(new InProgressFrameEncode( | |
| 148 frame->timestamp(), ref_time)); | |
| 149 | |
| 150 // TODO(emircan): See if we can eliminate a copy here by using | |
| 151 // CVPixelBufferPool for the allocation of incoming VideoFrames. | |
| 152 base::ScopedCFTypeRef<CVPixelBufferRef> pixel_buffer = | |
| 153 media::WrapVideoFrameInCVPixelBuffer(*frame); | |
| 154 base::ScopedCFTypeRef<CFDictionaryRef> frame_props = | |
| 155 media::video_toolbox::DictionaryWithKeyValue( | |
| 156 videotoolbox_glue_->kVTEncodeFrameOptionKey_ForceKeyFrame(), | |
| 157 force_keyframe ? kCFBooleanTrue : kCFBooleanFalse); | |
| 158 | |
| 159 OSStatus status = videotoolbox_glue_->VTCompressionSessionEncodeFrame( | |
| 160 compression_session_, pixel_buffer, timestamp_cm, | |
| 161 CoreMediaGlue::CMTime{0, 0, 0, 0}, frame_props, | |
| 162 reinterpret_cast<void*>(request.release()), nullptr); | |
| 163 if (status != noErr) { | |
| 164 DLOG(ERROR) << " VTCompressionSessionEncodeFrame failed: " << status; | |
| 165 client_->NotifyError(kPlatformFailureError); | |
| 166 } | |
| 167 } | |
| 168 | |
| 169 void VTVideoEncodeAccelerator::UseOutputBitstreamBuffer( | |
| 170 const media::BitstreamBuffer& buffer) { | |
| 171 DVLOG(3) << __FUNCTION__; | |
| 172 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 173 if (buffer.size() < static_cast<size_t>(input_visible_size_.GetArea() / | |
| 174 kOutputBufferSizeRatio)) { | |
| 175 DLOG(ERROR) << "Output BitstreamBuffer isn't big enough: " | |
| 176 << buffer.size() | |
| 177 << " vs. " | |
| 178 << static_cast<size_t>(input_visible_size_.GetArea() / | |
| 179 kOutputBufferSizeRatio); | |
| 180 client_->NotifyError(kInvalidArgumentError); | |
| 181 return; | |
| 182 } | |
| 183 | |
| 184 scoped_ptr<base::SharedMemory> shm( | |
| 185 new base::SharedMemory(buffer.handle(), false)); | |
| 186 if (!shm->Map(buffer.size())) { | |
| 187 DLOG(ERROR) << "Failed mapping shared memory."; | |
| 188 client_->NotifyError(kPlatformFailureError); | |
| 189 return; | |
| 190 } | |
| 191 | |
| 192 // If there are already CMSampleBufferRef waiting, copy their output first. | |
| 193 if (!encoder_output_sample_buffer_queue_.empty()) { | |
| 194 CMSampleBufferRef sbuf = encoder_output_sample_buffer_queue_.front(); | |
| 195 encoder_output_sample_buffer_queue_.pop_front(); | |
| 196 | |
| 197 auto sample_attachments = | |
| 198 static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( | |
| 199 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), | |
| 200 0)); | |
| 201 const bool keyframe = !CFDictionaryContainsKey( | |
| 202 sample_attachments, | |
| 203 CoreMediaGlue::kCMSampleAttachmentKey_NotSync()); | |
| 204 size_t used_buffer_size = 0; | |
| 205 const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer( | |
| 206 sbuf, keyframe, buffer.size(), | |
| 207 reinterpret_cast<uint8_t*>(shm->memory()), &used_buffer_size); | |
| 208 CFRelease(sbuf); | |
| 209 if (!copy_rv) | |
| 210 used_buffer_size = 0; | |
| 211 client_->BitstreamBufferReady(buffer.id(), used_buffer_size, keyframe); | |
| 212 return; | |
| 213 } | |
| 214 | |
| 215 scoped_ptr<BitstreamBufferRef> buffer_ref( | |
| 216 new BitstreamBufferRef(buffer.id(), std::move(shm), buffer.size())); | |
| 217 encoder_output_queue_.push_back(std::move(buffer_ref)); | |
| 218 } | |
| 219 | |
| 220 void VTVideoEncodeAccelerator::RequestEncodingParametersChange( | |
| 221 uint32_t bitrate, | |
| 222 uint32_t framerate) { | |
| 223 DVLOG(3) << __FUNCTION__; | |
| 224 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 225 | |
| 226 bitrate_ = bitrate > 1 ? bitrate : 1; | |
| 227 | |
| 228 if (!compression_session_) { | |
| 229 client_->NotifyError(kPlatformFailureError); | |
| 230 return; | |
| 231 } | |
| 232 // TODO(emircan): VideoToolbox does not seem to support bitrate | |
|
jfroy
2016/02/10 18:56:27
See my update to that bug. You can actually contro
emircan
2016/02/11 09:22:09
Done.
| |
| 233 // reconfiguration, see crbug.com/425352. | |
| 234 const bool rv = session_property_setter_->SetSessionProperty( | |
|
miu
2016/02/10 21:04:56
Suggestion (to eliminate extra heap-allocated data
emircan
2016/02/11 09:22:09
Done. Keeping it as a class member on stack.
| |
| 235 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(), | |
| 236 static_cast<int32_t>(bitrate_)); | |
| 237 if (!rv) { | |
| 238 DLOG(ERROR) << "Couldn't change session bitrate."; | |
| 239 } | |
| 240 } | |
| 241 | |
| 242 void VTVideoEncodeAccelerator::Destroy() { | |
| 243 DVLOG(3) << __FUNCTION__; | |
| 244 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 245 | |
| 246 DestroyCompressionSession(); | |
| 247 delete this; | |
| 248 } | |
| 249 | |
| 250 // static | |
| 251 void VTVideoEncodeAccelerator::CompressionCallback(void* encoder_opaque, | |
| 252 void* request_opaque, | |
| 253 OSStatus status, | |
| 254 VTEncodeInfoFlags info, | |
| 255 CMSampleBufferRef sbuf) { | |
| 256 // This function may be called asynchronously, on a different thread from the | |
| 257 // one that calls VTCompressionSessionEncodeFrame. | |
| 258 DVLOG(3) << __FUNCTION__; | |
| 259 | |
| 260 auto encoder = reinterpret_cast<VTVideoEncodeAccelerator*>(encoder_opaque); | |
| 261 DCHECK(encoder); | |
| 262 | |
| 263 if (status != noErr) { | |
| 264 DLOG(ERROR) << " encode failed: " << status; | |
| 265 encoder->client_task_runner_->PostTask( | |
| 266 FROM_HERE, base::Bind(&Client::NotifyError, encoder->client_, | |
| 267 kPlatformFailureError)); | |
| 268 return; | |
| 269 } | |
| 270 | |
| 271 // Release InProgressFrameEncode, since we don't have support to return | |
| 272 // timestamps at this point. | |
| 273 scoped_ptr<InProgressFrameEncode> request( | |
|
miu
2016/02/10 21:04:56
This is happening after a possible return statemen
emircan
2016/02/11 09:22:09
Done.
| |
| 274 reinterpret_cast<InProgressFrameEncode*>(request_opaque)); | |
| 275 request.reset(); | |
| 276 | |
| 277 // CFRetain is required to hold onto CMSampleBufferRef when posting task | |
| 278 // between threads. The object should be released later using CFRelease. | |
| 279 CFRetain(sbuf); | |
| 280 // This method is NOT called on |client_task_runner_|, so we still need to | |
| 281 // post a task back to it to reach |client_|. | |
| 282 encoder->client_task_runner_->PostTask( | |
| 283 FROM_HERE, | |
| 284 base::Bind(&VTVideoEncodeAccelerator::CompressionCallbackTask, | |
| 285 base::Unretained(encoder), info, sbuf)); | |
| 286 } | |
| 287 | |
| 288 void VTVideoEncodeAccelerator::CompressionCallbackTask(VTEncodeInfoFlags info, | |
| 289 CMSampleBufferRef sbuf) { | |
| 290 DVLOG(3) << __FUNCTION__; | |
| 291 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 292 | |
| 293 // If there isn't any BitstreamBuffer to copy into, add it to a queue for | |
| 294 // later use. | |
| 295 if (encoder_output_queue_.empty()) { | |
| 296 encoder_output_sample_buffer_queue_.push_back(sbuf); | |
| 297 return; | |
|
miu
2016/02/10 21:04:56
Same problem w.r.t. early return. The client shou
emircan
2016/02/11 09:22:10
I do not have a BitstreamBuffer to return at this
miu
2016/02/11 21:09:40
Acknowledged. My misunderstanding.
| |
| 298 } | |
| 299 | |
| 300 bool frame_dropped = false; | |
| 301 if (info & VideoToolboxGlue::kVTEncodeInfo_FrameDropped) { | |
| 302 DVLOG(2) << " frame dropped"; | |
| 303 frame_dropped = true; | |
|
miu
2016/02/10 21:04:56
Looks good here. Just need to do this above too.
emircan
2016/02/11 09:22:09
Done.
| |
| 304 } | |
| 305 | |
| 306 auto sample_attachments = static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex( | |
|
jfroy
2016/02/10 18:56:27
This code seems duplicated from UseOutputBitstream
emircan
2016/02/11 09:22:09
Done.
| |
| 307 CoreMediaGlue::CMSampleBufferGetSampleAttachmentsArray(sbuf, true), 0)); | |
| 308 const bool keyframe = | |
| 309 !CFDictionaryContainsKey(sample_attachments, | |
| 310 CoreMediaGlue::kCMSampleAttachmentKey_NotSync()); | |
| 311 | |
| 312 scoped_ptr<VTVideoEncodeAccelerator::BitstreamBufferRef> buffer_ref = | |
| 313 std::move(encoder_output_queue_.front()); | |
| 314 encoder_output_queue_.pop_front(); | |
| 315 | |
| 316 size_t used_buffer_size = 0; | |
| 317 if (!frame_dropped) { | |
| 318 const bool copy_rv = media::video_toolbox::CopySampleBufferToAnnexBBuffer( | |
| 319 sbuf, keyframe, buffer_ref->size, | |
| 320 reinterpret_cast<uint8_t*>(buffer_ref->shm->memory()), | |
| 321 &used_buffer_size); | |
| 322 CFRelease(sbuf); | |
| 323 if (!copy_rv) { | |
| 324 DLOG(ERROR) << "Cannot copy output from SampleBuffer to AnnexBBuffer."; | |
| 325 used_buffer_size = 0; | |
| 326 } | |
| 327 } | |
| 328 | |
| 329 client_->BitstreamBufferReady(buffer_ref->id, used_buffer_size, keyframe); | |
| 330 } | |
| 331 | |
| 332 bool VTVideoEncodeAccelerator::ResetCompressionSession() { | |
| 333 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 334 | |
| 335 DestroyCompressionSession(); | |
| 336 | |
| 337 base::ScopedCFTypeRef<CFDictionaryRef> encoder_spec = | |
| 338 media::video_toolbox::DictionaryWithKeyValue(videotoolbox_glue_ | |
| 339 ->kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder(), | |
|
jfroy
2016/02/10 18:56:27
Unless you want to fallback to Apple's *TERRIBLE*
emircan
2016/02/11 09:22:09
Thanks for the notice, I added RequireHardwareAcce
| |
| 340 kCFBooleanTrue); | |
| 341 | |
| 342 // Keep these in-sync with those in ConfigureCompressionSession(). | |
| 343 CFTypeRef attributes_keys[] = { | |
| 344 #if defined(OS_IOS) | |
| 345 kCVPixelBufferOpenGLESCompatibilityKey, | |
| 346 #else | |
| 347 kCVPixelBufferOpenGLCompatibilityKey, | |
| 348 #endif | |
| 349 kCVPixelBufferIOSurfacePropertiesKey, | |
| 350 kCVPixelBufferPixelFormatTypeKey | |
| 351 }; | |
| 352 const int format[] = { | |
| 353 CoreVideoGlue::kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange}; | |
| 354 CFTypeRef attributes_values[] = { | |
| 355 kCFBooleanTrue, | |
| 356 media::video_toolbox::DictionaryWithKeysAndValues(nullptr, nullptr, 0) | |
| 357 .release(), | |
| 358 media::video_toolbox::ArrayWithIntegers(format, arraysize(format)) | |
| 359 .release()}; | |
| 360 const base::ScopedCFTypeRef<CFDictionaryRef> attributes = | |
| 361 media::video_toolbox::DictionaryWithKeysAndValues( | |
| 362 attributes_keys, attributes_values, arraysize(attributes_keys)); | |
| 363 for (auto& v : attributes_values) | |
| 364 CFRelease(v); | |
| 365 | |
| 366 // Create the compression session. | |
| 367 OSStatus status = videotoolbox_glue_->VTCompressionSessionCreate( | |
| 368 kCFAllocatorDefault, | |
| 369 input_visible_size_.width(), | |
| 370 input_visible_size_.height(), | |
| 371 CoreMediaGlue::kCMVideoCodecType_H264, | |
| 372 encoder_spec, | |
| 373 attributes, | |
| 374 nullptr /* compressedDataAllocator */, | |
| 375 &VTVideoEncodeAccelerator::CompressionCallback, | |
| 376 reinterpret_cast<void*>(this), | |
| 377 compression_session_.InitializeInto()); | |
| 378 if (status != noErr) { | |
| 379 DLOG(ERROR) << " VTCompressionSessionCreate failed: " << status; | |
| 380 return false; | |
| 381 } | |
| 382 | |
| 383 return ConfigureCompressionSession(); | |
| 384 } | |
| 385 | |
| 386 bool VTVideoEncodeAccelerator::ConfigureCompressionSession() { | |
| 387 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 388 DCHECK(compression_session_); | |
| 389 | |
| 390 session_property_setter_.reset( | |
| 391 new media::video_toolbox::SessionPropertySetter(compression_session_, | |
| 392 videotoolbox_glue_)); | |
| 393 bool rv = true; | |
| 394 rv &= session_property_setter_->SetSessionProperty( | |
| 395 videotoolbox_glue_->kVTCompressionPropertyKey_ProfileLevel(), | |
| 396 videotoolbox_glue_->kVTProfileLevel_H264_Baseline_AutoLevel()); | |
| 397 rv &= session_property_setter_->SetSessionProperty( | |
| 398 videotoolbox_glue_->kVTCompressionPropertyKey_RealTime(), true); | |
| 399 rv &= session_property_setter_->SetSessionProperty( | |
| 400 videotoolbox_glue_->kVTCompressionPropertyKey_AverageBitRate(), | |
| 401 static_cast<int32_t>(bitrate_)); | |
| 402 rv &= session_property_setter_->SetSessionProperty( | |
| 403 videotoolbox_glue_->kVTCompressionPropertyKey_AllowFrameReordering(), | |
| 404 false); | |
| 405 DLOG_IF(ERROR, !rv) << " SetSessionProperty failed."; | |
| 406 return rv; | |
| 407 } | |
| 408 | |
| 409 void VTVideoEncodeAccelerator::DestroyCompressionSession() { | |
| 410 DCHECK(thread_checker_.CalledOnValidThread()); | |
| 411 | |
| 412 if (compression_session_) { | |
| 413 videotoolbox_glue_->VTCompressionSessionInvalidate(compression_session_); | |
| 414 compression_session_.reset(); | |
| 415 } | |
| 416 } | |
| 417 | |
| 418 } // namespace content | |
| OLD | NEW |