Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "modules/shapedetection/FaceDetector.h" | 5 #include "modules/shapedetection/FaceDetector.h" |
| 6 | 6 |
| 7 #include "core/dom/DOMException.h" | 7 #include "core/dom/DOMException.h" |
| 8 #include "core/dom/DOMRect.h" | 8 #include "core/dom/DOMRect.h" |
| 9 #include "core/dom/Document.h" | 9 #include "core/dom/Document.h" |
| 10 #include "core/fetch/ImageResource.h" | 10 #include "core/fetch/ImageResource.h" |
| 11 #include "core/frame/ImageBitmap.h" | 11 #include "core/frame/ImageBitmap.h" |
| 12 #include "core/frame/LocalDOMWindow.h" | 12 #include "core/frame/LocalDOMWindow.h" |
| 13 #include "core/frame/LocalFrame.h" | 13 #include "core/frame/LocalFrame.h" |
| 14 #include "core/html/HTMLImageElement.h" | 14 #include "core/html/HTMLImageElement.h" |
| 15 #include "core/html/HTMLVideoElement.h" | |
| 15 #include "core/html/canvas/CanvasImageSource.h" | 16 #include "core/html/canvas/CanvasImageSource.h" |
| 16 #include "platform/graphics/Image.h" | 17 #include "platform/graphics/Image.h" |
| 17 #include "public/platform/InterfaceProvider.h" | 18 #include "public/platform/InterfaceProvider.h" |
| 18 #include "third_party/skia/include/core/SkImage.h" | 19 #include "third_party/skia/include/core/SkImage.h" |
| 19 #include "third_party/skia/include/core/SkImageInfo.h" | 20 #include "third_party/skia/include/core/SkImageInfo.h" |
| 20 #include "wtf/CheckedNumeric.h" | 21 #include "wtf/CheckedNumeric.h" |
| 21 | 22 |
| 22 namespace blink { | 23 namespace blink { |
| 23 | 24 |
| 24 namespace { | 25 namespace { |
| 25 | 26 |
| 26 static CanvasImageSource* toImageSourceInternal( | 27 static CanvasImageSource* toImageSourceInternal( |
| 27 const CanvasImageSourceUnion& value) { | 28 const CanvasImageSourceUnion& value) { |
| 28 if (value.isHTMLImageElement()) | 29 if (value.isHTMLImageElement()) |
| 29 return value.getAsHTMLImageElement(); | 30 return value.getAsHTMLImageElement(); |
| 30 | 31 |
| 31 if (value.isImageBitmap()) { | 32 if (value.isImageBitmap()) { |
| 32 if (static_cast<ImageBitmap*>(value.getAsImageBitmap())->isNeutered()) | 33 if (static_cast<ImageBitmap*>(value.getAsImageBitmap())->isNeutered()) |
| 33 return nullptr; | 34 return nullptr; |
xianglu
2016/10/31 23:07:52
I wonder if we can put them in the same if statement.
mcasas
2016/11/01 01:10:58
Done.
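For reference, a minimal sketch of the merged condition the reviewer suggests. It is illustrative only: the merged form is not shown in this patch set, and the code that actually landed may differ.

```cpp
// Illustrative sketch: fold the isNeutered() check into the outer condition.
// A neutered ImageBitmap no longer matches, falls through the remaining
// checks, and ends up returning nullptr, matching the nested form above.
if (value.isImageBitmap() &&
    !static_cast<ImageBitmap*>(value.getAsImageBitmap())->isNeutered())
  return value.getAsImageBitmap();
```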
| 34 return value.getAsImageBitmap(); | 35 return value.getAsImageBitmap(); |
| 35 } | 36 } |
| 36 | 37 |
| | 38 if (value.isHTMLVideoElement()) |
| | 39 return value.getAsHTMLVideoElement(); |
| | 40 |
| 37 return nullptr; | 41 return nullptr; |
| 38 } | 42 } |
| 39 | 43 |
| 40 } // anonymous namespace | 44 } // anonymous namespace |
| 41 | 45 |
| 42 FaceDetector* FaceDetector::create(ScriptState* scriptState) { | 46 FaceDetector* FaceDetector::create(ScriptState* scriptState) { |
| 43 return new FaceDetector(*scriptState->domWindow()->frame()); | 47 return new FaceDetector(*scriptState->domWindow()->frame()); |
| 44 } | 48 } |
| 45 | 49 |
| 46 FaceDetector::FaceDetector(LocalFrame& frame) { | 50 FaceDetector::FaceDetector(LocalFrame& frame) { |
| (...skipping 12 matching lines...) | |
| 59 if (!imageSourceInternal) { | 63 if (!imageSourceInternal) { |
| 60 // TODO(mcasas): Implement more CanvasImageSources, https://crbug.com/659138 | 64 // TODO(mcasas): Implement more CanvasImageSources, https://crbug.com/659138 |
| 61 NOTIMPLEMENTED() << "Unsupported CanvasImageSource"; | 65 NOTIMPLEMENTED() << "Unsupported CanvasImageSource"; |
| 62 resolver->reject( | 66 resolver->reject( |
| 63 DOMException::create(NotFoundError, "Unsupported source.")); | 67 DOMException::create(NotFoundError, "Unsupported source.")); |
| 64 return promise; | 68 return promise; |
| 65 } | 69 } |
| 66 | 70 |
| 67 if (imageSourceInternal->wouldTaintOrigin( | 71 if (imageSourceInternal->wouldTaintOrigin( |
| 68 scriptState->getExecutionContext()->getSecurityOrigin())) { | 72 scriptState->getExecutionContext()->getSecurityOrigin())) { |
| 69 resolver->reject(DOMException::create(SecurityError, | 73 resolver->reject( |
| 70 "Image source would taint origin.")); | 74 DOMException::create(SecurityError, "Source would taint origin.")); |
| 71 return promise; | 75 return promise; |
| 72 } | 76 } |
| 73 | 77 |
| 74 if (imageSource.isHTMLImageElement()) { | 78 if (imageSource.isHTMLImageElement()) { |
| 75 return detectFacesOnImageElement( | 79 return detectFacesOnImageElement( |
| 76 resolver, static_cast<HTMLImageElement*>(imageSourceInternal)); | 80 resolver, static_cast<HTMLImageElement*>(imageSourceInternal)); |
| 77 } | 81 } |
| 78 if (imageSourceInternal->isImageBitmap()) { | 82 if (imageSourceInternal->isImageBitmap()) { |
| 79 return detectFacesOnImageBitmap( | 83 return detectFacesOnImageBitmap( |
| 80 resolver, static_cast<ImageBitmap*>(imageSourceInternal)); | 84 resolver, static_cast<ImageBitmap*>(imageSourceInternal)); |
| 81 } | 85 } |
| | 86 if (imageSourceInternal->isVideoElement()) { |
| | 87 return detectFacesOnVideoElement( |
| | 88 resolver, static_cast<HTMLVideoElement*>(imageSourceInternal)); |
| | 89 } |
| | 90 |
| 82 NOTREACHED(); | 91 NOTREACHED(); |
| 83 return promise; | 92 return promise; |
| 84 } | 93 } |
| 85 | 94 |
| 86 ScriptPromise FaceDetector::detectFacesOnImageElement( | 95 ScriptPromise FaceDetector::detectFacesOnImageElement( |
| 87 ScriptPromiseResolver* resolver, | 96 ScriptPromiseResolver* resolver, |
| 88 const HTMLImageElement* img) { | 97 const HTMLImageElement* img) { |
| 89 ScriptPromise promise = resolver->promise(); | 98 ScriptPromise promise = resolver->promise(); |
| 90 if (img->bitmapSourceSize().isZero()) { | 99 if (img->bitmapSourceSize().isZero()) { |
| 91 resolver->resolve(HeapVector<Member<DOMRect>>()); | 100 resolver->resolve(HeapVector<Member<DOMRect>>()); |
| (...skipping 96 matching lines...) | |
| 188 allocationSize = pixmap.getSafeSize(); | 197 allocationSize = pixmap.getSafeSize(); |
| 189 } else { | 198 } else { |
| 190 pixelData = imageBitmap->copyBitmapData(imageBitmap->isPremultiplied() | 199 pixelData = imageBitmap->copyBitmapData(imageBitmap->isPremultiplied() |
| 191 ? PremultiplyAlpha | 200 ? PremultiplyAlpha |
| 192 : DontPremultiplyAlpha, | 201 : DontPremultiplyAlpha, |
| 193 N32ColorType); | 202 N32ColorType); |
| 194 pixelDataPtr = pixelData->data(); | 203 pixelDataPtr = pixelData->data(); |
| 195 allocationSize = imageBitmap->size().area() * 4 /* bytes per pixel */; | 204 allocationSize = imageBitmap->size().area() * 4 /* bytes per pixel */; |
| 196 } | 205 } |
| 197 | 206 |
| | 207 return detectFacesOnData(resolver, pixelDataPtr, |
| | 208 allocationSize.ValueOrDefault(0), |
| | 209 imageBitmap->width(), imageBitmap->height()); |
| | 210 } |
| | 211 |
| | 212 ScriptPromise FaceDetector::detectFacesOnVideoElement( |
| | 213 ScriptPromiseResolver* resolver, |
| | 214 const HTMLVideoElement* video) { |
| | 215 ScriptPromise promise = resolver->promise(); |
| | 216 |
| | 217 // TODO(mcasas): Check if |video| is actually playing a MediaStream by using |
| | 218 // HTMLMediaElement::isMediaStreamURL(video->currentSrc().getString()); if |
| | 219 // there is a local WebCam associated, there might be sophisticated ways to |
| | 220 // detect faces on it. Until then, treat as a normal <video> element. |
| | 221 |
| | 222 // !hasAvailableVideoFrame() is a bundle of invalid states. |
| | 223 if (!video->hasAvailableVideoFrame()) { |
| | 224 resolver->reject(DOMException::create( |
| | 225 InvalidStateError, "Invalid HTMLVideoElement or state.")); |
| | 226 return promise; |
| | 227 } |
| | 228 |
| | 229 const FloatSize videoSize(video->videoWidth(), video->videoHeight()); |
| | 230 SourceImageStatus sourceImageStatus = InvalidSourceImageStatus; |
| | 231 RefPtr<Image> image = |
| | 232 video->getSourceImageForCanvas(&sourceImageStatus, PreferNoAcceleration, |
| | 233 SnapshotReasonDrawImage, videoSize); |
| | 234 |
| | 235 DCHECK_EQ(NormalSourceImageStatus, sourceImageStatus); |
| | 236 |
| | 237 SkPixmap pixmap; |
| | 238 RefPtr<Uint8Array> pixelData; |
| | 239 uint8_t* pixelDataPtr = nullptr; |
| | 240 WTF::CheckedNumeric<int> allocationSize = 0; |
| | 241 // Use |skImage|'s pixels if it has direct access to them. |
| | 242 sk_sp<SkImage> skImage = image->imageForCurrentFrame(); |
| | 243 if (skImage->peekPixels(&pixmap)) { |
| | 244 pixelDataPtr = static_cast<uint8_t*>(pixmap.writable_addr()); |
| | 245 allocationSize = pixmap.getSafeSize(); |
| | 246 } else { |
| | 247 // TODO(mcasas): retrieve the pixels from elsewhere. |
| | 248 NOTREACHED(); |
| | 249 resolver->reject(DOMException::create( |
| | 250 InvalidStateError, "Failed to get pixels for current frame.")); |
| | 251 return promise; |
| | 252 } |
| | 253 |
| | 254 return detectFacesOnData(resolver, pixelDataPtr, |
| | 255 allocationSize.ValueOrDefault(0), image->width(), |
| | 256 image->height()); |
| | 257 } |
| | 258 |
| | 259 ScriptPromise FaceDetector::detectFacesOnData(ScriptPromiseResolver* resolver, |
| | 260 uint8_t* data, |
| | 261 int size, |
| | 262 int width, |
| | 263 int height) { |
| | 264 DCHECK(data); |
| | 265 DCHECK(size); |
| | 266 ScriptPromise promise = resolver->promise(); |
| | 267 |
| 198 mojo::ScopedSharedBufferHandle sharedBufferHandle = | 268 mojo::ScopedSharedBufferHandle sharedBufferHandle = |
| 199 mojo::SharedBufferHandle::Create(allocationSize.ValueOrDefault(0)); | 269 mojo::SharedBufferHandle::Create(size); |
| 200 | 270 if (!sharedBufferHandle->is_valid()) { |
| 201 if (!pixelDataPtr || !sharedBufferHandle->is_valid()) { | |
| 202 resolver->reject( | 271 resolver->reject( |
| 203 DOMException::create(InvalidStateError, "Internal allocation error")); | 272 DOMException::create(InvalidStateError, "Internal allocation error")); |
| 204 return promise; | 273 return promise; |
| 205 } | 274 } |
| 206 | 275 |
| 207 if (!m_service) { | 276 if (!m_service) { |
| 208 resolver->reject(DOMException::create( | 277 resolver->reject(DOMException::create( |
| 209 NotFoundError, "Face detection service unavailable.")); | 278 NotSupportedError, "Face detection service unavailable.")); |
| 210 return promise; | 279 return promise; |
| 211 } | 280 } |
| 212 | 281 |
| 213 const mojo::ScopedSharedBufferMapping mappedBuffer = | 282 const mojo::ScopedSharedBufferMapping mappedBuffer = |
| 214 sharedBufferHandle->Map(allocationSize.ValueOrDefault(0)); | 283 sharedBufferHandle->Map(size); |
| 215 DCHECK(mappedBuffer.get()); | 284 DCHECK(mappedBuffer.get()); |
| 216 | 285 |
| 217 memcpy(mappedBuffer.get(), pixelDataPtr, allocationSize.ValueOrDefault(0)); | 286 memcpy(mappedBuffer.get(), data, size); |
| 218 | 287 |
| 219 m_serviceRequests.add(resolver); | 288 m_serviceRequests.add(resolver); |
| 220 DCHECK(m_service.is_bound()); | 289 DCHECK(m_service.is_bound()); |
| 221 m_service->DetectFace(std::move(sharedBufferHandle), imageBitmap->width(), | 290 m_service->DetectFace(std::move(sharedBufferHandle), width, height, |
| 222 imageBitmap->height(), | |
| 223 convertToBaseCallback(WTF::bind( | 291 convertToBaseCallback(WTF::bind( |
| 224 &FaceDetector::onDetectFace, wrapPersistent(this), | 292 &FaceDetector::onDetectFace, wrapPersistent(this), |
| 225 wrapPersistent(resolver)))); | 293 wrapPersistent(resolver)))); |
| 226 sharedBufferHandle.reset(); | 294 sharedBufferHandle.reset(); |
| 227 return promise; | 295 return promise; |
| 228 } | 296 } |
| 229 | 297 |
| 230 void FaceDetector::onDetectFace( | 298 void FaceDetector::onDetectFace( |
| 231 ScriptPromiseResolver* resolver, | 299 ScriptPromiseResolver* resolver, |
| 232 mojom::blink::FaceDetectionResultPtr faceDetectionResult) { | 300 mojom::blink::FaceDetectionResultPtr faceDetectionResult) { |
| 233 if (!m_serviceRequests.contains(resolver)) | 301 if (!m_serviceRequests.contains(resolver)) |
| 234 return; | 302 return; |
| 235 | 303 |
| 236 HeapVector<Member<DOMRect>> detectedFaces; | 304 HeapVector<Member<DOMRect>> detectedFaces; |
| 237 for (const auto& boundingBox : faceDetectionResult->boundingBoxes) { | 305 for (const auto& boundingBox : faceDetectionResult->boundingBoxes) { |
| 238 detectedFaces.append(DOMRect::create(boundingBox->x, boundingBox->y, | 306 detectedFaces.append(DOMRect::create(boundingBox->x, boundingBox->y, |
| 239 boundingBox->width, | 307 boundingBox->width, |
| 240 boundingBox->height)); | 308 boundingBox->height)); |
| 241 } | 309 } |
| 242 | 310 |
| 243 resolver->resolve(detectedFaces); | 311 resolver->resolve(detectedFaces); |
| 244 m_serviceRequests.remove(resolver); | 312 m_serviceRequests.remove(resolver); |
| 245 } | 313 } |
| 246 | 314 |
| 247 DEFINE_TRACE(FaceDetector) { | 315 DEFINE_TRACE(FaceDetector) { |
| 248 visitor->trace(m_serviceRequests); | 316 visitor->trace(m_serviceRequests); |
| 249 } | 317 } |
| 250 | 318 |
| 251 } // namespace blink | 319 } // namespace blink |