| Index: content/renderer/media/user_media_client_impl.cc
 | 
| diff --git a/content/renderer/media/user_media_client_impl.cc b/content/renderer/media/user_media_client_impl.cc
 | 
| index 4ed7bd6340178b5f1143f8248c539f591124854c..047e07ae4fd0fc6c6da6aefd2f79dfe08dadff94 100644
 | 
| --- a/content/renderer/media/user_media_client_impl.cc
 | 
| +++ b/content/renderer/media/user_media_client_impl.cc
 | 
| @@ -27,6 +27,7 @@
 | 
|  #include "third_party/WebKit/public/platform/WebMediaConstraints.h"
 | 
|  #include "third_party/WebKit/public/platform/WebMediaDeviceInfo.h"
 | 
|  #include "third_party/WebKit/public/platform/WebMediaStreamTrack.h"
 | 
| +#include "third_party/WebKit/public/platform/WebMediaStreamTrackSourcesRequest.h"
 | 
|  #include "third_party/WebKit/public/web/WebDocument.h"
 | 
|  #include "third_party/WebKit/public/web/WebLocalFrame.h"
 | 
|  
 | 
| @@ -62,7 +63,7 @@ struct UserMediaClientImpl::MediaDevicesRequestInfo {
 | 
|                            int audio_input_request_id,
 | 
|                            int video_input_request_id,
 | 
|                            int audio_output_request_id)
 | 
| -      : request(request),
 | 
| +      : media_devices_request(request),
 | 
|          audio_input_request_id(audio_input_request_id),
 | 
|          video_input_request_id(video_input_request_id),
 | 
|          audio_output_request_id(audio_output_request_id),
 | 
| @@ -70,7 +71,26 @@ struct UserMediaClientImpl::MediaDevicesRequestInfo {
 | 
|          has_video_input_returned(false),
 | 
|          has_audio_output_returned(false) {}
 | 
|  
 | 
| -  blink::WebMediaDevicesRequest request;
 | 
| +  MediaDevicesRequestInfo(
 | 
| +      const blink::WebMediaStreamTrackSourcesRequest& request,
 | 
| +      int audio_input_request_id,
 | 
| +      int video_input_request_id)
 | 
| +      : sources_request(request),
 | 
| +        audio_input_request_id(audio_input_request_id),
 | 
| +        video_input_request_id(video_input_request_id),
 | 
| +        audio_output_request_id(-1),
 | 
| +        has_audio_input_returned(false),
 | 
| +        has_video_input_returned(false),
 | 
| +        has_audio_output_returned(false) {}
 | 
| +
 | 
| +  bool IsSourcesRequest() {
 | 
| +    // We can't check isNull() on |media_devices_request| and |sources_request|,
 | 
| +    // because in unit tests they will always be null.
 | 
| +    return audio_output_request_id == -1;
 | 
| +  }
 | 
| +
 | 
| +  blink::WebMediaDevicesRequest media_devices_request;
 | 
| +  blink::WebMediaStreamTrackSourcesRequest sources_request;
 | 
|    int audio_input_request_id;
 | 
|    int video_input_request_id;
 | 
|    int audio_output_request_id;
 | 
| @@ -260,6 +280,45 @@ void UserMediaClientImpl::cancelMediaDevicesRequest(
 | 
|    CancelAndDeleteMediaDevicesRequest(request);
 | 
|  }
 | 
|  
 | 
| +void UserMediaClientImpl::requestSources(
 | 
| +    const blink::WebMediaStreamTrackSourcesRequest& sources_request) {
 | 
| +  // We do not call UpdateWebRTCMethodCount() to track this API in UMA stats
| 
| +  // here; it is instead counted in MediaStreamTrack::getSources in Blink.
 | 
| +  DCHECK(CalledOnValidThread());
 | 
| +
 | 
| +  int audio_input_request_id = g_next_request_id++;
 | 
| +  int video_input_request_id = g_next_request_id++;
 | 
| +
 | 
| +  // |sources_request| can't be mocked, so in tests it will be empty (the
 | 
| +  // underlying pointer is null). In order to use this function in a test we
 | 
| +  // need to check isNull() first.
 | 
| +  GURL security_origin;
 | 
| +  if (!sources_request.isNull())
 | 
| +    security_origin = GURL(sources_request.origin().utf8());
 | 
| +
 | 
| +  DVLOG(1) << "UserMediaClientImpl::requestSources("
 | 
| +           << audio_input_request_id
 | 
| +           << ", " << video_input_request_id
 | 
| +           << ", " << security_origin.spec() << ")";
 | 
| +
 | 
| +  media_devices_requests_.push_back(new MediaDevicesRequestInfo(
 | 
| +      sources_request,
 | 
| +      audio_input_request_id,
 | 
| +      video_input_request_id));
 | 
| +
 | 
| +  media_stream_dispatcher_->EnumerateDevices(
 | 
| +      audio_input_request_id,
 | 
| +      weak_factory_.GetWeakPtr(),
 | 
| +      MEDIA_DEVICE_AUDIO_CAPTURE,
 | 
| +      security_origin);
 | 
| +
 | 
| +  media_stream_dispatcher_->EnumerateDevices(
 | 
| +      video_input_request_id,
 | 
| +      weak_factory_.GetWeakPtr(),
 | 
| +      MEDIA_DEVICE_VIDEO_CAPTURE,
 | 
| +      security_origin);
 | 
| +}
 | 
| +
 | 
|  // Callback from MediaStreamDispatcher.
 | 
|  // The requested stream has been generated by the MediaStreamDispatcher.
 | 
|  void UserMediaClientImpl::OnStreamGenerated(
 | 
| @@ -336,6 +395,90 @@ void UserMediaClientImpl::OnStreamGeneratedForCancelledRequest(
 | 
|    }
 | 
|  }
 | 
|  
 | 
| +void UserMediaClientImpl::FinalizeEnumerateDevices(
 | 
| +    MediaDevicesRequestInfo* request) {
 | 
| +  // All devices are ready for copying. We use a hashed audio output device id
 | 
| +  // as the group id for input and output audio devices. If an input device
 | 
| +  // doesn't have an associated output device, we use the input device's own id.
 | 
| +  // We don't support group id for video devices; that field is left empty.
 | 
| +  blink::WebVector<blink::WebMediaDeviceInfo>
 | 
| +      devices(request->audio_input_devices.size() +
 | 
| +              request->video_input_devices.size() +
 | 
| +              request->audio_output_devices.size());
 | 
| +  for (size_t i = 0; i < request->audio_input_devices.size(); ++i) {
 | 
| +    const MediaStreamDevice& device = request->audio_input_devices[i].device;
 | 
| +    DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_CAPTURE);
 | 
| +    std::string group_id = base::UintToString(base::Hash(
 | 
| +        !device.matched_output_device_id.empty() ?
 | 
| +            device.matched_output_device_id :
 | 
| +            device.id));
 | 
| +    devices[i].initialize(
 | 
| +        blink::WebString::fromUTF8(device.id),
 | 
| +        blink::WebMediaDeviceInfo::MediaDeviceKindAudioInput,
 | 
| +        blink::WebString::fromUTF8(device.name),
 | 
| +        blink::WebString::fromUTF8(group_id));
 | 
| +  }
 | 
| +  size_t offset = request->audio_input_devices.size();
 | 
| +  for (size_t i = 0; i < request->video_input_devices.size(); ++i) {
 | 
| +    const MediaStreamDevice& device = request->video_input_devices[i].device;
 | 
| +    DCHECK_EQ(device.type, MEDIA_DEVICE_VIDEO_CAPTURE);
 | 
| +    devices[offset + i].initialize(
 | 
| +        blink::WebString::fromUTF8(device.id),
 | 
| +        blink::WebMediaDeviceInfo::MediaDeviceKindVideoInput,
 | 
| +        blink::WebString::fromUTF8(device.name),
 | 
| +        blink::WebString());
 | 
| +  }
 | 
| +  offset += request->video_input_devices.size();
 | 
| +  for (size_t i = 0; i < request->audio_output_devices.size(); ++i) {
 | 
| +    const MediaStreamDevice& device = request->audio_output_devices[i].device;
 | 
| +    DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_OUTPUT);
 | 
| +    devices[offset + i].initialize(
 | 
| +        blink::WebString::fromUTF8(device.id),
 | 
| +        blink::WebMediaDeviceInfo::MediaDeviceKindAudioOutput,
 | 
| +        blink::WebString::fromUTF8(device.name),
 | 
| +        blink::WebString::fromUTF8(base::UintToString(base::Hash(device.id))));
 | 
| +  }
 | 
| +
 | 
| +  EnumerateDevicesSucceded(&request->media_devices_request, devices);
 | 
| +}
 | 
| +
 | 
| +void UserMediaClientImpl::FinalizeEnumerateSources(
 | 
| +    MediaDevicesRequestInfo* request) {
 | 
| +  blink::WebVector<blink::WebSourceInfo>
 | 
| +      sources(request->audio_input_devices.size() +
 | 
| +              request->video_input_devices.size());
 | 
| +  for (size_t i = 0; i < request->audio_input_devices.size(); ++i) {
 | 
| +    const MediaStreamDevice& device = request->audio_input_devices[i].device;
 | 
| +    DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_CAPTURE);
 | 
| +    sources[i].initialize(blink::WebString::fromUTF8(device.id),
 | 
| +                          blink::WebSourceInfo::SourceKindAudio,
 | 
| +                          blink::WebString::fromUTF8(device.name),
 | 
| +                          blink::WebSourceInfo::VideoFacingModeNone);
 | 
| +  }
 | 
| +  size_t offset = request->audio_input_devices.size();
 | 
| +  for (size_t i = 0; i < request->video_input_devices.size(); ++i) {
 | 
| +    const MediaStreamDevice& device = request->video_input_devices[i].device;
 | 
| +    DCHECK_EQ(device.type, MEDIA_DEVICE_VIDEO_CAPTURE);
 | 
| +    blink::WebSourceInfo::VideoFacingMode video_facing;
 | 
| +    switch (device.video_facing) {
 | 
| +      case MEDIA_VIDEO_FACING_USER:
 | 
| +        video_facing = blink::WebSourceInfo::VideoFacingModeUser;
 | 
| +        break;
 | 
| +      case MEDIA_VIDEO_FACING_ENVIRONMENT:
 | 
| +        video_facing = blink::WebSourceInfo::VideoFacingModeEnvironment;
 | 
| +        break;
 | 
| +      default:
 | 
| +        video_facing = blink::WebSourceInfo::VideoFacingModeNone;
 | 
| +    }
 | 
| +    sources[offset + i].initialize(blink::WebString::fromUTF8(device.id),
 | 
| +                                   blink::WebSourceInfo::SourceKindVideo,
 | 
| +                                   blink::WebString::fromUTF8(device.name),
 | 
| +                                   video_facing);
 | 
| +  }
 | 
| +
 | 
| +  EnumerateSourcesSucceded(&request->sources_request, sources);
 | 
| +}
 | 
| +
 | 
|  // Callback from MediaStreamDispatcher.
 | 
|  // The requested stream failed to be generated.
 | 
|  void UserMediaClientImpl::OnStreamGenerationFailed(
 | 
| @@ -538,54 +685,16 @@ void UserMediaClientImpl::OnDevicesEnumerated(
 | 
|  
 | 
|    if (!request->has_audio_input_returned ||
 | 
|        !request->has_video_input_returned ||
 | 
| -      !request->has_audio_output_returned) {
 | 
| +      (!request->IsSourcesRequest() && !request->has_audio_output_returned)) {
 | 
|      // Wait for the rest of the devices to complete.
 | 
|      return;
 | 
|    }
 | 
|  
 | 
| -  // All devices are ready for copying. We use a hashed audio output device id
 | 
| -  // as the group id for input and output audio devices. If an input device
 | 
| -  // doesn't have an associated output device, we use the input device's own id.
 | 
| -  // We don't support group id for video devices, that's left empty.
 | 
| -  blink::WebVector<blink::WebMediaDeviceInfo>
 | 
| -      devices(request->audio_input_devices.size() +
 | 
| -              request->video_input_devices.size() +
 | 
| -              request->audio_output_devices.size());
 | 
| -  for (size_t i = 0; i < request->audio_input_devices.size(); ++i) {
 | 
| -    const MediaStreamDevice& device = request->audio_input_devices[i].device;
 | 
| -    DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_CAPTURE);
 | 
| -    std::string group_id = base::UintToString(base::Hash(
 | 
| -        !device.matched_output_device_id.empty() ?
 | 
| -            device.matched_output_device_id :
 | 
| -            device.id));
 | 
| -    devices[i].initialize(
 | 
| -        blink::WebString::fromUTF8(device.id),
 | 
| -        blink::WebMediaDeviceInfo::MediaDeviceKindAudioInput,
 | 
| -        blink::WebString::fromUTF8(device.name),
 | 
| -        blink::WebString::fromUTF8(group_id));
 | 
| -  }
 | 
| -  size_t offset = request->audio_input_devices.size();
 | 
| -  for (size_t i = 0; i < request->video_input_devices.size(); ++i) {
 | 
| -    const MediaStreamDevice& device = request->video_input_devices[i].device;
 | 
| -    DCHECK_EQ(device.type, MEDIA_DEVICE_VIDEO_CAPTURE);
 | 
| -    devices[offset + i].initialize(
 | 
| -        blink::WebString::fromUTF8(device.id),
 | 
| -        blink::WebMediaDeviceInfo::MediaDeviceKindVideoInput,
 | 
| -        blink::WebString::fromUTF8(device.name),
 | 
| -        blink::WebString());
 | 
| -  }
 | 
| -  offset += request->video_input_devices.size();
 | 
| -  for (size_t i = 0; i < request->audio_output_devices.size(); ++i) {
 | 
| -    const MediaStreamDevice& device = request->audio_output_devices[i].device;
 | 
| -    DCHECK_EQ(device.type, MEDIA_DEVICE_AUDIO_OUTPUT);
 | 
| -    devices[offset + i].initialize(
 | 
| -        blink::WebString::fromUTF8(device.id),
 | 
| -        blink::WebMediaDeviceInfo::MediaDeviceKindAudioOutput,
 | 
| -        blink::WebString::fromUTF8(device.name),
 | 
| -        blink::WebString::fromUTF8(base::UintToString(base::Hash(device.id))));
 | 
| -  }
 | 
| +  if (request->IsSourcesRequest())
 | 
| +    FinalizeEnumerateSources(request);
 | 
| +  else
 | 
| +    FinalizeEnumerateDevices(request);
 | 
|  
 | 
| -  EnumerateDevicesSucceded(&request->request, devices);
 | 
|    CancelAndDeleteMediaDevicesRequest(request);
 | 
|  }
 | 
|  
 | 
| @@ -675,6 +784,12 @@ void UserMediaClientImpl::EnumerateDevicesSucceded(
 | 
|    request->requestSucceeded(devices);
 | 
|  }
 | 
|  
 | 
| +void UserMediaClientImpl::EnumerateSourcesSucceded(
 | 
| +    blink::WebMediaStreamTrackSourcesRequest* request,
 | 
| +    blink::WebVector<blink::WebSourceInfo>& sources) {
 | 
| +  request->requestSucceeded(sources);
 | 
| +}
 | 
| +
 | 
|  const blink::WebMediaStreamSource* UserMediaClientImpl::FindLocalSource(
 | 
|      const StreamDeviceInfo& device) const {
 | 
|    for (LocalStreamSources::const_iterator it = local_sources_.begin();
 | 
| @@ -767,7 +882,7 @@ UserMediaClientImpl::FindMediaDevicesRequestInfo(
 | 
|      const blink::WebMediaDevicesRequest& request) {
 | 
|    MediaDevicesRequests::iterator it = media_devices_requests_.begin();
 | 
|    for (; it != media_devices_requests_.end(); ++it) {
 | 
| -    if ((*it)->request == request)
 | 
| +    if ((*it)->media_devices_request == request)
 | 
|        return (*it);
 | 
|    }
 | 
|    return NULL;
 | 
| 
 |
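| 
| A note on the facing-mode mapping: FinalizeEnumerateSources converts each video
| capture device's facing value into the blink::WebSourceInfo enum with an inline
| switch. As a minimal illustrative sketch (not part of the patch; ToWebFacingMode
| is a hypothetical helper name), the same mapping factored out would look like:
| 
|   // Hypothetical helper, for illustration only: mirrors the switch in
|   // FinalizeEnumerateSources, mapping MediaStreamDevice::video_facing onto
|   // the blink::WebSourceInfo facing enum.
|   static blink::WebSourceInfo::VideoFacingMode ToWebFacingMode(
|       const MediaStreamDevice& device) {
|     switch (device.video_facing) {
|       case MEDIA_VIDEO_FACING_USER:
|         return blink::WebSourceInfo::VideoFacingModeUser;
|       case MEDIA_VIDEO_FACING_ENVIRONMENT:
|         return blink::WebSourceInfo::VideoFacingModeEnvironment;
|       default:
|         return blink::WebSourceInfo::VideoFacingModeNone;
|     }
|   }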