OLD | NEW |
1 // Copyright 2013 The Chromium Authors. All rights reserved. | 1 // Copyright 2013 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <algorithm> | 5 #include <algorithm> |
6 #include <cmath> | 6 #include <cmath> |
| 7 #include <vector> |
7 | 8 |
| 9 #include "base/callback_helpers.h" |
8 #include "base/command_line.h" | 10 #include "base/command_line.h" |
9 #include "base/float_util.h" | 11 #include "base/memory/scoped_ptr.h" |
10 #include "base/run_loop.h" | 12 #include "base/run_loop.h" |
| 13 #include "base/strings/string_number_conversions.h" |
11 #include "base/strings/stringprintf.h" | 14 #include "base/strings/stringprintf.h" |
12 #include "base/synchronization/lock.h" | |
13 #include "base/time/time.h" | |
14 #include "chrome/browser/extensions/extension_apitest.h" | 15 #include "chrome/browser/extensions/extension_apitest.h" |
15 #include "chrome/common/chrome_switches.h" | 16 #include "chrome/common/chrome_switches.h" |
16 #include "content/public/common/content_switches.h" | 17 #include "content/public/common/content_switches.h" |
17 #include "extensions/common/switches.h" | 18 #include "extensions/common/switches.h" |
18 #include "media/base/bind_to_current_loop.h" | 19 #include "media/base/bind_to_current_loop.h" |
19 #include "media/base/video_frame.h" | 20 #include "media/base/video_frame.h" |
20 #include "media/cast/cast_config.h" | 21 #include "media/cast/cast_config.h" |
21 #include "media/cast/cast_environment.h" | 22 #include "media/cast/cast_environment.h" |
22 #include "media/cast/test/utility/audio_utility.h" | 23 #include "media/cast/test/utility/audio_utility.h" |
23 #include "media/cast/test/utility/default_config.h" | 24 #include "media/cast/test/utility/default_config.h" |
(...skipping 45 matching lines...)
69 << message_; | 70 << message_; |
70 } | 71 } |
71 | 72 |
72 IN_PROC_BROWSER_TEST_F(CastStreamingApiTest, NullStream) { | 73 IN_PROC_BROWSER_TEST_F(CastStreamingApiTest, NullStream) { |
73 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", "null_stream.html")) | 74 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", "null_stream.html")) |
74 << message_; | 75 << message_; |
75 } | 76 } |
76 | 77 |
77 namespace { | 78 namespace { |
78 | 79 |
| 80 struct YUVColor { |
| 81 int y; |
| 82 int u; |
| 83 int v; |
| 84 |
| 85 YUVColor() : y(0), u(0), v(0) {} |
| 86 YUVColor(int y_val, int u_val, int v_val) : y(y_val), u(u_val), v(v_val) {} |
| 87 }; |
| 88 |
| 89 |
| 90 media::cast::FrameReceiverConfig WithFakeAesKeyAndIv( |
| 91 media::cast::FrameReceiverConfig config) { |
| 92 config.aes_key = "0123456789abcdef"; |
| 93 config.aes_iv_mask = "fedcba9876543210"; |
| 94 return config; |
| 95 } |
| 96 |
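The key and IV mask above are raw 16-byte strings; the EndToEnd test below hands them to the sender page hex-encoded via base::HexEncode(), which doubles the length. A quick sketch of that expansion (values follow directly from the constants above):

    // 16 raw bytes in, 32 hex characters out.
    const std::string key = "0123456789abcdef";  // ASCII 0x30..0x39, 0x61..0x66
    const std::string hex = base::HexEncode(key.data(), key.size());
    // hex == "30313233343536373839616263646566"; the IV mask expands the same way.
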
79 // An in-process Cast receiver that examines the audio/video frames being | 97 // An in-process Cast receiver that examines the audio/video frames being |
80 // received for expected colors and tones. Used in | 98 // received for expected colors and tones. Used in |
81 // CastStreamingApiTest.EndToEnd, below. | 99 // CastStreamingApiTest.EndToEnd, below. |
82 class TestPatternReceiver : public media::cast::InProcessReceiver { | 100 class TestPatternReceiver : public media::cast::InProcessReceiver { |
83 public: | 101 public: |
84 explicit TestPatternReceiver( | 102 explicit TestPatternReceiver( |
85 const scoped_refptr<media::cast::CastEnvironment>& cast_environment, | 103 const scoped_refptr<media::cast::CastEnvironment>& cast_environment, |
86 const net::IPEndPoint& local_end_point) | 104 const net::IPEndPoint& local_end_point) |
87 : InProcessReceiver(cast_environment, | 105 : InProcessReceiver( |
88 local_end_point, | 106 cast_environment, |
89 net::IPEndPoint(), | 107 local_end_point, |
90 media::cast::GetDefaultAudioReceiverConfig(), | 108 net::IPEndPoint(), |
91 media::cast::GetDefaultVideoReceiverConfig()), | 109 WithFakeAesKeyAndIv(media::cast::GetDefaultAudioReceiverConfig()), |
92 target_tone_frequency_(0), | 110 WithFakeAesKeyAndIv(media::cast::GetDefaultVideoReceiverConfig())) { |
93 current_tone_frequency_(0.0f) { | |
94 memset(&target_color_, 0, sizeof(target_color_)); | |
95 memset(¤t_color_, 0, sizeof(current_color_)); | |
96 } | 111 } |
97 | 112 |
98 virtual ~TestPatternReceiver() {} | 113 virtual ~TestPatternReceiver() {} |
99 | 114 |
100 // Blocks the caller until this receiver has seen both |yuv_color| and | 115 void AddExpectedTone(int tone_frequency) { |
101 // |tone_frequency| consistently for the given |duration|. | 116 expected_tones_.push_back(tone_frequency); |
102 void WaitForColorAndTone(const uint8 yuv_color[3], | 117 } |
103 int tone_frequency, | |
104 base::TimeDelta duration) { | |
105 LOG(INFO) << "Waiting for test pattern: color=yuv(" | |
106 << static_cast<int>(yuv_color[0]) << ", " | |
107 << static_cast<int>(yuv_color[1]) << ", " | |
108 << static_cast<int>(yuv_color[2]) | |
109 << "), tone_frequency=" << tone_frequency << " Hz"; | |
110 | 118 |
| 119 void AddExpectedColor(const YUVColor& yuv_color) { |
| 120 expected_yuv_colors_.push_back(yuv_color); |
| 121 } |
| 122 |
| 123 // Blocks the caller until all expected tones and colors have been observed. |
| 124 void WaitForExpectedTonesAndColors() { |
111 base::RunLoop run_loop; | 125 base::RunLoop run_loop; |
112 cast_env()->PostTask( | 126 cast_env()->PostTask( |
113 media::cast::CastEnvironment::MAIN, | 127 media::cast::CastEnvironment::MAIN, |
114 FROM_HERE, | 128 FROM_HERE, |
115 base::Bind(&TestPatternReceiver::NotifyOnceMatched, | 129 base::Bind(&TestPatternReceiver::NotifyOnceObservedAllTonesAndColors, |
116 base::Unretained(this), | 130 base::Unretained(this), |
117 yuv_color, | |
118 tone_frequency, | |
119 duration, | |
120 media::BindToCurrentLoop(run_loop.QuitClosure()))); | 131 media::BindToCurrentLoop(run_loop.QuitClosure()))); |
121 run_loop.Run(); | 132 run_loop.Run(); |
122 } | 133 } |
123 | 134 |
124 private: | 135 private: |
125 // Resets tracking data and sets the match duration and callback. | 136 void NotifyOnceObservedAllTonesAndColors(const base::Closure& done_callback) { |
126 void NotifyOnceMatched(const uint8 yuv_color[3], | |
127 int tone_frequency, | |
128 base::TimeDelta match_duration, | |
129 const base::Closure& matched_callback) { | |
130 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); | 137 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); |
131 | 138 done_callback_ = done_callback; |
132 match_duration_ = match_duration; | 139 MaybeRunDoneCallback(); |
133 matched_callback_ = matched_callback; | |
134 target_color_[0] = yuv_color[0]; | |
135 target_color_[1] = yuv_color[1]; | |
136 target_color_[2] = yuv_color[2]; | |
137 target_tone_frequency_ = tone_frequency; | |
138 first_time_near_target_color_ = base::TimeTicks(); | |
139 first_time_near_target_tone_ = base::TimeTicks(); | |
140 } | 140 } |
141 | 141 |
142 // Runs |matched_callback_| once both color and tone have been matched for the | 142 void MaybeRunDoneCallback() { |
143 // required |match_duration_|. | |
144 void NotifyIfMatched() { | |
145 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); | 143 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); |
146 | 144 if (done_callback_.is_null()) |
147 // TODO(miu): Check audio tone too, once audio is fixed in the library. | |
148 // http://crbug.com/349295 | |
149 if (first_time_near_target_color_.is_null() || | |
150 /*first_time_near_target_tone_.is_null()*/ false) | |
151 return; | 145 return; |
152 const base::TimeTicks now = cast_env()->Clock()->NowTicks(); | 146 if (expected_tones_.empty() && expected_yuv_colors_.empty()) { |
153 if ((now - first_time_near_target_color_) >= match_duration_ && | 147 base::ResetAndReturn(&done_callback_).Run(); |
154 /*(now - first_time_near_target_tone_) >= match_duration_*/ true) { | 148 } else { |
155 matched_callback_.Run(); | 149 LOG(INFO) << "Waiting to encounter " << expected_tones_.size() |
| 150 << " more tone(s) and " << expected_yuv_colors_.size() |
| 151 << " more color(s)."; |
156 } | 152 } |
157 } | 153 } |
158 | 154 |
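base::ResetAndReturn() (hence the new base/callback_helpers.h include) clears the stored closure before running it, which is what makes the is_null() check above a reliable run-at-most-once guard. In effect:

    // base::ResetAndReturn(&done_callback_).Run() behaves like:
    //   base::Closure cb = done_callback_;
    //   done_callback_.Reset();  // is_null() is now true
    //   cb.Run();
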
159 // Invoked by InProcessReceiver for each received audio frame. | 155 // Invoked by InProcessReceiver for each received audio frame. |
160 virtual void OnAudioFrame(scoped_ptr<media::AudioBus> audio_frame, | 156 virtual void OnAudioFrame(scoped_ptr<media::AudioBus> audio_frame, |
161 const base::TimeTicks& playout_time, | 157 const base::TimeTicks& playout_time, |
162 bool is_continuous) OVERRIDE { | 158 bool is_continuous) OVERRIDE { |
163 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); | 159 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); |
164 | 160 |
165 if (audio_frame->frames() <= 0) { | 161 if (audio_frame->frames() <= 0) { |
166 NOTREACHED() << "OnAudioFrame called with no samples?!?"; | 162 NOTREACHED() << "OnAudioFrame called with no samples?!?"; |
167 return; | 163 return; |
168 } | 164 } |
169 | 165 |
| 166 if (done_callback_.is_null() || expected_tones_.empty()) |
| 167 return; // No need to waste CPU doing analysis on the signal. |
| 168 |
170 // Assume the audio signal is a single sine wave (it can have some | 169 // Assume the audio signal is a single sine wave (it can have some |
171 // low-amplitude noise). Count zero crossings, and extrapolate the | 170 // low-amplitude noise). Count zero crossings, and extrapolate the |
172 // frequency of the sine wave in |audio_frame|. | 171 // frequency of the sine wave in |audio_frame|. |
173 int crossings = 0; | 172 int crossings = 0; |
174 for (int ch = 0; ch < audio_frame->channels(); ++ch) { | 173 for (int ch = 0; ch < audio_frame->channels(); ++ch) { |
175 crossings += media::cast::CountZeroCrossings(audio_frame->channel(ch), | 174 crossings += media::cast::CountZeroCrossings(audio_frame->channel(ch), |
176 audio_frame->frames()); | 175 audio_frame->frames()); |
177 } | 176 } |
178 crossings /= audio_frame->channels(); // Take the average. | 177 crossings /= audio_frame->channels(); // Take the average. |
179 const float seconds_per_frame = | 178 const float seconds_per_frame = |
180 audio_frame->frames() / static_cast<float>(audio_config().frequency); | 179 audio_frame->frames() / static_cast<float>(audio_config().frequency); |
181 const float frequency_in_frame = crossings / seconds_per_frame / 2.0f; | 180 const float frequency = crossings / seconds_per_frame / 2.0f; |
| 181 VLOG(1) << "Current audio tone frequency: " << frequency; |
182 | 182 |
183 const float kAveragingWeight = 0.1f; | 183 const int kTargetWindowHz = 20; |
184 UpdateExponentialMovingAverage( | 184 for (std::vector<int>::iterator it = expected_tones_.begin(); |
185 kAveragingWeight, frequency_in_frame, ¤t_tone_frequency_); | 185 it != expected_tones_.end(); ++it) { |
186 VLOG(1) << "Current audio tone frequency: " << current_tone_frequency_; | 186 if (abs(static_cast<int>(frequency) - *it) < kTargetWindowHz) { |
187 | 187 LOG(INFO) << "Heard tone at frequency " << *it << " Hz."; |
188 const float kTargetWindowHz = 20; | 188 expected_tones_.erase(it); |
189 // Update the time at which the current tone started falling within | 189 MaybeRunDoneCallback(); |
190 // kTargetWindowHz of the target tone. | 190 break; |
191 if (fabsf(current_tone_frequency_ - target_tone_frequency_) < | 191 } |
192 kTargetWindowHz) { | |
193 if (first_time_near_target_tone_.is_null()) | |
194 first_time_near_target_tone_ = cast_env()->Clock()->NowTicks(); | |
195 NotifyIfMatched(); | |
196 } else { | |
197 first_time_near_target_tone_ = base::TimeTicks(); | |
198 } | 192 } |
199 } | 193 } |
200 | 194 |
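The zero-crossing arithmetic above works because a sine at f Hz crosses zero 2*f times per second, so f ~= crossings / (2 * seconds). A minimal standalone sketch of the same estimate (function name hypothetical, not part of this patch):

    // Estimate the frequency of a (mostly) pure tone from its sign changes.
    static float EstimateToneFrequency(const float* samples,
                                       int num_samples,
                                       int sample_rate) {
      int crossings = 0;
      for (int i = 1; i < num_samples; ++i) {
        if ((samples[i - 1] < 0.0f) != (samples[i] < 0.0f))
          ++crossings;
      }
      const float seconds = num_samples / static_cast<float>(sample_rate);
      return crossings / seconds / 2.0f;
    }
    // E.g., a 200 Hz tone in one 10 ms frame at 48000 Hz (480 samples) yields
    // ~4 crossings: 4 / 0.01 / 2 = 200 Hz.
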
201 virtual void OnVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame, | 195 virtual void OnVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame, |
202 const base::TimeTicks& render_time, | 196 const base::TimeTicks& playout_time, |
203 bool is_continuous) OVERRIDE { | 197 bool is_continuous) OVERRIDE { |
204 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); | 198 DCHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN)); |
205 | 199 |
206 CHECK(video_frame->format() == media::VideoFrame::YV12 || | 200 CHECK(video_frame->format() == media::VideoFrame::YV12 || |
207 video_frame->format() == media::VideoFrame::I420 || | 201 video_frame->format() == media::VideoFrame::I420 || |
208 video_frame->format() == media::VideoFrame::YV12A); | 202 video_frame->format() == media::VideoFrame::YV12A); |
209 | 203 |
210 // Note: We take the median value of each plane because the test image will | 204 if (done_callback_.is_null() || expected_yuv_colors_.empty()) |
211 // contain mostly a solid color plus some "cruft" which is the "Testing..." | 205 return; // No need to waste CPU doing analysis on the frame. |
212 // text in the upper-left corner of the video frame. In other words, we | |
213 // want to read "the most common color." | |
214 const int kPlanes[] = {media::VideoFrame::kYPlane, | |
215 media::VideoFrame::kUPlane, | |
216 media::VideoFrame::kVPlane}; | |
217 for (size_t i = 0; i < arraysize(kPlanes); ++i) { | |
218 current_color_[i] = | |
219 ComputeMedianIntensityInPlane(video_frame->row_bytes(kPlanes[i]), | |
220 video_frame->rows(kPlanes[i]), | |
221 video_frame->stride(kPlanes[i]), | |
222 video_frame->data(kPlanes[i])); | |
223 } | |
224 | 206 |
225 VLOG(1) << "Current video color: yuv(" << current_color_[0] << ", " | 207 // Take the median value of each plane because the test image will contain a |
226 << current_color_[1] << ", " << current_color_[2] << ')'; | 208 // letterboxed content region of mostly a solid color plus a small piece of |
| 209 // "something" that's animating to keep the tab capture pipeline generating |
| 210 // new frames. |
| 211 const gfx::Rect region = FindLetterboxedContentRegion(video_frame); |
| 212 YUVColor current_color; |
| 213 current_color.y = ComputeMedianIntensityInRegionInPlane( |
| 214 region, |
| 215 video_frame->stride(media::VideoFrame::kYPlane), |
| 216 video_frame->data(media::VideoFrame::kYPlane)); |
| 217 current_color.u = ComputeMedianIntensityInRegionInPlane( |
| 218 gfx::ScaleToEnclosedRect(region, 0.5f), |
| 219 video_frame->stride(media::VideoFrame::kUPlane), |
| 220 video_frame->data(media::VideoFrame::kUPlane)); |
| 221 current_color.v = ComputeMedianIntensityInRegionInPlane( |
| 222 gfx::ScaleToEnclosedRect(region, 0.5f), |
| 223 video_frame->stride(media::VideoFrame::kVPlane), |
| 224 video_frame->data(media::VideoFrame::kVPlane)); |
| 225 VLOG(1) << "Current video color: yuv(" << current_color.y << ", " |
| 226 << current_color.u << ", " << current_color.v << ')'; |
227 | 227 |
228 const float kTargetWindow = 10.0f; | 228 const int kTargetWindow = 10; |
229 // Update the time at which all color channels started falling within | 229 for (std::vector<YUVColor>::iterator it = expected_yuv_colors_.begin(); |
230 // kTargetWindow of the target. | 230 it != expected_yuv_colors_.end(); ++it) { |
231 if (fabsf(current_color_[0] - target_color_[0]) < kTargetWindow && | 231 if (abs(current_color.y - it->y) < kTargetWindow && |
232 fabsf(current_color_[1] - target_color_[1]) < kTargetWindow && | 232 abs(current_color.u - it->u) < kTargetWindow && |
233 fabsf(current_color_[2] - target_color_[2]) < kTargetWindow) { | 233 abs(current_color.v - it->v) < kTargetWindow) { |
234 if (first_time_near_target_color_.is_null()) | 234 LOG(INFO) << "Saw color yuv(" << it->y << ", " << it->u << ", " |
235 first_time_near_target_color_ = cast_env()->Clock()->NowTicks(); | 235 << it->v << ")."; |
236 NotifyIfMatched(); | 236 expected_yuv_colors_.erase(it); |
237 } else { | 237 MaybeRunDoneCallback(); |
238 first_time_near_target_color_ = base::TimeTicks(); | 238 break; |
| 239 } |
239 } | 240 } |
240 } | 241 } |
241 | 242 |
242 static void UpdateExponentialMovingAverage(float weight, | 243 // Return the region that excludes the black letterboxing borders surrounding |
243 float sample_value, | 244 // the content within |frame|, if any. |
244 float* average) { | 245 static gfx::Rect FindLetterboxedContentRegion( |
245 *average = weight * sample_value + (1.0f - weight) * (*average); | 246 const media::VideoFrame* frame) { |
246 CHECK(base::IsFinite(*average)); | 247 const int kNonBlackIntensityThreshold = 20; // 16 plus some fuzz. |
| 248 const int width = frame->row_bytes(media::VideoFrame::kYPlane); |
| 249 const int height = frame->rows(media::VideoFrame::kYPlane); |
| 250 const int stride = frame->stride(media::VideoFrame::kYPlane); |
| 251 |
| 252 gfx::Rect result; |
| 253 |
| 254 // Scan from the bottom-right until the first non-black pixel is |
| 255 // encountered. |
| 256 for (int y = height - 1; y >= 0; --y) { |
| 257 const uint8* const start = |
| 258 frame->data(media::VideoFrame::kYPlane) + y * stride; |
| 259 const uint8* const end = start + width; |
| 260 for (const uint8* p = end - 1; p >= start; --p) { |
| 261 if (*p > kNonBlackIntensityThreshold) { |
| 262 result.set_width(p - start + 1); |
| 263 result.set_height(y + 1); |
| 264 y = 0; // Discontinue outer loop. |
| 265 break; |
| 266 } |
| 267 } |
| 268 } |
| 269 |
| 270 // Scan from the upper-left until the first non-black pixel is encountered. |
| 271 for (int y = 0; y < result.height(); ++y) { |
| 272 const uint8* const start = |
| 273 frame->data(media::VideoFrame::kYPlane) + y * stride; |
| 274 const uint8* const end = start + result.width(); |
| 275 for (const uint8* p = start; p < end; ++p) { |
| 276 if (*p > kNonBlackIntensityThreshold) { |
| 277 result.set_x(p - start); |
| 278 result.set_width(result.width() - result.x()); |
| 279 result.set_y(y); |
| 280 result.set_height(result.height() - result.y()); |
| 281 y = result.height(); // Discontinue outer loop. |
| 282 break; |
| 283 } |
| 284 } |
| 285 } |
| 286 |
| 287 return result; |
247 } | 288 } |
248 | 289 |
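To see the two scans at work, trace them over a toy 6x4 luma plane with a 3x2 bright block at (2, 1) (a hand-checked illustration, not part of the test):

    // stride == width == 6, height == 4, intensity threshold == 20.
    const uint8 kToyPlane[4 * 6] = {
      0, 0,   0,   0,   0, 0,
      0, 0, 200, 200, 200, 0,
      0, 0, 200, 200, 200, 0,
      0, 0,   0,   0,   0, 0,
    };
    // Pass 1 (from bottom-right): the last bright pixel is at column 4 of
    // row 2, so width = 4 + 1 = 5 and height = 2 + 1 = 3.
    // Pass 2 (from upper-left, within that 5x3 area): the first bright pixel
    // is at (2, 1), so x = 2, width = 5 - 2 = 3, y = 1, height = 3 - 1 = 2.
    // FindLetterboxedContentRegion() would return gfx::Rect(2, 1, 3, 2).
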
249 static uint8 ComputeMedianIntensityInPlane(int width, | 290 static uint8 ComputeMedianIntensityInRegionInPlane(const gfx::Rect& region, |
250 int height, | 291 int stride, |
251 int stride, | 292 const uint8* data) { |
252 uint8* data) { | 293 if (region.IsEmpty()) |
253 const int num_pixels = width * height; | |
254 if (num_pixels <= 0) | |
255 return 0; | 294 return 0; |
256 // If necessary, re-pack the pixels such that the stride is equal to the | 295 const size_t num_values = region.size().GetArea(); |
257 // width. | 296 scoped_ptr<uint8[]> values(new uint8[num_values]); |
258 if (width < stride) { | 297 for (int y = 0; y < region.height(); ++y) { |
259 for (int y = 1; y < height; ++y) { | 298 memcpy(values.get() + y * region.width(), |
260 uint8* const src = data + y * stride; | 299 data + (region.y() + y) * stride + region.x(), |
261 uint8* const dest = data + y * width; | 300 region.width()); |
262 memmove(dest, src, width); | |
263 } | |
264 } | 301 } |
265 const size_t middle_idx = num_pixels / 2; | 302 const size_t middle_idx = num_values / 2; |
266 std::nth_element(data, data + middle_idx, data + num_pixels); | 303 std::nth_element(values.get(), |
267 return data[middle_idx]; | 304 values.get() + middle_idx, |
| 305 values.get() + num_values); |
| 306 return values[middle_idx]; |
268 } | 307 } |
269 | 308 |
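std::nth_element only partially orders the copied values, which is all a median lookup needs; it averages O(n) versus O(n log n) for a full sort. A toy check (values invented):

    uint8 values[] = {16, 200, 16, 200, 200};  // mostly-solid region plus noise
    std::nth_element(values, values + 2, values + 5);
    // values[2] is now 200, the median; the elements on either side are merely
    // partitioned (<= on the left, >= on the right), not sorted.
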
270 base::TimeDelta match_duration_; | 309 std::vector<int> expected_tones_; |
271 base::Closure matched_callback_; | 310 std::vector<YUVColor> expected_yuv_colors_; |
272 | 311 base::Closure done_callback_; |
273 float target_color_[3]; // Y, U, V | |
274 float target_tone_frequency_; | |
275 | |
276 float current_color_[3]; // Y, U, V | |
277 base::TimeTicks first_time_near_target_color_; | |
278 float current_tone_frequency_; | |
279 base::TimeTicks first_time_near_target_tone_; | |
280 | 312 |
281 DISALLOW_COPY_AND_ASSIGN(TestPatternReceiver); | 313 DISALLOW_COPY_AND_ASSIGN(TestPatternReceiver); |
282 }; | 314 }; |
283 | 315 |
284 } // namespace | 316 } // namespace |
285 | 317 |
286 class CastStreamingApiTestWithPixelOutput : public CastStreamingApiTest { | 318 class CastStreamingApiTestWithPixelOutput : public CastStreamingApiTest { |
287 virtual void SetUp() OVERRIDE { | 319 virtual void SetUp() OVERRIDE { |
288 EnablePixelOutput(); | 320 EnablePixelOutput(); |
289 CastStreamingApiTest::SetUp(); | 321 CastStreamingApiTest::SetUp(); |
290 } | 322 } |
291 | 323 |
292 virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { | 324 virtual void SetUpCommandLine(CommandLine* command_line) OVERRIDE { |
293 command_line->AppendSwitchASCII(::switches::kWindowSize, "128,128"); | 325 command_line->AppendSwitchASCII(::switches::kWindowSize, "128,128"); |
294 CastStreamingApiTest::SetUpCommandLine(command_line); | 326 CastStreamingApiTest::SetUpCommandLine(command_line); |
295 } | 327 } |
296 }; | 328 }; |
297 | 329 |
298 // http://crbug.com/396413 | |
299 // Tests the Cast streaming API and its basic functionality end-to-end. An | 330 // Tests the Cast streaming API and its basic functionality end-to-end. An |
300 // extension subtest is run to generate test content, capture that content, and | 331 // extension subtest is run to generate test content, capture that content, and |
301 // use the API to send it out. At the same time, this test launches an | 332 // use the API to send it out. At the same time, this test launches an |
302 // in-process Cast receiver, listening on a localhost UDP socket, to receive the | 333 // in-process Cast receiver, listening on a localhost UDP socket, to receive the |
303 // content and check whether it matches expectations. | 334 // content and check whether it matches expectations. |
304 IN_PROC_BROWSER_TEST_F(CastStreamingApiTestWithPixelOutput, DISABLED_EndToEnd) { | 335 // |
| 336 // TODO(miu): In order to get this test up-and-running again, we will first |
| 337 // confirm it is stable on Release build bots, then later we will enable it for |
| 338 // the Debug build bots. http://crbug.com/396413 |
| 339 // Also, it seems that the test fails to generate any video (audio is fine) on |
| 340 // the ChromeOS bot. Need to root-cause and resolve that issue. |
| 341 #if defined(NDEBUG) && !defined(OS_CHROMEOS) |
| 342 #define MAYBE_EndToEnd EndToEnd |
| 343 #else |
| 344 #define MAYBE_EndToEnd DISABLED_EndToEnd |
| 345 #endif |
| 346 IN_PROC_BROWSER_TEST_F(CastStreamingApiTestWithPixelOutput, MAYBE_EndToEnd) { |
305 scoped_ptr<net::UDPSocket> receive_socket( | 347 scoped_ptr<net::UDPSocket> receive_socket( |
306 new net::UDPSocket(net::DatagramSocket::DEFAULT_BIND, | 348 new net::UDPSocket(net::DatagramSocket::DEFAULT_BIND, |
307 net::RandIntCallback(), | 349 net::RandIntCallback(), |
308 NULL, | 350 NULL, |
309 net::NetLog::Source())); | 351 net::NetLog::Source())); |
310 receive_socket->AllowAddressReuse(); | 352 receive_socket->AllowAddressReuse(); |
311 ASSERT_EQ(net::OK, receive_socket->Bind(GetFreeLocalPort())); | 353 ASSERT_EQ(net::OK, receive_socket->Bind(GetFreeLocalPort())); |
312 net::IPEndPoint receiver_end_point; | 354 net::IPEndPoint receiver_end_point; |
313 ASSERT_EQ(net::OK, receive_socket->GetLocalAddress(&receiver_end_point)); | 355 ASSERT_EQ(net::OK, receive_socket->GetLocalAddress(&receiver_end_point)); |
314 receive_socket.reset(); | 356 receive_socket.reset(); |
315 | 357 |
316 // Start the in-process receiver that examines audio/video for the expected | 358 // Start the in-process receiver that examines audio/video for the expected |
317 // test patterns. | 359 // test patterns. |
318 const scoped_refptr<media::cast::StandaloneCastEnvironment> cast_environment( | 360 const scoped_refptr<media::cast::StandaloneCastEnvironment> cast_environment( |
319 new media::cast::StandaloneCastEnvironment()); | 361 new media::cast::StandaloneCastEnvironment()); |
320 TestPatternReceiver* const receiver = | 362 TestPatternReceiver* const receiver = |
321 new TestPatternReceiver(cast_environment, receiver_end_point); | 363 new TestPatternReceiver(cast_environment, receiver_end_point); |
322 | 364 |
323 // Launch the page that: 1) renders the source content; 2) uses the | 365 // Launch the page that: 1) renders the source content; 2) uses the |
324 // chrome.tabCapture and chrome.cast.streaming APIs to capture its content and | 366 // chrome.tabCapture and chrome.cast.streaming APIs to capture its content and |
325 // stream using Cast; and 3) calls chrome.test.succeed() once it is | 367 // stream using Cast; and 3) calls chrome.test.succeed() once it is |
326 // operational. | 368 // operational. |
327 const std::string page_url = base::StringPrintf( | 369 const std::string page_url = base::StringPrintf( |
328 "end_to_end_sender.html?port=%d", receiver_end_point.port()); | 370 "end_to_end_sender.html?port=%d&aesKey=%s&aesIvMask=%s", |
| 371 receiver_end_point.port(), |
| 372 base::HexEncode(receiver->audio_config().aes_key.data(), |
| 373 receiver->audio_config().aes_key.size()).c_str(), |
| 374 base::HexEncode(receiver->audio_config().aes_iv_mask.data(), |
| 375 receiver->audio_config().aes_iv_mask.size()).c_str()); |
329 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", page_url)) << message_; | 376 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", page_url)) << message_; |
330 | 377 |
331 // Examine the Cast receiver for expected audio/video test patterns. The | 378 // Examine the Cast receiver for expected audio/video test patterns. The |
332 // colors and tones specified here must match those in end_to_end_sender.js. | 379 // colors and tones specified here must match those in end_to_end_sender.js. |
| 380 // Note that we do not check that the color and tone are received |
| 381 // simultaneously since A/V sync should be measured in perf tests. |
| 382 receiver->AddExpectedTone(200 /* Hz */); |
| 383 receiver->AddExpectedTone(500 /* Hz */); |
| 384 receiver->AddExpectedTone(1800 /* Hz */); |
| 385 receiver->AddExpectedColor(YUVColor(82, 90, 240)); // rgb(255, 0, 0) |
| 386 receiver->AddExpectedColor(YUVColor(145, 54, 34)); // rgb(0, 255, 0) |
| 387 receiver->AddExpectedColor(YUVColor(41, 240, 110)); // rgb(0, 0, 255) |
333 receiver->Start(); | 388 receiver->Start(); |
334 const uint8 kRedInYUV[3] = {82, 90, 240}; // rgb(255, 0, 0) | 389 receiver->WaitForExpectedTonesAndColors(); |
335 const uint8 kGreenInYUV[3] = {145, 54, 34}; // rgb(0, 255, 0) | |
336 const uint8 kBlueInYUV[3] = {41, 240, 110}; // rgb(0, 0, 255) | |
337 const base::TimeDelta kOneHalfSecond = base::TimeDelta::FromMilliseconds(500); | |
338 receiver->WaitForColorAndTone(kRedInYUV, 200 /* Hz */, kOneHalfSecond); | |
339 receiver->WaitForColorAndTone(kGreenInYUV, 500 /* Hz */, kOneHalfSecond); | |
340 receiver->WaitForColorAndTone(kBlueInYUV, 1800 /* Hz */, kOneHalfSecond); | |
341 receiver->Stop(); | 390 receiver->Stop(); |
342 | 391 |
343 delete receiver; | 392 delete receiver; |
344 cast_environment->Shutdown(); | 393 cast_environment->Shutdown(); |
345 } | 394 } |
346 | 395 |
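The YUV triplets fed to AddExpectedColor() above are the ITU-R BT.601 "studio swing" encodings of pure red, green, and blue. A sketch of that conversion (helper names hypothetical; the sender's exact colorspace math may differ by a unit or two, which the +/-10 match window absorbs):

    static int RoundToInt(float v) { return static_cast<int>(std::floor(v + 0.5f)); }

    static YUVColor RgbToYuvBt601(int r, int g, int b) {
      return YUVColor(
          RoundToInt(16.0f + (65.481f * r + 128.553f * g + 24.966f * b) / 255.0f),
          RoundToInt(128.0f + (-37.797f * r - 74.203f * g + 112.0f * b) / 255.0f),
          RoundToInt(128.0f + (112.0f * r - 93.786f * g - 18.214f * b) / 255.0f));
    }
    // RgbToYuvBt601(255, 0, 0) --> (81, 90, 240)
    // RgbToYuvBt601(0, 255, 0) --> (145, 54, 34)
    // RgbToYuvBt601(0, 0, 255) --> (41, 240, 110)
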
347 IN_PROC_BROWSER_TEST_F(CastStreamingApiTestWithPixelOutput, RtpStreamError) { | 396 IN_PROC_BROWSER_TEST_F(CastStreamingApiTestWithPixelOutput, RtpStreamError) { |
348 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", "rtp_stream_error.html")); | 397 ASSERT_TRUE(RunExtensionSubtest("cast_streaming", "rtp_stream_error.html")); |
349 } | 398 } |
350 | 399 |
351 } // namespace extensions | 400 } // namespace extensions |