Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2245)

Unified Diff: chrome/browser/extensions/api/cast_streaming/performance_test.cc

Issue 2866943002: Give performance_browser_tests lots of love and attention. (Closed)
Patch Set: rebase Created 3 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: chrome/browser/extensions/api/cast_streaming/performance_test.cc
diff --git a/chrome/browser/extensions/api/cast_streaming/performance_test.cc b/chrome/browser/extensions/api/cast_streaming/performance_test.cc
index 6fe680465068065ead4ea098398b7aa9c970ad86..8d9bb2fdca49a22a8990251db694ddea30419fa4 100644
--- a/chrome/browser/extensions/api/cast_streaming/performance_test.cc
+++ b/chrome/browser/extensions/api/cast_streaming/performance_test.cc
@@ -5,36 +5,37 @@
#include <stddef.h>
#include <stdint.h>
+#include <algorithm>
+#include <cmath>
+#include <iterator>
#include <map>
#include <vector>
#include "base/command_line.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/test/trace_event_analyzer.h"
#include "base/time/default_tick_clock.h"
#include "chrome/browser/extensions/extension_apitest.h"
#include "chrome/browser/extensions/extension_service.h"
#include "chrome/browser/extensions/tab_helper.h"
-#include "chrome/browser/profiles/profile.h"
-#include "chrome/browser/ui/exclusive_access/fullscreen_controller.h"
#include "chrome/common/chrome_switches.h"
#include "chrome/test/base/test_launcher_utils.h"
#include "chrome/test/base/test_switches.h"
#include "chrome/test/base/tracing.h"
-#include "content/public/browser/render_process_host.h"
-#include "content/public/browser/render_view_host.h"
#include "content/public/common/content_switches.h"
#include "extensions/common/switches.h"
#include "extensions/test/extension_test_message_listener.h"
#include "media/base/audio_bus.h"
#include "media/base/video_frame.h"
-#include "media/cast/cast_config.h"
-#include "media/cast/cast_environment.h"
+#include "media/cast/test/skewed_tick_clock.h"
#include "media/cast/test/utility/audio_utility.h"
#include "media/cast/test/utility/barcode.h"
#include "media/cast/test/utility/default_config.h"
#include "media/cast/test/utility/in_process_receiver.h"
+#include "media/cast/test/utility/net_utility.h"
#include "media/cast/test/utility/standalone_cast_environment.h"
#include "media/cast/test/utility/udp_proxy.h"
#include "net/base/ip_address.h"
@@ -54,87 +55,117 @@
namespace {
-const char kExtensionId[] = "ddchlicdkolnonkihahngkmmmjnjlkkf";
+constexpr char kExtensionId[] = "ddchlicdkolnonkihahngkmmmjnjlkkf";
-// Skip a few events from the beginning.
-static const size_t kSkipEvents = 3;
+// Number of events to trim from the beginning and end. These events don't
+// contribute anything toward stable measurements: A brief moment of startup
+// "jank" is acceptable, and shutdown may result in missing events (e.g., if
+// streaming stops a few frames before capture stops).
+constexpr size_t kTrimEvents = 24; // 1 sec at 24fps, or 0.4 sec at 60 fps.
+
+// Minimum number of events required for a reasonable analysis.
+constexpr size_t kMinDataPoints = 100; // 1 sec of audio, or ~5 sec at 24fps.
enum TestFlags {
- kUseGpu = 1 << 0, // Only execute test if --enable-gpu was given
- // on the command line. This is required for
- // tests that run on GPU.
- kDisableVsync = 1 << 1, // Do not limit framerate to vertical refresh.
- // when on GPU, nor to 60hz when not on GPU.
- kSmallWindow = 1 << 2, // 1 = 800x600, 0 = 2000x1000
- k24fps = 1 << 3, // use 24 fps video
- k30fps = 1 << 4, // use 30 fps video
- k60fps = 1 << 5, // use 60 fps video
- kProxyWifi = 1 << 6, // Run UDP through UDPProxy wifi profile
- kProxyBad = 1 << 7, // Run UDP through UDPProxy bad profile
- kSlowClock = 1 << 8, // Receiver clock is 10 seconds slow
- kFastClock = 1 << 9, // Receiver clock is 10 seconds fast
+ // TODO(miu): Remove kUseGpu (since the GPU is required), and maybe
+ // kDisableVsync. http://crbug.com/567848
+ kUseGpu = 1 << 0, // Only execute test if --enable-gpu was given
+ // on the command line. This is required for
+ // tests that run on GPU.
+ kDisableVsync = 1 << 1, // Do not limit framerate to vertical refresh,
+ // when on GPU, nor to 60hz when not on GPU.
+ kSmallWindow = 1 << 2, // Window size: 1 = 800x600, 0 = 2000x1000
+ k24fps = 1 << 3, // Use 24 fps video.
+ k30fps = 1 << 4, // Use 30 fps video.
+ k60fps = 1 << 5, // Use 60 fps video (captured at 30 fps).
+ kProxyWifi = 1 << 6, // Run UDP through UDPProxy wifi profile.
+ kProxySlow = 1 << 7, // Run UDP through UDPProxy slow profile.
+ kProxyBad = 1 << 8, // Run UDP through UDPProxy bad profile.
+ kSlowClock = 1 << 9, // Receiver clock is 10 seconds slow.
+ kFastClock = 1 << 10, // Receiver clock is 10 seconds fast.
+ kAutoThrottling = 1 << 11, // Use auto-resolution/framerate throttling.
};
-class SkewedTickClock : public base::DefaultTickClock {
- public:
- explicit SkewedTickClock(const base::TimeDelta& delta) : delta_(delta) {
- }
- base::TimeTicks NowTicks() override {
- return DefaultTickClock::NowTicks() + delta_;
- }
- private:
- base::TimeDelta delta_;
-};
+// These are just for testing! Use cryptographically-secure random keys in
+// production code!
+static constexpr char kAesKey[16] = {0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15};
+static constexpr char kAesIvMask[16] = {15, 14, 13, 12, 11, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0};
+
+media::cast::FrameReceiverConfig WithAesKeyAndIvSet(
+ const media::cast::FrameReceiverConfig& config) {
+ media::cast::FrameReceiverConfig result = config;
+ result.aes_key = std::string(kAesKey, kAesKey + sizeof(kAesKey));
+ result.aes_iv_mask = std::string(kAesIvMask, kAesIvMask + sizeof(kAesIvMask));
+ return result;
+}
class SkewedCastEnvironment : public media::cast::StandaloneCastEnvironment {
public:
explicit SkewedCastEnvironment(const base::TimeDelta& delta) :
StandaloneCastEnvironment() {
- clock_.reset(new SkewedTickClock(delta));
+ auto skewed_clock =
+ base::MakeUnique<media::cast::test::SkewedTickClock>(&default_clock_);
+ // If testing with a receiver clock that is ahead or behind the sender
hubbe 2017/05/23 21:26:28 I don't think this really works, since PostDelayTa
miu 2017/05/23 22:09:22 It sort of does: Code will use Now() to determine
+ // clock, fake a clock that is offset and also ticks at a rate of 50 parts
+ // per million faster or slower than the local sender's clock. This is the
hubbe 2017/05/23 21:26:28 50 ppm might be the worst we encounter in RL, but
miu 2017/05/23 22:09:22 Guidance from the perf folks is not to test things
hubbe 2017/05/23 22:47:08 Probably better to drop it. I think we have some o
+ // worst-case scenario for skew in-the-wild.
+ if (!delta.is_zero()) {
+ const double skew = delta < base::TimeDelta() ? 0.999950 : 1.000050;
+ skewed_clock->SetSkew(skew, delta);
+ }
+ clock_ = std::move(skewed_clock);
}
protected:
~SkewedCastEnvironment() override {}
+
+ private:
+ base::DefaultTickClock default_clock_;
};
// We log one of these for each call to OnAudioFrame/OnVideoFrame.
struct TimeData {
- TimeData(uint16_t frame_no_, base::TimeTicks render_time_)
- : frame_no(frame_no_), render_time(render_time_) {}
+ TimeData(uint16_t frame_no_, base::TimeTicks playout_time_)
+ : frame_no(frame_no_), playout_time(playout_time_) {}
// The unit here is video frames, for audio data there can be duplicates.
// This was decoded from the actual audio/video data.
uint16_t frame_no;
// This is when we should play this data, according to the sender.
- base::TimeTicks render_time;
+ base::TimeTicks playout_time;
};
// TODO(hubbe): Move to media/cast to use for offline log analysis.
class MeanAndError {
public:
- MeanAndError() {}
explicit MeanAndError(const std::vector<double>& values) {
double sum = 0.0;
double sqr_sum = 0.0;
- num_values = values.size();
- if (num_values) {
- for (size_t i = 0; i < num_values; i++) {
+ num_values_ = values.size();
+ if (num_values_ > 0) {
+ for (size_t i = 0; i < num_values_; i++) {
sum += values[i];
sqr_sum += values[i] * values[i];
}
- mean = sum / num_values;
- std_dev = sqrt(std::max(0.0, num_values * sqr_sum - sum * sum)) /
- num_values;
+ mean_ = sum / num_values_;
+ std_dev_ =
+ sqrt(std::max(0.0, num_values_ * sqr_sum - sum * sum)) / num_values_;
+ } else {
+ mean_ = NAN;
+ std_dev_ = NAN;
}
}
+
std::string AsString() const {
- return base::StringPrintf("%f,%f", mean, std_dev);
+ return base::StringPrintf("%f,%f", mean_, std_dev_);
}
void Print(const std::string& measurement,
const std::string& modifier,
const std::string& trace,
const std::string& unit) {
- if (num_values >= 20) {
+ if (num_values_ >= 20) {
perf_test::PrintResultMeanAndError(measurement,
modifier,
trace,
@@ -142,14 +173,15 @@ class MeanAndError {
unit,
true);
} else {
- LOG(ERROR) << "Not enough events for "
+ LOG(ERROR) << "Not enough events (" << num_values_ << ") for "
<< measurement << modifier << " " << trace;
}
}
- size_t num_values;
- double mean;
- double std_dev;
+ private:
+ size_t num_values_;
+ double mean_;
+ double std_dev_;
};
// This function checks how smooth the data in |data| is.
@@ -158,12 +190,12 @@ class MeanAndError {
// The unit is milliseconds.
static MeanAndError AnalyzeJitter(const std::vector<TimeData>& data) {
CHECK_GT(data.size(), 1UL);
- VLOG(0) << "Jitter analyzis on " << data.size() << " values.";
+ VLOG(0) << "Jitter analysis on " << data.size() << " values.";
std::vector<double> deltas;
double sum = 0.0;
for (size_t i = 1; i < data.size(); i++) {
- double delta = (data[i].render_time -
- data[i - 1].render_time).InMillisecondsF();
+ double delta =
+ (data[i].playout_time - data[i - 1].playout_time).InMillisecondsF();
deltas.push_back(delta);
sum += delta;
}
@@ -182,12 +214,12 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
explicit TestPatternReceiver(
const scoped_refptr<media::cast::CastEnvironment>& cast_environment,
const net::IPEndPoint& local_end_point)
- : InProcessReceiver(cast_environment,
- local_end_point,
- net::IPEndPoint(),
- media::cast::GetDefaultAudioReceiverConfig(),
- media::cast::GetDefaultVideoReceiverConfig()) {
- }
+ : InProcessReceiver(
+ cast_environment,
+ local_end_point,
+ net::IPEndPoint(),
+ WithAesKeyAndIvSet(media::cast::GetDefaultAudioReceiverConfig()),
+ WithAesKeyAndIvSet(media::cast::GetDefaultVideoReceiverConfig())) {}
typedef std::map<uint16_t, base::TimeTicks> TimeMap;
@@ -197,12 +229,12 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// want the minimum rtp timestamp, because that audio frame is supposed
// to play at the same time that the corresponding image is presented.
void MapFrameTimes(const std::vector<TimeData>& events, TimeMap* map) {
- for (size_t i = kSkipEvents; i < events.size(); i++) {
+ for (size_t i = kTrimEvents; i < events.size(); i++) {
xjz 2017/05/23 20:51:01 nit: Do we want to trim the events from the end to
miu 2017/05/23 21:51:05 Done.
base::TimeTicks& frame_tick = (*map)[events[i].frame_no];
if (frame_tick.is_null()) {
- frame_tick = events[i].render_time;
+ frame_tick = events[i].playout_time;
} else {
- frame_tick = std::min(events[i].render_time, frame_tick);
+ frame_tick = std::min(events[i].playout_time, frame_tick);
}
}
}
@@ -214,7 +246,9 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
// 1/30s of "2", etc.
TimeMap audio_frame_times, video_frame_times;
MapFrameTimes(audio_events_, &audio_frame_times);
+ EXPECT_GE(audio_frame_times.size(), kMinDataPoints);
MapFrameTimes(video_events_, &video_frame_times);
+ EXPECT_GE(video_frame_times.size(), kMinDataPoints);
std::vector<double> deltas;
for (TimeMap::const_iterator i = audio_frame_times.begin();
i != audio_frame_times.end();
@@ -224,6 +258,7 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
deltas.push_back((i->second - j->second).InMillisecondsF());
}
}
+ EXPECT_GE(deltas.size(), kMinDataPoints);
// Close to zero is better. (can be negative)
MeanAndError(deltas).Print(name, modifier, "av_sync", "ms");
@@ -231,6 +266,37 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
AnalyzeJitter(audio_events_).Print(name, modifier, "audio_jitter", "ms");
// lower is better.
AnalyzeJitter(video_events_).Print(name, modifier, "video_jitter", "ms");
+
+ // Mean resolution of video at receiver. Lower stddev is better, while the
+ // mean should be something reasonable given the network constraints
+ // (usually 480 lines or more). Note that this is the video resolution at
+ // the receiver, but changes originate on the sender side.
+ std::vector<double> slice_for_analysis;
+ if (video_frame_lines_.size() > kTrimEvents * 2) {
+ slice_for_analysis.reserve(video_frame_lines_.size() - kTrimEvents * 2);
+ EXPECT_GE(slice_for_analysis.capacity(), kMinDataPoints);
+ std::transform(video_frame_lines_.begin() + kTrimEvents,
+ video_frame_lines_.end() - kTrimEvents,
+ std::back_inserter(slice_for_analysis),
+ [](int lines) { return static_cast<double>(lines); });
+ }
+ MeanAndError(slice_for_analysis)
+ .Print(name, modifier, "playout_resolution", "lines");
+
+ // Number of resolution changes. Lower is better (and 1 is ideal). Zero
hubbe 2017/05/23 21:26:28 It's probably better to print out nothing if there
miu 2017/05/23 22:09:22 The EXPECT_GT() a few lines down should fail in th
hubbe 2017/05/23 22:47:08 EXPECT_GT should be fine.
+ // indicates a lack of data.
+ int last_lines = -1;
+ int change_count = 0;
+ for (size_t i = kTrimEvents; i < video_frame_lines_.size() - kTrimEvents;
+ ++i) {
+ if (video_frame_lines_[i] != last_lines) {
+ ++change_count;
+ last_lines = video_frame_lines_[i];
+ }
+ }
+ EXPECT_GT(change_count, 0);
+ perf_test::PrintResult(name, modifier, "resolution_changes",
+ base::IntToString(change_count), "count", true);
}
private:
@@ -252,31 +318,35 @@ class TestPatternReceiver : public media::cast::InProcessReceiver {
&frame_no)) {
audio_events_.push_back(TimeData(frame_no, playout_time));
} else {
- VLOG(0) << "Failed to decode audio timestamp!";
+ DVLOG(2) << "Failed to decode audio timestamp!";
}
}
void OnVideoFrame(const scoped_refptr<media::VideoFrame>& video_frame,
- const base::TimeTicks& render_time,
+ const base::TimeTicks& playout_time,
bool is_continuous) override {
CHECK(cast_env()->CurrentlyOn(media::cast::CastEnvironment::MAIN));
- TRACE_EVENT_INSTANT1(
- "mirroring", "TestPatternReceiver::OnVideoFrame",
- TRACE_EVENT_SCOPE_THREAD,
- "render_time", render_time.ToInternalValue());
+ TRACE_EVENT_INSTANT1("cast_perf_test", "VideoFramePlayout",
+ TRACE_EVENT_SCOPE_THREAD, "playout_time",
+ (playout_time - base::TimeTicks()).InMicroseconds());
uint16_t frame_no;
if (media::cast::test::DecodeBarcode(video_frame, &frame_no)) {
- video_events_.push_back(TimeData(frame_no, render_time));
+ video_events_.push_back(TimeData(frame_no, playout_time));
} else {
- VLOG(0) << "Failed to decode barcode!";
+ DVLOG(2) << "Failed to decode barcode!";
}
+
+ video_frame_lines_.push_back(video_frame->visible_rect().height());
}
std::vector<TimeData> audio_events_;
std::vector<TimeData> video_events_;
+ // The height (number of lines) of each video frame received.
+ std::vector<int> video_frame_lines_;
+
DISALLOW_COPY_AND_ASSIGN(TestPatternReceiver);
};
@@ -310,12 +380,16 @@ class CastV2PerformanceTest
suffix += "_60fps";
if (HasFlag(kProxyWifi))
suffix += "_wifi";
+ if (HasFlag(kProxySlow))
+ suffix += "_slowwifi";
if (HasFlag(kProxyBad))
suffix += "_bad";
if (HasFlag(kSlowClock))
suffix += "_slow";
if (HasFlag(kFastClock))
suffix += "_fast";
+ if (HasFlag(kAutoThrottling))
+ suffix += "_autothrottling";
return suffix;
}
@@ -330,20 +404,6 @@ class CastV2PerformanceTest
return 0;
}
- net::IPEndPoint GetFreeLocalPort() {
- // Determine a unused UDP port for the in-process receiver to listen on.
- // Method: Bind a UDP socket on port 0, and then check which port the
- // operating system assigned to it.
- std::unique_ptr<net::UDPServerSocket> receive_socket(
- new net::UDPServerSocket(NULL, net::NetLogSource()));
- receive_socket->AllowAddressReuse();
- CHECK_EQ(net::OK, receive_socket->Listen(
- net::IPEndPoint(net::IPAddress::IPv4Localhost(), 0)));
- net::IPEndPoint endpoint;
- CHECK_EQ(net::OK, receive_socket->GetLocalAddress(&endpoint));
- return endpoint;
- }
-
void SetUp() override {
EnablePixelOutput();
ExtensionApiTest::SetUp();
@@ -385,6 +445,7 @@ class CastV2PerformanceTest
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_INSTANT) ||
trace_analyzer::Query::EventPhaseIs(TRACE_EVENT_PHASE_COMPLETE));
analyzer->FindEvents(query, events);
+ VLOG(0) << "Retrieved " << events->size() << " events for: " << event_name;
}
// The key contains the name of the argument and the argument.
@@ -410,45 +471,26 @@ class CastV2PerformanceTest
}
// Look up an event in |event_map|. The return event will have the same
- // value for the argument |key_name| as |prev_event|. Note that if
- // the |key_name| is "time_delta", then we allow some fuzzy logic since
- // the time deltas are truncated to milliseconds in the code.
+ // value for the argument |key_name| as |prev_event|.
const trace_analyzer::TraceEvent* FindNextEvent(
const EventMap& event_map,
- std::vector<const trace_analyzer::TraceEvent*> prev_events,
+ const trace_analyzer::TraceEvent* prev_event,
std::string key_name) {
- EventMapKey key;
- for (size_t i = prev_events.size(); i;) {
- --i;
- std::map<std::string, double>::const_iterator j =
- prev_events[i]->arg_numbers.find(key_name);
- if (j != prev_events[i]->arg_numbers.end()) {
- key = *j;
- break;
- }
- }
- EventMap::const_iterator i = event_map.lower_bound(key);
- if (i == event_map.end())
- return NULL;
- if (i->first.second == key.second)
- return i->second;
- if (key_name != "time_delta")
- return NULL;
- if (fabs(i->first.second - key.second) < 1000)
- return i->second;
- if (i == event_map.begin())
- return NULL;
- i--;
- if (fabs(i->first.second - key.second) < 1000)
- return i->second;
- return NULL;
+ const auto arg_it = prev_event->arg_numbers.find(key_name);
+ if (arg_it == prev_event->arg_numbers.end())
+ return nullptr;
+ const EventMapKey& key = *arg_it;
+ const auto event_it = event_map.find(key);
+ if (event_it == event_map.end())
+ return nullptr;
+ return event_it->second;
}
// Given a vector of vector of data, extract the difference between
// two columns (|col_a| and |col_b|) and output the result as a
// performance metric.
void OutputMeasurement(const std::string& test_name,
- const std::vector<std::vector<double> > data,
+ const std::vector<std::vector<double>>& data,
const std::string& measurement_name,
int col_a,
int col_b) {
@@ -462,76 +504,92 @@ class CastV2PerformanceTest
"ms");
}
- // Analyzing latency is hard, because there is no unifying identifier for
- // frames throughout the code. At first, we have a capture timestamp, which
- // gets converted to a time delta, then back to a timestamp. Once it enters
- // the cast library it gets converted to an rtp_timestamp, and when it leaves
- // the cast library, all we have is the render_time.
- //
- // To be able to follow the frame throughout all this, we insert TRACE
- // calls that tracks each conversion as it happens. Then we extract all
- // these events and link them together.
+ // Analyze the latency of each frame as it goes from capture to playout. The
+ // event tracing system is used to track the frames.
void AnalyzeLatency(const std::string& test_name,
trace_analyzer::TraceAnalyzer* analyzer) {
+ // Retrieve and index all "checkpoint" events related to frames progressing
+ // from start to finish.
+ trace_analyzer::TraceEventVector capture_events;
+ // Sender side:
+ GetTraceEvents(analyzer, "Capture", &capture_events);
EventMap onbuffer, sink, inserted, encoded, transmitted, decoded, done;
IndexEvents(analyzer, "OnBufferReceived", &onbuffer);
- IndexEvents(analyzer, "MediaStreamVideoSink::OnVideoFrame", &sink);
+ IndexEvents(analyzer, "ConsumeVideoFrame", &sink);
IndexEvents(analyzer, "InsertRawVideoFrame", &inserted);
IndexEvents(analyzer, "VideoFrameEncoded", &encoded);
+ // Receiver side:
IndexEvents(analyzer, "PullEncodedVideoFrame", &transmitted);
- IndexEvents(analyzer, "FrameDecoded", &decoded);
- IndexEvents(analyzer, "TestPatternReceiver::OnVideoFrame", &done);
- std::vector<std::pair<EventMap*, std::string> > event_maps;
+ IndexEvents(analyzer, "VideoFrameDecoded", &decoded);
+ IndexEvents(analyzer, "VideoFramePlayout", &done);
+
+ // Analyzing latency is non-trivial, because only the frame timestamps
+ // uniquely identify frames AND the timestamps take varying forms throughout
+ // the pipeline (TimeTicks, TimeDelta, RtpTimestamp, etc.). Luckily, each
+ // neighboring stage in the pipeline knows about the timestamp from the
+ // prior stage, in whatever form it had, and so it's possible to track
+ // specific frames all the way from capture until playout at the receiver.
+ std::vector<std::pair<EventMap*, std::string>> event_maps;
event_maps.push_back(std::make_pair(&onbuffer, "timestamp"));
event_maps.push_back(std::make_pair(&sink, "time_delta"));
event_maps.push_back(std::make_pair(&inserted, "timestamp"));
event_maps.push_back(std::make_pair(&encoded, "rtp_timestamp"));
event_maps.push_back(std::make_pair(&transmitted, "rtp_timestamp"));
event_maps.push_back(std::make_pair(&decoded, "rtp_timestamp"));
- event_maps.push_back(std::make_pair(&done, "render_time"));
-
- trace_analyzer::TraceEventVector capture_events;
- GetTraceEvents(analyzer, "Capture" , &capture_events);
- EXPECT_GT(capture_events.size(), 0UL);
- std::vector<std::vector<double> > traced_frames;
- for (size_t i = kSkipEvents; i < capture_events.size(); i++) {
+ event_maps.push_back(std::make_pair(&done, "playout_time"));
+
+ // For each "begin capture" event, search for all the events following it,
+ // producing a matrix of when each frame reached each pipeline checkpoint.
+ // See the "cheat sheet" below for a description of each pipeline
+ // checkpoint.
+ ASSERT_GT(capture_events.size(), 2 * kTrimEvents);
+ std::vector<std::vector<double>> traced_frames;
+ for (size_t i = kTrimEvents; i < capture_events.size() - kTrimEvents; i++) {
std::vector<double> times;
- const trace_analyzer::TraceEvent *event = capture_events[i];
+ const trace_analyzer::TraceEvent* event = capture_events[i];
+ if (!event->other_event)
+ continue; // Begin capture event without a corresponding end event.
times.push_back(event->timestamp); // begin capture
event = event->other_event;
- if (!event) {
- continue;
- }
- times.push_back(event->timestamp); // end capture (with timestamp)
- std::vector<const trace_analyzer::TraceEvent*> prev_events;
- prev_events.push_back(event);
+ times.push_back(event->timestamp); // end capture
+ const trace_analyzer::TraceEvent* prev_event = event;
for (size_t j = 0; j < event_maps.size(); j++) {
- event = FindNextEvent(*event_maps[j].first,
- prev_events,
+ event = FindNextEvent(*event_maps[j].first, prev_event,
event_maps[j].second);
- if (!event) {
- break;
- }
- prev_events.push_back(event);
+ if (!event)
+ break; // Missing an event: The frame was dropped along the way.
+ prev_event = event;
times.push_back(event->timestamp);
}
if (event) {
- // Successfully traced frame from beginning to end
- traced_frames.push_back(times);
+ // Successfully traced frame from beginning to end.
+ traced_frames.push_back(std::move(times));
}
}
- // 0 = capture begin
- // 1 = capture end
- // 2 = onbuffer
- // 3 = sink
- // 4 = inserted
- // 5 = encoded
- // 6 = transmitted
- // 7 = decoded
- // 8 = done
-
- // Lower is better for all of these.
+ // Report the fraction of captured frames that were dropped somewhere along
+ // the way (i.e., before playout at the receiver).
+ const size_t capture_event_count = capture_events.size() - 2 * kTrimEvents;
+ EXPECT_GE(capture_event_count, kMinDataPoints);
+ const double success_percent =
+ 100.0 * traced_frames.size() / capture_event_count;
+ perf_test::PrintResult(
+ test_name, GetSuffixForTestFlags(), "frame_drop_rate",
+ base::StringPrintf("%f", 100 - success_percent), "percent", true);
+
+ // Report the latency between various pairs of checkpoints in the pipeline.
+ // Lower latency is better for all of these measurements.
+ //
+ // Cheat sheet:
+ // 0 = Sender: capture begin
+ // 1 = Sender: capture end
+ // 2 = Sender: memory buffer reached the render process
+ // 3 = Sender: frame routed to Cast RTP consumer
+ // 4 = Sender: frame reached VideoSender::InsertRawVideoFrame()
+ // 5 = Sender: frame encoding complete, queueing for transmission
+ // 6 = Receiver: frame fully received from network
+ // 7 = Receiver: frame decoded
+ // 8 = Receiver: frame played out
OutputMeasurement(test_name, traced_frames, "total_latency", 0, 8);
OutputMeasurement(test_name, traced_frames, "capture_duration", 0, 1);
OutputMeasurement(test_name, traced_frames, "send_to_renderer", 1, 3);
@@ -547,7 +605,7 @@ class CastV2PerformanceTest
GetTraceEvents(analyzer, event_name, &events);
std::vector<double> deltas;
- for (size_t i = kSkipEvents + 1; i < events.size(); ++i) {
+ for (size_t i = kTrimEvents + 1; i < events.size() - kTrimEvents; ++i) {
double delta_micros = events[i]->timestamp - events[i - 1]->timestamp;
deltas.push_back(delta_micros / 1000.0);
}
@@ -567,7 +625,7 @@ class CastV2PerformanceTest
(HasFlag(k30fps) ? 1 : 0) +
(HasFlag(k60fps) ? 1 : 0));
- net::IPEndPoint receiver_end_point = GetFreeLocalPort();
+ net::IPEndPoint receiver_end_point = media::cast::test::GetFreeLocalPort();
// Start the in-process receiver that examines audio/video for the expected
// test patterns.
@@ -585,29 +643,35 @@ class CastV2PerformanceTest
receiver->Start();
std::unique_ptr<media::cast::test::UDPProxy> udp_proxy;
- if (HasFlag(kProxyWifi) || HasFlag(kProxyBad)) {
- net::IPEndPoint proxy_end_point = GetFreeLocalPort();
+ if (HasFlag(kProxyWifi) || HasFlag(kProxySlow) || HasFlag(kProxyBad)) {
+ net::IPEndPoint proxy_end_point = media::cast::test::GetFreeLocalPort();
if (HasFlag(kProxyWifi)) {
udp_proxy = media::cast::test::UDPProxy::Create(
proxy_end_point, receiver_end_point,
media::cast::test::WifiNetwork(), media::cast::test::WifiNetwork(),
- NULL);
+ nullptr);
+ } else if (HasFlag(kProxySlow)) {
+ udp_proxy = media::cast::test::UDPProxy::Create(
+ proxy_end_point, receiver_end_point,
+ media::cast::test::SlowNetwork(), media::cast::test::SlowNetwork(),
+ nullptr);
} else if (HasFlag(kProxyBad)) {
udp_proxy = media::cast::test::UDPProxy::Create(
proxy_end_point, receiver_end_point,
media::cast::test::BadNetwork(), media::cast::test::BadNetwork(),
- NULL);
+ nullptr);
}
receiver_end_point = proxy_end_point;
}
std::string json_events;
- ASSERT_TRUE(tracing::BeginTracing(
- "test_fps,mirroring,gpu.capture,cast_perf_test"));
+ ASSERT_TRUE(tracing::BeginTracing("gpu.capture,cast_perf_test"));
const std::string page_url = base::StringPrintf(
- "performance%d.html?port=%d",
- getfps(),
- receiver_end_point.port());
+ "performance%d.html?port=%d&autoThrottling=%s&aesKey=%s&aesIvMask=%s",
+ getfps(), receiver_end_point.port(),
+ HasFlag(kAutoThrottling) ? "true" : "false",
+ base::HexEncode(kAesKey, sizeof(kAesKey)).c_str(),
+ base::HexEncode(kAesIvMask, sizeof(kAesIvMask)).c_str());
ASSERT_TRUE(RunExtensionSubtest("cast_streaming", page_url)) << message_;
ASSERT_TRUE(tracing::EndTracing(&json_events));
receiver->Stop();
@@ -619,21 +683,11 @@ class CastV2PerformanceTest
analyzer.reset(trace_analyzer::TraceAnalyzer::Create(json_events));
analyzer->AssociateAsyncBeginEndEvents();
- MeanAndError frame_data = AnalyzeTraceDistance(
- analyzer.get(),
- "OnSwapCompositorFrame");
- if (frame_data.num_values > 0) {
- // Lower is better.
- frame_data.Print(test_name, GetSuffixForTestFlags(),
- "time_between_frames", "ms");
- } else {
- // TODO(miu): Fix is currently WIP. http://crbug.com/709247
- LOG(WARNING) << "No frame_data values, so no time_between_frames result.";
- }
-
// This prints out the average time between capture events.
- // As the capture frame rate is capped at 30fps, this score
- // cannot get any better than (lower) 33.33 ms.
+ // Depending on the test, the capture frame rate is capped (e.g., at 30fps,
+ // this score cannot get any better than 33.33 ms). However, the measurement
+ // is important since it provides a valuable check that capture can keep up
+ // with the content's framerate.
MeanAndError capture_data = AnalyzeTraceDistance(analyzer.get(), "Capture");
// Lower is better.
capture_data.Print(test_name,
@@ -658,12 +712,14 @@ IN_PROC_BROWSER_TEST_P(CastV2PerformanceTest, Performance) {
INSTANTIATE_TEST_CASE_P(
,
CastV2PerformanceTest,
- testing::Values(
- kUseGpu | k24fps,
- kUseGpu | k30fps,
- kUseGpu | k60fps,
- kUseGpu | k24fps | kDisableVsync,
- kUseGpu | k30fps | kProxyWifi,
- kUseGpu | k30fps | kProxyBad,
- kUseGpu | k30fps | kSlowClock,
- kUseGpu | k30fps | kFastClock));
+ testing::Values(kUseGpu | k24fps,
+ kUseGpu | k30fps,
+ kUseGpu | k60fps,
+ kUseGpu | k24fps | kDisableVsync,
+ kUseGpu | k30fps | kProxyWifi,
+ kUseGpu | k30fps | kProxyBad,
+ kUseGpu | k30fps | kSlowClock,
+ kUseGpu | k30fps | kFastClock,
+ kUseGpu | k30fps | kProxyWifi | kAutoThrottling,
+ kUseGpu | k30fps | kProxySlow | kAutoThrottling,
+ kUseGpu | k30fps | kProxyBad | kAutoThrottling));

Powered by Google App Engine
This is Rietveld 408576698