Chromium Code Reviews| Index: content/renderer/media/webrtc/stun_field_trial.cc |
| diff --git a/content/renderer/media/webrtc/stun_field_trial.cc b/content/renderer/media/webrtc/stun_field_trial.cc |
| index 41be3588a749b09ce5071862cd892effe1095dca..8fcc352ff34470467575391343b904cc6b7ec08f 100644 |
| --- a/content/renderer/media/webrtc/stun_field_trial.cc |
| +++ b/content/renderer/media/webrtc/stun_field_trial.cc |
| @@ -6,8 +6,10 @@ |
| #include <math.h> |
| +#include "base/bind.h" |
| #include "base/logging.h" |
| #include "base/macros.h" |
| +#include "base/message_loop/message_loop.h" |
| #include "base/metrics/histogram.h" |
| #include "base/rand_util.h" |
| #include "base/strings/string_number_conversions.h" |
| @@ -21,7 +23,6 @@ |
| #include "third_party/webrtc/base/socketaddress.h" |
| #include "third_party/webrtc/base/thread.h" |
| #include "third_party/webrtc/p2p/base/packetsocketfactory.h" |
| -#include "third_party/webrtc/p2p/stunprober/stunprober.h" |
| using stunprober::StunProber; |
| @@ -29,6 +30,14 @@ namespace content { |
| namespace { |
| +// Global states to manage the trial. This trial is only run on the first |
| +// renderer so this is ok. |
| +int total_probers = 0; |
| +int reporting_batch_size = 0; |
| +int ready_probers = 0; |
| +int finished_probers = 0; |
| +StunProberWithWeakPtr* prober_head = nullptr; |
|
pthatcher2
2015/10/22 05:27:51
Instead of having global state, can we instead have a class that holds this state?
|
| + |
| // This needs to be the same as NatTypeCounters in histograms.xml. |
| enum NatType { |
| NAT_TYPE_NONE, |
| @@ -65,73 +74,134 @@ int ClampProbingInterval(int interval_ms) { |
| std::string HistogramName(const std::string& prefix, |
| NatType nat_type, |
| - int interval_ms) { |
| - return base::StringPrintf("WebRTC.Stun.%s.%s.%dms", prefix.c_str(), |
| - NatTypeNames[nat_type], interval_ms); |
| + int interval_ms, |
| + int current_batch, |
|
pthatcher2
2015/10/22 05:27:51
Should this be batch_index?
guoweis_left_chromium
2015/10/27 17:19:06
Done.
|
| + int total_batch) { |
|
pthatcher2
2015/10/22 05:27:51
And this batch_count or total_batches?
guoweis_left_chromium
2015/10/27 17:19:05
Done.
|
| + return base::StringPrintf("WebRTC.Stun.%s.%s.%dms.%d.%d", prefix.c_str(), |
| + NatTypeNames[nat_type], interval_ms, current_batch, |
| + total_batch); |
| } |
| -void SaveHistogramData(StunProber* prober) { |
| - StunProber::Stats stats; |
| - if (!prober->GetStats(&stats)) |
| - return; |
| - |
| - NatType nat_type = GetNatType(stats.nat_type); |
| - |
| - // Use the real probe interval for reporting, converting from nanosecond to |
| - // millisecond at 5ms boundary. |
| - int interval_ms = |
| - round(static_cast<float>(stats.actual_request_interval_ns) / 5000) * 5; |
| - |
| - interval_ms = ClampProbingInterval(interval_ms); |
| - |
| - UMA_HISTOGRAM_ENUMERATION("WebRTC.NAT.Metrics", nat_type, NAT_TYPE_MAX); |
| - |
| - std::string histogram_name = |
| - HistogramName("SuccessPercent", nat_type, interval_ms); |
| +void SaveHistogramData(StunProberWithWeakPtr* prober) { |
| + NatType nat_type = NAT_TYPE_MAX; |
| + int interval_ms = 0; |
| + int count = 0; |
| + int total_sent = 0; |
| + int total_recv = 0; |
| + while (prober != nullptr) { |
|
pthatcher2
2015/10/22 05:27:52
Wouldn't this be more clear as
for (; prober != n
guoweis_left_chromium
2015/10/27 17:19:06
Done.
|
| + ++count; |
| + |
| + // Get the stats. |
| + StunProber::Stats stats; |
| + if (!prober->GetStats(&stats)) |
| + return; |
| + |
| + // Check if the NAT type is consistent. |
| + if (nat_type == NAT_TYPE_MAX) { |
| + nat_type = GetNatType(stats.nat_type); |
| + // If we can't figure out the nattype at the beginning, just return. |
| + if (nat_type == NAT_TYPE_UNKNOWN) |
| + return; |
| + } |
| + // For subsequent probers, we might get unknown as nattype if all the |
| + // bindings fail, but it's ok. |
| + else if (nat_type != GetNatType(stats.nat_type) && |
| + nat_type != NAT_TYPE_UNKNOWN) |
| + return; |
| + |
| + // Check the interval is consistent. |
| + // Use the real probe interval for reporting, converting from nanosecond to |
| + // millisecond at 5ms boundary. |
| + int new_interval_ms = ClampProbingInterval( |
| + round(static_cast<float>(stats.actual_request_interval_ns) / 5000) * 5); |
| + if (interval_ms == 0) { |
| + interval_ms = new_interval_ms; |
| + } else if (interval_ms != new_interval_ms) |
| + return; |
| + |
| + // Sum up the total sent and recv packets. |
| + total_sent += stats.num_request_sent; |
| + total_recv += stats.num_response_received; |
| + |
| + // At the batch boundary, reporting it. |
| + if (count % reporting_batch_size == 0) { |
| + if (total_sent != 0) { |
| + int success_rate = total_recv * 100 / total_sent; |
| + std::string histogram_name = HistogramName( |
| + "SuccessRate", nat_type, interval_ms, count / reporting_batch_size, |
| + total_probers / reporting_batch_size); |
| + |
| + // Mimic the same behavior as UMA_HISTOGRAM_PERCENTAGE. We can't use |
| + // that macro as the histogram name is determined dynamically. |
| + base::HistogramBase* histogram = base::Histogram::FactoryGet( |
| + histogram_name, 1, 101, 102, |
| + base::Histogram::kUmaTargetedHistogramFlag); |
| + histogram->Add(success_rate); |
| + |
| + DVLOG(1) << "Histogram '" << histogram_name.c_str() |
| + << "' = " << stats.success_percent; |
| + |
| + DVLOG(1) << "Shared Socket Mode: " << stats.shared_socket_mode; |
| + DVLOG(1) << "Requests sent: " << total_sent; |
| + DVLOG(1) << "Responses received: " << total_recv; |
| + DVLOG(1) << "Target interval (ns): " |
| + << stats.target_request_interval_ns; |
| + DVLOG(1) << "Actual interval (ns): " |
| + << stats.actual_request_interval_ns; |
| + DVLOG(1) << "NAT Type: " << NatTypeNames[nat_type]; |
| + DVLOG(1) << "Host IP: " << stats.host_ip; |
| + } |
| + total_sent = 0; |
| + total_recv = 0; |
| + } |
| + prober = prober->GetNextProber(); |
| + } |
| +} |
| - // Mimic the same behavior as UMA_HISTOGRAM_PERCENTAGE. We can't use that |
| - // macro as the histogram name is determined dynamically. |
| - base::HistogramBase* histogram = base::Histogram::FactoryGet( |
| - histogram_name, 1, 101, 102, base::Histogram::kUmaTargetedHistogramFlag); |
| - histogram->Add(stats.success_percent); |
| +void OnStunProbeTrialFinished(StunProber* prober, int result) { |
| + if (result == StunProber::SUCCESS) |
| + ++finished_probers; |
| - DVLOG(1) << "Histogram '" << histogram_name.c_str() |
| - << "' = " << stats.success_percent; |
| + if (finished_probers == total_probers) |
| + SaveHistogramData(prober_head); |
|
pthatcher2
2015/10/22 05:27:51
If any of them fail, we don't record anything?  Why?
guoweis_left_chromium
2015/10/27 17:19:06
I think the chance for this to happen is low. If i
|
| +} |
| - histogram_name = HistogramName("ResponseLatency", nat_type, interval_ms); |
| +void OnStunProberPrepared(StunProber* prober, int result) { |
| + if (result == StunProber::SUCCESS) { |
| + ++ready_probers; |
| + } |
|
pthatcher2
2015/10/22 05:27:51
If one prober fails to prepare, then none of them get started?
guoweis_left_chromium
2015/10/27 17:19:06
yes, I think that should be fine. If we can't DNS
|
| + if (ready_probers == total_probers) { |
| + DCHECK(prober_head); |
| + prober_head->Start(stunprober::AsyncCallback(&OnStunProbeTrialFinished)); |
|
pthatcher2
2015/10/22 05:27:51
Shouldn't the callback be called OnStunProberFinished?
guoweis_left_chromium
2015/10/27 17:19:05
Done.
|
| + } |
| +} |
| - histogram = base::Histogram::FactoryTimeGet( |
| - histogram_name, base::TimeDelta::FromMilliseconds(1), |
| - base::TimeDelta::FromSeconds(10), 50, |
| - base::Histogram::kUmaTargetedHistogramFlag); |
| - histogram->AddTime(base::TimeDelta::FromMilliseconds(stats.average_rtt_ms)); |
| +} // namespace |
| - DVLOG(1) << "Histogram '" << histogram_name.c_str() |
| - << "' = " << stats.average_rtt_ms << " ms"; |
| +StunProberWithWeakPtr::StunProberWithWeakPtr(StunProber* prober) |
| + : prober_(prober), weak_factory_(this) {} |
| - DVLOG(1) << "Shared Socket Mode: " << stats.shared_socket_mode; |
| - DVLOG(1) << "Requests sent: " << stats.num_request_sent; |
| - DVLOG(1) << "Responses received: " << stats.num_response_received; |
| - DVLOG(1) << "Target interval (ns): " << stats.target_request_interval_ns; |
| - DVLOG(1) << "Actual interval (ns): " << stats.actual_request_interval_ns; |
| - DVLOG(1) << "NAT Type: " << NatTypeNames[nat_type]; |
| - DVLOG(1) << "Host IP: " << stats.host_ip; |
| - DVLOG(1) << "Server-reflexive ips: "; |
| - for (const auto& ip : stats.srflx_addrs) |
| - DVLOG(1) << "\t" << ip; |
| +void StunProberWithWeakPtr::set_next_prober( |
| + StunProberWithWeakPtr* next_prober) { |
| + next_prober_ = next_prober; |
| } |
| -void OnStunProbeTrialFinished(StunProber* prober, int result) { |
| - if (result == StunProber::SUCCESS) |
| - SaveHistogramData(prober); |
| +void StunProberWithWeakPtr::Start(stunprober::AsyncCallback callback) { |
| + base::MessageLoop::current()->PostDelayedTask( |
| + FROM_HERE, base::Bind(&StunProberWithWeakPtr::Start, |
| + next_prober_->GetWeakPtr(), callback), |
| + base::TimeDelta::FromMilliseconds(prober_->estimated_execution_time())); |
| + prober_->Run(callback); |
| } |
| -} // namespace |
| +StunProberWithWeakPtr::~StunProberWithWeakPtr() {} |
| bool ParseStunProbeParameters(const std::string& params, |
| int* requests_per_ip, |
| int* interval_ms, |
| int* shared_socket_mode, |
| + int* reporting_batch_size, |
| + int* rounds, |
|
pthatcher2
2015/10/22 05:27:51
Do these two variables never get parsed?
|
| std::vector<rtc::SocketAddress>* servers) { |
| std::vector<std::string> stun_params = base::SplitString( |
| params, "/", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL); |
| @@ -181,42 +251,60 @@ bool ParseStunProbeParameters(const std::string& params, |
| return !servers->empty(); |
| } |
| -scoped_ptr<stunprober::StunProber> StartStunProbeTrial( |
| - const rtc::NetworkManager::NetworkList& networks, |
| - const std::string& params, |
| - rtc::PacketSocketFactory* factory) { |
| +void StartStunProbeTrial(const rtc::NetworkManager::NetworkList& networks, |
| + const std::string& params, |
| + rtc::PacketSocketFactory* factory, |
| + ListOfStunProbers* probers) { |
| DVLOG(1) << "Starting stun trial with params: " << params; |
| // If we don't have local addresses, we won't be able to determine whether |
| // we're behind NAT or not. |
| if (networks.empty()) { |
| DLOG(ERROR) << "No networks specified in StartStunProbeTrial"; |
| - return nullptr; |
| + return; |
| } |
| int requests_per_ip; |
| - int interval_ms; |
| int shared_socket_mode; |
| + int interval_ms; |
| std::vector<rtc::SocketAddress> servers; |
| if (!ParseStunProbeParameters(params, &requests_per_ip, &interval_ms, |
| - &shared_socket_mode, &servers)) { |
| - return nullptr; |
| + &shared_socket_mode, &reporting_batch_size, |
| + &total_probers, &servers)) { |
| + return; |
| } |
| - scoped_ptr<StunProber> prober( |
| - new StunProber(factory, rtc::Thread::Current(), networks)); |
| - |
| - if (!prober->Start( |
| - servers, (shared_socket_mode != 0), interval_ms, requests_per_ip, |
| - 1000, |
| - rtc::Callback2<void, StunProber*, int>(&OnStunProbeTrialFinished))) { |
| - DLOG(ERROR) << "Failed to Start in StartStunProbeTrial"; |
| - OnStunProbeTrialFinished(prober.get(), StunProber::GENERIC_FAILURE); |
| - return nullptr; |
| + int rounds = total_probers; |
| + |
| + StunProberWithWeakPtr* prev_prober = nullptr; |
| + |
| + while (rounds-- > 0) { |
|
pthatcher2
2015/10/22 05:27:51
Wouldn't this be more clear as:
for (int i = 0; i
guoweis_left_chromium
2015/10/27 17:19:06
Done.
|
| + stunprober::StunProber* prober = |
| + new StunProber(factory, rtc::Thread::Current(), networks); |
| + scoped_ptr<StunProberWithWeakPtr> prober_wp( |
| + new StunProberWithWeakPtr(prober)); |
| + if (!prober->Prepare(servers, (shared_socket_mode != 0), interval_ms, |
| + requests_per_ip, 1000, |
| + stunprober::AsyncCallback(&OnStunProberPrepared))) { |
| + DLOG(ERROR) << "Failed to Prepare in StartStunProbeTrial"; |
| + for (auto prober : *probers) { |
| + if (prober) { |
| + delete prober; |
| + } |
| + } |
|
pthatcher2
2015/10/22 05:27:51
I've seen this a few times. Should we have a "Del
|
| + probers->clear(); |
| + return; |
| + } else { |
|
pthatcher2
2015/10/22 05:27:52
You don't need an else. Since it's an early retur
guoweis_left_chromium
2015/10/27 17:19:05
Done.
|
| + if (prev_prober) { |
| + prev_prober->set_next_prober(prober_wp.get()); |
| + } else { |
| + prober_head = prober_wp.get(); |
| + } |
| + prev_prober = prober_wp.release(); |
| + probers->push_back(prev_prober); |
| + } |
| } |
| - |
| - return prober; |
| } |
| } // namespace content |