Index: content/renderer/media/webrtc/stun_field_trial.cc
diff --git a/content/renderer/media/webrtc/stun_field_trial.cc b/content/renderer/media/webrtc/stun_field_trial.cc
index 41be3588a749b09ce5071862cd892effe1095dca..98761af520b3094c086d4e189783be6987bf0659 100644
--- a/content/renderer/media/webrtc/stun_field_trial.cc
+++ b/content/renderer/media/webrtc/stun_field_trial.cc
@@ -6,8 +6,10 @@
 #include <math.h>
+#include "base/bind.h"
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/message_loop/message_loop.h"
 #include "base/metrics/histogram.h"
 #include "base/rand_util.h"
 #include "base/strings/string_number_conversions.h"
@@ -21,7 +23,6 @@
 #include "third_party/webrtc/base/socketaddress.h"
 #include "third_party/webrtc/base/thread.h"
 #include "third_party/webrtc/p2p/base/packetsocketfactory.h"
-#include "third_party/webrtc/p2p/stunprober/stunprober.h"
 using stunprober::StunProber;
@@ -65,78 +66,145 @@ int ClampProbingInterval(int interval_ms) {
 std::string HistogramName(const std::string& prefix,
                           NatType nat_type,
-                          int interval_ms) {
-  return base::StringPrintf("WebRTC.Stun.%s.%s.%dms", prefix.c_str(),
-                            NatTypeNames[nat_type], interval_ms);
+                          int interval_ms,
+                          int batch_index) {
+  return base::StringPrintf("WebRTC.Stun.%s.%s.%dms.%d", prefix.c_str(),
+                            NatTypeNames[nat_type], interval_ms, batch_index);
 }
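For reference, a minimal sketch of the metric name this produces, with made-up values and assuming a "SuccessPercent" prefix; the exact NAT type string comes from NatTypeNames, which is outside this hunk:

    // Sketch only: with interval_ms = 100 and batch_index = 2 this yields a
    // name of the form "WebRTC.Stun.SuccessPercent.<NatTypeName>.100ms.2".
    std::string name = HistogramName("SuccessPercent", nat_type, 100, 2);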
-void SaveHistogramData(StunProber* prober) {
-  StunProber::Stats stats;
-  if (!prober->GetStats(&stats))
-    return;
-
-  NatType nat_type = GetNatType(stats.nat_type);
-
-  // Use the real probe interval for reporting, converting from nanosecond to
-  // millisecond at 5ms boundary.
-  int interval_ms =
-      round(static_cast<float>(stats.actual_request_interval_ns) / 5000) * 5;
-
-  interval_ms = ClampProbingInterval(interval_ms);
-
-  UMA_HISTOGRAM_ENUMERATION("WebRTC.NAT.Metrics", nat_type, NAT_TYPE_MAX);
-
-  std::string histogram_name =
-      HistogramName("SuccessPercent", nat_type, interval_ms);
-
-  // Mimic the same behavior as UMA_HISTOGRAM_PERCENTAGE. We can't use that
-  // macro as the histogram name is determined dynamically.
-  base::HistogramBase* histogram = base::Histogram::FactoryGet(
-      histogram_name, 1, 101, 102, base::Histogram::kUmaTargetedHistogramFlag);
-  histogram->Add(stats.success_percent);
+}  // namespace
-  DVLOG(1) << "Histogram '" << histogram_name.c_str()
-           << "' = " << stats.success_percent;
+StunProberWithWeakPtr::StunProberWithWeakPtr(StunProber* prober)
+    : prober_(prober), weak_factory_(this) {}
-  histogram_name = HistogramName("ResponseLatency", nat_type, interval_ms);
+void StunProberWithWeakPtr::set_next_prober(
+    StunProberWithWeakPtr* next_prober) {
+  DCHECK(!next_prober_);
+  next_prober_ = next_prober;
+}
-  histogram = base::Histogram::FactoryTimeGet(
-      histogram_name, base::TimeDelta::FromMilliseconds(1),
-      base::TimeDelta::FromSeconds(10), 50,
-      base::Histogram::kUmaTargetedHistogramFlag);
-  histogram->AddTime(base::TimeDelta::FromMilliseconds(stats.average_rtt_ms));
+void StunProberWithWeakPtr::Start(StunProber::Observer* observer) {
+  if (next_prober_) {
+    base::MessageLoop::current()->PostDelayedTask(
+        FROM_HERE, base::Bind(&StunProberWithWeakPtr::Start,
+                              next_prober_->GetWeakPtr(), observer),
Sergey Ulanov
2015/10/30 18:27:49
So if I understand correctly the reason you need w
guoweis_left_chromium
2015/10/30 21:47:13
Done.
+        base::TimeDelta::FromMilliseconds(prober_->estimated_execution_time()));
+  }
+  prober_->Start(observer);
+}
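To spell out the chaining: each prober schedules the next one after its own estimated execution time, and because the callback is bound through a weak pointer, the delayed task is silently dropped if the chain has already been destroyed. Below is a minimal sketch of that pattern using the same base APIs included by this file; the Step class and ScheduleNext helper are illustrative names, not part of this CL:

    // Sketch: a delayed task bound to a WeakPtr becomes a no-op if the target
    // object is deleted before the delay elapses, avoiding a use-after-free.
    class Step {
     public:
      Step() : weak_factory_(this) {}
      void Run() {}
      base::WeakPtr<Step> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }

     private:
      base::WeakPtrFactory<Step> weak_factory_;  // Must be the last member.
    };

    void ScheduleNext(Step* next, int delay_ms) {
      base::MessageLoop::current()->PostDelayedTask(
          FROM_HERE, base::Bind(&Step::Run, next->GetWeakPtr()),
          base::TimeDelta::FromMilliseconds(delay_ms));
    }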
-  DVLOG(1) << "Histogram '" << histogram_name.c_str()
-           << "' = " << stats.average_rtt_ms << " ms";
+StunProberWithWeakPtr::~StunProberWithWeakPtr() {
+ if (next_prober_) { |
Alexei Svitkine (slow)
2015/10/30 17:32:18
Nit: No {}'s
+ base::MessageLoop::current()->DeleteSoon(FROM_HERE, next_prober_); |
Alexei Svitkine (slow)
2015/10/30 17:32:18
Can you add a comment why DeleteSoon is being used
Sergey Ulanov
2015/10/30 18:27:49
So if you have a chain of 5 probers it would take
Sergey Ulanov
2015/10/30 18:27:49
next_prober_ is effectively owned by StunProberWithWeakPtr
+  }
+}
-  DVLOG(1) << "Shared Socket Mode: " << stats.shared_socket_mode;
-  DVLOG(1) << "Requests sent: " << stats.num_request_sent;
-  DVLOG(1) << "Responses received: " << stats.num_response_received;
-  DVLOG(1) << "Target interval (ns): " << stats.target_request_interval_ns;
-  DVLOG(1) << "Actual interval (ns): " << stats.actual_request_interval_ns;
-  DVLOG(1) << "NAT Type: " << NatTypeNames[nat_type];
-  DVLOG(1) << "Host IP: " << stats.host_ip;
-  DVLOG(1) << "Server-reflexive ips: ";
-  for (const auto& ip : stats.srflx_addrs)
-    DVLOG(1) << "\t" << ip;
+StunProberTrial::StunProberTrial(rtc::NetworkManager* network_manager,
+                                 const std::string& params,
+                                 rtc::PacketSocketFactory* factory)
+    : network_manager_(network_manager), params_(params), factory_(factory) {
+  // We have to connect to the signal to avoid a race condition: if the
+  // network manager hasn't received the network update when we start, the
+  // StunProber will just fail.
+  network_manager_->SignalNetworksChanged.connect(
+      this, &StunProberTrial::OnNetworksChanged);
+  network_manager_->StartUpdating();
 }
-void OnStunProbeTrialFinished(StunProber* prober, int result) {
-  if (result == StunProber::SUCCESS)
-    SaveHistogramData(prober);
+StunProberTrial::~StunProberTrial() {
+ DCHECK(thread_checker_.CalledOnValidThread()); |
Sergey Ulanov
2015/10/30 18:27:49
Don't need this. thread_checker_ verifies that it'
guoweis_left_chromium
2015/10/30 21:47:13
Done.
 }
-}  // namespace
+void StunProberTrial::SaveHistogramData() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  NatType nat_type = NAT_TYPE_MAX;
+  int interval_ms = 0;
+  int count = 0;
+  int total_sent = 0;
+  int total_recv = 0;
+  for (StunProberWithWeakPtr* prober = prober_head_.get(); prober != nullptr;
+       prober = prober->GetNextProber()) {
+    ++count;
+
+    // Get the stats.
+    StunProber::Stats stats;
+    if (!prober->GetStats(&stats))
+      return;
+
+    // Check if the NAT type is consistent.
+    if (nat_type == NAT_TYPE_MAX) {
+      nat_type = GetNatType(stats.nat_type);
+      // If we can't figure out the NAT type at the beginning, just return.
+      if (nat_type == NAT_TYPE_UNKNOWN)
+        return;
+    }
+    // For subsequent probers, we might get unknown as the NAT type if all the
+    // bindings fail, but that's OK.
+    else if (nat_type != GetNatType(stats.nat_type) &&
Alexei Svitkine (slow)
2015/10/30 17:32:18
Nit: Put this on the same line as }. Maybe move th
+ nat_type != NAT_TYPE_UNKNOWN) |
Alexei Svitkine (slow)
2015/10/30 17:32:19
Nit: {}'s
+      return;
+
+    // Check that the interval is consistent.
+    // Use the real probe interval for reporting, converting from nanoseconds
+    // to milliseconds and rounding to a 5 ms boundary.
+    int new_interval_ms = ClampProbingInterval(
+        round(static_cast<float>(stats.actual_request_interval_ns) / 5000) * 5);
+    if (interval_ms == 0) {
+      interval_ms = new_interval_ms;
+    } else if (interval_ms != new_interval_ms)
Alexei Svitkine (slow)
2015/10/30 17:32:19
Nit: {}'s to be consistent with if
+      return;
+
+    // Sum up the total sent and received packets.
+    total_sent += stats.num_request_sent;
+    total_recv += stats.num_response_received;
+
+    if (count % batch_size_ > 0)
+      continue;
+
+    if (total_sent == 0) {
+      total_recv = 0;
+      continue;
+    }
+
+    int success_rate = total_recv * 100 / total_sent;
+    std::string histogram_name = HistogramName(
+        "SuccessPercent", nat_type, interval_ms, count / batch_size_);
+
+    // Mimic the same behavior as UMA_HISTOGRAM_PERCENTAGE. We can't use
+    // that macro as the histogram name is determined dynamically.
+    base::HistogramBase* histogram =
+        base::Histogram::FactoryGet(histogram_name, 1, 101, 102,
+                                    base::Histogram::kUmaTargetedHistogramFlag);
+    histogram->Add(success_rate);
+
+    DVLOG(1) << "Histogram '" << histogram_name.c_str()
+             << "' = " << success_rate;
+
+    DVLOG(1) << "Shared Socket Mode: " << stats.shared_socket_mode;
+    DVLOG(1) << "Requests sent: " << total_sent;
+    DVLOG(1) << "Responses received: " << total_recv;
+    DVLOG(1) << "Target interval (ns): " << stats.target_request_interval_ns;
+    DVLOG(1) << "Actual interval (ns): " << stats.actual_request_interval_ns;
+    DVLOG(1) << "NAT Type: " << NatTypeNames[nat_type];
+    DVLOG(1) << "Host IP: " << stats.host_ip;
+
+    total_sent = 0;
+    total_recv = 0;
+  }
+}
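Put differently, samples are now recorded once per batch rather than once per prober: every batch_size_ probers, the accumulated request/response counts are folded into a single percentage and the accumulators are reset. A worked example with made-up numbers, assuming batch_size_ = 5:

    // Hypothetical batch: five probers sent 50 requests in total and received
    // 42 responses, so the single recorded sample is 42 * 100 / 50 = 84.
    int total_sent = 50;
    int total_recv = 42;
    int success_rate = total_recv * 100 / total_sent;  // == 84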
-bool ParseStunProbeParameters(const std::string& params,
-                              int* requests_per_ip,
-                              int* interval_ms,
-                              int* shared_socket_mode,
-                              std::vector<rtc::SocketAddress>* servers) {
+bool StunProberTrial::ParseParameters(
+    const std::string& params,
+    int* requests_per_ip,
+    int* interval_ms,
+    int* shared_socket_mode,
+    int* batch_size,
+    int* total_batches,
+ std::vector<rtc::SocketAddress>* servers) { |
Alexei Svitkine (slow)
2015/10/30 17:32:18
Nit: Suggest making a struct to hold the different
   std::vector<std::string> stun_params = base::SplitString(
       params, "/", base::TRIM_WHITESPACE, base::SPLIT_WANT_ALL);
-  if (stun_params.size() < 4) {
+  if (stun_params.size() < 5) {
     DLOG(ERROR) << "Not enough parameters specified in StartStunProbeTrial";
     return false;
   }
@@ -168,6 +236,22 @@ bool ParseStunProbeParameters(const std::string& params,
   }
   param++;
+  if (param->empty()) {
+    *batch_size = 5;
+  } else if (!base::StringToInt(*param, batch_size)) {
+    DLOG(ERROR) << "Failed to parse batch_size in StartStunProbeTrial";
+    return false;
+  }
+  param++;
+
+  if (param->empty()) {
+    *total_batches = 5;
+  } else if (!base::StringToInt(*param, total_batches)) {
+    DLOG(ERROR) << "Failed to parse total_batches in StartStunProbeTrial";
+    return false;
+  }
+  param++;
+
   while (param != stun_params.end() && !param->empty()) {
     rtc::SocketAddress server;
     if (!server.FromString(*param)) {
@@ -181,42 +265,77 @@ bool ParseStunProbeParameters(const std::string& params,
   return !servers->empty();
 }
-scoped_ptr<stunprober::StunProber> StartStunProbeTrial(
-    const rtc::NetworkManager::NetworkList& networks,
-    const std::string& params,
-    rtc::PacketSocketFactory* factory) {
-  DVLOG(1) << "Starting stun trial with params: " << params;
+void StunProberTrial::OnNetworksChanged() {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DVLOG(1) << "Starting stun trial with params: " << params_;
+  rtc::NetworkManager::NetworkList networks;
+  network_manager_->GetNetworks(&networks);
   // If we don't have local addresses, we won't be able to determine whether
   // we're behind NAT or not.
   if (networks.empty()) {
     DLOG(ERROR) << "No networks specified in StartStunProbeTrial";
-    return nullptr;
+    return;
   }
+  network_manager_->StopUpdating();
+  network_manager_->SignalNetworksChanged.disconnect(this);
+
   int requests_per_ip;
-  int interval_ms;
   int shared_socket_mode;
+  int interval_ms;
+  int total_batches;
   std::vector<rtc::SocketAddress> servers;
-  if (!ParseStunProbeParameters(params, &requests_per_ip, &interval_ms,
-                                &shared_socket_mode, &servers)) {
-    return nullptr;
+  if (!ParseParameters(params_, &requests_per_ip, &interval_ms,
+                       &shared_socket_mode, &batch_size_, &total_batches,
+                       &servers)) {
+    return;
   }
-  scoped_ptr<StunProber> prober(
-      new StunProber(factory, rtc::Thread::Current(), networks));
+  total_probers_ = total_batches * batch_size_;
+
+  StunProberWithWeakPtr* prev_prober = nullptr;
+
+  for (int i = 0; i < total_probers_; i++) {
+    stunprober::StunProber* prober =
+        new StunProber(factory_, rtc::Thread::Current(), networks);
+    scoped_ptr<StunProberWithWeakPtr> prober_wp(
+        new StunProberWithWeakPtr(prober));
+    if (!prober->Prepare(servers, (shared_socket_mode != 0), interval_ms,
+                         requests_per_ip, 1000, this)) {
+      DLOG(ERROR) << "Failed to Prepare in StartStunProbeTrial";
+      return;
+    }
+
+    if (prev_prober)
+      prev_prober->set_next_prober(prober_wp.get());
+    else
+      prober_head_.reset(prober_wp.get());
-  if (!prober->Start(
-          servers, (shared_socket_mode != 0), interval_ms, requests_per_ip,
-          1000,
-          rtc::Callback2<void, StunProber*, int>(&OnStunProbeTrialFinished))) {
-    DLOG(ERROR) << "Failed to Start in StartStunProbeTrial";
-    OnStunProbeTrialFinished(prober.get(), StunProber::GENERIC_FAILURE);
-    return nullptr;
+    prev_prober = prober_wp.release();
   }
+}
-  return prober;
+void StunProberTrial::OnFinished(StunProber* prober,
+                                 StunProber::Status result) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (result == StunProber::SUCCESS)
+    ++finished_probers_;
+
+  if (finished_probers_ == total_probers_)
+    SaveHistogramData();
 }
+void StunProberTrial::OnPrepared(StunProber* prober,
+                                 StunProber::Status result) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  if (result == StunProber::SUCCESS)
+    ++ready_probers_;
+
+  if (ready_probers_ == total_probers_) {
+ DCHECK(prober_head_); |
Alexei Svitkine (slow)
2015/10/30 17:32:19
Nit: I think we usually omit such DCHECKs since th
+    prober_head_->Start(this);
+  }
+}
 }  // namespace content
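For context, once constructed the trial object drives itself: it subscribes to network-change notifications, builds and chains the probers when a network list is available, and records the histograms once all probers have finished successfully. A hedged sketch of a call site (ownership and variable names here are assumptions, not part of this CL):

    // Sketch: the caller only needs to keep the trial alive; it starts itself
    // when the network manager signals that networks are available.
    scoped_ptr<StunProberTrial> trial(
        new StunProberTrial(network_manager, params, socket_factory));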