// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// The tests in this file attempt to verify the following through simulation:
// a) That a server experiencing overload will actually benefit from the
//    anti-DDoS throttling logic, i.e. that its traffic spike will subside
//    and be distributed over a longer period of time;
// b) That "well-behaved" clients of a server under DDoS attack actually
//    benefit from the anti-DDoS throttling logic; and
// c) That the approximate increase in "perceived downtime" introduced by
//    anti-DDoS throttling for various different actual downtimes is what
//    we expect it to be.

#include <algorithm>
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <limits>
#include <vector>

#include "base/environment.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/scoped_vector.h"
#include "base/rand_util.h"
#include "base/time/time.h"
#include "net/base/request_priority.h"
#include "net/url_request/url_request.h"
#include "net/url_request/url_request_context.h"
#include "net/url_request/url_request_test_util.h"
#include "net/url_request/url_request_throttler_manager.h"
#include "net/url_request/url_request_throttler_test_support.h"
#include "testing/gtest/include/gtest/gtest.h"

using base::TimeDelta;
using base::TimeTicks;

namespace net {
namespace {

// Set this variable in your environment if you want to see verbose results
// of the simulation tests.
const char kShowSimulationVariableName[] = "SHOW_SIMULATION_RESULTS";

// Prints output only if a given environment variable is set. We use this
// so that output meant for human evaluation is not printed when the test
// runs unsupervised.
void VerboseOut(const char* format, ...) {
  static bool have_checked_environment = false;
  static bool should_print = false;
  if (!have_checked_environment) {
    have_checked_environment = true;
    scoped_ptr<base::Environment> env(base::Environment::Create());
    if (env->HasVar(kShowSimulationVariableName))
      should_print = true;
  }

  if (should_print) {
    va_list arglist;
    va_start(arglist, format);
    vprintf(format, arglist);
    va_end(arglist);
  }
}

// A simple two-phase discrete time simulation. Actors are added in the order
// they should take action at every tick of the clock. Ticks of the clock
// are two-phase:
// - Phase 1 advances every actor's time to a new absolute time.
// - Phase 2 asks each actor to perform their action.
class DiscreteTimeSimulation {
 public:
  class Actor {
   public:
    virtual ~Actor() {}
    virtual void AdvanceTime(const TimeTicks& absolute_time) = 0;
    virtual void PerformAction() = 0;
  };

  DiscreteTimeSimulation() {}

  // Adds an |actor| to the simulation. The client of the simulation maintains
  // ownership of |actor| and must ensure its lifetime exceeds that of the
  // simulation. Actors should be added in the order you wish for them to
  // act at each tick of the simulation.
  void AddActor(Actor* actor) {
    actors_.push_back(actor);
  }

  // Runs the simulation, pretending |time_between_ticks| passes from one
  // tick to the next. The simulation starts at the zero (default-constructed)
  // TimeTicks value and stops once the simulated duration is equal to or
  // greater than |maximum_simulated_duration|.
  void RunSimulation(const TimeDelta& maximum_simulated_duration,
                     const TimeDelta& time_between_ticks) {
    TimeTicks start_time = TimeTicks();
    TimeTicks now = start_time;
    while ((now - start_time) <= maximum_simulated_duration) {
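      // Phase 1: move every actor's clock forward to |now|.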
      for (std::vector<Actor*>::iterator it = actors_.begin();
           it != actors_.end();
           ++it) {
        (*it)->AdvanceTime(now);
      }
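      // Phase 2: let each actor perform its action for this tick.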
      for (std::vector<Actor*>::iterator it = actors_.begin();
           it != actors_.end();
           ++it) {
        (*it)->PerformAction();
      }

      now += time_between_ticks;
    }
  }

 private:
  std::vector<Actor*> actors_;

  DISALLOW_COPY_AND_ASSIGN(DiscreteTimeSimulation);
};

// Represents a web server in a simulation of a server under attack by
// a lot of clients. Must be added to the simulation's list of actors
// after all |Requester| objects.
class Server : public DiscreteTimeSimulation::Actor {
 public:
  Server(int max_queries_per_tick, double request_drop_ratio)
      : max_queries_per_tick_(max_queries_per_tick),
        request_drop_ratio_(request_drop_ratio),
        num_overloaded_ticks_remaining_(0),
        num_current_tick_queries_(0),
        num_overloaded_ticks_(0),
        max_experienced_queries_per_tick_(0),
        mock_request_(context_.CreateRequest(
            GURL(), DEFAULT_PRIORITY, NULL, NULL)) {}

  void SetDowntime(const TimeTicks& start_time, const TimeDelta& duration) {
    start_downtime_ = start_time;
    end_downtime_ = start_time + duration;
  }

  void AdvanceTime(const TimeTicks& absolute_time) override {
    now_ = absolute_time;
  }

  void PerformAction() override {
    // We are inserted at the end of the actors' list, so all Requester
    // instances have already done their bit.
    if (num_current_tick_queries_ > max_experienced_queries_per_tick_)
      max_experienced_queries_per_tick_ = num_current_tick_queries_;

    if (num_current_tick_queries_ > max_queries_per_tick_) {
      // We pretend the server fails for the next several ticks after it
      // gets overloaded.
      num_overloaded_ticks_remaining_ = 5;
      ++num_overloaded_ticks_;
    } else if (num_overloaded_ticks_remaining_ > 0) {
      --num_overloaded_ticks_remaining_;
    }

    requests_per_tick_.push_back(num_current_tick_queries_);
    num_current_tick_queries_ = 0;
  }

  // This is called by Requester. It returns the response code from
  // the server.
  int HandleRequest() {
    ++num_current_tick_queries_;
    if (!start_downtime_.is_null() &&
        start_downtime_ < now_ && now_ < end_downtime_) {
      // For the simulation measuring the increase in perceived
      // downtime, it might be interesting to count separately the
      // queries seen by the server (assuming a front-end reverse proxy
      // is what actually serves up the 503s in this case) so that we could
      // visualize the traffic spike seen by the server when it comes up,
      // which would in many situations be ameliorated by the anti-DDoS
      // throttling.
      return 503;
    }
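    // While the server is overloaded (or has just exceeded its per-tick
    // capacity), reject a random fraction |request_drop_ratio_| of requests
    // with a 503.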
    if ((num_overloaded_ticks_remaining_ > 0 ||
         num_current_tick_queries_ > max_queries_per_tick_) &&
        base::RandDouble() < request_drop_ratio_) {
      return 503;
    }

    return 200;
  }

  int num_overloaded_ticks() const {
    return num_overloaded_ticks_;
  }

  int max_experienced_queries_per_tick() const {
    return max_experienced_queries_per_tick_;
  }

  const URLRequest& mock_request() const {
    return *mock_request_.get();
  }

  std::string VisualizeASCII(int terminal_width) {
    // Account for | characters we place at left of graph.
    terminal_width -= 1;

    VerboseOut("Overloaded for %d of %d ticks.\n",
               num_overloaded_ticks_,
               static_cast<int>(requests_per_tick_.size()));
    VerboseOut("Got maximum of %d requests in a tick.\n\n",
               max_experienced_queries_per_tick_);

    VerboseOut("Traffic graph:\n\n");

    // Printing the graph like this is a bit overkill, but was very useful
    // while developing the various simulations to see if they were testing
    // the corner cases we want to simulate.

    // Find the smallest number of whole ticks we need to group into a
    // column that will let all ticks fit into the column width we have.
    int num_ticks = requests_per_tick_.size();
    double ticks_per_column_exact =
        static_cast<double>(num_ticks) / static_cast<double>(terminal_width);
    int ticks_per_column = std::ceil(ticks_per_column_exact);
    DCHECK_GE(ticks_per_column * terminal_width, num_ticks);

    // Sum up the column values.
    int num_columns = num_ticks / ticks_per_column;
    if (num_ticks % ticks_per_column)
      ++num_columns;
    DCHECK_LE(num_columns, terminal_width);
    scoped_ptr<int[]> columns(new int[num_columns]);
    for (int tx = 0; tx < num_ticks; ++tx) {
      int cx = tx / ticks_per_column;
      if (tx % ticks_per_column == 0)
        columns[cx] = 0;
      columns[cx] += requests_per_tick_[tx];
    }

    // Find the lowest integer divisor that will let the column values
    // be represented in a graph of maximum height 50.
    int max_value = 0;
    for (int cx = 0; cx < num_columns; ++cx)
      max_value = std::max(max_value, columns[cx]);
    const int kNumRows = 50;
    double row_divisor_exact = max_value / static_cast<double>(kNumRows);
    int row_divisor = std::ceil(row_divisor_exact);
    DCHECK_GE(row_divisor * kNumRows, max_value);

    // To show the overload line, we calculate the appropriate value.
    int overload_value = max_queries_per_tick_ * ticks_per_column;

    // When num_ticks is not a whole multiple of ticks_per_column, the last
    // column includes fewer ticks than the others. In this case, don't
    // print it so that we don't show an inconsistent value.
    int num_printed_columns = num_columns;
    if (num_ticks % ticks_per_column)
      --num_printed_columns;

    // This is a top-to-bottom traversal of rows, left-to-right per row.
    std::string output;
    for (int rx = 0; rx < kNumRows; ++rx) {
      int range_min = (kNumRows - rx - 1) * row_divisor;
      int range_max = range_min + row_divisor;
      if (range_min == 0)
        range_min = -1;  // Make 0 values fit in the bottom range.
      output.append("|");
      for (int cx = 0; cx < num_printed_columns; ++cx) {
        char block = ' ';
        // Show the overload line.
        if (range_min < overload_value && overload_value <= range_max)
          block = '-';

        // Preferentially, show the graph line.
        if (range_min < columns[cx] && columns[cx] <= range_max)
          block = '#';

        output.append(1, block);
      }
      output.append("\n");
    }
    output.append("|");
    output.append(num_printed_columns, '=');

    return output;
  }

  const URLRequestContext& context() const { return context_; }

 private:
  TimeTicks now_;
  TimeTicks start_downtime_;  // Can be 0 to say "no downtime".
  TimeTicks end_downtime_;
  const int max_queries_per_tick_;
  const double request_drop_ratio_;  // Ratio of requests to 503 when failing.
  int num_overloaded_ticks_remaining_;
  int num_current_tick_queries_;
  int num_overloaded_ticks_;
  int max_experienced_queries_per_tick_;
  std::vector<int> requests_per_tick_;

  TestURLRequestContext context_;
  scoped_ptr<URLRequest> mock_request_;

  DISALLOW_COPY_AND_ASSIGN(Server);
};

// Mock throttler entry used by Requester class.
class MockURLRequestThrottlerEntry : public URLRequestThrottlerEntry {
 public:
  explicit MockURLRequestThrottlerEntry(URLRequestThrottlerManager* manager)
      : URLRequestThrottlerEntry(manager, std::string()),
        mock_backoff_entry_(&backoff_policy_) {}

  const BackoffEntry* GetBackoffEntry() const override {
    return &mock_backoff_entry_;
  }

  BackoffEntry* GetBackoffEntry() override { return &mock_backoff_entry_; }

  TimeTicks ImplGetTimeNow() const override { return fake_now_; }

  void SetFakeNow(const TimeTicks& fake_time) {
    fake_now_ = fake_time;
    mock_backoff_entry_.set_fake_now(fake_time);
  }

  TimeTicks fake_now() const {
    return fake_now_;
  }

 protected:
  ~MockURLRequestThrottlerEntry() override {}

 private:
  TimeTicks fake_now_;
  MockBackoffEntry mock_backoff_entry_;
};

// Registry of results for a class of |Requester| objects (e.g. attackers vs.
// regular clients).
class RequesterResults {
 public:
  RequesterResults()
      : num_attempts_(0), num_successful_(0), num_failed_(0), num_blocked_(0) {
  }

  void AddSuccess() {
    ++num_attempts_;
    ++num_successful_;
  }

  void AddFailure() {
    ++num_attempts_;
    ++num_failed_;
  }

  void AddBlocked() {
    ++num_attempts_;
    ++num_blocked_;
  }

  int num_attempts() const { return num_attempts_; }
  int num_successful() const { return num_successful_; }
  int num_failed() const { return num_failed_; }
  int num_blocked() const { return num_blocked_; }

  double GetBlockedRatio() {
    DCHECK(num_attempts_);
    return static_cast<double>(num_blocked_) /
        static_cast<double>(num_attempts_);
  }

  double GetSuccessRatio() {
    DCHECK(num_attempts_);
    return static_cast<double>(num_successful_) /
        static_cast<double>(num_attempts_);
  }

  void PrintResults(const char* class_description) {
    if (num_attempts_ == 0) {
      VerboseOut("No data for %s\n", class_description);
      return;
    }

    VerboseOut("Requester results for %s\n", class_description);
    VerboseOut(" %d attempts\n", num_attempts_);
    VerboseOut(" %d successes\n", num_successful_);
    VerboseOut(" %d 5xx responses\n", num_failed_);
    VerboseOut(" %d requests blocked\n", num_blocked_);
    VerboseOut(" %.2f success ratio\n", GetSuccessRatio());
    VerboseOut(" %.2f blocked ratio\n", GetBlockedRatio());
    VerboseOut("\n");
  }

 private:
  int num_attempts_;
  int num_successful_;
  int num_failed_;
  int num_blocked_;
};

// Represents a Requester in a simulated DDoS situation that periodically
// requests a specific resource.
class Requester : public DiscreteTimeSimulation::Actor {
 public:
  Requester(MockURLRequestThrottlerEntry* throttler_entry,
            const TimeDelta& time_between_requests,
            Server* server,
            RequesterResults* results)
      : throttler_entry_(throttler_entry),
        time_between_requests_(time_between_requests),
        last_attempt_was_failure_(false),
        server_(server),
        results_(results) {
    DCHECK(server_);
  }

  void AdvanceTime(const TimeTicks& absolute_time) override {
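    // |time_of_last_success_| starts out null; initialize it on the first
    // tick so later downtime measurements have a baseline.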
    if (time_of_last_success_.is_null())
      time_of_last_success_ = absolute_time;

    throttler_entry_->SetFakeNow(absolute_time);
  }

  void PerformAction() override {
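    // Jitter this attempt's interval by up to |request_jitter_|, added or
    // subtracted with equal probability.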
    TimeDelta effective_delay = time_between_requests_;
    TimeDelta current_jitter = TimeDelta::FromMilliseconds(
        request_jitter_.InMilliseconds() * base::RandDouble());
    if (base::RandInt(0, 1)) {
      effective_delay -= current_jitter;
    } else {
      effective_delay += current_jitter;
    }
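    // Only issue a request once more than |effective_delay| has passed since
    // the last attempt; otherwise this actor stays idle for the tick.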
    if (throttler_entry_->fake_now() - time_of_last_attempt_ >
        effective_delay) {
      if (!throttler_entry_->ShouldRejectRequest(
              server_->mock_request(),
              server_->context().network_delegate())) {
        int status_code = server_->HandleRequest();
        MockURLRequestThrottlerHeaderAdapter response_headers(status_code);
        throttler_entry_->UpdateWithResponse(std::string(), &response_headers);

        if (status_code == 200) {
          if (results_)
            results_->AddSuccess();

          if (last_attempt_was_failure_) {
            last_downtime_duration_ =
                throttler_entry_->fake_now() - time_of_last_success_;
          }

          time_of_last_success_ = throttler_entry_->fake_now();
          last_attempt_was_failure_ = false;
        } else {
          if (results_)
            results_->AddFailure();
          last_attempt_was_failure_ = true;
        }
      } else {
        if (results_)
          results_->AddBlocked();
        last_attempt_was_failure_ = true;
      }

      time_of_last_attempt_ = throttler_entry_->fake_now();
    }
  }

  // Adds a delay until the first request, equal to a uniformly distributed
  // value between now and now + max_delay.
  void SetStartupJitter(const TimeDelta& max_delay) {
    int delay_ms = base::RandInt(0, max_delay.InMilliseconds());
    time_of_last_attempt_ = TimeTicks() +
        TimeDelta::FromMilliseconds(delay_ms) - time_between_requests_;
  }

  void SetRequestJitter(const TimeDelta& request_jitter) {
    request_jitter_ = request_jitter;
  }

  TimeDelta last_downtime_duration() const { return last_downtime_duration_; }

 private:
  scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry_;
  const TimeDelta time_between_requests_;
  TimeDelta request_jitter_;
  TimeTicks time_of_last_attempt_;
  TimeTicks time_of_last_success_;
  bool last_attempt_was_failure_;
  TimeDelta last_downtime_duration_;
  Server* const server_;
  RequesterResults* const results_;  // May be NULL.

  DISALLOW_COPY_AND_ASSIGN(Requester);
};

void SimulateAttack(Server* server,
                    RequesterResults* attacker_results,
                    RequesterResults* client_results,
                    bool enable_throttling) {
  const size_t kNumAttackers = 50;
  const size_t kNumClients = 50;
  DiscreteTimeSimulation simulation;
  URLRequestThrottlerManager manager;
  ScopedVector<Requester> requesters;
  for (size_t i = 0; i < kNumAttackers; ++i) {
    // Use a tiny time_between_requests so the attackers will ping the
    // server at every tick of the simulation.
    scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
        new MockURLRequestThrottlerEntry(&manager));
    if (!enable_throttling)
      throttler_entry->DisableBackoffThrottling();

    Requester* attacker = new Requester(throttler_entry.get(),
                                        TimeDelta::FromMilliseconds(1),
                                        server,
                                        attacker_results);
    attacker->SetStartupJitter(TimeDelta::FromSeconds(120));
    requesters.push_back(attacker);
    simulation.AddActor(attacker);
  }
  for (size_t i = 0; i < kNumClients; ++i) {
    // Normal clients only make requests every 2 minutes, plus/minus 1 minute.
    scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
        new MockURLRequestThrottlerEntry(&manager));
    if (!enable_throttling)
      throttler_entry->DisableBackoffThrottling();

    Requester* client = new Requester(throttler_entry.get(),
                                      TimeDelta::FromMinutes(2),
                                      server,
                                      client_results);
    client->SetStartupJitter(TimeDelta::FromSeconds(120));
    client->SetRequestJitter(TimeDelta::FromMinutes(1));
    requesters.push_back(client);
    simulation.AddActor(client);
  }
  simulation.AddActor(server);

  simulation.RunSimulation(TimeDelta::FromMinutes(6),
                           TimeDelta::FromSeconds(1));
}

TEST(URLRequestThrottlerSimulation, HelpsInAttack) {
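  // Both servers handle up to 30 requests per tick and, while overloaded,
  // reject every excess request with a 503 (drop ratio 1.0).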
  Server unprotected_server(30, 1.0);
  RequesterResults unprotected_attacker_results;
  RequesterResults unprotected_client_results;
  Server protected_server(30, 1.0);
  RequesterResults protected_attacker_results;
  RequesterResults protected_client_results;
  SimulateAttack(&unprotected_server,
                 &unprotected_attacker_results,
                 &unprotected_client_results,
                 false);
  SimulateAttack(&protected_server,
                 &protected_attacker_results,
                 &protected_client_results,
                 true);

  // These assert that the DDoS protection actually benefits the
  // server. Manual inspection of the traffic graphs will show this
  // even more clearly.
  EXPECT_GT(unprotected_server.num_overloaded_ticks(),
            protected_server.num_overloaded_ticks());
  EXPECT_GT(unprotected_server.max_experienced_queries_per_tick(),
            protected_server.max_experienced_queries_per_tick());

  // These assert that the DDoS protection actually benefits non-malicious
  // (and non-degenerate/accidentally DDoSing) users.
  EXPECT_LT(protected_client_results.GetBlockedRatio(),
            protected_attacker_results.GetBlockedRatio());
  EXPECT_GT(protected_client_results.GetSuccessRatio(),
            unprotected_client_results.GetSuccessRatio());

  // The rest is just for optional manual evaluation of the results;
  // in particular the traffic pattern is interesting.

  VerboseOut("\nUnprotected server's results:\n\n");
  VerboseOut(unprotected_server.VisualizeASCII(132).c_str());
  VerboseOut("\n\n");
  VerboseOut("Protected server's results:\n\n");
  VerboseOut(protected_server.VisualizeASCII(132).c_str());
  VerboseOut("\n\n");

  unprotected_attacker_results.PrintResults(
      "attackers attacking unprotected server.");
  unprotected_client_results.PrintResults(
      "normal clients making requests to unprotected server.");
  protected_attacker_results.PrintResults(
      "attackers attacking protected server.");
  protected_client_results.PrintResults(
      "normal clients making requests to protected server.");
}

// Returns the downtime perceived by the client, as a ratio of the
// actual downtime.
double SimulateDowntime(const TimeDelta& duration,
                        const TimeDelta& average_client_interval,
                        bool enable_throttling) {
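  // Simulate at a resolution of 200 ticks per downtime duration, and place
  // the downtime in the middle of the simulated period (which lasts twice
  // the downtime).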
  TimeDelta time_between_ticks = duration / 200;
  TimeTicks start_downtime = TimeTicks() + (duration / 2);

  // A server that never rejects requests, but will go down for maintenance.
  Server server(std::numeric_limits<int>::max(), 1.0);
  server.SetDowntime(start_downtime, duration);

  URLRequestThrottlerManager manager;
  scoped_refptr<MockURLRequestThrottlerEntry> throttler_entry(
      new MockURLRequestThrottlerEntry(&manager));
  if (!enable_throttling)
    throttler_entry->DisableBackoffThrottling();

  Requester requester(
      throttler_entry.get(), average_client_interval, &server, NULL);
  requester.SetStartupJitter(duration / 3);
  requester.SetRequestJitter(average_client_interval);

  DiscreteTimeSimulation simulation;
  simulation.AddActor(&requester);
  simulation.AddActor(&server);

  simulation.RunSimulation(duration * 2, time_between_ticks);

  return static_cast<double>(
      requester.last_downtime_duration().InMilliseconds()) /
      static_cast<double>(duration.InMilliseconds());
}

TEST(URLRequestThrottlerSimulation, PerceivedDowntimeRatio) {
  struct Stats {
    // Interval within which we expect the ratio of perceived downtime with
    // anti-DDoS throttling enabled to perceived downtime with it disabled
    // to fall.
    //
    // The expected interval depends on two things: the exponential back-off
    // policy encoded in URLRequestThrottlerEntry, and the test or set of
    // tests that the Stats object is tracking (e.g. a test where the client
    // retries very rapidly on a very long downtime will tend to increase the
    // number).
    //
    // To determine an appropriate new interval when parameters have changed,
    // run the test a few times (you may have to Ctrl-C out of it after a few
    // seconds) and choose an interval to which the test converges quickly
    // and reliably. Then set the new interval, and run the test e.g. 20 times
    // in succession to make sure it never takes an obscenely long time to
    // converge to this interval.
    double expected_min_increase;
    double expected_max_increase;

    size_t num_runs;
    double total_ratio_unprotected;
    double total_ratio_protected;
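    // Returns true once the average downtime-increase ratio across all runs
    // falls within [expected_min_increase, expected_max_increase]; also
    // writes the ratio to |increase_ratio_out| if it is non-NULL.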
    bool DidConverge(double* increase_ratio_out) {
      double unprotected_ratio = total_ratio_unprotected / num_runs;
      double protected_ratio = total_ratio_protected / num_runs;
      double increase_ratio = protected_ratio / unprotected_ratio;
      if (increase_ratio_out)
        *increase_ratio_out = increase_ratio;
      return expected_min_increase <= increase_ratio &&
          increase_ratio <= expected_max_increase;
    }

    void ReportTrialResult(double increase_ratio) {
      VerboseOut(
          " Perceived downtime with throttling is %.4f times without.\n",
          increase_ratio);
      VerboseOut(" Test result after %d trials.\n",
                 static_cast<int>(num_runs));
    }
  };
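  // Across all trials combined, we expect throttling to increase perceived
  // downtime by roughly 8% to 15%.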
  Stats global_stats = { 1.08, 1.15 };

  struct Trial {
    TimeDelta duration;
    TimeDelta average_client_interval;
    Stats stats;

    void PrintTrialDescription() {
      double duration_minutes =
          static_cast<double>(duration.InSeconds()) / 60.0;
      double interval_minutes =
          static_cast<double>(average_client_interval.InSeconds()) / 60.0;
      VerboseOut("Trial with %.2f min downtime, avg. interval %.2f min.\n",
                 duration_minutes, interval_minutes);
    }
  };

  // We don't set or check expected ratio intervals on individual
  // experiments as this might make the test too fragile, but we
  // print them out at the end for manual evaluation (we want to be
  // able to make claims about the expected ratios depending on the
  // type of behavior of the client and the downtime, e.g. the difference
  // in behavior between a client making requests every few minutes vs.
  // one that makes a request every 15 seconds).
  Trial trials[] = {
    { TimeDelta::FromSeconds(10), TimeDelta::FromSeconds(3) },
    { TimeDelta::FromSeconds(30), TimeDelta::FromSeconds(7) },
    { TimeDelta::FromMinutes(5), TimeDelta::FromSeconds(30) },
    { TimeDelta::FromMinutes(10), TimeDelta::FromSeconds(20) },
    { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(15) },
    { TimeDelta::FromMinutes(20), TimeDelta::FromSeconds(50) },
    { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(2) },
    { TimeDelta::FromMinutes(30), TimeDelta::FromMinutes(5) },
    { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(7) },
    { TimeDelta::FromMinutes(40), TimeDelta::FromMinutes(2) },
    { TimeDelta::FromMinutes(40), TimeDelta::FromSeconds(15) },
    { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(7) },
    { TimeDelta::FromMinutes(60), TimeDelta::FromMinutes(2) },
    { TimeDelta::FromMinutes(60), TimeDelta::FromSeconds(15) },
    { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(20) },
    { TimeDelta::FromMinutes(80), TimeDelta::FromMinutes(3) },
    { TimeDelta::FromMinutes(80), TimeDelta::FromSeconds(15) },

    // Most brutal?
    { TimeDelta::FromMinutes(45), TimeDelta::FromMilliseconds(500) },
  };

  // If things don't converge by the time we've done 100K trials, then
  // clearly one or more of the expected intervals are wrong.
  while (global_stats.num_runs < 100000) {
    for (size_t i = 0; i < arraysize(trials); ++i) {
      ++global_stats.num_runs;
      ++trials[i].stats.num_runs;
      double ratio_unprotected = SimulateDowntime(
          trials[i].duration, trials[i].average_client_interval, false);
      double ratio_protected = SimulateDowntime(
          trials[i].duration, trials[i].average_client_interval, true);
      global_stats.total_ratio_unprotected += ratio_unprotected;
      global_stats.total_ratio_protected += ratio_protected;
      trials[i].stats.total_ratio_unprotected += ratio_unprotected;
      trials[i].stats.total_ratio_protected += ratio_protected;
    }

    double increase_ratio;
    if (global_stats.DidConverge(&increase_ratio))
      break;

    if (global_stats.num_runs > 200) {
      VerboseOut("Test has not yet converged on expected interval.\n");
      global_stats.ReportTrialResult(increase_ratio);
    }
  }

  double average_increase_ratio;
  EXPECT_TRUE(global_stats.DidConverge(&average_increase_ratio));

  // Print individual trial results for optional manual evaluation.
  double max_increase_ratio = 0.0;
  for (size_t i = 0; i < arraysize(trials); ++i) {
    double increase_ratio;
    trials[i].stats.DidConverge(&increase_ratio);
    max_increase_ratio = std::max(max_increase_ratio, increase_ratio);
    trials[i].PrintTrialDescription();
    trials[i].stats.ReportTrialResult(increase_ratio);
  }

  VerboseOut("Average increase ratio was %.4f\n", average_increase_ratio);
  VerboseOut("Maximum increase ratio was %.4f\n", max_increase_ratio);
}

}  // namespace
}  // namespace net