Index: url/url_util.cc
diff --git a/url/url_util.cc b/url/url_util.cc
index f16af98db85cc986a50af25630efa29da227d741..dc1d935ccdfc0a5b4e25be452475904429e96803 100644
--- a/url/url_util.cc
+++ b/url/url_util.cc
@@ -123,7 +123,8 @@ template<typename CHAR>
 bool DoCanonicalize(const CHAR* in_spec, int in_spec_len,
                     url_canon::CharsetConverter* charset_converter,
                     url_canon::CanonOutput* output,
-                    url_parse::Parsed* output_parsed) {
+                    url_parse::Parsed* output_parsed,
+                    bool trim_tail = true) {
   // Remove any whitespace from the middle of the relative URL, possibly
   // copying to the new buffer.
   url_canon::RawCanonOutputT<CHAR> whitespace_buffer;
@@ -188,7 +189,7 @@ bool DoCanonicalize(const CHAR* in_spec, int in_spec_len,
 
   } else {
     // "Weird" URLs like data: and javascript:
-    url_parse::ParsePathURL(spec, spec_len, &parsed_input);
+    url_parse::ParsePathURL(spec, spec_len, &parsed_input, trim_tail);
     success = url_canon::CanonicalizePathURL(spec, spec_len, parsed_input,
                                              output, output_parsed);
   }
@@ -431,18 +432,21 @@ bool Canonicalize(const char* spec,
                   int spec_len,
                   url_canon::CharsetConverter* charset_converter,
                   url_canon::CanonOutput* output,
-                  url_parse::Parsed* output_parsed) {
+                  url_parse::Parsed* output_parsed,
+                  bool trim_tail) {
   return DoCanonicalize(spec, spec_len, charset_converter,
-                        output, output_parsed);
+                        output, output_parsed,
+                        trim_tail);
 }
 
 bool Canonicalize(const base::char16* spec,
                   int spec_len,
                   url_canon::CharsetConverter* charset_converter,
                   url_canon::CanonOutput* output,
-                  url_parse::Parsed* output_parsed) {
+                  url_parse::Parsed* output_parsed,
+                  bool trim_tail) {
   return DoCanonicalize(spec, spec_len, charset_converter,
-                        output, output_parsed);
+                        output, output_parsed, trim_tail);
 }
 
 bool ResolveRelative(const char* base_spec,