Index: chrome/renderer/spellchecker/spellcheck_worditerator.cc
diff --git a/chrome/renderer/spellchecker/spellcheck_worditerator.cc b/chrome/renderer/spellchecker/spellcheck_worditerator.cc
index c024bcd54a3ad14c3960bac0d8872e5bd15e3cb0..ca5c7fa92ad937a79b057c815fa0b78e8589966d 100644
--- a/chrome/renderer/spellchecker/spellcheck_worditerator.cc
+++ b/chrome/renderer/spellchecker/spellcheck_worditerator.cc
@@ -32,7 +32,8 @@ void SpellcheckCharAttribute::SetDefaultLanguage(const std::string& language) {
CreateRuleSets(language);
}
-string16 SpellcheckCharAttribute::GetRuleSet(bool allow_contraction) const {
+base::string16 SpellcheckCharAttribute::GetRuleSet(
+ bool allow_contraction) const {
return allow_contraction ?
ruleset_allow_contraction_ : ruleset_disallow_contraction_;
}
@@ -189,7 +190,8 @@ void SpellcheckCharAttribute::CreateRuleSets(const std::string& language) {
kDisallowContraction));
}
-bool SpellcheckCharAttribute::OutputChar(UChar c, string16* output) const {
+bool SpellcheckCharAttribute::OutputChar(UChar c,
+ base::string16* output) const {
// Call the language-specific function if necessary.
// Otherwise, we call the default one.
switch (script_code_) {
@@ -207,7 +209,8 @@ bool SpellcheckCharAttribute::OutputChar(UChar c, string16* output) const {
}
}
-bool SpellcheckCharAttribute::OutputArabic(UChar c, string16* output) const {
+bool SpellcheckCharAttribute::OutputArabic(UChar c,
+ base::string16* output) const {
// Discard characters not from Arabic alphabets. We also discard vowel marks
// of Arabic (Damma, Fatha, Kasra, etc.) to prevent our Arabic dictionary from
// marking an Arabic word including vowel marks as misspelled. (We need to
@@ -218,7 +221,8 @@ bool SpellcheckCharAttribute::OutputArabic(UChar c, string16* output) const {
return true;
}
-bool SpellcheckCharAttribute::OutputHangul(UChar c, string16* output) const {
+bool SpellcheckCharAttribute::OutputHangul(UChar c,
+ base::string16* output) const {
// Decompose a Hangul character to a Hangul vowel and consonants used by our
// spellchecker. A Hangul character of Unicode is a ligature consisting of a
// Hangul vowel and consonants, e.g. U+AC01 "Gag" consists of U+1100 "G",
@@ -265,7 +269,8 @@ bool SpellcheckCharAttribute::OutputHangul(UChar c, string16* output) const {
return true;
}
-bool SpellcheckCharAttribute::OutputHebrew(UChar c, string16* output) const {
+bool SpellcheckCharAttribute::OutputHebrew(UChar c,
+ base::string16* output) const {
// Discard characters except Hebrew alphabets. We also discard Hebrew niqquds
// to prevent our Hebrew dictionary from marking a Hebrew word including
// niqquds as misspelled. (Same as Arabic vowel marks, we need to check
@@ -279,7 +284,8 @@ bool SpellcheckCharAttribute::OutputHebrew(UChar c, string16* output) const {
return true;
}
-bool SpellcheckCharAttribute::OutputDefault(UChar c, string16* output) const {
+bool SpellcheckCharAttribute::OutputDefault(UChar c,
+ base::string16* output) const {
// Check the script code of this character and output only if it is the one
// used by the spellchecker language.
UErrorCode status = U_ZERO_ERROR;
@@ -311,7 +317,7 @@ bool SpellcheckWordIterator::Initialize(
DCHECK(attribute);
UErrorCode open_status = U_ZERO_ERROR;
UParseError parse_status;
- string16 rule(attribute->GetRuleSet(allow_contraction));
+ base::string16 rule(attribute->GetRuleSet(allow_contraction));
// If there is no rule set, the attributes were invalid.
if (rule.empty())
@@ -354,7 +360,7 @@ bool SpellcheckWordIterator::SetText(const char16* text, size_t length) {
return true;
}
-bool SpellcheckWordIterator::GetNextWord(string16* word_string,
+bool SpellcheckWordIterator::GetNextWord(base::string16* word_string,
int* word_start,
int* word_length) {
DCHECK(!!text_ && length_ > 0);
@@ -401,7 +407,7 @@ void SpellcheckWordIterator::Reset() {
bool SpellcheckWordIterator::Normalize(int input_start,
int input_length,
- string16* output_string) const {
+ base::string16* output_string) const {
// We use NFKC (Normalization Form, Compatible decomposition, followed by
// canonical Composition) defined in Unicode Standard Annex #15 to normalize
// this token because it is the most suitable normalization algorithm for our