diff options
author | Tim Barron <tjbarron@google.com> | 2022-04-12 14:30:14 -0700 |
---|---|---|
committer | Tim Barron <tjbarron@google.com> | 2022-04-12 14:36:38 -0700 |
commit | d5c9ae94052a0f2f1b9ddec9dbbe502bc4f11d54 (patch) | |
tree | 90b929dc92d5874b5c15caca064401196ab4fc65 /icing/result/snippet-retriever.cc | |
parent | beff93fe1f5165aeeb871d9711963aa1846299ae (diff) | |
download | icing-d5c9ae94052a0f2f1b9ddec9dbbe502bc4f11d54.tar.gz |
Sync from upstream.
======================================================================
Refactor DocumentStore::Initialize to improve readability of document store recovery.
======================================================================
Remove non-NDK API usages of ICU4C in libicing.
======================================================================
Move IcuDataFileHelper to the testing directory since it is a test-only util.
======================================================================
Support dump function for DocumentStore
======================================================================
Switch to use PRead rather than MMap in the proto log.
======================================================================
Support dump function for main/lite index and lexicon
======================================================================
Fix LiteIndex::AppendHits
======================================================================
Enable and fix DocumentStoreTest.LoadScoreCacheAndInitializeSuccessfully
======================================================================
Fix MainIndex::GetStorageInfo.
======================================================================
Fix icing-search-engine_fuzz_test by making IcuLanguageSegmenterIterator::Advance non-recursive.
======================================================================
Allow to return additional information for deleted documents in DeleteByQuery
======================================================================
Using enum class in Token::Type for better type safety.
======================================================================
Bug: 158089703
Bug: 185845269
Bug: 209071710
Bug: 211785521
Bug: 218413237
Bug: 223549255
Change-Id: Id2786047ab279734bdd2aee883e82607b6a0e403
Diffstat (limited to 'icing/result/snippet-retriever.cc')
-rw-r--r-- | icing/result/snippet-retriever.cc | 39 |
1 files changed, 19 insertions, 20 deletions
diff --git a/icing/result/snippet-retriever.cc b/icing/result/snippet-retriever.cc
index 6af7017..bd1524e 100644
--- a/icing/result/snippet-retriever.cc
+++ b/icing/result/snippet-retriever.cc
@@ -78,26 +78,25 @@ inline std::string AddIndexToPath(int values_size, int index,
 
 // Returns a string of the normalized text of the input Token. Normalization
 // is applied based on the Token's type.
-std::string NormalizeToken(const Normalizer& normalizer,
-                           const Token& token) {
+std::string NormalizeToken(const Normalizer& normalizer, const Token& token) {
   switch (token.type) {
-    case Token::REGULAR:
+    case Token::Type::REGULAR:
       return normalizer.NormalizeTerm(token.text);
-    case Token::VERBATIM:
+    case Token::Type::VERBATIM:
       return std::string(token.text);
-    case Token::QUERY_EXCLUSION:
+    case Token::Type::QUERY_EXCLUSION:
       [[fallthrough]];
-    case Token::QUERY_LEFT_PARENTHESES:
+    case Token::Type::QUERY_LEFT_PARENTHESES:
       [[fallthrough]];
-    case Token::QUERY_RIGHT_PARENTHESES:
+    case Token::Type::QUERY_RIGHT_PARENTHESES:
       [[fallthrough]];
-    case Token::QUERY_OR:
+    case Token::Type::QUERY_OR:
       [[fallthrough]];
-    case Token::QUERY_PROPERTY:
+    case Token::Type::QUERY_PROPERTY:
       [[fallthrough]];
-    case Token::INVALID:
+    case Token::Type::INVALID:
       ICING_LOG(WARNING) << "Unable to normalize token of type: "
-                         << token.type;
+                         << static_cast<int>(token.type);
       return std::string(token.text);
   }
 }
@@ -107,7 +106,7 @@ std::string NormalizeToken(const Normalizer& normalizer,
 CharacterIterator FindMatchEnd(const Normalizer& normalizer, const Token& token,
                                const std::string& match_query_term) {
   switch (token.type) {
-    case Token::VERBATIM: {
+    case Token::Type::VERBATIM: {
       // VERBATIM tokens are not normalized. This means the non-normalized
       // matched query term must be either equal to or a prefix of the token's
       // text. Therefore, the match must end at the end of the matched query
@@ -117,22 +116,22 @@ CharacterIterator FindMatchEnd(const Normalizer& normalizer, const Token& token,
       verbatim_match_end.AdvanceToUtf8(match_query_term.length());
       return verbatim_match_end;
     }
-    case Token::QUERY_EXCLUSION:
+    case Token::Type::QUERY_EXCLUSION:
       [[fallthrough]];
-    case Token::QUERY_LEFT_PARENTHESES:
+    case Token::Type::QUERY_LEFT_PARENTHESES:
       [[fallthrough]];
-    case Token::QUERY_RIGHT_PARENTHESES:
+    case Token::Type::QUERY_RIGHT_PARENTHESES:
       [[fallthrough]];
-    case Token::QUERY_OR:
+    case Token::Type::QUERY_OR:
       [[fallthrough]];
-    case Token::QUERY_PROPERTY:
+    case Token::Type::QUERY_PROPERTY:
       [[fallthrough]];
-    case Token::INVALID:
+    case Token::Type::INVALID:
       ICING_LOG(WARNING)
-          << "Unexpected Token type " << token.type
+          << "Unexpected Token type " << static_cast<int>(token.type)
           << " found when finding match end of query term and token.";
       [[fallthrough]];
-    case Token::REGULAR:
+    case Token::Type::REGULAR:
       return normalizer.FindNormalizedMatchEndPosition(token.text,
                                                        match_query_term);
   }