OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 package memory | 5 package memory |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "fmt" | 9 "fmt" |
10 "sort" | 10 "sort" |
11 "strings" | 11 "strings" |
12 | 12 |
13 ds "github.com/luci/gae/service/datastore" | 13 ds "github.com/luci/gae/service/datastore" |
14 "github.com/luci/gae/service/datastore/serialize" | 14 "github.com/luci/gae/service/datastore/serialize" |
| 15 "github.com/luci/luci-go/common/stringset" |
15 ) | 16 ) |
16 | 17 |
17 // reducedQuery contains only the pieces of the query necessary to iterate for | 18 // reducedQuery contains only the pieces of the query necessary to iterate for |
18 // results. | 19 // results. |
19 // deduplication is applied externally | 20 // deduplication is applied externally |
20 // projection / keysonly / entity retrieval is done externally | 21 // projection / keysonly / entity retrieval is done externally |
21 type reducedQuery struct { | 22 type reducedQuery struct { |
22 ns string | 23 ns string |
23 kind string | 24 kind string |
24 | 25 |
25 // eqFilters indicate the set of all prefix constraints which need to be | 26 // eqFilters indicate the set of all prefix constraints which need to be |
26 // fulfilled in the composite query. All of these will translate into prefix | 27 // fulfilled in the composite query. All of these will translate into prefix |
27 // bytes for SOME index. | 28 // bytes for SOME index. |
28 » eqFilters map[string]stringSet | 29 » eqFilters map[string]stringset.Set |
29 | 30 |
30 // suffixFormat is the PRECISE listing of the suffix columns that ALL indexes | 31 // suffixFormat is the PRECISE listing of the suffix columns that ALL indexes |
31 // in the multi query will have. | 32 // in the multi query will have. |
32 // | 33 // |
33 // suffixFormat ALWAYS includes the inequality filter (if any) as the 0th | 34 // suffixFormat ALWAYS includes the inequality filter (if any) as the 0th |
34 // element | 35 // element |
35 // suffixFormat ALWAYS includes any additional projections (in ascending | 36 // suffixFormat ALWAYS includes any additional projections (in ascending |
36 // order) after all user defined sort orders | 37 // order) after all user defined sort orders |
37 // suffixFormat ALWAYS has __key__ as the last column | 38 // suffixFormat ALWAYS has __key__ as the last column |
38 suffixFormat []ds.IndexColumn | 39 suffixFormat []ds.IndexColumn |
(...skipping 67 matching lines...)
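Editorial aside for reviewers: this CL swaps the package-local stringSet for luci-go's stringset.Set throughout the file. A minimal sketch of the calls the new code relies on (New with a size hint, Add, Del, Len, ToSlice), using invented property names; this snippet is illustration only and not part of the CL:

    package main

    import (
        "fmt"

        "github.com/luci/luci-go/common/stringset"
    )

    func main() {
        // Mirrors stringset.New(len(q.eqFilters)) below: New takes a size hint.
        missing := stringset.New(2)
        missing.Add("prop1")
        missing.Add("prop2")

        // An index that covers "prop1" deletes it from the missing set.
        missing.Del("prop1")

        fmt.Println(missing.Len())     // 1
        fmt.Println(missing.ToSlice()) // [prop2]
    }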
106 // maybeAddDefinition possibly adds a new IndexDefinitionSortable to this slice. | 107 // maybeAddDefinition possibly adds a new IndexDefinitionSortable to this slice. |
107 // It's only added if it could be useful in servicing q, otherwise this function | 108 // It's only added if it could be useful in servicing q, otherwise this function |
108 // is a noop. | 109 // is a noop. |
109 // | 110 // |
110 // This returns true iff the proposed index is OK and depletes missingTerms to | 111 // This returns true iff the proposed index is OK and depletes missingTerms to |
111 // empty. | 112 // empty. |
112 // | 113 // |
113 // If the proposed index is PERFECT (e.g. contains enough columns to cover all | 114 // If the proposed index is PERFECT (e.g. contains enough columns to cover all |
114 // equality filters, and also has the correct suffix), idxs will be replaced | 115 // equality filters, and also has the correct suffix), idxs will be replaced |
115 // with JUST that index, and this will return true. | 116 // with JUST that index, and this will return true. |
116 func (idxs *IndexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringSet, id *ds.IndexDefinition) bool { | 117 func (idxs *IndexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringset.Set, id *ds.IndexDefinition) bool { |
117 // Kindless queries are handled elsewhere. | 118 // Kindless queries are handled elsewhere. |
118 if id.Kind != q.kind { | 119 if id.Kind != q.kind { |
119 impossible( | 120 impossible( |
120 fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind)) | 121 fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind)) |
121 } | 122 } |
122 | 123 |
123 // If we're an ancestor query, and the index is compound, but doesn't include | 124 // If we're an ancestor query, and the index is compound, but doesn't include |
124 // an Ancestor field, it doesn't work. Builtin indexes can be used for | 125 // an Ancestor field, it doesn't work. Builtin indexes can be used for |
125 // ancestor queries (and have !Ancestor), assuming that it's only equality | 126 // ancestor queries (and have !Ancestor), assuming that it's only equality |
126 // filters (plus inequality on __key__), or a single inequality. | 127 // filters (plus inequality on __key__), or a single inequality. |
(...skipping 51 matching lines...)
178 coll := s.GetCollection( | 179 coll := s.GetCollection( |
179 fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable()))) | 180 fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable()))) |
180 | 181 |
181 // First, see if it's a perfect match. If it is, then our search is over. | 182 // First, see if it's a perfect match. If it is, then our search is over. |
182 // | 183 // |
183 // A perfect match contains ALL the equality filter columns (or more, since | 184 // A perfect match contains ALL the equality filter columns (or more, since |
184 // we can use residuals to fill in the extras). | 185 // we can use residuals to fill in the extras). |
185 toAdd := IndexDefinitionSortable{coll: coll} | 186 toAdd := IndexDefinitionSortable{coll: coll} |
186 toAdd.eqFilts = eqFilts | 187 toAdd.eqFilts = eqFilts |
187 for _, sb := range toAdd.eqFilts { | 188 for _, sb := range toAdd.eqFilts { |
188 » » missingTerms.rm(sb.Property) | 189 » » missingTerms.Del(sb.Property) |
189 } | 190 } |
190 | 191 |
191 perfect := false | 192 perfect := false |
192 if len(sortBy) == q.numCols { | 193 if len(sortBy) == q.numCols { |
193 perfect = true | 194 perfect = true |
194 for k, num := range numByProp { | 195 for k, num := range numByProp { |
195 » » » if num < len(q.eqFilters[k]) { | 196 » » » if num < q.eqFilters[k].Len() { |
196 perfect = false | 197 perfect = false |
197 break | 198 break |
198 } | 199 } |
199 } | 200 } |
200 } | 201 } |
201 if perfect { | 202 if perfect { |
202 *idxs = IndexDefinitionSortableSlice{toAdd} | 203 *idxs = IndexDefinitionSortableSlice{toAdd} |
203 } else { | 204 } else { |
204 *idxs = append(*idxs, toAdd) | 205 *idxs = append(*idxs, toAdd) |
205 } | 206 } |
206 » return len(missingTerms) == 0 | 207 » return missingTerms.Len() == 0 |
207 } | 208 } |
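A side note on the collection lookup inside maybeAddDefinition above: each index's rows live in a memStore collection named from the namespace plus the serialized, PrepForIdxTable'd definition. A rough sketch of that naming scheme; the kind, property, and namespace are invented, and the exact bytes depend on the serialize package:

    package main

    import (
        "fmt"

        ds "github.com/luci/gae/service/datastore"
        "github.com/luci/gae/service/datastore/serialize"
    )

    func main() {
        // A hypothetical single-property index definition.
        id := &ds.IndexDefinition{
            Kind:   "Widget",
            SortBy: []ds.IndexColumn{{Property: "owner"}},
        }
        // Same scheme as s.GetCollection(...) in maybeAddDefinition.
        name := fmt.Sprintf("idx:%s:%s", "my-namespace", serialize.ToBytes(*id.PrepForIdxTable()))
        fmt.Printf("%q\n", name)
    }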
208 | 209 |
209 // getRelevantIndexes retrieves the relevant indexes which could be used to | 210 // getRelevantIndexes retrieves the relevant indexes which could be used to |
210 // service q. It returns nil if it's not possible to service q with the current | 211 // service q. It returns nil if it's not possible to service q with the current |
211 // indexes. | 212 // indexes. |
212 func getRelevantIndexes(q *reducedQuery, s *memStore) (IndexDefinitionSortableSlice, error) { | 213 func getRelevantIndexes(q *reducedQuery, s *memStore) (IndexDefinitionSortableSlice, error) { |
213 » missingTerms := stringSet{} | 214 » missingTerms := stringset.New(len(q.eqFilters)) |
214 for k := range q.eqFilters { | 215 for k := range q.eqFilters { |
215 if k == "__ancestor__" { | 216 if k == "__ancestor__" { |
216 // ancestor is not a prefix which can be satisfied by a single index. It | 217 // ancestor is not a prefix which can be satisfied by a single index. It |
217 // must be satisfied by ALL indexes (and has special logic for this in | 218 // must be satisfied by ALL indexes (and has special logic for this in |
218 // the addDefinition logic) | 219 // the addDefinition logic) |
219 continue | 220 continue |
220 } | 221 } |
221 » » missingTerms.add(k) | 222 » » missingTerms.Add(k) |
222 } | 223 } |
223 idxs := IndexDefinitionSortableSlice{} | 224 idxs := IndexDefinitionSortableSlice{} |
224 | 225 |
225 // First we add builtins | 226 // First we add builtins |
226 // add | 227 // add |
227 // idx:KIND | 228 // idx:KIND |
228 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ | 229 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ |
229 Kind: q.kind, | 230 Kind: q.kind, |
230 }) { | 231 }) { |
231 return idxs, nil | 232 return idxs, nil |
232 } | 233 } |
233 | 234 |
234 // add | 235 // add |
235 // idx:KIND:prop | 236 // idx:KIND:prop |
236 // idx:KIND:-prop | 237 // idx:KIND:-prop |
237 » props := stringSet{} | 238 » props := stringset.New(len(q.eqFilters) + len(q.suffixFormat)) |
238 for prop := range q.eqFilters { | 239 for prop := range q.eqFilters { |
239 » » props.add(prop) | 240 » » props.Add(prop) |
240 } | 241 } |
241 for _, col := range q.suffixFormat[:len(q.suffixFormat)-1] { | 242 for _, col := range q.suffixFormat[:len(q.suffixFormat)-1] { |
242 » » props.add(col.Property) | 243 » » props.Add(col.Property) |
243 } | 244 } |
244 » for prop := range props { | 245 » for _, prop := range props.ToSlice() { |
245 if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") { | 246 if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") { |
246 continue | 247 continue |
247 } | 248 } |
248 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ | 249 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ |
249 Kind: q.kind, | 250 Kind: q.kind, |
250 SortBy: []ds.IndexColumn{ | 251 SortBy: []ds.IndexColumn{ |
251 {Property: prop}, | 252 {Property: prop}, |
252 }, | 253 }, |
253 }) { | 254 }) { |
254 return idxs, nil | 255 return idxs, nil |
(...skipping 14 matching lines...)
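For orientation, the two builtin passes above amount to trying a kind-only candidate and then one single-column candidate per referenced property (special "__"-wrapped properties are skipped). A hypothetical enumeration, with invented kind and property names:

    package main

    import (
        "fmt"

        ds "github.com/luci/gae/service/datastore"
    )

    func main() {
        kind := "Widget"                      // hypothetical
        props := []string{"owner", "created"} // hypothetical, "__"-props already excluded

        // idx:KIND — the kind-only builtin tried first.
        candidates := []*ds.IndexDefinition{{Kind: kind}}

        // idx:KIND:prop — one single-column builtin per referenced property.
        for _, p := range props {
            candidates = append(candidates, &ds.IndexDefinition{
                Kind:   kind,
                SortBy: []ds.IndexColumn{{Property: p}},
            })
        }
        fmt.Println(len(candidates)) // 3
    }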
269 Ancestor: q.eqFilters["__ancestor__"] != nil, | 270 Ancestor: q.eqFilters["__ancestor__"] != nil, |
270 SortBy: q.suffixFormat, | 271 SortBy: q.suffixFormat, |
271 } | 272 } |
272 walkCompIdxs(s, suffix, func(def *ds.IndexDefinition) bool { | 273 walkCompIdxs(s, suffix, func(def *ds.IndexDefinition) bool { |
273 // keep walking until we find a perfect index. | 274 // keep walking until we find a perfect index. |
274 return !idxs.maybeAddDefinition(q, s, missingTerms, def) | 275 return !idxs.maybeAddDefinition(q, s, missingTerms, def) |
275 }) | 276 }) |
276 | 277 |
277 // this query is impossible to fulfil with the current indexes. Not all the | 278 // this query is impossible to fulfil with the current indexes. Not all the |
278 // terms (equality + projection) are satisfied. | 279 // terms (equality + projection) are satisfied. |
279 » if len(missingTerms) > 0 || len(idxs) == 0 { | 280 » if missingTerms.Len() > 0 || len(idxs) == 0 { |
280 remains := &ds.IndexDefinition{ | 281 remains := &ds.IndexDefinition{ |
281 Kind: q.kind, | 282 Kind: q.kind, |
282 Ancestor: q.eqFilters["__ancestor__"] != nil, | 283 Ancestor: q.eqFilters["__ancestor__"] != nil, |
283 } | 284 } |
284 » » terms := make([]string, 0, len(missingTerms)) | 285 » » terms := missingTerms.ToSlice() |
285 » » for mt := range missingTerms { | |
286 » » » terms = append(terms, mt) | |
287 » » } | |
288 if serializationDeterministic { | 286 if serializationDeterministic { |
289 sort.Strings(terms) | 287 sort.Strings(terms) |
290 } | 288 } |
291 for _, term := range terms { | 289 for _, term := range terms { |
292 remains.SortBy = append(remains.SortBy, ds.IndexColumn{Property: term}) | 290 remains.SortBy = append(remains.SortBy, ds.IndexColumn{Property: term}) |
293 } | 291 } |
294 remains.SortBy = append(remains.SortBy, q.suffixFormat...) | 292 remains.SortBy = append(remains.SortBy, q.suffixFormat...) |
295 last := remains.SortBy[len(remains.SortBy)-1] | 293 last := remains.SortBy[len(remains.SortBy)-1] |
296 if last.Direction == ds.ASCENDING { | 294 if last.Direction == ds.ASCENDING { |
297 // this removes the __key__ column, since it's implicit. | 295 // this removes the __key__ column, since it's implicit. |
(...skipping 39 matching lines...)
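To make the error path concrete, here is a sketch of the suggested "remains" index for a hypothetical unserviceable query: kind Widget, missing equality terms owner and state, and a suffixFormat of descending created plus ascending __key__. The names are invented, and the DESCENDING constant is assumed as the counterpart of the ASCENDING one used above; the construction mirrors the code, including dropping the implicit trailing ascending __key__:

    package main

    import (
        "fmt"
        "sort"

        ds "github.com/luci/gae/service/datastore"
    )

    func main() {
        terms := []string{"state", "owner"} // missing equality terms
        sort.Strings(terms)                 // deterministic ordering, as above

        suffix := []ds.IndexColumn{
            {Property: "created", Direction: ds.DESCENDING},
            {Property: "__key__", Direction: ds.ASCENDING},
        }

        remains := &ds.IndexDefinition{Kind: "Widget"}
        for _, t := range terms {
            remains.SortBy = append(remains.SortBy, ds.IndexColumn{Property: t})
        }
        remains.SortBy = append(remains.SortBy, suffix...)

        // A trailing ascending __key__ is implicit in every index, so the
        // suggestion drops it, exactly as the code above does.
        last := remains.SortBy[len(remains.SortBy)-1]
        if last.Direction == ds.ASCENDING {
            remains.SortBy = remains.SortBy[:len(remains.SortBy)-1]
        }
        fmt.Printf("%#v\n", remains.SortBy) // owner, state, -created
    }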
337 // Kind/__key__ | 335 // Kind/__key__ |
338 // Kind/Prop/__key__ | 336 // Kind/Prop/__key__ |
339 // Kind/Prop/-__key__ | 337 // Kind/Prop/-__key__ |
340 if len(q.suffixFormat) > 2 || q.suffixFormat[len(q.suffixFormat)-1].Property != "__key__" { | 338 if len(q.suffixFormat) > 2 || q.suffixFormat[len(q.suffixFormat)-1].Property != "__key__" { |
341 // This should never happen. One of the previous validators would have | 339 // This should never happen. One of the previous validators would have |
342 // selected a different index. But just in case. | 340 // selected a different index. But just in case. |
343 impossible(fmt.Errorf("cannot supply an implicit ancestor for %#v", idx)) | 341 impossible(fmt.Errorf("cannot supply an implicit ancestor for %#v", idx)) |
344 } | 342 } |
345 | 343 |
346 // get the only value out of __ancestor__ | 344 // get the only value out of __ancestor__ |
347 » » anc := q.eqFilters["__ancestor__"].getOne() | 345 » » anc, _ := q.eqFilters["__ancestor__"].Peek() |
348 | 346 |
349 // Intentionally do NOT update prefixLen. This allows multiIterator to | 347 // Intentionally do NOT update prefixLen. This allows multiIterator to |
350 // correctly include the entire key in the shared iterator suffix, instead | 348 // correctly include the entire key in the shared iterator suffix, instead |
351 // of just the remainder. | 349 // of just the remainder. |
352 | 350 |
353 // chop the terminal null byte off the q.ancestor key... we can accept | 351 // chop the terminal null byte off the q.ancestor key... we can accept |
354 // anything which is a descendant or an exact match. Removing the last byte | 352 // anything which is a descendant or an exact match. Removing the last byte |
355 // from the key (the terminating null) allows this trick to work. Otherwise | 353 // from the key (the terminating null) allows this trick to work. Otherwise |
356 // it would be a closed range of EXACTLY this key. | 354 // it would be a closed range of EXACTLY this key. |
357 chopped := []byte(anc[:len(anc)-1]) | 355 chopped := []byte(anc[:len(anc)-1]) |
(...skipping 73 matching lines...)
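A toy illustration of the terminal-null chop described above. The byte strings are hypothetical stand-ins for serialized keys, not the real encoding from the serialize package; the point is only that dropping the final terminator turns an exact-match prefix into one that also covers every descendant:

    package main

    import (
        "bytes"
        "fmt"
    )

    func main() {
        // Hypothetical encodings: a child key repeats its ancestor's tokens and
        // only then appends its own, with a single terminating null at the end.
        parent := []byte("Parent,1\x00")
        child := []byte("Parent,1Child,2\x00")

        fmt.Println(bytes.HasPrefix(child, parent)) // false: the terminator blocks it

        chopped := parent[:len(parent)-1]
        fmt.Println(bytes.HasPrefix(child, chopped))  // true: descendant accepted
        fmt.Println(bytes.HasPrefix(parent, chopped)) // true: exact match accepted
    }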
431 // arbitrary value for filling index prefixes which have more equality fields | 429 // arbitrary value for filling index prefixes which have more equality fields |
432 // than are necessary. The value doesn't matter, as long as it's an equality | 430 // than are necessary. The value doesn't matter, as long as it's an equality |
433 // constraint in the original query. | 431 // constraint in the original query. |
434 func calculateConstraints(q *reducedQuery) *constraints { | 432 func calculateConstraints(q *reducedQuery) *constraints { |
435 ret := &constraints{ | 433 ret := &constraints{ |
436 original: make(map[string][][]byte, len(q.eqFilters)), | 434 original: make(map[string][][]byte, len(q.eqFilters)), |
437 constraints: make(map[string][][]byte, len(q.eqFilters)), | 435 constraints: make(map[string][][]byte, len(q.eqFilters)), |
438 residualMapping: make(map[string]int), | 436 residualMapping: make(map[string]int), |
439 } | 437 } |
440 for prop, vals := range q.eqFilters { | 438 for prop, vals := range q.eqFilters { |
441 » » bvals := make([][]byte, 0, len(vals)) | 439 » » bvals := make([][]byte, 0, vals.Len()) |
442 » » for val := range vals { | 440 » » vals.Iter(func(val string) bool { |
443 bvals = append(bvals, []byte(val)) | 441 bvals = append(bvals, []byte(val)) |
444 » » } | 442 » » » return true |
| 443 » » }) |
445 ret.original[prop] = bvals | 444 ret.original[prop] = bvals |
446 if prop == "__ancestor__" { | 445 if prop == "__ancestor__" { |
447 // exclude __ancestor__ from the constraints. | 446 // exclude __ancestor__ from the constraints. |
448 // | 447 // |
449 // This is because it's handled specially during index proposal and | 448 // This is because it's handled specially during index proposal and |
450 // generation. Ancestor is used by ALL indexes, and so its residual value | 449 // generation. Ancestor is used by ALL indexes, and so its residual value |
451 // in ret.original above will be sufficient. | 450 // in ret.original above will be sufficient. |
452 continue | 451 continue |
453 } | 452 } |
454 ret.constraints[prop] = bvals | 453 ret.constraints[prop] = bvals |
(...skipping 77 matching lines...)
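Another small stringset sketch, this time for the Iter-based conversion that calculateConstraints now uses to turn each equality-value set into byte slices. The values are invented; the callback returns true to keep iterating, matching the usage above:

    package main

    import (
        "fmt"

        "github.com/luci/luci-go/common/stringset"
    )

    func main() {
        // Hypothetical serialized property values for one equality filter.
        vals := stringset.New(2)
        vals.Add("valA")
        vals.Add("valB")

        // Snapshot the set into byte slices, as calculateConstraints does; the
        // callback returns true so iteration continues over every element.
        bvals := make([][]byte, 0, vals.Len())
        vals.Iter(func(val string) bool {
            bvals = append(bvals, []byte(val))
            return true
        })
        fmt.Println(len(bvals)) // 2
    }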
532 if bestIdx == nil { | 531 if bestIdx == nil { |
533 // something is really wrong here... if relevantIdxs is !nil, then we | 532 // something is really wrong here... if relevantIdxs is !nil, then we |
534 // should always be able to make progress in this loop. | 533 // should always be able to make progress in this loop. |
535 impossible(fmt.Errorf("deadlock: cannot fulfil query?")) | 534 impossible(fmt.Errorf("deadlock: cannot fulfil query?")) |
536 } | 535 } |
537 ret = append(ret, generate(q, bestIdx, constraints)) | 536 ret = append(ret, generate(q, bestIdx, constraints)) |
538 } | 537 } |
539 | 538 |
540 return ret, nil | 539 return ret, nil |
541 } | 540 } |