| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 package memory | 5 package memory |
| 6 | 6 |
| 7 import ( | 7 import ( |
| 8 "bytes" | 8 "bytes" |
| 9 "fmt" | 9 "fmt" |
| 10 "sort" | 10 "sort" |
| (...skipping 30 matching lines...) |
| 41   // limits of the inequality and/or full sort order. This is ONLY a suffix, | 41   // limits of the inequality and/or full sort order. This is ONLY a suffix, |
| 42 // and it will be appended to the prefix during iteration. | 42 // and it will be appended to the prefix during iteration. |
| 43 start []byte | 43 start []byte |
| 44 end []byte | 44 end []byte |
| 45 | 45 |
| 46   // metadata describing the total number of columns that this query requires to | 46   // metadata describing the total number of columns that this query requires to |
| 47 // execute perfectly. | 47 // execute perfectly. |
| 48 numCols int | 48 numCols int |
| 49 } | 49 } |
| 50 | 50 |
| 51 type IndexDefinitionSortable struct { | 51 type indexDefinitionSortable struct { |
| 52   // eqFilts is the list of ACTUAL prefix columns. Note that it may contain | 52   // eqFilts is the list of ACTUAL prefix columns. Note that it may contain |
| 53   // redundant columns! (e.g. (tag, tag) is a perfectly valid prefix, becuase | 53   // redundant columns! (e.g. (tag, tag) is a perfectly valid prefix, becuase |
| 54 // (tag=1, tag=2) is a perfectly valid query). | 54 // (tag=1, tag=2) is a perfectly valid query). |
| 55 eqFilts []ds.IndexColumn | 55 eqFilts []ds.IndexColumn |
| 56 coll *memCollection | 56 coll *memCollection |
| 57 } | 57 } |
| 58 | 58 |
| 59 func (i *IndexDefinitionSortable) hasAncestor() bool { | 59 func (i *indexDefinitionSortable) hasAncestor() bool { |
| 60 return len(i.eqFilts) > 0 && i.eqFilts[0].Property == "__ancestor__" | 60 return len(i.eqFilts) > 0 && i.eqFilts[0].Property == "__ancestor__" |
| 61 } | 61 } |
| 62 | 62 |
| 63 func (i *IndexDefinitionSortable) numEqHits(c *constraints) int { | 63 func (i *indexDefinitionSortable) numEqHits(c *constraints) int { |
| 64 ret := 0 | 64 ret := 0 |
| 65 for _, filt := range i.eqFilts { | 65 for _, filt := range i.eqFilts { |
| 66 if _, ok := c.constraints[filt.Property]; ok { | 66 if _, ok := c.constraints[filt.Property]; ok { |
| 67 ret++ | 67 ret++ |
| 68 } | 68 } |
| 69 } | 69 } |
| 70 return ret | 70 return ret |
| 71 } | 71 } |
| 72 | 72 |
| 73 type IndexDefinitionSortableSlice []IndexDefinitionSortable | 73 type indexDefinitionSortableSlice []indexDefinitionSortable |
| 74 | 74 |
| 75 func (s IndexDefinitionSortableSlice) Len() int { return len(s) } | 75 func (idxs indexDefinitionSortableSlice) Len() int { return len(idxs) } |
| 76 func (s IndexDefinitionSortableSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | 76 func (idxs indexDefinitionSortableSlice) Swap(i, j int) { idxs[i], idxs[j] = idxs[j], idxs[i] } |
| 77 func (s IndexDefinitionSortableSlice) Less(i, j int) bool { | 77 func (idxs indexDefinitionSortableSlice) Less(i, j int) bool { |
| 78 » a, b := s[i], s[j] | 78 » a, b := idxs[i], idxs[j] |
| 79 if a.coll == nil && b.coll != nil { | 79 if a.coll == nil && b.coll != nil { |
| 80 return true | 80 return true |
| 81 } else if a.coll != nil && b.coll == nil { | 81 } else if a.coll != nil && b.coll == nil { |
| 82 return false | 82 return false |
| 83 } | 83 } |
| 84 | 84 |
| 85 cmp := len(a.eqFilts) - len(b.eqFilts) | 85 cmp := len(a.eqFilts) - len(b.eqFilts) |
| 86 if cmp < 0 { | 86 if cmp < 0 { |
| 87 return true | 87 return true |
| 88 } else if cmp > 0 { | 88 } else if cmp > 0 { |
| 89 return false | 89 return false |
| 90 } | 90 } |
| 91 for k, col := range a.eqFilts { | 91 for k, col := range a.eqFilts { |
| 92 ocol := b.eqFilts[k] | 92 ocol := b.eqFilts[k] |
| 93 » » if col.Direction == ds.ASCENDING && ocol.Direction == ds.DESCENDING { | 93 » » if !col.Descending && ocol.Descending { |
| 94 return true | 94 return true |
| 95 » » } else if col.Direction == ds.DESCENDING && ocol.Direction == ds.ASCENDING { | 95 » » } else if col.Descending && !ocol.Descending { |
| 96 return false | 96 return false |
| 97 } | 97 } |
| 98 if col.Property < ocol.Property { | 98 if col.Property < ocol.Property { |
| 99 return true | 99 return true |
| 100 } else if col.Property > ocol.Property { | 100 } else if col.Property > ocol.Property { |
| 101 return false | 101 return false |
| 102 } | 102 } |
| 103 } | 103 } |
| 104 return false | 104 return false |
| 105 } | 105 } |
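
The Less comparator above orders candidate indexes so that entries without a backing collection sort first, then by increasing number of equality columns, then ascending-before-descending, then by property name. Since getIndexes (further down in this file) walks the sorted slice from the back, the last element is always the candidate with the most equality filters. Below is a minimal, self-contained sketch of that sort-then-take-last pattern; the types are illustrative stand-ins, not the package's real indexDefinitionSortable.

```go
package main

import (
	"fmt"
	"sort"
)

// candidate stands in for indexDefinitionSortable; only the equality-column
// count matters for this illustration.
type candidate struct {
	name    string
	eqFilts int
}

// byEqFilts mirrors the "fewer equality filters sort earlier" part of Less.
type byEqFilts []candidate

func (s byEqFilts) Len() int           { return len(s) }
func (s byEqFilts) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byEqFilts) Less(i, j int) bool { return s[i].eqFilts < s[j].eqFilts }

func main() {
	cands := byEqFilts{
		{"idx:Kind/tag", 1},
		{"idx:Kind", 0},
		{"idx:Kind/tag/owner", 2},
	}
	sort.Sort(cands)
	// After sorting, the back of the slice holds the candidate with the most
	// equality columns, which is what getIndexes picks first via
	// relevantIdxs[len(relevantIdxs)-1].
	fmt.Println(cands[len(cands)-1].name) // idx:Kind/tag/owner
}
```
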
| 106 | 106 |
| 107 // maybeAddDefinition possibly adds a new IndexDefinitionSortable to this slice. | 107 // maybeAddDefinition possibly adds a new indexDefinitionSortable to this slice. |
| 108 // It's only added if it could be useful in servicing q, otherwise this function | 108 // It's only added if it could be useful in servicing q, otherwise this function |
| 109 // is a noop. | 109 // is a noop. |
| 110 // | 110 // |
| 111 // This returns true iff the proposed index is OK and depletes missingTerms to | 111 // This returns true iff the proposed index is OK and depletes missingTerms to |
| 112 // empty. | 112 // empty. |
| 113 // | 113 // |
| 114 // If the proposed index is PERFECT (e.g. contains enough columns to cover all | 114 // If the proposed index is PERFECT (e.g. contains enough columns to cover all |
| 115 // equality filters, and also has the correct suffix), idxs will be replaced | 115 // equality filters, and also has the correct suffix), idxs will be replaced |
| 116 // with JUST that index, and this will return true. | 116 // with JUST that index, and this will return true. |
| 117 func (idxs *IndexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringset.Set, id *ds.IndexDefinition) bool { | 117 func (idxs *indexDefinitionSortableSlice) maybeAddDefinition(q *reducedQuery, s *memStore, missingTerms stringset.Set, id *ds.IndexDefinition) bool { |
| 118 // Kindless queries are handled elsewhere. | 118 // Kindless queries are handled elsewhere. |
| 119 if id.Kind != q.kind { | 119 if id.Kind != q.kind { |
| 120 impossible( | 120 impossible( |
| 121     fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind)) | 121     fmt.Errorf("maybeAddDefinition given index with wrong kind %q v %q", id.Kind, q.kind)) |
| 122 } | 122 } |
| 123 | 123 |
| 124   // If we're an ancestor query, and the index is compound, but doesn't include | 124   // If we're an ancestor query, and the index is compound, but doesn't include |
| 125 // an Ancestor field, it doesn't work. Builtin indexes can be used for | 125 // an Ancestor field, it doesn't work. Builtin indexes can be used for |
| 126   // ancestor queries (and have !Ancestor), assuming that it's only equality | 126   // ancestor queries (and have !Ancestor), assuming that it's only equality |
| 127 // filters (plus inequality on __key__), or a single inequality. | 127 // filters (plus inequality on __key__), or a single inequality. |
| (...skipping 48 matching lines...) |
| 176 // index's potential just because the collection doesn't exist. If it's | 176 // index's potential just because the collection doesn't exist. If it's |
| 177   // a builtin and it doesn't exist, it still needs to be one of the 'possible' | 177   // a builtin and it doesn't exist, it still needs to be one of the 'possible' |
| 178   // indexes... it just means that the user's query will end up with no results. | 178   // indexes... it just means that the user's query will end up with no results. |
| 179 coll := s.GetCollection( | 179 coll := s.GetCollection( |
| 180     fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable()))) | 180     fmt.Sprintf("idx:%s:%s", q.ns, serialize.ToBytes(*id.PrepForIdxTable()))) |
| 181 | 181 |
| 182   // First, see if it's a perfect match. If it is, then our search is over. | 182   // First, see if it's a perfect match. If it is, then our search is over. |
| 183 // | 183 // |
| 184   // A perfect match contains ALL the equality filter columns (or more, since | 184   // A perfect match contains ALL the equality filter columns (or more, since |
| 185 // we can use residuals to fill in the extras). | 185 // we can use residuals to fill in the extras). |
| 186 » toAdd := IndexDefinitionSortable{coll: coll} | 186 » toAdd := indexDefinitionSortable{coll: coll} |
| 187 toAdd.eqFilts = eqFilts | 187 toAdd.eqFilts = eqFilts |
| 188 for _, sb := range toAdd.eqFilts { | 188 for _, sb := range toAdd.eqFilts { |
| 189 missingTerms.Del(sb.Property) | 189 missingTerms.Del(sb.Property) |
| 190 } | 190 } |
| 191 | 191 |
| 192 perfect := false | 192 perfect := false |
| 193 if len(sortBy) == q.numCols { | 193 if len(sortBy) == q.numCols { |
| 194 perfect = true | 194 perfect = true |
| 195 for k, num := range numByProp { | 195 for k, num := range numByProp { |
| 196 if num < q.eqFilters[k].Len() { | 196 if num < q.eqFilters[k].Len() { |
| 197 perfect = false | 197 perfect = false |
| 198 break | 198 break |
| 199 } | 199 } |
| 200 } | 200 } |
| 201 } | 201 } |
| 202 if perfect { | 202 if perfect { |
| 203 » » *idxs = IndexDefinitionSortableSlice{toAdd} | 203 » » *idxs = indexDefinitionSortableSlice{toAdd} |
| 204 } else { | 204 } else { |
| 205 *idxs = append(*idxs, toAdd) | 205 *idxs = append(*idxs, toAdd) |
| 206 } | 206 } |
| 207 return missingTerms.Len() == 0 | 207 return missingTerms.Len() == 0 |
| 208 } | 208 } |
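
maybeAddDefinition's contract with missingTerms, as shown above: every accepted candidate deletes the equality properties covered by its prefix from missingTerms, and the function returns true only once the set has been fully depleted (a perfect candidate additionally replaces the whole candidate slice with just itself). The sketch below illustrates that depletion contract using a plain map[string]struct{} in place of the real stringset.Set so it stays dependency-free; the names are hypothetical.

```go
package main

import "fmt"

// coveredBy removes every term that a candidate's prefix columns cover and
// reports whether nothing remains to cover, mirroring the contract between
// maybeAddDefinition and missingTerms (with a plain map standing in for
// stringset.Set).
func coveredBy(missing map[string]struct{}, prefixCols []string) bool {
	for _, col := range prefixCols {
		delete(missing, col)
	}
	return len(missing) == 0
}

func main() {
	missing := map[string]struct{}{"tag": {}, "owner": {}}

	// First candidate only covers "tag": not done yet.
	fmt.Println(coveredBy(missing, []string{"tag"})) // false

	// Second candidate covers "owner": every equality term is now handled by
	// some candidate, so the index search can stop.
	fmt.Println(coveredBy(missing, []string{"owner"})) // true
}
```
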
| 209 | 209 |
| 210 // getRelevantIndexes retrieves the relevant indexes which could be used to | 210 // getRelevantIndexes retrieves the relevant indexes which could be used to |
| 211 // service q. It returns nil if it's not possible to service q with the current | 211 // service q. It returns nil if it's not possible to service q with the current |
| 212 // indexes. | 212 // indexes. |
| 213 func getRelevantIndexes(q *reducedQuery, s *memStore) (IndexDefinitionSortableSlice, error) { | 213 func getRelevantIndexes(q *reducedQuery, s *memStore) (indexDefinitionSortableSlice, error) { |
| 214 missingTerms := stringset.New(len(q.eqFilters)) | 214 missingTerms := stringset.New(len(q.eqFilters)) |
| 215 for k := range q.eqFilters { | 215 for k := range q.eqFilters { |
| 216 if k == "__ancestor__" { | 216 if k == "__ancestor__" { |
| 217 // ancestor is not a prefix which can be satisfied by a
single index. It | 217 // ancestor is not a prefix which can be satisfied by a
single index. It |
| 218 // must be satisfied by ALL indexes (and has special log
ic for this in | 218 // must be satisfied by ALL indexes (and has special log
ic for this in |
| 219 // the addDefinition logic) | 219 // the addDefinition logic) |
| 220 continue | 220 continue |
| 221 } | 221 } |
| 222 missingTerms.Add(k) | 222 missingTerms.Add(k) |
| 223 } | 223 } |
| 224 » idxs := IndexDefinitionSortableSlice{} | 224 » idxs := indexDefinitionSortableSlice{} |
| 225 | 225 |
| 226 // First we add builtins | 226 // First we add builtins |
| 227 // add | 227 // add |
| 228 // idx:KIND | 228 // idx:KIND |
| 229 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ | 229 if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ |
| 230 Kind: q.kind, | 230 Kind: q.kind, |
| 231 }) { | 231 }) { |
| 232 return idxs, nil | 232 return idxs, nil |
| 233 } | 233 } |
| 234 | 234 |
| (...skipping 15 matching lines...) |
| 250 Kind: q.kind, | 250 Kind: q.kind, |
| 251 SortBy: []ds.IndexColumn{ | 251 SortBy: []ds.IndexColumn{ |
| 252 {Property: prop}, | 252 {Property: prop}, |
| 253 }, | 253 }, |
| 254 }) { | 254 }) { |
| 255 return idxs, nil | 255 return idxs, nil |
| 256 } | 256 } |
| 257     if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ | 257     if idxs.maybeAddDefinition(q, s, missingTerms, &ds.IndexDefinition{ |
| 258 Kind: q.kind, | 258 Kind: q.kind, |
| 259 SortBy: []ds.IndexColumn{ | 259 SortBy: []ds.IndexColumn{ |
| 260 » » » » {Property: prop, Direction: ds.DESCENDING}, | 260 » » » » {Property: prop, Descending: true}, |
| 261 }, | 261 }, |
| 262 }) { | 262 }) { |
| 263 return idxs, nil | 263 return idxs, nil |
| 264 } | 264 } |
| 265 } | 265 } |
| 266 | 266 |
| 267 // Try adding all compound indexes whose suffix matches. | 267 // Try adding all compound indexes whose suffix matches. |
| 268 suffix := &ds.IndexDefinition{ | 268 suffix := &ds.IndexDefinition{ |
| 269 Kind: q.kind, | 269 Kind: q.kind, |
| 270 Ancestor: q.eqFilters["__ancestor__"] != nil, | 270 Ancestor: q.eqFilters["__ancestor__"] != nil, |
| (...skipping 13 matching lines...) |
| 284 } | 284 } |
| 285 terms := missingTerms.ToSlice() | 285 terms := missingTerms.ToSlice() |
| 286 if serializationDeterministic { | 286 if serializationDeterministic { |
| 287 sort.Strings(terms) | 287 sort.Strings(terms) |
| 288 } | 288 } |
| 289 for _, term := range terms { | 289 for _, term := range terms { |
| 290       remains.SortBy = append(remains.SortBy, ds.IndexColumn{Property: term}) | 290       remains.SortBy = append(remains.SortBy, ds.IndexColumn{Property: term}) |
| 291 } | 291 } |
| 292 remains.SortBy = append(remains.SortBy, q.suffixFormat...) | 292 remains.SortBy = append(remains.SortBy, q.suffixFormat...) |
| 293 last := remains.SortBy[len(remains.SortBy)-1] | 293 last := remains.SortBy[len(remains.SortBy)-1] |
| 294 » » if last.Direction == ds.ASCENDING { | 294 » » if !last.Descending { |
| 295 // this removes the __key__ column, since it's implicit. | 295 // this removes the __key__ column, since it's implicit. |
| 296 remains.SortBy = remains.SortBy[:len(remains.SortBy)-1] | 296 remains.SortBy = remains.SortBy[:len(remains.SortBy)-1] |
| 297 } | 297 } |
| 298 if remains.Builtin() { | 298 if remains.Builtin() { |
| 299 impossible( | 299 impossible( |
| 300       fmt.Errorf("recommended missing index would be a builtin: %s", remains)) | 300       fmt.Errorf("recommended missing index would be a builtin: %s", remains)) |
| 301 } | 301 } |
| 302 return nil, fmt.Errorf( | 302 return nil, fmt.Errorf( |
| 303     "Your indexes are insufficient! Try adding:\n %s", remains) | 303     "Your indexes are insufficient! Try adding:\n %s", remains) |
| 304 } | 304 } |
| 305 | 305 |
| 306 return idxs, nil | 306 return idxs, nil |
| 307 } | 307 } |
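
When no combination of candidates covers every equality filter, getRelevantIndexes builds the "Try adding:" suggestion shown above: the still-missing equality properties (sorted when serializationDeterministic is set) become the index prefix, the query's suffixFormat columns follow, and a trailing ascending __key__ column is dropped because it is implicit. Here is a minimal sketch of that construction using a stand-in column type rather than the real ds.IndexColumn; it is an illustration under those assumptions, not the package's code.

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// column stands in for ds.IndexColumn.
type column struct {
	Property   string
	Descending bool
}

// suggestIndex sketches how the recommendation is assembled: missing equality
// properties form the prefix (sorted here for determinism), the query's
// suffix columns follow, and a trailing ascending __key__ column is dropped
// because it is implicit in every index.
func suggestIndex(missing []string, suffix []column) []column {
	sort.Strings(missing)
	cols := make([]column, 0, len(missing)+len(suffix))
	for _, prop := range missing {
		cols = append(cols, column{Property: prop})
	}
	cols = append(cols, suffix...)
	if n := len(cols); n > 0 {
		if last := cols[n-1]; last.Property == "__key__" && !last.Descending {
			cols = cols[:n-1]
		}
	}
	return cols
}

func main() {
	cols := suggestIndex(
		[]string{"owner", "tag"},
		[]column{{Property: "created", Descending: true}, {Property: "__key__"}},
	)
	parts := make([]string, len(cols))
	for i, c := range cols {
		dir := "ASC"
		if c.Descending {
			dir = "DESC"
		}
		parts[i] = c.Property + " " + dir
	}
	fmt.Println(strings.Join(parts, ", ")) // owner ASC, tag ASC, created DESC
}
```
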
| 308 | 308 |
| 309 // generate generates a single iterDefinition for the given index. | 309 // generate generates a single iterDefinition for the given index. |
| 310 func generate(q *reducedQuery, idx *IndexDefinitionSortable, c *constraints) *iterDefinition { | 310 func generate(q *reducedQuery, idx *indexDefinitionSortable, c *constraints) *iterDefinition { |
| 311 def := &iterDefinition{ | 311 def := &iterDefinition{ |
| 312 c: idx.coll, | 312 c: idx.coll, |
| 313 start: q.start, | 313 start: q.start, |
| 314 end: q.end, | 314 end: q.end, |
| 315 } | 315 } |
| 316 toJoin := make([][]byte, len(idx.eqFilts)) | 316 toJoin := make([][]byte, len(idx.eqFilts)) |
| 317 for _, sb := range idx.eqFilts { | 317 for _, sb := range idx.eqFilts { |
| 318 val := c.peel(sb.Property) | 318 val := c.peel(sb.Property) |
| 319 » » if sb.Direction == ds.DESCENDING { | 319 » » if sb.Descending { |
| 320 val = invert(val) | 320 val = invert(val) |
| 321 } | 321 } |
| 322 toJoin = append(toJoin, val) | 322 toJoin = append(toJoin, val) |
| 323 } | 323 } |
| 324 def.prefix = bjoin(toJoin...) | 324 def.prefix = bjoin(toJoin...) |
| 325 def.prefixLen = len(def.prefix) | 325 def.prefixLen = len(def.prefix) |
| 326 | 326 |
| 327 if q.eqFilters["__ancestor__"] != nil && !idx.hasAncestor() { | 327 if q.eqFilters["__ancestor__"] != nil && !idx.hasAncestor() { |
| 328 // The query requires an ancestor, but the index doesn't explici
tly have it | 328 // The query requires an ancestor, but the index doesn't explici
tly have it |
| 329 // as part of the prefix (otherwise it would have been the first
eqFilt | 329 // as part of the prefix (otherwise it would have been the first
eqFilt |
| (...skipping 16 matching lines...) Expand all Loading... |
| 346 | 346 |
| 347     // Intentionally do NOT update prefixLen. This allows multiIterator to | 347     // Intentionally do NOT update prefixLen. This allows multiIterator to |
| 348     // correctly include the entire key in the shared iterator suffix, instead | 348     // correctly include the entire key in the shared iterator suffix, instead |
| 349 // of just the remainder. | 349 // of just the remainder. |
| 350 | 350 |
| 351     // chop the terminal null byte off the q.ancestor key... we can accept | 351     // chop the terminal null byte off the q.ancestor key... we can accept |
| 352     // anything which is a descendant or an exact match. Removing the last byte | 352     // anything which is a descendant or an exact match. Removing the last byte |
| 353     // from the key (the terminating null) allows this trick to work. Otherwise | 353     // from the key (the terminating null) allows this trick to work. Otherwise |
| 354 // it would be a closed range of EXACTLY this key. | 354 // it would be a closed range of EXACTLY this key. |
| 355 chopped := []byte(anc[:len(anc)-1]) | 355 chopped := []byte(anc[:len(anc)-1]) |
| 356 » » if q.suffixFormat[0].Direction == ds.DESCENDING { | 356 » » if q.suffixFormat[0].Descending { |
| 357 chopped = invert(chopped) | 357 chopped = invert(chopped) |
| 358 } | 358 } |
| 359 def.prefix = bjoin(def.prefix, chopped) | 359 def.prefix = bjoin(def.prefix, chopped) |
| 360 | 360 |
| 361     // Update start and end, since we know that if they contain anything, they | 361     // Update start and end, since we know that if they contain anything, they |
| 362 // contain values for the __key__ field. | 362 // contain values for the __key__ field. |
| 363 if def.start != nil { | 363 if def.start != nil { |
| 364 offset := 0 | 364 offset := 0 |
| 365 if len(q.suffixFormat) > 1 { | 365 if len(q.suffixFormat) > 1 { |
| 366         chunks, _ := parseSuffix(q.ns, q.suffixFormat, def.start, 1) | 366         chunks, _ := parseSuffix(q.ns, q.suffixFormat, def.start, 1) |
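
The ancestor handling in generate, shown above, relies on a property of the key encoding: a serialized key ends with a terminating null byte, so chopping that byte off the ancestor key turns an exact-match prefix into one that also matches every descendant while still excluding siblings (and the whole value is inverted first when the key column is descending). The snippet below demonstrates the idea with a deliberately simplified, hypothetical encoding; the real serialize package is more involved but shares the terminator property.

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Hypothetical encoding for this sketch only: a key is its path components
	// concatenated, followed by a single terminating 0x00 byte.
	ancestor := []byte("Parent,1\x00")
	descendant := []byte("Parent,1Child,7\x00")
	sibling := []byte("Parent,2\x00")

	// With the terminator kept, the prefix matches ONLY the exact key.
	fmt.Println(bytes.HasPrefix(descendant, ancestor)) // false
	fmt.Println(bytes.HasPrefix(ancestor, ancestor))   // true

	// Chopping the terminator, as generate does, widens the range to the
	// ancestor itself plus everything underneath it, but not siblings.
	chopped := ancestor[:len(ancestor)-1]
	fmt.Println(bytes.HasPrefix(descendant, chopped)) // true
	fmt.Println(bytes.HasPrefix(ancestor, chopped))   // true
	fmt.Println(bytes.HasPrefix(sibling, chopped))    // false
}
```
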
| (...skipping 84 matching lines...) |
| 451 continue | 451 continue |
| 452 } | 452 } |
| 453 ret.constraints[prop] = bvals | 453 ret.constraints[prop] = bvals |
| 454 } | 454 } |
| 455 return ret | 455 return ret |
| 456 } | 456 } |
| 457 | 457 |
| 458 // getIndexes returns a set of iterator definitions. Iterating over these | 458 // getIndexes returns a set of iterator definitions. Iterating over these |
| 459 // will result in matching suffixes. | 459 // will result in matching suffixes. |
| 460 func getIndexes(q *reducedQuery, s *memStore) ([]*iterDefinition, error) { | 460 func getIndexes(q *reducedQuery, s *memStore) ([]*iterDefinition, error) { |
| 461 » relevantIdxs := IndexDefinitionSortableSlice(nil) | 461 » relevantIdxs := indexDefinitionSortableSlice(nil) |
| 462 if q.kind == "" { | 462 if q.kind == "" { |
| 463 if coll := s.GetCollection("ents:" + q.ns); coll != nil { | 463 if coll := s.GetCollection("ents:" + q.ns); coll != nil { |
| 464 » » » relevantIdxs = IndexDefinitionSortableSlice{{coll: coll}} | 464 » » » relevantIdxs = indexDefinitionSortableSlice{{coll: coll}} |
| 465 } | 465 } |
| 466 } else { | 466 } else { |
| 467 err := error(nil) | 467 err := error(nil) |
| 468 relevantIdxs, err = getRelevantIndexes(q, s) | 468 relevantIdxs, err = getRelevantIndexes(q, s) |
| 469 if err != nil { | 469 if err != nil { |
| 470 return nil, err | 470 return nil, err |
| 471 } | 471 } |
| 472 } | 472 } |
| 473 if len(relevantIdxs) == 0 { | 473 if len(relevantIdxs) == 0 { |
| 474 » » return nil, errQueryDone | 474 » » return nil, ds.ErrNullQuery |
| 475 } | 475 } |
| 476 | 476 |
| 477   // This sorts it so that relevantIdxs goes less filters -> more filters. We | 477   // This sorts it so that relevantIdxs goes less filters -> more filters. We |
| 478   // traverse this list backwards, however, so we traverse it in more filters -> | 478   // traverse this list backwards, however, so we traverse it in more filters -> |
| 479 // less filters order. | 479 // less filters order. |
| 480 sort.Sort(relevantIdxs) | 480 sort.Sort(relevantIdxs) |
| 481 | 481 |
| 482 constraints := calculateConstraints(q) | 482 constraints := calculateConstraints(q) |
| 483 | 483 |
| 484 ret := []*iterDefinition{} | 484 ret := []*iterDefinition{} |
| 485 for !constraints.empty() || len(ret) == 0 { | 485 for !constraints.empty() || len(ret) == 0 { |
| 486 » » bestIdx := (*IndexDefinitionSortable)(nil) | 486 » » bestIdx := (*indexDefinitionSortable)(nil) |
| 487 if len(ret) == 0 { | 487 if len(ret) == 0 { |
| 488       // if ret is empty, take the biggest relevantIdx. It's guaranteed to have | 488       // if ret is empty, take the biggest relevantIdx. It's guaranteed to have |
| 489       // the greatest number of equality filters of any index in the list, and | 489       // the greatest number of equality filters of any index in the list, and |
| 490       // we know that every equality filter will be pulled from constraints and | 490       // we know that every equality filter will be pulled from constraints and |
| 491 // not residual. | 491 // not residual. |
| 492 // | 492 // |
| 493       // This also takes care of the case when the query has no equality filters, | 493       // This also takes care of the case when the query has no equality filters, |
| 494       // in which case relevantIdxs will actually only contain one index anyway | 494       // in which case relevantIdxs will actually only contain one index anyway |
| 495 // :) | 495 // :) |
| 496 bestIdx = &relevantIdxs[len(relevantIdxs)-1] | 496 bestIdx = &relevantIdxs[len(relevantIdxs)-1] |
| 497 if bestIdx.coll == nil { | 497 if bestIdx.coll == nil { |
| 498 » » » » return nil, errQueryDone | 498 » » » » return nil, ds.ErrNullQuery |
| 499 } | 499 } |
| 500 } else { | 500 } else { |
| 501       // If ret's not empty, then we need to find the best index we can. The | 501       // If ret's not empty, then we need to find the best index we can. The |
| 502       // best index will be the one with the most matching equality columns. | 502       // best index will be the one with the most matching equality columns. |
| 503       // Since relevantIdxs is sorted primarially by the number of equality | 503       // Since relevantIdxs is sorted primarially by the number of equality |
| 504       // columns, we walk down the list until the number of possible columns is | 504       // columns, we walk down the list until the number of possible columns is |
| 505 // worse than our best-so-far. | 505 // worse than our best-so-far. |
| 506 // | 506 // |
| 507       // Traversing the list backwards goes from more filters -> less filters, | 507       // Traversing the list backwards goes from more filters -> less filters, |
| 508       // but also allows us to remove items from the list as we iterate over it. | 508       // but also allows us to remove items from the list as we iterate over it. |
| (...skipping 22 matching lines...) |
| 531 if bestIdx == nil { | 531 if bestIdx == nil { |
| 532       // something is really wrong here... if relevantIdxs is !nil, then we | 532       // something is really wrong here... if relevantIdxs is !nil, then we |
| 533 // should always be able to make progress in this loop. | 533 // should always be able to make progress in this loop. |
| 534 impossible(fmt.Errorf("deadlock: cannot fulfil query?")) | 534 impossible(fmt.Errorf("deadlock: cannot fulfil query?")) |
| 535 } | 535 } |
| 536 ret = append(ret, generate(q, bestIdx, constraints)) | 536 ret = append(ret, generate(q, bestIdx, constraints)) |
| 537 } | 537 } |
| 538 | 538 |
| 539 return ret, nil | 539 return ret, nil |
| 540 } | 540 } |
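
The loop in getIndexes above is a greedy cover: the first pick is the last (richest) sorted candidate, and every later pass scans from the back for the candidate whose equality columns hit the most still-unconsumed constraints (via numEqHits), generating one iterDefinition per pick until the constraint set is empty. The sketch below shows that selection shape with plain property lists instead of real indexes and constraints; the names are illustrative only.

```go
package main

import "fmt"

// pickGreedy sketches the selection loop in getIndexes: while constrained
// properties remain, pick the candidate that covers the most of them, consume
// those properties, and record the pick. The real code scores candidates with
// numEqHits and emits an iterDefinition for each pick.
func pickGreedy(remaining map[string]bool, candidates map[string][]string) []string {
	var picks []string
	for len(remaining) > 0 {
		bestName, bestHits := "", -1
		for name, props := range candidates {
			hits := 0
			for _, p := range props {
				if remaining[p] {
					hits++
				}
			}
			if hits > bestHits {
				bestName, bestHits = name, hits
			}
		}
		if bestHits <= 0 {
			// No candidate makes progress; the real code treats this state as
			// impossible() because relevantIdxs always covers the constraints.
			break
		}
		for _, p := range candidates[bestName] {
			delete(remaining, p)
		}
		picks = append(picks, bestName)
	}
	return picks
}

func main() {
	remaining := map[string]bool{"tag": true, "owner": true, "state": true}
	candidates := map[string][]string{
		"idx:Kind/tag/owner": {"tag", "owner"},
		"idx:Kind/state":     {"state"},
		"idx:Kind/tag":       {"tag"},
	}
	fmt.Println(pickGreedy(remaining, candidates)) // [idx:Kind/tag/owner idx:Kind/state]
}
```
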