Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(147)

Side by Side Diff: impl/memory/datastore_index_selection.go

Issue 1309803004: Add transaction buffer filter. (Closed) Base URL: https://github.com/luci/gae.git@add_query_support
Patch Set: make data flow clearer, implement Count Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 package memory 5 package memory
6 6
7 import ( 7 import (
8 "bytes" 8 "bytes"
9 "fmt" 9 "fmt"
10 "sort" 10 "sort"
(...skipping 18 matching lines...) Expand all
29 } 29 }
30 return fmt.Sprintf( 30 return fmt.Sprintf(
31 "Insufficient indexes. Consider adding:\n%s", yaml) 31 "Insufficient indexes. Consider adding:\n%s", yaml)
32 } 32 }
33 33
34 // reducedQuery contains only the pieces of the query necessary to iterate for 34 // reducedQuery contains only the pieces of the query necessary to iterate for
35 // results. 35 // results.
36 // deduplication is applied externally 36 // deduplication is applied externally
37 // projection / keysonly / entity retrieval is done externally 37 // projection / keysonly / entity retrieval is done externally
38 type reducedQuery struct { 38 type reducedQuery struct {
39 aid string
39 ns string 40 ns string
40 kind string 41 kind string
41 42
42 // eqFilters indicate the set of all prefix constraints which need to be 43 // eqFilters indicate the set of all prefix constraints which need to be
43 // fulfilled in the composite query. All of these will translate into prefix 44 // fulfilled in the composite query. All of these will translate into prefix
44 // bytes for SOME index. 45 // bytes for SOME index.
45 eqFilters map[string]stringset.Set 46 eqFilters map[string]stringset.Set
46 47
47 // suffixFormat is the PRECISE listing of the suffix columns that ALL indexes 48 // suffixFormat is the PRECISE listing of the suffix columns that ALL indexes
48 // in the multi query will have. 49 // in the multi query will have.
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after
160 for i, sb := range sortBy[numEqFilts:] { 161 for i, sb := range sortBy[numEqFilts:] {
161 if q.suffixFormat[i] != sb { 162 if q.suffixFormat[i] != sb {
162 return false 163 return false
163 } 164 }
164 } 165 }
165 166
166 if id.Builtin() && numEqFilts == 0 { 167 if id.Builtin() && numEqFilts == 0 {
167 if len(q.eqFilters) > 1 || (len(q.eqFilters) == 1 && q.eqFilters ["__ancestor__"] == nil) { 168 if len(q.eqFilters) > 1 || (len(q.eqFilters) == 1 && q.eqFilters ["__ancestor__"] == nil) {
168 return false 169 return false
169 } 170 }
171 if len(sortBy) > 1 && q.eqFilters["__ancestor__"] != nil {
172 return false
173 }
170 } 174 }
171 175
172 // Make sure the equalities section doesn't contain any properties we don't 176 // Make sure the equalities section doesn't contain any properties we don't
173 // want in our query. 177 // want in our query.
174 // 178 //
175 // numByProp && totalEqFilts will be used to see if this is a perfect match 179 // numByProp && totalEqFilts will be used to see if this is a perfect match
176 // later. 180 // later.
177 numByProp := make(map[string]int, len(q.eqFilters)) 181 numByProp := make(map[string]int, len(q.eqFilters))
178 totalEqFilts := 0 182 totalEqFilts := 0
179 183
(...skipping 187 matching lines...) Expand 10 before | Expand all | Expand 10 after
367 // anything which is a descendant or an exact match. Removing the last byte 371 // anything which is a descendant or an exact match. Removing the last byte
368 // from the key (the terminating null) allows this trick to work. Otherwise 372 // from the key (the terminating null) allows this trick to work. Otherwise
369 // it would be a closed range of EXACTLY this key. 373 // it would be a closed range of EXACTLY this key.
370 chopped := []byte(anc[:len(anc)-1]) 374 chopped := []byte(anc[:len(anc)-1])
371 if q.suffixFormat[0].Descending { 375 if q.suffixFormat[0].Descending {
372 chopped = serialize.Invert(chopped) 376 chopped = serialize.Invert(chopped)
373 } 377 }
374 def.prefix = serialize.Join(def.prefix, chopped) 378 def.prefix = serialize.Join(def.prefix, chopped)
375 379
376 // Update start and end, since we know that if they contain anything, they 380 // Update start and end, since we know that if they contain anything, they
377 » » // contain values for the __key__ field. 381 » » // contain values for the __key__ field. This is necessary because bytes
382 » » // are shifting from the suffix to the prefix, and start/end should only
383 » » // contain suffix (variable) bytes.
378 if def.start != nil { 384 if def.start != nil {
379 » » » offset := 0 385 » » » if !bytes.HasPrefix(def.start, chopped) {
380 » » » if len(q.suffixFormat) > 1 {
381 » » » » chunks, _ := parseSuffix(q.ns, q.suffixFormat, def.start, 1)
382 » » » » offset = len(chunks[0])
383 » » » }
iannucci 2015/09/29 04:43:27 So it turned out that these were just wrong. These
384 » » » if !bytes.HasPrefix(def.start[offset:], chopped) {
385 // again, shouldn't happen, but if it does, we want to know about it. 386 // again, shouldn't happen, but if it does, we want to know about it.
386 impossible(fmt.Errorf( 387 impossible(fmt.Errorf(
387 "start suffix for implied ancestor doesn't start with ancestor! start:%v ancestor:%v", 388 "start suffix for implied ancestor doesn't start with ancestor! start:%v ancestor:%v",
388 def.start, chopped)) 389 def.start, chopped))
389 } 390 }
390 » » » def.start = def.start[:offset+len(chopped)] 391 » » » def.start = def.start[len(chopped):]
391 } 392 }
392 if def.end != nil { 393 if def.end != nil {
393 » » » offset := 0 394 » » » if !bytes.HasPrefix(def.end, chopped) {
394 » » » if len(q.suffixFormat) > 1 {
395 » » » » chunks, _ := parseSuffix(q.ns, q.suffixFormat, def.end, 1)
396 » » » » offset = len(chunks[0])
397 » » » }
398 » » » if !bytes.HasPrefix(def.end[offset:], chopped) {
399 impossible(fmt.Errorf( 395 impossible(fmt.Errorf(
400 "end suffix for implied ancestor doesn't start with ancestor! end:%v ancestor:%v", 396 "end suffix for implied ancestor doesn't start with ancestor! end:%v ancestor:%v",
401 def.end, chopped)) 397 def.end, chopped))
402 } 398 }
403 » » » def.end = def.end[:offset+len(chopped)] 399 » » » def.end = def.end[len(chopped):]
404 } 400 }
405 } 401 }
406 402
407 return def 403 return def
408 } 404 }
409 405
410 type constraints struct { 406 type constraints struct {
411 constraints map[string][][]byte 407 constraints map[string][][]byte
412 original map[string][][]byte 408 original map[string][][]byte
413 residualMapping map[string]int 409 residualMapping map[string]int
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after
546 if bestIdx == nil { 542 if bestIdx == nil {
547 // something is really wrong here... if relevantIdxs is !nil, then we 543 // something is really wrong here... if relevantIdxs is !nil, then we
548 // should always be able to make progress in this loop. 544 // should always be able to make progress in this loop.
549 impossible(fmt.Errorf("deadlock: cannot fulfil query?")) 545 impossible(fmt.Errorf("deadlock: cannot fulfil query?"))
550 } 546 }
551 ret = append(ret, generate(q, bestIdx, constraints)) 547 ret = append(ret, generate(q, bestIdx, constraints))
552 } 548 }
553 549
554 return ret, nil 550 return ret, nil
555 } 551 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698