Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(195)

Side by Side Diff: impl/memory/datastore_query.go

Issue 1355783002: Refactor keys and queries in datastore service and implementation. (Closed) Base URL: https://github.com/luci/gae.git@master
Patch Set: appease errcheck Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « impl/memory/datastore_index_test.go ('k') | impl/memory/datastore_query_execution.go » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 package memory 5 package memory
6 6
7 import ( 7 import (
8 "bytes" 8 "bytes"
9 "encoding/base64" 9 "encoding/base64"
10 "errors" 10 "errors"
11 "fmt" 11 "fmt"
12 "math"
13 "strings"
14 12
15 ds "github.com/luci/gae/service/datastore" 13 ds "github.com/luci/gae/service/datastore"
16 "github.com/luci/gae/service/datastore/serialize" 14 "github.com/luci/gae/service/datastore/serialize"
17 "github.com/luci/luci-go/common/cmpbin" 15 "github.com/luci/luci-go/common/cmpbin"
18 "github.com/luci/luci-go/common/stringset" 16 "github.com/luci/luci-go/common/stringset"
19 ) 17 )
20 18
21 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver. 19 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver.
22 // No idea if it's a real limit or just a convenience in the current dev 20 // No idea if it's a real limit or just a convenience in the current dev
23 // appserver implementation. 21 // appserver implementation.
24 const MaxQueryComponents = 100 22 const MaxQueryComponents = 100
25 23
26 var errQueryDone = errors.New("query is done") 24 // MaxIndexColumns is the maximum number of index columns we're willing to
27 25 // support.
28 type queryOp int 26 const MaxIndexColumns = 64
29
30 const (
31 » qInvalid queryOp = iota
32 » qEqual
33 » qLessThan
34 » qLessEq
35 » qGreaterEq
36 » qGreaterThan
37 )
38
39 var queryOpMap = map[string]queryOp{
40 » "=": qEqual,
41 » "<": qLessThan,
42 » "<=": qLessEq,
43 » ">=": qGreaterEq,
44 » ">": qGreaterThan,
45 }
46
47 type queryFilter struct {
48 » prop string
49 » op queryOp
50 » value interface{}
51 }
52
53 func parseFilter(f string) (prop string, op queryOp, err error) {
54 » toks := strings.SplitN(strings.TrimSpace(f), " ", 2)
55 » if len(toks) != 2 {
56 » » err = errors.New("datastore: invalid filter: " + f)
57 » } else {
58 » » op = queryOpMap[toks[1]]
59 » » if op == qInvalid {
60 » » » err = fmt.Errorf("datastore: invalid operator %q in filt er %q", toks[1], f)
61 » » } else {
62 » » » prop = toks[0]
63 » » }
64 » }
65 » return
66 }
67 27
68 // A queryCursor is: 28 // A queryCursor is:
69 // {#orders} ++ IndexColumn* ++ RawRowData 29 // {#orders} ++ IndexColumn* ++ RawRowData
70 // IndexColumn will always contain __key__ as the last column, and so #orders 30 // IndexColumn will always contain __key__ as the last column, and so #orders
71 // must always be >= 1 31 // must always be >= 1
72 type queryCursor []byte 32 type queryCursor []byte
73 33
74 func newCursor(s string) (ds.Cursor, error) { 34 func newCursor(s string) (ds.Cursor, error) {
75 d, err := base64.URLEncoding.DecodeString(s) 35 d, err := base64.URLEncoding.DecodeString(s)
76 if err != nil { 36 if err != nil {
(...skipping 10 matching lines...) Expand all
87 47
88 // decode returns the encoded IndexColumns, the raw row (cursor) data, or an 48 // decode returns the encoded IndexColumns, the raw row (cursor) data, or an
89 // error. 49 // error.
90 func (q queryCursor) decode() ([]ds.IndexColumn, []byte, error) { 50 func (q queryCursor) decode() ([]ds.IndexColumn, []byte, error) {
91 buf := bytes.NewBuffer([]byte(q)) 51 buf := bytes.NewBuffer([]byte(q))
92 count, _, err := cmpbin.ReadUint(buf) 52 count, _, err := cmpbin.ReadUint(buf)
93 if err != nil { 53 if err != nil {
94 return nil, nil, fmt.Errorf("invalid cursor: bad prefix number") 54 return nil, nil, fmt.Errorf("invalid cursor: bad prefix number")
95 } 55 }
96 56
97 » if count == 0 || count > ds.MaxIndexColumns { 57 » if count == 0 || count > MaxIndexColumns {
98 return nil, nil, fmt.Errorf("invalid cursor: bad column count %d ", count) 58 return nil, nil, fmt.Errorf("invalid cursor: bad column count %d ", count)
99 } 59 }
100 60
101 if count == 0 { 61 if count == 0 {
102 return nil, nil, fmt.Errorf("invalid cursor: zero prefix number" ) 62 return nil, nil, fmt.Errorf("invalid cursor: zero prefix number" )
103 } 63 }
104 64
105 cols := make([]ds.IndexColumn, count) 65 cols := make([]ds.IndexColumn, count)
106 for i := range cols { 66 for i := range cols {
107 if cols[i], err = serialize.ReadIndexColumn(buf); err != nil { 67 if cols[i], err = serialize.ReadIndexColumn(buf); err != nil {
108 return nil, nil, fmt.Errorf("invalid cursor: unable to d ecode IndexColumn %d: %s", i, err) 68 return nil, nil, fmt.Errorf("invalid cursor: unable to d ecode IndexColumn %d: %s", i, err)
109 } 69 }
110 } 70 }
111 71
112 if cols[len(cols)-1].Property != "__key__" { 72 if cols[len(cols)-1].Property != "__key__" {
113 return nil, nil, fmt.Errorf("invalid cursor: last column was not __key__: %v", cols[len(cols)-1]) 73 return nil, nil, fmt.Errorf("invalid cursor: last column was not __key__: %v", cols[len(cols)-1])
114 } 74 }
115 75
116 return cols, buf.Bytes(), nil 76 return cols, buf.Bytes(), nil
117 } 77 }
118 78
119 type queryIneqFilter struct {
120 prop string
121
122 start []byte
123 end []byte
124 }
125
126 // constrain 'folds' a new inequality into the current inequality filter.
127 //
128 // It will bump the end bound down, or the start bound up, assuming the incoming
129 // constraint does so.
130 //
131 // It returns true iff the filter is overconstrained (i.e. start > end)
132 func (q *queryIneqFilter) constrain(op queryOp, val []byte) bool {
133 switch op {
134 case qLessEq:
135 val = increment(val)
136 fallthrough
137 case qLessThan:
138 // adjust upper bound downwards
139 if q.end == nil || bytes.Compare(q.end, val) > 0 {
140 q.end = val
141 }
142
143 case qGreaterThan:
144 val = increment(val)
145 fallthrough
146 case qGreaterEq:
147 // adjust lower bound upwards
148 if q.start == nil || bytes.Compare(q.start, val) < 0 {
149 q.start = val
150 }
151
152 default:
153 impossible(fmt.Errorf("constrain cannot handle filter op %d", op ))
154 }
155
156 if q.start != nil && q.end != nil {
157 return bytes.Compare(q.start, q.end) >= 0
158 }
159 return false
160 }
161
162 type queryImpl struct {
163 ns string
164
165 kind string
166
167 // prop -> encoded values (which are ds.Property objects)
168 // "__ancestor__" is the key for Ancestor queries.
169 eqFilters map[string]stringset.Set
170 ineqFilter queryIneqFilter
171 order []ds.IndexColumn
172 startCursor []byte
173 startCursorColumns []ds.IndexColumn
174 endCursor []byte
175 endCursorColumns []ds.IndexColumn
176
177 // All of these are applied in post (e.g. not during the native index sc an).
178 distinct bool
179 eventualConsistency bool
180 keysOnly bool
181 limitSet bool
182 limit int32
183 offset int32
184 project []string
185
186 err error
187 }
188
189 var _ ds.Query = (*queryImpl)(nil)
190
191 func sortOrdersEqual(as, bs []ds.IndexColumn) bool { 79 func sortOrdersEqual(as, bs []ds.IndexColumn) bool {
192 if len(as) != len(bs) { 80 if len(as) != len(bs) {
193 return false 81 return false
194 } 82 }
195 for i, a := range as { 83 for i, a := range as {
196 if a != bs[i] { 84 if a != bs[i] {
197 return false 85 return false
198 } 86 }
199 } 87 }
200 return true 88 return true
201 } 89 }
202 90
203 func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) { 91 func numComponents(fq *ds.FinalizedQuery) int {
204 » if q.err != nil { 92 » numComponents := len(fq.Orders())
205 » » return nil, q.err 93 » if p, _, _ := fq.IneqFilterLow(); p != "" {
94 » » numComponents++
206 } 95 }
207 » if ns != q.ns { 96 » if p, _, _ := fq.IneqFilterHigh(); p != "" {
208 » » return nil, errors.New( 97 » » numComponents++
209 » » » "gae/memory: Namespace mismatched. Query and Datastore d on't agree " +
210 » » » » "on the current namespace")
211 } 98 }
212 » if isTxn && q.eqFilters["__ancestor__"] == nil { 99 » for _, v := range fq.EqFilters() {
213 » » return nil, errors.New( 100 » » numComponents += v.Len()
214 » » » "gae/memory: Only ancestor queries are allowed inside tr ansactions")
215 } 101 }
216 » if q.numComponents() > MaxQueryComponents { 102 » return numComponents
103 }
104
105 func reduce(fq *ds.FinalizedQuery, ns string, isTxn bool) (*reducedQuery, error) {
106 » if err := fq.Valid(globalAppID, ns); err != nil {
107 » » return nil, err
108 » }
109 » if isTxn && fq.Ancestor() == nil {
110 » » return nil, fmt.Errorf("queries within a transaction must includ e an Ancestor filter")
111 » }
112 » if num := numComponents(fq); num > MaxQueryComponents {
217 return nil, fmt.Errorf( 113 return nil, fmt.Errorf(
218 "gae/memory: query is too large. may not have more than "+ 114 "gae/memory: query is too large. may not have more than "+
219 "%d filters + sort orders + ancestor total: had %d", 115 "%d filters + sort orders + ancestor total: had %d",
220 » » » MaxQueryComponents, q.numComponents()) 116 » » » MaxQueryComponents, num)
221 } 117 }
222 » if len(q.project) == 0 && q.distinct { 118
223 » » // This must be delayed, because q.Distinct().Project("foo") is a valid 119 » ret := &reducedQuery{
224 » » // construction. If we checked this in Distinct, it could be too early, and 120 » » ns: ns,
225 » » // checking it in Project doesn't matter. 121 » » kind: fq.Kind(),
226 » » return nil, errors.New( 122 » » suffixFormat: fq.Orders(),
227 » » » "gae/memory: Distinct() only makes sense on projection q ueries.")
228 } 123 }
229 » if q.eqFilters["__ancestor__"] != nil && q.ineqFilter.prop == "__key__" { 124
230 » » ancS, _ := q.eqFilters["__ancestor__"].Peek() 125 » eqFilts := fq.EqFilters()
231 » » anc := []byte(ancS[:len(ancS)-1]) 126 » ret.eqFilters = make(map[string]stringset.Set, len(eqFilts))
232 » » if q.ineqFilter.start != nil && !bytes.HasPrefix(q.ineqFilter.st art, anc) { 127 » for prop, vals := range eqFilts {
233 » » » return nil, errors.New( 128 » » sVals := stringset.New(len(vals))
234 » » » » "gae/memory: __key__ inequality filter has a val ue outside of Ancestor()") 129 » » for _, v := range vals {
130 » » » sVals.Add(string(serialize.ToBytes(v)))
235 } 131 }
236 » » if q.ineqFilter.end != nil && !bytes.HasPrefix(q.ineqFilter.end, anc) { 132 » » ret.eqFilters[prop] = sVals
237 » » » return nil, errors.New( 133 » }
238 » » » » "gae/memory: __key__ inequality filter has a val ue outside of Ancestor()") 134
135 » // Pick up the start/end range from the inequalities, if any.
136 » //
137 » // start and end in the reducedQuery are normalized so that `start >=
138 » // X < end`. Because of that, we need to tweak the inequality filters
139 » // contained in the query if they use the > or <= operators.
140 » startD := []byte(nil)
141 » endD := []byte(nil)
142 » if ineqProp := fq.IneqFilterProp(); ineqProp != "" {
143 » » _, startOp, startV := fq.IneqFilterLow()
144 » » if startOp != "" {
145 » » » startD = serialize.ToBytes(startV)
146 » » » if startOp == ">" {
147 » » » » startD = increment(startD)
148 » » » }
149 » » }
150
151 » » _, endOp, endV := fq.IneqFilterHigh()
152 » » if endOp != "" {
153 » » » endD = serialize.ToBytes(endV)
154 » » » if endOp == "<=" {
155 » » » » endD = increment(endD)
156 » » » }
157 » » }
158
159 » » // The inequality is specified in natural (ascending) order in t he query's
160 » » // Filter syntax, but the order information may indicate to use a descending
161 » » // index column for it. If that's the case, then we must invert, swap and
162 » » // increment the inequality endpoints.
163 » » //
164 » » // Invert so that the desired numbers are represented correctly in the index.
165 » » // Swap so that our iterators still go from >= start to < end.
166 » » // Increment so that >= and < get correctly bounded (since the i terator is
167 » » // still using natural bytes ordering)
168 » » if ret.suffixFormat[0].Descending {
169 » » » hi, lo := []byte(nil), []byte(nil)
170 » » » if len(startD) > 0 {
171 » » » » lo = increment(invert(startD))
172 » » » }
173 » » » if len(endD) > 0 {
174 » » » » hi = increment(invert(endD))
175 » » » }
176 » » » endD, startD = lo, hi
239 } 177 }
240 } 178 }
241 179
242 ret := &reducedQuery{
243 ns: q.ns,
244 kind: q.kind,
245 eqFilters: q.eqFilters,
246 suffixFormat: q.order,
247 }
248
249 // if len(q.suffixFormat) > 0, queryImpl already enforces that the first order
250 // is the same as the inequality. Otherwise we need to add it.
251 if len(ret.suffixFormat) == 0 && q.ineqFilter.prop != "" {
252 ret.suffixFormat = []ds.IndexColumn{{Property: q.ineqFilter.prop }}
253 }
254
255 // The inequality is specified in natural (ascending) order in the query 's
256 // Filter syntax, but the order information may indicate to use a descen ding
257 // index column for it. If that's the case, then we must invert, swap an d
258 // increment the inequality endpoints.
259 //
260 // Invert so that the desired numbers are represented correctly in the i ndex.
261 // Swap so that our iterators still go from >= start to < end.
262 // Increment so that >= and < get correctly bounded (since the iterator is
263 // still using natural bytes ordering
264 if q.ineqFilter.prop != "" && ret.suffixFormat[0].Direction == ds.DESCEN DING {
265 hi, lo := []byte(nil), []byte(nil)
266 if len(q.ineqFilter.end) > 0 {
267 hi = increment(invert(q.ineqFilter.end))
268 }
269 if len(q.ineqFilter.start) > 0 {
270 lo = increment(invert(q.ineqFilter.start))
271 }
272 q.ineqFilter.end, q.ineqFilter.start = lo, hi
273 }
274
275 // Add any projection columns not mentioned in the user-defined order as
276 // ASCENDING orders. Technically we could be smart and automatically use
277 // a DESCENDING ordered index, if it fit, but the logic gets insane, sin ce all
278 // suffixes of all used indexes need to be PRECISELY equal (and so you'd have
279 // to hunt/invalidate/something to find the combination of indexes that are
280 // compatible with each other as well as the query). If you want to use
281 // a DESCENDING column, just add it to the user sort order, and this loo p will
282 // not synthesize a new suffix entry for it.
283 //
284 // NOTE: if you want to use an index that sorts by -__key__, you MUST
285 // include all of the projected fields for that index in the order expli citly.
286 // Otherwise the generated suffixFormat will be wacky. So:
287 // Query("Foo").Project("A", "B").Order("A").Order("-__key__")
288 //
289 // will turn into a suffixFormat of:
290 // A, ASCENDING
291 // __key__, DESCENDING
292 // B, ASCENDING
293 // __key__, ASCENDING
294 //
295 // To prevent this, your query should have another Order("B") clause bef ore
296 // the -__key__ clause.
297 originalStop := len(ret.suffixFormat)
298 for _, p := range q.project {
299 needAdd := true
300 // originalStop prevents this loop from getting longer every tim e we add
301 // a projected property.
302 for _, col := range ret.suffixFormat[:originalStop] {
303 if col.Property == p {
304 needAdd = false
305 break
306 }
307 }
308 if needAdd {
309 ret.suffixFormat = append(ret.suffixFormat, ds.IndexColu mn{Property: p})
310 }
311 }
312
313 // If the suffix format ends with __key__ already (e.g. .Order("__key__" )),
314 // then we're good to go. Otherwise we need to add it as the last bit of the
315 // suffix, since all indexes implicitly have it as the last column.
316 if len(ret.suffixFormat) == 0 || ret.suffixFormat[len(ret.suffixFormat)- 1].Property != "__key__" {
317 ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Prope rty: "__key__"})
318 }
319
320 // Now we check the start and end cursors. 180 // Now we check the start and end cursors.
321 // 181 //
322 // Cursors are composed of a list of IndexColumns at the beginning, foll owed 182 // Cursors are composed of a list of IndexColumns at the beginning, foll owed
323 // by the raw bytes to use for the suffix. The cursor is only valid if a ll of 183 // by the raw bytes to use for the suffix. The cursor is only valid if a ll of
324 // its IndexColumns match our proposed suffixFormat, as calculated above . 184 // its IndexColumns match our proposed suffixFormat, as calculated above .
325 » ret.start = q.ineqFilter.start 185 » //
326 » if q.startCursor != nil { 186 » // Cursors are mutually exclusive with the start/end we picked up from t he
327 » » if !sortOrdersEqual(q.startCursorColumns, ret.suffixFormat) { 187 » // inequality. In a well formed query, they indicate a subset of results
328 » » » return nil, errors.New("gae/memory: start cursor is inva lid for this query.") 188 » // bounded by the inequality. Technically if the start cursor is not >= the
189 » // low bound, or the end cursor is < the high bound, it's an error, but for
190 » // simplicity we just cap to the narrowest intersection of the inequalit y and
191 » // cursors.
192 » ret.start = startD
193 » ret.end = endD
194 » if start, end := fq.Bounds(); start != nil || end != nil {
195 » » if start != nil {
196 » » » if c, ok := start.(queryCursor); ok {
197 » » » » startCols, startD, err := c.decode()
198 » » » » if err != nil {
199 » » » » » return nil, err
200 » » » » }
201
202 » » » » if !sortOrdersEqual(startCols, ret.suffixFormat) {
203 » » » » » return nil, errors.New("gae/memory: star t cursor is invalid for this query")
204 » » » » }
205 » » » » if ret.start == nil || bytes.Compare(ret.start, startD) < 0 {
206 » » » » » ret.start = startD
207 » » » » }
208 » » » } else {
209 » » » » return nil, errors.New("gae/memory: bad cursor t ype")
210 » » » }
329 } 211 }
330 if ret.start == nil || bytes.Compare(ret.start, q.startCursor) < 0 {
331 ret.start = q.startCursor
332 }
333 }
334 212
335 » ret.end = q.ineqFilter.end 213 » » if end != nil {
336 » if q.endCursor != nil { 214 » » » if c, ok := end.(queryCursor); ok {
337 » » if !sortOrdersEqual(q.endCursorColumns, ret.suffixFormat) { 215 » » » » endCols, endD, err := c.decode()
338 » » » return nil, errors.New("gae/memory: end cursor is invali d for this query.") 216 » » » » if err != nil {
339 » » } 217 » » » » » return nil, err
340 » » if ret.end == nil || bytes.Compare(q.endCursor, ret.end) < 0 { 218 » » » » }
341 » » » ret.end = q.endCursor 219
220 » » » » if !sortOrdersEqual(endCols, ret.suffixFormat) {
221 » » » » » return nil, errors.New("gae/memory: end cursor is invalid for this query")
222 » » » » }
223 » » » » if ret.end == nil || bytes.Compare(endD, ret.end ) < 0 {
224 » » » » » ret.end = endD
225 » » » » }
226 » » » } else {
227 » » » » return nil, errors.New("gae/memory: bad cursor t ype")
228 » » » }
342 } 229 }
343 } 230 }
344 231
345 // Finally, verify that we could even /potentially/ do work. If we have 232 // Finally, verify that we could even /potentially/ do work. If we have
346 // overlapping range ends, then we don't have anything to do. 233 // overlapping range ends, then we don't have anything to do.
347 if ret.end != nil && bytes.Compare(ret.start, ret.end) >= 0 { 234 if ret.end != nil && bytes.Compare(ret.start, ret.end) >= 0 {
348 » » return nil, errQueryDone 235 » » return nil, ds.ErrNullQuery
349 } 236 }
350 237
351 ret.numCols = len(ret.suffixFormat) 238 ret.numCols = len(ret.suffixFormat)
352 for prop, vals := range ret.eqFilters { 239 for prop, vals := range ret.eqFilters {
353 if len(ret.suffixFormat) == 1 && prop == "__ancestor__" { 240 if len(ret.suffixFormat) == 1 && prop == "__ancestor__" {
354 continue 241 continue
355 } 242 }
356 ret.numCols += vals.Len() 243 ret.numCols += vals.Len()
357 } 244 }
358 245
359 return ret, nil 246 return ret, nil
360 } 247 }
361
362 func (q *queryImpl) numComponents() int {
363 numComponents := len(q.order)
364 if q.ineqFilter.prop != "" {
365 if q.ineqFilter.start != nil {
366 numComponents++
367 }
368 if q.ineqFilter.end != nil {
369 numComponents++
370 }
371 }
372 for _, v := range q.eqFilters {
373 numComponents += v.Len()
374 }
375 return numComponents
376 }
377
378 // checkMutateClone sees if the query has an error. If not, it clones the query,
379 // and assigns the output of `check` to the query error slot. If check returns
380 // nil, it calls `mutate` on the cloned query. The (possibly new) query is then
381 // returned.
382 func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl) ) *queryImpl {
383 if q.err != nil {
384 return q
385 }
386 nq := *q
387 nq.eqFilters = make(map[string]stringset.Set, len(q.eqFilters))
388 for prop, vals := range q.eqFilters {
389 nq.eqFilters[prop] = vals.Dup()
390 }
391 nq.order = make([]ds.IndexColumn, len(q.order))
392 copy(nq.order, q.order)
393 nq.project = make([]string, len(q.project))
394 copy(nq.project, q.project)
395 if check != nil {
396 nq.err = check()
397 }
398 if nq.err == nil {
399 mutate(&nq)
400 }
401 return &nq
402 }
403
404 func (q *queryImpl) Ancestor(k ds.Key) ds.Query {
405 return q.checkMutateClone(
406 func() error {
407 if k == nil {
408 // SDK has an explicit nil-check
409 return errors.New("datastore: nil query ancestor ")
410 }
411 if k.Namespace() != q.ns {
412 return fmt.Errorf("bad namespace: %q (expected % q)", k.Namespace(), q.ns)
413 }
414 if !k.Valid(false, globalAppID, q.ns) {
415 // technically the SDK implementation does a Wei rd Thing (tm) if both the
416 // stringID and intID are set on a key; it only serializes the stringID in
417 // the proto. This means that if you set the Anc estor to an invalid key,
418 // you'll never actually hear about it. Instead of doing that insanity, we
419 // just swap to an error here.
420 return ds.ErrInvalidKey
421 }
422 if q.eqFilters["__ancestor__"] != nil {
423 return errors.New("cannot have more than one anc estor")
424 }
425 return nil
426 },
427 func(q *queryImpl) {
428 q.addEqFilt("__ancestor__", ds.MkProperty(k))
429 })
430 }
431
432 func (q *queryImpl) Distinct() ds.Query {
433 return q.checkMutateClone(nil, func(q *queryImpl) {
434 q.distinct = true
435 })
436 }
437
438 func (q *queryImpl) addEqFilt(prop string, p ds.Property) {
439 binVal := string(serialize.ToBytes(p))
440 if cur, ok := q.eqFilters[prop]; !ok {
441 s := stringset.New(1)
442 s.Add(binVal)
443 q.eqFilters[prop] = s
444 } else {
445 cur.Add(binVal)
446 }
447 }
448
449 func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query {
450 prop := ""
451 op := qInvalid
452 p := ds.Property{}
453 return q.checkMutateClone(
454 func() error {
455 var err error
456 prop, op, err = parseFilter(fStr)
457 if err != nil {
458 return err
459 }
460
461 if q.kind == "" && prop != "__key__" {
462 // https://cloud.google.com/appengine/docs/go/da tastore/queries#Go_Kindless_queries
463 return fmt.Errorf(
464 "kindless queries can only filter on __k ey__, got %q", fStr)
465 }
466
467 err = p.SetValue(val, ds.ShouldIndex)
468 if err != nil {
469 return err
470 }
471
472 if p.Type() == ds.PTKey {
473 if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) {
474 return ds.ErrInvalidKey
475 }
476 }
477
478 if prop == "__key__" {
479 if op == qEqual {
480 return fmt.Errorf(
481 "query equality filter on __key_ _ is silly: %q", fStr)
482 }
483 if p.Type() != ds.PTKey {
484 return fmt.Errorf("__key__ filter value is not a key: %T", val)
485 }
486 } else if strings.HasPrefix(prop, "__") && strings.HasSu ffix(prop, "__") {
487 return fmt.Errorf("filter on reserved property: %q", prop)
488 }
489
490 if op != qEqual {
491 if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop {
492 return fmt.Errorf(
493 "inequality filters on multiple properties: %q and %q",
494 q.ineqFilter.prop, prop)
495 }
496 if len(q.order) > 0 && q.order[0].Property != pr op {
497 return fmt.Errorf(
498 "first sort order must match ine quality filter: %q v %q",
499 q.order[0].Property, prop)
500 }
501 } else {
502 for _, p := range q.project {
503 if p == prop {
504 return fmt.Errorf(
505 "cannot project on field which is used in an equality filter: %q",
506 prop)
507 }
508 }
509 }
510 return err
511 },
512 func(q *queryImpl) {
513 if op == qEqual {
514 // add it to eq filters
515 q.addEqFilt(prop, p)
516
517 // remove it from sort orders.
518 // https://cloud.google.com/appengine/docs/go/da tastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters
519 toRm := -1
520 for i, o := range q.order {
521 if o.Property == prop {
522 toRm = i
523 break
524 }
525 }
526 if toRm >= 0 {
527 q.order = append(q.order[:toRm], q.order [toRm+1:]...)
528 }
529 } else {
530 q.ineqFilter.prop = prop
531 if q.ineqFilter.constrain(op, serialize.ToBytes( p)) {
532 q.err = errQueryDone
533 }
534 }
535 })
536 }
537
538 func (q *queryImpl) Order(prop string) ds.Query {
539 col := ds.IndexColumn{}
540 return q.checkMutateClone(
541 func() error {
542 // check that first order == first inequality.
543 // if order is an equality already, ignore it
544 col.Property = strings.TrimSpace(prop)
545 if strings.HasPrefix(prop, "-") {
546 col.Direction = ds.DESCENDING
547 col.Property = strings.TrimSpace(prop[1:])
548 } else if strings.HasPrefix(prop, "+") {
549 return fmt.Errorf("datastore: invalid order: %q" , prop)
550 }
551 if len(col.Property) == 0 {
552 return errors.New("datastore: empty order")
553 }
554 if len(q.order) == 0 && q.ineqFilter.prop != "" && q.ine qFilter.prop != col.Property {
555 return fmt.Errorf(
556 "first sort order must match inequality filter: %q v %q",
557 prop, q.ineqFilter.prop)
558 }
559 if q.kind == "" && (col.Property != "__key__" || col.Dir ection != ds.ASCENDING) {
560 return fmt.Errorf("invalid order for kindless qu ery: %#v", col)
561 }
562 return nil
563 },
564 func(q *queryImpl) {
565 if _, ok := q.eqFilters[col.Property]; ok {
566 // skip it if it's an equality filter
567 // https://cloud.google.com/appengine/docs/go/da tastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters
568 return
569 }
570 for _, order := range q.order {
571 if order.Property == col.Property {
572 // can't sort by the same order twice
573 return
574 }
575 }
576 q.order = append(q.order, col)
577 })
578 }
579
580 func (q *queryImpl) Project(fieldName ...string) ds.Query {
581 return q.checkMutateClone(
582 func() error {
583 if q.keysOnly {
584 return errors.New("cannot project a keysOnly que ry")
585 }
586 dupCheck := stringset.New(len(fieldName) + len(q.project ))
587 for _, f := range fieldName {
588 if !dupCheck.Add(f) {
589 return fmt.Errorf("cannot project on the same field twice: %q", f)
590 }
591 if f == "" {
592 return errors.New("cannot project on an empty field name")
593 }
594 if f == "__key__" {
595 return fmt.Errorf("cannot project on __k ey__")
596 }
597 if _, ok := q.eqFilters[f]; ok {
598 return fmt.Errorf(
599 "cannot project on field which i s used in an equality filter: %q", f)
600 }
601 for _, p := range q.project {
602 if p == f {
603 return fmt.Errorf("cannot projec t on the same field twice: %q", f)
604 }
605 }
606 }
607 return nil
608 },
609 func(q *queryImpl) {
610 q.project = append(q.project, fieldName...)
611 })
612 }
613
614 func (q *queryImpl) KeysOnly() ds.Query {
615 return q.checkMutateClone(
616 func() error {
617 if len(q.project) != 0 {
618 return errors.New("cannot project a keysOnly que ry")
619 }
620 return nil
621 },
622 func(q *queryImpl) {
623 q.keysOnly = true
624 })
625 }
626
627 func (q *queryImpl) Limit(limit int) ds.Query {
628 return q.checkMutateClone(
629 func() error {
630 // nonsensically... ANY negative value means 'unlimited' . *shakes head*
631 if limit < math.MinInt32 || limit > math.MaxInt32 {
632 return errors.New("datastore: query limit overfl ow")
633 }
634 return nil
635 },
636 func(q *queryImpl) {
637 q.limitSet = true
638 q.limit = int32(limit)
639 })
640 }
641
642 func (q *queryImpl) Offset(offset int) ds.Query {
643 return q.checkMutateClone(
644 func() error {
645 if offset < 0 {
646 return errors.New("datastore: negative query off set")
647 }
648 if offset > math.MaxInt32 {
649 return errors.New("datastore: query offset overf low")
650 }
651 return nil
652 },
653 func(q *queryImpl) {
654 q.offset = int32(offset)
655 })
656 }
657
658 func queryCursorCheck(ns, flavor string, current []byte, newCursor ds.Cursor) ([ ]ds.IndexColumn, []byte, error) {
659 if current != nil {
660 return nil, nil, fmt.Errorf("%s cursor is multiply defined", fla vor)
661 }
662 curs, ok := newCursor.(queryCursor)
663 if !ok {
664 return nil, nil, fmt.Errorf("%s cursor is unknown type: %T", fla vor, curs)
665 }
666 return curs.decode()
667 }
668
669 func (q *queryImpl) Start(c ds.Cursor) ds.Query {
670 cols := []ds.IndexColumn(nil)
671 curs := []byte(nil)
672 return q.checkMutateClone(
673 func() (err error) {
674 cols, curs, err = queryCursorCheck(q.ns, "start", q.star tCursor, c)
675 return
676 },
677 func(q *queryImpl) {
678 q.startCursorColumns = cols
679 q.startCursor = curs
680 })
681 }
682
683 func (q *queryImpl) End(c ds.Cursor) ds.Query {
684 cols := []ds.IndexColumn(nil)
685 curs := queryCursor(nil)
686 return q.checkMutateClone(
687 func() (err error) {
688 cols, curs, err = queryCursorCheck(q.ns, "end", q.endCur sor, c)
689 return
690 },
691 func(q *queryImpl) {
692 q.endCursorColumns = cols
693 q.endCursor = curs
694 })
695 }
696
697 func (q *queryImpl) EventualConsistency() ds.Query {
698 return q.checkMutateClone(
699 nil, func(q *queryImpl) {
700 q.eventualConsistency = true
701 })
702 }
OLDNEW
« no previous file with comments | « impl/memory/datastore_index_test.go ('k') | impl/memory/datastore_query_execution.go » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698