Chromium Code Reviews

Side by Side Diff: impl/memory/datastore_query.go

Issue 1302813003: impl/memory: Implement Queries (Closed) Base URL: https://github.com/luci/gae.git@add_multi_iterator
Patch Set: stringSet everywhere Created 5 years, 3 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 package memory 5 package memory
6 6
7 import ( 7 import (
8 "bytes"
8 "encoding/base64" 9 "encoding/base64"
9 "errors" 10 "errors"
10 "fmt" 11 "fmt"
11 "math" 12 "math"
12 "strings" 13 "strings"
13 14
14 ds "github.com/luci/gae/service/datastore" 15 ds "github.com/luci/gae/service/datastore"
15 "github.com/luci/gae/service/datastore/serialize" 16 "github.com/luci/gae/service/datastore/serialize"
17 "github.com/luci/luci-go/common/cmpbin"
16 ) 18 )
17 19
18 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver. 20 // MaxQueryComponents was lifted from a hard-coded constant in dev_appserver.
19 // No idea if it's a real limit or just a convenience in the current dev 21 // No idea if it's a real limit or just a convenience in the current dev
20 // appserver implementation. 22 // appserver implementation.
21 const MaxQueryComponents = 100 23 const MaxQueryComponents = 100
22 24
23 var errQueryDone = errors.New("query is done") 25 var errQueryDone = errors.New("query is done")
24 26
25 type queryOp int 27 type queryOp int
(...skipping 29 matching lines...)
55 op = queryOpMap[toks[1]] 57 op = queryOpMap[toks[1]]
56 if op == qInvalid { 58 if op == qInvalid {
57 err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f) 59 err = fmt.Errorf("datastore: invalid operator %q in filter %q", toks[1], f)
58 } else { 60 } else {
59 prop = toks[0] 61 prop = toks[0]
60 } 62 }
61 } 63 }
62 return 64 return
63 } 65 }
64 66
65 type queryCursor string 67 // A queryCursor is:
68 // {#orders} ++ IndexColumn* ++ RawRowData
69 // IndexColumn will always contain __key__ as the last column, and so #orders
70 // must always be >= 1
71 type queryCursor []byte
72
73 func newCursor(s string) (ds.Cursor, error) {
74 » d, err := base64.URLEncoding.DecodeString(s)
75 » if err != nil {
76 » » return nil, fmt.Errorf("Failed to Base64-decode cursor: %s", err)
77 » }
78 » c := queryCursor(d)
79 » if _, _, err := c.decode(); err != nil {
80 » » return nil, err
81 » }
82 » return c, nil
83 }
66 84
67 func (q queryCursor) String() string { return base64.URLEncoding.EncodeToString([]byte(q)) } 85 func (q queryCursor) String() string { return base64.URLEncoding.EncodeToString([]byte(q)) }
68 func (q queryCursor) Valid() bool { return q != "" } 86
87 // decode returns the encoded IndexColumns, the raw row (cursor) data, or an
88 // error.
89 func (q queryCursor) decode() ([]ds.IndexColumn, []byte, error) {
90 » buf := bytes.NewBuffer([]byte(q))
91 » count, _, err := cmpbin.ReadUint(buf)
92 » if err != nil {
93 » » return nil, nil, fmt.Errorf("invalid cursor: bad prefix number")
94 » }
95
96 » if count == 0 || count > ds.MaxIndexColumns {
97 » » return nil, nil, fmt.Errorf("invalid cursor: bad column count %d", count)
98 » }
99
100 » if count == 0 {
101 » » return nil, nil, fmt.Errorf("invalid cursor: zero prefix number" )
102 » }
103
104 » cols := make([]ds.IndexColumn, count)
105 » for i := range cols {
106 » » if cols[i], err = serialize.ReadIndexColumn(buf); err != nil {
107 » » » if cols[i], err = serialize.ReadIndexColumn(buf); err != nil {
108 » » » » return nil, nil, fmt.Errorf("invalid cursor: unable to decode IndexColumn %d: %s", i, err)
108 » » }
109 » }
110
111 » if cols[len(cols)-1].Property != "__key__" {
112 » » return nil, nil, fmt.Errorf("invalid cursor: last column was not __key__: %v", cols[len(cols)-1])
113 » }
114
115 » return cols, buf.Bytes(), nil
116 }
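For intuition about the layout documented above, here is a toy, self-contained encoder sketch. It deliberately uses encoding/binary varints and an ad-hoc column encoding as stand-ins for the real cmpbin/serialize helpers, so it only illustrates the shape "{#orders} ++ IndexColumn* ++ RawRowData" and does not produce bytes that the decode() above could actually read.

// Illustrative only: a toy encoder for the cursor layout described above.
// Not wire-compatible with the real queryCursor format.
package main

import (
	"bytes"
	"encoding/base64"
	"encoding/binary"
	"fmt"
)

// indexColumn is a stand-in for ds.IndexColumn.
type indexColumn struct {
	Property   string
	Descending bool
}

func encodeToyCursor(cols []indexColumn, rawRow []byte) string {
	buf := &bytes.Buffer{}
	var tmp [binary.MaxVarintLen64]byte

	// {#orders}: the number of IndexColumns (always >= 1, last is __key__).
	n := binary.PutUvarint(tmp[:], uint64(len(cols)))
	buf.Write(tmp[:n])

	// IndexColumn*: direction byte + length-prefixed property name.
	for _, c := range cols {
		if c.Descending {
			buf.WriteByte(1)
		} else {
			buf.WriteByte(0)
		}
		n = binary.PutUvarint(tmp[:], uint64(len(c.Property)))
		buf.Write(tmp[:n])
		buf.WriteString(c.Property)
	}

	// RawRowData: the raw index-row suffix the iterator should resume at.
	buf.Write(rawRow)
	return base64.URLEncoding.EncodeToString(buf.Bytes())
}

func main() {
	cols := []indexColumn{{Property: "A"}, {Property: "__key__"}}
	fmt.Println(encodeToyCursor(cols, []byte{0x01, 0x02}))
}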
69 117
70 type queryIneqFilter struct { 118 type queryIneqFilter struct {
71 prop string 119 prop string
72 120
73 » low *string 121 » start []byte
74 » high *string 122 » end []byte
75 }
76
77 func decodeCursor(s string) (ds.Cursor, error) {
78 » d, err := base64.URLEncoding.DecodeString(s)
79 » if err != nil {
80 » » return nil, fmt.Errorf("Failed to Base64-decode cursor: %s", err)
81 » }
82 » c := queryCursor(string(d))
83 » if !c.Valid() {
84 » » return nil, errors.New("Decoded cursor is not valid.")
85 » }
86 » return c, nil
87 }
88
89 func increment(bstr string, positive bool) string {
90 » lastIdx := len(bstr) - 1
91 » last := bstr[lastIdx]
92 » if positive {
93 » » if last == 0xFF {
94 » » » return bstr + "\x00"
95 » » }
96 » » return bstr[:lastIdx-1] + string(last+1)
97 » } else {
98 » » if last == 0 {
99 » » » return bstr[:lastIdx-1]
100 » » }
101 » » return bstr[:lastIdx-1] + string(last-1)
102 » }
103 } 123 }
104 124
105 // constrain 'folds' a new inequality into the current inequality filter. 125 // constrain 'folds' a new inequality into the current inequality filter.
106 // 126 //
107 // It will bump the high bound down, or the low bound up, assuming the incoming 127 // It will bump the end bound down, or the start bound up, assuming the incoming
108 // constraint does so. 128 // constraint does so.
109 // 129 //
110 // It returns true iff the filter is overconstrained (i.e. low > high) 130 // It returns true iff the filter is overconstrained (i.e. start > end)
111 func (q *queryIneqFilter) constrain(op queryOp, val string) bool { 131 func (q *queryIneqFilter) constrain(op queryOp, val []byte) bool {
112 switch op { 132 switch op {
133 case qLessEq:
134 val = increment(val)
135 fallthrough
113 case qLessThan: 136 case qLessThan:
114 val = increment(val, true)
115 fallthrough
116 case qLessEq:
117 // adjust upper bound downwards 137 // adjust upper bound downwards
118 » » if q.high == nil || *q.high > val { 138 » » if q.end == nil || bytes.Compare(q.end, val) > 0 {
119 » » » q.high = &val 139 » » » q.end = val
120 } 140 }
121 141
122 case qGreaterThan: 142 case qGreaterThan:
123 » » val = increment(val, false) 143 » » val = increment(val)
124 fallthrough 144 fallthrough
125 case qGreaterEq: 145 case qGreaterEq:
126 // adjust lower bound upwards 146 // adjust lower bound upwards
127 » » if q.low == nil || *q.low < val { 147 » » if q.start == nil || bytes.Compare(q.start, val) < 0 {
128 » » » q.low = &val 148 » » » q.start = val
129 } 149 }
130 150
131 default: 151 default:
132 » » panic(fmt.Errorf("constrain cannot handle filter op %d", op)) 152 » » impossible(fmt.Errorf("constrain cannot handle filter op %d", op))
133 } 153 }
134 154
135 » if q.low != nil && q.high != nil { 155 » if q.start != nil && q.end != nil {
136 » » return *q.low > *q.high 156 » » return bytes.Compare(q.start, q.end) >= 0
137 } 157 }
138 return false 158 return false
139 } 159 }
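constrain calls a one-argument increment helper that is defined elsewhere in this CL. For intuition, here is a minimal sketch with the property constrain relies on (the result is the smallest byte string strictly greater than val under bytes.Compare); the append-a-zero-byte semantics is an assumption for illustration, and the real helper may instead do a byte-carry increment.

// Sketch only (assumed semantics, not this CL's actual helper): appending a
// 0x00 byte yields the immediate successor of val in bytewise ordering.
// constrain uses it to turn "x <= val" into the exclusive bound
// "x < increment(val)" (qLessEq), and "x > val" into the inclusive bound
// "x >= increment(val)" (qGreaterThan).
func increment(val []byte) []byte {
	out := make([]byte, len(val)+1)
	copy(out, val) // the extra trailing byte is already zero
	return out
}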
140 160
141 type queryImpl struct { 161 type queryImpl struct {
142 ns string 162 ns string
143 163
144 » kind string 164 » kind string
145 » ancestor ds.Key 165
146 166 » // prop -> encoded values (which are ds.Property objects)
147 » // prop -> encoded values 167 » // "__ancestor__" is the key for Ancestor queries.
148 » eqFilters map[string]map[string]struct{} 168 » eqFilters map[string]stringSet
149 » ineqFilter queryIneqFilter 169 » ineqFilter queryIneqFilter
150 » order []ds.IndexColumn 170 » order []ds.IndexColumn
151 » project map[string]struct{} 171 » startCursor []byte
152 172 » startCursorColumns []ds.IndexColumn
173 » endCursor []byte
174 » endCursorColumns []ds.IndexColumn
175
176 » // All of these are applied in post (e.g. not during the native index scan).
153 distinct bool 177 distinct bool
154 eventualConsistency bool 178 eventualConsistency bool
155 keysOnly bool 179 keysOnly bool
180 limitSet bool
156 limit int32 181 limit int32
157 offset int32 182 offset int32
158 183 » project []string
159 » start queryCursor
160 » end queryCursor
161 184
162 err error 185 err error
163 } 186 }
164 187
165 var _ ds.Query = (*queryImpl)(nil) 188 var _ ds.Query = (*queryImpl)(nil)
166 189
167 func (q *queryImpl) valid(ns string, isTxn bool) (done bool, err error) { 190 func sortOrdersEqual(as, bs []ds.IndexColumn) bool {
168 » if q.err == errQueryDone { 191 » if len(as) != len(bs) {
169 » » done = true 192 » » return false
170 » } else if q.err != nil { 193 » }
171 » » err = q.err 194 » for i, a := range as {
172 » } else if ns != q.ns { 195 » » if a != bs[i] {
173 » » err = errors.New( 196 » » » return false
197 » » }
198 » }
199 » return true
200 }
201
202 func (q *queryImpl) reduce(ns string, isTxn bool) (*reducedQuery, error) {
203 » if q.err != nil {
204 » » return nil, q.err
205 » }
206 » if ns != q.ns {
207 » » return nil, errors.New(
174 "gae/memory: Namespace mismatched. Query and Datastore don't agree " + 208 "gae/memory: Namespace mismatched. Query and Datastore don't agree " +
175 "on the current namespace") 209 "on the current namespace")
176 » } else if isTxn && q.ancestor == nil { 210 » }
177 » » err = errors.New( 211 » if isTxn && q.eqFilters["__ancestor__"] == nil {
212 » » return nil, errors.New(
178 "gae/memory: Only ancestor queries are allowed inside transactions") 213 "gae/memory: Only ancestor queries are allowed inside transactions")
179 » } else if q.numComponents() > MaxQueryComponents { 214 » }
180 » » err = fmt.Errorf( 215 » if q.numComponents() > MaxQueryComponents {
216 » » return nil, fmt.Errorf(
181 "gae/memory: query is too large. may not have more than "+ 217 "gae/memory: query is too large. may not have more than "+
182 "%d filters + sort orders + ancestor total: had %d", 218 "%d filters + sort orders + ancestor total: had %d",
183 MaxQueryComponents, q.numComponents()) 219 MaxQueryComponents, q.numComponents())
184 » } else if len(q.project) == 0 && q.distinct { 220 » }
221 » if len(q.project) == 0 && q.distinct {
185 // This must be delayed, because q.Distinct().Project("foo") is a valid 222 // This must be delayed, because q.Distinct().Project("foo") is a valid
186 // construction. If we checked this in Distinct, it could be too early, and 223 // construction. If we checked this in Distinct, it could be too early, and
187 // checking it in Project doesn't matter. 224 // checking it in Project doesn't matter.
188 » » err = errors.New( 225 » » return nil, errors.New(
189 "gae/memory: Distinct() only makes sense on projection queries.") 226 "gae/memory: Distinct() only makes sense on projection queries.")
190 } 227 }
191 » return 228 » if q.eqFilters["__ancestor__"] != nil && q.ineqFilter.prop == "__key__" {
229 » » anc := []byte(nil)
230 » » for k := range q.eqFilters["__ancestor__"] {
231 » » » anc = []byte(k)
232 » » » break
233 » » }
234 » » anc = anc[:len(anc)-1]
235 » » if q.ineqFilter.start != nil && !bytes.HasPrefix(q.ineqFilter.start, anc) {
236 » » » return nil, errors.New(
237 » » » » "gae/memory: __key__ inequality filter has a value outside of Ancestor()")
238 » » }
239 » » if q.ineqFilter.end != nil && !bytes.HasPrefix(q.ineqFilter.end, anc) {
240 » » » return nil, errors.New(
241 » » » » "gae/memory: __key__ inequality filter has a value outside of Ancestor()")
242 » » }
243 » }
244
245 » ret := &reducedQuery{
246 » » ns: q.ns,
247 » » kind: q.kind,
248 » » eqFilters: q.eqFilters,
249 » » suffixFormat: q.order,
250 » }
251
252 » // if len(ret.suffixFormat) > 0, queryImpl already enforces that the first order
253 » // is the same as the inequality. Otherwise we need to add it.
254 » if len(ret.suffixFormat) == 0 && q.ineqFilter.prop != "" {
255 » » ret.suffixFormat = []ds.IndexColumn{{Property: q.ineqFilter.prop}}
256 » }
257
258 » // The inequality is specified in natural (ascending) order in the query's
259 » // Filter syntax, but the order information may indicate to use a descending
260 » // index column for it. If that's the case, then we must invert, swap and
261 » // increment the inequality endpoints.
262 » //
263 » // Invert so that the desired numbers are represented correctly in the index.
264 » // Swap so that our iterators still go from >= start to < end.
265 » // Increment so that >= and < get correctly bounded (since the iterator is
266 » // still using natural bytes ordering)
267 » if q.ineqFilter.prop != "" && ret.suffixFormat[0].Direction == ds.DESCENDING {
268 » » hi, lo := []byte(nil), []byte(nil)
269 » » if len(q.ineqFilter.end) > 0 {
270 » » » hi = increment(invert(q.ineqFilter.end))
271 » » }
272 » » if len(q.ineqFilter.start) > 0 {
273 » » » lo = increment(invert(q.ineqFilter.start))
274 » » }
275 » » q.ineqFilter.end, q.ineqFilter.start = lo, hi
276 » }
277
278 » // Add any projection columns not mentioned in the user-defined order as
279 » // ASCENDING orders. Technically we could be smart and automatically use
280 » // a DESCENDING ordered index, if it fit, but the logic gets insane, since all
281 » // suffixes of all used indexes need to be PRECISELY equal (and so you'd have
282 » // to hunt/invalidate/something to find the combination of indexes that are
283 » // compatible with each other as well as the query). If you want to use
284 » // a DESCENDING column, just add it to the user sort order, and this loop will
285 » // not synthesize a new suffix entry for it.
286 » //
287 » // NOTE: if you want to use an index that sorts by -__key__, you MUST
288 » // include all of the projected fields for that index in the order explicitly.
289 » // Otherwise the generated suffixFormat will be wacky. So:
290 » // Query("Foo").Project("A", "B").Order("A").Order("-__key__")
291 » //
292 » // will turn into a suffixFormat of:
293 » // A, ASCENDING
294 » // __key__, DESCENDING
295 » // B, ASCENDING
296 » // __key__, ASCENDING
297 » //
298 » // To prevent this, your query should have another Order("B") clause before
299 » // the -__key__ clause.
300 » originalStop := len(ret.suffixFormat)
301 » for _, p := range q.project {
302 » » needAdd := true
303 » » // originalStop prevents this loop from getting longer every time we add
304 » » // a projected property.
305 » » for _, col := range ret.suffixFormat[:originalStop] {
306 » » » if col.Property == p {
307 » » » » needAdd = false
308 » » » » break
309 » » » }
310 » » }
311 » » if needAdd {
312 » » » ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: p})
313 » » }
314 » }
315
316 » // If the suffix format ends with __key__ already (e.g. .Order("__key__")),
317 » // then we're good to go. Otherwise we need to add it as the last bit of the
318 » // suffix, since all indexes implicitly have it as the last column.
319 » if len(ret.suffixFormat) == 0 || ret.suffixFormat[len(ret.suffixFormat)-1].Property != "__key__" {
320 » » ret.suffixFormat = append(ret.suffixFormat, ds.IndexColumn{Property: "__key__"})
321 » }
322
323 » // Now we check the start and end cursors.
324 » //
325 » // Cursors are composed of a list of IndexColumns at the beginning, followed
326 » // by the raw bytes to use for the suffix. The cursor is only valid if all of
327 » // its IndexColumns match our proposed suffixFormat, as calculated above.
328 » ret.start = q.ineqFilter.start
329 » if q.startCursor != nil {
330 » » if !sortOrdersEqual(q.startCursorColumns, ret.suffixFormat) {
331 » » » return nil, errors.New("gae/memory: start cursor is invalid for this query.")
332 » » }
333 » » if ret.start == nil || bytes.Compare(ret.start, q.startCursor) < 0 {
334 » » » ret.start = q.startCursor
335 » » }
336 » }
337
338 » ret.end = q.ineqFilter.end
339 » if q.endCursor != nil {
340 » » if !sortOrdersEqual(q.endCursorColumns, ret.suffixFormat) {
341 » » » return nil, errors.New("gae/memory: end cursor is invalid for this query.")
342 » » }
343 » » if ret.end == nil || bytes.Compare(q.endCursor, ret.end) < 0 {
344 » » » ret.end = q.endCursor
345 » » }
346 » }
347
348 » // Finally, verify that we could even /potentially/ do work. If we have
349 » // overlapping range ends, then we don't have anything to do.
350 » if ret.end != nil && bytes.Compare(ret.start, ret.end) >= 0 {
351 » » return nil, errQueryDone
352 » }
353
354 » ret.numCols = len(ret.suffixFormat)
355 » for prop, vals := range ret.eqFilters {
356 » » if len(ret.suffixFormat) == 1 && prop == "__ancestor__" {
357 » » » continue
358 » » }
359 » » ret.numCols += len(vals)
360 » }
361
362 » return ret, nil
192 } 363 }
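reduce() returns a *reducedQuery that is defined in another file of this CL. Judging only from the fields populated above, its shape is roughly the following sketch (inferred from usage here, not the actual declaration):

// Inferred from how reduce() fills it in; the real definition lives elsewhere
// in this CL and may carry additional fields or methods.
type reducedQuery struct {
	ns   string
	kind string

	// eqFilters maps a property name (or "__ancestor__") to the set of
	// serialized ds.Property values it must equal.
	eqFilters map[string]stringSet

	// suffixFormat is the ordered tail of every usable index; it always
	// ends with __key__.
	suffixFormat []ds.IndexColumn

	// start/end bound the raw index-row scan, already folded together from
	// the inequality filter and any start/end cursors.
	start []byte
	end   []byte

	// numCols counts the index columns consumed: the suffix columns plus
	// one per equality-filter value.
	numCols int
}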
193 364
194 func (q *queryImpl) numComponents() int { 365 func (q *queryImpl) numComponents() int {
195 numComponents := len(q.order) 366 numComponents := len(q.order)
196 if q.ineqFilter.prop != "" { 367 if q.ineqFilter.prop != "" {
197 » » if q.ineqFilter.low != nil { 368 » » if q.ineqFilter.start != nil {
198 numComponents++ 369 numComponents++
199 } 370 }
200 » » if q.ineqFilter.high != nil { 371 » » if q.ineqFilter.end != nil {
201 numComponents++ 372 numComponents++
202 } 373 }
203 } 374 }
204 for _, v := range q.eqFilters { 375 for _, v := range q.eqFilters {
205 numComponents += len(v) 376 numComponents += len(v)
206 } 377 }
207 if q.ancestor != nil {
208 numComponents++
209 }
210 return numComponents 378 return numComponents
211 } 379 }
212 380
213 func (q *queryImpl) calculateIndex() *ds.IndexDefinition {
214 // as a nod to simplicity in this code, we'll require that a single index
215 // is able to service the entire query. E.g. no zigzag merge joins or
216 // multiqueries. This will mean that the user will need to rely on
217 // dev_appserver to tell them what indices they need for real, and for their
218 // tests they'll need to specify the missing composite indices manually.
219 //
220 // This COULD lead to an exploding indices problem, but we can fix that when
221 // we get to it.
222
223 //sortOrders := []qSortBy{}
224
225 return nil
226 }
227
228 // checkMutateClone sees if the query has an error. If not, it clones the query, 381 // checkMutateClone sees if the query has an error. If not, it clones the query,
229 // and assigns the output of `check` to the query error slot. If check returns 382 // and assigns the output of `check` to the query error slot. If check returns
230 // nil, it calls `mutate` on the cloned query. The (possibly new) query is then 383 // nil, it calls `mutate` on the cloned query. The (possibly new) query is then
231 // returned. 384 // returned.
232 func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl)) *queryImpl { 385 func (q *queryImpl) checkMutateClone(check func() error, mutate func(*queryImpl)) *queryImpl {
233 if q.err != nil { 386 if q.err != nil {
234 return q 387 return q
235 } 388 }
236 nq := *q 389 nq := *q
237 » nq.eqFilters = make(map[string]map[string]struct{}, len(q.eqFilters)) 390 » nq.eqFilters = make(map[string]stringSet, len(q.eqFilters))
238 for prop, vals := range q.eqFilters { 391 for prop, vals := range q.eqFilters {
239 » » nq.eqFilters[prop] = make(map[string]struct{}, len(vals)) 392 » » nq.eqFilters[prop] = vals.dup()
240 » » for v := range vals {
241 » » » nq.eqFilters[prop][v] = struct{}{}
242 » » }
243 } 393 }
244 nq.order = make([]ds.IndexColumn, len(q.order)) 394 nq.order = make([]ds.IndexColumn, len(q.order))
245 copy(nq.order, q.order) 395 copy(nq.order, q.order)
246 » nq.project = make(map[string]struct{}, len(q.project)) 396 » nq.project = make([]string, len(q.project))
247 » for f := range q.project { 397 » copy(nq.project, q.project)
248 » » nq.project[f] = struct{}{}
249 » }
250 if check != nil { 398 if check != nil {
251 nq.err = check() 399 nq.err = check()
252 } 400 }
253 if nq.err == nil { 401 if nq.err == nil {
254 mutate(&nq) 402 mutate(&nq)
255 } 403 }
256 return &nq 404 return &nq
257 } 405 }
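The same clone-or-latch-error pattern drives every builder method below. As a standalone illustration of the shape (hypothetical type and method names, not part of this CL):

// Standalone illustration of the checkMutateClone pattern: validate, clone,
// latch the first error, and only mutate the clone when validation passes.
type builder struct {
	vals []int
	err  error
}

func (b *builder) with(check func() error, mutate func(*builder)) *builder {
	if b.err != nil {
		return b // an earlier error is sticky; later calls become no-ops
	}
	nb := *b
	nb.vals = append([]int(nil), b.vals...) // deep-copy mutable state
	if check != nil {
		nb.err = check()
	}
	if nb.err == nil {
		mutate(&nb)
	}
	return &nb
}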
258 406
259 func (q *queryImpl) Ancestor(k ds.Key) ds.Query { 407 func (q *queryImpl) Ancestor(k ds.Key) ds.Query {
260 return q.checkMutateClone( 408 return q.checkMutateClone(
261 func() error { 409 func() error {
262 if k == nil { 410 if k == nil {
263 // SDK has an explicit nil-check 411 // SDK has an explicit nil-check
264 return errors.New("datastore: nil query ancestor") 412 return errors.New("datastore: nil query ancestor")
265 } 413 }
414 if k.Namespace() != q.ns {
415 return fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), q.ns)
416 }
266 if !k.Valid(false, globalAppID, q.ns) { 417 if !k.Valid(false, globalAppID, q.ns) {
267 // technically the SDK implementation does a Weird Thing (tm) if both the 418 // technically the SDK implementation does a Weird Thing (tm) if both the
268 // stringID and intID are set on a key; it only serializes the stringID in 419 // stringID and intID are set on a key; it only serializes the stringID in
269 // the proto. This means that if you set the Ancestor to an invalid key, 420 // the proto. This means that if you set the Ancestor to an invalid key,
270 // you'll never actually hear about it. Instead of doing that insanity, we 421 // you'll never actually hear about it. Instead of doing that insanity, we
271 // just swap to an error here. 422 // just swap to an error here.
272 return ds.ErrInvalidKey 423 return ds.ErrInvalidKey
273 } 424 }
274 » » » if k.Namespace() != q.ns { 425 » » » if q.eqFilters["__ancestor__"] != nil {
275 » » » » return fmt.Errorf("bad namespace: %q (expected %q)", k.Namespace(), q.ns)
276 » » » }
277 » » » if q.ancestor != nil {
278 return errors.New("cannot have more than one ancestor") 426 return errors.New("cannot have more than one ancestor")
279 } 427 }
280 return nil 428 return nil
281 }, 429 },
282 func(q *queryImpl) { 430 func(q *queryImpl) {
283 » » » q.ancestor = k 431 » » » q.addEqFilt("__ancestor__", ds.MkProperty(k))
284 }) 432 })
285 } 433 }
286 434
287 func (q *queryImpl) Distinct() ds.Query { 435 func (q *queryImpl) Distinct() ds.Query {
288 return q.checkMutateClone(nil, func(q *queryImpl) { 436 return q.checkMutateClone(nil, func(q *queryImpl) {
289 q.distinct = true 437 q.distinct = true
290 }) 438 })
291 } 439 }
292 440
441 func (q *queryImpl) addEqFilt(prop string, p ds.Property) {
442 binVal := string(serialize.ToBytes(p))
443 if cur, ok := q.eqFilters[prop]; !ok {
444 q.eqFilters[prop] = stringSet{binVal: {}}
445 } else {
446 cur.add(binVal)
447 }
448 }
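stringSet (hence this patch set's title, "stringSet everywhere") is defined in another file of the CL. From the uses in this file (the stringSet{binVal: {}} literal, add returning whether the value was new, and dup), it is presumably a thin set type along these lines (a sketch, not the actual declaration):

// Sketch inferred from usage in this file; the real type lives elsewhere in
// this CL and may differ in detail.
type stringSet map[string]struct{}

// add inserts s and reports whether it was newly added.
func (ss stringSet) add(s string) bool {
	if _, ok := ss[s]; ok {
		return false
	}
	ss[s] = struct{}{}
	return true
}

// dup returns an independent copy of the set.
func (ss stringSet) dup() stringSet {
	ret := make(stringSet, len(ss))
	for k := range ss {
		ret[k] = struct{}{}
	}
	return ret
}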
449
293 func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query { 450 func (q *queryImpl) Filter(fStr string, val interface{}) ds.Query {
294 prop := "" 451 prop := ""
295 op := qInvalid 452 op := qInvalid
296 » binVal := "" 453 » p := ds.Property{}
297 return q.checkMutateClone( 454 return q.checkMutateClone(
298 func() error { 455 func() error {
299 var err error 456 var err error
300 prop, op, err = parseFilter(fStr) 457 prop, op, err = parseFilter(fStr)
301 if err != nil { 458 if err != nil {
302 return err 459 return err
303 } 460 }
304 461
305 if q.kind == "" && prop != "__key__" { 462 if q.kind == "" && prop != "__key__" {
306 // https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries 463 // https://cloud.google.com/appengine/docs/go/datastore/queries#Go_Kindless_queries
307 return fmt.Errorf( 464 return fmt.Errorf(
308 "kindless queries can only filter on __key__, got %q", fStr) 465 "kindless queries can only filter on __key__, got %q", fStr)
309 } 466 }
310 467
311 » » » p := ds.Property{} 468 » » » err = p.SetValue(val, ds.ShouldIndex)
312 » » » err = p.SetValue(val, ds.NoIndex)
313 if err != nil { 469 if err != nil {
314 return err 470 return err
315 } 471 }
316 472
317 if p.Type() == ds.PTKey { 473 if p.Type() == ds.PTKey {
318 if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) { 474 if !p.Value().(ds.Key).Valid(false, globalAppID, q.ns) {
319 return ds.ErrInvalidKey 475 return ds.ErrInvalidKey
320 } 476 }
321 } 477 }
322 478
323 if prop == "__key__" { 479 if prop == "__key__" {
324 if op == qEqual { 480 if op == qEqual {
325 return fmt.Errorf( 481 return fmt.Errorf(
326 "query equality filter on __key__ is silly: %q", fStr) 482 "query equality filter on __key__ is silly: %q", fStr)
327 } 483 }
328 if p.Type() != ds.PTKey { 484 if p.Type() != ds.PTKey {
329 return fmt.Errorf("__key__ filter value is not a key: %T", val) 485 return fmt.Errorf("__key__ filter value is not a key: %T", val)
330 } 486 }
487 } else if strings.HasPrefix(prop, "__") && strings.HasSuffix(prop, "__") {
488 return fmt.Errorf("filter on reserved property: %q", prop)
331 } 489 }
332 490
333 if op != qEqual { 491 if op != qEqual {
334 if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop { 492 if q.ineqFilter.prop != "" && q.ineqFilter.prop != prop {
335 return fmt.Errorf( 493 return fmt.Errorf(
336 "inequality filters on multiple properties: %q and %q", 494 "inequality filters on multiple properties: %q and %q",
337 q.ineqFilter.prop, prop) 495 q.ineqFilter.prop, prop)
338 } 496 }
339 if len(q.order) > 0 && q.order[0].Property != prop { 497 if len(q.order) > 0 && q.order[0].Property != prop {
340 return fmt.Errorf( 498 return fmt.Errorf(
341 "first sort order must match inequality filter: %q v %q", 499 "first sort order must match inequality filter: %q v %q",
342 q.order[0].Property, prop) 500 q.order[0].Property, prop)
343 } 501 }
344 » » » } else if _, ok := q.project[prop]; ok { 502 » » » } else {
345 » » » » return fmt.Errorf( 503 » » » » for _, p := range q.project {
346 » » » » » "cannot project on field which is used in an equality filter: %q", 504 » » » » » if p == prop {
347 » » » » » prop) 505 » » » » » » return fmt.Errorf(
506 » » » » » » » "cannot project on field which is used in an equality filter: %q",
507 » » » » » » » prop)
508 » » » » » }
509 » » » » }
348 } 510 }
349 binVal = string(serialize.ToBytes(p))
350 return err 511 return err
351 }, 512 },
352 func(q *queryImpl) { 513 func(q *queryImpl) {
353 if op == qEqual { 514 if op == qEqual {
354 // add it to eq filters 515 // add it to eq filters
355 » » » » if _, ok := q.eqFilters[prop]; !ok { 516 » » » » q.addEqFilt(prop, p)
356 » » » » » q.eqFilters[prop] = map[string]struct{}{binVal: {}}
357 » » » » } else {
358 » » » » » q.eqFilters[prop][binVal] = struct{}{}
359 » » » » }
360 517
361 // remove it from sort orders. 518 // remove it from sort orders.
362 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters 519 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters
363 toRm := -1 520 toRm := -1
364 for i, o := range q.order { 521 for i, o := range q.order {
365 if o.Property == prop { 522 if o.Property == prop {
366 toRm = i 523 toRm = i
367 break 524 break
368 } 525 }
369 } 526 }
370 if toRm >= 0 { 527 if toRm >= 0 {
371 q.order = append(q.order[:toRm], q.order[toRm+1:]...) 528 q.order = append(q.order[:toRm], q.order[toRm+1:]...)
372 } 529 }
373 } else { 530 } else {
374 q.ineqFilter.prop = prop 531 q.ineqFilter.prop = prop
375 » » » » if q.ineqFilter.constrain(op, binVal) { 532 » » » » if q.ineqFilter.constrain(op, serialize.ToBytes(p)) {
376 q.err = errQueryDone 533 q.err = errQueryDone
377 } 534 }
378 } 535 }
379 }) 536 })
380 } 537 }
381 538
382 func (q *queryImpl) Order(prop string) ds.Query { 539 func (q *queryImpl) Order(prop string) ds.Query {
383 col := ds.IndexColumn{} 540 col := ds.IndexColumn{}
384 return q.checkMutateClone( 541 return q.checkMutateClone(
385 func() error { 542 func() error {
386 // check that first order == first inequality. 543 // check that first order == first inequality.
387 // if order is an equality already, ignore it 544 // if order is an equality already, ignore it
388 col.Property = strings.TrimSpace(prop) 545 col.Property = strings.TrimSpace(prop)
389 if strings.HasPrefix(prop, "-") { 546 if strings.HasPrefix(prop, "-") {
390 col.Direction = ds.DESCENDING 547 col.Direction = ds.DESCENDING
391 col.Property = strings.TrimSpace(prop[1:]) 548 col.Property = strings.TrimSpace(prop[1:])
392 } else if strings.HasPrefix(prop, "+") { 549 } else if strings.HasPrefix(prop, "+") {
393 return fmt.Errorf("datastore: invalid order: %q", prop) 550 return fmt.Errorf("datastore: invalid order: %q", prop)
394 } 551 }
395 if len(col.Property) == 0 { 552 if len(col.Property) == 0 {
396 return errors.New("datastore: empty order") 553 return errors.New("datastore: empty order")
397 } 554 }
398 » » » if q.ineqFilter.prop != "" && q.ineqFilter.prop != col.Property { 555 » » » if len(q.order) == 0 && q.ineqFilter.prop != "" && q.ineqFilter.prop != col.Property {
399 return fmt.Errorf( 556 return fmt.Errorf(
400 "first sort order must match inequality filter: %q v %q", 557 "first sort order must match inequality filter: %q v %q",
401 prop, q.ineqFilter.prop) 558 prop, q.ineqFilter.prop)
402 } 559 }
403 if q.kind == "" && (col.Property != "__key__" || col.Direction != ds.ASCENDING) { 560 if q.kind == "" && (col.Property != "__key__" || col.Direction != ds.ASCENDING) {
404 return fmt.Errorf("invalid order for kindless query: %#v", col) 561 return fmt.Errorf("invalid order for kindless query: %#v", col)
405 } 562 }
406 return nil 563 return nil
407 }, 564 },
408 func(q *queryImpl) { 565 func(q *queryImpl) {
409 if _, ok := q.eqFilters[col.Property]; ok { 566 if _, ok := q.eqFilters[col.Property]; ok {
410 // skip it if it's an equality filter 567 // skip it if it's an equality filter
411 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters 568 // https://cloud.google.com/appengine/docs/go/datastore/queries#sort_orders_are_ignored_on_properties_with_equality_filters
412 return 569 return
413 } 570 }
414 for _, order := range q.order { 571 for _, order := range q.order {
415 if order.Property == col.Property { 572 if order.Property == col.Property {
416 // can't sort by the same order twice 573 // can't sort by the same order twice
417 return 574 return
418 } 575 }
419 } 576 }
420 » » » if col.Property == "__key__" { 577 » » » q.order = append(q.order, col)
421 » » » » // __key__ order dominates all other orders
422 » » » » q.order = []ds.IndexColumn{col}
423 » » » } else {
424 » » » » q.order = append(q.order, col)
425 » » » }
426 }) 578 })
427 } 579 }
428 580
429 func (q *queryImpl) Project(fieldName ...string) ds.Query { 581 func (q *queryImpl) Project(fieldName ...string) ds.Query {
430 return q.checkMutateClone( 582 return q.checkMutateClone(
431 func() error { 583 func() error {
432 if q.keysOnly { 584 if q.keysOnly {
433 return errors.New("cannot project a keysOnly query") 585 return errors.New("cannot project a keysOnly query")
434 } 586 }
587 dupCheck := stringSet{}
435 for _, f := range fieldName { 588 for _, f := range fieldName {
589 if !dupCheck.add(f) {
590 return fmt.Errorf("cannot project on the same field twice: %q", f)
591 }
436 if f == "" { 592 if f == "" {
437 return errors.New("cannot project on an empty field name") 593 return errors.New("cannot project on an empty field name")
438 } 594 }
439 » » » » if strings.HasPrefix(f, "__") && strings.HasSuffix(f, "__") { 595 » » » » if f == "__key__" {
440 » » » » » return fmt.Errorf("cannot project on %q", f) 596 » » » » » return fmt.Errorf("cannot project on __key__")
441 } 597 }
442 if _, ok := q.eqFilters[f]; ok { 598 if _, ok := q.eqFilters[f]; ok {
443 return fmt.Errorf( 599 return fmt.Errorf(
444 "cannot project on field which is used in an equality filter: %q", f) 600 "cannot project on field which is used in an equality filter: %q", f)
445 } 601 }
602 for _, p := range q.project {
603 if p == f {
604 » » » » » » return fmt.Errorf("cannot project on the same field twice: %q", f)
605 }
606 }
446 } 607 }
447 return nil 608 return nil
448 }, 609 },
449 func(q *queryImpl) { 610 func(q *queryImpl) {
450 » » » for _, f := range fieldName { 611 » » » q.project = append(q.project, fieldName...)
451 » » » » q.project[f] = struct{}{}
452 » » » }
453 }) 612 })
454 } 613 }
455 614
456 func (q *queryImpl) KeysOnly() ds.Query { 615 func (q *queryImpl) KeysOnly() ds.Query {
457 return q.checkMutateClone( 616 return q.checkMutateClone(
458 func() error { 617 func() error {
459 if len(q.project) != 0 { 618 if len(q.project) != 0 {
460 return errors.New("cannot project a keysOnly query") 619 return errors.New("cannot project a keysOnly query")
461 } 620 }
462 return nil 621 return nil
463 }, 622 },
464 func(q *queryImpl) { 623 func(q *queryImpl) {
465 q.keysOnly = true 624 q.keysOnly = true
466 }) 625 })
467 } 626 }
468 627
469 func (q *queryImpl) Limit(limit int) ds.Query { 628 func (q *queryImpl) Limit(limit int) ds.Query {
470 return q.checkMutateClone( 629 return q.checkMutateClone(
471 func() error { 630 func() error {
631 // nonsensically... ANY negative value means 'unlimited'. *shakes head*
472 if limit < math.MinInt32 || limit > math.MaxInt32 { 632 if limit < math.MinInt32 || limit > math.MaxInt32 {
473 return errors.New("datastore: query limit overflow") 633 return errors.New("datastore: query limit overflow")
474 } 634 }
475 return nil 635 return nil
476 }, 636 },
477 func(q *queryImpl) { 637 func(q *queryImpl) {
638 q.limitSet = true
478 q.limit = int32(limit) 639 q.limit = int32(limit)
479 }) 640 })
480 } 641 }
481 642
482 func (q *queryImpl) Offset(offset int) ds.Query { 643 func (q *queryImpl) Offset(offset int) ds.Query {
483 return q.checkMutateClone( 644 return q.checkMutateClone(
484 func() error { 645 func() error {
485 if offset < 0 { 646 if offset < 0 {
486 return errors.New("datastore: negative query offset") 647 return errors.New("datastore: negative query offset")
487 } 648 }
488 if offset > math.MaxInt32 { 649 if offset > math.MaxInt32 {
489 return errors.New("datastore: query offset overflow") 650 return errors.New("datastore: query offset overflow")
490 } 651 }
491 return nil 652 return nil
492 }, 653 },
493 func(q *queryImpl) { 654 func(q *queryImpl) {
494 q.offset = int32(offset) 655 q.offset = int32(offset)
495 }) 656 })
496 } 657 }
497 658
659 func queryCursorCheck(ns, flavor string, current []byte, newCursor ds.Cursor) ([]ds.IndexColumn, []byte, error) {
660   if current != nil {
661     return nil, nil, fmt.Errorf("%s cursor is multiply defined", flavor)
662   }
663   curs, ok := newCursor.(queryCursor)
664   if !ok {
665     return nil, nil, fmt.Errorf("%s cursor is unknown type: %T", flavor, curs)
666 }
667 return curs.decode()
668 }
669
498 func (q *queryImpl) Start(c ds.Cursor) ds.Query { 670 func (q *queryImpl) Start(c ds.Cursor) ds.Query {
499 » curs := queryCursor("") 671 » cols := []ds.IndexColumn(nil)
672 » curs := []byte(nil)
500 return q.checkMutateClone( 673 return q.checkMutateClone(
501 » » func() error { 674 » » func() (err error) {
502 » » » ok := false 675 » » » cols, curs, err = queryCursorCheck(q.ns, "start", q.startCursor, c)
503 » » » if curs, ok = c.(queryCursor); !ok { 676 » » » return
504 » » » » return fmt.Errorf("start cursor is unknown type: %T", c)
505 » » » }
506 » » » if !curs.Valid() {
507 » » » » return errors.New("datastore: invalid cursor")
508 » » » }
509 » » » return nil
510 }, 677 },
511 func(q *queryImpl) { 678 func(q *queryImpl) {
512 » » » q.start = curs 679 » » » q.startCursorColumns = cols
680 » » » q.startCursor = curs
513 }) 681 })
514 } 682 }
515 683
516 func (q *queryImpl) End(c ds.Cursor) ds.Query { 684 func (q *queryImpl) End(c ds.Cursor) ds.Query {
517 » curs := queryCursor("") 685 » cols := []ds.IndexColumn(nil)
686 » curs := queryCursor(nil)
518 return q.checkMutateClone( 687 return q.checkMutateClone(
519 » » func() error { 688 » » func() (err error) {
520 » » » ok := false 689 » » » cols, curs, err = queryCursorCheck(q.ns, "end", q.endCur sor, c)
521 » » » if curs, ok = c.(queryCursor); !ok { 690 » » » return
522 » » » » return fmt.Errorf("end cursor is unknown type: %T", c)
523 » » » }
524 » » » if !curs.Valid() {
525 » » » » return errors.New("datastore: invalid cursor")
526 » » » }
527 » » » return nil
528 }, 691 },
529 func(q *queryImpl) { 692 func(q *queryImpl) {
530 » » » q.end = curs 693 » » » q.endCursorColumns = cols
694 » » » q.endCursor = curs
531 }) 695 })
532 } 696 }
533 697
534 func (q *queryImpl) EventualConsistency() ds.Query { 698 func (q *queryImpl) EventualConsistency() ds.Query {
535 return q.checkMutateClone( 699 return q.checkMutateClone(
536 nil, func(q *queryImpl) { 700 nil, func(q *queryImpl) {
537 q.eventualConsistency = true 701 q.eventualConsistency = true
538 }) 702 })
539 } 703 }
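Taken together, the builder methods compose as in the hypothetical snippet below. Constructing queryImpl directly and the "prop op" filter-string format are assumptions made purely for illustration; real callers obtain the ds.Query from the datastore service, and parseFilter (elided above) defines the exact filter syntax.

// Hypothetical, in-package composition of the builder methods in this file.
// Every call goes through checkMutateClone, so each step yields a fresh
// query value and the first validation error is latched until the query runs.
var q ds.Query = &queryImpl{ns: "ns", kind: "Foo"}
q = q.
	Filter("A =", 1). // equality: serialized value added to eqFilters["A"]
	Filter("B >", 7). // the single allowed inequality property
	Order("B").       // first sort order must match the inequality property
	Project("C").     // may not overlap with equality-filtered fields
	Limit(10)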