OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 package memory | 5 package memory |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "encoding/base64" | 9 "encoding/base64" |
10 "errors" | 10 "errors" |
(...skipping 84 matching lines...)
95 } | 95 } |
96 if p, _, _ := fq.IneqFilterHigh(); p != "" { | 96 if p, _, _ := fq.IneqFilterHigh(); p != "" { |
97 numComponents++ | 97 numComponents++ |
98 } | 98 } |
99 for _, v := range fq.EqFilters() { | 99 for _, v := range fq.EqFilters() { |
100 numComponents += v.Len() | 100 numComponents += v.Len() |
101 } | 101 } |
102 return numComponents | 102 return numComponents |
103 } | 103 } |
104 | 104 |
105 func reduce(fq *ds.FinalizedQuery, ns string, isTxn bool) (*reducedQuery, error) { | 105 // GetBinaryBounds gets the binary encoding of the upper and lower bounds of |
106 » if err := fq.Valid(globalAppID, ns); err != nil { | 106 // the inequality filter on fq, if any is defined. If a bound does not exist, |
107 » » return nil, err | 107 // it is nil. |
108 » } | 108 // |
109 » if isTxn && fq.Ancestor() == nil { | 109 // NOTE: if fq specifies a descending sort order for the inequality, the bounds |
110 » » return nil, fmt.Errorf("queries within a transaction must include an Ancestor filter") | 110 // will be inverted, incremented, and flipped. |
111 » } | 111 func GetBinaryBounds(fq *ds.FinalizedQuery) (lower, upper []byte) { |
112 » if num := numComponents(fq); num > MaxQueryComponents { | |
113 » » return nil, fmt.Errorf( | |
114 » » » "gae/memory: query is too large. may not have more than "+ | |
115 » » » » "%d filters + sort orders + ancestor total: had %d", | |
116 » » » MaxQueryComponents, num) | |
117 » } | |
118 | |
119 » ret := &reducedQuery{ | |
120 » » ns: ns, | |
121 » » kind: fq.Kind(), | |
122 » » suffixFormat: fq.Orders(), | |
123 » } | |
124 | |
125 » eqFilts := fq.EqFilters() | |
126 » ret.eqFilters = make(map[string]stringset.Set, len(eqFilts)) | |
127 » for prop, vals := range eqFilts { | |
128 » » sVals := stringset.New(len(vals)) | |
129 » » for _, v := range vals { | |
130 » » » sVals.Add(string(serialize.ToBytes(v))) | |
131 » » } | |
132 » » ret.eqFilters[prop] = sVals | |
133 » } | |
134 | |
135 // Pick up the start/end range from the inequalities, if any. | 112 // Pick up the start/end range from the inequalities, if any. |
136 // | 113 // |
137 // start and end in the reducedQuery are normalized so that `start >= | 114 // start and end in the reducedQuery are normalized so that `start >= |
138 // X < end`. Because of that, we need to tweak the inequality filters | 115 // X < end`. Because of that, we need to tweak the inequality filters |
139 // contained in the query if they use the > or <= operators. | 116 // contained in the query if they use the > or <= operators. |
140 startD := []byte(nil) | |
141 endD := []byte(nil) | |
142 if ineqProp := fq.IneqFilterProp(); ineqProp != "" { | 117 if ineqProp := fq.IneqFilterProp(); ineqProp != "" { |
143 _, startOp, startV := fq.IneqFilterLow() | 118 _, startOp, startV := fq.IneqFilterLow() |
144 if startOp != "" { | 119 if startOp != "" { |
145 » » » startD = serialize.ToBytes(startV) | 120 » » » lower = serialize.ToBytes(startV) |
146 if startOp == ">" { | 121 if startOp == ">" { |
147 » » » » startD = increment(startD) | 122 » » » » lower = increment(lower) |
148 } | 123 } |
149 } | 124 } |
150 | 125 |
151 _, endOp, endV := fq.IneqFilterHigh() | 126 _, endOp, endV := fq.IneqFilterHigh() |
152 if endOp != "" { | 127 if endOp != "" { |
153 » » » endD = serialize.ToBytes(endV) | 128 » » » upper = serialize.ToBytes(endV) |
154 if endOp == "<=" { | 129 if endOp == "<=" { |
155 » » » » endD = increment(endD) | 130 » » » » upper = increment(upper) |
156 } | 131 } |
157 } | 132 } |
158 | 133 |
159 // The inequality is specified in natural (ascending) order in the query's | 134 // The inequality is specified in natural (ascending) order in the query's |
160 // Filter syntax, but the order information may indicate to use a descending | 135 // Filter syntax, but the order information may indicate to use a descending |
161 // index column for it. If that's the case, then we must invert, swap and | 136 // index column for it. If that's the case, then we must invert, swap and |
162 // increment the inequality endpoints. | 137 // increment the inequality endpoints. |
163 // | 138 // |
164 // Invert so that the desired numbers are represented correctly in the index. | 139 // Invert so that the desired numbers are represented correctly in the index. |
165 // Swap so that our iterators still go from >= start to < end. | 140 // Swap so that our iterators still go from >= start to < end. |
166 // Increment so that >= and < get correctly bounded (since the iterator is | 141 // Increment so that >= and < get correctly bounded (since the iterator is |
167 // still using natural bytes ordering) | 142 // still using natural bytes ordering) |
168 » » if ret.suffixFormat[0].Descending { | 143 » » if fq.Orders()[0].Descending { |
169 hi, lo := []byte(nil), []byte(nil) | 144 hi, lo := []byte(nil), []byte(nil) |
170 » » » if len(startD) > 0 { | 145 » » » if len(lower) > 0 { |
171 » » » » lo = increment(serialize.Invert(startD)) | 146 » » » » lo = increment(serialize.Invert(lower)) |
172 } | 147 } |
173 » » » if len(endD) > 0 { | 148 » » » if len(upper) > 0 { |
174 » » » » hi = increment(serialize.Invert(endD)) | 149 » » » » hi = increment(serialize.Invert(upper)) |
175 } | 150 } |
176 » » » endD, startD = lo, hi | 151 » » » upper, lower = lo, hi |
177 } | 152 } |
178 } | 153 } |
| 154 return |
| 155 } |
| 156 |
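
A minimal, self-contained sketch of the normalization GetBinaryBounds performs above: inequality endpoints become a half-open [lower, upper) byte range, and a descending first sort order inverts, increments and swaps them. The incr and invert helpers here are simplified stand-ins for the package's increment and serialize.Invert, not the actual implementation.

package main

import "fmt"

// incr returns data+1 in natural byte ordering. Simplified stand-in for the
// package's increment; it assumes the value never overflows its width.
func incr(data []byte) []byte {
	ret := append([]byte(nil), data...)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] == 0xFF {
			ret[i] = 0
			continue
		}
		ret[i]++
		break
	}
	return ret
}

// invert flips every byte so that inverted values sort in descending order;
// a simplified stand-in for serialize.Invert.
func invert(data []byte) []byte {
	ret := make([]byte, len(data))
	for i, b := range data {
		ret[i] = 0xFF - b
	}
	return ret
}

func main() {
	// "prop > 0x41" and "prop <= 0x43" become the half-open range [0x42, 0x44):
	// the closed endpoints are incremented so iteration runs >= lower, < upper.
	lower := incr([]byte{0x41}) // > 0x41   ->  >= 0x42
	upper := incr([]byte{0x43}) // <= 0x43  ->  <  0x44
	fmt.Printf("ascending:  [% x, % x)\n", lower, upper)

	// With a descending first sort order, invert both endpoints, increment,
	// and swap, so the iterator still scans >= lower and < upper over the
	// inverted index bytes.
	lo, hi := incr(invert(lower)), incr(invert(upper))
	upper, lower = lo, hi
	fmt.Printf("descending: [% x, % x)\n", lower, upper) // [bc, be)
}
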
| 157 func reduce(fq *ds.FinalizedQuery, ns string, isTxn bool) (*reducedQuery, error) {
| 158 if err := fq.Valid(globalAppID, ns); err != nil { |
| 159 return nil, err |
| 160 } |
| 161 if isTxn && fq.Ancestor() == nil { |
| 162 return nil, fmt.Errorf("queries within a transaction must include an Ancestor filter")
| 163 } |
| 164 if num := numComponents(fq); num > MaxQueryComponents { |
| 165 return nil, fmt.Errorf( |
| 166 "gae/memory: query is too large. may not have more than "+
| 167 "%d filters + sort orders + ancestor total: had %d",
| 168 MaxQueryComponents, num) |
| 169 } |
| 170 |
| 171 ret := &reducedQuery{ |
| 172 ns: ns, |
| 173 kind: fq.Kind(), |
| 174 suffixFormat: fq.Orders(), |
| 175 } |
| 176 |
| 177 eqFilts := fq.EqFilters() |
| 178 ret.eqFilters = make(map[string]stringset.Set, len(eqFilts)) |
| 179 for prop, vals := range eqFilts { |
| 180 sVals := stringset.New(len(vals)) |
| 181 for _, v := range vals { |
| 182 sVals.Add(string(serialize.ToBytes(v))) |
| 183 } |
| 184 ret.eqFilters[prop] = sVals |
| 185 } |
| 186 |
| 187 startD, endD := GetBinaryBounds(fq) |
179 | 188 |
180 // Now we check the start and end cursors. | 189 // Now we check the start and end cursors. |
181 // | 190 // |
182 // Cursors are composed of a list of IndexColumns at the beginning, followed | 191 // Cursors are composed of a list of IndexColumns at the beginning, followed |
183 // by the raw bytes to use for the suffix. The cursor is only valid if all of | 192 // by the raw bytes to use for the suffix. The cursor is only valid if all of |
184 // its IndexColumns match our proposed suffixFormat, as calculated above. | 193 // its IndexColumns match our proposed suffixFormat, as calculated above. |
185 // | 194 // |
186 // Cursors are mutually exclusive with the start/end we picked up from the | 195 // Cursors are mutually exclusive with the start/end we picked up from the |
187 // inequality. In a well formed query, they indicate a subset of results | 196 // inequality. In a well formed query, they indicate a subset of results |
188 // bounded by the inequality. Technically if the start cursor is not >= the | 197 // bounded by the inequality. Technically if the start cursor is not >= the |
(...skipping 71 matching lines...)
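
As the comments above describe, a cursor is a list of IndexColumns followed by the raw suffix bytes, and it is only usable when those columns match the query's proposed suffixFormat. Below is a rough, self-contained sketch of that check, using hypothetical indexColumn and decodedCursor types; the real decoding and range handling lives in the skipped lines and is not reproduced here.

package main

import (
	"bytes"
	"fmt"
)

// indexColumn and decodedCursor are hypothetical simplifications of the
// datastore types, just enough to illustrate the validation described above.
type indexColumn struct {
	Property   string
	Descending bool
}

type decodedCursor struct {
	Columns []indexColumn // the columns the cursor was generated against
	Suffix  []byte        // raw suffix bytes for the iterator
}

// applyStartCursor accepts the cursor only if its columns exactly match the
// query's proposed suffixFormat, then uses its suffix as the new start. In a
// well formed query the cursor already lies inside the inequality range, so
// keeping the larger of the two starts only narrows the scan.
func applyStartCursor(suffixFormat []indexColumn, c decodedCursor, start []byte) ([]byte, error) {
	if len(c.Columns) != len(suffixFormat) {
		return nil, fmt.Errorf("cursor has %d columns; query expects %d", len(c.Columns), len(suffixFormat))
	}
	for i, col := range c.Columns {
		if col != suffixFormat[i] {
			return nil, fmt.Errorf("cursor column %d (%+v) does not match the query", i, col)
		}
	}
	if bytes.Compare(c.Suffix, start) > 0 {
		return c.Suffix, nil
	}
	return start, nil
}

func main() {
	format := []indexColumn{{Property: "Field", Descending: true}}
	cur := decodedCursor{Columns: format, Suffix: []byte{0x8f, 0x02}}
	start, err := applyStartCursor(format, cur, []byte{0x8f, 0x01})
	fmt.Printf("start=% x err=%v\n", start, err)
}
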
260 // and the new value has overflowed this representation. | 269 // and the new value has overflowed this representation. |
261 // | 270 // |
262 // Fortunately, the first byte of a serialized index column entry is a | 271 // Fortunately, the first byte of a serialized index column entry is a |
263 // PropertyType byte, and the only valid values that we'll be incrementing | 272 // PropertyType byte, and the only valid values that we'll be incrementing |
264 // are never equal to 0xFF, since they have the high bit set (so either they're | 273 // are never equal to 0xFF, since they have the high bit set (so either they're |
265 // 0x8*, or 0x7*, depending on if it's inverted). | 274 // 0x8*, or 0x7*, depending on if it's inverted). |
266 impossible(fmt.Errorf("incrementing %v would require more sigfigs", bstr)) | 275 impossible(fmt.Errorf("incrementing %v would require more sigfigs", bstr)) |
267 } | 276 } |
268 return ret | 277 return ret |
269 } | 278 } |
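
The increment function above adds one to a byte string with carry, and the comment argues the carry can never run off the front of the slice because the leading PropertyType byte is never 0xFF. Here is a self-contained sketch of that behavior, with an assumed name rather than the package's own code.

package main

import "fmt"

// incrementBytes is an assumed stand-in for the increment function above: it
// adds one to bstr as a big-endian number, carrying through 0xFF bytes.
func incrementBytes(bstr []byte) []byte {
	ret := append([]byte(nil), bstr...)
	for i := len(ret) - 1; i >= 0; i-- {
		if ret[i] == 0xFF {
			// 0xFF rolls over to 0x00 and the carry moves one byte left.
			ret[i] = 0
			continue
		}
		ret[i]++
		return ret
	}
	// Every byte was 0xFF. Real index entries never hit this case: their
	// leading PropertyType byte is never 0xFF, so the carry always stops.
	panic(fmt.Errorf("incrementing %v would require more sigfigs", bstr))
}

func main() {
	fmt.Printf("% x\n", incrementBytes([]byte{0x8f, 0x41, 0xff})) // 8f 42 00
	fmt.Printf("% x\n", incrementBytes([]byte{0x8f, 0xff, 0xff})) // 90 00 00
}
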