OLD | NEW |
| (Empty) |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 package memory | |
6 | |
7 import ( | |
8 "bytes" | |
9 "fmt" | |
10 "sort" | |
11 | |
12 ds "github.com/luci/gae/service/datastore" | |
13 "github.com/luci/gkvlite" | |
14 ) | |
15 | |
// indexCreationDeterministic, when true, causes defaultIndicies to sort
// the index slice it returns so the ordering is reproducible. It is only
// read in defaultIndicies; presumably set by tests — TODO confirm.
var indexCreationDeterministic = false
17 | |
18 type qIndexSlice []*qIndex | |
19 | |
20 func (s qIndexSlice) Len() int { return len(s) } | |
21 func (s qIndexSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |
22 func (s qIndexSlice) Less(i, j int) bool { return s[i].Less(s[j]) } | |
23 | |
24 func defaultIndicies(kind string, pmap ds.PropertyMap) []*qIndex { | |
25 ret := make(qIndexSlice, 0, 2*len(pmap)+1) | |
26 ret = append(ret, &qIndex{kind, false, nil}) | |
27 for name, pvals := range pmap { | |
28 needsIndex := false | |
29 for _, v := range pvals { | |
30 if v.IndexSetting() == ds.ShouldIndex { | |
31 needsIndex = true | |
32 break | |
33 } | |
34 } | |
35 if !needsIndex { | |
36 continue | |
37 } | |
38 ret = append(ret, &qIndex{kind, false, []qSortBy{{name, qASC}}}) | |
39 ret = append(ret, &qIndex{kind, false, []qSortBy{{name, qDEC}}}) | |
40 } | |
41 if indexCreationDeterministic { | |
42 sort.Sort(ret) | |
43 } | |
44 return ret | |
45 } | |
46 | |
47 func indexEntriesWithBuiltins(k ds.Key, pm ds.PropertyMap, complexIdxs []*qIndex
) *memStore { | |
48 sip := partiallySerialize(pm) | |
49 return sip.indexEntries(k, append(defaultIndicies(k.Kind(), pm), complex
Idxs...)) | |
50 } | |
51 | |
// serializedPvals is all of the serialized DSProperty values in qASC order.
// It implements sort.Interface with bytewise-lexicographic ordering.
type serializedPvals [][]byte

func (s serializedPvals) Len() int { return len(s) }

func (s serializedPvals) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s serializedPvals) Less(i, j int) bool {
	return bytes.Compare(s[i], s[j]) < 0
}
58 | |
// serializedIndexablePmap maps a property name to the sorted serialized
// forms ([<serialized DSProperty>, ...]) of that property's indexable
// values. Built by partiallySerialize; NoIndex values are excluded.
type serializedIndexablePmap map[string]serializedPvals
61 | |
62 func partiallySerialize(pm ds.PropertyMap) (ret serializedIndexablePmap) { | |
63 if len(pm) == 0 { | |
64 return | |
65 } | |
66 | |
67 buf := &bytes.Buffer{} | |
68 ret = make(serializedIndexablePmap, len(pm)) | |
69 for k, vals := range pm { | |
70 newVals := make(serializedPvals, 0, len(vals)) | |
71 for _, v := range vals { | |
72 if v.IndexSetting() == ds.NoIndex { | |
73 continue | |
74 } | |
75 buf.Reset() | |
76 v.Write(buf, ds.WithoutContext) | |
77 newVal := make([]byte, buf.Len()) | |
78 copy(newVal, buf.Bytes()) | |
79 newVals = append(newVals, newVal) | |
80 } | |
81 if len(newVals) > 0 { | |
82 sort.Sort(newVals) | |
83 ret[k] = newVals | |
84 } | |
85 } | |
86 return | |
87 } | |
88 | |
// indexRowGen contains enough information to generate all of the index rows which
// correspond with a propertyList and a qIndex.
type indexRowGen struct {
	// propVec holds, per index column, the sorted serialized values of
	// the matched property (one serializedPvals per sort-by column).
	propVec []serializedPvals
	// orders holds the sort direction (qASC/qDEC) for the corresponding
	// entry of propVec.
	orders []qDirection
}
95 | |
// permute calls cb for each index row, in the sorted order of the rows.
//
// Each row is the concatenation of one serialized value per column
// (propVec[i][iVec[i]] for each i), i.e. the cross product of all column
// values. iVec acts as a mixed-radix odometer over the columns; the
// direction each digit counts in depends on that column's sort order, so
// rows come out in index-sorted order overall.
func (s indexRowGen) permute(cb func([]byte)) {
	iVec := make([]int, len(s.propVec))    // current value index per column
	iVecLim := make([]int, len(s.propVec)) // number of values per column

	// incPos advances iVec to the next permutation, carrying from the
	// least-significant (last) column leftward. Ascending columns count
	// up and carry on wrap to 0; descending columns count down and carry
	// on wrap past 0. Returns false once every digit has wrapped (all
	// permutations emitted).
	incPos := func() bool {
		for i := len(iVec) - 1; i >= 0; i-- {
			var done bool
			var newVal int
			if s.orders[i] == qASC {
				newVal = (iVec[i] + 1) % iVecLim[i]
				done = newVal != 0
			} else {
				newVal = (iVec[i] - 1)
				if newVal < 0 {
					// wrapped; reset this digit and keep carrying left.
					newVal = iVecLim[i] - 1
				} else {
					done = true
				}
			}
			iVec[i] = newVal
			if done {
				return true
			}
		}
		return false
	}

	for i, sps := range s.propVec {
		iVecLim[i] = len(sps)
	}

	// Descending columns start at their highest value index so the first
	// emitted row is the smallest in index order.
	for i := range iVec {
		if s.orders[i] == qDEC {
			iVec[i] = iVecLim[i] - 1
		}
	}

	for {
		// Pre-size the row buffer to the exact concatenated length.
		bufsiz := 0
		for pvalSliceIdx, pvalIdx := range iVec {
			bufsiz += len(s.propVec[pvalSliceIdx][pvalIdx])
		}
		buf := bytes.NewBuffer(make([]byte, 0, bufsiz))
		for pvalSliceIdx, pvalIdx := range iVec {
			data := s.propVec[pvalSliceIdx][pvalIdx]
			if s.orders[pvalSliceIdx] == qASC {
				buf.Write(data)
			} else {
				// Descending columns store the bitwise complement of each
				// byte so that ascending byte-order comparison of rows
				// yields descending value order.
				for _, b := range data {
					buf.WriteByte(b ^ 0xFF)
				}
			}
		}
		cb(buf.Bytes())
		if !incPos() {
			break
		}
	}
}
156 | |
// matcher is a reusable scratch area for match; buf's slices are
// truncated and refilled on every call to avoid reallocating.
type matcher struct {
	buf indexRowGen
}
160 | |
161 // matcher.match checks to see if the mapped, serialized property values | |
162 // match the index. If they do, it returns a indexRowGen. Do not write or modify | |
163 // the data in the indexRowGen. | |
164 func (m *matcher) match(idx *qIndex, sip serializedIndexablePmap) (indexRowGen,
bool) { | |
165 m.buf.propVec = m.buf.propVec[:0] | |
166 m.buf.orders = m.buf.orders[:0] | |
167 for _, sb := range idx.sortby { | |
168 if pv, ok := sip[sb.prop]; ok { | |
169 m.buf.propVec = append(m.buf.propVec, pv) | |
170 m.buf.orders = append(m.buf.orders, sb.dir) | |
171 } else { | |
172 return indexRowGen{}, false | |
173 } | |
174 } | |
175 return m.buf, true | |
176 } | |
177 | |
// indexEntries builds a fresh memStore holding every index row that entity
// key k contributes to each index in idxs that its properties match. The
// store contains an "idx" collection listing the matched index definitions
// (binary-encoded) plus one "idx:<namespace>:<binary index>" collection of
// rows per matched index.
func (sip serializedIndexablePmap) indexEntries(k ds.Key, idxs []*qIndex) *memStore {
	ret := newMemStore()
	idxColl := ret.SetCollection("idx", nil)
	// getIdxEnts retrieves an index collection or adds it if it's not there.
	getIdxEnts := func(qi *qIndex) *memCollection {
		buf := &bytes.Buffer{}
		qi.WriteBinary(buf)
		b := buf.Bytes()
		// Record the index definition itself under "idx", keyed by its
		// binary encoding.
		idxColl.Set(b, []byte{})
		// NOTE: %s embeds the raw binary encoding in the collection name;
		// this must stay in sync with the prefix built in updateIndicies.
		return ret.SetCollection(fmt.Sprintf("idx:%s:%s", k.Namespace(), b), nil)
	}

	// keyData is the context-free serialization of k, appended to every
	// row so rows for different entities stay distinct.
	buf := &bytes.Buffer{}
	ds.WriteKey(buf, ds.WithoutContext, k)
	keyData := buf.Bytes()

	// walkPermutations writes one row per value permutation:
	// prefix + permuted property data + keyData. Each row's value is the
	// previous permutation's property data (prev), forming a back-chain.
	walkPermutations := func(prefix []byte, irg indexRowGen, ents *memCollection) {
		prev := []byte{} // intentionally make a non-nil slice, gkvlite hates nil.
		irg.permute(func(data []byte) {
			buf := bytes.NewBuffer(make([]byte, 0, len(prefix)+len(data)+len(keyData)))
			buf.Write(prefix)
			buf.Write(data)
			buf.Write(keyData)
			ents.Set(buf.Bytes(), prev)
			prev = data
		})
	}

	mtch := matcher{}
	for _, idx := range idxs {
		if irg, ok := mtch.match(idx, sip); ok {
			idxEnts := getIdxEnts(idx)
			if len(irg.propVec) == 0 {
				idxEnts.Set(keyData, []byte{}) // propless index, e.g. kind -> key = nil
			} else if idx.ancestor {
				// Ancestor indexes get one row-set per ancestor of k
				// (including k itself), each prefixed with that
				// ancestor's serialized key.
				for ancKey := k; ancKey != nil; ancKey = ancKey.Parent() {
					buf := &bytes.Buffer{}
					ds.WriteKey(buf, ds.WithoutContext, ancKey)
					walkPermutations(buf.Bytes(), irg, idxEnts)
				}
			} else {
				walkPermutations(nil, irg, idxEnts)
			}
		}
	}

	return ret
}
226 | |
// updateIndicies brings the index rows stored in store up to date for the
// transition of entity key from oldEnt to newEnt (either may be empty for
// a pure insert/delete). It recomputes both entities' full index entry
// sets and applies only the differences to store.
func updateIndicies(store *memStore, key ds.Key, oldEnt, newEnt ds.PropertyMap) {
	idxColl := store.GetCollection("idx")
	if idxColl == nil {
		idxColl = store.SetCollection("idx", nil)
	}

	// load all current complex query index definitions.
	compIdx := []*qIndex{}
	idxColl.VisitItemsAscend(complexQueryPrefix, false, func(i *gkvlite.Item) bool {
		if !bytes.HasPrefix(i.Key, complexQueryPrefix) {
			// Past the prefix range; stop iterating.
			return false
		}
		qi := &qIndex{}
		if err := qi.ReadBinary(bytes.NewBuffer(i.Key)); err != nil {
			panic(err) // memory corruption
		}
		compIdx = append(compIdx, qi)
		return true
	})

	// Full index entry sets before and after the change, as throwaway
	// memStores keyed the same way as `store`.
	oldIdx := indexEntriesWithBuiltins(key, oldEnt, compIdx)

	newIdx := indexEntriesWithBuiltins(key, newEnt, compIdx)

	// Must match the collection-name format used by indexEntries.
	prefix := "idx:" + key.Namespace() + ":"

	// First collide at the index-definition level: k is a binary index
	// encoding; ov/nv indicate whether the old/new entity has any rows
	// for that index.
	gkvCollide(oldIdx.GetCollection("idx"), newIdx.GetCollection("idx"), func(k, ov, nv []byte) {
		ks := prefix + string(k)
		idxColl.Set(k, []byte{})

		coll := store.GetCollection(ks)
		if coll == nil {
			coll = store.SetCollection(ks, nil)
		}
		oldColl := oldIdx.GetCollection(ks)
		newColl := newIdx.GetCollection(ks)

		switch {
		case ov == nil && nv != nil: // all additions
			newColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
				coll.Set(i.Key, i.Val)
				return true
			})
		case ov != nil && nv == nil: // all deletions
			oldColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
				coll.Delete(i.Key)
				return true
			})
		case ov != nil && nv != nil: // merge
			// Second-level collide over the individual rows of this
			// index: delete rows only the old entity had, set rows the
			// new entity has.
			gkvCollide(oldColl, newColl, func(k, ov, nv []byte) {
				if nv == nil {
					coll.Delete(k)
				} else {
					coll.Set(k, nv)
				}
			})
		default:
			panic("impossible")
		}
		// TODO(riannucci): remove entries from idxColl and remove index collections
		// when there are no index entries for that index any more.
	})
}
OLD | NEW |