OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 package memory | 5 package memory |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "fmt" | 9 "fmt" |
10 "sort" | 10 "sort" |
11 | 11 |
12 ds "github.com/luci/gae/service/datastore" | 12 ds "github.com/luci/gae/service/datastore" |
13 "github.com/luci/gae/service/datastore/serialize" | 13 "github.com/luci/gae/service/datastore/serialize" |
14 "github.com/luci/gkvlite" | 14 "github.com/luci/gkvlite" |
15 "github.com/luci/luci-go/common/stringset" | |
16 ) | 15 ) |
17 | 16 |
18 type qIndexSlice []*ds.IndexDefinition | 17 type qIndexSlice []*ds.IndexDefinition |
19 | 18 |
20 func (s qIndexSlice) Len() int { return len(s) } | 19 func (s qIndexSlice) Len() int { return len(s) } |
21 func (s qIndexSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | 20 func (s qIndexSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } |
22 func (s qIndexSlice) Less(i, j int) bool { return s[i].Less(s[j]) } | 21 func (s qIndexSlice) Less(i, j int) bool { return s[i].Less(s[j]) } |
23 | 22 |
24 func defaultIndexes(kind string, pmap ds.PropertyMap) []*ds.IndexDefinition { | 23 func defaultIndexes(kind string, pmap ds.PropertyMap) []*ds.IndexDefinition { |
25 ret := make(qIndexSlice, 0, 2*len(pmap)+1) | 24 ret := make(qIndexSlice, 0, 2*len(pmap)+1) |
(...skipping 12 matching lines...) |
38                 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name}}}) | 37                 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name}}}) |
39                 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name, Descending: true}}}) | 38                 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name, Descending: true}}}) |
40 } | 39 } |
41 if serializationDeterministic { | 40 if serializationDeterministic { |
42 sort.Sort(ret) | 41 sort.Sort(ret) |
43 } | 42 } |
44 return ret | 43 return ret |
45 } | 44 } |
46 | 45 |
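For context, a minimal sketch (not part of this CL) of the built-in index definitions defaultIndexes yields for a hypothetical kind "Foo" with one indexed property "Val". The kind-only entry is assumed to come from the elided prefix of the function; the other two mirror the appends shown above.

// index_sketch.go: illustrative only; "Foo" and "Val" are made-up names.
package main

import (
	"fmt"

	ds "github.com/luci/gae/service/datastore"
)

func main() {
	builtins := []*ds.IndexDefinition{
		{Kind: "Foo"}, // kind-only index; assumed from the elided lines of defaultIndexes
		{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Val"}}},
		{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Val", Descending: true}}},
	}
	for _, idx := range builtins {
		fmt.Printf("%#v\n", *idx)
	}
}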
47 func indexEntriesWithBuiltins(k *ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) *memStore { | 46 func indexEntriesWithBuiltins(k *ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) *memStore { |
48 » sip := partiallySerialize(k, pm) | 47 » sip := serialize.PropertyMapPartially(k, pm) |
49 » return sip.indexEntries(k.Namespace(), append(defaultIndexes(k.Kind(), pm), complexIdxs...)) | 48 » return indexEntries(sip, k.Namespace(), append(defaultIndexes(k.Kind(), pm), complexIdxs...)) |
50 } | |
51 | |
52 // serializedPvals is all of the serialized DSProperty values in qASC order. | |
53 type serializedPvals [][]byte | |
54 | |
55 func (s serializedPvals) Len() int { return len(s) } | |
56 func (s serializedPvals) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |
57 func (s serializedPvals) Less(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 } | |
58 | |
59 // prop name -> [<serialized DSProperty>, ...] | |
60 // includes special values '__key__' and '__ancestor__' which contains all of | |
61 // the ancestor entries for this key. | |
62 type serializedIndexablePmap map[string]serializedPvals | |
63 | |
64 func serializeRow(vals []ds.Property) serializedPvals { | |
65 » dups := stringset.New(0) | |
66 » ret := make(serializedPvals, 0, len(vals)) | |
67 » for _, v := range vals { | |
68 » » if v.IndexSetting() == ds.NoIndex { | |
69 » » » continue | |
70 » » } | |
71 » » data := serialize.ToBytes(v.ForIndex()) | |
72 » » dataS := string(data) | |
73 » » if !dups.Add(dataS) { | |
74 » » » continue | |
75 » » } | |
76 » » ret = append(ret, data) | |
77 » } | |
78 » return ret | |
79 } | |
80 | |
81 func partiallySerialize(k *ds.Key, pm ds.PropertyMap) (ret serializedIndexablePmap) { | |
82 » ret = make(serializedIndexablePmap, len(pm)+2) | |
83 » if k == nil { | |
84 » » impossible(fmt.Errorf("key to partiallySerialize is nil")) | |
85 » } | |
86 » ret["__key__"] = [][]byte{serialize.ToBytes(ds.MkProperty(k))} | |
87 » for k != nil { | |
88 » » ret["__ancestor__"] = append(ret["__ancestor__"], serialize.ToBytes(ds.MkProperty(k))) | |
89 » » k = k.Parent() | |
90 » } | |
91 » for k, vals := range pm { | |
92 » » newVals := serializeRow(vals) | |
93 » » if len(newVals) > 0 { | |
94 » » » ret[k] = newVals | |
95 » » } | |
96 » } | |
97 » return | |
98 } | 49 } |
99 | 50 |
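The helpers removed above were promoted into the serialize package: partiallySerialize becomes serialize.PropertyMapPartially, and serializedIndexablePmap / serializedPvals become serialize.SerializedPmap / serialize.SerializedPslice. A rough sketch of how the replacement is consumed; dumpIndexable is a made-up helper, the MakeKey constructor is an assumption about the datastore API at this revision, and the "__key__"/"__ancestor__" rows are assumed to carry over from the deleted implementation.

// indexable_sketch.go: illustrative fragment, not part of this CL.
package main

import (
	"fmt"

	ds "github.com/luci/gae/service/datastore"
	"github.com/luci/gae/service/datastore/serialize"
)

// dumpIndexable prints how many serialized index values each property
// contributes, including the special "__key__" and "__ancestor__" rows.
func dumpIndexable(k *ds.Key, pm ds.PropertyMap) {
	sip := serialize.PropertyMapPartially(k, pm)
	for name, vals := range sip {
		fmt.Printf("%-12s -> %d serialized value(s)\n", name, len(vals))
	}
}

func main() {
	// MakeKey here is an assumption about the key constructor available at
	// this revision; substitute whatever the package actually exposes.
	k := ds.MakeKey("dev~app", "ns", "Foo", 1)
	pm := ds.PropertyMap{"Val": {ds.MkProperty("hello")}}
	dumpIndexable(k, pm)
}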
100 // indexRowGen contains enough information to generate all of the index rows which | 51 // indexRowGen contains enough information to generate all of the index rows which |
101 // correspond with a propertyList and a ds.IndexDefinition. | 52 // correspond with a propertyList and a ds.IndexDefinition. |
102 type indexRowGen struct { | 53 type indexRowGen struct { |
103 » propVec []serializedPvals | 54 » propVec []serialize.SerializedPslice |
104 decending []bool | 55 decending []bool |
105 } | 56 } |
106 | 57 |
107 // permute calls collSetFn for each index row, in the sorted order of the rows. | 58 // permute calls collSetFn for each index row, in the sorted order of the rows. |
108 func (s indexRowGen) permute(collSetFn func(k, v []byte)) { | 59 func (s indexRowGen) permute(collSetFn func(k, v []byte)) { |
109 iVec := make([]int, len(s.propVec)) | 60 iVec := make([]int, len(s.propVec)) |
110 iVecLim := make([]int, len(s.propVec)) | 61 iVecLim := make([]int, len(s.propVec)) |
111 | 62 |
112 incPos := func() bool { | 63 incPos := func() bool { |
113 for i := len(iVec) - 1; i >= 0; i-- { | 64 for i := len(iVec) - 1; i >= 0; i-- { |
(...skipping 46 matching lines...) |
160 } | 111 } |
161 } | 112 } |
162 | 113 |
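The elided body of permute advances iVec like an odometer over the cross product of the per-column value slices, emitting one row per combination. A standalone, standard-library-only sketch of that enumeration pattern (it mirrors the incPos counter above but is not the CL's code and skips the descending-column byte inversion):

// odometer_sketch.go: enumerate every combination of per-column values in
// row order, the same counter pattern incPos implements above.
package main

import "fmt"

func permuteSketch(cols [][]string, cb func(row []string)) {
	iVec := make([]int, len(cols)) // current position within each column
	row := make([]string, len(cols))
	for {
		for i, col := range cols {
			row[i] = col[iVec[i]]
		}
		cb(row)
		// Advance the rightmost column; carry leftward when a column wraps.
		i := len(iVec) - 1
		for ; i >= 0; i-- {
			iVec[i]++
			if iVec[i] < len(cols[i]) {
				break
			}
			iVec[i] = 0
		}
		if i < 0 {
			return // every column wrapped: all combinations emitted
		}
	}
}

func main() {
	permuteSketch([][]string{{"a", "b"}, {"x", "y", "z"}}, func(row []string) {
		fmt.Println(row) // [a x] [a y] [a z] [b x] [b y] [b z]
	})
}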
163 type matcher struct { | 114 type matcher struct { |
164 buf indexRowGen | 115 buf indexRowGen |
165 } | 116 } |
166 | 117 |
167 // matcher.match checks to see if the mapped, serialized property values | 118 // matcher.match checks to see if the mapped, serialized property values |
168 // match the index. If they do, it returns an indexRowGen. Do not write or modify | 119 // match the index. If they do, it returns an indexRowGen. Do not write or modify |
169 // the data in the indexRowGen. | 120 // the data in the indexRowGen. |
170 func (m *matcher) match(sortBy []ds.IndexColumn, sip serializedIndexablePmap) (indexRowGen, bool) { | 121 func (m *matcher) match(sortBy []ds.IndexColumn, sip serialize.SerializedPmap) (indexRowGen, bool) { |
171 m.buf.propVec = m.buf.propVec[:0] | 122 m.buf.propVec = m.buf.propVec[:0] |
172 m.buf.decending = m.buf.decending[:0] | 123 m.buf.decending = m.buf.decending[:0] |
173 for _, sb := range sortBy { | 124 for _, sb := range sortBy { |
174 if pv, ok := sip[sb.Property]; ok { | 125 if pv, ok := sip[sb.Property]; ok { |
175 m.buf.propVec = append(m.buf.propVec, pv) | 126 m.buf.propVec = append(m.buf.propVec, pv) |
176 m.buf.decending = append(m.buf.decending, sb.Descending) | 127 m.buf.decending = append(m.buf.decending, sb.Descending) |
177 } else { | 128 } else { |
178 return indexRowGen{}, false | 129 return indexRowGen{}, false |
179 } | 130 } |
180 } | 131 } |
181 return m.buf, true | 132 return m.buf, true |
182 } | 133 } |
183 | 134 |
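Put differently: an index only produces rows for an entity that has every column in the index's full sort order; one missing property disqualifies the whole index. A tiny standalone illustration of that rule, using plain maps in place of SerializedPmap (names here are made up):

// match_sketch.go: the "all columns present or no rows" rule that
// matcher.match enforces, modeled with plain maps.
package main

import "fmt"

// hasAllColumns reports whether every named sort column exists in the
// entity's indexed-property map, the precondition for generating index rows.
func hasAllColumns(props map[string][][]byte, columns []string) bool {
	for _, c := range columns {
		if _, ok := props[c]; !ok {
			return false
		}
	}
	return true
}

func main() {
	props := map[string][][]byte{
		"__key__": {[]byte("k")},
		"Val":     {[]byte("v1"), []byte("v2")},
	}
	fmt.Println(hasAllColumns(props, []string{"Val", "__key__"})) // true
	fmt.Println(hasAllColumns(props, []string{"Val", "Extra"}))   // false: index is skipped
}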
184 func (sip serializedIndexablePmap) indexEntries(ns string, idxs []*ds.IndexDefinition) *memStore { | 135 func indexEntries(sip serialize.SerializedPmap, ns string, idxs []*ds.IndexDefinition) *memStore { |
185 ret := newMemStore() | 136 ret := newMemStore() |
186 idxColl := ret.SetCollection("idx", nil) | 137 idxColl := ret.SetCollection("idx", nil) |
187 | 138 |
188 mtch := matcher{} | 139 mtch := matcher{} |
189 for _, idx := range idxs { | 140 for _, idx := range idxs { |
190 idx = idx.Normalize() | 141 idx = idx.Normalize() |
191 if irg, ok := mtch.match(idx.GetFullSortOrder(), sip); ok { | 142 if irg, ok := mtch.match(idx.GetFullSortOrder(), sip); ok { |
192 idxBin := serialize.ToBytes(*idx.PrepForIdxTable()) | 143 idxBin := serialize.ToBytes(*idx.PrepForIdxTable()) |
193 idxColl.Set(idxBin, []byte{}) | 144 idxColl.Set(idxBin, []byte{}) |
194                         coll := ret.SetCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin), nil) | 145                         coll := ret.SetCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin), nil) |
(...skipping 89 matching lines...) |
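The store built here has a two-level layout: a top-level "idx" collection recording which index definitions are populated, and one "idx:<ns>:<serialized index>" collection per matching index holding the actual rows (filled by permute in the elided lines). A rough standard-library model of that layout; the values are made up, and the real row keys are the permuted serialized property tuples:

// layout_sketch.go: models the collection layout indexEntries builds.
package main

import "fmt"

func main() {
	ns := "ns"                     // namespace (made up)
	idxBin := "\x01serialized-idx" // stands in for serialize.ToBytes(*idx.PrepForIdxTable())

	store := map[string]map[string][]byte{
		// Marker collection: records that this index definition is populated.
		"idx": {idxBin: {}},
		// Per-index collection: one entry per generated index row.
		fmt.Sprintf("idx:%s:%s", ns, idxBin): {
			"row-key-from-permute": {},
		},
	}
	for name, coll := range store {
		fmt.Printf("collection %q: %d item(s)\n", name, len(coll))
	}
}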
284 if allEnts := store.GetCollection("ents:" + ns); allEnts != nil { | 235 if allEnts := store.GetCollection("ents:" + ns); allEnts != nil { |
285 allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool { | 236 allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool { |
286 pm, err := rpmWoCtx(i.Val, ns) | 237 pm, err := rpmWoCtx(i.Val, ns) |
287 memoryCorruption(err) | 238 memoryCorruption(err) |
288 | 239 |
289                         prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Key), serialize.WithoutContext, globalAppID, ns) | 240                         prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Key), serialize.WithoutContext, globalAppID, ns) |
290 memoryCorruption(err) | 241 memoryCorruption(err) |
291 | 242 |
292 k := prop.Value().(*ds.Key) | 243 k := prop.Value().(*ds.Key) |
293 | 244 |
294 » » » sip := partiallySerialize(k, pm) | 245 » » » sip := serialize.PropertyMapPartially(k, pm) |
295 | 246 |
296 mergeIndexes(ns, store, | 247 mergeIndexes(ns, store, |
297 newMemStore(), | 248 newMemStore(), |
298 » » » » sip.indexEntries(ns, normalized)) | 249 » » » » indexEntries(sip, ns, normalized)) |
299 return true | 250 return true |
300 }) | 251 }) |
301 } | 252 } |
302 } | 253 } |
303 | 254 |
304 func updateIndexes(store *memStore, key *ds.Key, oldEnt, newEnt ds.PropertyMap) { | 255 func updateIndexes(store *memStore, key *ds.Key, oldEnt, newEnt ds.PropertyMap) { |
305 // load all current complex query index definitions. | 256 // load all current complex query index definitions. |
306 compIdx := []*ds.IndexDefinition{} | 257 compIdx := []*ds.IndexDefinition{} |
307 walkCompIdxs(store, nil, func(i *ds.IndexDefinition) bool { | 258 walkCompIdxs(store, nil, func(i *ds.IndexDefinition) bool { |
308 compIdx = append(compIdx, i) | 259 compIdx = append(compIdx, i) |
309 return true | 260 return true |
310 }) | 261 }) |
311 | 262 |
312 mergeIndexes(key.Namespace(), store, | 263 mergeIndexes(key.Namespace(), store, |
313 indexEntriesWithBuiltins(key, oldEnt, compIdx), | 264 indexEntriesWithBuiltins(key, oldEnt, compIdx), |
314 indexEntriesWithBuiltins(key, newEnt, compIdx)) | 265 indexEntriesWithBuiltins(key, newEnt, compIdx)) |
315 } | 266 } |
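So every write recomputes the complete set of index entries for the entity's old and new states (built-ins plus all stored composite indexes) and hands both to mergeIndexes. mergeIndexes itself is outside this hunk, so the sketch below is an assumption about the delta it applies: rows only in the old set get deleted, rows only in the new set get added, shared rows are untouched.

// merge_sketch.go: the assumed delete/add delta between old and new index rows.
package main

import "fmt"

func deltaSketch(oldRows, newRows map[string]bool) (toDel, toAdd []string) {
	for k := range oldRows {
		if !newRows[k] {
			toDel = append(toDel, k) // row existed before, gone now
		}
	}
	for k := range newRows {
		if !oldRows[k] {
			toAdd = append(toAdd, k) // row is new with this write
		}
	}
	return
}

func main() {
	oldRows := map[string]bool{"rowA": true, "rowB": true}
	newRows := map[string]bool{"rowB": true, "rowC": true}
	del, add := deltaSketch(oldRows, newRows)
	fmt.Println("delete:", del, "add:", add) // delete: [rowA] add: [rowC]
}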