Chromium Code Reviews

Side by Side Diff: impl/memory/datastore_index.go

Issue 1355783002: Refactor keys and queries in datastore service and implementation. (Closed) Base URL: https://github.com/luci/gae.git@master
Patch Set: appease errcheck Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments.
Jump to:
View unified diff |
« no previous file with comments | « impl/memory/datastore_data.go ('k') | impl/memory/datastore_index_selection.go » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 package memory 5 package memory
6 6
7 import ( 7 import (
8 "bytes" 8 "bytes"
9 "fmt" 9 "fmt"
10 "sort" 10 "sort"
(...skipping 18 matching lines...)
29 for _, v := range pvals { 29 for _, v := range pvals {
30 if v.IndexSetting() == ds.ShouldIndex { 30 if v.IndexSetting() == ds.ShouldIndex {
31 needsIndex = true 31 needsIndex = true
32 break 32 break
33 } 33 }
34 } 34 }
35 if !needsIndex { 35 if !needsIndex {
36 continue 36 continue
37 } 37 }
38 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.I ndexColumn{{Property: name}}}) 38 ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.I ndexColumn{{Property: name}}})
39 » » ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.I ndexColumn{{Property: name, Direction: ds.DESCENDING}}}) 39 » » ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.I ndexColumn{{Property: name, Descending: true}}})
40 } 40 }
41 if serializationDeterministic { 41 if serializationDeterministic {
42 sort.Sort(ret) 42 sort.Sort(ret)
43 } 43 }
44 return ret 44 return ret
45 } 45 }
46 46
// indexEntriesWithBuiltins computes all index entries for the entity (k, pm):
// the automatic ("builtin") single-property indexes derived from the entity's
// kind plus the supplied composite index definitions.
//
// The returned *memStore holds the generated index rows for k's namespace.
func indexEntriesWithBuiltins(k *ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) *memStore {
	sip := partiallySerialize(k, pm)
	// k.Last() is the leaf token of the key path; its Kind is the entity kind.
	return sip.indexEntries(k.Namespace(), append(defaultIndexes(k.Last().Kind, pm), complexIdxs...))
}
51 51
// serializedPvals is the list of serialized DSProperty values for a single
// property, kept in qASC (ascending serialized-byte) order.
//
// It implements sort.Interface, ordering entries by a bytewise comparison of
// their serialized forms.
type serializedPvals [][]byte

func (p serializedPvals) Len() int      { return len(p) }
func (p serializedPvals) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p serializedPvals) Less(i, j int) bool {
	return bytes.Compare(p[i], p[j]) < 0
}
58 58
59 // prop name -> [<serialized DSProperty>, ...] 59 // prop name -> [<serialized DSProperty>, ...]
(...skipping 11 matching lines...)
71 data := serialize.ToBytes(v.ForIndex()) 71 data := serialize.ToBytes(v.ForIndex())
72 dataS := string(data) 72 dataS := string(data)
73 if !dups.Add(dataS) { 73 if !dups.Add(dataS) {
74 continue 74 continue
75 } 75 }
76 ret = append(ret, data) 76 ret = append(ret, data)
77 } 77 }
78 return ret 78 return ret
79 } 79 }
80 80
81 func partiallySerialize(k ds.Key, pm ds.PropertyMap) (ret serializedIndexablePma p) { 81 func partiallySerialize(k *ds.Key, pm ds.PropertyMap) (ret serializedIndexablePm ap) {
82 ret = make(serializedIndexablePmap, len(pm)+2) 82 ret = make(serializedIndexablePmap, len(pm)+2)
83 if k == nil { 83 if k == nil {
84 impossible(fmt.Errorf("key to partiallySerialize is nil")) 84 impossible(fmt.Errorf("key to partiallySerialize is nil"))
85 } 85 }
86 ret["__key__"] = [][]byte{serialize.ToBytes(ds.MkProperty(k))} 86 ret["__key__"] = [][]byte{serialize.ToBytes(ds.MkProperty(k))}
87 for k != nil { 87 for k != nil {
88 ret["__ancestor__"] = append(ret["__ancestor__"], serialize.ToBy tes(ds.MkProperty(k))) 88 ret["__ancestor__"] = append(ret["__ancestor__"], serialize.ToBy tes(ds.MkProperty(k)))
89 k = k.Parent() 89 k = k.Parent()
90 } 90 }
91 for k, vals := range pm { 91 for k, vals := range pm {
92 newVals := serializeRow(vals) 92 newVals := serializeRow(vals)
93 if len(newVals) > 0 { 93 if len(newVals) > 0 {
94 ret[k] = newVals 94 ret[k] = newVals
95 } 95 }
96 } 96 }
97 return 97 return
98 } 98 }
99 99
// indexRowGen contains enough information to generate all of the index rows which
// correspond with a propertyList and a ds.IndexDefinition.
type indexRowGen struct {
	// propVec[i] holds the serialized values for the i'th index column.
	propVec []serializedPvals
	// decending[i] (sic: misspelling of "descending") is true when the i'th
	// index column sorts in descending order. Parallel to propVec.
	decending []bool
}
106 106
// permute calls collSetFn for each index row, in the sorted order of the rows.
//
// Each row is the concatenation of one serialized value per column; permute
// enumerates the full cross-product of s.propVec like an odometer, ascending
// columns counting up and descending columns counting down, so rows emerge in
// index order. Descending columns have their bytes inverted so that a plain
// bytewise ascending scan of the emitted keys yields descending value order.
func (s indexRowGen) permute(collSetFn func(k, v []byte)) {
	// iVec is the current per-column value index; iVecLim the per-column count.
	iVec := make([]int, len(s.propVec))
	iVecLim := make([]int, len(s.propVec))

	// incPos advances iVec to the next combination (odometer-style, rightmost
	// column fastest). Returns false once every combination has been visited.
	incPos := func() bool {
		for i := len(iVec) - 1; i >= 0; i-- {
			var done bool
			var newVal int
			if !s.decending[i] {
				// Ascending column: count up, wrapping to 0 (wrap == carry).
				newVal = (iVec[i] + 1) % iVecLim[i]
				done = newVal != 0
			} else {
				// Descending column: count down, wrapping to the top.
				newVal = (iVec[i] - 1)
				if newVal < 0 {
					newVal = iVecLim[i] - 1
				} else {
					done = true
				}
			}
			iVec[i] = newVal
			if done {
				return true
			}
		}
		return false
	}

	for i, sps := range s.propVec {
		iVecLim[i] = len(sps)
	}

	// Descending columns start at their last value and walk backwards.
	for i := range iVec {
		if s.decending[i] {
			iVec[i] = iVecLim[i] - 1
		}
	}

	for {
		// Pre-size the row buffer to the exact total length of this row.
		bufsiz := 0
		for pvalSliceIdx, pvalIdx := range iVec {
			bufsiz += len(s.propVec[pvalSliceIdx][pvalIdx])
		}
		buf := serialize.Invertible(bytes.NewBuffer(make([]byte, 0, bufsiz)))
		for pvalSliceIdx, pvalIdx := range iVec {
			data := s.propVec[pvalSliceIdx][pvalIdx]
			// Invert the bytes of descending columns (see doc comment above).
			buf.SetInvert(s.decending[pvalSliceIdx])
			// Buffer writes cannot fail; error deliberately ignored (errcheck).
			_, _ = buf.Write(data)
		}
		// The row key carries all the data; the value is intentionally empty.
		collSetFn(buf.Bytes(), []byte{})
		if !incPos() {
			break
		}
	}
}
162 162
// matcher is a reusable scratch area for testing whether a serialized
// property map can satisfy an index definition. buf's slices are recycled
// across calls to match to avoid reallocating per index.
type matcher struct {
	buf indexRowGen
}
166 166
167 // matcher.match checks to see if the mapped, serialized property values 167 // matcher.match checks to see if the mapped, serialized property values
168 // match the index. If they do, it returns a indexRowGen. Do not write or modify 168 // match the index. If they do, it returns a indexRowGen. Do not write or modify
169 // the data in the indexRowGen. 169 // the data in the indexRowGen.
170 func (m *matcher) match(sortBy []ds.IndexColumn, sip serializedIndexablePmap) (i ndexRowGen, bool) { 170 func (m *matcher) match(sortBy []ds.IndexColumn, sip serializedIndexablePmap) (i ndexRowGen, bool) {
171 m.buf.propVec = m.buf.propVec[:0] 171 m.buf.propVec = m.buf.propVec[:0]
172 » m.buf.orders = m.buf.orders[:0] 172 » m.buf.decending = m.buf.decending[:0]
173 for _, sb := range sortBy { 173 for _, sb := range sortBy {
174 if pv, ok := sip[sb.Property]; ok { 174 if pv, ok := sip[sb.Property]; ok {
175 m.buf.propVec = append(m.buf.propVec, pv) 175 m.buf.propVec = append(m.buf.propVec, pv)
176 » » » m.buf.orders = append(m.buf.orders, sb.Direction) 176 » » » m.buf.decending = append(m.buf.decending, sb.Descending)
177 } else { 177 } else {
178 return indexRowGen{}, false 178 return indexRowGen{}, false
179 } 179 }
180 } 180 }
181 return m.buf, true 181 return m.buf, true
182 } 182 }
183 183
184 func (sip serializedIndexablePmap) indexEntries(ns string, idxs []*ds.IndexDefin ition) *memStore { 184 func (sip serializedIndexablePmap) indexEntries(ns string, idxs []*ds.IndexDefin ition) *memStore {
185 ret := newMemStore() 185 ret := newMemStore()
186 idxColl := ret.SetCollection("idx", nil) 186 idxColl := ret.SetCollection("idx", nil)
(...skipping 95 matching lines...)
282 } 282 }
283 283
284 if allEnts := store.GetCollection("ents:" + ns); allEnts != nil { 284 if allEnts := store.GetCollection("ents:" + ns); allEnts != nil {
285 allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool { 285 allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool {
286 pm, err := rpmWoCtx(i.Val, ns) 286 pm, err := rpmWoCtx(i.Val, ns)
287 memoryCorruption(err) 287 memoryCorruption(err)
288 288
289 prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Ke y), serialize.WithoutContext, globalAppID, ns) 289 prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Ke y), serialize.WithoutContext, globalAppID, ns)
290 memoryCorruption(err) 290 memoryCorruption(err)
291 291
292 » » » k := prop.Value().(ds.Key) 292 » » » k := prop.Value().(*ds.Key)
293 293
294 sip := partiallySerialize(k, pm) 294 sip := partiallySerialize(k, pm)
295 295
296 mergeIndexes(ns, store, 296 mergeIndexes(ns, store,
297 newMemStore(), 297 newMemStore(),
298 sip.indexEntries(ns, normalized)) 298 sip.indexEntries(ns, normalized))
299 return true 299 return true
300 }) 300 })
301 } 301 }
302 } 302 }
303 303
304 func updateIndexes(store *memStore, key ds.Key, oldEnt, newEnt ds.PropertyMap) { 304 func updateIndexes(store *memStore, key *ds.Key, oldEnt, newEnt ds.PropertyMap) {
305 // load all current complex query index definitions. 305 // load all current complex query index definitions.
306 compIdx := []*ds.IndexDefinition{} 306 compIdx := []*ds.IndexDefinition{}
307 walkCompIdxs(store, nil, func(i *ds.IndexDefinition) bool { 307 walkCompIdxs(store, nil, func(i *ds.IndexDefinition) bool {
308 compIdx = append(compIdx, i) 308 compIdx = append(compIdx, i)
309 return true 309 return true
310 }) 310 })
311 311
312 mergeIndexes(key.Namespace(), store, 312 mergeIndexes(key.Namespace(), store,
313 indexEntriesWithBuiltins(key, oldEnt, compIdx), 313 indexEntriesWithBuiltins(key, oldEnt, compIdx),
314 indexEntriesWithBuiltins(key, newEnt, compIdx)) 314 indexEntriesWithBuiltins(key, newEnt, compIdx))
315 } 315 }
OLDNEW
« no previous file with comments | « impl/memory/datastore_data.go ('k') | impl/memory/datastore_index_selection.go » ('j') | no next file with comments »

Powered by Google App Engine