| OLD | NEW |
| (Empty) |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 package memory | |
| 6 | |
| 7 import ( | |
| 8 "bytes" | |
| 9 "fmt" | |
| 10 "sort" | |
| 11 | |
| 12 "github.com/luci/gae" | |
| 13 "github.com/luci/gae/helper" | |
| 14 | |
| 15 "github.com/luci/gkvlite" | |
| 16 ) | |
| 17 | |
// indexCreationDeterministic causes defaultIndicies to sort the index list
// it returns. NOTE(review): presumably toggled by tests that need a
// reproducible index ordering — confirm against the package's tests.
var indexCreationDeterministic = false
| 19 | |
// qIndexSlice implements sort.Interface over *qIndex values, delegating the
// ordering to qIndex.Less. It exists so defaultIndicies can sort its result
// when indexCreationDeterministic is set.
type qIndexSlice []*qIndex

func (s qIndexSlice) Len() int           { return len(s) }
func (s qIndexSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s qIndexSlice) Less(i, j int) bool { return s[i].Less(s[j]) }
| 25 | |
| 26 func defaultIndicies(kind string, pmap gae.DSPropertyMap) []*qIndex { | |
| 27 ret := make(qIndexSlice, 0, 2*len(pmap)+1) | |
| 28 ret = append(ret, &qIndex{kind, false, nil}) | |
| 29 for name, pvals := range pmap { | |
| 30 needsIndex := false | |
| 31 for _, v := range pvals { | |
| 32 if v.IndexSetting() == gae.ShouldIndex { | |
| 33 needsIndex = true | |
| 34 break | |
| 35 } | |
| 36 } | |
| 37 if !needsIndex { | |
| 38 continue | |
| 39 } | |
| 40 ret = append(ret, &qIndex{kind, false, []qSortBy{{name, qASC}}}) | |
| 41 ret = append(ret, &qIndex{kind, false, []qSortBy{{name, qDEC}}}) | |
| 42 } | |
| 43 if indexCreationDeterministic { | |
| 44 sort.Sort(ret) | |
| 45 } | |
| 46 return ret | |
| 47 } | |
| 48 | |
| 49 func indexEntriesWithBuiltins(k gae.DSKey, pm gae.DSPropertyMap, complexIdxs []*
qIndex) *memStore { | |
| 50 sip := partiallySerialize(pm) | |
| 51 return sip.indexEntries(k, append(defaultIndicies(k.Kind(), pm), complex
Idxs...)) | |
| 52 } | |
| 53 | |
| 54 // serializedPvals is all of the serialized DSProperty values in qASC order. | |
| 55 type serializedPvals [][]byte | |
| 56 | |
| 57 func (s serializedPvals) Len() int { return len(s) } | |
| 58 func (s serializedPvals) Swap(i, j int) { s[i], s[j] = s[j], s[i] } | |
| 59 func (s serializedPvals) Less(i, j int) bool { return bytes.Compare(s[i], s[j])
< 0 } | |
| 60 | |
// serializedIndexablePmap maps a property name to that property's indexable
// serialized values: prop name -> [<serialized DSProperty>, ...], with each
// value list sorted in qASC order (see partiallySerialize).
type serializedIndexablePmap map[string]serializedPvals
| 63 | |
| 64 func partiallySerialize(pm gae.DSPropertyMap) (ret serializedIndexablePmap) { | |
| 65 if len(pm) == 0 { | |
| 66 return | |
| 67 } | |
| 68 | |
| 69 buf := &bytes.Buffer{} | |
| 70 ret = make(serializedIndexablePmap, len(pm)) | |
| 71 for k, vals := range pm { | |
| 72 newVals := make(serializedPvals, 0, len(vals)) | |
| 73 for _, v := range vals { | |
| 74 if v.IndexSetting() == gae.NoIndex { | |
| 75 continue | |
| 76 } | |
| 77 buf.Reset() | |
| 78 helper.WriteDSProperty(buf, v, helper.WithoutContext) | |
| 79 newVal := make([]byte, buf.Len()) | |
| 80 copy(newVal, buf.Bytes()) | |
| 81 newVals = append(newVals, newVal) | |
| 82 } | |
| 83 if len(newVals) > 0 { | |
| 84 sort.Sort(newVals) | |
| 85 ret[k] = newVals | |
| 86 } | |
| 87 } | |
| 88 return | |
| 89 } | |
| 90 | |
// indexRowGen contains enough information to generate all of the index rows which
// correspond with a propertyList and a qIndex.
type indexRowGen struct {
	// propVec[i] holds the sorted serialized values available for the i'th
	// sort column of the index.
	propVec []serializedPvals
	// orders[i] is the sort direction (qASC or qDEC) of the i'th column.
	orders []qDirection
}
| 97 | |
// permute calls cb for each index row, in the sorted order of the rows.
//
// It enumerates every combination of one value per column (the cartesian
// product of s.propVec) using an odometer over iVec, emitting rows so that
// their serialized bytes come out in ascending order.
func (s indexRowGen) permute(cb func([]byte)) {
	// iVec[i] selects which of column i's values is in the current row;
	// iVecLim[i] is the number of values available in column i.
	iVec := make([]int, len(s.propVec))
	iVecLim := make([]int, len(s.propVec))

	// incPos advances the odometer by one row, carrying from the last
	// (least-significant) column toward the first. Ascending columns count
	// up; descending columns count down (they start at their maximum, see
	// below). Returns false once every combination has been visited.
	incPos := func() bool {
		for i := len(iVec) - 1; i >= 0; i-- {
			var done bool
			var newVal int
			if s.orders[i] == qASC {
				newVal = (iVec[i] + 1) % iVecLim[i]
				done = newVal != 0 // wrapped to 0 => carry into column i-1
			} else {
				newVal = (iVec[i] - 1)
				if newVal < 0 {
					newVal = iVecLim[i] - 1 // wrapped => carry into column i-1
				} else {
					done = true
				}
			}
			iVec[i] = newVal
			if done {
				return true
			}
		}
		return false // every column wrapped: enumeration complete
	}

	for i, sps := range s.propVec {
		iVecLim[i] = len(sps)
	}

	// Descending columns start at their last value so the first emitted row
	// is the smallest in serialized (byte-inverted) order.
	for i := range iVec {
		if s.orders[i] == qDEC {
			iVec[i] = iVecLim[i] - 1
		}
	}

	for {
		// Pre-size the row buffer exactly, then concatenate the selected
		// value from each column.
		bufsiz := 0
		for pvalSliceIdx, pvalIdx := range iVec {
			bufsiz += len(s.propVec[pvalSliceIdx][pvalIdx])
		}
		buf := bytes.NewBuffer(make([]byte, 0, bufsiz))
		for pvalSliceIdx, pvalIdx := range iVec {
			data := s.propVec[pvalSliceIdx][pvalIdx]
			if s.orders[pvalSliceIdx] == qASC {
				buf.Write(data)
			} else {
				// Descending columns are stored byte-inverted, so a
				// bytewise-ascending scan over the rows visits this
				// property's values in descending order.
				for _, b := range data {
					buf.WriteByte(b ^ 0xFF)
				}
			}
		}
		cb(buf.Bytes())
		if !incPos() {
			break
		}
	}
}
| 158 | |
// matcher is reusable scratch space for match; it avoids reallocating the
// propVec/orders slices on every index checked.
type matcher struct {
	buf indexRowGen
}
| 162 | |
| 163 // matcher.match checks to see if the mapped, serialized property values | |
| 164 // match the index. If they do, it returns a indexRowGen. Do not write or modify | |
| 165 // the data in the indexRowGen. | |
| 166 func (m *matcher) match(idx *qIndex, sip serializedIndexablePmap) (indexRowGen,
bool) { | |
| 167 m.buf.propVec = m.buf.propVec[:0] | |
| 168 m.buf.orders = m.buf.orders[:0] | |
| 169 for _, sb := range idx.sortby { | |
| 170 if pv, ok := sip[sb.prop]; ok { | |
| 171 m.buf.propVec = append(m.buf.propVec, pv) | |
| 172 m.buf.orders = append(m.buf.orders, sb.dir) | |
| 173 } else { | |
| 174 return indexRowGen{}, false | |
| 175 } | |
| 176 } | |
| 177 return m.buf, true | |
| 178 } | |
| 179 | |
// indexEntries builds a fresh memStore containing every index row that the
// entity (key k, serialized properties sip) contributes to the indexes in
// idxs. The store holds an "idx" collection whose keys are the
// binary-encoded matched indexes, plus one "idx:<namespace>:<binary index>"
// collection of rows per matched index.
func (sip serializedIndexablePmap) indexEntries(k gae.DSKey, idxs []*qIndex) *memStore {
	ret := newMemStore()
	idxColl := ret.SetCollection("idx", nil)
	// getIdxEnts retrieves an index collection or adds it if it's not there.
	getIdxEnts := func(qi *qIndex) *memCollection {
		buf := &bytes.Buffer{}
		qi.WriteBinary(buf)
		b := buf.Bytes()
		idxColl.Set(b, []byte{})
		return ret.SetCollection(fmt.Sprintf("idx:%s:%s", k.Namespace(), b), nil)
	}

	// keyData is the context-free serialization of k; it suffixes every row
	// so rows from different entities never collide.
	buf := &bytes.Buffer{}
	helper.WriteDSKey(buf, helper.WithoutContext, k)
	keyData := buf.Bytes()

	// walkPermutations writes one row per permutation of irg into ents,
	// keyed prefix+data+keyData, with the previous permutation's data as the
	// value (empty for the first row). NOTE(review): the reason each row
	// stores the prior row's data is not evident from this file — confirm
	// against the query/scan implementation before relying on it.
	walkPermutations := func(prefix []byte, irg indexRowGen, ents *memCollection) {
		prev := []byte{} // intentionally make a non-nil slice, gkvlite hates nil.
		irg.permute(func(data []byte) {
			buf := bytes.NewBuffer(make([]byte, 0, len(prefix)+len(data)+len(keyData)))
			buf.Write(prefix)
			buf.Write(data)
			buf.Write(keyData)
			ents.Set(buf.Bytes(), prev)
			prev = data
		})
	}

	mtch := matcher{}
	for _, idx := range idxs {
		if irg, ok := mtch.match(idx, sip); ok {
			idxEnts := getIdxEnts(idx)
			if len(irg.propVec) == 0 {
				idxEnts.Set(keyData, []byte{}) // propless index, e.g. kind -> key = nil
			} else if idx.ancestor {
				// Ancestor indexes get a full row set for every key in the
				// ancestor chain, each prefixed with that ancestor's
				// serialized key.
				for ancKey := k; ancKey != nil; ancKey = ancKey.Parent() {
					buf := &bytes.Buffer{}
					helper.WriteDSKey(buf, helper.WithoutContext, ancKey)
					walkPermutations(buf.Bytes(), irg, idxEnts)
				}
			} else {
				walkPermutations(nil, irg, idxEnts)
			}
		}
	}

	return ret
}
| 228 | |
// updateIndicies applies to store the index-row delta caused by replacing
// the entity state oldEnt with newEnt under key: rows produced only by the
// old state are deleted and rows produced by the new state are written.
// NOTE(review): presumably an empty/nil property map represents a
// nonexistent entity (pure create or delete) — confirm at the call sites.
func updateIndicies(store *memStore, key gae.DSKey, oldEnt, newEnt gae.DSPropertyMap) {
	idxColl := store.GetCollection("idx")
	if idxColl == nil {
		idxColl = store.SetCollection("idx", nil)
	}

	// load all current complex query index definitions.
	compIdx := []*qIndex{}
	idxColl.VisitItemsAscend(complexQueryPrefix, false, func(i *gkvlite.Item) bool {
		if !bytes.HasPrefix(i.Key, complexQueryPrefix) {
			return false // walked past the complex-index key range; stop.
		}
		qi := &qIndex{}
		if err := qi.ReadBinary(bytes.NewBuffer(i.Key)); err != nil {
			panic(err) // memory corruption
		}
		compIdx = append(compIdx, qi)
		return true
	})

	// Materialize the complete index-row set for both the old and the new
	// entity state into scratch stores, then diff them below.
	oldIdx := indexEntriesWithBuiltins(key, oldEnt, compIdx)

	newIdx := indexEntriesWithBuiltins(key, newEnt, compIdx)

	prefix := "idx:" + key.Namespace() + ":"

	// Collide the two scratch "idx" collections to discover which per-index
	// collections are affected (ov/nv are nil when the key is absent on that
	// side), then apply the row-level changes to the real store.
	gkvCollide(oldIdx.GetCollection("idx"), newIdx.GetCollection("idx"), func(k, ov, nv []byte) {
		ks := prefix + string(k)
		idxColl.Set(k, []byte{})

		coll := store.GetCollection(ks)
		if coll == nil {
			coll = store.SetCollection(ks, nil)
		}
		oldColl := oldIdx.GetCollection(ks)
		newColl := newIdx.GetCollection(ks)

		switch {
		case ov == nil && nv != nil: // all additions
			newColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
				coll.Set(i.Key, i.Val)
				return true
			})
		case ov != nil && nv == nil: // all deletions
			oldColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
				coll.Delete(i.Key)
				return true
			})
		case ov != nil && nv != nil: // merge
			gkvCollide(oldColl, newColl, func(k, ov, nv []byte) {
				if nv == nil {
					coll.Delete(k)
				} else {
					coll.Set(k, nv)
				}
			})
		default:
			panic("impossible") // gkvCollide never yields nil on both sides
		}
		// TODO(riannucci): remove entries from idxColl and remove index collections
		// when there are no index entries for that index any more.
	})
}
| OLD | NEW |