OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 package memory | 5 package memory |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "fmt" | 9 "fmt" |
10 "sort" | 10 "sort" |
(...skipping 25 matching lines...) |
36 } | 36 } |
37     ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name}}}) | 37     ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name}}}) |
38     ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name, Descending: true}}}) | 38     ret = append(ret, &ds.IndexDefinition{Kind: kind, SortBy: []ds.IndexColumn{{Property: name, Descending: true}}}) |
39 } | 39 } |
40 if serializationDeterministic { | 40 if serializationDeterministic { |
41 sort.Sort(ret) | 41 sort.Sort(ret) |
42 } | 42 } |
43 return ret | 43 return ret |
44 } | 44 } |
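
The two append calls above (new file lines 37-38) emit an ascending/descending pair of single-property built-in indexes per indexed property. A minimal sketch, assuming it sits in this memory package and reuses the file's ds import alias, of the shape that pair takes for a hypothetical kind "Foo" with one indexed property "Age":

// Illustration only; "Foo" and "Age" are made-up names. This is the literal
// ascending/descending pair the loop above appends per indexed property.
var exampleDefaultPair = []*ds.IndexDefinition{
	{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Age"}}},
	{Kind: "Foo", SortBy: []ds.IndexColumn{{Property: "Age", Descending: true}}},
}
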
45 | 45 |
46 func indexEntriesWithBuiltins(k *ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) *memStore { | 46 func indexEntriesWithBuiltins(k *ds.Key, pm ds.PropertyMap, complexIdxs []*ds.IndexDefinition) memStore { |
47 sip := serialize.PropertyMapPartially(k, pm) | 47 sip := serialize.PropertyMapPartially(k, pm) |
48   return indexEntries(sip, k.Namespace(), append(defaultIndexes(k.Kind(), pm), complexIdxs...)) | 48   return indexEntries(sip, k.Namespace(), append(defaultIndexes(k.Kind(), pm), complexIdxs...)) |
49 } | 49 } |
50 | 50 |
51 // indexRowGen contains enough information to generate all of the index rows which | 51 // indexRowGen contains enough information to generate all of the index rows which |
52 // correspond with a propertyList and a ds.IndexDefinition. | 52 // correspond with a propertyList and a ds.IndexDefinition. |
53 type indexRowGen struct { | 53 type indexRowGen struct { |
54 propVec []serialize.SerializedPslice | 54 propVec []serialize.SerializedPslice |
55 decending []bool | 55 decending []bool |
56 } | 56 } |
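
The permute method that consumes these two slices lives in the elided lines below. As a rough, hedged sketch of the idea the doc comment describes (names here are illustrative, and the real code additionally byte-inverts values for descending columns), one row is produced per combination of one serialized value from each sort column:

// Rough sketch only, not the real permute: given one slice of serialized
// values per sort column, emit every concatenation that picks exactly one
// value from each column, in column order.
func crossProductRows(propVec [][][]byte, emit func(row []byte)) {
	var rec func(col int, prefix []byte)
	rec = func(col int, prefix []byte) {
		if col == len(propVec) {
			emit(prefix)
			return
		}
		for _, val := range propVec[col] {
			// Copy before appending so sibling branches never share a backing array.
			next := append(append(make([]byte, 0, len(prefix)+len(val)), prefix...), val...)
			rec(col+1, next)
		}
	}
	rec(0, nil)
}
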
(...skipping 68 matching lines...) |
125 if pv, ok := sip[sb.Property]; ok { | 125 if pv, ok := sip[sb.Property]; ok { |
126 m.buf.propVec = append(m.buf.propVec, pv) | 126 m.buf.propVec = append(m.buf.propVec, pv) |
127 m.buf.decending = append(m.buf.decending, sb.Descending) | 127 m.buf.decending = append(m.buf.decending, sb.Descending) |
128 } else { | 128 } else { |
129 return indexRowGen{}, false | 129 return indexRowGen{}, false |
130 } | 130 } |
131 } | 131 } |
132 return m.buf, true | 132 return m.buf, true |
133 } | 133 } |
134 | 134 |
135 func indexEntries(sip serialize.SerializedPmap, ns string, idxs []*ds.IndexDefinition) *memStore { | 135 func indexEntries(sip serialize.SerializedPmap, ns string, idxs []*ds.IndexDefinition) memStore { |
136 ret := newMemStore() | 136 ret := newMemStore() |
137 » idxColl := ret.SetCollection("idx", nil) | 137 » idxColl := ret.GetOrCreateCollection("idx") |
138 | 138 |
139 mtch := matcher{} | 139 mtch := matcher{} |
140 for _, idx := range idxs { | 140 for _, idx := range idxs { |
141 idx = idx.Normalize() | 141 idx = idx.Normalize() |
142 if irg, ok := mtch.match(idx.GetFullSortOrder(), sip); ok { | 142 if irg, ok := mtch.match(idx.GetFullSortOrder(), sip); ok { |
143 idxBin := serialize.ToBytes(*idx.PrepForIdxTable()) | 143 idxBin := serialize.ToBytes(*idx.PrepForIdxTable()) |
144 idxColl.Set(idxBin, []byte{}) | 144 idxColl.Set(idxBin, []byte{}) |
145 » » » coll := ret.SetCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin), nil) | 145 » » » coll := ret.GetOrCreateCollection(fmt.Sprintf("idx:%s:%s", ns, idxBin)) |
146 irg.permute(coll.Set) | 146 irg.permute(coll.Set) |
147 } | 147 } |
148 } | 148 } |
149 | 149 |
150 return ret | 150 return ret |
151 } | 151 } |
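
For reference, the store returned here holds one meta collection named "idx" (keyed by the serialized index definitions) plus one collection per matched index named "idx:<namespace>:<serialized definition>", whose keys are the generated rows and whose values are empty. A small, hedged in-package sketch (reusing the file's fmt and gkvlite imports) that dumps the meta collection:

// Debug-style sketch, not part of the CL: list which indexes an
// indexEntries/indexEntriesWithBuiltins result actually populated.
func dumpIndexTable(s memStore) {
	idx := s.GetCollection("idx")
	if idx == nil {
		return
	}
	idx.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
		fmt.Printf("index table entry: %q\n", i.Key)
		return true
	})
}
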
152 | 152 |
153 // walkCompIdxs walks the table of compound indexes in the store. If `endsWith` | 153 // walkCompIdxs walks the table of compound indexes in the store. If `endsWith` |
154 // is provided, this will only walk over compound indexes which match | 154 // is provided, this will only walk over compound indexes which match |
155 // Kind, Ancestor, and whose SortBy has `endsWith.SortBy` as a suffix. | 155 // Kind, Ancestor, and whose SortBy has `endsWith.SortBy` as a suffix. |
156 func walkCompIdxs(store *memStore, endsWith *ds.IndexDefinition, cb func(*ds.IndexDefinition) bool) { | 156 func walkCompIdxs(store memStore, endsWith *ds.IndexDefinition, cb func(*ds.IndexDefinition) bool) {  |
157 idxColl := store.GetCollection("idx") | 157 idxColl := store.GetCollection("idx") |
158 if idxColl == nil { | 158 if idxColl == nil { |
159 return | 159 return |
160 } | 160 } |
161 itrDef := iterDefinition{c: idxColl} | 161 itrDef := iterDefinition{c: idxColl} |
162 | 162 |
163 if endsWith != nil { | 163 if endsWith != nil { |
164 full := serialize.ToBytes(*endsWith.Flip()) | 164 full := serialize.ToBytes(*endsWith.Flip()) |
165 // chop off the null terminating byte | 165 // chop off the null terminating byte |
166 itrDef.prefix = full[:len(full)-1] | 166 itrDef.prefix = full[:len(full)-1] |
167 } | 167 } |
168 | 168 |
169 it := itrDef.mkIter() | 169 it := itrDef.mkIter() |
170 defer it.stop() | 170 defer it.stop() |
171 for !it.stopped { | 171 for !it.stopped { |
172 it.next(nil, func(i *gkvlite.Item) { | 172 it.next(nil, func(i *gkvlite.Item) { |
173 if i == nil { | 173 if i == nil { |
174 return | 174 return |
175 } | 175 } |
176       qi, err := serialize.ReadIndexDefinition(bytes.NewBuffer(i.Key)) | 176       qi, err := serialize.ReadIndexDefinition(bytes.NewBuffer(i.Key)) |
177 memoryCorruption(err) | 177 memoryCorruption(err) |
178 if !cb(qi.Flip()) { | 178 if !cb(qi.Flip()) { |
179 it.stop() | 179 it.stop() |
180 } | 180 } |
181 }) | 181 }) |
182 } | 182 } |
183 } | 183 } |
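
A hedged usage sketch of the endsWith filter described in the doc comment above (the kind and property names are made up): collect every compound index on "Foo" whose sort order ends with a descending "Prop" column; returning false from the callback stops the walk early.

// Usage sketch only: gather matching compound index definitions.
func compIdxsEndingWithProp(store memStore) []*ds.IndexDefinition {
	ret := []*ds.IndexDefinition{}
	suffix := &ds.IndexDefinition{
		Kind:   "Foo",
		SortBy: []ds.IndexColumn{{Property: "Prop", Descending: true}},
	}
	walkCompIdxs(store, suffix, func(def *ds.IndexDefinition) bool {
		ret = append(ret, def)
		return true // false would stop the iteration
	})
	return ret
}
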
184 | 184 |
185 func mergeIndexes(ns string, store, oldIdx, newIdx *memStore) { | 185 func mergeIndexes(ns string, store, oldIdx, newIdx memStore) { |
186 prefixBuf := []byte("idx:" + ns + ":") | 186 prefixBuf := []byte("idx:" + ns + ":") |
187 origPrefixBufLen := len(prefixBuf) | 187 origPrefixBufLen := len(prefixBuf) |
| 188 |
| 189 oldIdx = oldIdx.Snapshot() |
| 190 newIdx = newIdx.Snapshot() |
| 191 |
188   gkvCollide(oldIdx.GetCollection("idx"), newIdx.GetCollection("idx"), func(k, ov, nv []byte) { | 192   gkvCollide(oldIdx.GetCollection("idx"), newIdx.GetCollection("idx"), func(k, ov, nv []byte) { |
189 prefixBuf = append(prefixBuf[:origPrefixBufLen], k...) | 193 prefixBuf = append(prefixBuf[:origPrefixBufLen], k...) |
190 ks := string(prefixBuf) | 194 ks := string(prefixBuf) |
191 | 195 |
192 » » coll := store.GetCollection(ks) | 196 » » coll := store.GetOrCreateCollection(ks) |
193 » » if coll == nil { | |
194 » » » coll = store.SetCollection(ks, nil) | |
195 » » } | |
196 | 197 |
197 oldColl := oldIdx.GetCollection(ks) | 198 oldColl := oldIdx.GetCollection(ks) |
198 newColl := newIdx.GetCollection(ks) | 199 newColl := newIdx.GetCollection(ks) |
199 | 200 |
200 switch { | 201 switch { |
201 case ov == nil && nv != nil: // all additions | 202 case ov == nil && nv != nil: // all additions |
202       newColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { | 203       newColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool { |
203 coll.Set(i.Key, []byte{}) | 204 coll.Set(i.Key, []byte{}) |
204 return true | 205 return true |
205 }) | 206 }) |
(...skipping 11 matching lines...) |
217 } | 218 } |
218 }) | 219 }) |
219 default: | 220 default: |
220     impossible(fmt.Errorf("both values from gkvCollide were nil?")) | 221     impossible(fmt.Errorf("both values from gkvCollide were nil?")) |
221 } | 222 } |
222     // TODO(riannucci): remove entries from idxColl and remove index collections | 223     // TODO(riannucci): remove entries from idxColl and remove index collections |
223 // when there are no index entries for that index any more. | 224 // when there are no index entries for that index any more. |
224 }) | 225 }) |
225 } | 226 } |
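
A hedged note on the calling convention (addIndexes and updateIndexes below are the real callers): the old and new stores are snapshotted up front, presumably so gkvCollide walks a stable, read-only view while store itself is being written to, and indexing an entity that previously had no rows is expressed by passing an empty store as oldIdx so everything lands in the all-additions branch. A sketch of that direction, mirroring the backfill loop in addIndexes below:

// Calling-convention sketch only.
func indexFreshEntity(store memStore, ns string, sip serialize.SerializedPmap, idxs []*ds.IndexDefinition) {
	mergeIndexes(ns, store,
		newMemStore(),               // no pre-existing rows
		indexEntries(sip, ns, idxs)) // rows the entity should have now
}
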
226 | 227 |
227 func addIndexes(store *memStore, aid string, compIdx []*ds.IndexDefinition) { | 228 func addIndexes(store memStore, aid string, compIdx []*ds.IndexDefinition) { |
228 normalized := make([]*ds.IndexDefinition, len(compIdx)) | 229 normalized := make([]*ds.IndexDefinition, len(compIdx)) |
229 » idxColl := store.SetCollection("idx", nil) | 230 » idxColl := store.GetOrCreateCollection("idx") |
230 for i, idx := range compIdx { | 231 for i, idx := range compIdx { |
231 normalized[i] = idx.Normalize() | 232 normalized[i] = idx.Normalize() |
232     idxColl.Set(serialize.ToBytes(*normalized[i].PrepForIdxTable()), []byte{}) | 233     idxColl.Set(serialize.ToBytes(*normalized[i].PrepForIdxTable()), []byte{}) |
233 } | 234 } |
234 | 235 |
235 for _, ns := range namespaces(store) { | 236 for _, ns := range namespaces(store) { |
236 » » if allEnts := store.GetCollection("ents:" + ns); allEnts != nil { | 237 » » if allEnts := store.Snapshot().GetCollection("ents:" + ns); allEnts != nil { |
237       allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool { | 238       allEnts.VisitItemsAscend(nil, true, func(i *gkvlite.Item) bool { |
238 pm, err := rpm(i.Val) | 239 pm, err := rpm(i.Val) |
239 memoryCorruption(err) | 240 memoryCorruption(err) |
240 | 241 |
241         prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Key), serialize.WithoutContext, aid, ns) | 242         prop, err := serialize.ReadProperty(bytes.NewBuffer(i.Key), serialize.WithoutContext, aid, ns) |
242 memoryCorruption(err) | 243 memoryCorruption(err) |
243 | 244 |
244 k := prop.Value().(*ds.Key) | 245 k := prop.Value().(*ds.Key) |
245 | 246 |
246 sip := serialize.PropertyMapPartially(k, pm) | 247 sip := serialize.PropertyMapPartially(k, pm) |
247 | 248 |
248 mergeIndexes(ns, store, | 249 mergeIndexes(ns, store, |
249 newMemStore(), | 250 newMemStore(), |
250 indexEntries(sip, ns, normalized)) | 251 indexEntries(sip, ns, normalized)) |
251 return true | 252 return true |
252 }) | 253 }) |
253 } | 254 } |
254 } | 255 } |
255 } | 256 } |
256 | 257 |
257 func updateIndexes(store *memStore, key *ds.Key, oldEnt, newEnt ds.PropertyMap) { | 258 func updateIndexes(store memStore, key *ds.Key, oldEnt, newEnt ds.PropertyMap) { |
258 // load all current complex query index definitions. | 259 // load all current complex query index definitions. |
259 compIdx := []*ds.IndexDefinition{} | 260 compIdx := []*ds.IndexDefinition{} |
260 » walkCompIdxs(store, nil, func(i *ds.IndexDefinition) bool { | 261 » walkCompIdxs(store.Snapshot(), nil, func(i *ds.IndexDefinition) bool { |
261 compIdx = append(compIdx, i) | 262 compIdx = append(compIdx, i) |
262 return true | 263 return true |
263 }) | 264 }) |
264 | 265 |
265 mergeIndexes(key.Namespace(), store, | 266 mergeIndexes(key.Namespace(), store, |
266 indexEntriesWithBuiltins(key, oldEnt, compIdx), | 267 indexEntriesWithBuiltins(key, oldEnt, compIdx), |
267 indexEntriesWithBuiltins(key, newEnt, compIdx)) | 268 indexEntriesWithBuiltins(key, newEnt, compIdx)) |
268 } | 269 } |
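
Finally, a hedged sketch of how a write path might drive this (the loadExisting callback and the entity-writing step are hypothetical stand-ins for the caller's own bookkeeping): the caller does no diffing itself, it just hands the old and new property maps to updateIndexes and lets mergeIndexes patch every affected index collection.

// Hypothetical caller sketch; not part of this CL.
func putAndReindex(store memStore, key *ds.Key, newPM ds.PropertyMap,
	loadExisting func(*ds.Key) ds.PropertyMap) {
	oldPM := loadExisting(key) // assumed nil/empty for a brand-new entity
	// ... the entity itself would be written to its "ents:<ns>" collection here ...
	updateIndexes(store, key, oldPM, newPM)
}
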