OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 package memory | 5 package memory |
6 | 6 |
7 import ( | 7 import ( |
8 "bytes" | 8 "bytes" |
9 "fmt" | 9 "fmt" |
10 "reflect" | 10 "reflect" |
| 11 "sort" |
11 "time" | 12 "time" |
12 | 13 |
13 "github.com/luci/luci-go/common/funnybase" | |
14 | |
15 "appengine" | 14 "appengine" |
16 "appengine/datastore" | 15 "appengine/datastore" |
| 16 |
| 17 "github.com/luci/gkvlite" |
| 18 "github.com/luci/luci-go/common/funnybase" |
17 ) | 19 ) |
18 | 20 |
19 type typData struct { | 21 type typData struct { |
20 noIndex bool | 22 noIndex bool |
21 typ propValType | 23 typ propValType |
22 data interface{} | 24 data interface{} |
23 } | 25 } |
24 | 26 |
25 func newTypData(noIndex bool, v interface{}) (ret *typData, err error) { | 27 func newTypData(noIndex bool, v interface{}) (ret *typData, err error) { |
26 typ := pvUNKNOWN | 28 typ := pvUNKNOWN |
(...skipping 18 matching lines...) |
45 case appengine.BlobKey: | 47 case appengine.BlobKey: |
46 typ = pvBlobKey | 48 typ = pvBlobKey |
47 case string: | 49 case string: |
48 typ = pvStr | 50 typ = pvStr |
49 case appengine.GeoPoint: | 51 case appengine.GeoPoint: |
50 typ = pvGeoPoint | 52 typ = pvGeoPoint |
51 case *datastore.Key: | 53 case *datastore.Key: |
52 typ = pvKey | 54 typ = pvKey |
53 } | 55 } |
54 if typ == pvUNKNOWN { | 56 if typ == pvUNKNOWN { |
55 » » err = fmt.Errorf("propValTypeOf: unknown type of %#v", v) | 57 » » err = fmt.Errorf("propValTypeOf: unknown type of %T: %#v", v, v) |
56 } | 58 } |
57 | 59 |
58 return &typData{noIndex, typ, v}, err | 60 return &typData{noIndex, typ, v}, err |
59 } | 61 } |
60 | 62 |
61 func (td *typData) WriteBinary(buf *bytes.Buffer) error { | 63 func (td *typData) WriteBinary(buf *bytes.Buffer, nso nsOption) error { |
62 typb := byte(td.typ) | 64 typb := byte(td.typ) |
63 if td.noIndex { | 65 if td.noIndex { |
64 typb |= 0x80 | 66 typb |= 0x80 |
65 } | 67 } |
66 buf.WriteByte(typb) | 68 buf.WriteByte(typb) |
67 switch td.typ { | 69 switch td.typ { |
68 case pvNull, pvBoolFalse, pvBoolTrue: | 70 case pvNull, pvBoolFalse, pvBoolTrue: |
69 return nil | 71 return nil |
70 case pvInt: | 72 case pvInt: |
71 funnybase.Write(buf, td.data.(int64)) | 73 funnybase.Write(buf, td.data.(int64)) |
72 case pvFloat: | 74 case pvFloat: |
73 writeFloat64(buf, td.data.(float64)) | 75 writeFloat64(buf, td.data.(float64)) |
74 case pvStr: | 76 case pvStr: |
75 writeString(buf, td.data.(string)) | 77 writeString(buf, td.data.(string)) |
76 case pvBytes: | 78 case pvBytes: |
77 if td.noIndex { | 79 if td.noIndex { |
78 writeBytes(buf, td.data.([]byte)) | 80 writeBytes(buf, td.data.([]byte)) |
79 } else { | 81 } else { |
80 writeBytes(buf, td.data.(datastore.ByteString)) | 82 writeBytes(buf, td.data.(datastore.ByteString)) |
81 } | 83 } |
82 case pvTime: | 84 case pvTime: |
83 » » t := td.data.(time.Time) | 85 » » writeTime(buf, td.data.(time.Time)) |
84 » » funnybase.WriteUint(buf, uint64(t.Unix())*1e6+uint64(t.Nanosecond()/1e3)) | |
85 case pvGeoPoint: | 86 case pvGeoPoint: |
86 » » t := td.data.(appengine.GeoPoint) | 87 » » writeGeoPoint(buf, td.data.(appengine.GeoPoint)) |
87 » » writeFloat64(buf, t.Lat) | |
88 » » writeFloat64(buf, t.Lng) | |
89 case pvKey: | 88 case pvKey: |
90 » » writeKey(buf, withNS, td.data.(*datastore.Key)) | 89 » » writeKey(buf, nso, td.data.(*datastore.Key)) |
91 case pvBlobKey: | 90 case pvBlobKey: |
92 writeString(buf, string(td.data.(appengine.BlobKey))) | 91 writeString(buf, string(td.data.(appengine.BlobKey))) |
93 default: | 92 default: |
94 return fmt.Errorf("write: unknown type! %v", td) | 93 return fmt.Errorf("write: unknown type! %v", td) |
95 } | 94 } |
96 return nil | 95 return nil |
97 } | 96 } |
98 | 97 |
99 func (td *typData) ReadBinary(buf *bytes.Buffer) error { | 98 func (td *typData) ReadBinary(buf *bytes.Buffer, nso nsOption, ns string) error { |
100 typb, err := buf.ReadByte() | 99 typb, err := buf.ReadByte() |
101 if err != nil { | 100 if err != nil { |
102 return err | 101 return err |
103 } | 102 } |
104 td.noIndex = (typb & 0x80) != 0 // highbit means noindex | 103 td.noIndex = (typb & 0x80) != 0 // highbit means noindex |
105 td.typ = propValType(typb & 0x7f) | 104 td.typ = propValType(typb & 0x7f) |
106 switch td.typ { | 105 switch td.typ { |
107 case pvNull: | 106 case pvNull: |
108 td.data = nil | 107 td.data = nil |
109 case pvBoolTrue: | 108 case pvBoolTrue: |
110 td.data = true | 109 td.data = true |
111 case pvBoolFalse: | 110 case pvBoolFalse: |
112 td.data = false | 111 td.data = false |
113 case pvInt: | 112 case pvInt: |
114 » » v, err := funnybase.Read(buf) | 113 » » td.data, err = funnybase.Read(buf) |
115 » » if err != nil { | |
116 » » » return err | |
117 » » } | |
118 » » td.data = v | |
119 case pvFloat: | 114 case pvFloat: |
120 td.data, err = readFloat64(buf) | 115 td.data, err = readFloat64(buf) |
121 if err != nil { | |
122 return err | |
123 } | |
124 case pvStr: | 116 case pvStr: |
125 td.data, err = readString(buf) | 117 td.data, err = readString(buf) |
126 if err != nil { | |
127 return err | |
128 } | |
129 case pvBytes: | 118 case pvBytes: |
130 » » b, err := readBytes(buf) | 119 » » b := []byte(nil) |
131 » » if err != nil { | 120 » » if b, err = readBytes(buf); err != nil { |
132 return err | 121 return err |
133 } | 122 } |
134 if td.noIndex { | 123 if td.noIndex { |
135 td.data = b | 124 td.data = b |
136 } else { | 125 } else { |
137 td.data = datastore.ByteString(b) | 126 td.data = datastore.ByteString(b) |
138 } | 127 } |
139 case pvTime: | 128 case pvTime: |
140 » » v, err := funnybase.ReadUint(buf) | 129 » » td.data, err = readTime(buf) |
141 » » if err != nil { | |
142 » » » return err | |
143 » » } | |
144 » » td.data = time.Unix(int64(v/1e6), int64((v%1e6)*1e3)) | |
145 case pvGeoPoint: | 130 case pvGeoPoint: |
146 » » pt := appengine.GeoPoint{} | 131 » » td.data, err = readGeoPoint(buf) |
147 » » pt.Lat, err = readFloat64(buf) | |
148 » » if err != nil { | |
149 » » » return err | |
150 » » } | |
151 » » pt.Lng, err = readFloat64(buf) | |
152 » » if err != nil { | |
153 » » » return err | |
154 » » } | |
155 » » td.data = pt | |
156 case pvKey: | 132 case pvKey: |
157 » » td.data, err = readKey(buf, true) | 133 » » td.data, err = readKey(buf, nso, ns) |
158 » » if err != nil { | |
159 » » » return err | |
160 » » } | |
161 case pvBlobKey: | 134 case pvBlobKey: |
162 » » s, err := readString(buf) | 135 » » s := "" |
163 » » if err != nil { | 136 » » if s, err = readString(buf); err != nil { |
164 return err | 137 return err |
165 } | 138 } |
166 td.data = appengine.BlobKey(s) | 139 td.data = appengine.BlobKey(s) |
167 default: | 140 default: |
168 return fmt.Errorf("read: unknown type! %v", td) | 141 return fmt.Errorf("read: unknown type! %v", td) |
169 } | 142 } |
170 | 143 |
171 » return nil | 144 » return err |
172 } | 145 } |
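The tag byte written by WriteBinary and decoded above packs two things into one octet: the low 7 bits carry the propValType and the high bit (0x80) marks noIndex, which is why the enum further down must stay under 128 values. A minimal standalone sketch of that scheme (package main, made-up type value; not this package's API):

package main

import "fmt"

// encodeTag packs a 7-bit type value and a noIndex flag into one byte,
// mirroring the typb handling in WriteBinary above.
func encodeTag(typ byte, noIndex bool) byte {
	if typ > 0x7f {
		panic("type enum must fit in 7 bits")
	}
	if noIndex {
		typ |= 0x80
	}
	return typ
}

// decodeTag splits the byte back into its type and noIndex parts.
func decodeTag(b byte) (typ byte, noIndex bool) {
	return b & 0x7f, b&0x80 != 0
}

func main() {
	tag := encodeTag(5, true) // 5 is a hypothetical type value
	typ, noIdx := decodeTag(tag)
	fmt.Printf("tag=%#x typ=%d noIndex=%v\n", tag, typ, noIdx) // tag=0x85 typ=5 noIndex=true
}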
173 | 146 |
174 type pval struct { | 147 type pvals struct { |
175 » name string | 148 » name string |
176 » multi bool | 149 » vals []*typData |
177 » vals []*typData | |
178 } | 150 } |
179 | 151 |
180 type propertyList []datastore.Property | 152 type propertyList []datastore.Property |
181 | 153 |
182 var _ = datastore.PropertyLoadSaver((*propertyList)(nil)) | 154 var _ = datastore.PropertyLoadSaver((*propertyList)(nil)) |
183 | 155 |
184 func (pl *propertyList) Load(ch <-chan datastore.Property) error { | 156 func (pl *propertyList) Load(ch <-chan datastore.Property) error { |
185 return (*datastore.PropertyList)(pl).Load(ch) | 157 return (*datastore.PropertyList)(pl).Load(ch) |
186 } | 158 } |
187 | 159 |
188 func (pl *propertyList) Save(ch chan<- datastore.Property) error { | 160 func (pl *propertyList) Save(ch chan<- datastore.Property) error { |
189 return (*datastore.PropertyList)(pl).Save(ch) | 161 return (*datastore.PropertyList)(pl).Save(ch) |
190 } | 162 } |
191 | 163 |
192 func (pl *propertyList) collate() ([]*pval, error) { | 164 // collatedProperties is the reduction of a *propertyList such that each entry |
| 165 // in a collatedProperties has a unique name. For example, collating this: |
| 166 // pl := &propertyList{ |
| 167 // datastore.Property{Name: "wat", Val: "hello"}, |
| 168 // datastore.Property{Name: "other", Val: 100}, |
| 169 // datastore.Property{Name: "wat", Val: "goodbye", noIndex: true}, |
| 170 // } |
| 171 // |
| 172 // Would get a collatedProperties which looked like: |
| 173 // c := collatedProperties{ |
| 174 // &pvals{"wat", []*typData{&{false, pvStr, "hello"}, |
| 175 // &{true, pvStr, "goodbye"}}}, |
| 176 // &pvals{"other", []*typData{&{false, pvInt, 100}}} |
| 177 // } |
| 178 type collatedProperties []*pvals |
| 179 |
| 180 func (c collatedProperties) defaultIndicies(kind string) []*qIndex { |
| 181 » ret := make([]*qIndex, 0, 2*len(c)+1) |
| 182 » ret = append(ret, &qIndex{kind, false, nil}) |
| 183 » for _, pvals := range c { |
| 184 » » needsIndex := false |
| 185 » » for _, v := range pvals.vals { |
| 186 » » » if !v.noIndex { |
| 187 » » » » needsIndex = true |
| 188 » » » » break |
| 189 » » » } |
| 190 » » } |
| 191 » » if !needsIndex { |
| 192 » » » continue |
| 193 » » } |
| 194 » » ret = append(ret, &qIndex{kind, false, []qSortBy{{pvals.name, qASC}}})
| 195 » » ret = append(ret, &qIndex{kind, false, []qSortBy{{pvals.name, qDEC}}})
| 196 » } |
| 197 » return ret |
| 198 } |
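As a hedged illustration of what defaultIndicies produces (this snippet would have to live inside the memory package since the types are unexported; "Widget", "name", and "blob" are invented names): an entity with one indexed property and one noIndex-only property yields the kind-only index plus ascending and descending single-property indices for the indexed column.

// exampleDefaultIndicies is a sketch, not part of the CL.
func exampleDefaultIndicies() []*qIndex {
	c := collatedProperties{
		&pvals{"name", []*typData{{noIndex: false, typ: pvStr, data: "widget-1"}}},
		&pvals{"blob", []*typData{{noIndex: true, typ: pvBytes, data: []byte("x")}}},
	}
	// Expected result: {Widget}, {Widget, name ASC}, {Widget, name DEC};
	// "blob" contributes nothing because every one of its values is noIndex.
	return c.defaultIndicies("Widget")
}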
| 199 |
| 200 // serializedPval is a single pvals.vals entry which has been serialized (in |
| 201 // qASC order). |
| 202 type serializedPval []byte |
| 203 |
| 204 // serializedPvals is all of the pvals.vals entries from a single pvals (in qASC |
| 205 // order). It does not include the pvals.name field. |
| 206 type serializedPvals []serializedPval |
| 207 |
| 208 func (s serializedPvals) Len() int { return len(s) } |
| 209 func (s serializedPvals) Swap(i, j int) { s[i], s[j] = s[j], s[i] } |
| 210 func (s serializedPvals) Less(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 }
| 211 |
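The sort.Interface above simply orders the serialized blobs bytewise; a standalone stdlib illustration of that ordering (simplified local type and hypothetical byte values):

package main

import (
	"bytes"
	"fmt"
	"sort"
)

// blobs stands in for serializedPvals: raw serialized values ordered by bytes.Compare.
type blobs [][]byte

func (s blobs) Len() int           { return len(s) }
func (s blobs) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s blobs) Less(i, j int) bool { return bytes.Compare(s[i], s[j]) < 0 }

func main() {
	vals := blobs{{0x0f, 0x01}, {0x03}, {0x0f}}
	sort.Sort(vals)
	fmt.Printf("%x\n", vals) // [03 0f 0f01]: a shorter value sorts before its extension
}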
| 212 type mappedPlist map[string]serializedPvals |
| 213 |
| 214 func (c collatedProperties) indexableMap() (mappedPlist, error) { |
| 215 » ret := make(mappedPlist, len(c)) |
| 216 » for _, pv := range c { |
| 217 » » data := make(serializedPvals, 0, len(pv.vals)) |
| 218 » » for _, v := range pv.vals { |
| 219 » » » if v.noIndex { |
| 220 » » » » continue |
| 221 » » » } |
| 222 » » » buf := &bytes.Buffer{} |
| 223 » » » if err := v.WriteBinary(buf, noNS); err != nil { |
| 224 » » » » return nil, err |
| 225 » » » } |
| 226 » » » data = append(data, buf.Bytes()) |
| 227 » » } |
| 228 » » if len(data) == 0 { |
| 229 » » » continue |
| 230 » » } |
| 231 » » sort.Sort(data) |
| 232 » » ret[pv.name] = data |
| 233 » } |
| 234 » return ret, nil |
| 235 } |
| 236 |
| 237 // indexRowGen contains enough information to generate all of the index rows which
| 238 // correspond with a propertyList and a qIndex. |
| 239 type indexRowGen struct { |
| 240 » propVec []serializedPvals |
| 241 » orders []qDirection |
| 242 } |
| 243 |
| 244 // permute calls cb for each index row, in the sorted order of the rows. |
| 245 func (s indexRowGen) permute(cb func([]byte)) { |
| 246 » iVec := make([]int, len(s.propVec)) |
| 247 » iVecLim := make([]int, len(s.propVec)) |
| 248 |
| 249 » incPos := func() bool { |
| 250 » » for i := len(iVec) - 1; i >= 0; i-- { |
| 251 » » » var done bool |
| 252 » » » var newVal int |
| 253 » » » if s.orders[i] == qASC { |
| 254 » » » » newVal = (iVec[i] + 1) % iVecLim[i] |
| 255 » » » » done = newVal != 0 |
| 256 » » » } else { |
| 257 » » » » newVal = (iVec[i] - 1) |
| 258 » » » » if newVal < 0 { |
| 259 » » » » » newVal = iVecLim[i] - 1 |
| 260 » » » » } else { |
| 261 » » » » » done = true |
| 262 » » » » } |
| 263 » » » } |
| 264 » » » iVec[i] = newVal |
| 265 » » » if done { |
| 266 » » » » return true |
| 267 » » » } |
| 268 » » } |
| 269 » » return false |
| 270 » } |
| 271 |
| 272 » for i, sps := range s.propVec { |
| 273 » » iVecLim[i] = len(sps) |
| 274 » } |
| 275 |
| 276 » for i := range iVec { |
| 277 » » if s.orders[i] == qDEC { |
| 278 » » » iVec[i] = iVecLim[i] - 1 |
| 279 » » } |
| 280 » } |
| 281 |
| 282 » for { |
| 283 » » bufsiz := 0 |
| 284 » » for pvalSliceIdx, pvalIdx := range iVec { |
| 285 » » » bufsiz += len(s.propVec[pvalSliceIdx][pvalIdx]) |
| 286 » » } |
| 287 » » buf := bytes.NewBuffer(make([]byte, 0, bufsiz)) |
| 288 » » for pvalSliceIdx, pvalIdx := range iVec { |
| 289 » » » data := s.propVec[pvalSliceIdx][pvalIdx] |
| 290 » » » if s.orders[pvalSliceIdx] == qASC { |
| 291 » » » » buf.Write(data) |
| 292 » » » } else { |
| 293 » » » » for _, b := range data { |
| 294 » » » » » buf.WriteByte(b ^ 0xFF) |
| 295 » » » » } |
| 296 » » » } |
| 297 » » } |
| 298 » » cb(buf.Bytes()) |
| 299 » » if !incPos() { |
| 300 » » » break |
| 301 » » } |
| 302 » } |
| 303 } |
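A standalone sketch of the traversal permute performs, with simplified types ([][][]byte instead of []serializedPvals, a bool instead of qDirection, and a made-up function name): it walks the cartesian product of each column's sorted value list like an odometer, visits descending columns from their largest value first, and XORs their bytes with 0xFF so that a plain ascending byte sort of the emitted rows matches the requested directions.

package main

import "fmt"

// permuteSketch emits one concatenated row per combination of column values.
func permuteSketch(propVec [][][]byte, desc []bool, cb func([]byte)) {
	// counter[i] runs 0..len(propVec[i])-1; descending columns mirror the index
	// so their largest value is emitted first.
	counter := make([]int, len(propVec))
	for {
		row := []byte{}
		for col, c := range counter {
			i := c
			if desc[col] {
				i = len(propVec[col]) - 1 - c
			}
			for _, b := range propVec[col][i] {
				if desc[col] {
					b ^= 0xFF // invert so bytewise-ascending order reads as descending
				}
				row = append(row, b)
			}
		}
		cb(row)
		// odometer increment: bump the rightmost column, carrying on wrap-around.
		col := len(counter) - 1
		for ; col >= 0; col-- {
			counter[col]++
			if counter[col] < len(propVec[col]) {
				break
			}
			counter[col] = 0
		}
		if col < 0 {
			return
		}
	}
}

func main() {
	// property A has values 01,02 (ascending); property B has 10,20 (descending).
	permuteSketch(
		[][][]byte{{{0x01}, {0x02}}, {{0x10}, {0x20}}},
		[]bool{false, true},
		func(row []byte) { fmt.Printf("%x\n", row) }, // 01df, 01ef, 02df, 02ef
	)
}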
| 304 |
| 305 type matcher struct { |
| 306 » buf indexRowGen |
| 307 } |
| 308 |
| 309 // matcher.match checks to see if the mapped, serialized property values |
| 310 // match the index. If they do, it returns an indexRowGen. Do not write or modify
| 311 // the data in the indexRowGen. |
| 312 func (m *matcher) match(idx *qIndex, mpvals mappedPlist) (indexRowGen, bool) { |
| 313 » m.buf.propVec = m.buf.propVec[:0] |
| 314 » m.buf.orders = m.buf.orders[:0] |
| 315 » for _, sb := range idx.sortby { |
| 316 » » if pv, ok := mpvals[sb.prop]; ok { |
| 317 » » » m.buf.propVec = append(m.buf.propVec, pv) |
| 318 » » » m.buf.orders = append(m.buf.orders, sb.dir) |
| 319 » » } else { |
| 320 » » » return indexRowGen{}, false |
| 321 » » } |
| 322 » } |
| 323 » return m.buf, true |
| 324 } |
| 325 |
| 326 func (c collatedProperties) indexEntries(k *datastore.Key, idxs []*qIndex) (*memStore, error) {
| 327 » m, err := c.indexableMap() |
| 328 » if err != nil { |
| 329 » » return nil, err |
| 330 » } |
| 331 |
| 332 » ret := newMemStore() |
| 333 » idxColl := ret.SetCollection("idx", nil) |
| 334 » // getIdxEnts retrieves an index collection or adds it if it's not there.
| 335 » getIdxEnts := func(qi *qIndex) *memCollection { |
| 336 » » buf := &bytes.Buffer{} |
| 337 » » qi.WriteBinary(buf) |
| 338 » » b := buf.Bytes() |
| 339 » » idxColl.Set(b, []byte{}) |
| 340 » » return ret.SetCollection(fmt.Sprintf("idx:%s:%s", k.Namespace(), b), nil)
| 341 » } |
| 342 |
| 343 » buf := &bytes.Buffer{} |
| 344 » writeKey(buf, noNS, k) // ns is in idxEnts collection name. |
| 345 » keyData := buf.Bytes() |
| 346 |
| 347 » walkPermutations := func(prefix []byte, irg indexRowGen, ents *memCollection) {
| 348 » » prev := []byte{} // intentionally make a non-nil slice, gkvlite hates nil.
| 349 » » irg.permute(func(data []byte) { |
| 350 » » » buf := bytes.NewBuffer(make([]byte, 0, len(prefix)+len(data)+len(keyData)))
| 351 » » » buf.Write(prefix) |
| 352 » » » buf.Write(data) |
| 353 » » » buf.Write(keyData) |
| 354 » » » ents.Set(buf.Bytes(), prev) |
| 355 » » » prev = data |
| 356 » » }) |
| 357 » } |
| 358 |
| 359 » mtch := matcher{} |
| 360 » for _, idx := range idxs { |
| 361 » » if irg, ok := mtch.match(idx, m); ok { |
| 362 » » » idxEnts := getIdxEnts(idx) |
| 363 » » » if len(irg.propVec) == 0 { |
| 364 » » » » idxEnts.Set(keyData, []byte{}) // propless index, e.g. kind -> key = nil
| 365 » » » } else if idx.ancestor { |
| 366 » » » » for ancKey := k; ancKey != nil; ancKey = ancKey.Parent() {
| 367 » » » » » buf := &bytes.Buffer{} |
| 368 » » » » » writeKey(buf, noNS, ancKey) |
| 369 » » » » » walkPermutations(buf.Bytes(), irg, idxEnts)
| 370 » » » » } |
| 371 » » » } else { |
| 372 » » » » walkPermutations(nil, irg, idxEnts) |
| 373 » » » } |
| 374 » » } |
| 375 » } |
| 376 |
| 377 » return ret, nil |
| 378 } |
| 379 |
| 380 func (pl *propertyList) indexEntriesWithBuiltins(k *datastore.Key, complexIdxs []*qIndex) (ret *memStore, err error) {
| 381 » c, err := pl.collate() |
| 382 » if err == nil { |
| 383 » » ret, err = c.indexEntries(k, append(c.defaultIndicies(k.Kind()), complexIdxs...))
| 384 » } |
| 385 » return |
| 386 } |
| 387 |
| 388 func (pl *propertyList) collate() (collatedProperties, error) { |
193 if pl == nil || len(*pl) == 0 { | 389 if pl == nil || len(*pl) == 0 { |
194 return nil, nil | 390 return nil, nil |
195 } | 391 } |
196 | 392 |
197 » cols := []*pval{} | 393 » cols := []*pvals{} |
198 colIdx := map[string]int{} | 394 colIdx := map[string]int{} |
199 | 395 |
200 for _, p := range *pl { | 396 for _, p := range *pl { |
201 » » if idx, ok := colIdx[p.Name]; !ok { | 397 » » if idx, ok := colIdx[p.Name]; ok { |
| 398 » » » c := cols[idx] |
| 399 » » » td, err := newTypData(p.NoIndex, p.Value) |
| 400 » » » if err != nil { |
| 401 » » » » return nil, err |
| 402 » » » } |
| 403 » » » c.vals = append(c.vals, td) |
| 404 » » } else { |
202 colIdx[p.Name] = len(cols) | 405 colIdx[p.Name] = len(cols) |
203 td, err := newTypData(p.NoIndex, p.Value) | 406 td, err := newTypData(p.NoIndex, p.Value) |
204 if err != nil { | 407 if err != nil { |
205 return nil, err | 408 return nil, err |
206 } | 409 } |
207 » » » cols = append(cols, &pval{p.Name, p.Multiple, []*typData{td}}) | 410 » » » cols = append(cols, &pvals{p.Name, []*typData{td}}) |
208 » » } else { | |
209 » » » c := cols[idx] | |
210 » » » if c.multi != p.Multiple { | |
211 » » » » return nil, fmt.Errorf( | |
212 » » » » » "propertyList.MarshalBinary: field %q has conflicting values of Multiple", p.Name) | |
213 » » » } | |
214 » » » td, err := newTypData(p.NoIndex, p.Value) | |
215 » » » if err != nil { | |
216 » » » » return nil, err | |
217 » » » } | |
218 » » » c.vals = append(c.vals, td) | |
219 } | 411 } |
220 } | 412 } |
221 | 413 |
222 return cols, nil | 414 return cols, nil |
223 } | 415 } |
224 | 416 |
225 func (pl *propertyList) addCollated(pv *pval) { | 417 func (pl *propertyList) addCollated(pv *pvals) { |
226 for _, v := range pv.vals { | 418 for _, v := range pv.vals { |
227 *pl = append(*pl, datastore.Property{ | 419 *pl = append(*pl, datastore.Property{ |
228 Name: pv.name, | 420 Name: pv.name, |
229 » » » Multiple: pv.multi, | 421 » » » Multiple: len(pv.vals) > 1, |
230 NoIndex: v.noIndex, | 422 NoIndex: v.noIndex, |
231 Value: v.data, | 423 Value: v.data, |
232 }) | 424 }) |
233 } | 425 } |
234 } | 426 } |
235 | 427 |
| 428 func updateIndicies(store *memStore, key *datastore.Key, oldEnt, newEnt *propertyList) error {
| 429 var err error |
| 430 |
| 431 idxColl := store.GetCollection("idx") |
| 432 if idxColl == nil { |
| 433 idxColl = store.SetCollection("idx", nil) |
| 434 } |
| 435 |
| 436 // load all current complex query index definitions. |
| 437 compIdx := []*qIndex{} |
| 438         idxColl.VisitItemsAscend(complexQueryPrefix, false, func(i *gkvlite.Item) bool {
| 439 if !bytes.HasPrefix(i.Key, complexQueryPrefix) { |
| 440 return false |
| 441 } |
| 442 qi := &qIndex{} |
| 443 if err = qi.ReadBinary(bytes.NewBuffer(i.Key)); err != nil { |
| 444 return false |
| 445 } |
| 446 compIdx = append(compIdx, qi) |
| 447 return true |
| 448 }) |
| 449 if err != nil { |
| 450 return err |
| 451 } |
| 452 |
| 453 oldIdx, err := oldEnt.indexEntriesWithBuiltins(key, compIdx) |
| 454 if err != nil { |
| 455 return err |
| 456 } |
| 457 |
| 458 newIdx, err := newEnt.indexEntriesWithBuiltins(key, compIdx) |
| 459 if err != nil { |
| 460 return err |
| 461 } |
| 462 |
| 463 prefix := "idx:" + key.Namespace() + ":" |
| 464 |
| 465         gkvCollide(oldIdx.GetCollection("idx"), newIdx.GetCollection("idx"), func(k, ov, nv []byte) {
| 466 ks := prefix + string(k) |
| 467 idxColl.Set(k, []byte{}) |
| 468 |
| 469 coll := store.GetCollection(ks) |
| 470 if coll == nil { |
| 471 coll = store.SetCollection(ks, nil) |
| 472 } |
| 473 oldColl := oldIdx.GetCollection(ks) |
| 474 newColl := newIdx.GetCollection(ks) |
| 475 |
| 476 switch { |
| 477 case ov == nil && nv != nil: // all additions |
| 478                         newColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
| 479 coll.Set(i.Key, i.Val) |
| 480 return true |
| 481 }) |
| 482 case ov != nil && nv == nil: // all deletions |
| 483                         oldColl.VisitItemsAscend(nil, false, func(i *gkvlite.Item) bool {
| 484 coll.Delete(i.Key) |
| 485 return true |
| 486 }) |
| 487 case ov != nil && nv != nil: // merge |
| 488 gkvCollide(oldColl, newColl, func(k, ov, nv []byte) { |
| 489 if nv == nil { |
| 490 coll.Delete(k) |
| 491 } else { |
| 492 coll.Set(k, nv) |
| 493 } |
| 494 }) |
| 495 default: |
| 496 panic("impossible") |
| 497 } |
| 498                 // TODO(riannucci): remove entries from idxColl and remove index collections
| 499 // when there are no index entries for that index any more. |
| 500 }) |
| 501 |
| 502 return nil |
| 503 } |
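gkvCollide itself is defined elsewhere in this package; assuming it visits the union of keys from the two collections with the old and new value (nil when absent), the merge above boils down to "delete rows only the old entity produced, write rows the new entity produces". A map-based standalone sketch of that idea (all names hypothetical):

package main

import "fmt"

// collide visits every key present in either map with its old and new value;
// a missing entry is reported as nil, which is what the switch above keys off.
func collide(prev, next map[string][]byte, cb func(k string, ov, nv []byte)) {
	for k, ov := range prev {
		cb(k, ov, next[k])
	}
	for k, nv := range next {
		if _, seen := prev[k]; !seen {
			cb(k, nil, nv)
		}
	}
}

func main() {
	store := map[string][]byte{"row-a": {}, "row-b": {}}
	oldRows := map[string][]byte{"row-a": {}, "row-b": {}}
	newRows := map[string][]byte{"row-b": {}, "row-c": {}}

	collide(oldRows, newRows, func(k string, ov, nv []byte) {
		if nv == nil {
			delete(store, k) // the entity no longer generates this index row
		} else {
			store[k] = nv // new or surviving index row
		}
	})
	fmt.Println(len(store)) // 2: row-a removed, row-c added, row-b kept
}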
| 504 |
236 func (pl *propertyList) MarshalBinary() ([]byte, error) { | 505 func (pl *propertyList) MarshalBinary() ([]byte, error) { |
237 cols, err := pl.collate() | 506 cols, err := pl.collate() |
238 if err != nil || len(cols) == 0 { | 507 if err != nil || len(cols) == 0 { |
239 return nil, err | 508 return nil, err |
240 } | 509 } |
241 | 510 |
242 pieces := make([][]byte, 0, len(*pl)*2+1) | 511 pieces := make([][]byte, 0, len(*pl)*2+1) |
243 for _, pv := range cols { | 512 for _, pv := range cols { |
244 // TODO(riannucci): estimate buffer size better. | 513 // TODO(riannucci): estimate buffer size better. |
245                 buf := bytes.NewBuffer(make([]byte, 0, funnybase.MaxFunnyBaseLen64+len(pv.name))) | 514                 buf := bytes.NewBuffer(make([]byte, 0, funnybase.MaxFunnyBaseLen64+len(pv.name))) |
246 writeString(buf, pv.name) | 515 writeString(buf, pv.name) |
247 err := pv.WriteBinary(buf) | 516 err := pv.WriteBinary(buf) |
248 if err != nil { | 517 if err != nil { |
249 return nil, err | 518 return nil, err |
250 } | 519 } |
251 pieces = append(pieces, buf.Bytes()) | 520 pieces = append(pieces, buf.Bytes()) |
252 } | 521 } |
253 return bytes.Join(pieces, nil), nil | 522 return bytes.Join(pieces, nil), nil |
254 } | 523 } |
255 | 524 |
256 func (pl *propertyList) UnmarshalBinary(data []byte) error { | 525 func (pl *propertyList) UnmarshalBinary(data []byte) error { |
257 buf := bytes.NewBuffer(data) | 526 buf := bytes.NewBuffer(data) |
258 for buf.Len() > 0 { | 527 for buf.Len() > 0 { |
259 name, err := readString(buf) | 528 name, err := readString(buf) |
260 if err != nil { | 529 if err != nil { |
261 return err | 530 return err |
262 } | 531 } |
263 | 532 |
264 » » pv := &pval{name: name} | 533 » » pv := &pvals{name: name} |
265 err = pv.ReadBinary(buf) | 534 err = pv.ReadBinary(buf) |
266 if err != nil { | 535 if err != nil { |
267 return err | 536 return err |
268 } | 537 } |
269 pl.addCollated(pv) | 538 pl.addCollated(pv) |
270 } | 539 } |
271 | 540 |
272 return nil | 541 return nil |
273 } | 542 } |
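For reference, the wire format produced above is just writeString(name) followed by pvals.WriteBinary (a funnybase count, then each typData), repeated per property. A hedged sketch of exercising the round trip from inside this package (the types are unexported, so this only compiles as part of the memory package; property names and values are made up):

// exampleRoundTrip is a sketch, not part of the CL.
func exampleRoundTrip() (*propertyList, error) {
	pl := &propertyList{
		datastore.Property{Name: "wat", Value: "hello"},
		datastore.Property{Name: "wat", Value: "goodbye", NoIndex: true},
		datastore.Property{Name: "count", Value: int64(42)},
	}
	data, err := pl.MarshalBinary()
	if err != nil {
		return nil, err
	}
	out := &propertyList{}
	// After unmarshalling, "wat" comes back as two values with Multiple set,
	// since Multiple is now derived from the collated value count.
	return out, out.UnmarshalBinary(data)
}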
274 | 543 |
(...skipping 18 matching lines...) |
293 return err2 | 562 return err2 |
294 } | 563 } |
295 | 564 |
296 type propValType byte | 565 type propValType byte |
297 | 566 |
298 var byteSliceType = reflect.TypeOf([]byte(nil)) | 567 var byteSliceType = reflect.TypeOf([]byte(nil)) |
299 | 568 |
300 // These constants are in the order described by | 569 // These constants are in the order described by |
301 // https://cloud.google.com/appengine/docs/go/datastore/entities#Go_Value_type_ordering | 570 // https://cloud.google.com/appengine/docs/go/datastore/entities#Go_Value_type_ordering |
302 // with a slight divergence for the Int/Time split. | 571 // with a slight divergence for the Int/Time split. |
| 572 // NOTE: this enum can only occupy 7 bits, because we use the high bit to encode |
| 573 // indexed/non-indexed. See typData.WriteBinary. |
303 const ( | 574 const ( |
304 pvNull propValType = iota | 575 pvNull propValType = iota |
305 pvInt | 576 pvInt |
306 | 577 |
307 // NOTE: this is a slight divergence; times and integers actually sort | 578 // NOTE: this is a slight divergence; times and integers actually sort |
308 // together (apparently?) in datastore. This is probably insane, and I don't | 579 // together (apparently?) in datastore. This is probably insane, and I don't |
309 // want to add the complexity of field 'meaning' as a separate concept from the | 580 // want to add the complexity of field 'meaning' as a separate concept from the |
310 // field's 'type' (which is what datastore seems to do, judging from the | 581 // field's 'type' (which is what datastore seems to do, judging from the |
311 // protobufs). So if you're here because you implemented an app which relies | 582 // protobufs). So if you're here because you implemented an app which relies |
312 // on time.Time and int64 sorting together, then this is why your app acts | 583 // on time.Time and int64 sorting together, then this is why your app acts |
(...skipping 17 matching lines...) |
330 | 601 |
331 // These two are problematic, because they force us to bind to the appen
gine | 602 // These two are problematic, because they force us to bind to the appen
gine |
332 // SDK code. If we can drop support for these and turn them into hard er
rors, | 603 // SDK code. If we can drop support for these and turn them into hard er
rors, |
333 // that could let us decouple from the various appengine SDKs. Maybe. | 604 // that could let us decouple from the various appengine SDKs. Maybe. |
334 pvKey // TODO(riannucci): remove support for this (use a string) | 605 pvKey // TODO(riannucci): remove support for this (use a string) |
335 pvBlobKey // TODO(riannucci): remove support for this (use a string) | 606 pvBlobKey // TODO(riannucci): remove support for this (use a string) |
336 | 607 |
337 pvUNKNOWN | 608 pvUNKNOWN |
338 ) | 609 ) |
339 | 610 |
340 func (p *pval) ReadBinary(buf *bytes.Buffer) error { | 611 func (p *pvals) ReadBinary(buf *bytes.Buffer) error { |
341 n, err := funnybase.ReadUint(buf) | 612 n, err := funnybase.ReadUint(buf) |
342 if err != nil { | 613 if err != nil { |
343 return err | 614 return err |
344 } | 615 } |
345 p.multi = n > 1 | |
346 | 616 |
347 p.vals = make([]*typData, n) | 617 p.vals = make([]*typData, n) |
348 for i := range p.vals { | 618 for i := range p.vals { |
349 p.vals[i] = &typData{} | 619 p.vals[i] = &typData{} |
350 » » err := p.vals[i].ReadBinary(buf) | 620 » » err := p.vals[i].ReadBinary(buf, withNS, "") |
351 if err != nil { | 621 if err != nil { |
352 return err | 622 return err |
353 } | 623 } |
354 } | 624 } |
355 | 625 |
356 return nil | 626 return nil |
357 } | 627 } |
358 | 628 |
359 func (p *pval) WriteBinary(buf *bytes.Buffer) error { | 629 func (p *pvals) WriteBinary(buf *bytes.Buffer) error { |
360 funnybase.WriteUint(buf, uint64(len(p.vals))) | 630 funnybase.WriteUint(buf, uint64(len(p.vals))) |
361 for _, v := range p.vals { | 631 for _, v := range p.vals { |
362 » » if err := v.WriteBinary(buf); err != nil { | 632 » » if err := v.WriteBinary(buf, withNS); err != nil { |
363 return err | 633 return err |
364 } | 634 } |
365 } | 635 } |
366 return nil | 636 return nil |
367 } | 637 } |