OLD | NEW |
1 // Copyright 2017 The LUCI Authors. All rights reserved. | 1 // Copyright 2017 The LUCI Authors. All rights reserved. |
2 // Use of this source code is governed under the Apache License, Version 2.0 | 2 // Use of this source code is governed under the Apache License, Version 2.0 |
3 // that can be found in the LICENSE file. | 3 // that can be found in the LICENSE file. |
4 | 4 |
5 package internal | 5 package internal |
6 | 6 |
7 import ( | 7 import ( |
8 "encoding/json" | 8 "encoding/json" |
9 "io/ioutil" | 9 "io/ioutil" |
10 "os" | 10 "os" |
11 "path/filepath" | 11 "path/filepath" |
12 "time" | 12 "time" |
13 | 13 |
14 "golang.org/x/net/context" | 14 "golang.org/x/net/context" |
15 "golang.org/x/oauth2" | 15 "golang.org/x/oauth2" |
16 | 16 |
17 "github.com/luci/luci-go/common/clock" | 17 "github.com/luci/luci-go/common/clock" |
18 "github.com/luci/luci-go/common/errors" | |
19 "github.com/luci/luci-go/common/logging" | 18 "github.com/luci/luci-go/common/logging" |
20 "github.com/luci/luci-go/common/retry" | 19 "github.com/luci/luci-go/common/retry" |
| 20 "github.com/luci/luci-go/common/retry/transient" |
21 ) | 21 ) |
22 | 22 |
23 const ( | 23 const ( |
24 // GCAccessTokenMaxAge defines when to remove unused access tokens from the | 24 // GCAccessTokenMaxAge defines when to remove unused access tokens from the |
25 // disk cache. | 25 // disk cache. |
26 // | 26 // |
27 // We define "an access token" as an instance of oauth2.Token with | 27 // We define "an access token" as an instance of oauth2.Token with |
28 // RefreshToken set to "". | 28 // RefreshToken set to "". |
29 // | 29 // |
30 // If an access token expired older than GCAccessTokenMaxAge ago, it will be | 30 // If an access token expired older than GCAccessTokenMaxAge ago, it will be |
(...skipping 160 matching lines...)
191 } | 191 } |
192 | 192 |
193 // Note that TempFile creates the file in 0600 mode already, so we don't need | 193 // Note that TempFile creates the file in 0600 mode already, so we don't need |
194 // to chmod it. | 194 // to chmod it. |
195 // | 195 // |
196 // On Windows Rename may fail with sharing violation error if some other | 196 // On Windows Rename may fail with sharing violation error if some other |
197 // process has opened the file. We treat it as transient error, to trigger | 197 // process has opened the file. We treat it as transient error, to trigger |
198 // a retry in updateCacheFile. | 198 // a retry in updateCacheFile. |
199 if err = os.Rename(tmp.Name(), c.absPath()); err != nil { | 199 if err = os.Rename(tmp.Name(), c.absPath()); err != nil { |
200 cleanup() | 200 cleanup() |
201         return errors.WrapTransient(err) | 201         return transient.Tag.Apply(err) |
202 } | 202 } |
203 return nil | 203 return nil |
204 } | 204 } |
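
The comment above explains why a failed Rename is tagged as transient: it lets the retry loop in updateCacheFile try the write again, which matters on Windows where another process holding the file open causes a sharing violation. Below is a minimal standalone sketch of that tagging pattern; the helper name and file paths are illustrative only and not part of this change.

package main

import (
	"fmt"
	"os"

	"github.com/luci/luci-go/common/retry/transient"
)

// replaceFile renames a freshly written temp file over the final cache file
// and tags any failure as transient, mirroring the approach in writeCacheFile.
// (Illustrative helper; the paths are made up.)
func replaceFile(tmpPath, finalPath string) error {
	if err := os.Rename(tmpPath, finalPath); err != nil {
		// A Windows sharing violation lands here; the transient tag lets a
		// retry loop wrapped in transient.Only attempt the rename again.
		return transient.Tag.Apply(err)
	}
	return nil
}

func main() {
	// Renaming a nonexistent file simply demonstrates the tagged error path.
	fmt.Println(replaceFile("tokens.json.tmp", "tokens.json"))
}

Errors that are not tagged this way are treated as fatal by transient.Only, so the retry loop stops on them immediately instead of retrying.
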
205 | 205 |
206 // updateCacheFile reads the token cache file, calls the callback, writes the file | 206 // updateCacheFile reads the token cache file, calls the callback, writes the file |
207 // back if the callback returns 'true'. | 207 // back if the callback returns 'true'. |
208 // | 208 // |
209 // It retries a bunch of times when encountering sharing violation errors on | 209 // It retries a bunch of times when encountering sharing violation errors on |
210 // Windows. | 210 // Windows. |
211 // | 211 // |
212 // TODO(vadimsh): Change this to use file locking - updateCacheFile is a global | 212 // TODO(vadimsh): Change this to use file locking - updateCacheFile is a global |
213 // critical section. | 213 // critical section. |
214 func (c *DiskTokenCache) updateCacheFile(cb func(*cacheFile, time.Time) bool) error { | 214 func (c *DiskTokenCache) updateCacheFile(cb func(*cacheFile, time.Time) bool) error { |
215 retryParams := func() retry.Iterator { | 215 retryParams := func() retry.Iterator { |
216 return &retry.ExponentialBackoff{ | 216 return &retry.ExponentialBackoff{ |
217 Limited: retry.Limited{ | 217 Limited: retry.Limited{ |
218 Delay: 10 * time.Millisecond, | 218 Delay: 10 * time.Millisecond, |
219 Retries: 200, | 219 Retries: 200, |
220 MaxTotal: 4 * time.Second, | 220 MaxTotal: 4 * time.Second, |
221 }, | 221 }, |
222 Multiplier: 1.5, | 222 Multiplier: 1.5, |
223 } | 223 } |
224 } | 224 } |
225     return retry.Retry(c.Context, retry.TransientOnly(retryParams), func() error { | 225     return retry.Retry(c.Context, transient.Only(retryParams), func() error { |
226 cache, err := c.readCacheFile() | 226 cache, err := c.readCacheFile() |
227 if err != nil { | 227 if err != nil { |
228 return err | 228 return err |
229 } | 229 } |
230 now := clock.Now(c.Context).UTC() | 230 now := clock.Now(c.Context).UTC() |
231 if cb(cache, now) { | 231 if cb(cache, now) { |
232 return c.writeCacheFile(cache, now) | 232 return c.writeCacheFile(cache, now) |
233 } | 233 } |
234 return nil | 234 return nil |
235 }, func(err error, d time.Duration) { | 235 }, func(err error, d time.Duration) { |
(...skipping 43 matching lines...)
279 return c.updateCacheFile(func(cache *cacheFile, now time.Time) bool { | 279 return c.updateCacheFile(func(cache *cacheFile, now time.Time) bool { |
280 for i, entry := range cache.Cache { | 280 for i, entry := range cache.Cache { |
281 if EqualCacheKeys(&entry.Key, key) { | 281 if EqualCacheKeys(&entry.Key, key) { |
282                         cache.Cache = append(cache.Cache[:i], cache.Cache[i+1:]...) | 282                         cache.Cache = append(cache.Cache[:i], cache.Cache[i+1:]...) |
283 return true | 283 return true |
284 } | 284 } |
285 } | 285 } |
286 return false // not there, this is fine, skip writing the file | 286 return false // not there, this is fine, skip writing the file |
287 }) | 287 }) |
288 } | 288 } |
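
As a usage note on the retry wiring shown in updateCacheFile, the following self-contained sketch mirrors its setup: the same ExponentialBackoff iterator (with smaller, made-up limits), transient.Only to restrict retries to transiently-tagged errors, and a callback that logs each delay. The flaky operation and attempt counts are invented purely for illustration.

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"

	"github.com/luci/luci-go/common/retry"
	"github.com/luci/luci-go/common/retry/transient"
)

func main() {
	// Same iterator shape as updateCacheFile, scaled down for the example.
	retryParams := func() retry.Iterator {
		return &retry.ExponentialBackoff{
			Limited: retry.Limited{
				Delay:    10 * time.Millisecond,
				Retries:  5,
				MaxTotal: time.Second,
			},
			Multiplier: 1.5,
		}
	}

	attempts := 0
	err := retry.Retry(context.Background(), transient.Only(retryParams), func() error {
		attempts++
		if attempts < 3 {
			// Tagged errors are retried because of transient.Only above.
			return transient.Tag.Apply(fmt.Errorf("flaky failure #%d", attempts))
		}
		return nil // succeeds on the third attempt
	}, func(err error, d time.Duration) {
		fmt.Printf("retrying in %s after: %s\n", d, err)
	})
	fmt.Println("final error:", err, "attempts:", attempts)
}

Swapping transient.Only for the old retry.TransientOnly (and transient.Tag.Apply for errors.WrapTransient) gives the pre-change equivalent, which is the whole substance of this CL.
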