Index: server/cmd/logdog_archivist/main.go
diff --git a/server/cmd/logdog_archivist/main.go b/server/cmd/logdog_archivist/main.go
index 7fce71b316de54a44241667666691a69080f5dc8..4d865fc6b99ccd87df004ba36dcf251bff406341 100644
--- a/server/cmd/logdog_archivist/main.go
+++ b/server/cmd/logdog_archivist/main.go
@@ -10,11 +10,13 @@ import (
 	"github.com/luci/luci-go/common/auth"
 	"github.com/luci/luci-go/common/clock"
+	"github.com/luci/luci-go/common/config"
 	"github.com/luci/luci-go/common/errors"
 	"github.com/luci/luci-go/common/gcloud/gs"
 	"github.com/luci/luci-go/common/gcloud/pubsub"
 	log "github.com/luci/luci-go/common/logging"
 	"github.com/luci/luci-go/common/parallel"
+	"github.com/luci/luci-go/common/proto/logdog/svcconfig"
 	"github.com/luci/luci-go/common/tsmon/distribution"
 	"github.com/luci/luci-go/common/tsmon/field"
 	"github.com/luci/luci-go/common/tsmon/metric"
@@ -61,29 +63,10 @@ func (a *application) runArchivist(c context.Context) error {
 	case acfg == nil:
 		return errors.New("missing Archivist configuration")
-	case acfg.GsBase == "":
-		return errors.New("missing archive GS bucket")
 	case acfg.GsStagingBase == "":
 		return errors.New("missing archive staging GS bucket")
 	}
-	// Construct and validate our GS bases.
-	gsBase := gs.Path(acfg.GsBase)
-	if gsBase.Bucket() == "" {
-		log.Fields{
-			"value": gsBase,
-		}.Errorf(c, "Google Storage base does not include a bucket name.")
-		return errors.New("invalid Google Storage base")
-	}
-
-	gsStagingBase := gs.Path(acfg.GsStagingBase)
-	if gsStagingBase.Bucket() == "" {
-		log.Fields{
-			"value": gsStagingBase,
-		}.Errorf(c, "Google Storage staging base does not include a bucket name.")
-		return errors.New("invalid Google Storage staging base")
-	}
-
 	// Initialize Pub/Sub client.
 	//
 	// We will initialize both an authenticated Client instance and an
@@ -143,12 +126,6 @@ func (a *application) runArchivist(c context.Context) error {
 		Service:  a.Coordinator(),
 		Storage:  st,
 		GSClient: gsClient,
-
-		GSBase:           gsBase,
-		GSStagingBase:    gsStagingBase,
-		StreamIndexRange: int(acfg.IndexStreamRange),
-		PrefixIndexRange: int(acfg.IndexPrefixRange),
-		ByteRange:        int(acfg.IndexByteRange),
 	}
 	tasks := int(acfg.Tasks)
@@ -239,6 +216,49 @@ func (a *application) runArchivist(c context.Context) error {
 	return nil
 }
+// GetSettingsLoader returns an archivist.SettingsLoader implementation that
+// merges global and project-specific settings.
+//
+// The resulting settings object will be verified by the Archivist.
+func (a *application) GetSettingsLoader(acfg *svcconfig.Archivist) archivist.SettingsLoader {
+	return func(c context.Context, proj config.ProjectName) (*archivist.Settings, error) {
+		// Load our Archivist-wide settings.
+		st := archivist.Settings{
+			GSBase:        gs.Path(acfg.GsBase),
+			GSStagingBase: gs.Path(acfg.GsStagingBase),
nodir
2016/05/19 15:56:29
I assume this will be rebased to be use GsStagingB
dnj (Google)
2016/05/19 16:34:51
Yeah. This is weird, I did a "dependency upload" a
+
+			IndexStreamRange: int(acfg.IndexStreamRange),
+			IndexPrefixRange: int(acfg.IndexPrefixRange),
+			IndexByteRange:   int(acfg.IndexByteRange),
nodir
2016/05/19 15:56:29
I assume this will be rebased to be use GsStagingB
dnj (Google)
2016/05/19 16:34:51
Acknowledged.
+		}
+
+		// Fold in our project-specific configuration, if valid.
+		pcfg, err := a.ProjectConfig(c, proj)
+		if err != nil {
+			log.Fields{
+				log.ErrorKey: err,
+				"project":    proj,
+			}.Errorf(c, "Failed to fetch project configuration.")
+		}
nodir
2016/05/19 15:56:29
return err?
dnj (Google)
2016/05/19 16:34:51
er um ... yes.
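
For reference, the fix this thread converges on would presumably have the closure bail out when the project configuration cannot be fetched. A minimal sketch of that shape, not the code as uploaded in this patch set:

// Sketch only: propagate the fetch failure instead of falling through
// with an incomplete pcfg (per nodir's "return err?" above).
pcfg, err := a.ProjectConfig(c, proj)
if err != nil {
	log.Fields{
		log.ErrorKey: err,
		"project":    proj,
	}.Errorf(c, "Failed to fetch project configuration.")
	return nil, err
}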
+
+		// Fold project settings into loaded ones.
+		if pcfg.ArchiveGsBase != "" {
+			st.GSBase = gs.Path(pcfg.ArchiveGsBase)
+		}
+		st.AlwaysCreateBinary = (acfg.AlwaysCreateBinary || pcfg.AlwaysCreateBinary)
+		if r := pcfg.ArchiveIndexStreamRange; r >= 0 {
nodir
2016/05/19 15:56:29
I still don't get this obsession to save a field v
dnj (Google)
2016/05/19 16:34:51
In this case, I think it is less wordy and therefo
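
For context on the style point here, a minimal sketch of the two forms being compared; the second is my reading of the alternative nodir is suggesting, not text from this CL:

// Form in the CL: short variable declaration scoped to the if statement.
if r := pcfg.ArchiveIndexStreamRange; r >= 0 {
	st.IndexStreamRange = int(r)
}

// Alternative without the extra variable: repeat the field expression.
if pcfg.ArchiveIndexStreamRange >= 0 {
	st.IndexStreamRange = int(pcfg.ArchiveIndexStreamRange)
}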
+			st.IndexStreamRange = int(r)
+		}
+		if r := pcfg.ArchiveIndexPrefixRange; r >= 0 {
+			st.IndexPrefixRange = int(r)
+		}
+		if r := pcfg.ArchiveIndexByteRange; r >= 0 {
+			st.IndexByteRange = int(r)
+		}
+		return &st, nil
+	}
+}
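
Not shown in this hunk: the returned loader presumably replaces the GSBase/GSStagingBase/index-range fields that the earlier hunk removed from the archivist.Archivist literal. A rough sketch of how it might be wired in, where the SettingsLoader field name and the surrounding variable name are assumptions on my part:

ar := archivist.Archivist{
	Service:        a.Coordinator(),
	Storage:        st,
	GSClient:       gsClient,
	SettingsLoader: a.GetSettingsLoader(acfg), // assumed field name; not part of this diff
}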
+
+// Entry point.
 func main() {
 	a := application{