Index: appengine/logdog/coordinator/endpoints/logs/get_test.go |
diff --git a/appengine/logdog/coordinator/endpoints/logs/get_test.go b/appengine/logdog/coordinator/endpoints/logs/get_test.go |
index fedd875b5b585b5416adb4396d9e789452a5fffb..e252ce7300f1f945ca362a9740569336173aab08 100644 |
--- a/appengine/logdog/coordinator/endpoints/logs/get_test.go |
+++ b/appengine/logdog/coordinator/endpoints/logs/get_test.go |
@@ -21,6 +21,7 @@ import ( |
ct "github.com/luci/luci-go/appengine/logdog/coordinator/coordinatorTest" |
"github.com/luci/luci-go/common/api/logdog_coordinator/logs/v1" |
"github.com/luci/luci-go/common/clock/testclock" |
+ "github.com/luci/luci-go/common/config" |
"github.com/luci/luci-go/common/gcloud/gs" |
"github.com/luci/luci-go/common/iotools" |
"github.com/luci/luci-go/common/logdog/types" |
@@ -157,6 +158,7 @@ func testGetImpl(t *testing.T, archived bool) { |
c = auth.WithState(c, &fs) |
ms := memoryStorage.Storage{} |
+ |
gsc := testGSClient{} |
svcStub := ct.Services{ |
IS: func() (storage.Storage, error) { |
@@ -170,12 +172,19 @@ func testGetImpl(t *testing.T, archived bool) { |
svcStub.ServiceConfig.Coordinator.AdminAuthGroup = "test-administrators" |
c = coordinator.WithServices(c, &svcStub) |
- s := New() |
+ svr := New() |
+ |
+ // di is a datastore bound to the test project namespace. |
+ const project = "test-project" |
+ if err := coordinator.WithProjectNamespace(&c, config.ProjectName(project)); err != nil { |
+ panic(err) |
+ } |
+ di := ds.Get(c) |
// Generate our test stream. |
desc := ct.TestLogStreamDescriptor(c, "foo/bar") |
ls := ct.TestLogStream(c, desc) |
- if err := ds.Get(c).Put(ls); err != nil { |
+ if err := di.Put(ls); err != nil { |
panic(err) |
} |
@@ -233,12 +242,20 @@ func testGetImpl(t *testing.T, archived bool) { |
Convey(`Testing Get requests (no logs)`, func() { |
req := logdog.GetRequest{ |
- Path: string(ls.Path()), |
+ Project: project, |
+ Path: string(ls.Path()), |
} |
+ Convey(`Will succeed with no logs.`, func() { |
+ resp, err := svr.Get(c, &req) |
+ |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs) |
+ }) |
+ |
Convey(`Will fail if the Path is not a stream path or a hash.`, func() { |
req.Path = "not/a/full/stream/path" |
- _, err := s.Get(c, &req) |
+ _, err := svr.Get(c, &req) |
So(err, ShouldErrLike, "invalid path value") |
}) |
@@ -246,339 +263,377 @@ func testGetImpl(t *testing.T, archived bool) { |
c, fb := featureBreaker.FilterRDS(c, nil) |
fb.BreakFeatures(errors.New("testing error"), "GetMulti") |
- _, err := s.Get(c, &req) |
+ _, err := svr.Get(c, &req) |
So(err, ShouldBeRPCInternal) |
}) |
- Convey(`Will fail with NotFound if the log stream does not exist.`, func() { |
+ Convey(`Will fail with NotFound if the log stream does not exist (different project).`, func() { |
+ req.Project = "does-not-exist" |
+ _, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCNotFound) |
+ }) |
+ |
+ Convey(`Will fail with NotFound if the log path does not exist (different path).`, func() { |
req.Path = "testing/+/does/not/exist" |
- _, err := s.Get(c, &req) |
+ _, err := svr.Get(c, &req) |
So(err, ShouldBeRPCNotFound) |
}) |
}) |
- if !archived { |
- // Add the logs to the in-memory temporary storage. |
- for _, le := range entries { |
- err := ms.Put(storage.PutRequest{ |
- Path: ls.Path(), |
- Index: types.MessageIndex(le.StreamIndex), |
- Values: [][]byte{protobufs[le.StreamIndex]}, |
- }) |
- if err != nil { |
- panic(fmt.Errorf("failed to Put() LogEntry: %v", err)) |
- } |
+ Convey(`Testing Tail requests (no logs)`, func() { |
+ req := logdog.TailRequest{ |
+ Project: project, |
+ Path: string(ls.Path()), |
} |
- } else { |
- // Archive this log stream. We will generate one index entry for every |
- // 2 log entries. |
- src := staticArchiveSource(entries) |
- var lbuf, ibuf bytes.Buffer |
- m := archive.Manifest{ |
- Desc: desc, |
- Source: &src, |
- LogWriter: &lbuf, |
- IndexWriter: &ibuf, |
- StreamIndexRange: 2, |
+ |
+ Convey(`Will succeed with no logs.`, func() { |
+ resp, err := svr.Tail(c, &req) |
+ |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs) |
+ }) |
+ |
+ Convey(`Will fail with NotFound if the log stream does not exist (different project).`, func() { |
+ req.Project = "does-not-exist" |
+ _, err := svr.Tail(c, &req) |
+ So(err, ShouldBeRPCNotFound) |
+ }) |
+ |
+ Convey(`Will fail with NotFound if the log path does not exist (different path).`, func() { |
+ req.Path = "testing/+/does/not/exist" |
+ _, err := svr.Tail(c, &req) |
+ So(err, ShouldBeRPCNotFound) |
+ }) |
+ }) |
+ |
+ Convey(`When testing log data is added`, func() { |
+ if !archived { |
+ // Add the logs to the in-memory temporary storage. |
+ for _, le := range entries { |
+ err := ms.Put(storage.PutRequest{ |
+ Project: project, |
+ Path: ls.Path(), |
+ Index: types.MessageIndex(le.StreamIndex), |
+ Values: [][]byte{protobufs[le.StreamIndex]}, |
+ }) |
+ if err != nil { |
+ panic(fmt.Errorf("failed to Put() LogEntry: %v", err)) |
+ } |
+ } |
+ } else { |
+ // Archive this log stream. We will generate one index entry for every |
+ // 2 log entries. |
+ src := staticArchiveSource(entries) |
+ var lbuf, ibuf bytes.Buffer |
+ m := archive.Manifest{ |
+ Desc: desc, |
+ Source: &src, |
+ LogWriter: &lbuf, |
+ IndexWriter: &ibuf, |
+ StreamIndexRange: 2, |
+ } |
+ if err := archive.Archive(m); err != nil { |
+ panic(err) |
+ } |
+ |
+ now := tc.Now().UTC() |
+ |
+ gsc.put("gs://testbucket/stream", lbuf.Bytes()) |
+ gsc.put("gs://testbucket/index", ibuf.Bytes()) |
+ ls.State = coordinator.LSArchived |
+ ls.TerminatedTime = now |
+ ls.ArchivedTime = now |
+ ls.ArchiveStreamURL = "gs://testbucket/stream" |
+ ls.ArchiveIndexURL = "gs://testbucket/index" |
} |
- if err := archive.Archive(m); err != nil { |
+ if err := di.Put(ls); err != nil { |
panic(err) |
} |
- now := tc.Now().UTC() |
+ Convey(`Testing Get requests`, func() { |
+ req := logdog.GetRequest{ |
+ Project: project, |
+ Path: string(ls.Path()), |
+ } |
- gsc.put("gs://testbucket/stream", lbuf.Bytes()) |
- gsc.put("gs://testbucket/index", ibuf.Bytes()) |
- ls.State = coordinator.LSArchived |
- ls.TerminatedTime = now |
- ls.ArchivedTime = now |
- ls.ArchiveStreamURL = "gs://testbucket/stream" |
- ls.ArchiveIndexURL = "gs://testbucket/index" |
- } |
- if err := ds.Get(c).Put(ls); err != nil { |
- panic(err) |
- } |
+ Convey(`When the log stream is purged`, func() { |
+ ls.Purged = true |
+ if err := di.Put(ls); err != nil { |
+ panic(err) |
+ } |
- Convey(`Testing Get requests`, func() { |
- req := logdog.GetRequest{ |
- Path: string(ls.Path()), |
- } |
+ Convey(`Will return NotFound if the user is not an administrator.`, func() { |
+ _, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCNotFound) |
+ }) |
- Convey(`When the log stream is purged`, func() { |
- ls.Purged = true |
- if err := ds.Get(c).Put(ls); err != nil { |
- panic(err) |
- } |
+ Convey(`Will process the request if the user is an administrator.`, func() { |
+ fs.IdentityGroups = []string{"test-administrators"} |
- Convey(`Will return NotFound if the user is not an administrator.`, func() { |
- _, err := s.Get(c, &req) |
- So(err, ShouldBeRPCNotFound) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
}) |
- Convey(`Will process the request if the user is an administrator.`, func() { |
- fs.IdentityGroups = []string{"test-administrators"} |
+ Convey(`Will return empty if no records were requested.`, func() { |
+ req.LogCount = -1 |
+ req.State = false |
- resp, err := s.Get(c, &req) |
+ resp, err := svr.Get(c, &req) |
So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
+ So(resp.Logs, ShouldHaveLength, 0) |
}) |
- }) |
- Convey(`Will return empty if no records were requested.`, func() { |
- req.LogCount = -1 |
- req.State = false |
+ Convey(`Will successfully retrieve a stream path.`, func() { |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp.Logs, ShouldHaveLength, 0) |
- }) |
+ Convey(`Will successfully retrieve a stream path offset at 4.`, func() { |
+ req.Index = 4 |
- Convey(`Will successfully retrieve a stream path.`, func() { |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 4, 5) |
+ }) |
- Convey(`Will successfully retrieve a stream path offset at 4.`, func() { |
- req.Index = 4 |
+ Convey(`Will retrieve no logs for contiguous offset 6.`, func() { |
+ req.Index = 6 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 4, 5) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(len(resp.Logs), ShouldEqual, 0) |
+ }) |
- Convey(`Will retrieve no logs for contiguous offset 6.`, func() { |
- req.Index = 6 |
+ Convey(`Will retrieve log 7 for non-contiguous offset 6.`, func() { |
+ req.NonContiguous = true |
+ req.Index = 6 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(len(resp.Logs), ShouldEqual, 0) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 7) |
+ }) |
- Convey(`Will retrieve log 7 for non-contiguous offset 6.`, func() { |
- req.NonContiguous = true |
- req.Index = 6 |
+ Convey(`With a byte limit of 1, will still return at least one log entry.`, func() { |
+ req.ByteCount = 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 7) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0) |
+ }) |
- Convey(`With a byte limit of 1, will still return at least one log entry.`, func() { |
- req.ByteCount = 1 |
+ Convey(`With a byte limit of sizeof(0), will return log entry 0.`, func() { |
+ req.ByteCount = frameSize(0) |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0) |
+ }) |
- Convey(`With a byte limit of sizeof(0), will return log entry 0.`, func() { |
- req.ByteCount = frameSize(0) |
+ Convey(`With a byte limit of sizeof(0)+1, will return log entry 0.`, func() { |
+ req.ByteCount = frameSize(0) + 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0) |
+ }) |
- Convey(`With a byte limit of sizeof(0)+1, will return log entry 0.`, func() { |
- req.ByteCount = frameSize(0) + 1 |
+ Convey(`With a byte limit of sizeof({0, 1}), will return log entries {0, 1}.`, func() { |
+ req.ByteCount = frameSize(0, 1) |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1) |
+ }) |
- Convey(`With a byte limit of sizeof({0, 1}), will return log entries {0, 1}.`, func() { |
- req.ByteCount = frameSize(0, 1) |
+ Convey(`With a byte limit of sizeof({0, 1, 2}), will return log entries {0, 1, 2}.`, func() { |
+ req.ByteCount = frameSize(0, 1, 2) |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
- Convey(`With a byte limit of sizeof({0, 1, 2}), will return log entries {0, 1, 2}.`, func() { |
- req.ByteCount = frameSize(0, 1, 2) |
+ Convey(`With a byte limit of sizeof({0, 1, 2})+1, will return log entries {0, 1, 2}.`, func() { |
+ req.ByteCount = frameSize(0, 1, 2) + 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
- Convey(`With a byte limit of sizeof({0, 1, 2})+1, will return log entries {0, 1, 2}.`, func() { |
- req.ByteCount = frameSize(0, 1, 2) + 1 |
+ Convey(`Will successfully retrieve a stream path hash.`, func() { |
+ req.Path = ls.HashID |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
- }) |
+ Convey(`When requesting state`, func() { |
+ req.State = true |
+ req.LogCount = -1 |
- Convey(`Will successfully retrieve a stream path hash.`, func() { |
- req.Path = ls.HashID |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
- }) |
+ Convey(`Will successfully retrieve stream state.`, func() { |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
+ So(len(resp.Logs), ShouldEqual, 0) |
+ }) |
- Convey(`When requesting state`, func() { |
- req.State = true |
- req.LogCount = -1 |
+ Convey(`Will return Internal if the protobuf descriptor data is corrupt.`, func() { |
+ ls.SetDSValidate(false) |
+ ls.Descriptor = []byte{0x00} // Invalid protobuf, zero tag. |
+ if err := di.Put(ls); err != nil { |
+ panic(err) |
+ } |
- Convey(`Will successfully retrieve stream state.`, func() { |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
- So(len(resp.Logs), ShouldEqual, 0) |
+ _, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCInternal) |
+ }) |
}) |
- Convey(`Will return Internal if the protobuf descriptor data is corrupt.`, func() { |
- ls.SetDSValidate(false) |
- ls.Descriptor = []byte{0x00} // Invalid protobuf, zero tag. |
- if err := ds.Get(c).Put(ls); err != nil { |
- panic(err) |
+ Convey(`Will return Internal if the protobuf log entry data is corrupt.`, func() { |
+ if archived { |
+ // Corrupt the archive datastream. |
+ stream := gsc.get("gs://testbucket/stream") |
+ zeroRecords(stream) |
+ } else { |
+ // Add corrupted entry to Storage. Create a new entry here, since |
+ // the storage will reject a duplicate/overwrite. |
+ err := ms.Put(storage.PutRequest{ |
+ Project: project, |
+ Path: types.StreamPath(req.Path), |
+ Index: 666, |
+ Values: [][]byte{{0x00}}, // Invalid protobuf, zero tag. |
+ }) |
+ if err != nil { |
+ panic(err) |
+ } |
+ req.Index = 666 |
} |
- _, err := s.Get(c, &req) |
+ _, err := svr.Get(c, &req) |
So(err, ShouldBeRPCInternal) |
}) |
- }) |
- Convey(`Will return Internal if the protobuf log entry data is corrupt.`, func() { |
- if archived { |
- // Corrupt the archive datastream. |
- stream := gsc.get("gs://testbucket/stream") |
- zeroRecords(stream) |
- } else { |
- // Add corrupted entry to Storage. Create a new entry here, since |
- // the storage will reject a duplicate/overwrite. |
- err := ms.Put(storage.PutRequest{ |
- Path: types.StreamPath(req.Path), |
- Index: 666, |
- Values: [][]byte{{0x00}}, // Invalid protobuf, zero tag. |
- }) |
- if err != nil { |
- panic(err) |
- } |
- req.Index = 666 |
- } |
- |
- _, err := s.Get(c, &req) |
- So(err, ShouldBeRPCInternal) |
- }) |
+ Convey(`Will successfully retrieve both logs and stream state.`, func() { |
+ req.State = true |
- Convey(`Will successfully retrieve both logs and stream state.`, func() { |
- req.State = true |
- |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
- }) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
+ }) |
- Convey(`Will return Internal if the Storage is not working.`, func() { |
- if archived { |
- gsc["error"] = []byte("test error") |
- } else { |
- ms.Close() |
- } |
+ Convey(`Will return Internal if the Storage is not working.`, func() { |
+ if archived { |
+ gsc["error"] = []byte("test error") |
+ } else { |
+ ms.Close() |
+ } |
- _, err := s.Get(c, &req) |
- So(err, ShouldBeRPCInternal) |
- }) |
+ _, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCInternal) |
+ }) |
- Convey(`Will enforce a maximum count of 2.`, func() { |
- req.LogCount = 2 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1) |
- }) |
+ Convey(`Will enforce a maximum count of 2.`, func() { |
+ req.LogCount = 2 |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1) |
+ }) |
- Convey(`When requesting protobufs`, func() { |
- req.State = true |
+ Convey(`When requesting protobufs`, func() { |
+ req.State = true |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2) |
- // Confirm that this has protobufs. |
- So(len(resp.Logs), ShouldEqual, 3) |
- So(resp.Logs[0], ShouldNotBeNil) |
+ // Confirm that this has protobufs. |
+ So(len(resp.Logs), ShouldEqual, 3) |
+ So(resp.Logs[0], ShouldNotBeNil) |
- // Confirm that there is a descriptor protobuf. |
- So(resp.Desc, ShouldResemble, desc) |
+ // Confirm that there is a descriptor protobuf. |
+ So(resp.Desc, ShouldResemble, desc) |
- // Confirm that the state was returned. |
- So(resp.State, ShouldNotBeNil) |
- }) |
+ // Confirm that the state was returned. |
+ So(resp.State, ShouldNotBeNil) |
+ }) |
- Convey(`Will successfully retrieve all records if non-contiguous is allowed.`, func() { |
- req.NonContiguous = true |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0, 1, 2, 4, 5, 7) |
- }) |
+ Convey(`Will successfully retrieve all records if non-contiguous is allowed.`, func() { |
+ req.NonContiguous = true |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0, 1, 2, 4, 5, 7) |
+ }) |
- Convey(`When newlines are not requested, does not include delimiters.`, func() { |
- req.LogCount = 1 |
+ Convey(`When newlines are not requested, does not include delimiters.`, func() { |
+ req.LogCount = 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 0) |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 0) |
- So(resp.Logs[0].GetText(), ShouldResemble, &logpb.Text{ |
- Lines: []*logpb.Text_Line{ |
- {"log entry #0", "\n"}, |
- {"another line of text", ""}, |
- }, |
+ So(resp.Logs[0].GetText(), ShouldResemble, &logpb.Text{ |
+ Lines: []*logpb.Text_Line{ |
+ {"log entry #0", "\n"}, |
+ {"another line of text", ""}, |
+ }, |
+ }) |
}) |
- }) |
- Convey(`Will get a Binary LogEntry`, func() { |
- req.Index = 4 |
- req.LogCount = 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 4) |
- So(resp.Logs[0].GetBinary(), ShouldResemble, &logpb.Binary{ |
- Data: []byte{0x00, 0x01, 0x02, 0x03}, |
+ Convey(`Will get a Binary LogEntry`, func() { |
+ req.Index = 4 |
+ req.LogCount = 1 |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 4) |
+ So(resp.Logs[0].GetBinary(), ShouldResemble, &logpb.Binary{ |
+ Data: []byte{0x00, 0x01, 0x02, 0x03}, |
+ }) |
}) |
- }) |
- Convey(`Will get a Datagram LogEntry`, func() { |
- req.Index = 5 |
- req.LogCount = 1 |
- resp, err := s.Get(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 5) |
- So(resp.Logs[0].GetDatagram(), ShouldResemble, &logpb.Datagram{ |
- Data: []byte{0x00, 0x01, 0x02, 0x03}, |
- Partial: &logpb.Datagram_Partial{ |
- Index: 2, |
- Size: 1024, |
- Last: false, |
- }, |
+ Convey(`Will get a Datagram LogEntry`, func() { |
+ req.Index = 5 |
+ req.LogCount = 1 |
+ resp, err := svr.Get(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 5) |
+ So(resp.Logs[0].GetDatagram(), ShouldResemble, &logpb.Datagram{ |
+ Data: []byte{0x00, 0x01, 0x02, 0x03}, |
+ Partial: &logpb.Datagram_Partial{ |
+ Index: 2, |
+ Size: 1024, |
+ Last: false, |
+ }, |
+ }) |
}) |
}) |
- }) |
- Convey(`Testing tail requests`, func() { |
- req := logdog.TailRequest{ |
- Path: string(ls.Path()), |
- } |
+ Convey(`Testing tail requests`, func() { |
+ req := logdog.TailRequest{ |
+ Project: project, |
+ Path: string(ls.Path()), |
+ } |
- Convey(`Will successfully retrieve a stream path.`, func() { |
- resp, err := s.Tail(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 7) |
- }) |
+ Convey(`Will successfully retrieve a stream path.`, func() { |
+ resp, err := svr.Tail(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 7) |
+ }) |
- Convey(`Will successfully retrieve a stream path hash and state.`, func() { |
- req.Path = ls.HashID |
- req.State = true |
+ Convey(`Will successfully retrieve a stream path hash and state.`, func() { |
+ req.Path = ls.HashID |
+ req.State = true |
- resp, err := s.Tail(c, &req) |
- So(err, ShouldBeRPCOK) |
- So(resp, shouldHaveLogs, 7) |
- So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
+ resp, err := svr.Tail(c, &req) |
+ So(err, ShouldBeRPCOK) |
+ So(resp, shouldHaveLogs, 7) |
+ So(resp.State, ShouldResemble, loadLogStreamState(ls)) |
+ }) |
}) |
}) |
}) |
@@ -593,5 +648,5 @@ func TestGetIntermediate(t *testing.T) { |
func TestGetArchived(t *testing.T) { |
t.Parallel() |
- testGetImpl(t, true) |
+ testGetImpl(t, true) |
} |