Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(9)

Unified Diff: tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py

Issue 1376593003: Roll gsutil version to 4.15. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py
diff --git a/tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py b/tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py
index 0bf6c5ff485929aed7800915a6ab849d726cf02a..40108b84616c2d3eddb2d22f6b6acff479c9050b 100644
--- a/tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py
+++ b/tools/telemetry/third_party/gsutilz/gslib/tests/test_rsync.py
@@ -21,7 +21,7 @@ import crcmod
import gslib.tests.testcase as testcase
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.util import ObjectToURI as suri
-from gslib.tests.util import PerformsFileToObjectUpload
+from gslib.tests.util import SequentialAndParallelTransfer
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import unittest
from gslib.util import IS_WINDOWS
@@ -50,6 +50,7 @@ def _TailSet(start_point, listing):
"""
return set(l[len(start_point):] for l in listing.strip().split('\n'))
+
# TODO: Add inspection to the retry wrappers in this test suite where the state
# at the end of a retry block is depended upon by subsequent tests (since
# listing content can vary depending on which backend server is reached until
@@ -112,19 +113,20 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
def test_bucket_to_bucket(self):
"""Tests that flat and recursive rsync between 2 buckets works correctly."""
# Create 2 buckets with 1 overlapping object, 1 extra object at root level
- # in each, and 1 extra object 1 level down in each. Make the overlapping
- # objects named the same but with different content, to test that we detect
- # and properly copy in that case.
+ # in each, and 1 extra object 1 level down in each, where one of the objects
+ # starts with "." to test that we don't skip those objects. Make the
+ # overlapping objects named the same but with different content, to test
+ # that we detect and properly copy in that case.
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+ contents='.obj2')
self.CreateObject(bucket_uri=bucket1_uri, object_name='subdir/obj3',
contents='subdir/obj3')
- self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
- contents='OBJ2')
+ self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+ contents='.OBJ2')
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
contents='obj4')
self.CreateObject(bucket_uri=bucket2_uri, object_name='subdir/obj5',
@@ -138,19 +140,19 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Second bucket should have new objects added from source bucket (without
# removing extraneous object found in dest bucket), and without the
# subdir objects synchronized.
self.assertEquals(listing2,
- set(['/obj1', '/obj2', '/obj4', '/subdir/obj5']))
+ set(['/obj1', '/.obj2', '/obj4', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were correctly synchronized (bucket to bucket sync uses
# checksums).
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket1_uri, 'obj2')], return_stdout=True))
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket2_uri, 'obj2')], return_stdout=True))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket1_uri, '.obj2')], return_stdout=True))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket2_uri, '.obj2')], return_stdout=True))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -167,7 +169,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj7',
contents='obj7')
self.RunGsUtil(['rm', suri(bucket1_uri, 'obj1')])
- self.RunGsUtil(['rm', suri(bucket2_uri, 'obj2')])
+ self.RunGsUtil(['rm', suri(bucket2_uri, '.obj2')])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -176,11 +178,11 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
# Second bucket should have objects that were newly added to first bucket
# (without removing extraneous dest bucket objects), and without the
# subdir objects synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj4', '/obj6',
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj4', '/obj6',
'/obj7', '/subdir/obj3',
'/subdir/obj5']))
_Check3()
@@ -197,19 +199,20 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
def test_bucket_to_bucket_minus_d(self):
"""Tests that flat and recursive rsync between 2 buckets works correctly."""
# Create 2 buckets with 1 overlapping object, 1 extra object at root level
- # in each, and 1 extra object 1 level down in each. Make the overlapping
- # objects named the same but with different content, to test that we detect
- # and properly copy in that case.
+ # in each, and 1 extra object 1 level down in each, where one of the objects
+ # starts with "." to test that we don't skip those objects. Make the
+ # overlapping objects named the same but with different content, to test
+ # that we detect and properly copy in that case.
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+ contents='.obj2')
self.CreateObject(bucket_uri=bucket1_uri, object_name='subdir/obj3',
contents='subdir/obj3')
- self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
- contents='OBJ2')
+ self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+ contents='.OBJ2')
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
contents='obj4')
self.CreateObject(bucket_uri=bucket2_uri, object_name='subdir/obj5',
@@ -223,17 +226,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Second bucket should have content like first bucket but without the
# subdir objects synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were correctly synchronized (bucket to bucket sync uses
# checksums).
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket1_uri, 'obj2')], return_stdout=True))
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket2_uri, 'obj2')], return_stdout=True))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket1_uri, '.obj2')], return_stdout=True))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket2_uri, '.obj2')], return_stdout=True))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -251,7 +254,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj7',
contents='obj7')
self.RunGsUtil(['rm', suri(bucket1_uri, 'obj1')])
- self.RunGsUtil(['rm', suri(bucket2_uri, 'obj2')])
+ self.RunGsUtil(['rm', suri(bucket2_uri, '.obj2')])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -261,10 +264,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
# Second bucket should have content like first bucket but without the
# subdir objects synchronized.
- self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -277,13 +280,14 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
_Check4()
# Test sequential upload as well as parallel composite upload case.
- @PerformsFileToObjectUpload
+ @SequentialAndParallelTransfer
@unittest.skipUnless(UsingCrcmodExtension(crcmod),
'Test requires fast crcmod.')
def test_dir_to_bucket_minus_d(self):
"""Tests that flat and recursive rsync dir to bucket works correctly."""
# Create dir and bucket with 1 overlapping object, 1 extra object at root
- # level in each, and 1 extra object 1 level down in each. Make the
+ # level in each, and 1 extra object 1 level down in each, where one of the
+ # objects starts with "." to test that we don't skip those objects. Make the
# overlapping objects named the same but with different content, to test
# that we detect and properly copy in that case.
tmpdir = self.CreateTempDir()
@@ -291,17 +295,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
os.mkdir(subdir)
bucket_uri = self.CreateBucket()
self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
- self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
- contents='OBJ2')
+ self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+ contents='.OBJ2')
self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
contents='obj4')
self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj5',
contents='subdir/obj5')
# Need to make sure the bucket listing is caught-up, otherwise the
- # first rsync may not see obj2 and overwrite it.
+ # first rsync may not see .obj2 and overwrite it.
self.AssertNObjectsInBucket(bucket_uri, 3)
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -312,17 +316,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
# Dir should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Bucket should have content like dir but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were not synchronized (dir to bucket sync doesn't use checksums
# unless you specify -c).
- with open(os.path.join(tmpdir, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
- self.assertEquals('OBJ2', self.RunGsUtil(
- ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
+ with open(os.path.join(tmpdir, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
+ self.assertEquals('.OBJ2', self.RunGsUtil(
+ ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -342,16 +346,16 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
# Dir should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Bucket should have content like dir but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were synchronized (dir to bucket sync with -c uses checksums).
- with open(os.path.join(tmpdir, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
+ with open(os.path.join(tmpdir, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -367,7 +371,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.CreateObject(bucket_uri=bucket_uri, object_name='obj7',
contents='obj7')
os.unlink(os.path.join(tmpdir, 'obj1'))
- self.RunGsUtil(['rm', suri(bucket_uri, 'obj2')])
+ self.RunGsUtil(['rm', suri(bucket_uri, '.obj2')])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -376,10 +380,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
# Dir should have un-altered content.
- self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
# Bucket should have content like dir but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
_Check5()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -395,7 +399,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
def test_dir_to_dir_minus_d(self):
"""Tests that flat and recursive rsync dir to dir works correctly."""
# Create 2 dirs with 1 overlapping file, 1 extra file at root
- # level in each, and 1 extra file 1 level down in each. Make the
+ # level in each, and 1 extra file 1 level down in each, where one of the
+ # objects starts with "." to test that we don't skip those objects. Make the
# overlapping files named the same but with different content, to test
# that we detect and properly copy in that case.
tmpdir1 = self.CreateTempDir()
@@ -405,10 +410,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
os.mkdir(subdir1)
os.mkdir(subdir2)
self.CreateTempFile(tmpdir=tmpdir1, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir1, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir1, file_name='.obj2', contents='.obj2')
self.CreateTempFile(
tmpdir=subdir1, file_name='obj3', contents='subdir1/obj3')
- self.CreateTempFile(tmpdir=tmpdir2, file_name='obj2', contents='OBJ2')
+ self.CreateTempFile(tmpdir=tmpdir2, file_name='.obj2', contents='.OBJ2')
self.CreateTempFile(tmpdir=tmpdir2, file_name='obj4', contents='obj4')
self.CreateTempFile(
tmpdir=subdir2, file_name='obj5', contents='subdir2/obj5')
@@ -417,17 +422,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
# dir1 should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir1/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))
# dir2 should have content like dir1 but without the subdir1 objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir2/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))
# Assert that the src/dest objects that had same length but different
# checksums were not synchronized (dir to dir sync doesn't use checksums
# unless you specify -c).
- with open(os.path.join(tmpdir1, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
- with open(os.path.join(tmpdir2, 'obj2')) as f:
- self.assertEquals('OBJ2', '\n'.join(f.readlines()))
+ with open(os.path.join(tmpdir1, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
+ with open(os.path.join(tmpdir2, '.obj2')) as f:
+ self.assertEquals('.OBJ2', '\n'.join(f.readlines()))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -442,16 +447,16 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
# dir1 should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir1/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir1/obj3']))
# dir2 should have content like dir but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir2/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir2/obj5']))
# Assert that the src/dest objects that had same length but different
# content were synchronized (dir to dir sync with -c uses checksums).
- with open(os.path.join(tmpdir1, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
- with open(os.path.join(tmpdir1, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
+ with open(os.path.join(tmpdir1, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
+      with open(os.path.join(tmpdir2, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -465,16 +470,16 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.CreateTempFile(tmpdir=tmpdir1, file_name='obj6', contents='obj6')
self.CreateTempFile(tmpdir=tmpdir2, file_name='obj7', contents='obj7')
os.unlink(os.path.join(tmpdir1, 'obj1'))
- os.unlink(os.path.join(tmpdir2, 'obj2'))
+ os.unlink(os.path.join(tmpdir2, '.obj2'))
self.RunGsUtil(['rsync', '-d', '-r', tmpdir1, tmpdir2])
listing1 = _TailSet(tmpdir1, self._FlatListDir(tmpdir1))
listing2 = _TailSet(tmpdir2, self._FlatListDir(tmpdir2))
# dir1 should have un-altered content.
- self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir1/obj3']))
+ self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir1/obj3']))
# dir2 should have content like dir but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir1/obj3']))
+ self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir1/obj3']))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -518,7 +523,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
def test_bucket_to_dir_minus_d(self):
"""Tests that flat and recursive rsync bucket to dir works correctly."""
# Create bucket and dir with 1 overlapping object, 1 extra object at root
- # level in each, and 1 extra object 1 level down in each. Make the
+ # level in each, and 1 extra object 1 level down in each, where one of the
+ # objects starts with "." to test that we don't skip those objects. Make the
# overlapping objects named the same but with different content, to test
# that we detect and properly copy in that case.
bucket_uri = self.CreateBucket()
@@ -527,11 +533,11 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
os.mkdir(subdir)
self.CreateObject(bucket_uri=bucket_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+ contents='.obj2')
self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj3',
contents='subdir/obj3')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='OBJ2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.OBJ2')
self.CreateTempFile(tmpdir=tmpdir, file_name='obj4', contents='obj4')
self.CreateTempFile(tmpdir=subdir, file_name='obj5', contents='subdir/obj5')
@@ -543,17 +549,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
# Bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Dir should have content like bucket but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were not synchronized (bucket to dir sync doesn't use checksums
# unless you specify -c).
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
- with open(os.path.join(tmpdir, 'obj2')) as f:
- self.assertEquals('OBJ2', '\n'.join(f.readlines()))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
+ with open(os.path.join(tmpdir, '.obj2')) as f:
+ self.assertEquals('.OBJ2', '\n'.join(f.readlines()))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -573,16 +579,16 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
# Bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Dir should have content like bucket but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
# Assert that the src/dest objects that had same length but different
# content were synchronized (bucket to dir sync with -c uses checksums).
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket_uri, 'obj2')], return_stdout=True))
- with open(os.path.join(tmpdir, 'obj2')) as f:
- self.assertEquals('obj2', '\n'.join(f.readlines()))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket_uri, '.obj2')], return_stdout=True))
+ with open(os.path.join(tmpdir, '.obj2')) as f:
+ self.assertEquals('.obj2', '\n'.join(f.readlines()))
_Check3()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -598,7 +604,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
contents='obj6')
self.CreateTempFile(tmpdir=tmpdir, file_name='obj7', contents='obj7')
self.RunGsUtil(['rm', suri(bucket_uri, 'obj1')])
- os.unlink(os.path.join(tmpdir, 'obj2'))
+ os.unlink(os.path.join(tmpdir, '.obj2'))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -607,10 +613,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
listing2 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
# Bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/.obj2', '/obj6', '/subdir/obj3']))
# Dir should have content like bucket but without the subdir objects
# synchronized.
- self.assertEquals(listing2, set(['/obj2', '/obj6', '/subdir/obj3']))
+ self.assertEquals(listing2, set(['/.obj2', '/obj6', '/subdir/obj3']))
_Check5()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -683,8 +689,6 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.assertEquals(listing1, set(['/obj1', '//']))
# Bucket should not have the placeholder object.
self.assertEquals(listing2, set(['/obj1']))
- # Stdout should report what happened.
- self.assertRegexpMatches(output, r'.*Skipping cloud sub-directory.*')
_Check1()
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
@@ -696,7 +700,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
bucket_uri = self.CreateBucket()
fpath1 = self.CreateTempFile(
tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
good_symlink_path = os.path.join(tmpdir, 'symlink1')
os.symlink(fpath1, good_symlink_path)
@@ -704,8 +708,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
# handles that case.
bad_symlink_path = os.path.join(tmpdir, 'symlink2')
os.symlink(os.path.join('/', 'non-existent'), bad_symlink_path)
- self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
- contents='OBJ2')
+ self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+ contents='.OBJ2')
self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
contents='obj4')
self.CreateObject(bucket_uri=bucket_uri, object_name='subdir/obj5',
@@ -721,10 +725,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
# Dir should have un-altered content.
self.assertEquals(
listing1,
- set(['/obj1', '/obj2', '/subdir/obj3', '/symlink1', '/symlink2']))
+ set(['/obj1', '/.obj2', '/subdir/obj3', '/symlink1', '/symlink2']))
# Bucket should have content like dir but without the symlink, and
# without subdir objects synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj5']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj5']))
_Check1()
# Now remove invalid symlink and run without -e, and see that symlink gets
@@ -739,11 +743,11 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
# Dir should have un-altered content.
self.assertEquals(
- listing1, set(['/obj1', '/obj2', '/subdir/obj3', '/symlink1']))
+ listing1, set(['/obj1', '/.obj2', '/subdir/obj3', '/symlink1']))
# Bucket should have content like dir but without the symlink, and
# without subdir objects synchronized.
self.assertEquals(
- listing2, set(['/obj1', '/obj2', '/subdir/obj5', '/symlink1']))
+ listing2, set(['/obj1', '/.obj2', '/subdir/obj5', '/symlink1']))
self.assertEquals('obj1', self.RunGsUtil(
['cat', suri(bucket_uri, 'symlink1')], return_stdout=True))
_Check2()
@@ -763,13 +767,13 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
bucket2_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+ contents='.obj2')
self.RunGsUtil(
- ['compose', suri(bucket1_uri, 'obj1'), suri(bucket1_uri, 'obj2'),
+ ['compose', suri(bucket1_uri, 'obj1'), suri(bucket1_uri, '.obj2'),
suri(bucket1_uri, 'obj3')])
- self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
- contents='OBJ2')
+ self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+ contents='.OBJ2')
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj4',
contents='obj4')
@@ -780,10 +784,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/obj3']))
# Second bucket should have content like first bucket but without the
# subdir objects synchronized.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj3']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj3']))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -801,8 +805,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
bucket2_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket1_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket1_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket1_uri, object_name='.obj2',
+ contents='.obj2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -810,8 +814,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.RunGsUtil(['rsync', '-d', suri(bucket1_uri), suri(bucket2_uri)])
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
- self.assertEquals(listing1, set(['/obj1', '/obj2']))
- self.assertEquals(listing2, set(['/obj1', '/obj2']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2']))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -829,8 +833,8 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
bucket2_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket2_uri, object_name='obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket2_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket2_uri, object_name='.obj2',
+ contents='.obj2')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
@@ -896,7 +900,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
os.mkdir(subdir)
bucket_url = self.CreateBucket()
self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
self.CreateTempFile(tmpdir=subdir, file_name='obj3', contents='subdir/obj3')
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -909,9 +913,9 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
suri(bucket_url, 'subdir'),
self._FlatListBucket(bucket_url.clone_replace_name('subdir')))
# Dir should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/subdir/obj3']))
# Bucket subdir should have content like dir.
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/subdir/obj3']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/subdir/obj3']))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -927,7 +931,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
"""Tests that rsync from a non-existent bucket subdir fails gracefully."""
tmpdir = self.CreateTempDir()
self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
bucket_url_str = '%s://%s' % (
self.default_provider, self.nonexistent_bucket_name)
stderr = self.RunGsUtil(['rsync', '-d', bucket_url_str, tmpdir],
@@ -935,13 +939,13 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.assertIn('Caught non-retryable exception', stderr)
listing = _TailSet(tmpdir, self._FlatListDir(tmpdir))
# Dir should have un-altered content.
- self.assertEquals(listing, set(['/obj1', '/obj2']))
+ self.assertEquals(listing, set(['/obj1', '/.obj2']))
def test_rsync_to_nonexistent_bucket(self):
"""Tests that rsync from a non-existent bucket subdir fails gracefully."""
tmpdir = self.CreateTempDir()
self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
bucket_url_str = '%s://%s' % (
self.default_provider, self.nonexistent_bucket_name)
stderr = self.RunGsUtil(['rsync', '-d', bucket_url_str, tmpdir],
@@ -949,7 +953,7 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
self.assertIn('Caught non-retryable exception', stderr)
listing = _TailSet(tmpdir, self._FlatListDir(tmpdir))
# Dir should have un-altered content.
- self.assertEquals(listing, set(['/obj1', '/obj2']))
+ self.assertEquals(listing, set(['/obj1', '/.obj2']))
def test_bucket_to_bucket_minus_d_with_overwrite_and_punc_chars(self):
"""Tests that punc chars in filenames don't confuse sort order."""
@@ -960,12 +964,12 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
# compared without encoding.
self.CreateObject(bucket_uri=bucket1_uri, object_name='e/obj1',
contents='obj1')
- self.CreateObject(bucket_uri=bucket1_uri, object_name='e-1/obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket1_uri, object_name='e-1/.obj2',
+ contents='.obj2')
self.CreateObject(bucket_uri=bucket2_uri, object_name='e/obj1',
contents='OBJ1')
- self.CreateObject(bucket_uri=bucket2_uri, object_name='e-1/obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket2_uri, object_name='e-1/.obj2',
+ contents='.obj2')
# Need to make sure the bucket listings are caught-up, otherwise the
# rsync may not see all objects and fail to synchronize correctly.
self.AssertNObjectsInBucket(bucket1_uri, 2)
@@ -979,13 +983,13 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(suri(bucket1_uri), self._FlatListBucket(bucket1_uri))
listing2 = _TailSet(suri(bucket2_uri), self._FlatListBucket(bucket2_uri))
# First bucket should have un-altered content.
- self.assertEquals(listing1, set(['/e/obj1', '/e-1/obj2']))
- self.assertEquals(listing2, set(['/e/obj1', '/e-1/obj2']))
+ self.assertEquals(listing1, set(['/e/obj1', '/e-1/.obj2']))
+ self.assertEquals(listing2, set(['/e/obj1', '/e-1/.obj2']))
# Assert correct contents.
self.assertEquals('obj1', self.RunGsUtil(
['cat', suri(bucket2_uri, 'e/obj1')], return_stdout=True))
- self.assertEquals('obj2', self.RunGsUtil(
- ['cat', suri(bucket2_uri, 'e-1/obj2')], return_stdout=True))
+ self.assertEquals('.obj2', self.RunGsUtil(
+ ['cat', suri(bucket2_uri, 'e-1/.obj2')], return_stdout=True))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -1003,17 +1007,17 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket()
self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
- self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ self.CreateTempFile(tmpdir=tmpdir, file_name='.obj2', contents='.obj2')
self.CreateTempFile(tmpdir=tmpdir, file_name='obj3', contents='obj3')
- self.CreateObject(bucket_uri=bucket_uri, object_name='obj2',
- contents='obj2')
+ self.CreateObject(bucket_uri=bucket_uri, object_name='.obj2',
+ contents='.obj2')
self.CreateObject(bucket_uri=bucket_uri, object_name='obj4',
contents='obj4')
self.CreateObject(bucket_uri=bucket_uri, object_name='obj5',
contents='obj5')
# Need to make sure the bucket listing is caught-up, otherwise the
- # first rsync may not see obj2 and overwrite it.
+ # first rsync may not see .obj2 and overwrite it.
self.AssertNObjectsInBucket(bucket_uri, 3)
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -1024,10 +1028,10 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
# Dir should have un-altered content.
- self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+ self.assertEquals(listing1, set(['/obj1', '/.obj2', '/obj3']))
# Bucket should have content like dir but ignoring obj3 from dir and not
# deleting obj4 from bucket (per exclude regex).
- self.assertEquals(listing2, set(['/obj1', '/obj2', '/obj4']))
+ self.assertEquals(listing2, set(['/obj1', '/.obj2', '/obj4']))
_Check1()
# Use @Retry as hedge against bucket listing eventual consistency.
@@ -1038,3 +1042,30 @@ class TestRsync(testcase.GsUtilIntegrationTestCase):
['rsync', '-d', '-x', 'obj[34]', tmpdir, suri(bucket_uri)],
return_stderr=True))
_Check2()
+
+ @unittest.skipIf(IS_WINDOWS,
+ "os.chmod() won't make file unreadable on Windows.")
+ def test_dir_to_bucket_minus_C(self):
+ """Tests that rsync -C option works correctly."""
+ # Create dir with 3 objects, the middle of which is unreadable.
+ tmpdir = self.CreateTempDir()
+ bucket_uri = self.CreateBucket()
+ self.CreateTempFile(tmpdir=tmpdir, file_name='obj1', contents='obj1')
+ path = self.CreateTempFile(tmpdir=tmpdir, file_name='obj2', contents='obj2')
+ os.chmod(path, 0)
+ self.CreateTempFile(tmpdir=tmpdir, file_name='obj3', contents='obj3')
+
+ # Use @Retry as hedge against bucket listing eventual consistency.
+ @Retry(AssertionError, tries=3, timeout_secs=1)
+ def _Check():
+ """Tests rsync works as expected."""
+ stderr = self.RunGsUtil(['rsync', '-C', tmpdir, suri(bucket_uri)],
+ expected_status=1, return_stderr=True)
+ self.assertIn('1 files/objects could not be copied/removed.', stderr)
+ listing1 = _TailSet(tmpdir, self._FlatListDir(tmpdir))
+ listing2 = _TailSet(suri(bucket_uri), self._FlatListBucket(bucket_uri))
+ # Dir should have un-altered content.
+ self.assertEquals(listing1, set(['/obj1', '/obj2', '/obj3']))
+ # Bucket should have obj1 and obj3 even though obj2 was unreadable.
+ self.assertEquals(listing2, set(['/obj1', '/obj3']))
+ _Check()

Powered by Google App Engine
This is Rietveld 408576698