Created
August 5, 2016 05:38
-
-
Save bloodeagle40234/3224f3ccb7d78862cb35dec856c263bb to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/test/unit/obj/test_diskfile.py b/test/unit/obj/test_diskfile.py | |
index cc7d97d..7a2adb1 100644 | |
--- a/test/unit/obj/test_diskfile.py | |
+++ b/test/unit/obj/test_diskfile.py | |
@@ -5052,6 +5052,122 @@ class TestSuffixHashes(unittest.TestCase): | |
hashes = df_mgr.get_hashes('sda1', '0', [], policy) | |
self.assertEqual(hashes, {}) | |
def test_hash_suffix_one_reclaim_tombstone_with_hash_pkl(self):
    """
    A tombstone older than reclaim_age is not reclaimed while a valid
    hashes.pkl caches its (empty) suffix set; reclaim only happens when
    the suffix is explicitly handed to get_hashes for recalculation.
    """
    for policy in self.iter_policies():
        df_mgr = self.df_router[policy]
        df = df_mgr.get_diskfile(
            'sda1', '0', 'a', 'c', 'o', policy=policy)
        suffix_dir = os.path.dirname(df._datadir)
        part_dir = os.path.dirname(suffix_dir)
        hash_file = os.path.join(part_dir, diskfile.HASH_FILE)
        # scale back this test manager's reclaim age a bit
        df_mgr.reclaim_age = 1000
        # write a tombstone that's just a *little* older
        old_time = time() - 1001
        timestamp = Timestamp(old_time)
        df.delete(timestamp.internal)
        # no hashes.pkl exists yet, so this rehashes everything; the
        # expired tombstone is reclaimed and the result is empty
        hashes = df_mgr.get_hashes('sda1', '0', [], policy)
        # sanity
        self.assertEqual(hashes, {})
        hash_timestamp = os.stat(hash_file).st_mtime

        # while hashes.pkl is valid, a re-written .ts file is
        # not reclaimed
        df = df_mgr.get_diskfile(
            'sda1', '0', 'a', 'c', 'o', policy=policy)
        df.delete(timestamp.internal)

        hashes = df_mgr.get_hashes('sda1', '0', [], policy)
        # yes, this was a cached value, so the value looks empty
        self.assertEqual(hashes, {})
        # and the hashes.pkl is not touched
        self.assertEqual(hash_timestamp, os.stat(hash_file).st_mtime)
        # and we still have the tombstone entry
        tombstone = '%s.ts' % timestamp.internal
        self.assertTrue(os.path.exists(df._datadir))
        self.assertIn(tombstone, os.listdir(df._datadir))

        # even if we call invalidate_hash for the suffix dir,
        # get_hashes does not reclaim the tombstone ...
        with mock.patch('swift.obj.diskfile.lock_path'):
            df_mgr.invalidate_hash(suffix_dir)

        hashes = df_mgr.get_hashes('sda1', '0', [], policy)

        self.assertEqual(hashes, {})
        # ... because with no other objects in the suffix there is no
        # cached suffix entry to recalculate, so nothing is reclaimed
        self.assertTrue(os.path.exists(df._datadir))
        self.assertIn(tombstone, os.listdir(df._datadir))
        self.assertEqual(hash_timestamp, os.stat(hash_file).st_mtime)

        # *BUT* explicitly passing the suffix to recalculate forces the
        # rehash, which reclaims the tombstone!
        # FIX: the recalculate argument of get_hashes takes suffix
        # *names*; os.path.dirname(suffix_dir) was the partition path,
        # so use the suffix basename instead.
        suffix = os.path.basename(suffix_dir)
        hashes = df_mgr.get_hashes('sda1', '0', [suffix], policy)
        self.assertFalse(os.path.exists(df._datadir))
        # hashes.pkl was updated
        self.assertGreater(os.stat(hash_file).st_mtime, hash_timestamp)
+ | |
def test_hash_suffix_one_reclaim_tombstone_suffix_has_another_data(self):
    """
    A reclaim-age-old tombstone sharing its suffix with a live object:
    the tombstone survives while hashes.pkl caches the suffix, and is
    only reclaimed once the suffix hash is invalidated and rehashed.
    """
    for policy in self.iter_policies():
        if isinstance(policy, ECStoragePolicy):
            # replicated-policy on-disk layout only
            continue
        df_mgr = self.df_router[policy]
        df = df_mgr.get_diskfile(
            'sda1', '0', 'a', 'c', 'o', policy=policy)
        df2 = df_mgr.get_diskfile(
            'sda1', '0', 'a', 'c', 'jjVoaPZUQK', policy=policy)
        # shrink this manager's reclaim age so the tombstone below is
        # already expired
        df_mgr.reclaim_age = 1000
        # a tombstone that's just a *little* older than reclaim age ...
        old_timestamp = Timestamp(time() - 1001)
        df.delete(old_timestamp.internal)
        # ... plus a brand-new object in the same suffix
        new_timestamp = Timestamp(time())
        with df2.create() as writer:
            body = 'test file'
            writer.write(body)
            writer.put({
                'X-Timestamp': new_timestamp.internal,
                'ETag': md5(body).hexdigest(),
                'Content-Length': len(body),
            })
        hashes = df_mgr.get_hashes('sda1', '0', [], policy)
        # sanity: both diskfiles share the suffix 'a83'
        self.assertIn('a83', hashes)
        self.assertEqual(os.path.dirname(df._datadir),
                         os.path.dirname(df2._datadir))
        expected_hash = hashes['a83']

        # while hashes.pkl is valid, a re-written .ts file is
        # not reclaimed
        df = df_mgr.get_diskfile(
            'sda1', '0', 'a', 'c', 'o', policy=policy)
        df.delete(old_timestamp.internal)

        hashes = df_mgr.get_hashes('sda1', '0', [], policy)
        self.assertIn('a83', hashes)
        self.assertEqual(expected_hash, hashes['a83'])

        # ... and the tombstone is still on disk
        tombstone = '%s.ts' % old_timestamp.internal
        self.assertTrue(os.path.exists(df._datadir))
        self.assertIn(tombstone, os.listdir(df._datadir))

        # now invalidate the suffix hash
        suffix_dir = os.path.dirname(df._datadir)
        with mock.patch('swift.obj.diskfile.lock_path'):
            df_mgr.invalidate_hash(suffix_dir)

        # sanity: the rehash still reports the same suffix hash
        hashes = df_mgr.get_hashes('sda1', '0', [], policy)
        self.assertIn('a83', hashes)
        self.assertEqual(expected_hash, hashes['a83'])

        # ok, the tombstone file/dir was reclaimed
        self.assertFalse(os.path.exists(df._datadir))
+ | |
def test_hash_suffix_one_reclaim_and_one_valid_tombstone(self): | |
for policy in self.iter_policies(): | |
paths, suffix = find_paths_with_matching_suffixes(2, 1) |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment