diff --git a/README.md b/README.md
index 30e182ad6b36b70b9f84380d1c30c73a4bc9d18c..c06a5ec5280073ee81675c9ebe7d3f8872f7d7e3 100644
--- a/README.md
+++ b/README.md
@@ -45,6 +45,11 @@ TODO:
 * ??? check completeness of the current state of the bucket
 * unlike in other backup solutions, attacker with credentials can restore
   old data from the repository/bucket, this should be discussed (howto threat modeling ?)
+* rgw leaks objects during tests
+
+* rwm: drop the _cmd suffix from methods which are not wrappers of other commands
+* fix microceph start on node reboot
+* drop rclone use-cases
 
 
 ## Usage
diff --git a/rwm.py b/rwm.py
index 7409edbaedeed6320f3adf198d20c1fdadacbd39..3032375ecdbdeb15be3de76411b677c72c29795b 100755
--- a/rwm.py
+++ b/rwm.py
@@ -260,17 +260,24 @@ class StorageManager:
         """deletes all old versions and delete markers from storage to reclaim space"""
 
         # ? lock repo
+        paginator = self.s3.meta.client.get_paginator('list_object_versions')
 
         # drop all active object versions
-        object_versions = self.s3.meta.client.list_object_versions(Bucket=bucket_name)
-        for item in object_versions["Versions"]:
-            if not item["IsLatest"]:
-                self.s3.ObjectVersion(bucket_name, item["Key"], item["VersionId"]).delete()
+        objects = []
+        for page in paginator.paginate(Bucket=bucket_name):
+            for item in page.get("Versions", []):
+                if not item["IsLatest"]:
+                    objects.append([bucket_name, item["Key"], item["VersionId"]])
+        for item in objects:
+            self.s3.ObjectVersion(*item).delete()
 
         # drop all delete markers
-        object_versions = self.s3.meta.client.list_object_versions(Bucket=bucket_name)
-        for item in object_versions["DeleteMarkers"]:
-            self.s3.ObjectVersion(bucket_name, item["Key"], item["VersionId"]).delete()
+        objects = []
+        for page in paginator.paginate(Bucket=bucket_name):
+            for item in page.get("DeleteMarkers", []):
+                objects.append([bucket_name, item["Key"], item["VersionId"]])
+        for item in objects:
+            self.s3.ObjectVersion(*item).delete()
 
         return 0
 
diff --git a/tests/conftest.py b/tests/conftest.py
index 17ba890fe546b54ab4731be6e6844f62f9a68844..539818f8c0679cebf8a8b461c80d85bc4c9b021e 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -59,7 +59,7 @@ def radosuser(microceph_url, username, tenant="tenant1"):
     """rgwuser fixture"""
 
     subprocess.run(
-        ["/snap/bin/radosgw-admin", "user", "rm", f"--uid={tenant}${username}", "--purge-data"],
+        ["/snap/bin/radosgw-admin", "user", "rm", f"--uid={tenant}${username}", "--purge-data", "--purge-keys"],
         stdout=subprocess.DEVNULL,
         stderr=subprocess.DEVNULL,
         check=False
@@ -74,7 +74,7 @@ def radosuser(microceph_url, username, tenant="tenant1"):
     user = json.loads(proc.stdout)
     yield StorageManager(microceph_url, user["keys"][0]["access_key"], user["keys"][0]["secret_key"])
 
-    subprocess.run(["/snap/bin/radosgw-admin", "user", "rm", f"--uid={tenant}${username}", "--purge-data"], check=True)
+    subprocess.run(["/snap/bin/radosgw-admin", "user", "rm", f"--uid={tenant}${username}", "--purge-data", "--purge-keys"], check=True)
 
 
 @pytest.fixture
diff --git a/tests/test_storage.py b/tests/test_storage.py
index 061da3e527fafc649847af3a49dd88335fd9a33c..0a371a645ae934ff1f6d2cf80123f884a38882d6 100644
--- a/tests/test_storage.py
+++ b/tests/test_storage.py
@@ -185,3 +185,21 @@ def test_storage_drop_versions(tmpworkdir: str, microceph: str, radosuser_admin:
 
     object_versions = list(bucket.object_versions.all())
     assert len(object_versions) == 1
+
+
+def test_storage_drop_versions_many(tmpworkdir: str, microceph: str, radosuser_admin: rwm.StorageManager):  # pylint: disable=unused-argument
+    """test manager storage_drop_versions with enough versions to require pagination"""
+
+    bucket_name = "testbuckx"
+    target_username = "test1"
+    bucket = radosuser_admin.storage_create(bucket_name, target_username)
+
+    bucket.upload_fileobj(BytesIO(b"dummydata0"), "dummykey")
+    for idx in range(801):
+        bucket.Object("dummykey").delete()
+        bucket.upload_fileobj(BytesIO(f"dummydata{idx}".encode()), "dummykey")
+
+    assert radosuser_admin.storage_drop_versions(bucket.name) == 0
+
+    object_versions = list(bucket.object_versions.all())
+    assert len(object_versions) == 1