File duplicity-data-corruption.patch of Package duplicity.openSUSE_12.1_Update

From 23d3c39c52b47c2a9f5b8618b317f93bb9050791 Mon Sep 17 00:00:00 2001
From: Michael Catanzaro <mike.catanzaro@gmail.com>
Date: Sun, 19 May 2013 12:15:20 -0500
Subject: [PATCH] Backport data corruption fix to 0.6.15

https://bugs.launchpad.net/duplicity/+bug/1091269
https://bugzilla.novell.com/show_bug.cgi?id=813319
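
For reference, a minimal sketch of the idea behind the change (illustrative
names only, not the actual duplicity classes): instead of letting callers pass
an arbitrary size to next(), each block source exposes a fixed
get_read_size(), so a restarted backup can assume every previously written
data block had the same length.

    # Illustrative sketch only; FixedSizeIter is a hypothetical stand-in for
    # the patched SrcIter/TarBlockIter classes.
    import io

    class Block:
        def __init__(self, data):
            self.data = data

    class FixedSizeIter:
        def __init__(self, fileobj):
            self.fileobj = fileobj

        def get_read_size(self):
            # Constant block size: a restart can rely on every full block
            # having exactly this length.
            return 128 * 1024

        def next(self):
            data = self.fileobj.read(self.get_read_size())
            if not data:
                raise StopIteration
            return Block(data)

    # Usage: read an in-memory "file" block by block.
    src = FixedSizeIter(io.BytesIO(b"example data"))
    block = src.next()  # at most get_read_size() bytes per call
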
---
 duplicity       |   18 +++++++++++++-----
 src/diffdir.py  |    7 +++++++
 src/dup_temp.py |    6 ++++--
 src/gpg.py      |    9 ++++-----
 4 files changed, 28 insertions(+), 12 deletions(-)

diff --git a/duplicity b/duplicity
index cff3af1..2f8f914 100644
--- a/duplicity
+++ b/duplicity
@@ -217,7 +217,13 @@ def restart_position_iterator(tarblock_iter):
         # Just spin our wheels
         while tarblock_iter.next():
             if (tarblock_iter.previous_index == last_index):
-                if (tarblock_iter.previous_block > last_block):
+                # If both the previous index and this index are done, exit now
+                # before we hit the next index, to prevent skipping its first
+                # block.
+                if not last_block and not tarblock_iter.previous_block:
+                    break
+                # Only check block number if last_block is also a number
+                if last_block and tarblock_iter.previous_block > last_block:
                     break
             if tarblock_iter.previous_index > last_index:
                 log.Warn(_("File %s complete in backup set.\n"
@@ -877,11 +883,10 @@ def sync_archive(decrypt):
         """
         Copy data from src_iter to file at fn
         """
-        block_size = 128 * 1024
         file = open(filename, "wb")
         while True:
             try:
-                data = src_iter.next(block_size).data
+                data = src_iter.next().data
             except StopIteration:
                 break
             file.write(data)
@@ -929,9 +934,9 @@ def sync_archive(decrypt):
             def __init__(self, fileobj):
                 self.fileobj = fileobj
 
-            def next(self, size):
+            def next(self):
                 try:
-                    res = Block(self.fileobj.read(size))
+                    res = Block(self.fileobj.read(self.get_read_size()))
                 except Exception:
                     if hasattr(self.fileobj, 'name'):
                         name = self.fileobj.name
@@ -945,6 +950,9 @@ def sync_archive(decrypt):
                     raise StopIteration
                 return res
 
+            def get_read_size(self):
+                return 128 * 1024
+
             def get_footer(self):
                 return ""
 
diff --git a/src/diffdir.py b/src/diffdir.py
index 47b445b..5c40438 100644
--- a/src/diffdir.py
+++ b/src/diffdir.py
@@ -517,6 +517,13 @@ class TarBlockIter:
             self.remember_next = False
         return result
 
+    def get_read_size(self):
+        # read size must always be the same, because if we are restarting a
+        # backup volume where the previous volume ended in a data block, we
+        # have to be able to assume its length in order to continue reading
+        # the file from the right place.
+        return 64 * 1024
+
     def get_previous_index(self):
         """
         Return index of last tarblock, or None if no previous index
diff --git a/src/dup_temp.py b/src/dup_temp.py
index 1ab2d6c..bd81083 100644
--- a/src/dup_temp.py
+++ b/src/dup_temp.py
@@ -245,9 +245,9 @@ class SrcIter:
     def __init__(self, src):
         self.src = src
         self.fp = src.open("rb")
-    def next(self, size):
+    def next(self):
         try:
-            res = Block(self.fp.read(size))
+            res = Block(self.fp.read(self.get_read_size()))
         except Exception:
             log.FatalError(_("Failed to read %s: %s") %
                            (self.src.name, sys.exc_info()),
@@ -256,5 +256,7 @@ class SrcIter:
             self.fp.close()
             raise StopIteration
         return res
+    def get_read_size(self):
+        return 128 * 1024
     def get_footer(self):
         return ""
diff --git a/src/gpg.py b/src/gpg.py
index 2a6bcfe..9fdc91a 100644
--- a/src/gpg.py
+++ b/src/gpg.py
@@ -281,17 +281,16 @@ def GPGWriteFile(block_iter, filename, profile,
     def get_current_size():
         return os.stat(filename).st_size
 
-    block_size = 128 * 1024        # don't bother requesting blocks smaller, but also don't ask for bigger
     target_size = size - 50 * 1024 # fudge factor, compensate for gpg buffering
     data_size = target_size - max_footer_size
     file = GPGFile(True, path.Path(filename), profile)
     at_end_of_blockiter = 0
     while True:
         bytes_to_go = data_size - get_current_size()
-        if bytes_to_go < block_size:
+        if bytes_to_go < block_iter.get_read_size():
             break
         try:
-            data = block_iter.next(min(block_size, bytes_to_go)).data
+            data = block_iter.next().data
         except StopIteration:
             at_end_of_blockiter = 1
             break
@@ -340,10 +339,10 @@ def GzipWriteFile(block_iter, filename,
     at_end_of_blockiter = 0
     while True:
         bytes_to_go = size - file_counted.byte_count
-        if bytes_to_go < 32 * 1024:
+        if bytes_to_go < block_iter.get_read_size():
             break
         try:
-            new_block = block_iter.next(min(128*1024, bytes_to_go))
+            new_block = block_iter.next()
         except StopIteration:
             at_end_of_blockiter = 1
             break
-- 
1.7.7
