File reader.patch of Package rclone

diff --git a/backend/webdav/tus-upload.go b/backend/webdav/tus-upload.go
index 4b4cc22..20b35a6 100644
--- a/backend/webdav/tus-upload.go
+++ b/backend/webdav/tus-upload.go
@@ -1,7 +1,6 @@
 package webdav
 
 import (
-	"bytes"
 	"encoding/base64"
 	"fmt"
 	"io"
@@ -13,7 +12,8 @@ type Metadata map[string]string
 
 // Upload is a struct containing the file status during upload
 type Upload struct {
-	stream io.ReadSeeker
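+	// stream is read strictly sequentially; it need not be seekable.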
+	stream io.Reader
 	size   int64
 	offset int64
 
@@ -63,23 +62,14 @@ func b64encode(s string) string {
 
 // NewUpload creates a new upload from an io.Reader.
 func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
-	stream, ok := reader.(io.ReadSeeker)
-
-	if !ok {
-		buf := new(bytes.Buffer)
-		_, err := buf.ReadFrom(reader)
-		if err != nil {
-			return nil
-		}
-		stream = bytes.NewReader(buf.Bytes())
-	}
-
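+	// The reader no longer needs to be seekable: UploadChunk reads it
+	// strictly sequentially and resends partial chunks from memory.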
 	if metadata == nil {
 		metadata = make(Metadata)
 	}
 
 	return &Upload{
-		stream: stream,
+		stream: reader,
 		size:   size,
 
 		Fingerprint: fingerprint,
diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
index ca89085..0d6e61c 100644
--- a/backend/webdav/tus-uploader.go
+++ b/backend/webdav/tus-uploader.go
@@ -126,23 +126,51 @@ func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenO
 	chunkSize := u.fs.opt.ChunkSize
 	data := make([]byte, chunkSize)
 
-	_, err := u.upload.stream.Seek(u.offset, 0)
-
-	if err != nil {
-		fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
-		return err
-	}
-
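+	// Read the next chunk from the stream. A single Read may return
+	// fewer than chunkSize bytes; size holds the number actually read.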
 	size, err := u.upload.stream.Read(data)
 
-	if err != nil {
+	if err != nil && err != io.EOF {
 		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data stream: %v", cnt, err)
 		return err
 	}
 
-	body := bytes.NewBuffer(data[:size])
+	chunk := data[:size]
+
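+	// uploadChunk sends one chunk to the server starting at the given
+	// offset and returns the new upload offset acknowledged by the server.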
+	newOffset, err := u.uploadChunk(ctx, bytes.NewBuffer(chunk), int64(size), u.offset, options...)
+
+	// If newOffset is not equal to the old offset + size, upload the
+	// remainder of the chunk now so that the next chunk upload does
+	// not need to seek in the stream.
+	// NB: the tus protocol says little about the conditions under
+	// which the offset can differ after a successful request, but we
+	// handle that case here just to be safe.
+	maxRetries := 5
+	retries := 0
+	lastKnownOffset := newOffset
+	for err == nil && newOffset != u.offset+int64(size) {
+		// Sanity check: the server-reported offset must stay within this chunk's range.
+		if newOffset < u.offset || newOffset > u.offset+int64(size) {
+			return fmt.Errorf("upload of chunk no %d failed: new offset %d is outside the expected range %d to %d", cnt, newOffset, u.offset, u.offset+int64(size))
+		}
+		if retries >= maxRetries {
+			if newOffset == lastKnownOffset {
+				return fmt.Errorf("upload of chunk no %d failed: new offset %d did not reach current offset %d + size %d after %d retries", cnt, newOffset, u.offset, size, maxRetries)
+			}
+			lastKnownOffset = newOffset // the offset is still advancing, just slowly, so reset the retry budget
+			retries = 0
+		}
+		retries++
 
-	newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
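+		// Skip the bytes the server has already stored and resend the
+		// rest of this chunk from the server's reported offset.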
+		uploadedSize := newOffset - u.offset
+		fs.Debugf(u.fs, "Upload of chunk no %d was partial, range %d -> %d; retrying range %d -> %d", cnt, u.offset, newOffset, newOffset, u.offset+int64(size))
+		remainingBytes := chunk[uploadedSize:]
+		newOffset, err = u.uploadChunk(ctx, bytes.NewBuffer(remainingBytes), int64(size)-uploadedSize, newOffset, options...)
+	}
 
 	if err == nil {
 		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)