File 8172.patch of Package rclone

From 7c3db3250284bc8aa9a321fe16b0e30579d0044e Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Wed, 30 Oct 2024 17:32:31 +0100
Subject: [PATCH 01/13] Add vendor Infinite Scale with TUS upload support

---
 backend/webdav/tus-errors.go   |  28 +++++
 backend/webdav/tus-upload.go   |  83 +++++++++++++++
 backend/webdav/tus-uploader.go | 186 +++++++++++++++++++++++++++++++++
 backend/webdav/tus.go          | 110 +++++++++++++++++++
 backend/webdav/webdav.go       |  53 +++++++---
 5 files changed, 446 insertions(+), 14 deletions(-)
 create mode 100644 backend/webdav/tus-errors.go
 create mode 100644 backend/webdav/tus-upload.go
 create mode 100644 backend/webdav/tus-uploader.go
 create mode 100644 backend/webdav/tus.go

diff --git a/backend/webdav/tus-errors.go b/backend/webdav/tus-errors.go
new file mode 100644
index 0000000000000..6d5bdc7eba49a
--- /dev/null
+++ b/backend/webdav/tus-errors.go
@@ -0,0 +1,28 @@
+package webdav
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	ErrChuckSize         = errors.New("tus chunk size must be greater than zero")
+	ErrNilLogger         = errors.New("tus logger can't be nil")
+	ErrNilStore          = errors.New("tus store can't be nil if Resume is enable")
+	ErrNilUpload         = errors.New("tus upload can't be nil")
+	ErrLargeUpload       = errors.New("tus upload body is to large")
+	ErrVersionMismatch   = errors.New("tus protocol version mismatch")
+	ErrOffsetMismatch    = errors.New("tus upload offset mismatch")
+	ErrUploadNotFound    = errors.New("tus upload not found")
+	ErrResumeNotEnabled  = errors.New("tus resuming not enabled")
+	ErrFingerprintNotSet = errors.New("tus fingerprint not set")
+)
+
+type ClientError struct {
+	Code int
+	Body []byte
+}
+
+func (c ClientError) Error() string {
+	return fmt.Sprintf("unexpected status code: %d", c.Code)
+}
diff --git a/backend/webdav/tus-upload.go b/backend/webdav/tus-upload.go
new file mode 100644
index 0000000000000..6e51bd4a45023
--- /dev/null
+++ b/backend/webdav/tus-upload.go
@@ -0,0 +1,83 @@
+package webdav
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"strings"
+)
+
+type Metadata map[string]string
+
+type Upload struct {
+	stream io.ReadSeeker
+	size   int64
+	offset int64
+
+	Fingerprint string
+	Metadata    Metadata
+}
+
+// Updates the Upload information based on offset.
+func (u *Upload) updateProgress(offset int64) {
+	u.offset = offset
+}
+
+// Returns whether this upload is finished or not.
+func (u *Upload) Finished() bool {
+	return u.offset >= u.size
+}
+
+// Returns the progress in a percentage.
+func (u *Upload) Progress() int64 {
+	return (u.offset * 100) / u.size
+}
+
+// Returns the current upload offset.
+func (u *Upload) Offset() int64 {
+	return u.offset
+}
+
+// Returns the size of the upload body.
+func (u *Upload) Size() int64 {
+	return u.size
+}
+
+// EncodedMetadata encodes the upload metadata.
+func (u *Upload) EncodedMetadata() string {
+	var encoded []string
+
+	for k, v := range u.Metadata {
+		encoded = append(encoded, fmt.Sprintf("%s %s", k, b64encode(v)))
+	}
+
+	return strings.Join(encoded, ",")
+}
+
+func b64encode(s string) string {
+	return base64.StdEncoding.EncodeToString([]byte(s))
+}
+
+// NewUpload creates a new upload from an io.Reader.
+func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint string) *Upload {
+	stream, ok := reader.(io.ReadSeeker)
+
+	if !ok {
+		buf := new(bytes.Buffer)
+		buf.ReadFrom(reader)
+		stream = bytes.NewReader(buf.Bytes())
+	}
+
+	if metadata == nil {
+		metadata = make(Metadata)
+	}
+
+	return &Upload{
+		stream: stream,
+		size:   size,
+
+		Fingerprint: fingerprint,
+		Metadata:    metadata,
+	}
+}
diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
new file mode 100644
index 0000000000000..01bc7dc027baa
--- /dev/null
+++ b/backend/webdav/tus-uploader.go
@@ -0,0 +1,186 @@
+package webdav
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strconv"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+type Uploader struct {
+	fs                  *Fs
+	url                 string
+	upload              *Upload
+	offset              int64
+	aborted             bool
+	uploadSubs          []chan Upload
+	notifyChan          chan bool
+	overridePatchMethod bool
+}
+
+// Subscribes to progress updates.
+func (u *Uploader) NotifyUploadProgress(c chan Upload) {
+	u.uploadSubs = append(u.uploadSubs, c)
+}
+
+func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
+
+	switch resp.StatusCode {
+	case 204:
+		if off, err := strconv.ParseInt(resp.Header.Get("Upload-Offset"), 10, 64); err == nil {
+			*newOff = off
+			return false, nil
+		} else {
+			return false, err
+		}
+	case 409:
+		return false, ErrOffsetMismatch
+	case 412:
+		return false, ErrVersionMismatch
+	case 413:
+		return false, ErrLargeUpload
+	}
+
+	return f.shouldRetry(ctx, resp, err)
+}
+
+func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64, offset int64) (int64, error) {
+	var method string
+
+	if !u.overridePatchMethod {
+		method = "PATCH"
+	} else {
+		method = "POST"
+	}
+
+	extraHeaders := map[string]string{} // FIXME: Use extraHeaders(ctx, src) from Object maybe?
+	extraHeaders["Upload-Offset"] = strconv.FormatInt(offset, 10)
+	extraHeaders["Tus-Resumable"] = "1.0.0"
+	extraHeaders["filetype"] = u.upload.Metadata["filetype"]
+	if u.overridePatchMethod {
+		extraHeaders["X-HTTP-Method-Override"] = "PATCH"
+	}
+
+	url, err := url.Parse(u.url)
+	if err != nil {
+		return 0, fmt.Errorf("upload Chunk failed, could not parse url")
+	}
+
+	// FIXME: Use GetBody func as in chunking.go
+	opts := rest.Opts{
+		Method:        method,
+		Path:          url.Path,
+		NoResponse:    true,
+		RootURL:       fmt.Sprintf("%s://%s", url.Scheme, url.Host),
+		ContentLength: &size,
+		Body:          body,
+		ContentType:   "application/offset+octet-stream",
+		ExtraHeaders:  extraHeaders,
+	}
+
+	var newOffset int64
+
+	err = u.fs.pacer.CallNoRetry(func() (bool, error) {
+		res, err := u.fs.srv.Call(ctx, &opts)
+		return u.fs.shouldRetryChunk(ctx, res, err, &newOffset)
+	})
+	if err != nil {
+		return 0, fmt.Errorf("uploadChunk failed: %w", err)
+		// FIXME What do we do here? Remove the entire upload?
+		// See https://github.com/tus/tusd/issues/176
+	}
+
+	return newOffset, nil
+}
+
+// Upload uploads the entire body to the server.
+func (u *Uploader) Upload(ctx context.Context) error {
+	var cnt int = 1
+
+	fs.Debug(u.fs, "Uploaded starts")
+	for u.offset < u.upload.size && !u.aborted {
+		err := u.UploadChunck(ctx, cnt)
+		cnt++
+		if err != nil {
+			return err
+		}
+	}
+	fs.Debug(u.fs, "-- Uploaded finished")
+
+	return nil
+}
+
+// UploadChunck uploads a single chunck.
+func (u *Uploader) UploadChunck(ctx context.Context, cnt int) error {
+	chunkSize := u.fs.opt.ChunkSize
+	data := make([]byte, chunkSize)
+
+	_, err := u.upload.stream.Seek(u.offset, 0)
+
+	if err != nil {
+		fs.Errorf(u.fs, "Chunk %d: Error seek in stream failed: %v", cnt, err)
+		return err
+	}
+
+	size, err := u.upload.stream.Read(data)
+
+	if err != nil {
+		fs.Errorf(u.fs, "Chunk %d: Error: Can not read from data strem: %v", cnt, err)
+		return err
+	}
+
+	body := bytes.NewBuffer(data[:size])
+
+	newOffset, err := u.uploadChunck(ctx, body, int64(size), u.offset)
+
+	if err == nil {
+		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
+	} else {
+		fs.Errorf(u.fs, "Uploaded chunk no %d failed: %v", cnt, err)
+
+		return err
+	}
+
+	u.offset = newOffset
+
+	u.upload.updateProgress(u.offset)
+
+	u.notifyChan <- true
+
+	return nil
+}
+
+// Waits for a signal to broadcast to all subscribers
+func (u *Uploader) broadcastProgress() {
+	for _ = range u.notifyChan {
+		for _, c := range u.uploadSubs {
+			c <- *u.upload
+		}
+	}
+}
+
+// NewUploader creates a new Uploader.
+func NewUploader(f *Fs, url string, upload *Upload, offset int64) *Uploader {
+	notifyChan := make(chan bool)
+
+	uploader := &Uploader{
+		f,
+		url,
+		upload,
+		offset,
+		false,
+		nil,
+		notifyChan,
+		false,
+	}
+
+	go uploader.broadcastProgress()
+
+	return uploader
+}
diff --git a/backend/webdav/tus.go b/backend/webdav/tus.go
new file mode 100644
index 0000000000000..30853146080d2
--- /dev/null
+++ b/backend/webdav/tus.go
@@ -0,0 +1,110 @@
+package webdav
+
+/*
+   Chunked upload based on the tus protocol for ownCloud Infinite Scale
+   See https://tus.io/protocols/resumable-upload
+*/
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"path/filepath"
+	"strconv"
+
+	"github.com/rclone/rclone/fs"
+	"github.com/rclone/rclone/lib/rest"
+)
+
+// set the chunk size for testing
+func (f *Fs) setUploadTusSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
+	old, f.opt.ChunkSize = f.opt.ChunkSize, cs
+	return
+}
+
+func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType string, src fs.ObjectInfo, options ...fs.OpenOption) (err error) {
+
+	fn := filepath.Base(src.Remote())
+	metadata := map[string]string{
+		"filename": fn,
+		"mtime":    strconv.FormatInt(src.ModTime(ctx).Unix(), 10),
+		"filetype": contentType,
+	}
+
+	// Fingerprint is used to identify the upload when resuming. That is not yet implemented
+	fingerprint := ""
+
+	// create an upload from a file.
+	upload := NewUpload(in, src.Size(), metadata, fingerprint)
+
+	// create the uploader.
+	uploader, err := o.CreateUploader(ctx, upload)
+	if err == nil {
+		// start the uploading process.
+		err = uploader.Upload(ctx)
+	}
+
+	return err
+}
+
+func (f *Fs) shouldRetryCreateUpload(ctx context.Context, resp *http.Response, err error) (bool, error) {
+
+	switch resp.StatusCode {
+	case 201:
+		location := resp.Header.Get("Location")
+		f.chunksUploadURL = location
+		return false, nil
+	case 412:
+		return false, ErrVersionMismatch
+	case 413:
+		return false, ErrLargeUpload
+	}
+
+	return f.shouldRetry(ctx, resp, err)
+}
+
+// CreateUpload creates a new upload in the server.
+func (o *Object) CreateUploader(ctx context.Context, u *Upload) (*Uploader, error) {
+	if u == nil {
+		return nil, ErrNilUpload
+	}
+
+	// if c.Config.Resume && len(u.Fingerprint) == 0 {
+	//		return nil, ErrFingerprintNotSet
+	//	}
+
+	l := int64(0)
+	p := o.filePath()
+	// cut the filename off
+	dir, _ := filepath.Split(p)
+	if dir == "" {
+		dir = "/"
+	}
+
+	opts := rest.Opts{
+		Method:        "POST",
+		Path:          dir,
+		NoResponse:    true,
+		RootURL:       o.fs.endpointURL,
+		ContentLength: &l,
+		ExtraHeaders:  o.extraHeaders(ctx, o),
+	}
+	opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
+	opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()
+	opts.ExtraHeaders["Tus-Resumable"] = "1.0.0"
+	// opts.ExtraHeaders["mtime"] = strconv.FormatInt(src.ModTime(ctx).Unix(), 10)
+
+	// rclone http call
+	err := o.fs.pacer.CallNoRetry(func() (bool, error) {
+		res, err := o.fs.srv.Call(ctx, &opts)
+		return o.fs.shouldRetryCreateUpload(ctx, res, err)
+	})
+	if err != nil {
+		return nil, fmt.Errorf("making upload directory failed: %w", err)
+	}
+
+	uploader := NewUploader(o.fs, o.fs.chunksUploadURL, u, 0)
+
+	return uploader, nil
+}
diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index f3e2ee9057385..6a8d80724efdb 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -84,7 +84,10 @@ func init() {
 				Help:  "Nextcloud",
 			}, {
 				Value: "owncloud",
-				Help:  "Owncloud",
+				Help:  "Owncloud 10 PHP based WebDAV server",
+			}, {
+				Value: "InfiniteScale",
+				Help:  "ownCloud Infinite Scale",
 			}, {
 				Value: "sharepoint",
 				Help:  "Sharepoint Online, authenticated by Microsoft account",
@@ -612,6 +615,14 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 		f.propsetMtime = true
 		f.hasOCMD5 = true
 		f.hasOCSHA1 = true
+	case "Infinite Scale":
+		f.precision = time.Second
+		f.useOCMtime = true
+		f.propsetMtime = true
+		f.hasOCMD5 = false
+		f.hasOCSHA1 = true
+		f.canChunk = true
+		f.opt.ChunkSize = 10 * fs.Mebi
 	case "nextcloud":
 		f.precision = time.Second
 		f.useOCMtime = true
@@ -1478,24 +1489,38 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update mkParentDir failed: %w", err)
 	}
 
-	if o.shouldUseChunkedUpload(src) {
-		fs.Debugf(src, "Update will use the chunked upload strategy")
-		err = o.updateChunked(ctx, in, src, options...)
+	if o.fs.opt.Vendor == "Infinite Scale" {
+		// Infinite Scale always prefers tus for upload
+		fs.Debugf(src, "Update will use the tus protocol to upload")
+		contentType := fs.MimeType(ctx, src)
+		err = o.updateViaTus(ctx, in, contentType, src, options...)
 		if err != nil {
-			return err
+			fs.Debug(src, "tus update failed.")
+			return fmt.Errorf("tus update failed: %w", err)
 		}
 	} else {
-		fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
-		contentType := fs.MimeType(ctx, src)
-		filePath := o.filePath()
-		extraHeaders := o.extraHeaders(ctx, src)
-		// TODO: define getBody() to enable low-level HTTP/2 retries
-		err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
-		if err != nil {
-			return err
+		if o.shouldUseChunkedUpload(src) {
+			if o.fs.opt.Vendor == "nextcloud" {
+				fs.Debugf(src, "Update will use the chunked upload strategy")
+				err = o.updateChunked(ctx, in, src, options...)
+			} else {
+				fs.Debug(src, "Chunking - unknown vendor")
+			}
+			if err != nil {
+				return err
+			}
+		} else {
+			fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
+			contentType := fs.MimeType(ctx, src)
+			filePath := o.filePath()
+			extraHeaders := o.extraHeaders(ctx, src)
+			// TODO: define getBody() to enable low-level HTTP/2 retries
+			err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
+			if err != nil {
+				return fmt.Errorf("unchunked simple update failed: %w", err)
+			}
 		}
 	}
-
 	// read metadata from remote
 	o.hasMetaData = false
 	return o.readMetaData(ctx)

From 53d28896f0b44d48a3daea2474777e2fc803be2d Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Mon, 11 Nov 2024 12:31:45 +0100
Subject: [PATCH 02/13] Add documentation for ownCloud Infinite Scale

Fix ownCloud spelling from Owncloud -> ownCloud
---
 docs/content/webdav.md | 39 ++++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 13 deletions(-)

diff --git a/docs/content/webdav.md b/docs/content/webdav.md
index 548b3b9d475bd..c0c7a7ce86fbc 100644
--- a/docs/content/webdav.md
+++ b/docs/content/webdav.md
@@ -47,15 +47,17 @@ Choose a number from below, or type in your own value
    \ (fastmail)
  2 / Nextcloud
    \ (nextcloud)
- 3 / Owncloud
+ 3 / ownCloud 10 PHP based WebDAV server
    \ (owncloud)
- 4 / Sharepoint Online, authenticated by Microsoft account
+ 4 / ownCloud Infinite Scale
+   \ (infinitescale)
+ 5 / Sharepoint Online, authenticated by Microsoft account
    \ (sharepoint)
- 5 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
+ 6 / Sharepoint with NTLM authentication, usually self-hosted or on-premises
    \ (sharepoint-ntlm)
- 6 / rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol
+ 7 / rclone WebDAV server to serve a remote over HTTP via the WebDAV protocol
    \ (rclone)
- 7 / Other site/service or software
+ 8 / Other site/service or software
    \ (other)
 vendor> 2
 User name
@@ -104,11 +106,11 @@ To copy a local directory to an WebDAV directory called backup
 ### Modification times and hashes
 
 Plain WebDAV does not support modified times.  However when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support modified times.
+Fastmail Files, ownCloud or Nextcloud rclone will support modified times.
 
 Likewise plain WebDAV does not support hashes, however when used with
-Fastmail Files, Owncloud or Nextcloud rclone will support SHA1 and MD5 hashes.
-Depending on the exact version of Owncloud or Nextcloud hashes may
+Fastmail Files, ownCloud or Nextcloud rclone will support SHA1 and MD5 hashes.
+Depending on the exact version of ownCloud or Nextcloud hashes may
 appear on all objects, or only on objects which had a hash uploaded
 with them.
 
@@ -146,7 +148,9 @@ Properties:
     - "nextcloud"
         - Nextcloud
     - "owncloud"
-        - Owncloud
+        - ownCloud
+    - "infinitescale"
+        - ownCloud Infinite Scale
     - "sharepoint"
         - Sharepoint Online, authenticated by Microsoft account
     - "sharepoint-ntlm"
@@ -332,19 +336,28 @@ this as the password.
 
 Fastmail supports modified times using the `X-OC-Mtime` header.
 
-### Owncloud
+### ownCloud
 
 Click on the settings cog in the bottom right of the page and this
 will show the WebDAV URL that rclone needs in the config step.  It
 will look something like `https://example.com/remote.php/webdav/`.
 
-Owncloud supports modified times using the `X-OC-Mtime` header.
+ownCloud supports modified times using the `X-OC-Mtime` header.
 
 ### Nextcloud
 
-This is configured in an identical way to Owncloud.  Note that
+This is configured in an identical way to ownCloud.  Note that
 Nextcloud initially did not support streaming of files (`rcat`) whereas
-Owncloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
+ownCloud did, but [this](https://github.com/nextcloud/nextcloud-snap/issues/365) seems to be fixed as of 2020-11-27 (tested with rclone v1.53.1 and Nextcloud Server v19).
+
+### ownCloud Infinite Scale
+
+The WebDAV URL for Infinite Scale can be found in the details panel of
+any space in Infinite Scale, provided the user has enabled its display
+via the corresponding checkbox in their personal settings.
+
+Infinite Scale works with the chunking [tus](https://tus.io) upload protocol.
+The chunk size is currently fixed at 10 MB.
 
 ### Sharepoint Online
 

From f9f810ccfcf9a632a70c0a1ffa5e39b65a15848a Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Mon, 11 Nov 2024 12:32:30 +0100
Subject: [PATCH 03/13] Fix vendor string to single word lowercase
 "infinitescale"

---
 backend/webdav/webdav.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 6a8d80724efdb..ea47d4d8e872c 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -86,7 +86,7 @@ func init() {
 				Value: "owncloud",
 				Help:  "Owncloud 10 PHP based WebDAV server",
 			}, {
-				Value: "InfiniteScale",
+				Value: "infinitescale",
 				Help:  "ownCloud Infinite Scale",
 			}, {
 				Value: "sharepoint",
@@ -615,7 +615,7 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 		f.propsetMtime = true
 		f.hasOCMD5 = true
 		f.hasOCSHA1 = true
-	case "Infinite Scale":
+	case "infinitescale":
 		f.precision = time.Second
 		f.useOCMtime = true
 		f.propsetMtime = true
@@ -1489,7 +1489,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update mkParentDir failed: %w", err)
 	}
 
-	if o.fs.opt.Vendor == "Infinite Scale" {
+	if o.fs.opt.Vendor == "infinitescale" {
 		// Infinite Scale always prefers tus for upload
 		fs.Debugf(src, "Update will use the tus protocol to upload")
 		contentType := fs.MimeType(ctx, src)

From e07847e3ea0cf2e4fc299ec78707b340ce985cda Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Mon, 11 Nov 2024 15:46:53 +0100
Subject: [PATCH 04/13] Consider the extra options for rclone http operations

Add some documentation bits for functions and structs
---
 backend/webdav/tus-errors.go   |  2 ++
 backend/webdav/tus-upload.go   |  2 ++
 backend/webdav/tus-uploader.go | 14 ++++++++------
 backend/webdav/tus.go          |  9 +++++----
 4 files changed, 17 insertions(+), 10 deletions(-)

diff --git a/backend/webdav/tus-errors.go b/backend/webdav/tus-errors.go
index 6d5bdc7eba49a..4e44276c47177 100644
--- a/backend/webdav/tus-errors.go
+++ b/backend/webdav/tus-errors.go
@@ -18,11 +18,13 @@ var (
 	ErrFingerprintNotSet = errors.New("tus fingerprint not set")
 )
 
+// ClientError represents an error state of a client
 type ClientError struct {
 	Code int
 	Body []byte
 }
 
+// Error returns an error string containing the client error code
 func (c ClientError) Error() string {
 	return fmt.Sprintf("unexpected status code: %d", c.Code)
 }
diff --git a/backend/webdav/tus-upload.go b/backend/webdav/tus-upload.go
index 6e51bd4a45023..9a957f5b686d9 100644
--- a/backend/webdav/tus-upload.go
+++ b/backend/webdav/tus-upload.go
@@ -8,8 +8,10 @@ import (
 	"strings"
 )
 
+// Metadata is a typedef for a string to string map to hold metadata
 type Metadata map[string]string
 
+// Upload is a struct containing the file status during upload
 type Upload struct {
 	stream io.ReadSeeker
 	size   int64
diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
index 01bc7dc027baa..f51683b115e6e 100644
--- a/backend/webdav/tus-uploader.go
+++ b/backend/webdav/tus-uploader.go
@@ -13,6 +13,7 @@ import (
 	"github.com/rclone/rclone/lib/rest"
 )
 
+// Uploader holds all information about a currently running upload
 type Uploader struct {
 	fs                  *Fs
 	url                 string
@@ -50,7 +51,7 @@ func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err erro
 	return f.shouldRetry(ctx, resp, err)
 }
 
-func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64, offset int64) (int64, error) {
+func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
 	var method string
 
 	if !u.overridePatchMethod {
@@ -82,6 +83,7 @@ func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64,
 		Body:          body,
 		ContentType:   "application/offset+octet-stream",
 		ExtraHeaders:  extraHeaders,
+		Options:       options,
 	}
 
 	var newOffset int64
@@ -100,12 +102,12 @@ func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64,
 }
 
 // Upload uploads the entire body to the server.
-func (u *Uploader) Upload(ctx context.Context) error {
+func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
 	var cnt int = 1
 
 	fs.Debug(u.fs, "Uploaded starts")
 	for u.offset < u.upload.size && !u.aborted {
-		err := u.UploadChunck(ctx, cnt)
+		err := u.UploadChunck(ctx, cnt, options...)
 		cnt++
 		if err != nil {
 			return err
@@ -117,7 +119,7 @@ func (u *Uploader) Upload(ctx context.Context) error {
 }
 
 // UploadChunck uploads a single chunck.
-func (u *Uploader) UploadChunck(ctx context.Context, cnt int) error {
+func (u *Uploader) UploadChunck(ctx context.Context, cnt int, options ...fs.OpenOption) error {
 	chunkSize := u.fs.opt.ChunkSize
 	data := make([]byte, chunkSize)
 
@@ -137,7 +139,7 @@ func (u *Uploader) UploadChunck(ctx context.Context, cnt int) error {
 
 	body := bytes.NewBuffer(data[:size])
 
-	newOffset, err := u.uploadChunck(ctx, body, int64(size), u.offset)
+	newOffset, err := u.uploadChunck(ctx, body, int64(size), u.offset, options...)
 
 	if err == nil {
 		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)
@@ -158,7 +160,7 @@ func (u *Uploader) UploadChunck(ctx context.Context, cnt int) error {
 
 // Waits for a signal to broadcast to all subscribers
 func (u *Uploader) broadcastProgress() {
-	for _ = range u.notifyChan {
+	for range u.notifyChan {
 		for _, c := range u.uploadSubs {
 			c <- *u.upload
 		}
diff --git a/backend/webdav/tus.go b/backend/webdav/tus.go
index 30853146080d2..e94d24f318c69 100644
--- a/backend/webdav/tus.go
+++ b/backend/webdav/tus.go
@@ -39,10 +39,10 @@ func (o *Object) updateViaTus(ctx context.Context, in io.Reader, contentType str
 	upload := NewUpload(in, src.Size(), metadata, fingerprint)
 
 	// create the uploader.
-	uploader, err := o.CreateUploader(ctx, upload)
+	uploader, err := o.CreateUploader(ctx, upload, options...)
 	if err == nil {
 		// start the uploading process.
-		err = uploader.Upload(ctx)
+		err = uploader.Upload(ctx, options...)
 	}
 
 	return err
@@ -64,8 +64,8 @@ func (f *Fs) shouldRetryCreateUpload(ctx context.Context, resp *http.Response, e
 	return f.shouldRetry(ctx, resp, err)
 }
 
-// CreateUpload creates a new upload in the server.
-func (o *Object) CreateUploader(ctx context.Context, u *Upload) (*Uploader, error) {
+// CreateUpload creates a new upload to the server.
+func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
 	if u == nil {
 		return nil, ErrNilUpload
 	}
@@ -89,6 +89,7 @@ func (o *Object) CreateUploader(ctx context.Context, u *Upload) (*Uploader, erro
 		RootURL:       o.fs.endpointURL,
 		ContentLength: &l,
 		ExtraHeaders:  o.extraHeaders(ctx, o),
+		Options:       options,
 	}
 	opts.ExtraHeaders["Upload-Length"] = strconv.FormatInt(u.size, 10)
 	opts.ExtraHeaders["Upload-Metadata"] = u.EncodedMetadata()

From 88b0263bb872353aa009b6f6ddc5502b2243ba7b Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Tue, 12 Nov 2024 15:47:56 +0100
Subject: [PATCH 05/13] Handle nil pointer of response struct to avoid crash

---
 backend/webdav/tus-uploader.go | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
index f51683b115e6e..2b3c850b517e2 100644
--- a/backend/webdav/tus-uploader.go
+++ b/backend/webdav/tus-uploader.go
@@ -31,6 +31,9 @@ func (u *Uploader) NotifyUploadProgress(c chan Upload) {
 }
 
 func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err error, newOff *int64) (bool, error) {
+	if resp == nil {
+		return true, err
+	}
 
 	switch resp.StatusCode {
 	case 204:

From 49543518f000b6228e9decd9454901f0a7f00070 Mon Sep 17 00:00:00 2001
From: Christian Richter <crichter@owncloud.com>
Date: Mon, 25 Nov 2024 09:09:39 +0100
Subject: [PATCH 06/13] fix typo

Signed-off-by: Christian Richter <crichter@owncloud.com>
---
 backend/webdav/tus-uploader.go | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
index 2b3c850b517e2..e0caa79f92605 100644
--- a/backend/webdav/tus-uploader.go
+++ b/backend/webdav/tus-uploader.go
@@ -54,7 +54,7 @@ func (f *Fs) shouldRetryChunk(ctx context.Context, resp *http.Response, err erro
 	return f.shouldRetry(ctx, resp, err)
 }
 
-func (u *Uploader) uploadChunck(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
+func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64, offset int64, options ...fs.OpenOption) (int64, error) {
 	var method string
 
 	if !u.overridePatchMethod {
@@ -110,7 +110,7 @@ func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
 
 	fs.Debug(u.fs, "Uploaded starts")
 	for u.offset < u.upload.size && !u.aborted {
-		err := u.UploadChunck(ctx, cnt, options...)
+		err := u.UploadChunk(ctx, cnt, options...)
 		cnt++
 		if err != nil {
 			return err
@@ -121,8 +121,8 @@ func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
 	return nil
 }
 
-// UploadChunck uploads a single chunck.
-func (u *Uploader) UploadChunck(ctx context.Context, cnt int, options ...fs.OpenOption) error {
+// UploadChunk uploads a single chunk.
+func (u *Uploader) UploadChunk(ctx context.Context, cnt int, options ...fs.OpenOption) error {
 	chunkSize := u.fs.opt.ChunkSize
 	data := make([]byte, chunkSize)
 
@@ -142,7 +142,7 @@ func (u *Uploader) UploadChunck(ctx context.Context, cnt int, options ...fs.Open
 
 	body := bytes.NewBuffer(data[:size])
 
-	newOffset, err := u.uploadChunck(ctx, body, int64(size), u.offset, options...)
+	newOffset, err := u.uploadChunk(ctx, body, int64(size), u.offset, options...)
 
 	if err == nil {
 		fs.Debugf(u.fs, "Uploaded chunk no %d ok, range %d -> %d", cnt, u.offset, newOffset)

From 5b77b2f27dbb91f07770e6eeb583d4ebe32ff031 Mon Sep 17 00:00:00 2001
From: Christian Richter <crichter@owncloud.com>
Date: Wed, 4 Dec 2024 08:18:12 +0100
Subject: [PATCH 07/13] make linter happy

Signed-off-by: Christian Richter <crichter@owncloud.com>
---
 backend/webdav/tus-errors.go   | 28 +++++++++++++++++++---------
 backend/webdav/tus-upload.go   | 13 ++++++++-----
 backend/webdav/tus-uploader.go |  4 ++--
 backend/webdav/tus.go          |  2 +-
 4 files changed, 30 insertions(+), 17 deletions(-)

diff --git a/backend/webdav/tus-errors.go b/backend/webdav/tus-errors.go
index 4e44276c47177..43ff0de8e7d53 100644
--- a/backend/webdav/tus-errors.go
+++ b/backend/webdav/tus-errors.go
@@ -6,15 +6,25 @@ import (
 )
 
 var (
-	ErrChuckSize         = errors.New("tus chunk size must be greater than zero")
-	ErrNilLogger         = errors.New("tus logger can't be nil")
-	ErrNilStore          = errors.New("tus store can't be nil if Resume is enable")
-	ErrNilUpload         = errors.New("tus upload can't be nil")
-	ErrLargeUpload       = errors.New("tus upload body is to large")
-	ErrVersionMismatch   = errors.New("tus protocol version mismatch")
-	ErrOffsetMismatch    = errors.New("tus upload offset mismatch")
-	ErrUploadNotFound    = errors.New("tus upload not found")
-	ErrResumeNotEnabled  = errors.New("tus resuming not enabled")
+	// ErrChuckSize is returned when the chunk size is zero
+	ErrChuckSize = errors.New("tus chunk size must be greater than zero")
+	// ErrNilLogger is returned when the logger is nil
+	ErrNilLogger = errors.New("tus logger can't be nil")
+	// ErrNilStore is returned when the store is nil
+	ErrNilStore = errors.New("tus store can't be nil if resume is enable")
+	// ErrNilUpload is returned when the upload is nil
+	ErrNilUpload = errors.New("tus upload can't be nil")
+	// ErrLargeUpload is returned when the upload body is too large
+	ErrLargeUpload = errors.New("tus upload body is to large")
+	// ErrVersionMismatch is returned when the tus protocol version is mismatching
+	ErrVersionMismatch = errors.New("tus protocol version mismatch")
+	// ErrOffsetMismatch is returned when the tus upload offset is mismatching
+	ErrOffsetMismatch = errors.New("tus upload offset mismatch")
+	// ErrUploadNotFound is returned when the tus upload is not found
+	ErrUploadNotFound = errors.New("tus upload not found")
+	// ErrResumeNotEnabled is returned when the tus resuming is not enabled
+	ErrResumeNotEnabled = errors.New("tus resuming not enabled")
+	// ErrFingerprintNotSet is returned when the tus fingerprint is not set
 	ErrFingerprintNotSet = errors.New("tus fingerprint not set")
 )
 
diff --git a/backend/webdav/tus-upload.go b/backend/webdav/tus-upload.go
index 9a957f5b686d9..4b4cc2272255b 100644
--- a/backend/webdav/tus-upload.go
+++ b/backend/webdav/tus-upload.go
@@ -26,22 +26,22 @@ func (u *Upload) updateProgress(offset int64) {
 	u.offset = offset
 }
 
-// Returns whether this upload is finished or not.
+// Finished returns whether this upload is finished or not.
 func (u *Upload) Finished() bool {
 	return u.offset >= u.size
 }
 
-// Returns the progress in a percentage.
+// Progress returns the progress in a percentage.
 func (u *Upload) Progress() int64 {
 	return (u.offset * 100) / u.size
 }
 
-// Returns the current upload offset.
+// Offset returns the current upload offset.
 func (u *Upload) Offset() int64 {
 	return u.offset
 }
 
-// Returns the size of the upload body.
+// Size returns the size of the upload body.
 func (u *Upload) Size() int64 {
 	return u.size
 }
@@ -67,7 +67,10 @@ func NewUpload(reader io.Reader, size int64, metadata Metadata, fingerprint stri
 
 	if !ok {
 		buf := new(bytes.Buffer)
-		buf.ReadFrom(reader)
+		_, err := buf.ReadFrom(reader)
+		if err != nil {
+			return nil
+		}
 		stream = bytes.NewReader(buf.Bytes())
 	}
 
diff --git a/backend/webdav/tus-uploader.go b/backend/webdav/tus-uploader.go
index e0caa79f92605..bad2ef63766ad 100644
--- a/backend/webdav/tus-uploader.go
+++ b/backend/webdav/tus-uploader.go
@@ -25,7 +25,7 @@ type Uploader struct {
 	overridePatchMethod bool
 }
 
-// Subscribes to progress updates.
+// NotifyUploadProgress subscribes to progress updates.
 func (u *Uploader) NotifyUploadProgress(c chan Upload) {
 	u.uploadSubs = append(u.uploadSubs, c)
 }
@@ -106,7 +106,7 @@ func (u *Uploader) uploadChunk(ctx context.Context, body io.Reader, size int64,
 
 // Upload uploads the entire body to the server.
 func (u *Uploader) Upload(ctx context.Context, options ...fs.OpenOption) error {
-	var cnt int = 1
+	cnt := 1
 
 	fs.Debug(u.fs, "Uploaded starts")
 	for u.offset < u.upload.size && !u.aborted {
diff --git a/backend/webdav/tus.go b/backend/webdav/tus.go
index e94d24f318c69..8868e54499f09 100644
--- a/backend/webdav/tus.go
+++ b/backend/webdav/tus.go
@@ -64,7 +64,7 @@ func (f *Fs) shouldRetryCreateUpload(ctx context.Context, resp *http.Response, e
 	return f.shouldRetry(ctx, resp, err)
 }
 
-// CreateUpload creates a new upload to the server.
+// CreateUploader creates a new uploader that sends the upload u to the server.
 func (o *Object) CreateUploader(ctx context.Context, u *Upload, options ...fs.OpenOption) (*Uploader, error) {
 	if u == nil {
 		return nil, ErrNilUpload

From bc39e5bdfa2b63ed44c0454ce454c7a34a3341c3 Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Tue, 17 Dec 2024 15:34:24 +0100
Subject: [PATCH 08/13] Fix to use proper mtime for OC-Mtime header

---
 backend/webdav/webdav.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index ea47d4d8e872c..822b5f54fc9a0 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -1530,7 +1530,7 @@ func (o *Object) extraHeaders(ctx context.Context, src fs.ObjectInfo) map[string
 	extraHeaders := map[string]string{}
 	if o.fs.useOCMtime || o.fs.hasOCMD5 || o.fs.hasOCSHA1 {
 		if o.fs.useOCMtime {
-			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", src.ModTime(ctx).Unix())
+			extraHeaders["X-OC-Mtime"] = fmt.Sprintf("%d", o.modTime.Unix())
 		}
 		// Set one upload checksum
 		// Owncloud uses one checksum only to check the upload and stores its own SHA1 and MD5

From e27acfe085824db87e29d9e67220663ea513518d Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Tue, 17 Dec 2024 15:34:50 +0100
Subject: [PATCH 09/13] Set a more suitable logging class

---
 backend/webdav/webdav.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 822b5f54fc9a0..8d72ee2ad7642 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -1320,7 +1320,7 @@ func (o *Object) Size() int64 {
 	ctx := context.TODO()
 	err := o.readMetaData(ctx)
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Infof(o, "Failed to read metadata: %v", err)
 		return 0
 	}
 	return o.size
@@ -1364,7 +1364,7 @@ func (o *Object) readMetaData(ctx context.Context) (err error) {
 func (o *Object) ModTime(ctx context.Context) time.Time {
 	err := o.readMetaData(ctx)
 	if err != nil {
-		fs.Logf(o, "Failed to read metadata: %v", err)
+		fs.Infof(o, "Failed to read metadata: %v", err)
 		return time.Now()
 	}
 	return o.modTime

From 7bb5f3e7de0c6b958491755118de6451fee8e9bf Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Tue, 17 Dec 2024 18:30:32 +0100
Subject: [PATCH 10/13] Add script to spin up infinitescale for testing

---
 .../testserver/init.d/TestWebdavInfiniteScale | 49 +++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100755 fstest/testserver/init.d/TestWebdavInfiniteScale

diff --git a/fstest/testserver/init.d/TestWebdavInfiniteScale b/fstest/testserver/init.d/TestWebdavInfiniteScale
new file mode 100755
index 0000000000000..7226055c4c8fe
--- /dev/null
+++ b/fstest/testserver/init.d/TestWebdavInfiniteScale
@@ -0,0 +1,49 @@
+#!/usr/bin/env bash
+
+set -e
+
+NAME=infinitescale
+USER=admin
+PASS=admin
+PORT=9200
+
+. $(dirname "$0")/docker.bash
+
+start() {
+
+    docker run --rm --name $NAME \
+           -v $(pwd):/etc/ocis \
+           -e "OCIS_INSECURE=true" \
+           -e "IDM_ADMIN_PASSWORD=$PASS" \
+           -e "OCIS_FORCE_CONFIG_OVERWRITE=true" \
+           -e "OCIS_URL=https://127.0.0.1:$PORT" \
+           owncloud/ocis \
+           init
+
+    docker run --rm -d --name $NAME \
+           -e "OCIS_LOG_LEVEL=debug" \
+           -e "OCIS_LOG_PRETTY=true" \
+           -e "OCIS_URL=https://127.0.0.1:$PORT" \
+           -e "OCIS_ADMIN_USER_ID=some-admin-user-id-0000-100000000000" \
+           -e "IDM_ADMIN_PASSWORD=$PASS" \
+           -e "OCIS_INSECURE=true" \
+           -e "PROXY_ENABLE_BASIC_AUTH=true" \
+           -v $(pwd):/etc/ocis \
+           -p 127.0.0.1:${PORT}:9200 \
+           owncloud/ocis
+
+    echo type=webdav
+    echo url=https://127.0.0.1:${PORT}/dav/spaces/some-admin-user-id-0000-100000000000
+    echo user=$USER
+    echo pass=$(rclone obscure $PASS)
+    echo vendor=infinitescale
+    echo _connect=127.0.0.1:${PORT}
+}
+
+stop() {
+    # Clean up the mess
+    docker stop infinitescale
+    rm -f ./ocis.yaml
+}
+
+. $(dirname "$0")/run.bash

From 523496ac0c2757c8570642b5b53b2f2dd21bf313 Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Wed, 18 Dec 2024 15:44:42 +0100
Subject: [PATCH 11/13] Add InfiniteScale test to test_all

---
 fstest/test_all/config.yaml | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/fstest/test_all/config.yaml b/fstest/test_all/config.yaml
index 1565c7e8ea14d..100f09c92e5d1 100644
--- a/fstest/test_all/config.yaml
+++ b/fstest/test_all/config.yaml
@@ -387,6 +387,12 @@ backends:
      - TestIntegration/FsMkdir/FsEncoding/punctuation
      - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
    fastlist: false
+ - backend:  "webdav"
+   remote:   "TestWebdavInfiniteScale:"
+   ignore:
+     - TestIntegration/FsMkdir/FsEncoding/punctuation
+     - TestIntegration/FsMkdir/FsEncoding/invalid_UTF-8
+   fastlist: false
  - backend:  "webdav"
    remote:   "TestWebdavRclone:"
    ignore:

From 6261312eb48f497949d6a2e5f2ffc86d0a27c763 Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Wed, 18 Dec 2024 15:45:13 +0100
Subject: [PATCH 12/13] Introduce a canTus option flag to avoid vendor string
 checks

Feedback from review considered
---
 backend/webdav/webdav.go | 45 ++++++++++++++++++++--------------------
 1 file changed, 22 insertions(+), 23 deletions(-)

diff --git a/backend/webdav/webdav.go b/backend/webdav/webdav.go
index 8d72ee2ad7642..9acef9224ab3c 100644
--- a/backend/webdav/webdav.go
+++ b/backend/webdav/webdav.go
@@ -197,6 +197,7 @@ type Fs struct {
 	pacer              *fs.Pacer     // pacer for API calls
 	precision          time.Duration // mod time precision
 	canStream          bool          // set if can stream
+	canTus             bool          // supports the TUS upload protocol
 	useOCMtime         bool          // set if can use X-OC-Mtime
 	propsetMtime       bool          // set if can use propset
 	retryWithZeroDepth bool          // some vendors (sharepoint) won't list files when Depth is 1 (our default)
@@ -621,7 +622,8 @@ func (f *Fs) setQuirks(ctx context.Context, vendor string) error {
 		f.propsetMtime = true
 		f.hasOCMD5 = false
 		f.hasOCSHA1 = true
-		f.canChunk = true
+		f.canChunk = false
+		f.canTus = true
 		f.opt.ChunkSize = 10 * fs.Mebi
 	case "nextcloud":
 		f.precision = time.Second
@@ -1489,8 +1491,7 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 		return fmt.Errorf("Update mkParentDir failed: %w", err)
 	}
 
-	if o.fs.opt.Vendor == "infinitescale" {
-		// Infinite Scale always prefers tus for upload
+	if o.fs.canTus { // supports the tus upload protocol, i.e. InfiniteScale
 		fs.Debugf(src, "Update will use the tus protocol to upload")
 		contentType := fs.MimeType(ctx, src)
 		err = o.updateViaTus(ctx, in, contentType, src, options...)
@@ -1498,27 +1499,25 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
 			fs.Debug(src, "tus update failed.")
 			return fmt.Errorf("tus update failed: %w", err)
 		}
-	} else {
-		if o.shouldUseChunkedUpload(src) {
-			if o.fs.opt.Vendor == "nextcloud" {
-				fs.Debugf(src, "Update will use the chunked upload strategy")
-				err = o.updateChunked(ctx, in, src, options...)
-			} else {
-				fs.Debug(src, "Chunking - unknown vendor")
-			}
-			if err != nil {
-				return err
-			}
+	} else if o.shouldUseChunkedUpload(src) {
+		if o.fs.opt.Vendor == "nextcloud" {
+			fs.Debugf(src, "Update will use the chunked upload strategy")
+			err = o.updateChunked(ctx, in, src, options...)
 		} else {
-			fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
-			contentType := fs.MimeType(ctx, src)
-			filePath := o.filePath()
-			extraHeaders := o.extraHeaders(ctx, src)
-			// TODO: define getBody() to enable low-level HTTP/2 retries
-			err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
-			if err != nil {
-				return fmt.Errorf("unchunked simple update failed: %w", err)
-			}
+			fs.Debug(src, "Chunking - unknown vendor")
+		}
+		if err != nil {
+			return err
+		}
+	} else {
+		fs.Debugf(src, "Update will use the normal upload strategy (no chunks)")
+		contentType := fs.MimeType(ctx, src)
+		filePath := o.filePath()
+		extraHeaders := o.extraHeaders(ctx, src)
+		// TODO: define getBody() to enable low-level HTTP/2 retries
+		err = o.updateSimple(ctx, in, nil, filePath, src.Size(), contentType, extraHeaders, o.fs.endpointURL, options...)
+		if err != nil {
+			return fmt.Errorf("unchunked simple update failed: %w", err)
 		}
 	}
 	// read metadata from remote

From 587bf80f82049d4f62711346339ca1128753f683 Mon Sep 17 00:00:00 2001
From: Klaas Freitag <klaas.freitag@kiteworks.com>
Date: Wed, 18 Dec 2024 15:49:28 +0100
Subject: [PATCH 13/13] Fixed typo errChuckSize -> errChunkSize

---
 backend/webdav/tus-errors.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/backend/webdav/tus-errors.go b/backend/webdav/tus-errors.go
index 43ff0de8e7d53..4cb592398a012 100644
--- a/backend/webdav/tus-errors.go
+++ b/backend/webdav/tus-errors.go
@@ -7,7 +7,7 @@ import (
 
 var (
-	// ErrChuckSize is returned when the chunk size is zero
-	ErrChuckSize = errors.New("tus chunk size must be greater than zero")
+	// ErrChunkSize is returned when the chunk size is zero
+	ErrChunkSize = errors.New("tus chunk size must be greater than zero")
 	// ErrNilConfig is returned when the logger is nil
 	ErrNilLogger = errors.New("tus logger can't be nil")
 	// ErrNilStore is returned when the store is nil