File 0025-LU-16321-osd-Allow-fiemap-on-kernel-buffers.patch of Package lustre_2_15

From afcb10f29c6fb78bce20585314ff2b470bafb1e1 Mon Sep 17 00:00:00 2001
From: Shaun Tancheff <shaun.tancheff@hpe.com>
Date: Tue, 22 Nov 2022 04:05:07 -0600
Subject: [PATCH 25/30] LU-16321 osd: Allow fiemap on kernel buffers

Linux commit v5.17-rc3-19-g967747bbc084
  uaccess: remove CONFIG_SET_FS

With KERNEL_DS gone, Lustre needs an alternative way for fiemap
to copy extents to kernel-space memory.

Direct in-kernel calls to inode->i_op->fiemap() can use an
otherwise unused flag in fiemap_extent_info::fi_flags to indicate
that the fiemap extent buffer is allocated in kernel space.
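
For illustration, this is the dispatch the flag enables in the
copied-in ext4 helper (ext4_fiemap_fill_next_extent(), added by
the ldiskfs patch below):

  dest += fieinfo->fi_extents_mapped;
  if (fieinfo->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY)
          memcpy((__force void *)dest, &extent, sizeof(extent));
  else if (copy_to_user(dest, &extent, sizeof(extent)))
          return -EFAULT;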

Include an ldiskfs patch for ldiskfs_fiemap() that defines
EXT4_FIEMAP_FLAG_MEMCPY and honors it.
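
On the osd side the flag is set and cleared around the direct
->fiemap() call; condensed from the osd_io.c changes below:

  DECLARE_MM_SEGMENT_T(saved_fs);

  access_set_kernel(saved_fs, &fei);  /* set_fs() or LDISKFS_FIEMAP_FLAG_MEMCPY */
  rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
  access_unset_kernel(saved_fs, &fei);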

HPE-bug-id: LUS-11337
Fixes: d0337cab8e ("LU-14195 osd: don't use set_fs() for ->fiemap() calls.")
Signed-off-by: Shaun Tancheff <shaun.tancheff@hpe.com>
Change-Id: I7a8edb481833fd1bdcf7b6cd6e08397c1754baee
---
 .../linux-5.10/ext4-fiemap-kernel-data.patch  | 312 ++++++++++++++++++
 .../series/ldiskfs-5.10.0-ml.series           |   1 +
 .../series/ldiskfs-5.11.0-40-ubuntu20.series  |   1 +
 .../series/ldiskfs-5.14.21-sles15sp4.series   |   1 +
 lustre/osd-ldiskfs/osd_io.c                   |  23 +-
 5 files changed, 337 insertions(+), 1 deletion(-)
 create mode 100644 ldiskfs/kernel_patches/patches/linux-5.10/ext4-fiemap-kernel-data.patch

diff --git a/ldiskfs/kernel_patches/patches/linux-5.10/ext4-fiemap-kernel-data.patch b/ldiskfs/kernel_patches/patches/linux-5.10/ext4-fiemap-kernel-data.patch
new file mode 100644
index 000000000000..4f7cbc9bdf3f
--- /dev/null
+++ b/ldiskfs/kernel_patches/patches/linux-5.10/ext4-fiemap-kernel-data.patch
@@ -0,0 +1,312 @@
+Subject: [PATCH] ext4-fiemap-kernel-data
+
+Pull in enough upstream fiemap handling to conditionally use
+memcpy() instead of copy_to_user() in fiemap_fill_next_extent().
+The copied-in kernel helpers are prefixed with ext4_ or _ext4_.
+
+---
+ fs/ext4/ext4.h    |   3 +
+ fs/ext4/extents.c | 265 +++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 264 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index c931e3a..0bb54fe 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -766,6 +766,9 @@ enum {
+  */
+ #define EXT4_FIEMAP_EXTENT_HOLE		0x08000000
+ 
++/* Otherwise unused fi_flags bit: use memcpy instead of copy_[to|from]_user */
++#define EXT4_FIEMAP_FLAG_MEMCPY		0x80000000
++
+ /* Max physical block we can address w/o extents */
+ #define EXT4_MAX_BLOCK_FILE_PHYS	0xFFFFFFFF
+ 
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 2e62f83..176d2b8 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2138,6 +2138,264 @@ cleanup:
+ 	return err;
+ }
+ 
++#ifdef KERNEL_DS
++#define ext4_iomap_fiemap(i, f, s, l, ops) \
++	iomap_fiemap((i), (f), (s), (l), (ops))
++#else
++/*
++ * linux:
++ *   ext4_fiemap_fill_next_extent <--- fiemap_fill_next_extent
++ *   ext4_iomap_to_fiemap <----------- iomap_to_fiemap
++ *   ext4_iomap_fiemap_actor <-------- iomap_fiemap_actor
++ *   ext4_iomap_apply <--------------- iomap_apply
++ *   _ext4_iomap_fiemap <------------- iomap_fiemap
++ */
++/**
++ * ext4_fiemap_fill_next_extent - Fiemap helper function
++ * @fieinfo:	Fiemap context passed into ->fiemap
++ * @logical:	Extent logical start offset, in bytes
++ * @phys:	Extent physical start offset, in bytes
++ * @len:	Extent length, in bytes
++ * @flags:	FIEMAP_EXTENT flags that describe this extent
++ *
++ * Called from file system ->fiemap callback. Will populate extent
++ * info as passed in via arguments and copy it to user or kernel
++ * memory. On success, extent count on fieinfo is incremented.
++ *
++ * Returns 0 on success, -errno on error, 1 if this was the last
++ * extent that will fit in the user array.
++ */
++#define SET_UNKNOWN_FLAGS	(FIEMAP_EXTENT_DELALLOC)
++#define SET_NO_UNMOUNTED_IO_FLAGS	(FIEMAP_EXTENT_DATA_ENCRYPTED)
++#define SET_NOT_ALIGNED_FLAGS	(FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
++static int ext4_fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo,
++					u64 logical, u64 phys, u64 len,
++					u32 flags)
++{
++	struct fiemap_extent extent;
++	struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
++
++	/* only count the extents */
++	if (fieinfo->fi_extents_max == 0) {
++		fieinfo->fi_extents_mapped++;
++		return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++	}
++
++	if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
++		return 1;
++
++	if (flags & SET_UNKNOWN_FLAGS)
++		flags |= FIEMAP_EXTENT_UNKNOWN;
++	if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
++		flags |= FIEMAP_EXTENT_ENCODED;
++	if (flags & SET_NOT_ALIGNED_FLAGS)
++		flags |= FIEMAP_EXTENT_NOT_ALIGNED;
++
++	memset(&extent, 0, sizeof(extent));
++	extent.fe_logical = logical;
++	extent.fe_physical = phys;
++	extent.fe_length = len;
++	extent.fe_flags = flags;
++
++	dest += fieinfo->fi_extents_mapped;
++	if (fieinfo->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY)
++		memcpy((__force void *)dest, &extent, sizeof(extent));
++	else if (copy_to_user(dest, &extent, sizeof(extent)))
++		return -EFAULT;
++
++	fieinfo->fi_extents_mapped++;
++	if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
++		return 1;
++	return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
++}
++
++static int ext4_iomap_to_fiemap(struct fiemap_extent_info *fi,
++				struct iomap *iomap, u32 flags)
++{
++	switch (iomap->type) {
++	case IOMAP_HOLE:
++		/* skip holes */
++		return 0;
++	case IOMAP_DELALLOC:
++		flags |= FIEMAP_EXTENT_DELALLOC | FIEMAP_EXTENT_UNKNOWN;
++		break;
++	case IOMAP_MAPPED:
++		break;
++	case IOMAP_UNWRITTEN:
++		flags |= FIEMAP_EXTENT_UNWRITTEN;
++		break;
++	case IOMAP_INLINE:
++		flags |= FIEMAP_EXTENT_DATA_INLINE;
++		break;
++	}
++
++	if (iomap->flags & IOMAP_F_MERGED)
++		flags |= FIEMAP_EXTENT_MERGED;
++	if (iomap->flags & IOMAP_F_SHARED)
++		flags |= FIEMAP_EXTENT_SHARED;
++
++	return ext4_fiemap_fill_next_extent(fi, iomap->offset,
++			iomap->addr != IOMAP_NULL_ADDR ? iomap->addr : 0,
++			iomap->length, flags);
++}
++
++struct fiemap_ctx {
++	struct fiemap_extent_info *fi;
++	struct iomap prev;
++};
++
++static loff_t
++ext4_iomap_fiemap_actor(struct inode *inode, loff_t pos, loff_t length,
++			void *data, struct iomap *iomap, struct iomap *srcmap)
++{
++	struct fiemap_ctx *ctx = data;
++	loff_t ret = length;
++
++	if (iomap->type == IOMAP_HOLE)
++		return length;
++
++	ret = ext4_iomap_to_fiemap(ctx->fi, &ctx->prev, 0);
++	ctx->prev = *iomap;
++	switch (ret) {
++	case 0:		/* success */
++		return length;
++	case 1:		/* extent array full */
++		return 0;
++	default:
++		return ret;
++	}
++}
++
++/*
++ * Execute an iomap write on a segment of the mapping that spans a
++ * contiguous range of pages that have identical block mapping state.
++ *
++ * This avoids the need to map pages individually, do individual allocations
++ * for each page and most importantly avoid the need for filesystem specific
++ * locking per page. Instead, all the operations are amortised over the entire
++ * range of pages. It is assumed that the filesystems will lock whatever
++ * resources they require in the iomap_begin call, and release them in the
++ * iomap_end call.
++ */
++static loff_t
++ext4_iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags,
++		const struct iomap_ops *ops, void *data, iomap_actor_t actor)
++{
++	struct iomap iomap = { .type = IOMAP_HOLE };
++	struct iomap srcmap = { .type = IOMAP_HOLE };
++	loff_t written = 0, ret;
++	u64 end;
++
++	/*
++	 * Need to map a range from start position for length bytes. This can
++	 * span multiple pages - it is only guaranteed to return a range of a
++	 * single type of pages (e.g. all into a hole, all mapped or all
++	 * unwritten). Failure at this point has nothing to undo.
++	 *
++	 * If allocation is required for this range, reserve the space now so
++	 * that the allocation is guaranteed to succeed later on. Once we copy
++	 * the data into the page cache pages, then we cannot fail otherwise we
++	 * expose transient stale data. If the reserve fails, we can safely
++	 * back out at this point as there is nothing to undo.
++	 */
++	ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap);
++	if (ret)
++		return ret;
++	if (WARN_ON(iomap.offset > pos)) {
++		written = -EIO;
++		goto out;
++	}
++	if (WARN_ON(iomap.length == 0)) {
++		written = -EIO;
++		goto out;
++	}
++
++	/*
++	 * Cut down the length to the one actually provided by the filesystem,
++	 * as it might not be able to give us the whole size that we requested.
++	 */
++	end = iomap.offset + iomap.length;
++	if (srcmap.type != IOMAP_HOLE)
++		end = min(end, srcmap.offset + srcmap.length);
++	if (pos + length > end)
++		length = end - pos;
++
++	/*
++	 * Now that we have guaranteed that the space allocation will succeed,
++	 * we can do the copy-in page by page without having to worry about
++	 * failures exposing transient data.
++	 *
++	 * To support COW operations, we read in data for partially written
++	 * blocks from the srcmap if the file system filled it in. In that
++	 * case the length is limited to the earlier of the two iomaps' ends.
++	 * If the file system did not provide a srcmap we pass in the normal
++	 * iomap into the actors so that they don't need to have special
++	 * handling for the two cases.
++	 */
++	written = actor(inode, pos, length, data, &iomap,
++			srcmap.type != IOMAP_HOLE ? &srcmap : &iomap);
++
++out:
++	/*
++	 * Now the data has been copied, commit the range we've copied.  This
++	 * should not fail unless the filesystem has had a fatal error.
++	 */
++	if (ops->iomap_end) {
++		ret = ops->iomap_end(inode, pos, length,
++				     written > 0 ? written : 0,
++				     flags, &iomap);
++	}
++
++	return written ? written : ret;
++}
++
++static
++int _ext4_iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fi,
++		u64 start, u64 len, const struct iomap_ops *ops)
++{
++	struct fiemap_ctx ctx;
++	loff_t ret;
++	bool in_kernel = fi->fi_flags & EXT4_FIEMAP_FLAG_MEMCPY;
++
++	memset(&ctx, 0, sizeof(ctx));
++	ctx.fi = fi;
++	ctx.prev.type = IOMAP_HOLE;
++
++	fi->fi_flags &= ~EXT4_FIEMAP_FLAG_MEMCPY;
++	ret = fiemap_prep(inode, fi, start, &len, 0);
++	if (in_kernel)
++		fi->fi_flags |= EXT4_FIEMAP_FLAG_MEMCPY;
++	if (ret)
++		return ret;
++
++	while (len > 0) {
++		ret = ext4_iomap_apply(inode, start, len, IOMAP_REPORT, ops,
++				       &ctx, ext4_iomap_fiemap_actor);
++		/* inode with no (attribute) mapping will give ENOENT */
++		if (ret == -ENOENT)
++			break;
++		if (ret < 0)
++			return ret;
++		if (ret == 0)
++			break;
++
++		start += ret;
++		len -= ret;
++	}
++
++	if (ctx.prev.type != IOMAP_HOLE) {
++		ret = ext4_iomap_to_fiemap(fi, &ctx.prev, FIEMAP_EXTENT_LAST);
++		if (ret < 0)
++			return ret;
++	}
++
++	return 0;
++}
++
++#define ext4_iomap_fiemap(i, f, s, l, ops) \
++	_ext4_iomap_fiemap((i), (f), (s), (l), (ops))
++#endif /* KERNEL_DS */
++
+ static int ext4_fill_es_cache_info(struct inode *inode,
+ 				   ext4_lblk_t block, ext4_lblk_t num,
+ 				   struct fiemap_extent_info *fieinfo)
+@@ -4918,11 +5176,10 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+ 
+ 	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
+ 		fieinfo->fi_flags &= ~FIEMAP_FLAG_XATTR;
+-		return iomap_fiemap(inode, fieinfo, start, len,
+-				    &ext4_iomap_xattr_ops);
++		return ext4_iomap_fiemap(inode, fieinfo, start, len,
++					 &ext4_iomap_xattr_ops);
+ 	}
+-
+-	return iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
++	return ext4_iomap_fiemap(inode, fieinfo, start, len, &ext4_iomap_report_ops);
+ }
+ 
+ int ext4_get_es_cache(struct inode *inode, struct fiemap_extent_info *fieinfo,
+-- 
+2.34.1
+
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-5.10.0-ml.series b/ldiskfs/kernel_patches/series/ldiskfs-5.10.0-ml.series
index 6e2f0ddae82d..fba982a3203d 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-5.10.0-ml.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-5.10.0-ml.series
@@ -30,3 +30,4 @@ rhel8/ext4-ialloc-uid-gid-and-pass-owner-down.patch
 base/ext4-projid-xattrs.patch
 linux-5.8/ext4-enc-flag.patch
 base/ext4-delayed-iput.patch
+linux-5.10/ext4-fiemap-kernel-data.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-5.11.0-40-ubuntu20.series b/ldiskfs/kernel_patches/series/ldiskfs-5.11.0-40-ubuntu20.series
index 4790c812e66d..36705f7a7464 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-5.11.0-40-ubuntu20.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-5.11.0-40-ubuntu20.series
@@ -30,3 +30,4 @@ rhel8/ext4-ialloc-uid-gid-and-pass-owner-down.patch
 base/ext4-projid-xattrs.patch
 linux-5.8/ext4-enc-flag.patch
 base/ext4-delayed-iput.patch
+linux-5.10/ext4-fiemap-kernel-data.patch
diff --git a/ldiskfs/kernel_patches/series/ldiskfs-5.14.21-sles15sp4.series b/ldiskfs/kernel_patches/series/ldiskfs-5.14.21-sles15sp4.series
index 5791d9ce8d48..d69ff72984ed 100644
--- a/ldiskfs/kernel_patches/series/ldiskfs-5.14.21-sles15sp4.series
+++ b/ldiskfs/kernel_patches/series/ldiskfs-5.14.21-sles15sp4.series
@@ -75,3 +75,4 @@ linux-5.14/ext4-ialloc-uid-gid-and-pass-owner-down.patch
 linux-5.14/ext4-projid-xattrs.patch
 base/ext4-delayed-iput.patch
 linux-5.14/ext4-xattr-disable-credits-check.patch
+linux-5.10/ext4-fiemap-kernel-data.patch
diff --git a/lustre/osd-ldiskfs/osd_io.c b/lustre/osd-ldiskfs/osd_io.c
index a3d0c8b04895..8612f57e6a17 100644
--- a/lustre/osd-ldiskfs/osd_io.c
+++ b/lustre/osd-ldiskfs/osd_io.c
@@ -1340,6 +1340,22 @@ struct osd_fextent {
 	unsigned int	mapped:1;
 };
 
+#ifdef KERNEL_DS
+#define DECLARE_MM_SEGMENT_T(name)		mm_segment_t name
+#define access_set_kernel(saved_fs, fei)				\
+do {									\
+	saved_fs = get_fs();						\
+	set_fs(KERNEL_DS);						\
+} while (0)
+#define access_unset_kernel(saved_fs, fei)		set_fs((saved_fs))
+#else
+#define DECLARE_MM_SEGMENT_T(name)
+#define access_set_kernel(saved_fs, fei)				\
+	(fei)->fi_flags |= LDISKFS_FIEMAP_FLAG_MEMCPY
+#define access_unset_kernel(saved_fs, fei) \
+	(fei)->fi_flags &= ~(LDISKFS_FIEMAP_FLAG_MEMCPY)
+#endif /* KERNEL_DS */
+
 static int osd_is_mapped(struct dt_object *dt, __u64 offset,
 			 struct osd_fextent *cached_extent)
 {
@@ -1349,6 +1365,7 @@ static int osd_is_mapped(struct dt_object *dt, __u64 offset,
 	struct fiemap_extent_info fei = { 0 };
 	struct fiemap_extent fe = { 0 };
 	int rc;
+	DECLARE_MM_SEGMENT_T(saved_fs);
 
 	if (block >= cached_extent->start && block < cached_extent->end)
 		return cached_extent->mapped;
@@ -1362,8 +1379,9 @@ static int osd_is_mapped(struct dt_object *dt, __u64 offset,
 
 	fei.fi_extents_max = 1;
 	fei.fi_extents_start = &fe;
-
+	access_set_kernel(saved_fs, &fei);
 	rc = inode->i_op->fiemap(inode, &fei, offset, FIEMAP_MAX_OFFSET-offset);
+	access_unset_kernel(saved_fs, &fei);
 	if (rc != 0)
 		return 0;
 
@@ -2617,6 +2635,7 @@ static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
 	struct inode *inode = osd_dt_obj(dt)->oo_inode;
 	u64 len;
 	int rc;
+	DECLARE_MM_SEGMENT_T(saved_fs);
 
 	LASSERT(inode);
 	if (inode->i_op->fiemap == NULL)
@@ -2636,7 +2655,9 @@ static int osd_fiemap_get(const struct lu_env *env, struct dt_object *dt,
 	if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
 		filemap_write_and_wait(inode->i_mapping);
 
+	access_set_kernel(saved_fs, &fieinfo);
 	rc = inode->i_op->fiemap(inode, &fieinfo, fm->fm_start, len);
+	access_unset_kernel(saved_fs, &fieinfo);
 	fm->fm_flags = fieinfo.fi_flags;
 	fm->fm_mapped_extents = fieinfo.fi_extents_mapped;
 
-- 
2.38.1
