File 0018-LU-13783-libcfs-switch-from-mmap_sem-to-mmap_lock.patch of Package lustre_2_12
From 2d1ce0f5707b85d5b348eec99a28d128bf7fce31 Mon Sep 17 00:00:00 2001
From: Mr NeilBrown <neilb@suse.de>
Date: Fri, 16 Oct 2020 17:18:29 +1100
Subject: [PATCH 18/35] LU-13783 libcfs: switch from ->mmap_sem to mmap_lock()
In Linux 5.8, ->mmap_sem is gone and the preferred interface
for locking the mmap is the suite of mmap*lock() functions.
So provide those functions when not available, and use them
as needed in Lustre.
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: I4ce3959f9e93eae10a7b7db03e2b0a1525723138
Reviewed-on: https://review.whamcloud.com/40288
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
---
libcfs/autoconf/lustre-libcfs.m4 | 20 +++++++++++
libcfs/include/libcfs/linux/linux-mem.h | 32 +++++++++++++++++
libcfs/libcfs/linux/linux-curproc.c | 8 ++---
lustre/llite/llite_mmap.c | 27 +++++++-------
lustre/llite/rw26.c | 4 +--
lustre/llite/vvp_io.c | 48 ++++++++++++-------------
6 files changed, 96 insertions(+), 43 deletions(-)
diff --git a/libcfs/autoconf/lustre-libcfs.m4 b/libcfs/autoconf/lustre-libcfs.m4
index c5d31ac92e39..24b1fa6c9d3c 100644
--- a/libcfs/autoconf/lustre-libcfs.m4
+++ b/libcfs/autoconf/lustre-libcfs.m4
@@ -1278,6 +1278,24 @@ nr_unstable_nfs_exists, [
EXTRA_KCFLAGS="$tmp_flags"
]) # LIBCFS_HAVE_NR_UNSTABLE_NFS
+#
+# LIBCFS_HAVE_MMAP_LOCK
+#
+# kernel v5.8-rc1~83^2~24
+# mmap locking API: rename mmap_sem to mmap_lock
+#
+AC_DEFUN([LIBCFS_HAVE_MMAP_LOCK], [
+LB_CHECK_COMPILE([if mmap_lock API is available],
+mmap_write_lock, [
+ #include <linux/mm.h>
+],[
+ mmap_write_lock(NULL);
+],[
+ AC_DEFINE(HAVE_MMAP_LOCK, 1,
+ [mmap_lock API is available.])
+])
+]) # LIBCFS_HAVE_MMAP_LOCK
+
#
# LIBCFS_PARAM_SET_UINT_MINMAX
#
@@ -1411,6 +1429,8 @@ LIBCFS_GET_REQUEST_KEY_AUTH
LIBCFS_LOOKUP_USER_KEY
LIBCFS_CACHE_DETAIL_WRITERS
LIBCFS_HAVE_NR_UNSTABLE_NFS
+# 5.8
+LIBCFS_HAVE_MMAP_LOCK
# 5.15
LIBCFS_PARAM_SET_UINT_MINMAX
]) # LIBCFS_PROG_LINUX
diff --git a/libcfs/include/libcfs/linux/linux-mem.h b/libcfs/include/libcfs/linux/linux-mem.h
index 6aca65b38a7f..1ae22859ce67 100644
--- a/libcfs/include/libcfs/linux/linux-mem.h
+++ b/libcfs/include/libcfs/linux/linux-mem.h
@@ -121,4 +121,36 @@ void remove_shrinker(struct shrinker *shrinker)
kfree(shrinker);
}
+#ifndef HAVE_MMAP_LOCK
+static inline void mmap_write_lock(struct mm_struct *mm)
+{
+ down_write(&mm->mmap_sem);
+}
+
+static inline bool mmap_write_trylock(struct mm_struct *mm)
+{
+ return down_write_trylock(&mm->mmap_sem) != 0;
+}
+
+static inline void mmap_write_unlock(struct mm_struct *mm)
+{
+ up_write(&mm->mmap_sem);
+}
+
+static inline void mmap_read_lock(struct mm_struct *mm)
+{
+ down_read(&mm->mmap_sem);
+}
+
+static inline bool mmap_read_trylock(struct mm_struct *mm)
+{
+ return down_read_trylock(&mm->mmap_sem) != 0;
+}
+
+static inline void mmap_read_unlock(struct mm_struct *mm)
+{
+ up_read(&mm->mmap_sem);
+}
+#endif
+
#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/libcfs/libcfs/linux/linux-curproc.c b/libcfs/libcfs/linux/linux-curproc.c
index 8e843a204f39..799c40ea638e 100644
--- a/libcfs/libcfs/linux/linux-curproc.c
+++ b/libcfs/libcfs/linux/linux-curproc.c
@@ -141,11 +141,11 @@ static int cfs_access_process_vm(struct task_struct *tsk,
struct page *page;
void *old_buf = buf;
- /* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
- * which is already holding mmap_sem for writes. If some other
+ /* Avoid deadlocks on mmap_lock if called from sys_mmap_pgoff(),
+ * which is already holding mmap_lock for writes. If some other
* thread gets the write lock in the meantime, this thread will
* block, but at least it won't deadlock on itself. LU-1735 */
- if (down_read_trylock(&mm->mmap_sem) == 0)
+ if (!mmap_read_trylock(mm))
return -EDEADLK;
/* ignore errors, just check how much was successfully transferred */
@@ -183,7 +183,7 @@ static int cfs_access_process_vm(struct task_struct *tsk,
buf += bytes;
addr += bytes;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
return buf - old_buf;
}
diff --git a/lustre/llite/llite_mmap.c b/lustre/llite/llite_mmap.c
index fee90067a27e..9be9bd690ee6 100644
--- a/lustre/llite/llite_mmap.c
+++ b/lustre/llite/llite_mmap.c
@@ -54,21 +54,22 @@ void policy_from_vma(union ldlm_policy_data *policy, struct vm_area_struct *vma,
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count)
{
- struct vm_area_struct *vma, *ret = NULL;
- ENTRY;
+ struct vm_area_struct *vma, *ret = NULL;
+ ENTRY;
- /* mmap_sem must have been held by caller. */
- LASSERT(!down_write_trylock(&mm->mmap_sem));
+ /* mmap_lock must have been held by caller. */
+ LASSERT(!mmap_write_trylock(mm));
- for(vma = find_vma(mm, addr);
- vma != NULL && vma->vm_start < (addr + count); vma = vma->vm_next) {
- if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
- vma->vm_flags & VM_SHARED) {
- ret = vma;
- break;
- }
- }
- RETURN(ret);
+ for (vma = find_vma(mm, addr);
+ vma != NULL && vma->vm_start < (addr + count);
+ vma = vma->vm_next) {
+ if (vma->vm_ops && vma->vm_ops == &ll_file_vm_ops &&
+ vma->vm_flags & VM_SHARED) {
+ ret = vma;
+ break;
+ }
+ }
+ RETURN(ret);
}
/**
diff --git a/lustre/llite/rw26.c b/lustre/llite/rw26.c
index c5a4d4d8172b..9a1f0b6021ba 100644
--- a/lustre/llite/rw26.c
+++ b/lustre/llite/rw26.c
@@ -453,11 +453,11 @@ static inline int ll_get_user_pages(int rw, unsigned long user_addr,
OBD_ALLOC_LARGE(*pages, *max_pages * sizeof(**pages));
if (*pages) {
- down_read(&current->mm->mmap_sem);
+ mmap_read_lock(current->mm);
result = get_user_pages(current, current->mm, user_addr,
*max_pages, (rw == READ), 0, *pages,
NULL);
- up_read(&current->mm->mmap_sem);
+ mmap_read_unlock(current->mm);
if (unlikely(result <= 0))
OBD_FREE_LARGE(*pages, *max_pages * sizeof(**pages));
}
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 8bcc2f469b16..6d8070c5b8bf 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -468,17 +468,17 @@ static int vvp_mmap_locks(const struct lu_env *env,
addr = (unsigned long)iov.iov_base;
count = iov.iov_len;
- if (count == 0)
- continue;
+ if (count == 0)
+ continue;
count += addr & ~PAGE_MASK;
addr &= PAGE_MASK;
- down_read(&mm->mmap_sem);
- while((vma = our_vma(mm, addr, count)) != NULL) {
+ mmap_read_lock(mm);
+ while ((vma = our_vma(mm, addr, count)) != NULL) {
struct dentry *de = file_dentry(vma->vm_file);
struct inode *inode = de->d_inode;
- int flags = CEF_MUST;
+ int flags = CEF_MUST;
if (ll_file_nolock(vma->vm_file)) {
/*
@@ -488,24 +488,24 @@ static int vvp_mmap_locks(const struct lu_env *env,
break;
}
- /*
- * XXX: Required lock mode can be weakened: CIT_WRITE
- * io only ever reads user level buffer, and CIT_READ
- * only writes on it.
- */
- policy_from_vma(&policy, vma, addr, count);
- descr->cld_mode = vvp_mode_from_vma(vma);
- descr->cld_obj = ll_i2info(inode)->lli_clob;
- descr->cld_start = cl_index(descr->cld_obj,
- policy.l_extent.start);
- descr->cld_end = cl_index(descr->cld_obj,
- policy.l_extent.end);
- descr->cld_enq_flags = flags;
- result = cl_io_lock_alloc_add(env, io, descr);
-
- CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
- descr->cld_mode, descr->cld_start,
- descr->cld_end);
+ /*
+ * XXX: Required lock mode can be weakened: CIT_WRITE
+ * io only ever reads user level buffer, and CIT_READ
+ * only writes on it.
+ */
+ policy_from_vma(&policy, vma, addr, count);
+ descr->cld_mode = vvp_mode_from_vma(vma);
+ descr->cld_obj = ll_i2info(inode)->lli_clob;
+ descr->cld_start = cl_index(descr->cld_obj,
+ policy.l_extent.start);
+ descr->cld_end = cl_index(descr->cld_obj,
+ policy.l_extent.end);
+ descr->cld_enq_flags = flags;
+ result = cl_io_lock_alloc_add(env, io, descr);
+
+ CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
+ descr->cld_mode, descr->cld_start,
+ descr->cld_end);
if (result < 0)
break;
@@ -516,7 +516,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
count -= vma->vm_end - addr;
addr = vma->vm_end;
}
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
if (result < 0)
break;
}
--
2.41.0