File lincdadrv_whitespace.diff of Package degirum

diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000..a9f06af
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,6 @@
+* text=auto
+
+*.c text
+*.h text
+Makefile text
+*.sh text
diff --git a/src/cdadrv.c b/src/cdadrv.c
index 7e31633..e4c7b58 100644
--- a/src/cdadrv.c
+++ b/src/cdadrv.c
@@ -1,405 +1,410 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
-//
-// CDA linux driver mem blocks/mem maps and interrupt request handler
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms and conditions of the GNU General Public License,
-// version 2, as published by the Free Software Foundation.
-//
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-
-#include "cdadrv.h"
-#include "cdaioctl.h"
-
-MODULE_AUTHOR("DeGirum Corp., Egor Pomozov");
-MODULE_DESCRIPTION("CDA linux driver to access pci devices");
-MODULE_LICENSE("GPL");
-MODULE_VERSION("0.5.0.3");
-// The version has to be in the format n.n.n.n, where each n is a single digit
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)
-#error Too old kernel
-#endif
-
-static dev_t cdadev_first;
-static const char cda_name[] = "cda";
-static int req_pci_did = 0;
-static int req_pci_vid = 0;
-static int test_probe = 0;
-
-#define CDA_DEV_MINOR_MAX 32
-static DEFINE_SPINLOCK(cdadevlist_sl);
-static DEFINE_IDA(cdaminor_ida);
-static LIST_HEAD(cdadevs);
-
-// Module parameters
-module_param_named(did, req_pci_did, int, 0644);
-MODULE_PARM_DESC(did, "Set required PCI device ID");
-module_param_named(vid, req_pci_vid, int, 0644);
-MODULE_PARM_DESC(vid, "Set required PCI vendor ID");
-module_param_named(test_probe, test_probe, int, 0644);
-MODULE_PARM_DESC(test_probe, "Check permissions to load driver");
-
-static void cdadev_release(struct device *kdev);
-static void cda_pci_remove(struct pci_dev *pcidev);
-static int cda_pci_probe(struct pci_dev *pcidev,
-			       const struct pci_device_id *id);
-
-static int cda_cdev_open(struct inode *ino, struct file *file);
-static int cda_cdev_release(struct inode *ino, struct file *file);
-static long cda_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-
-static struct pci_device_id cda_pci_ids[] = {
-	{ PCI_DEVICE(0x1f0d, 0x0100) },
-	{ PCI_DEVICE(0x1f0d, 0x8101) },
-	{ PCI_DEVICE(0x1f0d, 0x0101) },
-	{ PCI_DEVICE(0x10ee, 0x8011) },
-	{ PCI_DEVICE(0, 0) }, 
-	{ PCI_DEVICE(0, 0) },
-};
-
-static struct pci_driver cda_pci = {
-    .name = cda_name,
-    .probe = cda_pci_probe,
-    .remove = cda_pci_remove,
-    .id_table = cda_pci_ids,
-};
-
-static struct file_operations cda_fileops = {
-	.owner = THIS_MODULE,
-	.open = cda_cdev_open,
-	.release = cda_cdev_release,
-    .unlocked_ioctl = cda_cdev_ioctl,
-
-};
-
-static struct class cda_class = {
-	.name = cda_name,
-	.dev_release = cdadev_release,
-};
-/*
-static inline bool cda_kernel_is_locked_down(void)
-{
-#ifdef CONFIG_LOCK_DOWN_KERNEL
-#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT / * fedora * /
-	return kernel_is_locked_down(NULL);
-#elif CONFIG_EFI_SECURE_BOOT_LOCK_DOWN / * ubuntu * /
-	return kernel_is_locked_down();
-#else
-	return false;
-#endif
-#else
-	return false;
-#endif
-}
-*/
-static void cdadev_free(struct cda_dev *cdadev)
-{
-	ida_simple_remove(&cdaminor_ida, cdadev->minor);
-	device_del(&cdadev->dev);
-	put_device(&cdadev->dev);
-}
-
-static int cdadev_init(struct cda_dev *cdadev)
-{
-    // Create and initialize device structures
-	int ret = -ENOMEM;
-	struct device *dev = &cdadev->dev;
-	device_initialize(dev);
-
-	dev->class = &cda_class;
-	dev->parent = &cdadev->pcidev->dev;
-	
-	cdadev->dummy_blk = kzalloc(sizeof(*cdadev->dummy_blk), in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	if (!cdadev->dummy_blk) {
-		dev_err(&cdadev->pcidev->dev, "Can't alloc dummy blk\n");
-		goto alloc_dummy;
-	}
-	idr_init(&cdadev->mblk_idr);
-	ret = ida_simple_get(&cdaminor_ida, 0, CDA_DEV_MINOR_MAX, GFP_KERNEL);
-	if( ret < 0 )
-		goto err_minor_get;
-
-	cdadev->minor = ret;
-	dev->devt = MKDEV(MAJOR(cdadev_first), cdadev->minor);
-	ret = dev_set_name(dev, "cda%02d", cdadev->minor);
-	if (ret)
-		goto err_set_name;
-
-	ret = device_add(dev);
-	if (ret) {
-		dev_err(&cdadev->pcidev->dev, "Unable to create device. Error 0x%x\n", ret);
-		goto err_device_add;
-	}
-
-	INIT_LIST_HEAD(&cdadev->mem_maps);
-	INIT_LIST_HEAD(&cdadev->mem_blocks);
-	spin_lock_init(&cdadev->mblk_sl);
-
-	mutex_init(&cdadev->ilock);
-	cdadev->ints = NULL;
-
-	return 0;
-err_device_add:
-err_set_name:
-	ida_simple_remove(&cdaminor_ida, cdadev->minor);
-err_minor_get:
-alloc_dummy:
-	put_device(dev);
-	return ret;
-}
-
-static int cda_pci_init(struct pci_dev *pcidev)
-{
-    // PCI initialization
-	int ret;
-	ret = pci_enable_device_mem(pcidev);
-	if( ret ) {
-		dev_err(&pcidev->dev, "Cannot enable PCI device mem\n");
-		goto err_en_devmem;
-	}
-
-	if( dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)) &&
-		(ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32))) ) {
-		dev_err(&pcidev->dev, "Set DMA mask 32/64 failed: 0x%x\n", ret);
-		goto err_dma_set_mask;
-	}
-
-	ret = pci_request_regions(pcidev, cda_name);
-	if( ret ) {
-		dev_err(&pcidev->dev, "Fail request regions: 0x%x\n", ret);
-		goto err_req_regions;
-	}
-
-	pci_set_master(pcidev);
-	return 0;
-err_req_regions:
-err_dma_set_mask:
-	pci_disable_device(pcidev);
-err_en_devmem:
-	return ret;
-}
-
-static int cda_cdev_init(struct cda_dev *cdadev)
-{
-	int ret;
-	struct cdev *cdev = &cdadev->cdev;
-
-	cdev_init(cdev, &cda_fileops);
-	cdev->owner = THIS_MODULE;
-	kobject_set_name(&cdev->kobj, "%s%d", cda_name, cdadev->minor);
-	ret = cdev_add(cdev, MKDEV(MAJOR(cdadev_first), cdadev->minor), CDA_DEV_MINOR_MAX);
-	if (ret)
-		return ret;
-    return 0;
-}
-
-static int cda_pci_probe(struct pci_dev *pcidev, 
-                        const struct pci_device_id *id)
-{
-	int ret;
-	struct cda_dev *cdadev = kzalloc(sizeof(*cdadev), in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	if (!cdadev) {
-		return -ENOMEM;
-	}
-
-	cdadev->pcidev = pcidev;
-	ret = cdadev_init(cdadev);
-	if( ret ) 
-		goto err_cdadev_init;
-
-	ret = cda_pci_init(pcidev);
-	if( ret ) 
-		goto err_pci_init;
-
-	ret = cda_mems_create(cdadev);
-	if( ret )
-		goto err_sysfsmem;
-
-	ret = cda_open_bars(cdadev);
-	if( ret )
-		goto err_check_bar;
-
-	ret = cda_cdev_init(cdadev);
-	if( ret )
-		goto err_cdev_init;
-
-	spin_lock(&cdadevlist_sl);
-	list_add(&cdadev->devices, &cdadevs);
-	spin_unlock(&cdadevlist_sl);
-
-	pci_set_drvdata(pcidev, cdadev);
-	return 0;
-err_cdev_init:
-	cda_release_bars(cdadev);
-err_check_bar:
-	cda_mems_release(cdadev);
-err_sysfsmem:
-	pci_release_regions(pcidev);
-	pci_disable_device(pcidev);
-err_pci_init:
-	cdadev_free(cdadev);
-err_cdadev_init:
-	return ret;
-}
-
-static void cda_pci_remove(struct pci_dev *pcidev)
-{
-	struct cda_dev *cdadev = pci_get_drvdata(pcidev);
-
-	if (!cdadev)
-		return;
-
-	spin_lock(&cdadevlist_sl);
-	list_del(&cdadev->devices);
-	spin_unlock(&cdadevlist_sl);
-
-	cdev_del(&cdadev->cdev);
-	cda_release_bars(cdadev);
-
-	cda_mems_release(cdadev);
-
-	cda_free_irqs(cdadev, NULL);
-    cda_unmmap_dev_mem(cdadev, NULL);
-    cda_free_dev_mem(cdadev, NULL);
-
-	pci_release_regions(pcidev);
-	pci_disable_device(pcidev);
-
-	cdadev_free(cdadev);
-}
-
-static int cda_cdev_open(struct inode *ino, struct file *file)
-{
-	int ret;
-	struct cda_dev *cdadev = 
-		container_of(ino->i_cdev,
-		struct cda_dev,
-		cdev);
-	if (!cdadev) {
-		ret = -ENODEV;
-		goto out;
-	}
-	get_device(&cdadev->dev);
-	file->private_data = cdadev;
-	return nonseekable_open(ino, file);
-out:
-	return ret;
-}
-
-static int cda_cdev_release(struct inode *ino, struct file *file)
-{
-	struct cda_dev *cdadev = file->private_data;
-	if (!cdadev)
-		return -ENODEV;
-
-	cda_cancel_req(cdadev, (void *)file);
-	cda_free_irqs(cdadev, (void *)file);
-    cda_unmmap_dev_mem(cdadev, (void *)file);
-    cda_free_dev_mem(cdadev, (void *)file);
-    cda_sem_rel_by_owner(cdadev, (void *)file);
-
-	put_device(&cdadev->dev);
-	return 0;
-}
-
-static long cda_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
-	struct cda_dev *cdadev = file->private_data;
-	switch (cmd) {
-
-	case CDA_ALLOC_MEM:
-		return cda_alloc_mem(cdadev, (void *)file, (void __user *)arg);
-
-	case CDA_FREE_MEM:
-		return cda_free_mem_by_idx(cdadev, (void *)file, (void __user *)arg);
-
-	case CDA_MAP_MEM:
-		return cda_map_mem(cdadev, (void *)file, (void __user *)arg);
-
-	case CDA_UNMAP_MEM:
-		return cda_unmap_mem_by_idx(cdadev, (void *)file, (void __user *)arg);
-
-	case CDA_INIT_INT:
-		return cda_init_interrupts(cdadev, (void *)file, (void __user *)arg);
-
-	case CDA_FREE_INT:
-		return cda_free_irqs(cdadev, (void *)file);
-
-	case CDA_REQ_INT: 
-		return cda_req_int(cdadev, (void *)file, (void __user *) arg);
-
-	case CDA_INT_CANCEL:
-		return cda_cancel_req(cdadev, (void *)file);
-
-	case CDA_SEM_AQ: 
-		return cda_sem_aq(cdadev, (void *)file, (void __user *) arg);
-
-	case CDA_SEM_REL:
-		return cda_sem_rel(cdadev, (void *)file, (void __user *) arg);
-
-	default:
-		return -ENOTTY;
-	}
-}
-
-static void cdadev_release(struct device *dev)
-{
-	struct cda_dev *cdadev = container_of(dev, struct cda_dev, dev);
-	kfree(cdadev);
-}
-
-static int __init cdadrv_init(void)
-{
-    int ret;
-	size_t pci_id_table_size = ARRAY_SIZE(cda_pci_ids);
-	if( test_probe ) {
-		printk("Test run. Nothing initialized\n");
-		return 0;
-	}
-
-	ret = alloc_chrdev_region(&cdadev_first, 0, CDA_DEV_MINOR_MAX, cda_name);
-	if( ret )
-		goto err_alloc_cdev_reg;
-
-	ret = class_register(&cda_class);
-	if( ret )
-		goto err_cls_reg;
-
-	if( (req_pci_did || req_pci_vid) && pci_id_table_size >= 2 ) {
-		// Last table element is 0,0
-		// Update pre-last item
-		cda_pci_ids[pci_id_table_size-2].vendor = req_pci_vid;
-		cda_pci_ids[pci_id_table_size-2].device = req_pci_did;
-	}
-	ret = pci_register_driver(&cda_pci);
-	if( ret )
-        goto err_pci_reg_drv;
-
-	return 0;
-
-err_pci_reg_drv:
-	class_unregister(&cda_class);
-err_cls_reg:
-	unregister_chrdev_region(cdadev_first, CDA_DEV_MINOR_MAX);
-err_alloc_cdev_reg:
-	return ret;
-}
-
-static void __exit dcadrv_exit(void)
-{	
-	if( test_probe ) { 
-		printk("Stop test run. Nothing initialized\n");
-		return;
-	}
-    pci_unregister_driver(&cda_pci);
-	class_unregister(&cda_class);
-	unregister_chrdev_region(cdadev_first, CDA_DEV_MINOR_MAX);
-}
-
-module_init(cdadrv_init);
-module_exit(dcadrv_exit);
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
+//
+// CDA linux driver mem blocks/mem maps and interrupt request handler
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms and conditions of the GNU General Public License,
+// version 2, as published by the Free Software Foundation.
+//
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+
+#include "cdadrv.h"
+#include "cdaioctl.h"
+
+MODULE_AUTHOR("DeGirum Corp., Egor Pomozov");
+MODULE_DESCRIPTION("CDA linux driver to access pci devices");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.5.0.3");
+// The version has to be in the format n.n.n.n, where each n is a single digit
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+#error Too old kernel
+#endif
+
+static dev_t cdadev_first;
+static const char cda_name[] = "cda";
+static int req_pci_did;
+static int req_pci_vid;
+static int test_probe;
+
+#define CDA_DEV_MINOR_MAX 32
+static DEFINE_SPINLOCK(cdadevlist_sl);
+static DEFINE_IDA(cdaminor_ida);
+static LIST_HEAD(cdadevs);
+
+// Module parameters
+module_param_named(did, req_pci_did, int, 0644);
+MODULE_PARM_DESC(did, "Set required PCI device ID");
+module_param_named(vid, req_pci_vid, int, 0644);
+MODULE_PARM_DESC(vid, "Set required PCI vendor ID");
+module_param_named(test_probe, test_probe, int, 0644);
+MODULE_PARM_DESC(test_probe, "Check permissions to load driver");
+
+static void cdadev_release(struct device *kdev);
+static void cda_pci_remove(struct pci_dev *pcidev);
+static int cda_pci_probe(struct pci_dev *pcidev,
+			 const struct pci_device_id *id);
+
+static int cda_cdev_open(struct inode *ino, struct file *file);
+static int cda_cdev_release(struct inode *ino, struct file *file);
+static long cda_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+static struct pci_device_id cda_pci_ids[] = {
+	{ PCI_DEVICE(0x1f0d, 0x0100) },
+	{ PCI_DEVICE(0x1f0d, 0x8101) },
+	{ PCI_DEVICE(0x1f0d, 0x0101) },
+	{ PCI_DEVICE(0x10ee, 0x8011) },
+	{ PCI_DEVICE(0, 0) },
+	{ PCI_DEVICE(0, 0) },
+};
+
+static struct pci_driver cda_pci = {
+	.name = cda_name,
+	.probe = cda_pci_probe,
+	.remove = cda_pci_remove,
+	.id_table = cda_pci_ids,
+};
+
+static const struct file_operations cda_fileops = {
+	.owner = THIS_MODULE,
+	.open = cda_cdev_open,
+	.release = cda_cdev_release,
+	.unlocked_ioctl = cda_cdev_ioctl,
+};
+
+static struct class cda_class = {
+	.name = cda_name,
+	.dev_release = cdadev_release,
+};
+/*
+static inline bool cda_kernel_is_locked_down(void)
+{
+#ifdef CONFIG_LOCK_DOWN_KERNEL
+#ifdef CONFIG_LOCK_DOWN_IN_EFI_SECURE_BOOT / * fedora * /
+	return kernel_is_locked_down(NULL);
+#elif CONFIG_EFI_SECURE_BOOT_LOCK_DOWN / * ubuntu * /
+	return kernel_is_locked_down();
+#else
+	return false;
+#endif
+#else
+	return false;
+#endif
+}
+*/
+static void cdadev_free(struct cda_dev *cdadev)
+{
+	ida_simple_remove(&cdaminor_ida, cdadev->minor);
+	device_del(&cdadev->dev);
+	put_device(&cdadev->dev);
+}
+
+static int cdadev_init(struct cda_dev *cdadev)
+{
+	// Create and initialize device structures
+	int ret = -ENOMEM;
+	struct device *dev = &cdadev->dev;
+
+	device_initialize(dev);
+
+	dev->class = &cda_class;
+	dev->parent = &cdadev->pcidev->dev;
+
+	cdadev->dummy_blk = kzalloc(sizeof(*cdadev->dummy_blk), GFP_KERNEL);
+	if (!cdadev->dummy_blk)
+		goto alloc_dummy;
+	idr_init(&cdadev->mblk_idr);
+	ret = ida_simple_get(&cdaminor_ida, 0, CDA_DEV_MINOR_MAX, GFP_KERNEL);
+	if (ret < 0)
+		goto err_minor_get;
+
+	cdadev->minor = ret;
+	dev->devt = MKDEV(MAJOR(cdadev_first), cdadev->minor);
+	ret = dev_set_name(dev, "cda%02d", cdadev->minor);
+	if (ret)
+		goto err_set_name;
+
+	ret = device_add(dev);
+	if (ret) {
+		dev_err(&cdadev->pcidev->dev, "Unable to create device. Error 0x%x\n", ret);
+		goto err_device_add;
+	}
+
+	INIT_LIST_HEAD(&cdadev->mem_maps);
+	INIT_LIST_HEAD(&cdadev->mem_blocks);
+	spin_lock_init(&cdadev->mblk_sl);
+
+	mutex_init(&cdadev->ilock);
+	cdadev->ints = NULL;
+
+	return 0;
+err_device_add:
+err_set_name:
+	ida_simple_remove(&cdaminor_ida, cdadev->minor);
+err_minor_get:
+alloc_dummy:
+	put_device(dev);
+	return ret;
+}
+
+static int cda_pci_init(struct pci_dev *pcidev)
+{
+	// PCI initialization
+	int ret;
+
+	ret = pci_enable_device_mem(pcidev);
+	if (ret) {
+		dev_err(&pcidev->dev, "Cannot enable PCI device mem\n");
+		goto err_en_devmem;
+	}
+
+	ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64));
+	if (ret) {
+		ret = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32));
+		if (ret) {
+			dev_err(&pcidev->dev, "Set DMA mask 32/64 failed: 0x%x\n", ret);
+			goto err_dma_set_mask;
+		}
+	}
+
+	ret = pci_request_regions(pcidev, cda_name);
+	if (ret) {
+		dev_err(&pcidev->dev, "Fail request regions: 0x%x\n", ret);
+		goto err_req_regions;
+	}
+
+	pci_set_master(pcidev);
+	return 0;
+err_req_regions:
+err_dma_set_mask:
+	pci_disable_device(pcidev);
+err_en_devmem:
+	return ret;
+}
+
+static int cda_cdev_init(struct cda_dev *cdadev)
+{
+	int ret;
+	struct cdev *cdev = &cdadev->cdev;
+
+	cdev_init(cdev, &cda_fileops);
+	cdev->owner = THIS_MODULE;
+	kobject_set_name(&cdev->kobj, "%s%d", cda_name, cdadev->minor);
+	ret = cdev_add(cdev, MKDEV(MAJOR(cdadev_first), cdadev->minor), CDA_DEV_MINOR_MAX);
+	if (ret)
+		return ret;
+	return 0;
+}
+
+static int cda_pci_probe(struct pci_dev *pcidev,
+			 const struct pci_device_id *id)
+{
+	int ret;
+	struct cda_dev *cdadev = kzalloc(sizeof(*cdadev), GFP_KERNEL);
+
+	if (!cdadev)
+		return -ENOMEM;
+
+	cdadev->pcidev = pcidev;
+	ret = cdadev_init(cdadev);
+	if (ret)
+		goto err_cdadev_init;
+
+	ret = cda_pci_init(pcidev);
+	if (ret)
+		goto err_pci_init;
+
+	ret = cda_mems_create(cdadev);
+	if (ret)
+		goto err_sysfsmem;
+
+	ret = cda_open_bars(cdadev);
+	if (ret)
+		goto err_check_bar;
+
+	ret = cda_cdev_init(cdadev);
+	if (ret)
+		goto err_cdev_init;
+
+	spin_lock(&cdadevlist_sl);
+	list_add(&cdadev->devices, &cdadevs);
+	spin_unlock(&cdadevlist_sl);
+
+	pci_set_drvdata(pcidev, cdadev);
+	return 0;
+err_cdev_init:
+	cda_release_bars(cdadev);
+err_check_bar:
+	cda_mems_release(cdadev);
+err_sysfsmem:
+	pci_release_regions(pcidev);
+	pci_disable_device(pcidev);
+err_pci_init:
+	cdadev_free(cdadev);
+err_cdadev_init:
+	return ret;
+}
+
+static void cda_pci_remove(struct pci_dev *pcidev)
+{
+	struct cda_dev *cdadev = pci_get_drvdata(pcidev);
+
+	if (!cdadev)
+		return;
+
+	spin_lock(&cdadevlist_sl);
+	list_del(&cdadev->devices);
+	spin_unlock(&cdadevlist_sl);
+
+	cdev_del(&cdadev->cdev);
+	cda_release_bars(cdadev);
+
+	cda_mems_release(cdadev);
+
+	cda_free_irqs(cdadev, NULL);
+	cda_unmmap_dev_mem(cdadev, NULL);
+	cda_free_dev_mem(cdadev, NULL);
+
+	pci_release_regions(pcidev);
+	pci_disable_device(pcidev);
+
+	cdadev_free(cdadev);
+}
+
+static int cda_cdev_open(struct inode *ino, struct file *file)
+{
+	int ret;
+	struct cda_dev *cdadev =
+		container_of(ino->i_cdev,
+		struct cda_dev,
+		cdev);
+	if (!cdadev) {
+		ret = -ENODEV;
+		goto out;
+	}
+	get_device(&cdadev->dev);
+	file->private_data = cdadev;
+	return nonseekable_open(ino, file);
+out:
+	return ret;
+}
+
+static int cda_cdev_release(struct inode *ino, struct file *file)
+{
+	struct cda_dev *cdadev = file->private_data;
+
+	if (!cdadev)
+		return -ENODEV;
+
+	cda_cancel_req(cdadev, (void *)file);
+	cda_free_irqs(cdadev, (void *)file);
+	cda_unmmap_dev_mem(cdadev, (void *)file);
+	cda_free_dev_mem(cdadev, (void *)file);
+	cda_sem_rel_by_owner(cdadev, (void *)file);
+
+	put_device(&cdadev->dev);
+	return 0;
+}
+
+static long cda_cdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct cda_dev *cdadev = file->private_data;
+
+	switch (cmd) {
+	case CDA_ALLOC_MEM:
+		return cda_alloc_mem(cdadev, (void *)file, (void __user *)arg);
+
+	case CDA_FREE_MEM:
+		return cda_free_mem_by_idx(cdadev, (void *)file, (void __user *)arg);
+
+	case CDA_MAP_MEM:
+		return cda_map_mem(cdadev, (void *)file, (void __user *)arg);
+
+	case CDA_UNMAP_MEM:
+		return cda_unmap_mem_by_idx(cdadev, (void *)file, (void __user *)arg);
+
+	case CDA_INIT_INT:
+		return cda_init_interrupts(cdadev, (void *)file, (void __user *)arg);
+
+	case CDA_FREE_INT:
+		return cda_free_irqs(cdadev, (void *)file);
+
+	case CDA_REQ_INT:
+		return cda_req_int(cdadev, (void *)file, (void __user *) arg);
+
+	case CDA_INT_CANCEL:
+		return cda_cancel_req(cdadev, (void *)file);
+
+	case CDA_SEM_AQ:
+		return cda_sem_aq(cdadev, (void *)file, (void __user *) arg);
+
+	case CDA_SEM_REL:
+		return cda_sem_rel(cdadev, (void *)file, (void __user *) arg);
+
+	default:
+		return -ENOTTY;
+	}
+}
+
+static void cdadev_release(struct device *dev)
+{
+	struct cda_dev *cdadev = container_of(dev, struct cda_dev, dev);
+
+	kfree(cdadev);
+}
+
+static int __init cdadrv_init(void)
+{
+	int ret;
+	size_t pci_id_table_size = ARRAY_SIZE(cda_pci_ids);
+
+	if (test_probe) {
+		printk("Test run. Nothing initialized\n");
+		return 0;
+	}
+
+	ret = alloc_chrdev_region(&cdadev_first, 0, CDA_DEV_MINOR_MAX, cda_name);
+	if (ret)
+		goto err_alloc_cdev_reg;
+
+	ret = class_register(&cda_class);
+	if (ret)
+		goto err_cls_reg;
+
+	if ((req_pci_did || req_pci_vid) && pci_id_table_size >= 2) {
+		// Last table element is 0,0
+		// Update pre-last item
+		cda_pci_ids[pci_id_table_size-2].vendor = req_pci_vid;
+		cda_pci_ids[pci_id_table_size-2].device = req_pci_did;
+	}
+	ret = pci_register_driver(&cda_pci);
+	if (ret)
+		goto err_pci_reg_drv;
+
+	return 0;
+
+err_pci_reg_drv:
+	class_unregister(&cda_class);
+err_cls_reg:
+	unregister_chrdev_region(cdadev_first, CDA_DEV_MINOR_MAX);
+err_alloc_cdev_reg:
+	return ret;
+}
+
+static void __exit dcadrv_exit(void)
+{
+	if (test_probe) {
+		printk("Stop test run. Nothing initialized\n");
+		return;
+	}
+	pci_unregister_driver(&cda_pci);
+	class_unregister(&cda_class);
+	unregister_chrdev_region(cdadev_first, CDA_DEV_MINOR_MAX);
+}
+
+module_init(cdadrv_init);
+module_exit(dcadrv_exit);
diff --git a/src/cdadrv.h b/src/cdadrv.h
index 3a8ca2d..67a7e2d 100644
--- a/src/cdadrv.h
+++ b/src/cdadrv.h
@@ -1,71 +1,71 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
-//
-// CDA linux driver mem blocks/mem maps and interrupt request handler
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms and conditions of the GNU General Public License,
-// version 2, as published by the Free Software Foundation.
-//
-#include <linux/pci.h>
-#include <linux/cdev.h>
-#include <linux/version.h>
-
-
-#define CDA_MAX_DRV_SEMAPHORES (16)
-
-struct cda_interrupts;
-struct cda_bar;
-struct cda_dev;
-// Dummy block for fast releasing
-struct cda_dummy_blk {
-	struct cda_dev *dev;
-	int index;
-};
-
-struct cda_dev {
-    struct cdev cdev;
-    struct device dev;
-
-	int minor;
-	struct list_head devices;
-
-	struct pci_dev *pcidev;
-	unsigned long stored_flags[PCI_ROM_RESOURCE];
-	
-	struct mutex ilock;
-	struct cda_interrupts *ints;
-
-	struct kobject *kobj_mems;
-	struct cda_dummy_blk *dummy_blk;
-	struct idr mblk_idr;
-	spinlock_t mblk_sl;
-	struct list_head mem_blocks;
-	struct list_head mem_maps;
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
-	// Security kernel lock needs w/a to access BARs
-	struct kobject *kobj_bars;
-	struct cda_bar *sysfs_bar[PCI_ROM_RESOURCE]; // 6 BARs excl. ROM
-#endif
-	u64 semaphores[CDA_MAX_DRV_SEMAPHORES];
-	void *sem_owner[CDA_MAX_DRV_SEMAPHORES];
-};
-
-int cda_alloc_mem(struct cda_dev *dev, void *owner, void __user *arg);
-int cda_free_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq);
-int cda_map_mem(struct cda_dev *dev, void *owner, void __user *arg);
-int cda_unmap_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq);
-void cda_unmmap_dev_mem(struct cda_dev *dev, void *owner);
-void cda_free_dev_mem(struct cda_dev *dev, void *owner);
-int cda_init_interrupts(struct cda_dev *dev, void *owner, void __user *ureq);
-int cda_mems_create(struct cda_dev *dev);
-int cda_free_irqs(struct cda_dev *dev, void *owner);
-void cda_mems_release(struct cda_dev *dev);
-int cda_req_int(struct cda_dev *dev, void *owner, void __user *ureq);
-int cda_cancel_req(struct cda_dev *dev, void *owner);
-int cda_open_bars(struct cda_dev *cdadev);
-void cda_release_bars(struct cda_dev *cdadev);
-int cda_sem_aq(struct cda_dev *cdadev, void *owner, void __user *ureq);
-int cda_sem_rel(struct cda_dev *cdadev, void *owner, void __user *ureq);
-void cda_sem_rel_by_owner(struct cda_dev *dev, void *owner);
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
+//
+// CDA linux driver mem blocks/mem maps and interrupt request handler
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms and conditions of the GNU General Public License,
+// version 2, as published by the Free Software Foundation.
+//
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/version.h>
+
+
+#define CDA_MAX_DRV_SEMAPHORES (16)
+
+struct cda_interrupts;
+struct cda_bar;
+struct cda_dev;
+// Dummy block for fast releasing
+struct cda_dummy_blk {
+	struct cda_dev *dev;
+	int index;
+};
+
+struct cda_dev {
+	struct cdev cdev;
+	struct device dev;
+
+	int minor;
+	struct list_head devices;
+
+	struct pci_dev *pcidev;
+	unsigned long stored_flags[PCI_ROM_RESOURCE];
+
+	struct mutex ilock;
+	struct cda_interrupts *ints;
+
+	struct kobject *kobj_mems;
+	struct cda_dummy_blk *dummy_blk;
+	struct idr mblk_idr;
+	spinlock_t mblk_sl;
+	struct list_head mem_blocks;
+	struct list_head mem_maps;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+	// Security kernel lock needs w/a to access BARs
+	struct kobject *kobj_bars;
+	struct cda_bar *sysfs_bar[PCI_ROM_RESOURCE]; // 6 BARs excl. ROM
+#endif
+	u64 semaphores[CDA_MAX_DRV_SEMAPHORES];
+	void *sem_owner[CDA_MAX_DRV_SEMAPHORES];
+};
+
+int cda_alloc_mem(struct cda_dev *dev, void *owner, void __user *arg);
+int cda_free_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq);
+int cda_map_mem(struct cda_dev *dev, void *owner, void __user *arg);
+int cda_unmap_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq);
+void cda_unmmap_dev_mem(struct cda_dev *dev, void *owner);
+void cda_free_dev_mem(struct cda_dev *dev, void *owner);
+int cda_init_interrupts(struct cda_dev *dev, void *owner, void __user *ureq);
+int cda_mems_create(struct cda_dev *dev);
+int cda_free_irqs(struct cda_dev *dev, void *owner);
+void cda_mems_release(struct cda_dev *dev);
+int cda_req_int(struct cda_dev *dev, void *owner, void __user *ureq);
+int cda_cancel_req(struct cda_dev *dev, void *owner);
+int cda_open_bars(struct cda_dev *cdadev);
+void cda_release_bars(struct cda_dev *cdadev);
+int cda_sem_aq(struct cda_dev *cdadev, void *owner, void __user *ureq);
+int cda_sem_rel(struct cda_dev *cdadev, void *owner, void __user *ureq);
+void cda_sem_rel_by_owner(struct cda_dev *dev, void *owner);
diff --git a/src/cdaioctl.h b/src/cdaioctl.h
index bb10a9c..6d1cec4 100644
--- a/src/cdaioctl.h
+++ b/src/cdaioctl.h
@@ -1,62 +1,62 @@
-// SPDX-License-Identifier: LGPL-3.0
-// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
-//
-// CDA linux driver mem blocks/mem maps and interrupt request handler
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms and conditions of the Lesser GNU General Public License,
-// version 3, as published by the Free Software Foundation.
-//
-#ifndef LINUX_VERSION_CODE
-#include <stdint.h> // only include if we're not compiling against the kernel
-#endif
-
-#define CDA_IOCTL_MAGIC 'C'
-#define CDA_ALLOC_MEM _IOWR(CDA_IOCTL_MAGIC, 0x1, long)
-#define CDA_FREE_MEM _IOW(CDA_IOCTL_MAGIC, 0x2, long)
-#define CDA_MAP_MEM _IOWR(CDA_IOCTL_MAGIC, 0x3, long)
-#define CDA_UNMAP_MEM _IOW(CDA_IOCTL_MAGIC, 0x4, long)
-#define CDA_INIT_INT _IOWR(CDA_IOCTL_MAGIC, 0x5, long)
-#define CDA_FREE_INT _IOW(CDA_IOCTL_MAGIC, 0x6, long)
-#define CDA_REQ_INT _IOWR(CDA_IOCTL_MAGIC, 0x7, long)
-#define CDA_INT_CANCEL _IOWR(CDA_IOCTL_MAGIC, 0x8, long)
-#define CDA_SEM_AQ _IOW(CDA_IOCTL_MAGIC, 0x9, long)
-#define CDA_SEM_REL _IOW(CDA_IOCTL_MAGIC, 0xA, long)
-
-struct cda_alloc_mem {
-	uint32_t size;
-	uint32_t index;
-};
-
-struct cda_map_mem {
-	uintptr_t vaddr;
-	uint32_t size;
-	uint32_t index;
-};
-
-struct cda_drv_sg_item {
-	uint64_t paddr;
-	uint32_t size;
-};
-
-struct cda_req_int {
-	uint32_t vector;
-	uint64_t timeout;
-	uint32_t reset;
-};
-
-enum int_type {
-    LEGACY_INTERRUPT = 0,
-    MSI = 1,
-    MSIX = 2
-};
-
-struct cda_int_lock {
-	uint32_t inttype;
-	uint32_t vectors;
-};
-
-struct cda_sem_aq {
-	uint32_t sem_id;
-	uint64_t time_ns;
-};
+/* SPDX-License-Identifier: LGPL-3.0 */
+// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
+//
+// CDA linux driver mem blocks/mem maps and interrupt request handler
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms and conditions of the Lesser GNU General Public License,
+// version 3, as published by the Free Software Foundation.
+//
+#ifndef LINUX_VERSION_CODE
+#include <stdint.h> // only include if we're not compiling against the kernel
+#endif
+
+#define CDA_IOCTL_MAGIC 'C'
+#define CDA_ALLOC_MEM _IOWR(CDA_IOCTL_MAGIC, 0x1, long)
+#define CDA_FREE_MEM _IOW(CDA_IOCTL_MAGIC, 0x2, long)
+#define CDA_MAP_MEM _IOWR(CDA_IOCTL_MAGIC, 0x3, long)
+#define CDA_UNMAP_MEM _IOW(CDA_IOCTL_MAGIC, 0x4, long)
+#define CDA_INIT_INT _IOWR(CDA_IOCTL_MAGIC, 0x5, long)
+#define CDA_FREE_INT _IOW(CDA_IOCTL_MAGIC, 0x6, long)
+#define CDA_REQ_INT _IOWR(CDA_IOCTL_MAGIC, 0x7, long)
+#define CDA_INT_CANCEL _IOWR(CDA_IOCTL_MAGIC, 0x8, long)
+#define CDA_SEM_AQ _IOW(CDA_IOCTL_MAGIC, 0x9, long)
+#define CDA_SEM_REL _IOW(CDA_IOCTL_MAGIC, 0xA, long)
+
+struct cda_alloc_mem {
+	uint32_t size;
+	uint32_t index;
+};
+
+struct cda_map_mem {
+	uintptr_t vaddr;
+	uint32_t size;
+	uint32_t index;
+};
+
+struct cda_drv_sg_item {
+	uint64_t paddr;
+	uint32_t size;
+};
+
+struct cda_req_int {
+	uint32_t vector;
+	uint64_t timeout;
+	uint32_t reset;
+};
+
+enum int_type {
+	LEGACY_INTERRUPT = 0,
+	MSI = 1,
+	MSIX = 2
+};
+
+struct cda_int_lock {
+	uint32_t inttype;
+	uint32_t vectors;
+};
+
+struct cda_sem_aq {
+	uint32_t sem_id;
+	uint64_t time_ns;
+};
diff --git a/src/cdamem.c b/src/cdamem.c
index 5280185..ed39573 100644
--- a/src/cdamem.c
+++ b/src/cdamem.c
@@ -1,861 +1,858 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2020 Egor Pomozov.
-//
-// Originally memalloc sequence was designed for simple driver
-// in Aquantia Corp by Vadim Solomin 
-// Later was updated by QA team in Aquantia Corp.
-// Later it was additionally modifyied by Egor Pomozov
-// 
-// CDA linux driver memory request handler
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms and conditions of the GNU General Public License,
-// version 2, as published by the Free Software Foundation.
-//
-
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-
-#include "cdadrv.h"
-#include "cdaioctl.h"
-
-#include <linux/version.h>
-
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
-/**
- * pin_user_pages_fast() - pin user pages in memory without taking locks
- *
- * For now, this is a placeholder function, until various call sites are
- * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
- * this is identical to get_user_pages_fast().
- *
- * This is intended for Case 1 (DIO) in Documentation/vm/pin_user_pages.rst. It
- * is NOT intended for Case 2 (RDMA: long-term pins).
- */
-static int pin_user_pages_fast(unsigned long start, int nr_pages,
-			unsigned int gup_flags, struct page **pages)
-{
-	/*
-	 * This is a placeholder, until the pin functionality is activated.
-	 * Until then, just behave like the corresponding get_user_pages*()
-	 * routine.
-	 */
-	return get_user_pages_fast(start, nr_pages, gup_flags, pages);
-}
-
-/**
- * unpin_user_page() - release a gup-pinned page
- * @page:            pointer to page to be released
- *
- * Pages that were pinned via pin_user_pages*() must be released via either
- * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
- * that eventually such pages can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special handling.
- *
- * unpin_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. unpin_user_page() calls must
- * be perfectly matched up with pin*() calls.
- */
-static inline void unpin_user_page(struct page *page)
-{
-	put_page(page);
-}
-
-/**
- * unpin_user_pages() - release an array of gup-pinned pages.
- * @pages:  array of pages to be marked dirty and released.
- * @npages: number of pages in the @pages array.
- *
- * For each page in the @pages array, release the page using unpin_user_page().
- *
- * Please see the unpin_user_page() documentation for details.
- */
-static void unpin_user_pages(struct page **pages, unsigned long npages)
-{
-	unsigned long index;
-
-	/*
-	 * TODO: this can be optimized for huge pages: if a series of pages is
-	 * physically contiguous and part of the same compound page, then a
-	 * single operation to the head page should suffice.
-	 */
-	for (index = 0; index < npages; index++)
-		unpin_user_page(pages[index]);
-}
-
-/**
- * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
- * @pages:  array of pages to be maybe marked dirty, and definitely released.
- * @npages: number of pages in the @pages array.
- * @make_dirty: whether to mark the pages dirty
- *
- * "gup-pinned page" refers to a page that has had one of the get_user_pages()
- * variants called on that page.
- *
- * For each page in the @pages array, make that page (or its head page, if a
- * compound page) dirty, if @make_dirty is true, and if the page was previously
- * listed as clean. In any case, releases all pages using unpin_user_page(),
- * possibly via unpin_user_pages(), for the non-dirty case.
- *
- * Please see the unpin_user_page() documentation for details.
- *
- * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
- * required, then the caller should a) verify that this is really correct,
- * because _lock() is usually required, and b) hand code it:
- * set_page_dirty_lock(), unpin_user_page().
- *
- */
-static void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
-				 bool make_dirty)
-{
-	unsigned long index;
-
-	/*
-	 * TODO: this can be optimized for huge pages: if a series of pages is
-	 * physically contiguous and part of the same compound page, then a
-	 * single operation to the head page should suffice.
-	 */
-
-	if (!make_dirty) {
-		unpin_user_pages(pages, npages);
-		return;
-	}
-
-	for (index = 0; index < npages; index++) {
-		struct page *page = compound_head(pages[index]);
-		/*
-		 * Checking PageDirty at this point may race with
-		 * clear_page_dirty_for_io(), but that's OK. Two key
-		 * cases:
-		 *
-		 * 1) This code sees the page as already dirty, so it
-		 * skips the call to set_page_dirty(). That could happen
-		 * because clear_page_dirty_for_io() called
-		 * page_mkclean(), followed by set_page_dirty().
-		 * However, now the page is going to get written back,
-		 * which meets the original intention of setting it
-		 * dirty, so all is well: clear_page_dirty_for_io() goes
-		 * on to call TestClearPageDirty(), and write the page
-		 * back.
-		 *
-		 * 2) This code sees the page as clean, so it calls
-		 * set_page_dirty(). The page stays dirty, despite being
-		 * written back, so it gets written back again in the
-		 * next writeback cycle. This is harmless.
-		 */
-		if (!PageDirty(page))
-			set_page_dirty_lock(page);
-		unpin_user_page(page);
-	}
-}
-
-#endif
-
-static ssize_t name_show(struct device *dev,
-					struct device_attribute *attr,
-					char *buf)
-{
-	struct cda_dev *cdadev = container_of((dev), struct cda_dev, dev);
-	return sprintf(buf, "cda%d\n", cdadev->minor);
-}
-static DEVICE_ATTR_RO(name);
-
-static struct attribute *cda_attrs[] = {
-	&dev_attr_name.attr,
-	NULL,
-};
-
-static struct attribute_group cda_attr_grp = {
-	.attrs = cda_attrs,
-};
-
-static ssize_t mblk_attr_show(
-	struct kobject *kobj, 
-	struct attribute *attr,
-	char *buf);
-
-static void mblk_release(struct kobject *kobj);
-
-struct cda_mblk {
-	struct cda_dev *dev;
-	int index;
-
-	struct kobject kobj;
-	uint32_t req_size;
-	void *vaddr; //kernel
-	uint32_t size;
-	dma_addr_t paddr;
-	void *owner;
-	struct list_head list;
-	struct bin_attribute mmap_attr;
-};
-
-struct cda_mmap {
-	struct cda_dev *dev;
-	int index;
-
-	struct kobject kobj;
-	void *owner;
-
-	void *vaddr; //original user
-	uint32_t size; //original user
-	uint32_t blk_cnt;
-	uint32_t mapped_blk_cnt;
-	uint32_t show_cnt;
-	struct sg_table sgt;
-	struct page **pages;
-	struct cda_drv_sg_item *sg_list;
-	struct list_head list;
-	struct bin_attribute mmap_attr;
-};
-
-struct mblkitem_sysfs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct cda_mblk *, char *);
-	ssize_t (*store)(struct cda_mblk *, char*, size_t);
-};
-
-#define cda_dev_mblk_attr(_field, _fmt)					\
-	static ssize_t							\
-	mblk_##_field##_show(struct cda_mblk *mblk, char *buf)	\
-	{								\
-		return sprintf(buf, _fmt, mblk->_field);		\
-	}								\
-	static struct mblkitem_sysfs_entry mblk_##_field##_attr =	\
-		__ATTR(_field, S_IRUGO, mblk_##_field##_show, NULL);
-
-#pragma GCC diagnostic ignored "-Wformat"
-cda_dev_mblk_attr(vaddr, "0x%lx\n");
-cda_dev_mblk_attr(paddr, "0x%lx\n");
-cda_dev_mblk_attr(size, "0x%x\n");
-cda_dev_mblk_attr(req_size, "0x%x\n");
-cda_dev_mblk_attr(owner, "0x%p\n");
-cda_dev_mblk_attr(index, "%d\n");
-#pragma GCC diagnostic warning "-Wformat"
-
-static struct attribute *mblk_attrs[] = {
-	&mblk_vaddr_attr.attr,
-	&mblk_paddr_attr.attr,
-	&mblk_size_attr.attr,
-	&mblk_owner_attr.attr,
-	&mblk_req_size_attr.attr,
-	&mblk_index_attr.attr,
-	NULL,
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-ATTRIBUTE_GROUPS(mblk);
-#endif
-static const struct sysfs_ops mblk_ops = {
-	.show = mblk_attr_show,
-};
-
-struct kobj_type mblk_type = {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-	.default_groups = mblk_groups,
-#else
-	.default_attrs = mblk_attrs,
-#endif
-	.sysfs_ops = &mblk_ops,
-	.release = mblk_release,
-};
-
-static ssize_t mblk_attr_show(struct kobject *kobj, 
-	struct attribute *attr, char *buf)
-{
-	struct cda_mblk *mblk = container_of(kobj, struct cda_mblk, kobj);
-	struct mblkitem_sysfs_entry *entry =
-		container_of(attr, struct mblkitem_sysfs_entry, attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(mblk, buf);
-}
-
-static void mblk_release(struct kobject *kobj)
-{
-	struct cda_mblk *mblk = container_of(kobj, struct cda_mblk, kobj);
-	kfree(mblk);
-}
-
-#define to_memmap(obj) container_of(obj, struct cda_mmap, kobj)
-
-struct memmapitem_sysfs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct cda_mmap *, char *);
-	ssize_t (*store)(struct cda_mmap *, char *, size_t);
-};
-
-#define cda_dev_memmap_attr(_field, _fmt)					\
-	static ssize_t							\
-	memmap_##_field##_show(struct cda_mmap *memmap, char *buf)	\
-	{								\
-		return sprintf(buf, _fmt, memmap->_field);		\
-	}								\
-	static struct memmapitem_sysfs_entry memmap_##_field##_attr =	\
-		__ATTR(_field, S_IRUGO, memmap_##_field##_show, NULL);
-
-#pragma GCC diagnostic ignored "-Wformat"
-cda_dev_memmap_attr(owner, "0x%p\n");
-cda_dev_memmap_attr(vaddr, "0x%lx\n");
-cda_dev_memmap_attr(size, "0x%x\n");
-cda_dev_memmap_attr(index, "%d\n");
-cda_dev_memmap_attr(blk_cnt, "%d\n");
-
-static ssize_t
-memmap_sglist_show(struct cda_mmap *memmap, char *buf)
-{
-	const int sg_list_item_size = 16 + 8 + 2; //"%016llx %08lx\n"
-	int res = 0;
-	int i = memmap->show_cnt;
-	memmap->show_cnt = 0;
-	buf[0] = '\0';
-	for( ; i < memmap->blk_cnt; i++ ) {
-		if( (res + sg_list_item_size) >= (PAGE_SIZE - 1)) /* https://lwn.net/Articles/178634/ */{
-			memmap->show_cnt = i;
-			//printk("Split SG list. Next read starts with item: %d\n", i);
-			break;
-		}
-		res += sprintf(&buf[res], "%016llx %08lx\n", memmap->sg_list[i].paddr, memmap->sg_list[i].size);
-	}
-	return res;
-}
-
-static struct memmapitem_sysfs_entry memmap_sglist_attr =
-	__ATTR(sglist, S_IRUGO, memmap_sglist_show, NULL);
-
-#pragma GCC diagnostic warning "-Wformat"
-static struct attribute *memmap_attrs[] = {
-	&memmap_owner_attr.attr,
-	&memmap_vaddr_attr.attr,
-	&memmap_size_attr.attr,
-	&memmap_index_attr.attr,
-	&memmap_blk_cnt_attr.attr,
-	&memmap_sglist_attr.attr,
-	NULL,
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-ATTRIBUTE_GROUPS(memmap);
-#endif
-
-static ssize_t memmap_attr_show(struct kobject *kobj, 
-	struct attribute *attr, char *buf)
-{
-	struct cda_mmap *memmap = to_memmap(kobj);
-	struct memmapitem_sysfs_entry *entry =
-		container_of(attr, struct memmapitem_sysfs_entry, attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(memmap, buf);
-}
-
-static void memmap_release(struct kobject *kobj)
-{
-	struct cda_mmap *memmap = to_memmap(kobj);
-	kfree(memmap);
-}
-
-static const struct sysfs_ops memmap_ops = {
-	.show = memmap_attr_show,
-};
-
-struct kobj_type memmap_type = {
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-	.default_groups = memmap_groups,
-#else
-	.default_attrs = memmap_attrs,
-#endif
-	.sysfs_ops = &memmap_ops,
-	.release = memmap_release,
-};
-
-static int mblk_mmap( struct file *file, 
-						struct kobject *kobj, 
-						struct bin_attribute *attr,
-			   			struct vm_area_struct *vma)
-{
-	struct cda_mblk *mblk = attr->private;
-	unsigned long requested = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	unsigned long pages = (unsigned long)mblk->req_size >> PAGE_SHIFT;
-
-	if (vma->vm_pgoff + requested > pages)
-		return -EINVAL;
-
-	if( dma_mmap_coherent(  &mblk->dev->pcidev->dev,
-							vma,
-							mblk->vaddr,
-							mblk->paddr,
-							mblk->req_size) )
-	{
-		dev_err(&mblk->dev->pcidev->dev, "DMA remapping failed");
-		return -ENXIO;
-	}
-	return 0;
-}
-
-void cda_hide_memmap(struct cda_mmap *memmap);
-int cda_publish_memmap(struct cda_mmap *memmap);
-void cda_hide_mblk(struct cda_mblk *mblk);
-int cda_publish_mblk(struct cda_mblk *mblk);
-
-int cda_publish_mblk(struct cda_mblk *mblk)
-{
-	int ret;
-	struct bin_attribute *mmap_attr = &mblk->mmap_attr;
-
-	ret = kobject_add(  &mblk->kobj, mblk->dev->kobj_mems,
-						"%04d", mblk->index);
-	if (ret)
-		goto err_add;
-
-	mmap_attr->mmap = mblk_mmap;
-	mmap_attr->attr.name = "mmap";
-	mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
-	mmap_attr->size = mblk->req_size;
-	mmap_attr->private = mblk;
-	ret = sysfs_create_bin_file(&mblk->kobj, mmap_attr);
-	if (ret)
-		goto err_map_add;
-
-	return 0;
-
-err_map_add:
-	kobject_del(&mblk->kobj);
-err_add:
-	kobject_put(&mblk->kobj);
-	return ret;
-}
-
-
-void cda_hide_mblk(struct cda_mblk *mblk)
-{
-	sysfs_remove_bin_file(&mblk->kobj, &mblk->mmap_attr);
-	kobject_del(&mblk->kobj);
-}
-
-int cda_publish_memmap(struct cda_mmap *memmap)
-{
-	int ret;
-	struct bin_attribute *mmap_attr = &memmap->mmap_attr;
-
-	ret = kobject_add(  &memmap->kobj, memmap->dev->kobj_mems,
-						"%04d", memmap->index);
-	if (ret)
-		goto err_add;
-
-	mmap_attr->attr.name = "memmapobj";
-	mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
-	mmap_attr->size = memmap->size;
-	mmap_attr->private = memmap;
-	ret = sysfs_create_bin_file(&memmap->kobj, mmap_attr);
-	if (ret)
-		goto err_map_add;
-
-	return 0;
-
-err_map_add:
-	kobject_del(&memmap->kobj);
-err_add:
-	kobject_put(&memmap->kobj);
-	return ret;
-}
-
-void cda_hide_memmap(struct cda_mmap *memmap)
-{
-	sysfs_remove_bin_file(&memmap->kobj, &memmap->mmap_attr);
-	kobject_del(&memmap->kobj);
-}
-
-int cda_alloc_mem(struct cda_dev *dev, void *owner, void __user *ureq)
-{
-	int ret = -ENOMEM;
-	int idx;
-	struct cda_mblk *mblk;
-	struct cda_alloc_mem req;
-	if (copy_from_user(&req, ureq, sizeof(req)))
-		return -EFAULT;
-
-	mblk = kzalloc(sizeof(*mblk), in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	if (!mblk) {
-		dev_err(&dev->dev, "Can't alloc mblk\n");
-		goto out;
-	}
-	INIT_LIST_HEAD(&mblk->list);
-	mblk->dev = dev;
-	kobject_init(&mblk->kobj, &mblk_type);
-	mblk->owner = owner;
-	mblk->size = req.size;
-	req.size = ALIGN(req.size, PAGE_SIZE);
-
-	idr_preload(in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	spin_lock(&dev->mblk_sl);
-	ret = idr_alloc(&dev->mblk_idr, mblk,
-		1L, 0, in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	spin_unlock(&dev->mblk_sl);
-	idr_preload_end();
-	if (ret < 0)
-		goto err_idr;
-	mblk->index = req.index = idx = ret;
-
-	mblk->vaddr = dma_alloc_coherent(
-		&dev->pcidev->dev, 
-		req.size, 
-		&mblk->paddr, 
-		in_atomic() ? GFP_ATOMIC | GFP_KERNEL : GFP_KERNEL);
-	if (!mblk->vaddr) {
-		dev_err(&dev->dev, "Can't alloc DMA memory (size %u)", req.size);
-		ret = -1;
-		goto err_dma_alloc;
-	}
-	mblk->req_size = req.size;
-
-	ret = cda_publish_mblk(mblk);
-	if (ret) {
-		dev_err(&dev->dev, "Can't publish mblk to sysfs: %d", ret);
-		goto err_publish;
-	}
-
-	if(copy_to_user(ureq, &req, sizeof(req))) {
-		ret = -EFAULT;
-		goto err_copy_to_user;
-	}
-
-	spin_lock(&dev->mblk_sl);
-	list_add(&mblk->list, &dev->mem_blocks);
-	spin_unlock(&dev->mblk_sl);
-
-	return 0;
-
-err_copy_to_user:
-	cda_hide_mblk(mblk);
-err_publish:
-	dma_free_coherent(&dev->pcidev->dev, mblk->req_size,
-				mblk->vaddr, mblk->paddr);
-err_dma_alloc:
-	spin_lock(&dev->mblk_sl);
-	idr_remove(&dev->mblk_idr, idx);
-	spin_unlock(&dev->mblk_sl);
-err_idr:
-	kobject_put(&mblk->kobj);
-out:
-	return ret;
-}
-
-static void cda_free_mem(struct cda_mblk *mblk)
-{
-	cda_hide_mblk(mblk);
-	dma_free_coherent(&mblk->dev->pcidev->dev, mblk->req_size,
-		mblk->vaddr, mblk->paddr);
-	kobject_put(&mblk->kobj);
-}
-
-int cda_free_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq)
-{
-	int memidx;
-	struct cda_mblk *mblk;
-	if (copy_from_user(&memidx, (void __user *)ureq, sizeof(memidx))) {
-		return -EFAULT;
-	}
-
-	spin_lock(&dev->mblk_sl);
-	mblk = idr_find(&dev->mblk_idr, memidx);
-	if (mblk && mblk->index == memidx) {
-		if( mblk->owner != owner ) {
-			dev_warn(&dev->dev, "Free mblk from another owner\n");
-			idr_replace(&dev->mblk_idr, dev->dummy_blk, memidx);
-		}
-		list_del(&mblk->list);
-	} else if(mblk) {
-		dev_warn(&dev->dev, "Free mblk with index %d, required %d\n", mblk->index, memidx);
-	}
-	spin_unlock(&dev->mblk_sl);
-	if (!mblk)
-		return -ENOENT;
-	if (mblk->index) {
-		cda_free_mem(mblk);
-		spin_lock(&dev->mblk_sl);
-		idr_remove(&dev->mblk_idr, mblk->index);
-		spin_unlock(&dev->mblk_sl);
-	}
-	return 0;
-}
-
-void cda_free_dev_mem(struct cda_dev *dev, void *owner)
-{
-	struct cda_mblk *mblk, *tmp;
-	LIST_HEAD(mblks);
-
-	spin_lock(&dev->mblk_sl);
-	if( owner == NULL ){
-		idr_destroy(&dev->mblk_idr);
-		list_replace_init(&dev->mem_blocks, &mblks);
-	} else {
-		list_for_each_entry_safe(mblk, tmp, &dev->mem_blocks, list) {
-			if( mblk->index > 0L && mblk->owner == owner ) {
-				list_move(&mblk->list, &mblks);
-				idr_replace(&dev->mblk_idr, dev->dummy_blk, mblk->index);
-			}
-		}
-	}
-	spin_unlock(&dev->mblk_sl);
-	list_for_each_entry_safe(mblk, tmp, &mblks, list) {
-		// Unmap blocks owned by specified owner or all if owner is NULL
-		cda_free_mem(mblk);
-		if( owner != NULL ){
-			spin_lock(&dev->mblk_sl);
-			idr_remove(&dev->mblk_idr, mblk->index);
-			spin_unlock(&dev->mblk_sl);
-		}
-	}
-}
-
-static void cda_release_map(struct cda_mmap *memmap)
-{	
-	dma_unmap_sg(memmap->dev->pcidev == NULL ? NULL : &memmap->dev->pcidev->dev, memmap->sgt.sgl, memmap->sgt.orig_nents, DMA_BIDIRECTIONAL);
-	unpin_user_pages_dirty_lock(memmap->pages, memmap->blk_cnt, 1);
-	memmap->mapped_blk_cnt = 0;
-}
-
-static int cda_perform_mapping(
-	struct cda_mmap *memmap)
-{
-	uint i;
-	int nents;
-	struct scatterlist *sg;
-	ulong len = memmap->size;
-	void __user *buf = memmap->vaddr;
-	struct cda_drv_sg_item *cda_sg_list = memmap->sg_list;
-	sg = memmap->sgt.sgl;
-	for (i = 0; i < memmap->sgt.orig_nents; i++, sg = sg_next(sg)) {
-		unsigned int offset = offset_in_page(buf);
-		unsigned int nbytes =
-			min_t(unsigned int, PAGE_SIZE - offset, len);
-
-		sg_set_page(sg, memmap->pages[i], nbytes, offset);
-
-		buf += nbytes;
-		len -= nbytes;
-	}
-
-	nents = dma_map_sg(&memmap->dev->pcidev->dev, memmap->sgt.sgl, memmap->sgt.orig_nents, DMA_BIDIRECTIONAL);
-	if (!nents) {
-		dev_err(&memmap->dev->dev, "map sgl failed, sgt 0x%p.\n", &memmap->sgt);
-		return -EIO;
-	}
-	memmap->sgt.nents = nents;
-
-	for (i = 0, sg = memmap->sgt.sgl; i < nents; i++, sg = sg_next(sg)) {
-		cda_sg_list[i].size = sg_dma_len(sg);
-		cda_sg_list[i].paddr = sg_dma_address(sg);
-	}
-
-	memmap->mapped_blk_cnt = nents;
-	return 0;
-}
-
-int cda_map_mem(struct cda_dev *dev, void *owner, void __user *ureq)
-{
-	int ret = -ENOMEM;
-	int idx;
-	int npages;
-	struct cda_mmap *memmap;
-	struct cda_map_mem req;
-	unsigned long offset;
-	void *req_vaddr;
-
-	if (copy_from_user(&req, ureq, sizeof(req)))
-		return -EFAULT;
-	req_vaddr = (void __user *)req.vaddr;
-	offset = offset_in_page(req_vaddr);
-	npages = DIV_ROUND_UP(offset + req.size, PAGE_SIZE);
-	memmap = kzalloc(sizeof(*memmap) + npages * (sizeof(struct cda_drv_sg_item) + sizeof(struct page *)), 
-		in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	if (!memmap) {
-		dev_err(&dev->dev, "Can't alloc memmap\n");
-		goto out;
-	}
-	memmap->owner = owner;
-	memmap->sg_list = (struct cda_drv_sg_item *)((void *)memmap + sizeof(*memmap));
-	memmap->pages = (struct page **)((void *)memmap + sizeof(*memmap) + npages * (sizeof(struct cda_drv_sg_item)));
-
-	if (sg_alloc_table(&memmap->sgt, npages, 
-		in_atomic() ? GFP_ATOMIC :GFP_KERNEL)) {
-		dev_err(&dev->dev, "Can't alloc sg table\n");
-		goto out;
-	}
-	INIT_LIST_HEAD(&memmap->list);
-	memmap->dev = dev;
-	kobject_init(&memmap->kobj, &memmap_type);
-
-	memmap->vaddr = req_vaddr;
-	memmap->size = req.size;
-	memmap->blk_cnt = npages;
-	idr_preload(in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	spin_lock(&dev->mblk_sl);
-	ret = idr_alloc(&dev->mblk_idr, memmap,
-		1L, 0, in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
-	spin_unlock(&dev->mblk_sl);
-	idr_preload_end();
-	if (ret < 0)
-		goto err_idr;
-	memmap->index = req.index = idx = ret;
-	
-	ret = pin_user_pages_fast((ulong)req_vaddr, npages,
-					FOLL_WRITE, memmap->pages);
-	if ( ret < 0 ) {
-		dev_err(&dev->pcidev->dev,
-			"Pin user pages failed for addr=0x%p [ret=%d]\n",
-			req_vaddr, ret);
-		goto err_pin;
-	}
-	if (ret != npages) {
-		dev_err(&dev->pcidev->dev,
-			"Unable to pin all user pages for addr=0x%p\n", req_vaddr);
-		ret = -EFAULT;
-		goto err_pin;
-	}
-
-	ret = cda_perform_mapping(memmap);
-	if ( ret ) {
-		dev_err(&dev->dev, "Can't map user memory for DMA (size %u)", req.size);
-		goto err_dma_alloc;
-	}
-
-	ret = cda_publish_memmap(memmap);
-	if (ret) {
-		dev_err(&dev->dev, "Can't publish memmap to sysfs: %d", ret);
-		goto err_publish;
-	}
-
-	if(copy_to_user(ureq, &req, sizeof(req))) {
-		ret = -EFAULT;
-		goto err_copy_to_user;
-	}
-
-	spin_lock(&dev->mblk_sl);
-	list_add(&memmap->list, &dev->mem_maps);
-	spin_unlock(&dev->mblk_sl);
-
-	dev_dbg(&dev->dev, "map vaddr %p, pages %d\n", memmap->vaddr, npages);
-	return 0;
-
-err_copy_to_user:
-	cda_hide_memmap(memmap);
-err_publish:
-	cda_release_map(memmap);
-err_dma_alloc:
-	unpin_user_pages_dirty_lock(memmap->pages, memmap->blk_cnt, 1);	
-err_pin:
-	spin_lock(&dev->mblk_sl);
-	idr_remove(&dev->mblk_idr, idx);
-	spin_unlock(&dev->mblk_sl);
-err_idr:
-	kobject_put(&memmap->kobj);
-out:
-	if( memmap ) {
-		if( memmap->pages ) {
-			kfree(memmap->pages);
-		}
-		kfree(memmap);
-	}
-	return ret;
-}
-
-static void cda_free_map(struct cda_mmap *memmap)
-{
-	dev_dbg(&memmap->dev->dev, "unmap vaddr %p, pages %d\n", memmap->vaddr, memmap->blk_cnt);
-	cda_hide_memmap(memmap);
-	cda_release_map(memmap);
-	kobject_put(&memmap->kobj);
-}
-
-int cda_unmap_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq)
-{
-	int memidx;
-	struct cda_mmap *memmap;
-	if (copy_from_user(&memidx, (void __user *)ureq, sizeof(memidx)))
-		return -EFAULT;
-
-	spin_lock(&dev->mblk_sl);
-	memmap = idr_find(&dev->mblk_idr, memidx);
-	if (memmap && memmap->index == memidx) {
-		if( memmap->owner != owner )
-			dev_warn(&dev->dev, "Unmap buffer by another user\n");
-		idr_replace(&dev->mblk_idr, dev->dummy_blk, memidx);
-		list_del(&memmap->list);
-	} else if (memmap)
-		dev_warn(&dev->dev, "Unmap buffer with index %d, required %d\n", memmap->index, memidx);
-	spin_unlock(&dev->mblk_sl);
-
-	if (!memmap)
-		return -ENOENT; // Somebody may already release this block in parallel
-
-	if( memmap->index ) {
-		cda_free_map(memmap);
-		spin_lock(&dev->mblk_sl);
-		idr_remove(&dev->mblk_idr, memmap->index);
-		spin_unlock(&dev->mblk_sl);
-	}
-	return 0;
-}
-
-void cda_unmmap_dev_mem(struct cda_dev *dev, void *owner)
-{
-	struct cda_mmap *memmap, *tmp;
-	LIST_HEAD(memmaps);
-
-	spin_lock(&dev->mblk_sl);
-	if( owner == NULL ){
-		list_replace_init(&dev->mem_maps, &memmaps);
-	} else {
-		list_for_each_entry_safe(memmap, tmp, &dev->mem_maps, list) {
-			if( memmap->index > 0L && memmap->owner == owner ) {
-				idr_replace(&dev->mblk_idr, dev->dummy_blk, memmap->index);
-				list_move(&memmap->list, &memmaps);
-			}
-		}
-	}
-	spin_unlock(&dev->mblk_sl);
-	list_for_each_entry_safe(memmap, tmp, &memmaps, list) {
-		// Unmap blocks owned by specified owner or all if owner is NULL
-		cda_free_map(memmap);
-		if( owner != NULL ){
-			spin_lock(&dev->mblk_sl);
-			idr_remove(&dev->mblk_idr, memmap->index);
-			spin_unlock(&dev->mblk_sl);
-		}
-	}
-}
-
-int cda_mems_create(struct cda_dev *cdadev)
-{
-	int ret = sysfs_create_group(&cdadev->dev.kobj, &cda_attr_grp);
-	if (ret)
-		goto err_group;
-
-	cdadev->kobj_mems = kobject_create_and_add("mems", &cdadev->dev.kobj);
-	if (!cdadev->kobj_mems)
-		goto err_mems;
-	return 0;
-
-err_mems:
-	sysfs_remove_group(&cdadev->dev.kobj, &cda_attr_grp);
-err_group:
-	dev_err(&cdadev->dev, "Couldn't create sysfs files: %d\n", ret);
-	return ret;
-}
-
-void cda_mems_release(struct cda_dev *dev)
-{
-	//cda_release_bars(dev);
-	kobject_del(dev->kobj_mems);
-	kobject_put(dev->kobj_mems);
-	sysfs_remove_group(&dev->dev.kobj, &cda_attr_grp);
-}
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2020 Egor Pomozov.
+//
+// Originally memalloc sequence was designed for simple driver
+// in Aquantia Corp by Vadim Solomin
+// Later was updated by QA team in Aquantia Corp.
+// Later it was additionally modified by Egor Pomozov
+//
+// CDA linux driver memory request handler
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms and conditions of the GNU General Public License,
+// version 2, as published by the Free Software Foundation.
+//
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+
+#include "cdadrv.h"
+#include "cdaioctl.h"
+
+#include <linux/version.h>
+
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 6, 0)
+/**
+ * pin_user_pages_fast() - pin user pages in memory without taking locks
+ *
+ * For now, this is a placeholder function, until various call sites are
+ * converted to use the correct get_user_pages*() or pin_user_pages*() API. So,
+ * this is identical to get_user_pages_fast().
+ *
+ * This is intended for Case 1 (DIO) in Documentation/vm/pin_user_pages.rst. It
+ * is NOT intended for Case 2 (RDMA: long-term pins).
+ */
+static int pin_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
+{
+	/*
+	 * This is a placeholder, until the pin functionality is activated.
+	 * Until then, just behave like the corresponding get_user_pages*()
+	 * routine.
+	 */
+	return get_user_pages_fast(start, nr_pages, gup_flags, pages);
+}
+
+/**
+ * unpin_user_page() - release a gup-pinned page
+ * @page:            pointer to page to be released
+ *
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
+ *
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
+ */
+static inline void unpin_user_page(struct page *page)
+{
+	put_page(page);
+}
+
+/**
+ * unpin_user_pages() - release an array of gup-pinned pages.
+ * @pages:  array of pages to be marked dirty and released.
+ * @npages: number of pages in the @pages array.
+ *
+ * For each page in the @pages array, release the page using unpin_user_page().
+ *
+ * Please see the unpin_user_page() documentation for details.
+ */
+static void unpin_user_pages(struct page **pages, unsigned long npages)
+{
+	unsigned long index;
+
+	/*
+	 * TODO: this can be optimized for huge pages: if a series of pages is
+	 * physically contiguous and part of the same compound page, then a
+	 * single operation to the head page should suffice.
+	 */
+	for (index = 0; index < npages; index++)
+		unpin_user_page(pages[index]);
+}
+
+/**
+ * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
+ * @pages:  array of pages to be maybe marked dirty, and definitely released.
+ * @npages: number of pages in the @pages array.
+ * @make_dirty: whether to mark the pages dirty
+ *
+ * "gup-pinned page" refers to a page that has had one of the get_user_pages()
+ * variants called on that page.
+ *
+ * For each page in the @pages array, make that page (or its head page, if a
+ * compound page) dirty, if @make_dirty is true, and if the page was previously
+ * listed as clean. In any case, releases all pages using unpin_user_page(),
+ * possibly via unpin_user_pages(), for the non-dirty case.
+ *
+ * Please see the unpin_user_page() documentation for details.
+ *
+ * set_page_dirty_lock() is used internally. If instead, set_page_dirty() is
+ * required, then the caller should a) verify that this is really correct,
+ * because _lock() is usually required, and b) hand code it:
+ * set_page_dirty_lock(), unpin_user_page().
+ *
+ */
+static void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+				 bool make_dirty)
+{
+	unsigned long index;
+
+	/*
+	 * TODO: this can be optimized for huge pages: if a series of pages is
+	 * physically contiguous and part of the same compound page, then a
+	 * single operation to the head page should suffice.
+	 */
+
+	if (!make_dirty) {
+		unpin_user_pages(pages, npages);
+		return;
+	}
+
+	for (index = 0; index < npages; index++) {
+		struct page *page = compound_head(pages[index]);
+		/*
+		 * Checking PageDirty at this point may race with
+		 * clear_page_dirty_for_io(), but that's OK. Two key
+		 * cases:
+		 *
+		 * 1) This code sees the page as already dirty, so it
+		 * skips the call to set_page_dirty(). That could happen
+		 * because clear_page_dirty_for_io() called
+		 * page_mkclean(), followed by set_page_dirty().
+		 * However, now the page is going to get written back,
+		 * which meets the original intention of setting it
+		 * dirty, so all is well: clear_page_dirty_for_io() goes
+		 * on to call TestClearPageDirty(), and write the page
+		 * back.
+		 *
+		 * 2) This code sees the page as clean, so it calls
+		 * set_page_dirty(). The page stays dirty, despite being
+		 * written back, so it gets written back again in the
+		 * next writeback cycle. This is harmless.
+		 */
+		if (!PageDirty(page))
+			set_page_dirty_lock(page);
+		unpin_user_page(page);
+	}
+}
+
+#endif
+
+static ssize_t name_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct cda_dev *cdadev = container_of((dev), struct cda_dev, dev);
+
+	return sprintf(buf, "cda%d\n", cdadev->minor);
+}
+static DEVICE_ATTR_RO(name);
+
+static struct attribute *cda_attrs[] = {
+	&dev_attr_name.attr,
+	NULL,
+};
+
+static struct attribute_group cda_attr_grp = {
+	.attrs = cda_attrs,
+};
+
+static ssize_t mblk_attr_show(
+	struct kobject *kobj,
+	struct attribute *attr,
+	char *buf);
+
+static void mblk_release(struct kobject *kobj);
+
+struct cda_mblk {
+	struct cda_dev *dev;
+	int index;
+
+	struct kobject kobj;
+	uint32_t req_size;
+	void *vaddr; //kernel
+	uint32_t size;
+	dma_addr_t paddr;
+	void *owner;
+	struct list_head list;
+	struct bin_attribute mmap_attr;
+};
+
+struct cda_mmap {
+	struct cda_dev *dev;
+	int index;
+
+	struct kobject kobj;
+	void *owner;
+
+	void *vaddr; //original user
+	uint32_t size; //original user
+	uint32_t blk_cnt;
+	uint32_t mapped_blk_cnt;
+	uint32_t show_cnt;
+	struct sg_table sgt;
+	struct page **pages;
+	struct cda_drv_sg_item *sg_list;
+	struct list_head list;
+	struct bin_attribute mmap_attr;
+};
+
+struct mblkitem_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct cda_mblk *, char *);
+	ssize_t (*store)(struct cda_mblk *, char*, size_t);
+};
+
+#define cda_dev_mblk_attr(_field, _fmt)					\
+	static ssize_t							\
+	mblk_##_field##_show(struct cda_mblk *mblk, char *buf)	\
+	{								\
+		return sprintf(buf, _fmt, mblk->_field);		\
+	}								\
+	static struct mblkitem_sysfs_entry mblk_##_field##_attr =	\
+		__ATTR(_field, S_IRUGO, mblk_##_field##_show, NULL)
+
+#pragma GCC diagnostic ignored "-Wformat"
+cda_dev_mblk_attr(vaddr, "0x%lx\n");
+cda_dev_mblk_attr(paddr, "0x%lx\n");
+cda_dev_mblk_attr(size, "0x%x\n");
+cda_dev_mblk_attr(req_size, "0x%x\n");
+cda_dev_mblk_attr(owner, "0x%p\n");
+cda_dev_mblk_attr(index, "%d\n");
+#pragma GCC diagnostic warning "-Wformat"
+
+static struct attribute *mblk_attrs[] = {
+	&mblk_vaddr_attr.attr,
+	&mblk_paddr_attr.attr,
+	&mblk_size_attr.attr,
+	&mblk_owner_attr.attr,
+	&mblk_req_size_attr.attr,
+	&mblk_index_attr.attr,
+	NULL,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+ATTRIBUTE_GROUPS(mblk);
+#endif
+static const struct sysfs_ops mblk_ops = {
+	.show = mblk_attr_show,
+};
+
+static const struct kobj_type mblk_type = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+	.default_groups = mblk_groups,
+#else
+	.default_attrs = mblk_attrs,
+#endif
+	.sysfs_ops = &mblk_ops,
+	.release = mblk_release,
+};
+
+static ssize_t mblk_attr_show(struct kobject *kobj, // sysfs ->show dispatcher for mblk kobjects
+	struct attribute *attr, char *buf)
+{
+	struct cda_mblk *mblk = container_of(kobj, struct cda_mblk, kobj);
+	struct mblkitem_sysfs_entry *entry =
+		container_of(attr, struct mblkitem_sysfs_entry, attr);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(mblk, buf);
+}
+
+static void mblk_release(struct kobject *kobj) // kobject release: last ref dropped, free the mblk
+{
+	struct cda_mblk *mblk = container_of(kobj, struct cda_mblk, kobj);
+
+	kfree(mblk);
+}
+
+#define to_memmap(obj) container_of(obj, struct cda_mmap, kobj)
+
+struct memmapitem_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct cda_mmap *, char *);
+	ssize_t (*store)(struct cda_mmap *, char *, size_t);
+};
+
+#define cda_dev_memmap_attr(_field, _fmt)					\
+	static ssize_t							\
+	memmap_##_field##_show(struct cda_mmap *memmap, char *buf)	\
+	{								\
+		return sprintf(buf, _fmt, memmap->_field);		\
+	}								\
+	static struct memmapitem_sysfs_entry memmap_##_field##_attr =	\
+		__ATTR(_field, S_IRUGO, memmap_##_field##_show, NULL)
+
+#pragma GCC diagnostic ignored "-Wformat"
+cda_dev_memmap_attr(owner, "0x%p\n");
+cda_dev_memmap_attr(vaddr, "0x%lx\n");
+cda_dev_memmap_attr(size, "0x%x\n");
+cda_dev_memmap_attr(index, "%d\n");
+cda_dev_memmap_attr(blk_cnt, "%d\n");
+
+static ssize_t
+memmap_sglist_show(struct cda_mmap *memmap, char *buf) // dump one "paddr size" pair per SG entry; long lists span several reads
+{
+	const int sg_list_item_size = 16 + 8 + 2; //"%016llx %08lx\n"
+	int res = 0;
+	int i = memmap->show_cnt; // resume where an earlier, page-limited read stopped
+
+	memmap->show_cnt = 0;
+	buf[0] = '\0';
+	for ( ; i < memmap->blk_cnt; i++) {
+		if ((res + sg_list_item_size) >= (PAGE_SIZE - 1)) /* https://lwn.net/Articles/178634/ */{
+			memmap->show_cnt = i; // remember position; next read continues here
+			//printk("Split SG list. Next read starts with item: %d\n", i);
+			break;
+		}
+		res += sprintf(&buf[res], "%016llx %08lx\n", memmap->sg_list[i].paddr, memmap->sg_list[i].size);
+	}
+	return res;
+}
+
+static struct memmapitem_sysfs_entry memmap_sglist_attr =
+	__ATTR(sglist, S_IRUGO, memmap_sglist_show, NULL);
+
+#pragma GCC diagnostic warning "-Wformat"
+static struct attribute *memmap_attrs[] = {
+	&memmap_owner_attr.attr,
+	&memmap_vaddr_attr.attr,
+	&memmap_size_attr.attr,
+	&memmap_index_attr.attr,
+	&memmap_blk_cnt_attr.attr,
+	&memmap_sglist_attr.attr,
+	NULL,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+ATTRIBUTE_GROUPS(memmap);
+#endif
+
+static ssize_t memmap_attr_show(struct kobject *kobj,
+	struct attribute *attr, char *buf)
+{
+	struct cda_mmap *memmap = to_memmap(kobj);
+	struct memmapitem_sysfs_entry *entry =
+		container_of(attr, struct memmapitem_sysfs_entry, attr);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(memmap, buf);
+}
+
+static void memmap_release(struct kobject *kobj)
+{
+	struct cda_mmap *memmap = to_memmap(kobj);
+
+	kfree(memmap);
+}
+
+static const struct sysfs_ops memmap_ops = {
+	.show = memmap_attr_show,
+};
+
+static const struct kobj_type memmap_type = {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+	.default_groups = memmap_groups,
+#else
+	.default_attrs = memmap_attrs,
+#endif
+	.sysfs_ops = &memmap_ops,
+	.release = memmap_release,
+};
+
+static int mblk_mmap(struct file *file, // mmap handler for the per-block "mmap" bin file
+		     struct kobject *kobj,
+		     struct bin_attribute *attr,
+		     struct vm_area_struct *vma)
+{
+	struct cda_mblk *mblk = attr->private;
+	unsigned long requested = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long pages = (unsigned long)mblk->req_size >> PAGE_SHIFT;
+
+	if (vma->vm_pgoff + requested > pages) // reject ranges beyond the (page-aligned) allocation
+		return -EINVAL;
+
+	if (dma_mmap_coherent(&mblk->dev->pcidev->dev,
+			      vma,
+			      mblk->vaddr,
+			      mblk->paddr,
+			      mblk->req_size)) {
+		dev_err(&mblk->dev->pcidev->dev, "DMA remapping failed");
+		return -ENXIO;
+	}
+	return 0;
+}
+
+static int cda_publish_mblk(struct cda_mblk *mblk) // register kobject + "mmap" bin file under .../mems/NNNN
+{
+	int ret;
+	struct bin_attribute *mmap_attr = &mblk->mmap_attr;
+
+	ret = kobject_add(&mblk->kobj, mblk->dev->kobj_mems,
+						"%04d", mblk->index);
+	if (ret)
+		goto err_add;
+
+	mmap_attr->mmap = mblk_mmap;
+	mmap_attr->attr.name = "mmap";
+	mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; // NOTE(review): world-writable — confirm intended
+	mmap_attr->size = mblk->req_size;
+	mmap_attr->private = mblk;
+	ret = sysfs_create_bin_file(&mblk->kobj, mmap_attr);
+	if (ret)
+		goto err_map_add;
+
+	return 0;
+
+err_map_add:
+	kobject_del(&mblk->kobj);
+err_add:
+	/* bug fix: no kobject_put() here — cda_alloc_mem() still uses mblk and drops the ref itself */
+	return ret;
+}
+
+
+static void cda_hide_mblk(struct cda_mblk *mblk) // undo cda_publish_mblk(): remove bin file and sysfs entry
+{
+	sysfs_remove_bin_file(&mblk->kobj, &mblk->mmap_attr);
+	kobject_del(&mblk->kobj); // ref still held; object freed later via kobject_put()
+}
+
+static int cda_publish_memmap(struct cda_mmap *memmap) // register kobject + "memmapobj" bin file under .../mems/NNNN
+{
+	int ret;
+	struct bin_attribute *mmap_attr = &memmap->mmap_attr;
+
+	ret = kobject_add(&memmap->kobj, memmap->dev->kobj_mems,
+						"%04d", memmap->index);
+	if (ret)
+		goto err_add;
+
+	mmap_attr->attr.name = "memmapobj";
+	mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; // NOTE(review): world-writable — confirm intended
+	mmap_attr->size = memmap->size;
+	mmap_attr->private = memmap;
+	ret = sysfs_create_bin_file(&memmap->kobj, mmap_attr);
+	if (ret)
+		goto err_map_add;
+
+	return 0;
+
+err_map_add:
+	kobject_del(&memmap->kobj);
+err_add:
+	/* bug fix: no kobject_put() here — cda_map_mem() still uses memmap and drops the ref itself */
+	return ret;
+}
+
+static void cda_hide_memmap(struct cda_mmap *memmap) // undo cda_publish_memmap(): remove bin file and sysfs entry
+{
+	sysfs_remove_bin_file(&memmap->kobj, &memmap->mmap_attr);
+	kobject_del(&memmap->kobj); // ref still held; object freed later via kobject_put()
+}
+
+int cda_alloc_mem(struct cda_dev *dev, void *owner, void __user *ureq) // ioctl: allocate a DMA-coherent block, expose it via sysfs
+{
+	int ret = -ENOMEM;
+	int idx;
+	struct cda_mblk *mblk;
+	struct cda_alloc_mem req;
+
+	if (copy_from_user(&req, ureq, sizeof(req)))
+		return -EFAULT;
+
+	mblk = kzalloc(sizeof(*mblk), in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	if (!mblk)
+		goto out;
+	INIT_LIST_HEAD(&mblk->list);
+	mblk->dev = dev;
+	kobject_init(&mblk->kobj, &mblk_type);
+	mblk->owner = owner;
+	mblk->size = req.size; // keep the unaligned size the user asked for
+	req.size = ALIGN(req.size, PAGE_SIZE);
+
+	idr_preload(in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	spin_lock(&dev->mblk_sl);
+	ret = idr_alloc(&dev->mblk_idr, mblk,
+		1L, 0, in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	spin_unlock(&dev->mblk_sl);
+	idr_preload_end();
+	if (ret < 0)
+		goto err_idr;
+	mblk->index = req.index = idx = ret;
+
+	mblk->vaddr = dma_alloc_coherent(
+		&dev->pcidev->dev,
+		req.size,
+		&mblk->paddr,
+		in_atomic() ? GFP_ATOMIC : GFP_KERNEL); /* bug fix: never OR GFP_ATOMIC with GFP_KERNEL */
+	if (!mblk->vaddr) {
+		dev_err(&dev->dev, "Can't alloc DMA memory (size %u)", req.size);
+		ret = -ENOMEM; /* bug fix: was -1, which reads as -EPERM in userspace */
+		goto err_dma_alloc;
+	}
+	mblk->req_size = req.size;
+
+	ret = cda_publish_mblk(mblk);
+	if (ret) {
+		dev_err(&dev->dev, "Can't publish mblk to sysfs: %d", ret);
+		goto err_publish;
+	}
+
+	if (copy_to_user(ureq, &req, sizeof(req))) {
+		ret = -EFAULT;
+		goto err_copy_to_user;
+	}
+
+	spin_lock(&dev->mblk_sl);
+	list_add(&mblk->list, &dev->mem_blocks);
+	spin_unlock(&dev->mblk_sl);
+
+	return 0;
+
+err_copy_to_user:
+	cda_hide_mblk(mblk);
+err_publish:
+	dma_free_coherent(&dev->pcidev->dev, mblk->req_size,
+				mblk->vaddr, mblk->paddr);
+err_dma_alloc:
+	spin_lock(&dev->mblk_sl);
+	idr_remove(&dev->mblk_idr, idx);
+	spin_unlock(&dev->mblk_sl);
+err_idr:
+	kobject_put(&mblk->kobj); // mblk_release() kfrees mblk
+out:
+	return ret;
+}
+
+static void cda_free_mem(struct cda_mblk *mblk) // tear down one block: sysfs entry, DMA memory, final ref
+{
+	cda_hide_mblk(mblk);
+	dma_free_coherent(&mblk->dev->pcidev->dev, mblk->req_size,
+		mblk->vaddr, mblk->paddr);
+	kobject_put(&mblk->kobj); // mblk_release() kfrees mblk
+}
+
+int cda_free_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq) // ioctl: free one DMA block by its idr index
+{
+	int memidx;
+	struct cda_mblk *mblk;
+
+	if (copy_from_user(&memidx, (void __user *)ureq, sizeof(memidx)))
+		return -EFAULT;
+
+	spin_lock(&dev->mblk_sl);
+	mblk = idr_find(&dev->mblk_idr, memidx);
+	if (mblk && mblk->index == memidx) {
+		if (mblk->owner != owner) {
+			dev_warn(&dev->dev, "Free mblk from another owner\n");
+			idr_replace(&dev->mblk_idr, dev->dummy_blk, memidx); // park placeholder so the slot stays claimed
+		}
+		list_del(&mblk->list);
+	} else if (mblk) {
+		dev_warn(&dev->dev, "Free mblk with index %d, required %d\n", mblk->index, memidx);
+	}
+	spin_unlock(&dev->mblk_sl);
+	if (!mblk)
+		return -ENOENT;
+	if (mblk->index) { // index 0 presumably means we found dummy_blk — nothing to free; TODO confirm
+		cda_free_mem(mblk);
+		spin_lock(&dev->mblk_sl);
+		idr_remove(&dev->mblk_idr, mblk->index);
+		spin_unlock(&dev->mblk_sl);
+	}
+	return 0;
+}
+
+void cda_free_dev_mem(struct cda_dev *dev, void *owner) // free all blocks of one owner; owner == NULL frees everything (teardown)
+{
+	struct cda_mblk *mblk, *tmp;
+	LIST_HEAD(mblks);
+
+	spin_lock(&dev->mblk_sl);
+	if (owner == NULL) {
+		idr_destroy(&dev->mblk_idr); // teardown: drop the whole idr at once
+		list_replace_init(&dev->mem_blocks, &mblks);
+	} else {
+		list_for_each_entry_safe(mblk, tmp, &dev->mem_blocks, list) {
+			if (mblk->index > 0L && mblk->owner == owner) {
+				list_move(&mblk->list, &mblks);
+				idr_replace(&dev->mblk_idr, dev->dummy_blk, mblk->index); // park placeholder until removed below
+			}
+		}
+	}
+	spin_unlock(&dev->mblk_sl);
+	list_for_each_entry_safe(mblk, tmp, &mblks, list) {
+		// Unmap blocks owned by specified owner or all if owner is NULL
+		cda_free_mem(mblk);
+		if (owner != NULL) {
+			spin_lock(&dev->mblk_sl);
+			idr_remove(&dev->mblk_idr, mblk->index);
+			spin_unlock(&dev->mblk_sl);
+		}
+	}
+}
+
+static void cda_release_map(struct cda_mmap *memmap) // DMA-unmap the SG list and unpin the user pages
+{
+	dma_unmap_sg(memmap->dev->pcidev == NULL ? NULL : &memmap->dev->pcidev->dev, memmap->sgt.sgl, memmap->sgt.orig_nents, DMA_BIDIRECTIONAL);
+	unpin_user_pages_dirty_lock(memmap->pages, memmap->blk_cnt, 1); // 1: mark dirty — the device may have written
+	memmap->mapped_blk_cnt = 0;
+}
+
+static int cda_perform_mapping( // build SG entries from the pinned pages, then DMA-map them
+	struct cda_mmap *memmap)
+{
+	uint i;
+	int nents;
+	struct scatterlist *sg;
+	ulong len = memmap->size;
+	void __user *buf = memmap->vaddr;
+	struct cda_drv_sg_item *cda_sg_list = memmap->sg_list;
+
+	sg = memmap->sgt.sgl;
+	for (i = 0; i < memmap->sgt.orig_nents; i++, sg = sg_next(sg)) {
+		unsigned int offset = offset_in_page(buf);
+		unsigned int nbytes =
+			min_t(unsigned int, PAGE_SIZE - offset, len);
+
+		sg_set_page(sg, memmap->pages[i], nbytes, offset); // first/last entries may cover partial pages
+
+		buf += nbytes;
+		len -= nbytes;
+	}
+
+	nents = dma_map_sg(&memmap->dev->pcidev->dev, memmap->sgt.sgl, memmap->sgt.orig_nents, DMA_BIDIRECTIONAL);
+	if (!nents) {
+		dev_err(&memmap->dev->dev, "map sgl failed, sgt 0x%p.\n", &memmap->sgt);
+		return -EIO;
+	}
+	memmap->sgt.nents = nents;
+
+	for (i = 0, sg = memmap->sgt.sgl; i < nents; i++, sg = sg_next(sg)) {
+		cda_sg_list[i].size = sg_dma_len(sg); // export DMA addr/len pairs for the "sglist" sysfs file
+		cda_sg_list[i].paddr = sg_dma_address(sg);
+	}
+
+	memmap->mapped_blk_cnt = nents;
+	return 0;
+}
+
+int cda_map_mem(struct cda_dev *dev, void *owner, void __user *ureq) // ioctl: pin a user buffer, DMA-map it, expose via sysfs
+{
+	int ret = -ENOMEM;
+	int idx;
+	int npages;
+	struct cda_mmap *memmap;
+	struct cda_map_mem req;
+	unsigned long offset;
+	void *req_vaddr;
+
+	if (copy_from_user(&req, ureq, sizeof(req)))
+		return -EFAULT;
+	req_vaddr = (void __user *)req.vaddr;
+	offset = offset_in_page(req_vaddr);
+	npages = DIV_ROUND_UP(offset + req.size, PAGE_SIZE);
+	memmap = kzalloc(sizeof(*memmap) + npages * (sizeof(struct cda_drv_sg_item) + sizeof(struct page *)),
+		in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	if (!memmap)
+		goto out;
+	memmap->owner = owner;
+	memmap->sg_list = (struct cda_drv_sg_item *)((void *)memmap + sizeof(*memmap)); // embedded in the allocation above
+	memmap->pages = (struct page **)((void *)memmap + sizeof(*memmap) + npages * (sizeof(struct cda_drv_sg_item)));
+
+	if (sg_alloc_table(&memmap->sgt, npages,
+		in_atomic() ? GFP_ATOMIC : GFP_KERNEL)) {
+		dev_err(&dev->dev, "Can't alloc sg table\n");
+		goto out;
+	}
+	INIT_LIST_HEAD(&memmap->list);
+	memmap->dev = dev;
+	kobject_init(&memmap->kobj, &memmap_type);
+
+	memmap->vaddr = req_vaddr;
+	memmap->size = req.size;
+	memmap->blk_cnt = npages;
+	idr_preload(in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	spin_lock(&dev->mblk_sl);
+	ret = idr_alloc(&dev->mblk_idr, memmap,
+		1L, 0, in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+	spin_unlock(&dev->mblk_sl);
+	idr_preload_end();
+	if (ret < 0)
+		goto err_idr;
+	memmap->index = req.index = idx = ret;
+
+	ret = pin_user_pages_fast((ulong)req_vaddr, npages,
+					FOLL_WRITE, memmap->pages);
+	if (ret < 0) {
+		dev_err(&dev->pcidev->dev,
+			"Pin user pages failed for addr=0x%p [ret=%d]\n",
+			req_vaddr, ret);
+		goto err_pin;
+	}
+	if (ret != npages) {
+		dev_err(&dev->pcidev->dev, "Unable to pin all user pages for addr=0x%p\n", req_vaddr);
+		unpin_user_pages_dirty_lock(memmap->pages, ret, 0); /* bug fix: release the partially pinned pages */
+		ret = -EFAULT;
+		goto err_pin;
+	}
+
+	ret = cda_perform_mapping(memmap);
+	if (ret) {
+		dev_err(&dev->dev, "Can't map user memory for DMA (size %u)", req.size);
+		goto err_dma_alloc;
+	}
+
+	ret = cda_publish_memmap(memmap);
+	if (ret) {
+		dev_err(&dev->dev, "Can't publish memmap to sysfs: %d", ret);
+		goto err_publish;
+	}
+
+	if (copy_to_user(ureq, &req, sizeof(req))) {
+		ret = -EFAULT;
+		goto err_copy_to_user;
+	}
+
+	spin_lock(&dev->mblk_sl);
+	list_add(&memmap->list, &dev->mem_maps);
+	spin_unlock(&dev->mblk_sl);
+
+	dev_dbg(&dev->dev, "map vaddr %p, pages %d\n", memmap->vaddr, npages);
+	return 0;
+
+err_copy_to_user:
+	cda_hide_memmap(memmap);
+err_publish:
+	cda_release_map(memmap); /* unmaps the SG list AND unpins the pages */
+	goto err_pin; /* bug fix: do not fall through and unpin a second time */
+err_dma_alloc:
+	unpin_user_pages_dirty_lock(memmap->pages, memmap->blk_cnt, 1);
+err_pin:
+	spin_lock(&dev->mblk_sl);
+	idr_remove(&dev->mblk_idr, idx);
+	spin_unlock(&dev->mblk_sl);
+err_idr:
+	kobject_put(&memmap->kobj); /* bug fix: memmap_release() kfrees memmap — return, don't touch it again */
+	return ret;
+out:
+	/* bug fix: pages/sg_list live inside the memmap allocation; kfree(memmap->pages) was an invalid interior free */
+	kfree(memmap);
+	return ret;
+}
+
+static void cda_free_map(struct cda_mmap *memmap) // tear down one mapping: sysfs, DMA unmap/unpin, final ref
+{
+	dev_dbg(&memmap->dev->dev, "unmap vaddr %p, pages %d\n", memmap->vaddr, memmap->blk_cnt);
+	cda_hide_memmap(memmap);
+	cda_release_map(memmap); // NOTE(review): memmap->sgt is never sg_free_table()d anywhere — looks like a leak, confirm
+	kobject_put(&memmap->kobj); // memmap_release() kfrees memmap
+}
+
+int cda_unmap_mem_by_idx(struct cda_dev *dev, void *owner, void __user *ureq) // ioctl: unmap one pinned user buffer by idr index
+{
+	int memidx;
+	struct cda_mmap *memmap;
+
+	if (copy_from_user(&memidx, (void __user *)ureq, sizeof(memidx)))
+		return -EFAULT;
+
+	spin_lock(&dev->mblk_sl);
+	memmap = idr_find(&dev->mblk_idr, memidx);
+	if (memmap && memmap->index == memidx) {
+		if (memmap->owner != owner)
+			dev_warn(&dev->dev, "Unmap buffer by another user\n");
+		idr_replace(&dev->mblk_idr, dev->dummy_blk, memidx); // park placeholder while freeing outside the lock
+		list_del(&memmap->list);
+	} else if (memmap)
+		dev_warn(&dev->dev, "Unmap buffer with index %d, required %d\n", memmap->index, memidx);
+	spin_unlock(&dev->mblk_sl);
+
+	if (!memmap)
+		return -ENOENT; // Somebody may already release this block in parallel
+
+	if (memmap->index) { // index 0 presumably means dummy_blk — nothing to free; TODO confirm
+		cda_free_map(memmap);
+		spin_lock(&dev->mblk_sl);
+		idr_remove(&dev->mblk_idr, memmap->index);
+		spin_unlock(&dev->mblk_sl);
+	}
+	return 0;
+}
+
+void cda_unmmap_dev_mem(struct cda_dev *dev, void *owner) // free all mappings of one owner; owner == NULL frees all ("unmmap" name kept: exported)
+{
+	struct cda_mmap *memmap, *tmp;
+	LIST_HEAD(memmaps);
+
+	spin_lock(&dev->mblk_sl);
+	if (owner == NULL) {
+		list_replace_init(&dev->mem_maps, &memmaps);
+	} else {
+		list_for_each_entry_safe(memmap, tmp, &dev->mem_maps, list) {
+			if (memmap->index > 0L && memmap->owner == owner) {
+				idr_replace(&dev->mblk_idr, dev->dummy_blk, memmap->index); // park placeholder until removed below
+				list_move(&memmap->list, &memmaps);
+			}
+		}
+	}
+	spin_unlock(&dev->mblk_sl);
+	list_for_each_entry_safe(memmap, tmp, &memmaps, list) {
+		// Unmap blocks owned by specified owner or all if owner is NULL
+		cda_free_map(memmap);
+		if (owner != NULL) {
+			spin_lock(&dev->mblk_sl);
+			idr_remove(&dev->mblk_idr, memmap->index);
+			spin_unlock(&dev->mblk_sl);
+		}
+	}
+}
+
+int cda_mems_create(struct cda_dev *cdadev) // create the device attr group and the "mems" sysfs directory
+{
+	int ret;
+
+	ret = sysfs_create_group(&cdadev->dev.kobj, &cda_attr_grp);
+	if (ret)
+		goto err_group;
+	cdadev->kobj_mems = kobject_create_and_add("mems", &cdadev->dev.kobj);
+	if (!cdadev->kobj_mems) {
+		ret = -ENOMEM; /* bug fix: ret was left 0 here, so this failure was reported as success */
+		goto err_mems;
+	}
+	return 0;
+err_mems:
+	sysfs_remove_group(&cdadev->dev.kobj, &cda_attr_grp);
+err_group:
+	dev_err(&cdadev->dev, "Couldn't create sysfs files: %d\n", ret);
+	return ret;
+}
+
+void cda_mems_release(struct cda_dev *dev) // remove the "mems" directory and the device attr group
+{
+	//cda_release_bars(dev);
+	kobject_del(dev->kobj_mems);
+	kobject_put(dev->kobj_mems);
+	sysfs_remove_group(&dev->dev.kobj, &cda_attr_grp);
+}
diff --git a/src/cdares.c b/src/cdares.c
index 2a75df9..102dcfc 100644
--- a/src/cdares.c
+++ b/src/cdares.c
@@ -1,565 +1,578 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
-//
-// CDA linux driver mem blocks/mem maps and interrupt request handler
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the terms and conditions of the GNU General Public License,
-// version 2, as published by the Free Software Foundation.
-//
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/uaccess.h>
-
-#include "cdadrv.h"
-#include "cdaioctl.h"
-
-struct cda_vector {
-	volatile bool busy;
-	wait_queue_head_t wait;
-	atomic_t count;
-	unsigned irq;
-};
-
-struct cda_interrupts {
-	int num;
-	enum int_type type;
-	void *owner;
-	struct cda_vector *vecs;
-	struct msix_entry *msix_entries;
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
-struct cda_bar {
-	struct kobject kobj;
-	/* struct resource *res; */
-	int index;
-	phys_addr_t paddr;
-	phys_addr_t len;
-	void *vaddr;
-	struct cda_dev *dev;
-	struct bin_attribute mmap_attr;
-};
-#else
-#if defined __has_attribute 
-# if __has_attribute (__fallthrough__)
-#  define fallthrough                    __attribute__((__fallthrough__))
-# endif
-#else
-# define fallthrough                    do {} while (0)  /* fallthrough */
-#endif //has_attribute
-#endif // LINUX_VERSION_CODE
-
-static int cda_alloc_msix(struct cda_dev *cdadev, uint32_t rvecs, struct cda_interrupts *ints)
-{
-	int i, ret;
-	struct msix_entry *entries;
-	entries = kcalloc(rvecs, sizeof(struct msix_entry), GFP_KERNEL);
-	if( !entries ) {
-		return -ENOMEM;
-	}
-
-	for (i = 0; i < rvecs; i++) {
-		entries[i].entry = i;
-		entries[i].vector = 0;
-	}
-
-	ret = pci_enable_msix_exact(cdadev->pcidev, entries, rvecs);
-	if( !ret ) {
-		ints->num = rvecs;
-		ints->msix_entries = entries;
-		ints->type = MSIX;
-	} else {
-		kfree(entries);
-	}
-	return ret;
-}
-
-static irqreturn_t cda_isr(int irq, void *priv)
-{
-	struct cda_vector *vec = priv;
-	atomic_inc_return(&vec->count);
-	wake_up(&vec->wait);
-	return IRQ_HANDLED;
-}
-
-int cda_init_interrupts(struct cda_dev *cdadev, void *owner, void __user *ureq)
-{
-	int ret = 0;
-	int nvecs, i;
-	struct cda_vector *vec;
-	struct cda_int_lock req;
-	struct cda_interrupts *ints;
-
-	if( cdadev->ints ) {
-		dev_dbg(&cdadev->pcidev->dev, "Interrupts are already attached");
-		return -EINVAL; // Already attached
-	}
-	if( copy_from_user(&req, (void __user *)ureq, sizeof(req)) )
-		return -EFAULT;
-
-	ints = kcalloc(1, sizeof(struct cda_interrupts), GFP_KERNEL);
-	if( !ints )
-		return -ENOMEM;
-
-	ints->owner = owner;
-	switch( req.inttype ) {
-	case MSIX:
-		ret = cda_alloc_msix(cdadev, req.vectors, ints);
-		if( !ret ) {
-			nvecs = req.vectors;
-			break;
-		}
-		if( ret == -ENOMEM ) {
-			kfree(ints);
-			return ret;
-		}
-		dev_warn(&cdadev->pcidev->dev, "No MSI-X vectors, try MSI. Error %x\n", ret);
-		fallthrough;
-	case MSI:
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
-		nvecs = pci_alloc_irq_vectors(cdadev->pcidev, 1, req.vectors, PCI_IRQ_MSI);
-#else
-		nvecs = pci_alloc_irq_vectors_affinity(cdadev->pcidev, 1, req.vectors, PCI_IRQ_MSI, NULL);
-#endif
-		if( nvecs > 0 ) {
-			ints->num = nvecs;
-			ints->type = MSI;
-			break;
-		}
-		dev_warn(&cdadev->pcidev->dev, "No MSI vectors, try legacy. Error %x\n", nvecs);
-		fallthrough;
-	case LEGACY_INTERRUPT:
-		ints->num = 1;
-		ints->type = LEGACY_INTERRUPT;
-		break;
-	}
-
-	ints->vecs = kcalloc(ints->num, sizeof(struct cda_vector), GFP_KERNEL);
-	if( !ints->vecs ) {
-		ret = -ENOMEM;
-		goto err_alloc_vecs;
-	}
-
-	for( i = 0; i < ints->num; i++ ) {
-		char name[10];
-		vec = &ints->vecs[i];
-		vec->irq = ints->type == MSIX ? 
-					cdadev->ints->msix_entries[i].vector : 
-					cdadev->pcidev->irq + i;
-		snprintf(name, sizeof(name), "cda%02d-%x", cdadev->minor, i);
-		init_waitqueue_head(&vec->wait);
-		atomic_set(&vec->count, 0);
-		ret = request_irq(vec->irq, cda_isr, ints->type == LEGACY_INTERRUPT ? IRQF_SHARED : 0, name, vec);
-		if( ret ) {
-			dev_err(&cdadev->pcidev->dev, "request_irq failed for vector %d: %d", i, ret);
-			break;
-		}
-	}
-
-	// Return interrupt type and vector count to user
-	if( !ret ) {
-		req.inttype = ints->type;
-		req.vectors = ints->num;
-		if( copy_to_user(ureq, &req, sizeof(req)) )
-			ret = -EFAULT;
-	}
-
-	if( !ret ) {
-		cdadev->ints = ints;
-		return ret;
-	}
-	// Fail. Release
-	for( i -= 1; i >= 0; i-- ) {
-		struct cda_vector *vec = &ints->vecs[i];
-		free_irq(vec->irq, vec);
-	}
-	kfree(ints->vecs);
-
-err_alloc_vecs:
-	pci_free_irq_vectors(cdadev->pcidev);
-	kfree(ints->msix_entries);
-	kfree(ints);
-
-	return ret;
-}
-
-int cda_free_irqs(struct cda_dev *cdadev, void *owner)
-{
-	int i;
-	struct cda_interrupts *ints;
-	if( cdadev->ints == NULL )
-		return -EINVAL;
-	if( cdadev->ints->owner != owner ) {
-		dev_dbg(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
-		return -EINVAL;
-	}
-	mutex_lock(&cdadev->ilock);
-	ints = cdadev->ints;
-	cdadev->ints = NULL;
-	mutex_unlock(&cdadev->ilock);
-	if( ints && ints->num > 0 ) {
-		for( i = 0; i < ints->num; i++ ) {
-			struct cda_vector *vec = &ints->vecs[i];
-			while( vec->busy ) {
-				wake_up(&vec->wait);
-				udelay(1);
-			}
-			free_irq(vec->irq, vec);
-		}
-		pci_free_irq_vectors(cdadev->pcidev);
-		kfree(ints->vecs);
-		kfree(ints->msix_entries);
-		kfree(ints);
-	}
-	return 0;
-}
-
-int cda_req_int(struct cda_dev *cdadev, void *owner, void __user *ureq)
-{
-	struct cda_interrupts *ints;
-	struct cda_req_int req;
-	struct cda_vector *vec;
-	unsigned long timeout;
-	unsigned count;
-
-	if (copy_from_user(&req, ureq, sizeof(req)))
-		return -EFAULT;
-
-	if( cdadev->ints == NULL )
-		return -EINVAL;
-
-	if( cdadev->ints->owner != owner ) {
-		dev_err(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
-		return -EINVAL;
-	}
-
-	mutex_lock(&cdadev->ilock);
-	ints = cdadev->ints;
-	if( !ints || (req.vector > ints->num) ) {
-		mutex_unlock(&cdadev->ilock);
-		return -EINVAL;
-	}
-
-	vec = &ints->vecs[req.vector];
-	if (req.reset)
-		atomic_set(&vec->count, 0);
-
-	timeout = nsecs_to_jiffies(req.timeout);
-	count = atomic_xchg(&vec->count, 0);
-	if( !count )
-	{
-		vec->busy = true;
-		mutex_unlock(&cdadev->ilock);
-		timeout = wait_event_interruptible_timeout(vec->wait,
-							(count = atomic_xchg(&vec->count, 0)),
-							timeout);
-		mutex_lock(&cdadev->ilock);
-		vec->busy = false;
-	}
-	mutex_unlock(&cdadev->ilock);
-	dev_dbg(&cdadev->pcidev->dev, "Interrupt vector %d timeout: %ld count %u reset %d\n", req.vector, timeout, count, req.reset);
-	return timeout > 0 ? 0 : timeout == 0 ? -ETIME : timeout;
-}
-
-int cda_cancel_req(struct cda_dev *cdadev, void *owner)
-{
-	int i;
-	struct cda_interrupts *ints;
-	if( cdadev->ints == NULL )
-		return -EINVAL;
-
-	if( cdadev->ints->owner != owner ) {
-		dev_dbg(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
-		return -EINVAL;
-	}
-
-	mutex_lock(&cdadev->ilock);
-	ints = cdadev->ints;
-	for (i = 0; ints && i < ints->num; i++) {
-		if( ints->vecs[i].busy )
-			wake_up(&ints->vecs[i].wait);
-	}
-	mutex_unlock(&cdadev->ilock);
-	return 0;
-}
-
-int cda_sem_aq(struct cda_dev *cdadev, void *owner, void __user *ureq)
-{
-	int res = 0;
-	struct cda_sem_aq req;
-	u64 cur_time;
-	if (copy_from_user(&req, ureq, sizeof(req)))
-		return -EFAULT;
-
-	mutex_lock(&cdadev->ilock);
-	cur_time = ktime_get_ns();
-	if( cdadev->semaphores[req.sem_id] < cur_time ) {
-		cdadev->semaphores[req.sem_id] = cur_time + req.time_ns > cur_time ? cur_time + req.time_ns : 0xFFFFFFFFFFFFFFFFULL;
-		cdadev->sem_owner[req.sem_id] = owner;
-	} else {
-		res = 1;
-	}
-	mutex_unlock(&cdadev->ilock);
-	return res;
-}
-
-int cda_sem_rel(struct cda_dev *cdadev, void *owner, void __user *ureq)
-{
-	int res = 0;
-	int req_sem;
-	if (copy_from_user(&req_sem, ureq, sizeof(req_sem)))
-		return -EFAULT;
-	if( cdadev->sem_owner[req_sem] != owner ) {
-		dev_warn(&cdadev->pcidev->dev, "Semaphore %d is not owned by %p", req_sem, owner);
-	} else {
-		mutex_lock(&cdadev->ilock);
-		cdadev->semaphores[req_sem] = 0ULL;
-		cdadev->sem_owner[req_sem] = NULL;
-		mutex_unlock(&cdadev->ilock);
-	}
-	return res;
-}
-
-void cda_sem_rel_by_owner(struct cda_dev *dev, void *owner)
-{
-	uint32_t i;
-	mutex_lock(&dev->ilock);
-	for( i = 0; i < CDA_MAX_DRV_SEMAPHORES; i++ ) {
-		if( dev->sem_owner[i] == owner ) {
-			dev->semaphores[i] = 0ULL;
-			dev->sem_owner[i] = NULL;
-		}
-	}
-	mutex_unlock(&dev->ilock);
-}
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
-#define to_bar(obj) container_of((obj), struct cda_bar, kobj)
-struct bar_sysfs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct cda_bar *, char *);
-	ssize_t (*store)(struct cda_bar *, char*, size_t);
-};
-
-#define cdadev_bar_attr(_field, _fmt)					\
-	static ssize_t							\
-	bar_##_field##_show(struct cda_bar *bar, char *buf)		\
-	{								\
-		return sprintf(buf, _fmt, bar->_field);			\
-	}								\
-	static struct bar_sysfs_entry bar_##_field##_attr =		\
-		__ATTR(_field, S_IRUGO, bar_##_field##_show, NULL);
-
-#pragma GCC diagnostic ignored "-Wformat"
-cdadev_bar_attr(paddr, "0x%lx\n");
-cdadev_bar_attr(len, "0x%lx\n");
-cdadev_bar_attr(index, "%d\n");
-#pragma GCC diagnostic warning "-Wformat"
-
-static ssize_t bar_attr_show(struct kobject *kobj, struct attribute *attr,
-				 char *buf)
-{
-	struct cda_bar *bar = to_bar(kobj);
-	struct bar_sysfs_entry *entry =
-		container_of(attr, struct bar_sysfs_entry, attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(bar, buf);
-}
-
-static const struct sysfs_ops bar_ops = {
-	.show = bar_attr_show,
-};
-
-static void bar_release(struct kobject *kobj)
-{
-	struct cda_bar *bar = to_bar(kobj);
-	kfree(bar);
-}
-
-static struct attribute *bar_attrs[] = {
-	&bar_paddr_attr.attr,
-	&bar_len_attr.attr,
-	&bar_index_attr.attr,
-	NULL,
-};
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-ATTRIBUTE_GROUPS(bar);
-#endif
-
-static struct kobj_type bar_type = {
-	.sysfs_ops = &bar_ops,
-	.release = bar_release,
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
-	.default_groups = bar_groups,
-#else
-	.default_attrs = bar_attrs,
-#endif
-};
-
-// Secure enable support
-static const struct vm_operations_struct pci_phys_vm_ops = {
-#ifdef CONFIG_HAVE_IOREMAP_PROT
-	.access = generic_access_phys,
-#endif
-};
-
-static int bar_mmap( struct file *file, 
-						struct kobject *kobj, 
-						struct bin_attribute *attr,
-			   			struct vm_area_struct *vma)
-{
-	struct cda_bar *bar = attr->private;
-	unsigned long requested = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
-	unsigned long pages = (unsigned long)bar->len >> PAGE_SHIFT;
-	unsigned long size;
-
-	if (vma->vm_pgoff + requested > pages)
-		return -EINVAL;
-
-	size = ((pci_resource_len(bar->dev->pcidev, bar->index) - 1) >> PAGE_SHIFT) + 1;
-	if (vma->vm_pgoff + vma_pages(vma) > size)
-		return -EINVAL;
-
-	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);
-	vma->vm_pgoff += (pci_resource_start(bar->dev->pcidev, bar->index) >> PAGE_SHIFT);
-	vma->vm_ops = &pci_phys_vm_ops;
-
-	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
-									vma->vm_end - vma->vm_start,
-									vma->vm_page_prot);
-}
-
-int cda_open_bars(struct cda_dev *cdadev)
-{
-	int i;
-	int ret;
-	struct cda_bar *bar;
-	struct resource *res_child;
-	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
-
-	ret = -EINVAL;
-	cdadev->kobj_bars = kobject_create_and_add("bars", &cdadev->dev.kobj);
-	if (!cdadev->kobj_bars)
-		goto err;
-
-	for (i = 0; bars && i < PCI_ROM_RESOURCE; bars >>= 1, i++){
-		struct bin_attribute *mmap_attr;
-		if (!(bars & 1))
-			continue;
-
-		if( !(pci_resource_flags(cdadev->pcidev, i) & IORESOURCE_MEM) )
-			continue;
-		
-		ret = -ENOMEM;
-		bar = kzalloc(sizeof(*bar), GFP_KERNEL);
-		if (!bar)
-			goto err;
-		bar->index = i;
-		bar->paddr = pci_resource_start(cdadev->pcidev, i);
-		bar->len = pci_resource_len(cdadev->pcidev, i);
-		bar->vaddr = NULL;
-		bar->dev = cdadev;
-		cdadev->sysfs_bar[i] = bar;
-		kobject_init(&bar->kobj, &bar_type);
-
-		if( (bar->vaddr = pci_iomap(cdadev->pcidev, i, bar->len)) == NULL )
-			goto err;
-		ret = kobject_add(&bar->kobj, cdadev->kobj_bars, "mmio_bar%d", i);
-		if (ret)
-			goto err;
-
-		mmap_attr = &bar->mmap_attr;
-		mmap_attr->mmap = bar_mmap;
-		mmap_attr->attr.name = "mmap";
-		mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
-		mmap_attr->size = bar->len;
-		mmap_attr->private = bar;
-
-		ret = sysfs_create_bin_file(&bar->kobj, mmap_attr);
-		if (ret) {
-			dev_err(&cdadev->dev, "Can't create kobject file for mmap");
-			goto err;
-		}
-
-		//Drop busy bit
-		res_child = cdadev->pcidev->resource[i].child;
-
-		dev_info(&cdadev->dev, "Store resource %d flag: 0x%lx\n", i, res_child->flags);
-		cdadev->stored_flags[i] = res_child->flags;
-		if (IORESOURCE_BUSY & res_child->flags) {
-			res_child->flags &= ~IORESOURCE_BUSY;
-		}
-	}
-	return 0;
-
-err:
-	cda_release_bars(cdadev);
-	return ret;
-}
-
-void cda_release_bars(struct cda_dev *cdadev)
-{
-	int i;
-	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
-	for( i = 0; i < PCI_ROM_RESOURCE; i++ ) {
-		struct cda_bar *bar = cdadev->sysfs_bar[i];
-		if (!bar)
-			continue;
-
-		cdadev->sysfs_bar[i] = NULL;
-		sysfs_remove_bin_file(&bar->kobj, &bar->mmap_attr);
-		kobject_del(&bar->kobj);
-		kobject_put(&bar->kobj);
-
-		if( bars & (1 << i) ) {
-			cdadev->pcidev->resource[i].child->flags = cdadev->stored_flags[i];
-			dev_info(&cdadev->dev, "Restore resource %d flag: %lx\n", i, cdadev->stored_flags[i]);
-		}
-	}
-
-	kobject_del(cdadev->kobj_bars);
-	kobject_put(cdadev->kobj_bars);
-}
-#else // LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
-int cda_open_bars(struct cda_dev *cdadev)
-{
-	int i;
-	struct resource *res_child;
-	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
-
-	for( i = 0; i < PCI_ROM_RESOURCE; i++ ) {
-		// Drop busy bit
-		if( bars & (1 << i) ) {
-			res_child = cdadev->pcidev->resource[i].child;
-			cdadev->stored_flags[i] = res_child->flags;
-			dev_info(&cdadev->dev, "Store resource %d flag: 0x%lx\n", i, res_child->flags);
-			if( IORESOURCE_BUSY & res_child->flags ) {
-				res_child->flags &= ~IORESOURCE_BUSY;
-				dev_dbg(&cdadev->dev, "Drop busy bit for resource %d", i);
-			}
-		}
-	}
-	return 0;
-}
-
-void cda_release_bars(struct cda_dev *cdadev)
-{
-	int i;
-	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
-	for( i = 0; i < PCI_ROM_RESOURCE; i++ ) {
-		if( bars & (1 << i) ) {
-			cdadev->pcidev->resource[i].child->flags = cdadev->stored_flags[i];
-			dev_info(&cdadev->dev, "Restore resource %d flag: %lx\n", i, cdadev->stored_flags[i]);
-		}
-	}
-}
-#endif
+// SPDX-License-Identifier: GPL-2.0
+// Copyright(c) 2020 DeGirum Corp., Egor Pomozov.
+//
+// CDA linux driver mem blocks/mem maps and interrupt request handler
+//
+// This program is free software; you can redistribute it and/or modify it
+// under the terms and conditions of the GNU General Public License,
+// version 2, as published by the Free Software Foundation.
+//
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include "cdadrv.h"
+#include "cdaioctl.h"
+
+struct cda_vector { // per-IRQ-vector wait state
+	volatile bool busy; // waiter parked on this vector — NOTE(review): volatile is not a sync primitive, confirm ordering
+	wait_queue_head_t wait;
+	atomic_t count; // interrupts seen since last consume
+	unsigned int irq;
+};
+
+struct cda_interrupts { // active interrupt configuration for one device
+	int num; // number of entries in vecs[]
+	enum int_type type; // MSIX / MSI / LEGACY_INTERRUPT
+	void *owner; // opaque owner cookie — presumably the open file handle, TODO confirm
+	struct cda_vector *vecs;
+	struct msix_entry *msix_entries; // allocated only for MSIX
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+struct cda_bar {
+	struct kobject kobj;		/* sysfs node under <dev>/bars/mmio_bar%d */
+	int index;			/* PCI BAR number */
+	phys_addr_t paddr;		/* BAR physical base address */
+	phys_addr_t len;		/* BAR length in bytes */
+	void *vaddr;			/* pci_iomap() mapping of the BAR */
+	struct cda_dev *dev;		/* owning device */
+	struct bin_attribute mmap_attr;	/* "mmap" bin file exposing the BAR */
+};
+#else
+#if defined __has_attribute
+# if __has_attribute(__fallthrough__)
+#  define fallthrough                    __attribute__((__fallthrough__))
+# endif
+#endif
+#ifndef fallthrough /* fix: was undefined when __has_attribute exists but lacks __fallthrough__ */
+# define fallthrough                    do {} while (0)  /* fallthrough */
+#endif //has_attribute
+#endif // LINUX_VERSION_CODE
+
+static int cda_alloc_msix(struct cda_dev *cdadev, uint32_t rvecs, struct cda_interrupts *ints) /* enable exactly rvecs MSI-X vectors; 0 on success or negative errno */
+{
+	int i, ret;
+	struct msix_entry *entries;
+
+	entries = kcalloc(rvecs, sizeof(struct msix_entry), GFP_KERNEL);
+	if (!entries)
+		return -ENOMEM;
+
+	for (i = 0; i < rvecs; i++) {
+		entries[i].entry = i;	/* request consecutive table entries 0..rvecs-1 */
+		entries[i].vector = 0;	/* filled in by pci_enable_msix_exact() on success */
+	}
+
+	ret = pci_enable_msix_exact(cdadev->pcidev, entries, rvecs);
+	if (!ret) {
+		ints->num = rvecs;
+		ints->msix_entries = entries;	/* ownership passes to ints; freed on teardown */
+		ints->type = MSIX;
+	} else {
+		kfree(entries);
+	}
+	return ret;
+}
+
+static irqreturn_t cda_isr(int irq, void *priv) /* top-half: count the interrupt and wake any waiter */
+{
+	struct cda_vector *vec = priv;
+
+	atomic_inc_return(&vec->count); /* NOTE(review): return value unused; _return form presumably chosen for its implied barrier before wake_up — confirm */
+	wake_up(&vec->wait);
+	return IRQ_HANDLED;
+}
+
+int cda_init_interrupts(struct cda_dev *cdadev, void *owner, void __user *ureq)
+{
+	int ret = 0;
+	int nvecs, i;
+	struct cda_vector *vec;
+	struct cda_int_lock req;
+	struct cda_interrupts *ints;
+
+	if (cdadev->ints) {
+		dev_dbg(&cdadev->pcidev->dev, "Interrupts are already attached");
+		return -EINVAL; // Already attached
+	}
+	if (copy_from_user(&req, (void __user *)ureq, sizeof(req)))
+		return -EFAULT;
+
+	ints = kcalloc(1, sizeof(struct cda_interrupts), GFP_KERNEL);
+	if (!ints)
+		return -ENOMEM;
+
+	ints->owner = owner;
+	switch (req.inttype) {
+	case MSIX:
+		ret = cda_alloc_msix(cdadev, req.vectors, ints);
+		if (!ret) {
+			nvecs = req.vectors;
+			break;
+		}
+		if (ret == -ENOMEM) {
+			kfree(ints);
+			return ret;
+		}
+		dev_warn(&cdadev->pcidev->dev, "No MSI-X vectors, try MSI. Error %x\n", ret);
+		fallthrough;
+	case MSI:
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+		nvecs = pci_alloc_irq_vectors(cdadev->pcidev, 1, req.vectors, PCI_IRQ_MSI);
+#else
+		nvecs = pci_alloc_irq_vectors_affinity(cdadev->pcidev, 1, req.vectors, PCI_IRQ_MSI, NULL);
+#endif
+		if (nvecs > 0) {
+			ints->num = nvecs;
+			ints->type = MSI;
+			break;
+		}
+		dev_warn(&cdadev->pcidev->dev, "No MSI vectors, try legacy. Error %x\n", nvecs);
+		fallthrough;
+	case LEGACY_INTERRUPT:
+		ints->num = 1;
+		ints->type = LEGACY_INTERRUPT;
+		break;
+	}
+
+	ints->vecs = kcalloc(ints->num, sizeof(struct cda_vector), GFP_KERNEL);
+	if (!ints->vecs) {
+		ret = -ENOMEM;
+		goto err_alloc_vecs;
+	}
+
+	for (i = 0; i < ints->num; i++) {
+		char name[10]; /* NOTE(review): request_irq() retains this pointer; a stack buffer goes stale — should live in cda_vector */
+
+		vec = &ints->vecs[i];
+		vec->irq = ints->type == MSIX ?
+					ints->msix_entries[i].vector : /* fix: was cdadev->ints->msix_entries, but cdadev->ints is NULL until attach below */
+					cdadev->pcidev->irq + i;
+		snprintf(name, sizeof(name), "cda%02d-%x", cdadev->minor, i);
+		init_waitqueue_head(&vec->wait);
+		atomic_set(&vec->count, 0);
+		ret = request_irq(vec->irq, cda_isr, ints->type == LEGACY_INTERRUPT ? IRQF_SHARED : 0, name, vec);
+		if (ret) {
+			dev_err(&cdadev->pcidev->dev, "request_irq failed for vector %d: %d", i, ret);
+			break;
+		}
+	}
+
+	// Return interrupt type and vector count to user
+	if (!ret) {
+		req.inttype = ints->type;
+		req.vectors = ints->num;
+		if (copy_to_user(ureq, &req, sizeof(req)))
+			ret = -EFAULT;
+	}
+
+	if (!ret) {
+		cdadev->ints = ints;
+		return ret;
+	}
+	// Fail. Release
+	for (i -= 1; i >= 0; i--) {
+		struct cda_vector *vec = &ints->vecs[i];
+
+		free_irq(vec->irq, vec);
+	}
+	kfree(ints->vecs);
+
+err_alloc_vecs:
+	pci_free_irq_vectors(cdadev->pcidev);
+	kfree(ints->msix_entries);
+	kfree(ints);
+
+	return ret;
+}
+
+int cda_free_irqs(struct cda_dev *cdadev, void *owner)
+{
+	int i;
+	struct cda_interrupts *ints;
+
+	/* Detach under the lock: checking cdadev->ints/owner before locking was
+	 * racy — two callers could both pass the check and double-free. */
+	mutex_lock(&cdadev->ilock);
+	ints = cdadev->ints;
+	if (ints == NULL || ints->owner != owner) {
+		mutex_unlock(&cdadev->ilock);
+		dev_dbg(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
+		return -EINVAL;
+	}
+	cdadev->ints = NULL;
+	mutex_unlock(&cdadev->ilock);
+	if (ints->num > 0) {
+		for (i = 0; i < ints->num; i++) {
+			struct cda_vector *vec = &ints->vecs[i];
+			while (vec->busy) { /* drain waiters still blocked in cda_req_int() */
+				wake_up(&vec->wait);
+				udelay(1);
+			}
+			free_irq(vec->irq, vec);
+		}
+		pci_free_irq_vectors(cdadev->pcidev);
+		kfree(ints->vecs);
+		kfree(ints->msix_entries);
+	}
+	kfree(ints); /* fix: was leaked when num == 0 */
+	return 0;
+}
+
+int cda_req_int(struct cda_dev *cdadev, void *owner, void __user *ureq)
+{
+	struct cda_interrupts *ints;
+	struct cda_req_int req;
+	struct cda_vector *vec;
+	long timeout; /* fix: signed — wait_event_interruptible_timeout() may return -ERESTARTSYS */
+	unsigned int count;
+
+	if (copy_from_user(&req, ureq, sizeof(req)))
+		return -EFAULT;
+
+	if (cdadev->ints == NULL)
+		return -EINVAL;
+
+	if (cdadev->ints->owner != owner) {
+		dev_err(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
+		return -EINVAL;
+	}
+
+	mutex_lock(&cdadev->ilock);
+	ints = cdadev->ints;
+	if (!ints || ((uint32_t)req.vector >= (uint32_t)ints->num)) { /* fix: was '>', off-by-one read past vecs[] */
+		mutex_unlock(&cdadev->ilock);
+		return -EINVAL;
+	}
+
+	vec = &ints->vecs[req.vector];
+	if (req.reset)
+		atomic_set(&vec->count, 0);
+
+	timeout = nsecs_to_jiffies(req.timeout);
+	count = atomic_xchg(&vec->count, 0);
+	if (!count) {
+		vec->busy = true;
+		mutex_unlock(&cdadev->ilock);
+		timeout = wait_event_interruptible_timeout(vec->wait,
+							(count = atomic_xchg(&vec->count, 0)),
+							timeout);
+		mutex_lock(&cdadev->ilock);
+		vec->busy = false;
+	}
+	mutex_unlock(&cdadev->ilock);
+	dev_dbg(&cdadev->pcidev->dev, "Interrupt vector %d timeout: %ld count %u reset %d\n", req.vector, timeout, count, req.reset);
+	return count ? 0 : timeout == 0 ? -ETIME : (int)timeout; /* fix: success iff an interrupt was consumed; propagates -ERESTARTSYS */
+}
+
+int cda_cancel_req(struct cda_dev *cdadev, void *owner)
+{
+	int i;
+	struct cda_interrupts *ints;
+
+	/* Validate under the lock: cdadev->ints may be detached concurrently
+	 * by cda_free_irqs() (the unlocked check was a TOCTOU race). */
+	mutex_lock(&cdadev->ilock);
+	ints = cdadev->ints;
+	if (ints == NULL) { mutex_unlock(&cdadev->ilock); return -EINVAL; }
+	if (ints->owner != owner) {
+		mutex_unlock(&cdadev->ilock);
+		dev_dbg(&cdadev->pcidev->dev, "Interrupts are not owned by %p", owner);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < ints->num; i++) {
+		if (ints->vecs[i].busy)
+			wake_up(&ints->vecs[i].wait);
+	}
+	mutex_unlock(&cdadev->ilock);
+	return 0;
+}
+
+int cda_sem_aq(struct cda_dev *cdadev, void *owner, void __user *ureq)
+{
+	int res = 0;
+	struct cda_sem_aq req;
+	u64 cur_time;
+	if (copy_from_user(&req, ureq, sizeof(req)))
+		return -EFAULT;
+	if ((uint32_t)req.sem_id >= CDA_MAX_DRV_SEMAPHORES)
+		return -EINVAL; /* fix: reject out-of-bounds user-controlled index */
+	mutex_lock(&cdadev->ilock);
+	cur_time = ktime_get_ns();
+	if (cdadev->semaphores[req.sem_id] < cur_time) {
+		cdadev->semaphores[req.sem_id] = cur_time + req.time_ns > cur_time ? cur_time + req.time_ns : 0xFFFFFFFFFFFFFFFFULL;
+		cdadev->sem_owner[req.sem_id] = owner;
+	} else {
+		res = 1;
+	}
+	mutex_unlock(&cdadev->ilock);
+	return res;
+}
+
+int cda_sem_rel(struct cda_dev *cdadev, void *owner, void __user *ureq)
+{
+	int res = 0;
+	int req_sem;
+	if (copy_from_user(&req_sem, ureq, sizeof(req_sem)))
+		return -EFAULT;
+	if (req_sem < 0 || req_sem >= CDA_MAX_DRV_SEMAPHORES) return -EINVAL; /* fix: reject out-of-bounds user index */
+	if (cdadev->sem_owner[req_sem] != owner) {
+		dev_warn(&cdadev->pcidev->dev, "Semaphore %d is not owned by %p", req_sem, owner);
+	} else {
+		mutex_lock(&cdadev->ilock);
+		cdadev->semaphores[req_sem] = 0ULL;
+		cdadev->sem_owner[req_sem] = NULL;
+		mutex_unlock(&cdadev->ilock);
+	}
+	return res;
+}
+
+void cda_sem_rel_by_owner(struct cda_dev *dev, void *owner) /* release every semaphore currently held by @owner */
+{
+	uint32_t i;
+
+	mutex_lock(&dev->ilock);
+	for (i = 0; i < CDA_MAX_DRV_SEMAPHORES; i++) {
+		if (dev->sem_owner[i] == owner) {
+			dev->semaphores[i] = 0ULL;	/* expiry time 0 == free */
+			dev->sem_owner[i] = NULL;
+		}
+	}
+	mutex_unlock(&dev->ilock);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)
+#define to_bar(obj) container_of((obj), struct cda_bar, kobj) /* kobject -> enclosing cda_bar */
+struct bar_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct cda_bar *, char *);
+	ssize_t (*store)(struct cda_bar *, char*, size_t); /* unused: all attrs are read-only */
+};
+
+#define cdadev_bar_attr(_field, _fmt)					\
+	static ssize_t							\
+	bar_##_field##_show(struct cda_bar *bar, char *buf)		\
+	{								\
+		return sprintf(buf, _fmt, bar->_field);			\
+	}								\
+	static struct bar_sysfs_entry bar_##_field##_attr =		\
+		__ATTR(_field, S_IRUGO, bar_##_field##_show, NULL)
+
+#pragma GCC diagnostic ignored "-Wformat" /* phys_addr_t may be u64 while format is %lx */
+cdadev_bar_attr(paddr, "0x%lx\n");
+cdadev_bar_attr(len, "0x%lx\n");
+cdadev_bar_attr(index, "%d\n");
+#pragma GCC diagnostic warning "-Wformat" /* NOTE(review): restores to 'warning' even if it was an error before */
+
+static ssize_t bar_attr_show(struct kobject *kobj, struct attribute *attr,
+				 char *buf) /* dispatch a sysfs read to the matching bar_sysfs_entry */
+{
+	struct cda_bar *bar = to_bar(kobj);
+	struct bar_sysfs_entry *entry =
+		container_of(attr, struct bar_sysfs_entry, attr);
+
+	if (!entry->show)
+		return -EIO;
+
+	return entry->show(bar, buf);
+}
+
+static const struct sysfs_ops bar_ops = {
+	.show = bar_attr_show,	/* read-only: no .store */
+};
+
+static void bar_release(struct kobject *kobj) /* final kobject_put() frees the cda_bar */
+{
+	struct cda_bar *bar = to_bar(kobj);
+
+	kfree(bar);
+}
+
+static struct attribute *bar_attrs[] = {
+	&bar_paddr_attr.attr,
+	&bar_len_attr.attr,
+	&bar_index_attr.attr,
+	NULL,
+};
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+ATTRIBUTE_GROUPS(bar); /* generates bar_groups[] from bar_attrs for .default_groups */
+#endif
+
+static struct kobj_type bar_type = { /* fix: non-const — kobject_init() takes a non-const ktype before v6.2 */
+	.sysfs_ops = &bar_ops,
+	.release = bar_release,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 18, 0)
+	.default_groups = bar_groups,
+#else
+	.default_attrs = bar_attrs,
+#endif
+};
+
+// Secure enable support
+static const struct vm_operations_struct pci_phys_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+	.access = generic_access_phys,	/* lets ptrace/gdb access the mapped BAR */
+#endif
+};
+
+static int bar_mmap(struct file *file,
+		    struct kobject *kobj,
+		    struct bin_attribute *attr,
+		    struct vm_area_struct *vma)
+{
+	struct cda_bar *bar = attr->private;
+	unsigned long requested = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long pages = (unsigned long)bar->len >> PAGE_SHIFT;
+	unsigned long size;
+
+	if (vma->vm_pgoff + requested > pages) /* request must fit in the cached BAR length */
+		return -EINVAL;
+
+	size = ((pci_resource_len(bar->dev->pcidev, bar->index) - 1) >> PAGE_SHIFT) + 1; /* round up to whole pages */
+	if (vma->vm_pgoff + vma_pages(vma) > size) /* NOTE(review): largely duplicates the check above */
+		return -EINVAL;
+
+	vma->vm_page_prot = pgprot_device(vma->vm_page_prot); /* uncached device mapping */
+	vma->vm_pgoff += (pci_resource_start(bar->dev->pcidev, bar->index) >> PAGE_SHIFT);
+	vma->vm_ops = &pci_phys_vm_ops;
+
+	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+				  vma->vm_end - vma->vm_start,
+				  vma->vm_page_prot);
+}
+
+int cda_open_bars(struct cda_dev *cdadev) /* expose each memory BAR as a sysfs kobject with an mmap-able bin file */
+{
+	int i;
+	int ret;
+	struct cda_bar *bar;
+	struct resource *res_child;
+	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
+
+	ret = -EINVAL;
+	cdadev->kobj_bars = kobject_create_and_add("bars", &cdadev->dev.kobj);
+	if (!cdadev->kobj_bars)
+		goto err;
+
+	for (i = 0; bars && i < PCI_ROM_RESOURCE; bars >>= 1, i++) {
+		struct bin_attribute *mmap_attr;
+
+		if (!(bars & 1))
+			continue;
+
+		if (!(pci_resource_flags(cdadev->pcidev, i) & IORESOURCE_MEM))
+			continue;
+
+		ret = -ENOMEM;
+		bar = kzalloc(sizeof(*bar), GFP_KERNEL);
+		if (!bar)
+			goto err;
+		bar->index = i;
+		bar->paddr = pci_resource_start(cdadev->pcidev, i);
+		bar->len = pci_resource_len(cdadev->pcidev, i);
+		bar->vaddr = NULL;
+		bar->dev = cdadev;
+		cdadev->sysfs_bar[i] = bar;	/* registered before add so err path can clean up */
+		kobject_init(&bar->kobj, &bar_type);
+
+		bar->vaddr = pci_iomap(cdadev->pcidev, i, bar->len);
+		if (!bar->vaddr)
+			goto err;
+		ret = kobject_add(&bar->kobj, cdadev->kobj_bars, "mmio_bar%d", i);
+		if (ret)
+			goto err;
+
+		mmap_attr = &bar->mmap_attr;
+		mmap_attr->mmap = bar_mmap;
+		mmap_attr->attr.name = "mmap";
+		mmap_attr->attr.mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH; /* NOTE(review): world read/write access to device MMIO — confirm intended */
+		mmap_attr->size = bar->len;
+		mmap_attr->private = bar;
+
+		ret = sysfs_create_bin_file(&bar->kobj, mmap_attr);
+		if (ret) {
+			dev_err(&cdadev->dev, "Can't create kobject file for mmap");
+			goto err;
+		}
+
+		// Save original flags, then drop the busy bit so the region can be mapped
+		res_child = cdadev->pcidev->resource[i].child; /* NOTE(review): child may be NULL if region never claimed — would oops; verify */
+
+		dev_info(&cdadev->dev, "Store resource %d flag: 0x%lx\n", i, res_child->flags);
+		cdadev->stored_flags[i] = res_child->flags;
+		if (IORESOURCE_BUSY & res_child->flags)
+			res_child->flags &= ~IORESOURCE_BUSY;
+	}
+	return 0;
+
+err:
+	cda_release_bars(cdadev);	/* best-effort teardown of partially created bars */
+	return ret;
+}
+
+void cda_release_bars(struct cda_dev *cdadev) /* tear down sysfs bar objects and restore saved resource flags */
+{
+	int i;
+	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
+
+	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+		struct cda_bar *bar = cdadev->sysfs_bar[i];
+
+		if (!bar)
+			continue;
+
+		cdadev->sysfs_bar[i] = NULL;
+		sysfs_remove_bin_file(&bar->kobj, &bar->mmap_attr); /* NOTE(review): may warn if the err path ran before the file was created */
+		kobject_del(&bar->kobj);
+		kobject_put(&bar->kobj);	/* last ref: bar_release() frees bar */
+
+		if (bars & (1 << i)) {
+			cdadev->pcidev->resource[i].child->flags = cdadev->stored_flags[i];
+			dev_info(&cdadev->dev, "Restore resource %d flag: %lx\n", i, cdadev->stored_flags[i]);
+		}
+	}
+
+	kobject_del(cdadev->kobj_bars);
+	kobject_put(cdadev->kobj_bars);
+}
+#else // LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0)
+int cda_open_bars(struct cda_dev *cdadev) /* pre-5.4 variant: only saves flags and drops the busy bit; no sysfs */
+{
+	int i;
+	struct resource *res_child;
+	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
+
+	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+		// Drop busy bit
+		if (bars & (1 << i)) {
+			res_child = cdadev->pcidev->resource[i].child; /* NOTE(review): child may be NULL if region never claimed — verify */
+			cdadev->stored_flags[i] = res_child->flags;
+			dev_info(&cdadev->dev, "Store resource %d flag: 0x%lx\n", i, res_child->flags);
+			if (IORESOURCE_BUSY & res_child->flags) {
+				res_child->flags &= ~IORESOURCE_BUSY;
+				dev_dbg(&cdadev->dev, "Drop busy bit for resource %d", i);
+			}
+		}
+	}
+	return 0;
+}
+
+void cda_release_bars(struct cda_dev *cdadev) /* pre-5.4 variant: restore the flags saved by cda_open_bars() */
+{
+	int i;
+	int bars = pci_select_bars(cdadev->pcidev, IORESOURCE_MEM);
+
+	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
+		if (bars & (1 << i)) {
+			cdadev->pcidev->resource[i].child->flags = cdadev->stored_flags[i];
+			dev_info(&cdadev->dev, "Restore resource %d flag: %lx\n", i, cdadev->stored_flags[i]);
+		}
+	}
+}
+#endif
openSUSE Build Service is sponsored by