File Make_RBD_user-id_configurable.patch of Package ganeti
commit 738de7daa8227a6e9c2e102d22adeb8ea0319393
Author: Rudolph Bott <r@spam.wtf>
Date: Fri Jan 9 23:21:08 2026 +0100
Make RBD user-id configurable (#1680)
This picks up where PR #1515 left and adds the missing bits. Most of the
work has been done by @oleeander.
The user id is used by ceph to determine the keyring to use for
authentication. By default the admin keyring is used, which may
not be desirable. Example usage:
$ gnt-cluster modify -D rbd:user-id=foobar
Signed-off-by: Rudolph Bott <r@spam.wtf>
Signed-off-by: Sascha Lucas <sascha_lucas@web.de>
Co-authored-by: Oleander <oleander@oleander.cc>
Co-authored-by: Sascha Lucas <sascha_lucas@web.de>
diff --git a/doc/design-ceph-ganeti-support.rst b/doc/design-ceph-ganeti-support.rst
index facaab56d..d3c0abe6c 100644
--- a/doc/design-ceph-ganeti-support.rst
+++ b/doc/design-ceph-ganeti-support.rst
@@ -76,7 +76,14 @@ Updated commands
$ gnt-instance info
``access:userspace/kernelspace`` will be added to Disks category. This
-output applies to KVM based instances only.
+output applies to KVM based instances only::
+
+ $ gnt-cluster modify -D rbd:user-id=foobar
+
+The user id for ceph authentication is an optional setting. If it is not
+provided, then no special option is passed to ceph. If it is provided,
+then all ceph commands are run with the ``--id`` option and the
+configured username.
Ceph configuration on Ganeti nodes
==================================
@@ -124,17 +131,17 @@ appropriate services on the newly assigned node.
Updated Commands
----------------
-Following are the affected commands.::
+Following are the affected commands::
$ gnt-cluster init -S ceph:disk=/dev/sdb,option=value...
During cluster initialization, ceph specific options are provided which
-apply at cluster-level.::
+apply at cluster-level::
$ gnt-cluster modify -S ceph:option=value2...
For now, cluster modification will be allowed when there is no
-initialized storage cluster.::
+initialized storage cluster::
$ gnt-storage init-distributed-storage -s{--storage-type} ceph \
<node-group>
@@ -142,18 +149,18 @@ initialized storage cluster.::
Ensure that no other node-group is configured as distributed storage
cluster and configure ceph on the specified node-group. If there is no
node in the node-group, it'll only be marked as distributed storage
-enabled and no action will be taken.::
+enabled and no action will be taken::
$ gnt-group assign-nodes <group> <node>
It ensures that the node is offline if the node-group specified is
distributed storage capable. Ceph configuration on the newly assigned
-node is not performed at this step.::
+node is not performed at this step::
$ gnt-node --offline
If the node is part of storage node-group, an offline call will stop/remove
-ceph daemons.::
+ceph daemons::
$ gnt-node add --readd
diff --git a/lib/hypervisor/hv_kvm/kvm_utils.py b/lib/hypervisor/hv_kvm/kvm_utils.py
index 4570ff994..6dab51799 100644
--- a/lib/hypervisor/hv_kvm/kvm_utils.py
+++ b/lib/hypervisor/hv_kvm/kvm_utils.py
@@ -37,7 +37,12 @@ _BLOCKDEV_URI_REGEX_GLUSTER = (
r"^gluster:\/\/(?P<host>[a-z0-9-.]+):"
r"(?P<port>\d+)/(?P<volume>[^/]+)/(?P<path>.+)$"
)
-_BLOCKDEV_URI_REGEX_RBD = r"^rbd:(?P<pool>\w+)/(?P<image>[a-z0-9-\.]+)$"
+
+# rbd:{pool-name}/{image-name}[@snapshot-name][:opt1=val1][:opt2=val2...]
+_BLOCKDEV_URI_REGEX_RBD = (
+ r"^rbd:(?P<pool>\w+)/(?P<image>[a-z0-9-\.]+)"
+ r"(?P<id_opt>:id=[a-z0-9-\._]+)?$"
+)
def TranslateBoolToOnOff(value):
"""Converts a given boolean to 'on'|'off' for use in QEMUs cmdline
@@ -75,11 +80,17 @@ def ParseStorageUriToBlockdevParam(uri):
}
match = re.match(_BLOCKDEV_URI_REGEX_RBD, uri)
if match is not None:
- return {
+ param = {
"driver": "rbd",
"pool": match.group("pool"),
"image": match.group("image")
}
+
+ id_opt = match.group("id_opt")
+ if id_opt is not None:
+ param["user"] = id_opt.partition("=")[2]
+
+ return param
raise errors.HypervisorError("Unsupported storage URI scheme: %s" % (uri))
@@ -137,4 +148,4 @@ def GetCacheSettings(cache_type, dev_type):
elif cache_type == constants.HT_CACHE_WTHROUGH:
return False, False, False
else:
- raise errors.HypervisorError("Invalid KVM cache setting '%s'" % cache_type)
\ No newline at end of file
+ raise errors.HypervisorError("Invalid KVM cache setting '%s'" % cache_type)
diff --git a/lib/storage/bdev.py b/lib/storage/bdev.py
index 901a8045a..c13b197b6 100644
--- a/lib/storage/bdev.py
+++ b/lib/storage/bdev.py
@@ -906,8 +906,8 @@ class RADOSBlockDevice(base.BlockDev):
rbd_name = unique_id[1]
# Provision a new rbd volume (Image) inside the RADOS cluster.
- cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
- rbd_name, "--size", "%s" % size]
+ cmd = cls.MakeRbdCmd(params, ["create", "-p", rbd_pool, rbd_name,
+ "--size", str(size)])
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("rbd creation failed (%s): %s",
@@ -931,7 +931,8 @@ class RADOSBlockDevice(base.BlockDev):
self.Shutdown()
# Remove the actual Volume (Image) from the RADOS cluster.
- cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
+ cmd = self.__class__.MakeRbdCmd(self.params, ["rm", "-p", rbd_pool,
+ rbd_name])
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
@@ -971,6 +972,15 @@ class RADOSBlockDevice(base.BlockDev):
return True
+ @classmethod
+ def MakeRbdCmd(cls, params, cmd):
+ """Add user id option to rbd command if configured.
+ """
+ if params.get(constants.RBD_USER_ID, ""):
+ cmd.extend(["--id", str(params[constants.RBD_USER_ID])])
+
+ return [constants.RBD_CMD] + cmd
+
def _MapVolumeToBlockdev(self, unique_id):
"""Maps existing rbd volumes to block devices.
@@ -990,7 +1000,7 @@ class RADOSBlockDevice(base.BlockDev):
return rbd_dev
# The mapping doesn't exist. Create it.
- map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
+ map_cmd = self.__class__.MakeRbdCmd(self.params, ["map", "-p", pool, name])
result = utils.RunCmd(map_cmd)
if result.failed:
base.ThrowError("rbd map failed (%s): %s",
@@ -1020,12 +1030,7 @@ class RADOSBlockDevice(base.BlockDev):
try:
# Newer versions of the rbd tool support json output formatting. Use it
# if available.
- showmap_cmd = [
- constants.RBD_CMD,
- "showmapped",
- "--format",
- "json"
- ]
+ showmap_cmd = cls.MakeRbdCmd({}, ["showmapped", "--format", "json"])
result = utils.RunCmd(showmap_cmd)
if result.failed:
logging.error("rbd JSON output formatting returned error (%s): %s,"
@@ -1037,7 +1042,7 @@ class RADOSBlockDevice(base.BlockDev):
except RbdShowmappedJsonError:
# For older versions of rbd, we have to parse the plain / text output
# manually.
- showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
+ showmap_cmd = cls.MakeRbdCmd({}, ["showmapped", "-p", pool])
result = utils.RunCmd(showmap_cmd)
if result.failed:
base.ThrowError("rbd showmapped failed (%s): %s",
@@ -1179,7 +1184,8 @@ class RADOSBlockDevice(base.BlockDev):
if rbd_dev:
# The mapping exists. Unmap the rbd device.
- unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
+ unmap_cmd = self.__class__.MakeRbdCmd(self.params, ["unmap",
+ str(rbd_dev)])
result = utils.RunCmd(unmap_cmd)
if result.failed:
base.ThrowError("rbd unmap failed (%s): %s",
@@ -1223,8 +1229,9 @@ class RADOSBlockDevice(base.BlockDev):
new_size = self.size + amount
# Resize the rbd volume (Image) inside the RADOS cluster.
- cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
- rbd_name, "--size", "%s" % new_size]
+ cmd = self.__class__.MakeRbdCmd(self.params, ["resize", "-p", rbd_pool,
+ rbd_name, "--size",
+ "%s" % new_size])
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("rbd resize failed (%s): %s",
@@ -1259,16 +1266,16 @@ class RADOSBlockDevice(base.BlockDev):
self._UnmapVolumeFromBlockdev(self.unique_id)
# Remove the actual Volume (Image) from the RADOS cluster.
- cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
+ cmd = self.__class__.MakeRbdCmd(self.params, ["rm", "-p", rbd_pool,
+ rbd_name])
result = utils.RunCmd(cmd)
if result.failed:
base.ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
result.fail_reason, result.output)
# We use "-" for importing from stdin
- return [constants.RBD_CMD, "import",
- "-p", rbd_pool,
- "-", rbd_name]
+ return self.__class__.MakeRbdCmd(self.params, ["import", "-p", rbd_pool,
+ "-", rbd_name])
def Export(self):
"""Builds the shell command for exporting data from device.
@@ -1284,9 +1291,8 @@ class RADOSBlockDevice(base.BlockDev):
rbd_name = self.unique_id[1]
# We use "-" for exporting to stdout.
- return [constants.RBD_CMD, "export",
- "-p", rbd_pool,
- rbd_name, "-"]
+ return self.__class__.MakeRbdCmd(self.params, ["export", "-p", rbd_pool,
+ rbd_name, "-"])
def GetUserspaceAccessUri(self, hypervisor):
"""Generate KVM userspace URIs to be used as `-drive file` settings.
@@ -1295,7 +1301,10 @@ class RADOSBlockDevice(base.BlockDev):
"""
if hypervisor == constants.HT_KVM:
- return "rbd:" + self.rbd_pool + "/" + self.rbd_name
+ uri = "rbd:" + self.rbd_pool + "/" + self.rbd_name
+ if self.params.get(constants.RBD_USER_ID, ""):
+ uri += ":id=%s" % self.params[constants.RBD_USER_ID]
+ return uri
else:
base.ThrowError("Hypervisor %s doesn't support RBD userspace access" %
hypervisor)
diff --git a/lib/tools/cfgupgrade.py b/lib/tools/cfgupgrade.py
index 709409978..c0c882de0 100644
--- a/lib/tools/cfgupgrade.py
+++ b/lib/tools/cfgupgrade.py
@@ -762,11 +762,42 @@ class CfgUpgrade(object):
if variant in hvparams:
hvparams[variant]["xen_cmd"] = "xl"
+ @OrFail("Removing the rbd/user-id parameter")
+ def DowngradeRbdUserId(self):
+ """Remove rbd/user-id parameters if set
+
+ """
+ def _removeRbdUserId(data):
+ diskparams = data.get("diskparams", None)
+ if diskparams is None:
+ return
+
+ rbdparams = diskparams.get("rbd", None)
+ if rbdparams is None:
+ return
+
+ if "user-id" in rbdparams:
+ rbdparams.pop("user-id")
+
+ # pylint: disable=E1103
+ # Because config_data is a dictionary which has the get method.
+ cluster = self.config_data.get("cluster", None)
+ if cluster is None:
+ raise Error("Can't find the cluster entry in the configuration")
+
+ _removeRbdUserId(cluster)
+
+ nodegroups = self.config_data.get("nodegroups", None)
+ if nodegroups is None:
+ raise Error("Can't find the nodegroups entry in the configuration")
+ for group in nodegroups.values(): _removeRbdUserId(group)
+
def DowngradeAll(self):
self.config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
DOWNGRADE_MINOR, 0)
self.DowngradeXenSettings()
+ self.DowngradeRbdUserId()
return not self.errors
def _ComposePaths(self):
diff --git a/man/gnt-cluster.rst b/man/gnt-cluster.rst
index ca3ea228d..e84a3de9c 100644
--- a/man/gnt-cluster.rst
+++ b/man/gnt-cluster.rst
@@ -498,6 +498,11 @@ access
Attempts to use this feature without rbd support compiled in KVM
result in a "no such file or directory" error messages.
+user-id
+ The user id is used by ceph to determine the keyring to use for
+ authentication. By default the admin keyring is used.
+
+
.. _deadlocks: http://tracker.ceph.com/issues/3076
The option ``--maintain-node-health`` allows one to enable/disable
diff --git a/src/Ganeti/Constants.hs b/src/Ganeti/Constants.hs
index 05043765a..da722ef64 100644
--- a/src/Ganeti/Constants.hs
+++ b/src/Ganeti/Constants.hs
@@ -2299,6 +2299,9 @@ ldpPlanAhead = "c-plan-ahead"
ldpPool :: String
ldpPool = "pool"
+ldpUserId :: String
+ldpUserId = "user-id"
+
ldpProtocol :: String
ldpProtocol = "protocol"
@@ -2326,7 +2329,8 @@ diskLdTypes =
(ldpDelayTarget, VTypeInt),
(ldpMaxRate, VTypeInt),
(ldpMinRate, VTypeInt),
- (ldpPool, VTypeString)]
+ (ldpPool, VTypeString),
+ (ldpUserId, VTypeString)]
diskLdParameters :: FrozenSet String
diskLdParameters = ConstantUtils.mkSet (Map.keys diskLdTypes)
@@ -2390,6 +2394,9 @@ rbdAccess = "access"
rbdPool :: String
rbdPool = "pool"
+rbdUserId :: String
+rbdUserId = "user-id"
+
diskDtTypes :: Map String VType
diskDtTypes =
Map.fromList [(drbdResyncRate, VTypeInt),
@@ -2410,6 +2417,7 @@ diskDtTypes =
(lvStripes, VTypeInt),
(rbdAccess, VTypeString),
(rbdPool, VTypeString),
+ (rbdUserId, VTypeString),
(glusterHost, VTypeString),
(glusterVolume, VTypeString),
(glusterPort, VTypeInt)
@@ -4226,6 +4234,9 @@ defaultPlanAhead = 20
defaultRbdPool :: String
defaultRbdPool = "rbd"
+defaultRbdUserId :: String
+defaultRbdUserId = ""
+
diskLdDefaults :: Map DiskTemplate (Map String PyValueEx)
diskLdDefaults =
Map.fromList
@@ -4253,6 +4264,7 @@ diskLdDefaults =
, (DTRbd, Map.fromList
[ (ldpPool, PyValueEx defaultRbdPool)
, (ldpAccess, PyValueEx diskKernelspace)
+ , (ldpUserId, PyValueEx defaultRbdUserId)
])
, (DTSharedFile, Map.empty)
, (DTGluster, Map.fromList
@@ -4293,6 +4305,7 @@ diskDtDefaults =
, (DTRbd, Map.fromList
[ (rbdPool, PyValueEx defaultRbdPool)
, (rbdAccess, PyValueEx diskKernelspace)
+ , (rbdUserId, PyValueEx defaultRbdUserId)
])
, (DTSharedFile, Map.empty)
, (DTGluster, Map.fromList
diff --git a/test/py/legacy/cmdlib/cluster_unittest.py b/test/py/legacy/cmdlib/cluster_unittest.py
index f888f5c06..3d1ce923a 100644
--- a/test/py/legacy/cmdlib/cluster_unittest.py
+++ b/test/py/legacy/cmdlib/cluster_unittest.py
@@ -459,7 +459,8 @@ class TestLUClusterSetParams(CmdlibTestCase):
def testValidDiskparams(self):
diskparams = {constants.DT_RBD: {constants.RBD_POOL: "mock_pool",
- constants.RBD_ACCESS: "kernelspace"}}
+ constants.RBD_ACCESS: "kernelspace",
+ constants.RBD_USER_ID: "mock_user"}}
op = opcodes.OpClusterSetParams(diskparams=diskparams)
self.ExecOpCode(op)
self.assertEqual(diskparams[constants.DT_RBD],