Change in vdsm[master]: udevadm: More precise error handling
by Nir Soffer
Nir Soffer has uploaded a new change for review.
Change subject: udevadm: More precise error handling
......................................................................
udevadm: More precise error handling
udevadm provides a --timeout option, but there is no robust way to
detect a timeout in EL6, EL7, and Fedora 20. In Fedora 21 and upstream,
udevadm ignores the timeout option. This patch improves error handling
by using our own timeout.
udevadm.settle() now raises udevadm.Failure or udevadm.Timeout, and the
caller is responsible for handling the error.
In both multipath.rescan() and IscsiConnection.connect(), we warn about
timeout but do not handle other errors, so real errors in udevadm will
fail loudly.
Change-Id: Ia0a7380b1b181ec93399ea741122cfa2e98086fb
Relates-To: https://bugzilla.redhat.com/1209474
Signed-off-by: Nir Soffer <nsoffer(a)redhat.com>
---
A tests/udevadmTests.py
M vdsm/storage/multipath.py
M vdsm/storage/storageServer.py
M vdsm/storage/udevadm.py
4 files changed, 106 insertions(+), 21 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/40/39740/1
diff --git a/tests/udevadmTests.py b/tests/udevadmTests.py
new file mode 100644
index 0000000..90841b2
--- /dev/null
+++ b/tests/udevadmTests.py
@@ -0,0 +1,52 @@
+#
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+
+from monkeypatch import MonkeyPatch
+from testlib import VdsmTestCase
+
+from vdsm import utils
+from storage import udevadm
+
+TRUE = utils.CommandPath("true", "/bin/true", "/usr/bin/true")
+FALSE = utils.CommandPath("false", "/bin/false", "/usr/bin/false")
+READ = utils.CommandPath("read", "/bin/read", "/usr/bin/read")
+
+
+class UdevadmSettleTests(VdsmTestCase):
+
+ @MonkeyPatch(udevadm, "_UDEVADM", TRUE)
+ def test_success(self):
+ udevadm.settle(5)
+
+ @MonkeyPatch(udevadm, "_UDEVADM", FALSE)
+ def test_error(self):
+ try:
+ udevadm.settle(5)
+ except udevadm.Failure as e:
+ self.assertEqual(e.rc, 1)
+ self.assertEqual(e.out, "")
+ self.assertEqual(e.err, "")
+ else:
+ self.fail("Failure not raised")
+
+ @MonkeyPatch(udevadm, "_UDEVADM", READ)
+ def test_timeout(self):
+ self.assertRaises(udevadm.Timeout, udevadm.settle, 1)
diff --git a/vdsm/storage/multipath.py b/vdsm/storage/multipath.py
index a1c42b3..925c411 100644
--- a/vdsm/storage/multipath.py
+++ b/vdsm/storage/multipath.py
@@ -73,7 +73,10 @@
# events are processed, ensuring detection of new devices and creation or
# update of multipath devices.
timeout = config.getint('irs', 'scsi_settle_timeout')
- udevadm.settle(timeout)
+ try:
+ udevadm.settle(timeout)
+ except udevadm.Timeout as e:
+ log.warning("Timeout waiting for udev events: %s", e)
def deduceType(a, b):
diff --git a/vdsm/storage/storageServer.py b/vdsm/storage/storageServer.py
index 22a90d1..c19fb8d 100644
--- a/vdsm/storage/storageServer.py
+++ b/vdsm/storage/storageServer.py
@@ -382,7 +382,10 @@
def connect(self):
iscsi.addIscsiNode(self._iface, self._target, self._cred)
timeout = config.getint("irs", "scsi_settle_timeout")
- udevadm.settle(timeout)
+ try:
+ udevadm.settle(timeout)
+ except udevadm.Timeout as e:
+ self.log.warning("Timeout waiting for udev events: %s", e)
def _match(self, session):
target = session.target
diff --git a/vdsm/storage/udevadm.py b/vdsm/storage/udevadm.py
index 4b4b54a..a2afd04 100644
--- a/vdsm/storage/udevadm.py
+++ b/vdsm/storage/udevadm.py
@@ -18,22 +18,39 @@
# Refer to the README and COPYING files for full details of the license
#
-import logging
+import errno
+import signal
+
from vdsm import utils
+from vdsm.infra import zombiereaper
_UDEVADM = utils.CommandPath("udevadm", "/sbin/udevadm", "/usr/sbin/udevadm")
class Error(Exception):
+ message = None
- def __init__(self, rc, out, err):
+ def __str__(self):
+ return self.message.format(self=self)
+
+
+class Failure(Error):
+ message = ("udevadm failed cmd={self.cmd} rc={self.rc} out={self.out!r} "
+ "err={self.err!r}")
+
+ def __init__(self, cmd, rc, out, err):
+ self.cmd = cmd
self.rc = rc
self.out = out
self.err = err
- def __str__(self):
- return "Process failed with rc=%d out=%r err=%r" % (
- self.rc, self.out, self.err)
+
+class Timeout(Error):
+ message = ("udevadm timed out cmd={self.cmd} timeout={self.timeout}")
+
+ def __init__(self, cmd, timeout):
+ self.cmd = cmd
+ self.timeout = timeout
def settle(timeout, exit_if_exists=None):
@@ -44,25 +61,35 @@
Arguments:
timeout Maximum number of seconds to wait for the event queue to
- become empty. A value of 0 will check if the queue is empty
- and always return immediately.
+ become empty.
exit_if_exists Stop waiting if file exists.
+
+ Raises Failure if udevadm failed, or Timeout if udevadm did not terminate
+ within the requested timeout.
"""
- args = ["settle", "--timeout=%s" % timeout]
+ cmd = [_UDEVADM.cmd, "settle"]
if exit_if_exists:
- args.append("--exit-if-exists=%s" % exit_if_exists)
+ cmd.append("--exit-if-exists=%s" % exit_if_exists)
- try:
- _run_command(args)
- except Error as e:
- logging.error("%s", e)
+ _run_command(cmd, timeout)
-def _run_command(args):
- cmd = [_UDEVADM.cmd]
- cmd.extend(args)
- rc, out, err = utils.execCmd(cmd, raw=True)
- if rc != 0:
- raise Error(rc, out, err)
+def _run_command(cmd, timeout=None):
+ proc = utils.execCmd(cmd, sync=False, deathSignal=signal.SIGKILL)
+
+ if not proc.wait(timeout):
+ try:
+ proc.kill()
+ except OSError as e:
+ if e.errno != errno.ESRCH:
+ raise
+ finally:
+ zombiereaper.autoReapPID(proc.pid)
+ raise Timeout(cmd, timeout)
+
+ if proc.returncode != 0:
+ out = "".join(proc.stdout)
+ err = "".join(proc.stderr)
+ raise Failure(cmd, proc.returncode, out, err)
--
To view, visit https://gerrit.ovirt.org/39740
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ia0a7380b1b181ec93399ea741122cfa2e98086fb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Nir Soffer <nsoffer(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: monitor: Add udev monitor
by rmohr@redhat.com
Roman Mohr has uploaded a new change for review.
Change subject: monitor: Add udev monitor
......................................................................
monitor: Add udev monitor
Allow easy monitoring of device hotplug and device state changes which
are covered by udev.
Change-Id: I4b91753424d83896fa538eb6b57f8653b6332fbb
Signed-off-by: Roman Mohr <rmohr(a)redhat.com>
---
A lib/vdsm/udev/monitor.py
M tests/Makefile.am
A tests/udevMonitorTests.py
M vdsm.spec.in
4 files changed, 272 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/29/47729/1
diff --git a/lib/vdsm/udev/monitor.py b/lib/vdsm/udev/monitor.py
new file mode 100644
index 0000000..8698bf3
--- /dev/null
+++ b/lib/vdsm/udev/monitor.py
@@ -0,0 +1,133 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+from functools import partial
+import logging
+import threading
+from time import sleep
+
+from pyudev import Context, Monitor, MonitorObserver
+
+
+class UdevMonitor(object):
+
+ """
+ ``udev`` event monitor. Usage:
+
+ The monitor is a thread-safe thin wrapper around pyudev.MonitorObserver.
+ This allows multiple callbacks for the same netlink socket. To avoid
+ listening for udev events the application is not interested in, the
+ monitoring thread only starts listening on the socket when the monitor is
+ started and at least one subscriber is added.
+
+ The simplest way to use the monitor is to subscribe a callback to a
+ specific subsystem event and let the callback do the work:
+
+ def listen_for_disabled_cpu_events(device):
+ if device.action == 'offline':
+ print('CPU {0.name} is now offline'.format(device))
+
+ monitor = UdevMonitor()
+ monitor.start()
+ monitor.subscribe(listen_for_disabled_cpu_events, subsystem='cpu')
+
+ Another approach would be to just enqueue the udev event in a queue and do
+ the actual work in another thread or a thread pool. This is one approach to
+ do this:
+
+ queue = Queue.Queue()
+ def new_device_listener(device):
+ if device.action == 'add':
+ queue.put(device)
+
+ monitor = UdevMonitor()
+ monitor.subscribe(new_device_listener, subsystem='usb',
+ device_type='usb_device')
+ monitor.start()
+ """
+
+ def __init__(self):
+ self._subsystems = {}
+ self._context = Context()
+ self._monitor = Monitor.from_netlink(self._context)
+ self._observer = MonitorObserver(self._monitor, callback=partial(
+ UdevMonitor._event_loop, self), name='udev-monitor')
+ self._filter_lock = threading.Lock()
+ self._is_started = False
+ self._can_start = False
+
+ def _event_loop(self, device):
+ subsystem = self._subsystems[device.subsystem]
+ for callback in subsystem.get(device.device_type, []):
+ _execute_callback(callback, device)
+ if device.device_type:
+ for callback in subsystem.get(None, []):
+ _execute_callback(callback, device)
+
+ def subscribe(self, callback, subsystem, device_type=None):
+ """
+ Raise :exc:`~exceptions.ValueError` if the callback is None
+
+ :param callback: function to invoke
+ :param subsystem: byte or unicode string representing the subsystem to
+ listen on (e.g. ``'cpu'``, ``'usb'``)
+ :param device_type: byte or unicode string representing the device type
+ to listen for changes (e.g. ``'usb_device'``,
+ ``'block'``)
+ :return: None
+ """
+ if callback is None:
+ raise ValueError('callback missing')
+ with self._filter_lock:
+ self._monitor.filter_by(subsystem, device_type)
+ device_types = self._subsystems.get(subsystem, {})
+ callbacks = device_types.get(device_type, [])
+ callbacks.append(callback)
+ device_types[device_type] = callbacks
+ self._subsystems[subsystem] = device_types
+ self._start_if_necessary()
+
+ def start(self):
+ self._can_start = True
+ with self._filter_lock:
+ if self._subsystems:
+ self._start_if_necessary()
+
+ def _start_if_necessary(self):
+ if self._can_start and not self._is_started:
+ self._observer.start()
+ self._is_started = True
+
+ def stop(self):
+ """
+ Stops the monitoring thread. It is guaranteed that callbacks are no
+ longer invoked after calling this method. The method can be called
+ multiple times.
+
+ Note that this only stops the monitoring thread and not the monitor
+ itself. The monitor stops listening when it is dereferenced.
+ """
+ self._observer.stop()
+
+
+def _execute_callback(callback, device):
+ try:
+ callback(device)
+ except Exception as callbackException:
+ logging.error(
+ 'Callback execution threw an exception: %s', callbackException)
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 1168862..7483d0c 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -110,6 +110,7 @@
testlibTests.py \
toolTests.py \
transportWrapperTests.py \
+ udevMonitorTests.py \
utilsTests.py \
vdscliTests.py \
vdsClientTests.py \
diff --git a/tests/udevMonitorTests.py b/tests/udevMonitorTests.py
new file mode 100644
index 0000000..2526459
--- /dev/null
+++ b/tests/udevMonitorTests.py
@@ -0,0 +1,137 @@
+# Copyright 2015 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+from functools import partial
+
+import Queue
+
+from nettestlib import Bridge
+from testValidation import ValidateRunningAsRoot
+from testlib import VdsmTestCase
+from vdsm.udev.monitor import UdevMonitor
+
+
+class UdevMonitorTest(VdsmTestCase):
+
+ def setUp(self):
+ self._queue = Queue.Queue()
+ self._monitor = UdevMonitor()
+ self._monitor.start()
+
+ def tearDown(self):
+ self._monitor.stop()
+
+ @ValidateRunningAsRoot
+ def testAddDevice(self):
+ self._monitor.subscribe(
+ partial(UdevMonitorTest._device_listener, self),
+ subsystem='net',
+ device_type='bridge')
+ bridge = Bridge()
+ try:
+ bridge.addDevice()
+ device = None
+ try:
+ device = self._queue.get(timeout=1)
+ except Queue.Empty:
+ pass
+ self.assertIsNotNone(device,
+ msg='Should have detected an event')
+ self.assertEqual(device.action, 'add')
+ finally:
+ bridge.delDevice()
+
+ @ValidateRunningAsRoot
+ def testAddDeviceForSubsystem(self):
+ self._monitor.subscribe(
+ partial(UdevMonitorTest._device_listener, self),
+ subsystem='net',
+ device_type=None)
+ bridge = Bridge()
+ try:
+ bridge.addDevice()
+ device = None
+ try:
+ device = self._queue.get(timeout=1)
+ except Queue.Empty:
+ pass
+ self.assertIsNotNone(device,
+ msg='Should have detected an event')
+ self.assertEqual(device.action, 'add')
+ finally:
+ bridge.delDevice()
+
+ @ValidateRunningAsRoot
+ def testRemoveDevice(self):
+
+ bridge = Bridge()
+ try:
+ bridge.addDevice()
+ self._monitor.subscribe(
+ partial(UdevMonitorTest._device_listener, self),
+ subsystem='net',
+ device_type='bridge')
+ bridge.delDevice()
+ device = None
+ try:
+ device = self._queue.get(timeout=1)
+ except Queue.Empty:
+ pass
+ self.assertIsNotNone(device,
+ msg='Should have detected an event')
+ self.assertEqual(device.action, 'remove')
+ except:
+ bridge.delDevice()
+
+ @ValidateRunningAsRoot
+ def testEventSeries(self):
+
+ bridge1 = Bridge()
+ bridge2 = Bridge()
+ try:
+ self._monitor.subscribe(
+ partial(UdevMonitorTest._device_listener, self),
+ subsystem='net',
+ device_type='bridge')
+ bridge1.addDevice()
+ bridge1.delDevice()
+ bridge2.addDevice()
+ bridge2.delDevice()
+ events = []
+ try:
+ events.append(self._queue.get(timeout=1))
+ events.append(self._queue.get(timeout=1))
+ events.append(self._queue.get(timeout=1))
+ events.append(self._queue.get(timeout=1))
+ except Queue.Empty:
+ pass
+ self.assertEqual(len(events), 4)
+ except:
+ _delBridge(bridge1)
+ _delBridge(bridge2)
+
+ def _device_listener(self, device):
+ self._queue.put(device)
+
+
+def _delBridge(bridge):
+ try:
+ bridge.delDevice()
+ except:
+ pass
diff --git a/vdsm.spec.in b/vdsm.spec.in
index 8f758ca..ae5e483 100644
--- a/vdsm.spec.in
+++ b/vdsm.spec.in
@@ -126,6 +126,7 @@
Requires: python-pthreading >= 0.1.3-3
Requires: python-six
Requires: python-requests
+Requires: python-udev >= 0.15
Requires: %{name}-infra = %{version}-%{release}
Requires: rpm-python
Requires: nfs-utils
--
To view, visit https://gerrit.ovirt.org/47729
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I4b91753424d83896fa538eb6b57f8653b6332fbb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Roman Mohr <rmohr(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: FileVolumeMetadata: split getMetadata
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: FileVolumeMetadata: split getMetadata
......................................................................
FileVolumeMetadata: split getMetadata
Change-Id: I1d3fb61831de5b50a3e562b80bf38ef15ede254f
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/fileVolume.py
1 file changed, 16 insertions(+), 13 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/69/44569/1
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 08e3f50..74aa911 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -123,18 +123,10 @@
"""
return (self.getVolumePath(),)
- def getMetadata(self, metaId=None):
- """
- Get Meta data array of key,values lines
- """
- if not metaId:
- metaId = self.getMetadataId()
-
- volPath, = metaId
- metaPath = self._getMetaVolumePath(volPath)
-
+ @classmethod
+ def read_metadata(cls, oop, meta_path):
try:
- f = self.oop.directReadLines(metaPath)
+ f = oop.directReadLines(meta_path)
out = {}
for l in f:
if l.startswith("EOF"):
@@ -145,11 +137,22 @@
out[key.strip()] = value.strip()
except Exception as e:
- self.log.error(e, exc_info=True)
- raise se.VolumeMetadataReadError("%s: %s" % (metaId, e))
+ cls.log.error(e, exc_info=True)
+ raise se.VolumeMetadataReadError("%s: %s" % (meta_path, e))
return out
+ def getMetadata(self, metaId=None):
+ """
+ Get Meta data array of key,values lines
+ """
+ if not metaId:
+ metaId = self.getMetadataId()
+
+ volPath, = metaId
+ metaPath = self._getMetaVolumePath(volPath)
+ return self.read_metadata(self.oop, metaPath)
+
def getParentId(self):
"""
Return parent volume UUID
--
To view, visit https://gerrit.ovirt.org/44569
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I1d3fb61831de5b50a3e562b80bf38ef15ede254f
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: VolumeMetadata: move prepare and teardown
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: VolumeMetadata: move prepare and teardown
......................................................................
VolumeMetadata: move prepare and teardown
Change-Id: Iba0954ace3b1da5ea9afc41aeb6ea69a729fe29c
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/blockVolume.py
M vdsm/storage/fileVolume.py
M vdsm/storage/volume.py
3 files changed, 117 insertions(+), 109 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/50/44050/1
diff --git a/vdsm/storage/blockVolume.py b/vdsm/storage/blockVolume.py
index 0168d64..597e7ee 100644
--- a/vdsm/storage/blockVolume.py
+++ b/vdsm/storage/blockVolume.py
@@ -74,6 +74,8 @@
volume.VolumeMetadata.__init__(self, repoPath, sdUUID, imgUUID,
volUUID)
self.metaoff = None
+ self.lvmActivationNamespace = sd.getNamespace(self.sdUUID,
+ LVM_ACTIVATION_NAMESPACE)
def getMetadataId(self):
"""
@@ -332,6 +334,49 @@
lvs = lvm.lvsByTag(sdUUID, "%s%s" % (TAG_PREFIX_IMAGE, imgUUID))
return [lv.name for lv in lvs]
+ @logskip("ResourceManager")
+ def llPrepare(self, rw=False, setrw=False):
+ """
+ Perform low level volume use preparation
+
+ For the Block Volumes the actual LV activation is wrapped
+ into lvmActivation resource. It is being initialized by the
+ storage domain sitting on top of the encapsulating VG.
+ We just use it here.
+ """
+ if setrw:
+ self.setrw(rw=rw)
+ access = rm.LockType.exclusive if rw else rm.LockType.shared
+ activation = rmanager.acquireResource(self.lvmActivationNamespace,
+ self.volUUID, access)
+ activation.autoRelease = False
+
+ @classmethod
+ def teardown(cls, sdUUID, volUUID, justme=False):
+ """
+ Deactivate volume and release resources.
+ Volume deactivation occurs as part of resource releasing.
+ If justme is false, the entire COW chain should be torn down.
+ """
+ cls.log.info("Tearing down volume %s/%s justme %s"
+ % (sdUUID, volUUID, justme))
+ lvmActivationNamespace = sd.getNamespace(sdUUID,
+ LVM_ACTIVATION_NAMESPACE)
+ rmanager.releaseResource(lvmActivationNamespace, volUUID)
+ if not justme:
+ try:
+ pvolUUID = _getVolumeTag(sdUUID, volUUID, TAG_PREFIX_PARENT)
+ except Exception as e:
+ # If storage not accessible or lvm error occurred
+ # we will fail to get the parent volume.
+ # We can live with it and still succeed in volume's teardown.
+ pvolUUID = volume.BLANK_UUID
+ cls.log.warn("Failure to get parent of volume %s/%s (%s)"
+ % (sdUUID, volUUID, e))
+
+ if pvolUUID != volume.BLANK_UUID:
+ cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
+
class BlockVolume(volume.Volume):
""" Actually represents a single volume (i.e. part of virtual disk).
@@ -341,8 +386,6 @@
def __init__(self, repoPath, sdUUID, imgUUID, volUUID):
md = self.MetadataClass(repoPath, sdUUID, imgUUID, volUUID)
volume.Volume.__init__(self, md)
- self.lvmActivationNamespace = sd.getNamespace(self.sdUUID,
- LVM_ACTIVATION_NAMESPACE)
@property
def metaoff(self):
@@ -609,49 +652,6 @@
def shareVolumeRollback(cls, taskObj, volPath):
cls.log.info("Volume rollback for volPath=%s", volPath)
utils.rmFile(volPath)
-
- @logskip("ResourceManager")
- def llPrepare(self, rw=False, setrw=False):
- """
- Perform low level volume use preparation
-
- For the Block Volumes the actual LV activation is wrapped
- into lvmActivation resource. It is being initialized by the
- storage domain sitting on top of the encapsulating VG.
- We just use it here.
- """
- if setrw:
- self.setrw(rw=rw)
- access = rm.LockType.exclusive if rw else rm.LockType.shared
- activation = rmanager.acquireResource(self.lvmActivationNamespace,
- self.volUUID, access)
- activation.autoRelease = False
-
- @classmethod
- def teardown(cls, sdUUID, volUUID, justme=False):
- """
- Deactivate volume and release resources.
- Volume deactivation occurs as part of resource releasing.
- If justme is false, the entire COW chain should be torn down.
- """
- cls.log.info("Tearing down volume %s/%s justme %s"
- % (sdUUID, volUUID, justme))
- lvmActivationNamespace = sd.getNamespace(sdUUID,
- LVM_ACTIVATION_NAMESPACE)
- rmanager.releaseResource(lvmActivationNamespace, volUUID)
- if not justme:
- try:
- pvolUUID = _getVolumeTag(sdUUID, volUUID, TAG_PREFIX_PARENT)
- except Exception as e:
- # If storage not accessible or lvm error occurred
- # we will failure to get the parent volume.
- # We can live with it and still succeed in volume's teardown.
- pvolUUID = volume.BLANK_UUID
- cls.log.warn("Failure to get parent of volume %s/%s (%s)"
- % (sdUUID, volUUID, e))
-
- if pvolUUID != volume.BLANK_UUID:
- cls.teardown(sdUUID=sdUUID, volUUID=pvolUUID, justme=False)
def getVolumeTag(self, tagPrefix):
return self.md.getVolumeTag(tagPrefix)
diff --git a/vdsm/storage/fileVolume.py b/vdsm/storage/fileVolume.py
index 2dad91f..08e3f50 100644
--- a/vdsm/storage/fileVolume.py
+++ b/vdsm/storage/fileVolume.py
@@ -337,6 +337,26 @@
volList.append(volid)
return volList
+ def llPrepare(self, rw=False, setrw=False):
+ """
+ Make volume accessible as readonly (internal) or readwrite (leaf)
+ """
+ volPath = self.getVolumePath()
+
+ # Volumes leaves created in 2.2 did not have group writeable bit
+ # set. We have to set it here if we want qemu-kvm to write to old
+ # NFS volumes.
+ self.oop.fileUtils.copyUserModeToGroup(volPath)
+
+ if setrw:
+ self.setrw(rw=rw)
+ if rw:
+ if not self.oop.os.access(volPath, os.R_OK | os.W_OK):
+ raise se.VolumeAccessError(volPath)
+ else:
+ if not self.oop.os.access(volPath, os.R_OK):
+ raise se.VolumeAccessError(volPath)
+
class FileVolume(volume.Volume):
""" Actually represents a single volume (i.e. part of virtual disk).
@@ -488,26 +508,6 @@
procPool.utils.rmFile(volPath)
procPool.utils.rmFile(cls.__metaVolumePath(volPath))
procPool.utils.rmFile(cls.MetadataClass._leaseVolumePath(volPath))
-
- def llPrepare(self, rw=False, setrw=False):
- """
- Make volume accessible as readonly (internal) or readwrite (leaf)
- """
- volPath = self.getVolumePath()
-
- # Volumes leaves created in 2.2 did not have group writeable bit
- # set. We have to set it here if we want qemu-kvm to write to old
- # NFS volumes.
- self.oop.fileUtils.copyUserModeToGroup(volPath)
-
- if setrw:
- self.setrw(rw=rw)
- if rw:
- if not self.oop.os.access(volPath, os.R_OK | os.W_OK):
- raise se.VolumeAccessError(volPath)
- else:
- if not self.oop.os.access(volPath, os.R_OK):
- raise se.VolumeAccessError(volPath)
@classmethod
def __putMetadata(cls, metaId, meta):
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index a098eb7..22f7ead 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -525,6 +525,56 @@
return apparentSize
return req_size
+ def prepare(self, rw=True, justme=False,
+ chainrw=False, setrw=False, force=False):
+ """
+ Prepare volume for use by consumer.
+ If justme is false, the entire COW chain is prepared.
+ Note: setrw arg may be used only by SPM flows.
+ """
+ self.log.info("Volume: preparing volume %s/%s",
+ self.sdUUID, self.volUUID)
+
+ if not force:
+ # Cannot prepare ILLEGAL volume
+ if not self.isLegal():
+ raise se.prepareIllegalVolumeError(self.volUUID)
+
+ if rw and self.isShared():
+ if chainrw:
+ rw = False # Shared cannot be set RW
+ else:
+ raise se.SharedVolumeNonWritable(self)
+
+ if (not chainrw and rw and self.isInternal() and setrw and
+ not self.recheckIfLeaf()):
+ raise se.InternalVolumeNonWritable(self)
+
+ self.llPrepare(rw=rw, setrw=setrw)
+ self.updateInvalidatedSize()
+
+ try:
+ if justme:
+ return True
+ pvol = self.produceParent()
+ if pvol:
+ pvol.prepare(rw=chainrw, justme=False,
+ chainrw=chainrw, setrw=setrw)
+ except Exception:
+ self.log.error("Unexpected error", exc_info=True)
+ self.teardown(self.sdUUID, self.volUUID)
+ raise
+
+ return True
+
+ @classmethod
+ def teardown(cls, sdUUID, volUUID, justme=False):
+ """
+ Teardown volume.
+ If justme is false, the entire COW chain is torn down.
+ """
+ pass
+
class Volume(object):
log = logging.getLogger('Storage.Volume')
@@ -1091,53 +1141,11 @@
def prepare(self, rw=True, justme=False,
chainrw=False, setrw=False, force=False):
- """
- Prepare volume for use by consumer.
- If justme is false, the entire COW chain is prepared.
- Note: setrw arg may be used only by SPM flows.
- """
- self.log.info("Volume: preparing volume %s/%s",
- self.sdUUID, self.volUUID)
-
- if not force:
- # Cannot prepare ILLEGAL volume
- if not self.isLegal():
- raise se.prepareIllegalVolumeError(self.volUUID)
-
- if rw and self.isShared():
- if chainrw:
- rw = False # Shared cannot be set RW
- else:
- raise se.SharedVolumeNonWritable(self)
-
- if (not chainrw and rw and self.isInternal() and setrw and
- not self.recheckIfLeaf()):
- raise se.InternalVolumeNonWritable(self)
-
- self.llPrepare(rw=rw, setrw=setrw)
- self.updateInvalidatedSize()
-
- try:
- if justme:
- return True
- pvol = self.produceParent()
- if pvol:
- pvol.prepare(rw=chainrw, justme=False,
- chainrw=chainrw, setrw=setrw)
- except Exception:
- self.log.error("Unexpected error", exc_info=True)
- self.teardown(self.sdUUID, self.volUUID)
- raise
-
- return True
+ return self.md.prepare(rw, justme, chainrw, setrw, force)
@classmethod
def teardown(cls, sdUUID, volUUID, justme=False):
- """
- Teardown volume.
- If justme is false, the entire COW chain is teared down.
- """
- pass
+ cls.MetadataClass.teardown(sdUUID, volUUID, justme)
@classmethod
def newMetadata(cls, metaId, sdUUID, imgUUID, puuid, size, format, type,
--
To view, visit https://gerrit.ovirt.org/44050
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Iba0954ace3b1da5ea9afc41aeb6ea69a729fe29c
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: sdc: Allow StorageDomainCache to cache StorageDomainManifest...
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: sdc: Allow StorageDomainCache to cache StorageDomainManifest objects
......................................................................
sdc: Allow StorageDomainCache to cache StorageDomainManifest objects
When HSM is operating without SPM the system should always be using
StorageDomainManifest objects instead of StorageDomains. With this
patch we can instruct the cache to serve the correct type of object
without changing all consumers of sdCache.
Change-Id: I9a3dc7d9bf24f7d8b60ddff6f5364b65a9354e45
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/sdc.py
1 file changed, 12 insertions(+), 3 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/41/44041/1
diff --git a/vdsm/storage/sdc.py b/vdsm/storage/sdc.py
index ecb9708..cbdaed9 100644
--- a/vdsm/storage/sdc.py
+++ b/vdsm/storage/sdc.py
@@ -64,12 +64,13 @@
STORAGE_STALE = 1
STORAGE_REFRESHING = 2
- def __init__(self, storage_repo):
+ def __init__(self, storage_repo, use_manifests=False):
self._syncroot = threading.Condition()
self.__domainCache = {}
self.__inProgress = set()
self.__staleStatus = self.STORAGE_STALE
self.storage_repo = storage_repo
+ self.use_manifests = use_manifests
self.knownSDs = {} # {sdUUID: mod.findDomain}
def invalidateStorage(self):
@@ -162,12 +163,18 @@
# this changes, please update the order.
for mod in (blockSD, glusterSD, localFsSD, nfsSD):
try:
- return mod.findDomain(sdUUID)
+ if self.use_manifests:
+ ret = mod.findDomainManifest(sdUUID)
+ else:
+ ret = mod.findDomain(sdUUID)
except se.StorageDomainDoesNotExist:
pass
except Exception:
self.log.error("Error while looking for domain `%s`", sdUUID,
exc_info=True)
+ else:
+ self.log.debug("Found domain %s", ret)
+ return ret
raise se.StorageDomainDoesNotExist(sdUUID)
@@ -181,10 +188,12 @@
return uuids
- def refresh(self):
+ def refresh(self, use_manifests=None):
with self._syncroot:
lvm.invalidateCache()
self.__domainCache.clear()
+ if use_manifests is not None:
+ self.use_manifests = use_manifests
def manuallyAddDomain(self, domain):
with self._syncroot:
--
To view, visit https://gerrit.ovirt.org/44041
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9a3dc7d9bf24f7d8b60ddff6f5364b65a9354e45
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: VolumeMetadata: Move getParentVolume
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: VolumeMetadata: Move getParentVolume
......................................................................
VolumeMetadata: Move getParentVolume
Move getParentVolume into VolumeMetadata. If HSM configured sdCache to
return StorageDomainManifest objects then this will return
VolumeMetadata objects. Otherwise it will continue to return Volume
objects as it always has.
Change-Id: I74def8861e0b7b97ca4d218a969440ccba7f07dd
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/volume.py
1 file changed, 11 insertions(+), 5 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/42/44042/1
diff --git a/vdsm/storage/volume.py b/vdsm/storage/volume.py
index c40ba77..f7e5bc0 100644
--- a/vdsm/storage/volume.py
+++ b/vdsm/storage/volume.py
@@ -485,6 +485,16 @@
"""
pass # Do not remove this method or the V3 upgrade will fail.
+ def getParentVolume(self):
+ """
+ Return parent volume object
+ """
+ puuid = self.getParent()
+ if puuid and puuid != BLANK_UUID:
+ return sdCache.produce(self.sdUUID).produceVolume(self.imgUUID,
+ puuid)
+ return None
+
class Volume(object):
log = logging.getLogger('Storage.Volume')
@@ -1128,11 +1138,7 @@
"""
Return parent volume object
"""
- puuid = self.getParent()
- if puuid and puuid != BLANK_UUID:
- return sdCache.produce(self.sdUUID).produceVolume(self.imgUUID,
- puuid)
- return None
+ return self.md.getParentVolume()
def setParent(self, puuid):
"""
--
To view, visit https://gerrit.ovirt.org/44042
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I74def8861e0b7b97ca4d218a969440ccba7f07dd
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: storage: Make Image.__chainSizeCalc public
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: storage: Make Image.__chainSizeCalc public
......................................................................
storage: Make Image.__chainSizeCalc public
The new SDM copyVolumeData wants to make use of the same logic being
used by the classic copy flows to extend the size of the target volume
to the appropriate size. Make Image.__chainSizeCalc public so it can be
accessed from the SDM code.
Change-Id: Id079eb5067c16f934370e42b5f4e09bbcef1512b
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/image.py
1 file changed, 2 insertions(+), 2 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/95/38995/1
diff --git a/vdsm/storage/image.py b/vdsm/storage/image.py
index 791b48c..8d5e8c2 100644
--- a/vdsm/storage/image.py
+++ b/vdsm/storage/image.py
@@ -140,7 +140,7 @@
randomStr = misc.randomStr(RENAME_RANDOM_STRING_LEN)
return "%s%s_%s" % (sd.REMOVED_IMAGE_PREFIX, randomStr, uuid)
- def __chainSizeCalc(self, sdUUID, imgUUID, volUUID, size):
+ def chainSizeCalc(self, sdUUID, imgUUID, volUUID, size):
"""
Compute an estimate of the whole chain size
using the sum of the actual size of the chain's volumes
@@ -763,7 +763,7 @@
if volParams['volFormat'] != volume.COW_FORMAT or \
volParams['prealloc'] != volume.SPARSE_VOL:
raise se.IncorrectFormat(self)
- volParams['apparentsize'] = self.__chainSizeCalc(
+ volParams['apparentsize'] = self.chainSizeCalc(
sdUUID, srcImgUUID, srcVolUUID, volParams['size'])
# Find out dest volume parameters
--
To view, visit https://gerrit.ovirt.org/38995
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Id079eb5067c16f934370e42b5f4e09bbcef1512b
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: SDM: Add extendVolumeContainer API
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: SDM: Add extendVolumeContainer API
......................................................................
SDM: Add extendVolumeContainer API
The extendVolumeContainer API is used to extend LVM logical volumes
which store thinly-provisioned vdsm volumes.
Change-Id: I6a128ba3eab4116ff4e794e94a171e51d9e432de
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/rpc/BindingXMLRPC.py
M vdsm/rpc/vdsmapi-schema.json
M vdsm/storage/hsm.py
M vdsm/storage/sdm/__init__.py
M vdsm/storage/sdm/blockstore.py
M vdsm/storage/sdm/filestore.py
8 files changed, 123 insertions(+), 9 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/96/39696/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index 23bfc1a..f6c0c83 100755
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -1953,6 +1953,17 @@
else:
return status['status']['code'], status['status']['message']
+ def extendVolumeContainer(self, args):
+ if len(args) != 4:
+ raise ValueError("Wrong number of arguments")
+
+ sdUUID, imgUUID, volUUID, size = args
+ status = self.s.extendVolumeContainer(sdUUID, imgUUID, volUUID, size)
+ if status['status']['code'] == 0:
+ return 0, ''
+ else:
+ return status['status']['code'], status['status']['message']
+
if __name__ == '__main__':
if _glusterEnabled:
@@ -2844,6 +2855,11 @@
'<srcImage> <dstImage> <collapse>',
'Copy the data from one volume into another.'
)),
+ 'extendVolumeContainer': (
+ serv.extendVolumeContainer, (
+ '<sdUUID> <imgUUID> <volUUID> <size>',
+ 'Extend a thinly-provisioned block volume.'
+ )),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
diff --git a/vdsm/API.py b/vdsm/API.py
index b4b7308..5661b82 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -1675,6 +1675,10 @@
def copyData(self, srcImage, dstImage, collapse):
return self._cif.irs.copyData(srcImage, dstImage, collapse)
+ def extendVolumeContainer(self, sdUUID, imgUUID, volUUID, size):
+ return self._cif.irs.extendVolumeContainer(sdUUID, imgUUID, volUUID,
+ size)
+
# take a rough estimate on how much free mem is available for new vm
# memTotal = memFree + memCached + mem_used_by_non_qemu + resident .
# simply returning (memFree + memCached) is not good enough, as the
diff --git a/vdsm/rpc/BindingXMLRPC.py b/vdsm/rpc/BindingXMLRPC.py
index 3e70e28..ae43614 100644
--- a/vdsm/rpc/BindingXMLRPC.py
+++ b/vdsm/rpc/BindingXMLRPC.py
@@ -993,6 +993,10 @@
api = API.Global()
return api.copyData(srcImage, dstImage, collapse)
+ def extendVolumeContainer(self, sdUUID, imgUUID, volUUID, size):
+ api = API.Global()
+ return api.extendVolumeContainer(sdUUID, imgUUID, volUUID, size)
+
def getGlobalMethods(self):
return ((self.vmDestroy, 'destroy'),
(self.vmCreate, 'create'),
@@ -1143,7 +1147,8 @@
'storageServer_ConnectionRefs_statuses'),
(self.volumeCreateContainer, 'createVolumeContainer'),
(self.volumeRemove, 'removeVolume'),
- (self.copyData, 'copyData'))
+ (self.copyData, 'copyData'),
+ (self.extendVolumeContainer, 'extendVolumeContainer'))
def wrapApiMethod(f):
diff --git a/vdsm/rpc/vdsmapi-schema.json b/vdsm/rpc/vdsmapi-schema.json
index 8720703..f507324 100644
--- a/vdsm/rpc/vdsmapi-schema.json
+++ b/vdsm/rpc/vdsmapi-schema.json
@@ -4182,6 +4182,24 @@
'data': {'srcImage': 'VolumeSpec', 'dstImage': 'VolumeSpec',
'collapse': 'bool'}}
+##
+# @Host.extendVolumeContainer:
+#
+# Extend a thinly-provisioned volume.
+#
+# @sdUUID: The UUID of the storage domain containing the volume
+#
+# @imgUUID: The UUID of the image containing the volume
+#
+# @volUUID: The UUID of the volume
+#
+# @size: The new desired size (in bytes)
+#
+# Since: 4.18.0
+##
+{'command': {'class': 'Host', 'name': 'extendVolumeContainer'},
+ 'data': {'sdUUID': 'UUID', 'imgUUID': 'UUID', 'volUUID': 'UUID',
+ 'size': 'uint'}}
## Category: @ConnectionRefs ##################################################
##
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 2236457..34730dd 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -854,13 +854,22 @@
"""
newSize = misc.validateN(newSize, "newSize") / 2 ** 20
- try:
- pool = self.getPool(spUUID)
- except se.StoragePoolUnknown:
- pass
+ enableSDM = True # Replace this with a check on the SD version
+ if enableSDM:
+ domain = sdCache.produce(volDict['domainID'])
+ self._sdmSchedule('extendVolumeContainer',
+ sdm.extendVolumeContainer, domain,
+ volDict['imageID'], volDict['volumeID'], newSize,
+ callbackFunc, volDict)
else:
- if pool.hsmMailer:
- pool.hsmMailer.sendExtendMsg(volDict, newSize, callbackFunc)
+ try:
+ pool = self.getPool(spUUID)
+ except se.StoragePoolUnknown:
+ pass
+ else:
+ if pool.hsmMailer:
+ pool.hsmMailer.sendExtendMsg(volDict, newSize,
+ callbackFunc)
def _spmSchedule(self, spUUID, name, func, *args):
self.validateSPM(spUUID)
@@ -3744,3 +3753,18 @@
se.VolumeCopyError("Unsupported combination of image types: "
"src:%s, dst:%s" % (src.get('type'),
dst.get('type')))
+
+ @public
+ def extendVolumeContainer(self, sdUUID, imgUUID, volUUID, size):
+ vars.task.setDefaultException(
+ se.VolumeExtendingError("sdUUID=%s, volumeUUID=%s, size=%s" % (
+ sdUUID, volUUID, size)))
+ size = misc.validateN(size, "size")
+ # ExtendVolume expects size in MB
+ size = math.ceil(size / 2 ** 20)
+
+ dom = sdCache.produce(sdUUID=sdUUID)
+ misc.validateUUID(imgUUID, 'imgUUID')
+ misc.validateUUID(volUUID, 'volUUID')
+ vars.task.getSharedLock(STORAGE, sdUUID)
+ return sdm.extendVolumeContainer(dom, imgUUID, volUUID, size)
diff --git a/vdsm/storage/sdm/__init__.py b/vdsm/storage/sdm/__init__.py
index 9d74bd5..7f80ca2 100644
--- a/vdsm/storage/sdm/__init__.py
+++ b/vdsm/storage/sdm/__init__.py
@@ -195,3 +195,16 @@
finally:
dstDom.releaseVolumeLease(dstImage['imgUUID'], dstImage['volUUID'])
srcDom.releaseVolumeLease(srcImage['imgUUID'], srcImage['volUUID'])
+
+
+def extendVolumeContainer(domain, imgUUID, volUUID, size,
+ cbFn=None, cbData=None):
+ cls = __getStoreClass(domain)
+ hostId = getDomainHostId(domain.sdUUID)
+ domain.acquireClusterLock(hostId)
+ try:
+ cls.extendVolume(domain, imgUUID, volUUID, size)
+ finally:
+ domain.releaseClusterLock()
+ if cbFn:
+ cbFn(cbData)
diff --git a/vdsm/storage/sdm/blockstore.py b/vdsm/storage/sdm/blockstore.py
index b80f869..73a12a8 100644
--- a/vdsm/storage/sdm/blockstore.py
+++ b/vdsm/storage/sdm/blockstore.py
@@ -20,6 +20,7 @@
import os
import logging
+import math
import vdsm.utils as utils
from vdsm.config import config
@@ -27,9 +28,13 @@
import volumestore
from .. import blockVolume
from .. import lvm
+from .. import resourceManager as rm
+from .. import sd
from .. import storage_exception as se
from .. import volume
+from ..resourceFactories import IMAGE_NAMESPACE
+rmanager = rm.ResourceManager.getInstance()
log = logging.getLogger('Storage.sdm.blockstore')
SECTORS_TO_MB = (1 << 20) / volume.BLOCK_SIZE
@@ -37,6 +42,9 @@
class BlockStore(volumestore.VolumeStore):
volClass = blockVolume.BlockVolume
+
+ # Estimate of the additional space needed for qcow format internal data.
+ VOLWM_COW_OVERHEAD = 1.1
@classmethod
def volFormatToPreallocate(cls, volFormat):
@@ -109,7 +117,7 @@
parent = tag[len(blockVolume.TAG_PREFIX_PARENT):]
if parent and image:
break
- vols.append(volumestore.GCVol(lv.name, volUUID, image, parent))
+ vols.append(volumestore.GCVol(lv.name, volUUID, image, parent))
return vols
@classmethod
@@ -119,3 +127,24 @@
except se.VolumeMetadataReadError:
pass
lvm.removeLVs(dom.sdUUID, volName)
+
+ @classmethod
+ def extendVolume(cls, dom, imgUUID, volUUID, size):
+ imageResourcesNamespace = sd.getNamespace(dom.sdUUID, IMAGE_NAMESPACE)
+ with rmanager.acquireResource(imageResourcesNamespace, imgUUID,
+ rm.LockType.shared):
+ # Verify that the requested size is valid
+ vol = dom.produceVolume(imgUUID, volUUID)
+ volInfo = vol.getInfo()
+ maxSize = int(volInfo['capacity'])
+ if volInfo['format'] == volume.type2name(volume.COW_FORMAT):
+ maxSize = maxSize * cls.VOLWM_COW_OVERHEAD
+ maxSize = math.ceil(maxSize / 2 ** 20)
+ if size > maxSize:
+ raise se.VolumeExtendingError(
+ "Size %i exceeds the maximum extend size of %i for volume "
+ "%s" % (size, maxSize, volUUID))
+
+ dom.extendVolume(volUUID, size)
+
+
diff --git a/vdsm/storage/sdm/filestore.py b/vdsm/storage/sdm/filestore.py
index dd6c58f..cc3e2bb 100644
--- a/vdsm/storage/sdm/filestore.py
+++ b/vdsm/storage/sdm/filestore.py
@@ -157,4 +157,9 @@
except se.ImageDeleteError:
dom.imageGarbageCollector()
else:
- dom.oop.os.unlink(volPath)
\ No newline at end of file
+ dom.oop.os.unlink(volPath)
+
+ @classmethod
+ def extendVolume(cls, dom, imgUUID, volUUID, size):
+ # There is nothing to do for file domains. The filesystem handles it.
+ pass
--
To view, visit https://gerrit.ovirt.org/39696
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6a128ba3eab4116ff4e794e94a171e51d9e432de
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: SDM: isolateVolumes API
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: SDM: isolateVolumes API
......................................................................
SDM: isolateVolumes API
Change-Id: I9b67e2df82afba9956e8246c1a4f9093aed729f2
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M client/vdsClient.py
M vdsm/API.py
M vdsm/rpc/BindingXMLRPC.py
M vdsm/rpc/vdsmapi-schema.json
M vdsm/storage/hsm.py
M vdsm/storage/sdm/__init__.py
M vdsm/storage/sdm/blockstore.py
M vdsm/storage/sdm/filestore.py
M vdsm/storage/sdm/volumestore.py
M vdsm/storage/storage_exception.py
10 files changed, 134 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/40379/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index 633d1b7..d3c482e 100755
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -1963,6 +1963,17 @@
else:
return status['status']['code'], status['status']['message']
+ def isolateVolumes(self, args):
+ if len(args) != 4:
+ raise ValueError('Wrong number of arguments')
+ sdUUID, srcImgUUID, dstImgUUID, volStr = args
+ volList = volStr.split(',')
+ status = self.s.isolateVolumes(sdUUID, srcImgUUID, dstImgUUID, volList)
+ if status['status']['code'] == 0:
+ return 0, ''
+ else:
+ return status['status']['code'], status['status']['message']
+
if __name__ == '__main__':
if _glusterEnabled:
@@ -2855,6 +2866,12 @@
'<sdUUID> <imgUUID> <volUUID> <size>',
'Extend a thinly-provisioned block volume.'
)),
+ 'isolateVolumes': (
+ serv.isolateVolumes, (
+ '<sdUUID> <srcImgUUID> <dstImgUUID> <volUUID>[...,<volUUID>]',
+ 'Isolate volumes from one image into a new image for '
+ 'post-processing.'
+ )),
}
if _glusterEnabled:
commands.update(ge.getGlusterCmdDict(serv))
diff --git a/vdsm/API.py b/vdsm/API.py
index a50025f..44dddb4 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -1049,6 +1049,10 @@
def validate(self):
return self._irs.validateStorageDomain(self._UUID)
+ def isolateVolumes(self, srcImageID, dstImageID, volumeList):
+ return self._irs.isolateVolumes(self._UUID, srcImageID, dstImageID,
+ volumeList)
+
class StoragePool(APIBase):
ctorArgs = ['storagepoolID']
diff --git a/vdsm/rpc/BindingXMLRPC.py b/vdsm/rpc/BindingXMLRPC.py
index 7834e83..0e1e5e4 100644
--- a/vdsm/rpc/BindingXMLRPC.py
+++ b/vdsm/rpc/BindingXMLRPC.py
@@ -1000,6 +1000,10 @@
api = API.Global()
return api.extendVolumeContainer(sdUUID, imgUUID, volUUID, size)
+ def isolateVolumes(self, sdUUID, srcImgUUID, dstImgUUID, volumeList):
+ api = API.StorageDomain(sdUUID)
+ return api.isolateVolumes(srcImgUUID, dstImgUUID, volumeList)
+
def getGlobalMethods(self):
return ((self.vmDestroy, 'destroy'),
(self.vmCreate, 'create'),
@@ -1151,7 +1155,8 @@
'storageServer_ConnectionRefs_statuses'),
(self.volumeCreateContainer, 'createVolumeContainer'),
(self.copyData, 'copyData'),
- (self.extendVolumeContainer, 'extendVolumeContainer'))
+ (self.extendVolumeContainer, 'extendVolumeContainer'),
+ (self.isolateVolumes, 'isolateVolumes'))
def wrapApiMethod(f):
diff --git a/vdsm/rpc/vdsmapi-schema.json b/vdsm/rpc/vdsmapi-schema.json
index c0d8caf..a873d22 100644
--- a/vdsm/rpc/vdsmapi-schema.json
+++ b/vdsm/rpc/vdsmapi-schema.json
@@ -5459,6 +5459,25 @@
{'command': {'class': 'StorageDomain', 'name': 'validate'},
'data': {'storagedomainID': 'UUID'}}
+##
+# @StorageDomain.isolateVolumes:
+#
+# Isolate volumes from one image into a new image.
+#
+# @storagedomainID: The UUID of the Storage Domain
+#
+# @srcImageID: The UUID of the Image containing the volumes
+#
+# @dstImageID: The UUID of the destination Image
+#
+# @volumeList: Identifies a set of volumes to move
+#
+# Since: 4.18.0
+##
+{'command': {'class': 'StorageDomain', 'name': 'isolateVolumes'},
+ 'data': {'storagedomainID': 'UUID', 'srcImageID': 'UUID',
+ 'dstImageID': 'UUID', 'volumeList': ['UUID']}}
+
## Category: @StoragePool #####################################################
##
# @StoragePool:
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 63c9b3b..746423d 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -3760,3 +3760,13 @@
misc.validateUUID(volUUID, 'volUUID')
vars.task.getSharedLock(STORAGE, sdUUID)
return sdm.extendVolumeContainer(dom, imgUUID, volUUID, size)
+
+ @public
+ def isolateVolumes(self, sdUUID, srcImgUUID, dstImgUUID, volumeList):
+ vars.task.setDefaultException(
+ se.IsolateVolumesError(sdUUID, srcImgUUID, dstImgUUID, volumeList))
+ dom = sdCache.produce(sdUUID=sdUUID)
+ misc.validateUUID(srcImgUUID, 'srcImgUUID')
+ misc.validateUUID(dstImgUUID, 'dstImgUUID')
+ vars.task.getSharedLock(STORAGE, sdUUID)
+ return sdm.isolateVolumes(dom, srcImgUUID, dstImgUUID, volumeList)
diff --git a/vdsm/storage/sdm/__init__.py b/vdsm/storage/sdm/__init__.py
index da1e858..1636e63 100644
--- a/vdsm/storage/sdm/__init__.py
+++ b/vdsm/storage/sdm/__init__.py
@@ -208,3 +208,21 @@
domain.releaseClusterLock()
if cbFn:
cbFn(cbData)
+
+
+def isolateVolumes(domain, srcImgUUID, dstImgUUID, volumeList):
+ cls = __getStoreClass(domain)
+ imageResourcesNamespace = sd.getNamespace(domain.sdUUID, IMAGE_NAMESPACE)
+
+ hostId = getDomainHostId(domain.sdUUID)
+ domain.acquireClusterLock(hostId)
+ try:
+ with nested(rmanager.acquireResource(imageResourcesNamespace,
+ srcImgUUID,
+ rm.LockType.exclusive),
+ rmanager.acquireResource(imageResourcesNamespace,
+ dstImgUUID,
+ rm.LockType.exclusive)):
+ cls.isolateVolumes(domain, srcImgUUID, dstImgUUID, volumeList)
+ finally:
+ domain.releaseClusterLock()
diff --git a/vdsm/storage/sdm/blockstore.py b/vdsm/storage/sdm/blockstore.py
index 06a77e4..bd9d3f1 100644
--- a/vdsm/storage/sdm/blockstore.py
+++ b/vdsm/storage/sdm/blockstore.py
@@ -101,6 +101,18 @@
return newName
@classmethod
+ def _isolateVolume(cls, dom, srcImgUUID, dstImgUUID, vol):
+ pVolUUID = vol.getParent()
+ toAdd = [blockVolume.TAG_PREFIX_PARENT + volume.BLANK_UUID,
+ blockVolume.TAG_PREFIX_IMAGE + dstImgUUID]
+ toDel = [blockVolume.TAG_PREFIX_PARENT + pVolUUID,
+ blockVolume.TAG_PREFIX_IMAGE + srcImgUUID]
+ lvm.changeLVTags(dom.sdUUID, vol.volUUID, addTags=toAdd, delTags=toDel)
+ if pVolUUID and pVolUUID != volume.BLANK_UUID:
+ pVol = dom.produceVolume(srcImgUUID, pVolUUID)
+ cls.recheckIfLeaf(pVol)
+
+ @classmethod
def _getGCVolumes(cls, dom, onlyImg, onlyVol):
lvs = lvm.getLV(dom.sdUUID)
vols = []
diff --git a/vdsm/storage/sdm/filestore.py b/vdsm/storage/sdm/filestore.py
index 3e99ee9..9385d87 100644
--- a/vdsm/storage/sdm/filestore.py
+++ b/vdsm/storage/sdm/filestore.py
@@ -98,6 +98,21 @@
return newName
@classmethod
+ def _isolateVolume(cls, dom, srcImgUUID, dstImgUUID, vol):
+ srcImgPath = os.path.join(dom.getRepoPath(), dom.sdUUID,
+ sd.DOMAIN_IMAGES, srcImgUUID)
+ dstImgPath = os.path.join(dom.getRepoPath(), dom.sdUUID,
+ sd.DOMAIN_IMAGES, dstImgUUID)
+ pUUID = vol.getParent()
+ vol._share(dstImgPath)
+ dstVol = dom.produceVolume(dstImgUUID, vol.volUUID)
+ dstVol.setParent(volume.BLANK_UUID)
+ dstVol.setImage(dstImgUUID)
+ newName = cls._beginRemoveVolume(dom, srcImgPath, vol.volUUID)
+ volInfo = volumestore.GCVol(newName, vol.volUUID, srcImgUUID, pUUID)
+ cls._garbageCollectVolume(dom, volInfo)
+
+ @classmethod
def _getGCVolumes(cls, dom, onlyImg, onlyVol):
vols = []
volPaths = []
diff --git a/vdsm/storage/sdm/volumestore.py b/vdsm/storage/sdm/volumestore.py
index d518f20..6568d75 100644
--- a/vdsm/storage/sdm/volumestore.py
+++ b/vdsm/storage/sdm/volumestore.py
@@ -355,3 +355,27 @@
newName = cls._beginRemoveVolume(dom, imageDir, volUUID)
volInfo = GCVol(newName, volUUID, imgUUID, pUUID)
cls._garbageCollectVolume(dom, volInfo)
+
+ @classmethod
+ def isolateVolumes(cls, dom, srcImgUUID, dstImgUUID, volumeList):
+ repoPath = dom.getRepoPath()
+ # Create dest image
+ cls.createImage(repoPath, dom.sdUUID, dstImgUUID)
+ # Verify dest image contains only volumes in volumeList
+ uuidList = cls.volClass.getImageVolumes(repoPath, dom.sdUUID,
+ dstImgUUID)
+ extraVols = set(uuidList) - set(volumeList)
+ if extraVols:
+ log.error("Destination image contains unexpected volumes: %s",
+ extraVols)
+ raise se.IsolateVolumesError(dom.sdUUID, srcImgUUID,
+ dstImgUUID, volumeList)
+ # Iterate over volumes in volumeList
+ for volUUID in volumeList:
+ try:
+ vol = cls.volClass(repoPath, dom.sdUUID, srcImgUUID, volUUID)
+ except se.VolumeDoesNotExist:
+ log.debug("Skipping non-existent source volume %s", volUUID)
+ continue
+ vol.validateDelete()
+ cls._isolateVolume(dom, srcImgUUID, dstImgUUID, vol)
diff --git a/vdsm/storage/storage_exception.py b/vdsm/storage/storage_exception.py
index 1cfc8e4..a695cf4 100644
--- a/vdsm/storage/storage_exception.py
+++ b/vdsm/storage/storage_exception.py
@@ -453,6 +453,15 @@
message = "Image does not exist in domain"
+class IsolateVolumesError(StorageException):
+ def __init__(self, sdUUID, srcImgUUID, dstImgUUID, volumeList):
+ self.value = ("domain=%s srcImg=%s dstImg=%s "
+ "volumes=%s" % (sdUUID, srcImgUUID, dstImgUUID,
+ volumeList))
+ code = 269
+ message = "Unable to isolate volumes"
+
+
#################################################
# Pool Exceptions
#################################################
--
To view, visit https://gerrit.ovirt.org/40379
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I9b67e2df82afba9956e8246c1a4f9093aed729f2
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months
Change in vdsm[master]: HACK: run GC in domain monitor
by alitke@redhat.com
Adam Litke has uploaded a new change for review.
Change subject: HACK: run GC in domain monitor
......................................................................
HACK: run GC in domain monitor
Change-Id: I3c560e6fbccdf50b135cc9c90b23824ae04b0376
Signed-off-by: Adam Litke <alitke(a)redhat.com>
---
M vdsm/storage/monitor.py
M vdsm/storage/sdm/__init__.py
2 files changed, 22 insertions(+), 0 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/80/40380/1
diff --git a/vdsm/storage/monitor.py b/vdsm/storage/monitor.py
index ef032a5..a2a1bd7 100644
--- a/vdsm/storage/monitor.py
+++ b/vdsm/storage/monitor.py
@@ -28,6 +28,7 @@
from . import clusterlock
from . import misc
+from . import sdm
from .sdc import sdCache
@@ -249,6 +250,7 @@
self._performDomainSelftest()
self._checkReadDelay()
self._collectStatistics()
+ self._garbageCollect()
except Exception as e:
self.log.exception("Error monitoring domain %s", self.sdUUID)
self.nextStatus.error = e
@@ -340,6 +342,14 @@
self.nextStatus.isoPrefix = self.isoPrefix
self.nextStatus.version = self.domain.getVersion()
+ def _garbageCollect(self):
+ if True: # XXX: limit this to domain ver 4 or later
+ try:
+ sdm.garbageCollectStorageDomain(self.domain)
+ except:
+ self.log.exception("Garbage collection failed for domain %s",
+ self.domain.sdUUID)
+
# Managing host id
def _shouldAcquireHostId(self):
diff --git a/vdsm/storage/sdm/__init__.py b/vdsm/storage/sdm/__init__.py
index 1636e63..4a31332 100644
--- a/vdsm/storage/sdm/__init__.py
+++ b/vdsm/storage/sdm/__init__.py
@@ -226,3 +226,15 @@
cls.isolateVolumes(domain, srcImgUUID, dstImgUUID, volumeList)
finally:
domain.releaseClusterLock()
+
+
+def garbageCollectStorageDomain(domain):
+ if domain.isISO():
+ return
+ cls = __getStoreClass(domain)
+ hostId = getDomainHostId(domain.sdUUID)
+ domain.acquireClusterLock(hostId)
+ try:
+ cls.garbageCollectStorageDomain(domain)
+ finally:
+ domain.releaseClusterLock()
--
To view, visit https://gerrit.ovirt.org/40380
To unsubscribe, visit https://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I3c560e6fbccdf50b135cc9c90b23824ae04b0376
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Adam Litke <alitke(a)redhat.com>
7 years, 10 months