Eduardo has uploaded a new change for review.
Change subject: BZ#836161 - Refactored deleteImage.
......................................................................
BZ#836161 - Refactored deleteImage.
Change-Id: I304ff5cd70186ffc9789cd1ac9337efa6c5ff695
Signed-off-by: Eduardo <ewarszaw(a)redhat.com>
---
M vdsm/storage/blockSD.py
M vdsm/storage/fileSD.py
M vdsm/storage/hsm.py
M vdsm/storage/sd.py
4 files changed, 142 insertions(+), 14 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/06/8506/1
diff --git a/vdsm/storage/blockSD.py b/vdsm/storage/blockSD.py
index e596fb6..d26e53c 100644
--- a/vdsm/storage/blockSD.py
+++ b/vdsm/storage/blockSD.py
@@ -163,13 +163,88 @@
res[vName]['parent'] = vPar
if vImg not in res[vName]['imgs']:
res[vName]['imgs'].insert(0, vImg)
- if vPar != sd.BLANK_UUID and \
- not vName.startswith(blockVolume.image.REMOVED_IMAGE_PREFIX) \
+ if vPar != sd.BLANK_UUID \
+ and not vName.startswith(sd.REMOVED_IMAGE_PREFIX) \
+ and not vName.startswith(sd.ZERO_ME_IMAGE_PREFIX) \
and vImg not in res[vPar]['imgs']:
res[vPar]['imgs'].append(vImg)
return dict((k, sd.ImgsPar(tuple(v['imgs']), v['parent']))
for k, v in res.iteritems())
+
+
def deleteVolumes(sdUUID, vols):
    """
    Remove the LVs backing the given volumes from the VG of domain sdUUID.

    Thin wrapper over lvm.removeLVs; callers pass any iterable of volume
    UUIDs (LV names).
    """
    lvm.removeLVs(sdUUID, vols)
+
+
def _zeroVolume(sdUUID, volUUID):
    """
    Start an asynchronous dd that overwrites the volume's LV with zeros.

    This function requires an active (mapped) LV at /dev/<sdUUID>/<volUUID>.
    Returns the running process object; the caller must wait on it and
    check its return code.
    """
    # TODO: zero in 128 MiB chunks and log progress.
    # 128 MiB is the vdsm extent size default.
    devPath = os.path.join("/dev", sdUUID, volUUID)
    # BUG FIX: the original used tuple(a, b, c), which raises TypeError
    # (tuple() takes a single iterable argument); use a tuple literal.
    cmd = (constants.EXT_DD, "if=/dev/zero", "of=%s" % devPath)
    return misc.execCmd(cmd, sudo=False, sync=False)
+
+
def _getZerosFinished(zeroPs):
    """
    Poll the running zeroing processes and collect those that finished.

    zeroPs maps volUUID -> process object (as returned by _zeroVolume).
    Finished processes are popped from zeroPs in place.  Returns a dict
    {volUUID: returncode} of the processes that completed.
    """
    # BUG FIX: 'finished' was (re)initialized inside the loop, which
    # discarded earlier entries and raised NameError at the return when
    # zeroPs was empty; initialize it once, before the loop.
    finished = {}
    # Iterate over a snapshot of the items so popping from zeroPs is safe.
    for volUUID, zeroProc in list(zeroPs.items()):
        if not zeroProc.wait(0):
            # Still running; check it again on the next poll.
            continue
        zeroPs.pop(volUUID)
        finished[volUUID] = zeroProc.returncode
        if zeroProc.returncode != 0:
            log.warning("zeroing %s failed. %s", volUUID, zeroProc.stderr)
        else:
            log.debug("zeroing %s finished", volUUID)
    return finished
+
+
def zeroImgVolumes(sdUUID, imgUUID, volUUIDs):
    """
    Zero all volumes of an image, deleting each one as its zeroing ends.

    The LVs are first activated and retagged from the image tag to the
    "zero me" tag (so leftovers are identifiable after a crash), made
    writable, then zeroed in parallel with dd.  Volumes whose zeroing
    finished with rc == 0 are removed; failures are logged and left behind
    as tagged leftovers.
    """
    # Sensible poll period given dd zeroing of a 128 MiB or 1 GiB chunk
    # plus the time an lvremove takes.
    ZERO_SLEEP = 60  # [seconds]

    # Prepare for zeroing.
    try:
        lvm.changelv(sdUUID, volUUIDs,
                     (("-a", "y"),
                      ("--deltag", imgUUID),
                      ("--addtag", sd.ZERO_ME_IMAGE_PREFIX + imgUUID)))
    except se.StorageException as e:
        # BUG FIX: the original passed three arguments to a format string
        # with only two placeholders; include the volumes and the error.
        log.debug("SD %s, Image %s pre zeroing ops failed. vols: %s (%s)",
                  sdUUID, imgUUID, volUUIDs, e)
    try:
        lvm.changelv(sdUUID, volUUIDs, ("--permission", "rw"))
    except se.StorageException as e:
        # Hope this only means that some volumes were already writable.
        log.debug("IGNORED: %s", e)

    # Blank the volumes, all in parallel.
    zeroPs = {}
    for volUUID in volUUIDs:
        zeroPs[volUUID] = _zeroVolume(sdUUID, volUUID)

    # Yes, this is a potentially infinite loop. Kill the vdsm task.
    last = time.time()
    while zeroPs:
        time.sleep(ZERO_SLEEP)
        toDelete = []
        # BUG FIX: iterating a dict directly yields only keys; iterate the
        # (volUUID, rc) pairs returned by _getZerosFinished explicitly.
        for volUUID, rc in _getZerosFinished(zeroPs).items():
            if rc != 0:
                log.warning("zero leftover: %s/%s rc = %s",
                            sdUUID, volUUID, rc)
            else:
                toDelete.append(volUUID)
        if toDelete:
            deleteVolumes(sdUUID, toDelete)

        now = time.time()
        if now - (last + 10 * ZERO_SLEEP) > 0:
            # BUG FIX: advance 'last' so the warning repeats once per
            # 10 * ZERO_SLEEP window instead of on every later iteration,
            # and materialize the keys so they render in the log message
            # (iterkeys() would log an iterator repr).
            last = now
            log.warning("Still zeroing VG:%s LVs: %s",
                        sdUUID, list(zeroPs))

    log.debug("All rm_zeroed VG:%s LVs: %s, img: %s",
              sdUUID, volUUIDs, imgUUID)
class VGTagMetadataRW(object):
@@ -827,6 +902,12 @@
if i.startswith(blockVolume.TAG_PREFIX_IMAGE) ]
return images
+ def deleteImage(self, sdUUID, imgUUID, volsImgs):
+ return deleteVolumes(sdUUID, volsImgs)
+
+ def zeroImage(self, sdUUID, imgUUID, volsImgs):
+ return zeroImgVolumes(sdUUID, imgUUID, volsImgs)
+
def getAllVolumes(self):
"""
Return all the images that depend on a volume.
diff --git a/vdsm/storage/fileSD.py b/vdsm/storage/fileSD.py
index 054fadb..d5b298f 100644
--- a/vdsm/storage/fileSD.py
+++ b/vdsm/storage/fileSD.py
@@ -62,6 +62,21 @@
return True
def getDomPath(sdUUID):
    """
    Return the filesystem path of the domain directory for sdUUID.

    Matches exactly one of:
      /rhev/data-center/mnt/<export-path>/<sdUUID>
      /rhev/data-center/mnt/blockSD/<sdUUID>

    Raises se.StorageDomainLayoutError when zero or multiple matches are
    found.
    """
    pattern = os.path.join(sd.mountBasePath, '*', sdUUID)
    # Warning! You need a global proc pool as big as the number of NFS
    # domains.
    # BUG FIX: getProcPool is a factory callable; the original accessed
    # getProcPool.glob without calling it, which would raise
    # AttributeError at runtime.
    domPaths = getProcPool().glob.glob(pattern)
    if len(domPaths) != 1:
        raise se.StorageDomainLayoutError("domain", sdUUID)
    return domPaths[0]
+
+
def getImagePath(sdUUID, imgUUID):
    """Return the path of the image directory inside the domain tree."""
    domPath = getDomPath(sdUUID)
    return os.path.join(domPath, 'images', imgUUID)
+
+
def getDomUuidFromMetafilePath(metafile):
# Metafile path has pattern:
# /rhev/data-center/mnt/export-path/sdUUID/dom_md/metadata
@@ -298,6 +313,31 @@
imgList.append(os.path.basename(i))
return imgList
    def deleteImage(self, sdUUID, imgUUID, volsImgs):
        # Delete a file-domain image: first rename its directory to a
        # "_remove_me_"-prefixed temporary name (so a half-done deletion is
        # never visible under the original name), then best-effort remove
        # each volume file and its .meta, and finally remove the renamed
        # directory itself.
        oPath = getImagePath(sdUUID, imgUUID)
        head, tail = self.oop.os.path.split(oPath)
        # NOTE(review): os.tempnam is deprecated and insecure (race between
        # name generation and the rename below); consider a uuid-based name.
        nPath = os.tempnam(head, sd.REMOVED_IMAGE_PREFIX + tail)
        try:
            self.oop.os.rename(oPath, nPath)
        except OSError as e:
            self.log.error("image: %s can't be moved", oPath)
            raise se.ImageDeleteError("%s %s" % (imgUUID, str(e)))
        for volUUID in volsImgs:
            volPath = os.path.join(nPath, volUUID)
            try:
                self.oop.os.remove(volPath)
                self.oop.os.remove(volPath + '.meta')
            except OSError as e:
                # Best effort: log and continue with the remaining volumes.
                # NOTE(review): if removing the volume file fails, its .meta
                # removal is skipped too -- confirm this is intended.
                self.log.error("vol: %s can't be removed.", volPath)
        try:
            # NOTE(review): nPath is a directory; plain os.remove() cannot
            # remove directories -- verify the oop wrapper handles this, or
            # whether rmdir was intended.
            self.oop.os.remove(nPath)
        except OSError as e:
            self.log.error("removed image dir: %s can't be removed", nPath)
            raise se.ImageDeleteError("%s %s" % (imgUUID, str(e)))
+
+ def zeroImage(self, sdUUID, imgUUID, volsImgs):
+ return self.deleteImage(sdUUID, imgUUID, volsImgs)
+
def getAllVolumes(self):
"""
Return dict {volUUID: ((imgUUIDs,), parentUUID)} of the domain.
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 78eef77..2a8bc59 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1260,24 +1260,26 @@
Delete Image folder with all volumes
"""
#vars.task.setDefaultException(se.ChangeMeError("%s" % args))
- pool = self.getPool(spUUID) #Validates that the pool is connected. WHY?
- self.validateSdUUID(sdUUID)
+ self.getPool(spUUID) #Validates that the pool is connected. WHY?
+ dom = self.validateSdUUID(sdUUID)
vars.task.getExclusiveLock(STORAGE, imgUUID)
vars.task.getSharedLock(STORAGE, sdUUID)
+ allVols = dom.getAllVolumes()
+ imgsByVol = sd.getVolsOfImage(allVols, imgUUID)
# Do not validate if forced.
- if not misc.parseBool(force):
- pool.validateDelete(sdUUID, imgUUID)
- # Rename image if postZero and perform delete as async operation
- # else delete image in sync. stage
+ if not misc.parseBool(force) and not dom.isBackup() \
+ and not all(len(v.imgs) == 1 for v in imgsByVol.itervalues()):
+ msg = "Cannot delete shared image %s. volImgs: %s" \
+ % (imgUUID, imgsByVol)
+ raise se.CannotDeleteSharedVolume(msg)
+
+ # zeroImage will delete zeroed volumes at the end.
if misc.parseBool(postZero):
- newImgUUID = pool.preDeleteRename(sdUUID, imgUUID)
- self._spmSchedule(spUUID, "deleteImage", pool.deleteImage, sdUUID,
newImgUUID,
- misc.parseBool(postZero), misc.parseBool(force)
- )
+ self._spmSchedule(spUUID, "zeroImage_%s" % imgUUID, dom.zeroImage,
+ sdUUID, imgUUID, imgsByVol.keys())
else:
- pool.deleteImage(sdUUID, imgUUID,
- misc.parseBool(postZero), misc.parseBool(force))
+ dom.deleteImage(sdUUID, imgUUID, imgsByVol)
# This is a hack to keep the interface consistent
# We currently have race conditions in delete image, to quickly fix
# this we delete images in the "synchronous" state. This only
works
diff --git a/vdsm/storage/sd.py b/vdsm/storage/sd.py
index 55c5b12..fcb796c 100644
--- a/vdsm/storage/sd.py
+++ b/vdsm/storage/sd.py
@@ -134,12 +134,17 @@
# Per-volume record: the image UUIDs using the volume and its parent volume.
ImgsPar = namedtuple("ImgsPar", "imgs,parent")
ISO_IMAGE_UUID = '11111111-1111-1111-1111-111111111111'
BLANK_UUID = '00000000-0000-0000-0000-000000000000'
# Name prefixes tagging images that are pending removal / pending zeroing.
REMOVED_IMAGE_PREFIX = "_remove_me_"
ZERO_ME_IMAGE_PREFIX = "_zero_me_"
# Blocks used for each lease (valid on all domain types)
LEASE_BLOCKS = 2048
UNICODE_MINIMAL_VERSION = 3
# Base directory under which all storage domains are mounted,
# i.e. <irs repository>/<DOMAIN_MNT_POINT>.
storage_repository = config.get('irs', 'repository')
mountBasePath = os.path.join(storage_repository, DOMAIN_MNT_POINT)
+
def getVolsOfImage(allVols, imgUUID):
""" Filter allVols dict for volumes related to imgUUID.
--
To view, visit
http://gerrit.ovirt.org/8506
To unsubscribe, visit
http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I304ff5cd70186ffc9789cd1ac9337efa6c5ff695
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Eduardo <ewarszaw(a)redhat.com>