Change in vdsm[master]: Makefile: add vdsm-reg-setup.in to pyflakes
by Douglas Schilling Landgraf
Douglas Schilling Landgraf has uploaded a new change for review.
Change subject: Makefile: add vdsm-reg-setup.in to pyflakes
......................................................................
Makefile: add vdsm-reg-setup.in to pyflakes
We should run pyflakes in vdsm-reg-setup.in as well.
Change-Id: I6be99965f3249374c99c1d4ab71145d571c13921
Signed-off-by: Douglas Schilling Landgraf <dougsland@redhat.com>
---
M Makefile.am
1 file changed, 2 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/25/27025/1
diff --git a/Makefile.am b/Makefile.am
index a7fc23f..ea3a4c3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -66,7 +66,8 @@
check-local:
find . -path './.git' -prune -type f -o \
- -name '*.py' -o -name '*.py.in' | xargs $(PYFLAKES) | \
+ -name '*.py' -o -name '*.py.in' -o \
+ -name 'vdsm-reg-setup.in' | xargs $(PYFLAKES) | \
grep -w -v $(SKIP_PYFLAKES_ERR) | \
while read LINE; do echo "$$LINE"; false; done
$(PEP8) --version
--
To view, visit http://gerrit.ovirt.org/27025
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6be99965f3249374c99c1d4ab71145d571c13921
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Douglas Schilling Landgraf <dougsland@redhat.com>
9 years, 6 months
Change in vdsm[master]: net_tests: Move network unit tests into package
by asegurap@redhat.com
Antoni Segura Puimedon has uploaded a new change for review.
Change subject: net_tests: Move network unit tests into package
......................................................................
net_tests: Move network unit tests into package
This patch brings together the network unit tests under a directory
to improve the organization and as a possible step for a future
cohesive network package with code and tests.
Change-Id: I6960ce365d67ab4bb0a5475d7957e6117bef7e60
Signed-off-by: Antoni S. Puimedon <asegurap@redhat.com>
---
M configure.ac
M tests/Makefile.am
A tests/network/Makefile.am
R tests/network/apiTests.py
R tests/network/ipwrapperTests.py
R tests/network/modelsTests.py
R tests/network/netconfTests.py
R tests/network/netconfpersistenceTests.py
R tests/network/netinfoTests.py
R tests/network/netmaskconversions
R tests/network/tcTests.py
R tests/network/tc_filter_show.out
M tests/run_tests_local.sh.in
13 files changed, 63 insertions(+), 15 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/74/26874/1
diff --git a/configure.ac b/configure.ac
index 12828be..944e17f 100644
--- a/configure.ac
+++ b/configure.ac
@@ -267,6 +267,7 @@
lib/zombiereaper/Makefile
tests/Makefile
tests/functional/Makefile
+ tests/network/Makefile
vds_bootstrap/Makefile
vdsm-tool/Makefile
vdsm/Makefile
diff --git a/tests/Makefile.am b/tests/Makefile.am
index 39c6cad..6fb834f 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -1,5 +1,5 @@
#
-# Copyright 2012 Red Hat, Inc.
+# Copyright 2012-2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -20,7 +20,7 @@
include $(top_srcdir)/build-aux/Makefile.subs
-SUBDIRS = functional
+SUBDIRS = functional network
test_modules = \
alignmentScanTests.py \
@@ -29,7 +29,6 @@
cPopenTests.py \
capsTests.py \
clientifTests.py \
- configNetworkTests.py \
fileVolumeTests.py \
fileUtilTests.py \
fuserTests.py \
@@ -38,7 +37,6 @@
glusterTestData.py \
guestagentTests.py \
hooksTests.py \
- ipwrapperTests.py \
iscsiTests.py \
jsonRpcTests.py \
jsonRpcUtils.py \
@@ -52,10 +50,6 @@
mkimageTests.py \
monkeypatchTests.py \
mountTests.py \
- netconfpersistenceTests.py \
- netconfTests.py \
- netinfoTests.py \
- netmodelsTests.py \
outOfProcessTests.py \
parted_utils_tests.py \
permutationTests.py \
@@ -66,7 +60,6 @@
securableTests.py \
sslTests.py \
storageMailboxTests.py \
- tcTests.py \
toolTests.py \
transportWrapperTests.py \
utilsTests.py \
@@ -111,8 +104,6 @@
glusterVolumeTasks.xml \
lvs_3386c6f2-926f-42c4-839c-38287fac8998.out \
mem_info.out \
- netmaskconversions \
- tc_filter_show.out \
$(NULL)
dist_vdsmtests_PYTHON = \
diff --git a/tests/network/Makefile.am b/tests/network/Makefile.am
new file mode 100644
index 0000000..f241a4b
--- /dev/null
+++ b/tests/network/Makefile.am
@@ -0,0 +1,53 @@
+#
+# Copyright 2014 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Refer to the README and COPYING files for full details of the license
+#
+
+net_testsdir = ${vdsmtestsdir}/network
+
+dist_net_tests_PYTHON = \
+ apiTests.py \
+ ipwrapperTests.py \
+ netconfpersistenceTests.py \
+ netconfTests.py \
+ netinfoTests.py \
+ modelsTests.py \
+ tcTests.py \
+ $(NULL)
+
+MODULES = \
+ network/apiTests.py \
+ network/ipwrapperTests.py \
+ network/netconfpersistenceTests.py \
+ network/netconfTests.py \
+ network/netinfoTests.py \
+ network/modelsTests.py \
+ network/tcTests.py \
+ $(NULL)
+
+
+
+dist_net_tests_DATA = \
+ netmaskconversions \
+ tc_filter_show.out \
+ $(NULL)
+
+check-local:
+ @echo '*** Running network tests. To skip this step place NOSE_EXCLUDE=.* ***'
+ @echo '*** into your environment. Do not submit untested code! ***'
+ $(top_srcdir)/tests/run_tests_local.sh $(MODULES)
diff --git a/tests/configNetworkTests.py b/tests/network/apiTests.py
similarity index 100%
rename from tests/configNetworkTests.py
rename to tests/network/apiTests.py
diff --git a/tests/ipwrapperTests.py b/tests/network/ipwrapperTests.py
similarity index 100%
rename from tests/ipwrapperTests.py
rename to tests/network/ipwrapperTests.py
diff --git a/tests/netmodelsTests.py b/tests/network/modelsTests.py
similarity index 98%
rename from tests/netmodelsTests.py
rename to tests/network/modelsTests.py
index a2a3ee3..5da708b 100644
--- a/tests/netmodelsTests.py
+++ b/tests/network/modelsTests.py
@@ -34,7 +34,7 @@
from monkeypatch import MonkeyPatch
-class TestNetmodels(TestCaseBase):
+class TestModels(TestCaseBase):
def testIsVlanIdValid(self):
vlanIds = ('badValue', Vlan.MAX_ID + 1)
diff --git a/tests/netconfTests.py b/tests/network/netconfTests.py
similarity index 100%
rename from tests/netconfTests.py
rename to tests/network/netconfTests.py
diff --git a/tests/netconfpersistenceTests.py b/tests/network/netconfpersistenceTests.py
similarity index 100%
rename from tests/netconfpersistenceTests.py
rename to tests/network/netconfpersistenceTests.py
diff --git a/tests/netinfoTests.py b/tests/network/netinfoTests.py
similarity index 100%
rename from tests/netinfoTests.py
rename to tests/network/netinfoTests.py
diff --git a/tests/netmaskconversions b/tests/network/netmaskconversions
similarity index 100%
rename from tests/netmaskconversions
rename to tests/network/netmaskconversions
diff --git a/tests/tcTests.py b/tests/network/tcTests.py
similarity index 100%
rename from tests/tcTests.py
rename to tests/network/tcTests.py
diff --git a/tests/tc_filter_show.out b/tests/network/tc_filter_show.out
similarity index 100%
rename from tests/tc_filter_show.out
rename to tests/network/tc_filter_show.out
diff --git a/tests/run_tests_local.sh.in b/tests/run_tests_local.sh.in
index 0a229c0..787d9b2 100644
--- a/tests/run_tests_local.sh.in
+++ b/tests/run_tests_local.sh.in
@@ -3,8 +3,11 @@
PYTHON_EXE="@PYTHON@"
fi
-if [ ! -f @top_srcdir@/tests/jsonrpc-tests.server.crt ] || [ ! -f @top_srcdir@/tests/jsonrpc-tests.server.csr ] || [ ! -f @top_srcdir@/tests/jsonrpc-tests.server.key ]; then
- @top_srcdir@/tests/makecert.sh
+# The following line is taken from http://stackoverflow.com/a/246128/206009
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+
+if [ ! -f "$DIR/jsonrpc-tests.server.crt" ] || [ ! -f "$DIR/jsonrpc-tests.server.csr" ] || [ ! -f "$DIR/jsonrpc-tests.server.key" ]; then
+ $DIR/makecert.sh
fi
-PYTHONDONTWRITEBYTECODE=1 LC_ALL=C PYTHONPATH="@top_srcdir@/lib:@top_srcdir@/vdsm:@top_srcdir@/client:@top_srcdir@/vdsm_api:$PYTHONPATH" "$PYTHON_EXE" @top_srcdir@/tests/testrunner.py --local-modules $@
+PYTHONDONTWRITEBYTECODE=1 LC_ALL=C PYTHONPATH="$DIR/../lib:$DIR/../vdsm:$DIR/../client:$DIR/../vdsm_api:$PYTHONPATH" "$PYTHON_EXE" $DIR/testrunner.py --local-modules $@
--
To view, visit http://gerrit.ovirt.org/26874
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6960ce365d67ab4bb0a5475d7957e6117bef7e60
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Antoni Segura Puimedon <asegurap@redhat.com>
9 years, 6 months
Change in vdsm[master]: cleanup: drop several unused local variables
by asegurap@redhat.com
Antoni Segura Puimedon has uploaded a new change for review.
Change subject: cleanup: drop several unused local variables
......................................................................
cleanup: drop several unused local variables
Change-Id: Ib81c292f900154819e8852c21ae389c323034999
Signed-off-by: Antoni S. Puimedon <asegurap@redhat.com>
---
M client/vdsClient.py
M lib/vdsm/netinfo.py
M lib/yajsonrpc/protonReactor.py
M tests/functional/networkTests.py
M tests/functional/virtTests.py
M tests/hookValidation.py
M tests/hooksTests.py
M tests/jsonRpcTests.py
M tests/miscTests.py
M tests/testValidation.py
M tests/vmTests.py
M vds_bootstrap/setup
M vds_bootstrap/vds_bootstrap.py
M vds_bootstrap/vds_bootstrap_complete.py
M vdsm/storage/hsm.py
M vdsm/storage/iscsi.py
M vdsm/storage/misc.py
M vdsm/storage/remoteFileHandler.py
M vdsm/storage/resourceManager.py
M vdsm/storage/sp.py
M vdsm/storage/task.py
M vdsm/vm.py
M vdsm_api/Bridge.py
M vdsm_api/process-schema.py
M vdsm_api/vdsmapi.py
25 files changed, 23 insertions(+), 49 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/35/20535/1
diff --git a/client/vdsClient.py b/client/vdsClient.py
index 37dd7cb..4c09546 100644
--- a/client/vdsClient.py
+++ b/client/vdsClient.py
@@ -842,7 +842,6 @@
masterDom = args[3]
domList = args[4].split(",")
mVer = int(args[5])
- pool = None
if len(args) > 6:
pool = self.s.createStoragePool(poolType, spUUID,
poolName, masterDom,
diff --git a/lib/vdsm/netinfo.py b/lib/vdsm/netinfo.py
index e8f2b8d..cf70089 100644
--- a/lib/vdsm/netinfo.py
+++ b/lib/vdsm/netinfo.py
@@ -93,8 +93,6 @@
The list of nics is built by filtering out nics defined
as hidden, fake or hidden bonds (with related nics'slaves).
"""
- res = []
-
def isHiddenNic(device):
"""
Returns boolean given the device name stating
@@ -397,7 +395,7 @@
"Convert an integer to the corresponding ip address in the dot-notation"
ip_address = []
- for i in xrange(4):
+ for _ in xrange(4):
ip_num, ip_val = divmod(ip_num, 256)
ip_address.append(str(ip_val))
diff --git a/lib/yajsonrpc/protonReactor.py b/lib/yajsonrpc/protonReactor.py
index 557600c..7892e3c 100644
--- a/lib/yajsonrpc/protonReactor.py
+++ b/lib/yajsonrpc/protonReactor.py
@@ -376,7 +376,6 @@
proton.pn_link_advance(link)
def createListener(self, address, acceptHandler):
- host, port = address
return self._scheduleOp(True, self._createListener, address,
acceptHandler)
diff --git a/tests/functional/networkTests.py b/tests/functional/networkTests.py
index a9149be..ba93343 100644
--- a/tests/functional/networkTests.py
+++ b/tests/functional/networkTests.py
@@ -377,7 +377,7 @@
for index in range(VLAN_COUNT)]
with dummyIf(1) as nics:
firstVlan, firstVlanId = NET_VLANS[0]
- status, msg = self.vdsm_net.addNetwork(firstVlan, vlan=firstVlanId,
+ _ = self.vdsm_net.addNetwork(firstVlan, vlan=firstVlanId,
bond=BONDING_NAME,
nics=nics, opts=opts)
with nonChangingOperstate(BONDING_NAME):
diff --git a/tests/functional/virtTests.py b/tests/functional/virtTests.py
index cdb695a..85781f7 100644
--- a/tests/functional/virtTests.py
+++ b/tests/functional/virtTests.py
@@ -83,7 +83,7 @@
def _genInitramfs():
fd, path = tempfile.mkstemp()
cmd = [_mkinitrd.cmd, "-f", path, _kernelVer]
- rc, out, err = execCmd(cmd, sudo=False)
+ _ = execCmd(cmd, sudo=False)
os.chmod(path, 0o644)
return path
diff --git a/tests/hookValidation.py b/tests/hookValidation.py
index 80e7239..208ed35 100644
--- a/tests/hookValidation.py
+++ b/tests/hookValidation.py
@@ -67,8 +67,6 @@
cookie_file = _createHookScript(hook_path, hook_name, hook_script)
- output = None
-
try:
kwargs['hook_cookiefile'] = cookie_file
output = test_function(*args, **kwargs)
diff --git a/tests/hooksTests.py b/tests/hooksTests.py
index 1018a4e..ddb3530 100644
--- a/tests/hooksTests.py
+++ b/tests/hooksTests.py
@@ -42,7 +42,7 @@
echo -n %s >> "$_hook_domxml"
"""
scripts = [tempfile.NamedTemporaryFile(dir=dirName, delete=False)
- for n in xrange(Q)]
+ for _ in xrange(Q)]
scripts.sort(key=lambda f: f.name)
for n, script in enumerate(scripts):
script.write(code % n)
diff --git a/tests/jsonRpcTests.py b/tests/jsonRpcTests.py
index a7b565f..00025ae 100644
--- a/tests/jsonRpcTests.py
+++ b/tests/jsonRpcTests.py
@@ -85,7 +85,7 @@
def serve(reactor):
try:
reactor.process_requests()
- except socket.error as e:
+ except socket.error:
pass
except Exception as e:
self.log.error("Reactor died unexpectedly", exc_info=True)
diff --git a/tests/miscTests.py b/tests/miscTests.py
index c836e55..1a9a16c 100644
--- a/tests/miscTests.py
+++ b/tests/miscTests.py
@@ -432,7 +432,7 @@
os.chmod(dstPath, 0o666)
#Copy
- rc, out, err = misc.ddWatchCopy(srcPath, dstPath, None, len(data))
+ _ = misc.ddWatchCopy(srcPath, dstPath, None, len(data))
#Get copied data
readData = open(dstPath).read()
@@ -448,7 +448,7 @@
fd, path = tempfile.mkstemp()
try:
- for i in xrange(repetitions):
+ for _ in xrange(repetitions):
os.write(fd, data)
self.assertEquals(os.stat(path).st_size, misc.MEGA)
except:
@@ -474,7 +474,7 @@
self.assertEquals(os.stat(path).st_size, misc.MEGA * 2)
with open(path, "r") as f:
- for i in xrange(repetitions):
+ for _ in xrange(repetitions):
self.assertEquals(f.read(len(data)), data)
finally:
os.unlink(path)
@@ -501,7 +501,7 @@
misc.MEGA * 2 + len(add_data))
with open(path, "r") as f:
- for i in xrange(repetitions):
+ for _ in xrange(repetitions):
self.assertEquals(f.read(len(data)), data)
# Checking the additional data
self.assertEquals(f.read(len(add_data)), add_data)
@@ -535,7 +535,7 @@
os.chmod(dstPath, 0o666)
#Copy
- rc, out, err = misc.ddWatchCopy(srcPath, dstPath, None, len(data))
+ _ = misc.ddWatchCopy(srcPath, dstPath, None, len(data))
#Get copied data
readData = open(dstPath).read()
diff --git a/tests/testValidation.py b/tests/testValidation.py
index d370971..92790d9 100644
--- a/tests/testValidation.py
+++ b/tests/testValidation.py
@@ -110,7 +110,7 @@
def wrapper(*args, **kwargs):
if not os.path.exists('/sys/module/dummy'):
cmd_modprobe = [modprobe.cmd, "dummy"]
- rc, out, err = utils.execCmd(cmd_modprobe, sudo=True)
+ _ = utils.execCmd(cmd_modprobe, sudo=True)
return f(*args, **kwargs)
return wrapper
diff --git a/tests/vmTests.py b/tests/vmTests.py
index 1f69f0a..9d91723 100644
--- a/tests/vmTests.py
+++ b/tests/vmTests.py
@@ -390,7 +390,7 @@
driveInput.update({'shared': 'UNKNOWN-VALUE'})
with self.assertRaises(ValueError):
- drive = vm.Drive({}, self.log, **driveInput)
+ _ = vm.Drive({}, self.log, **driveInput)
def testDriveXML(self):
SERIAL = '54-a672-23e5b495a9ea'
diff --git a/vds_bootstrap/setup b/vds_bootstrap/setup
index 778dc12..701df8b 100755
--- a/vds_bootstrap/setup
+++ b/vds_bootstrap/setup
@@ -63,7 +63,6 @@
return False, HYPERVISOR_RELEASE_FILE + ", " + REDHAT_RELEASE_FILE
def get_id_line():
- line = ''
RELEASE_FILE = None
try:
@@ -193,7 +192,6 @@
import calendar
return_value = False
- ticket = None
try:
time_struct = time.strptime(systime, '%Y-%m-%dT%H:%M:%S')
diff --git a/vds_bootstrap/vds_bootstrap.py b/vds_bootstrap/vds_bootstrap.py
index a9dc901..e45c8b6 100755
--- a/vds_bootstrap/vds_bootstrap.py
+++ b/vds_bootstrap/vds_bootstrap.py
@@ -289,7 +289,6 @@
"""
status = "OK"
message = 'Host properly registered with RHN/Satellite.'
- rc = True
try:
rc = bool(deployUtil.yumListPackages(VDSM_NAME))
@@ -316,7 +315,6 @@
"""
status = "OK"
message = 'Available VDSM matches requirements'
- rc = True
try:
rc = deployUtil.yumSearchVersion(VDSM_NAME, VDSM_MIN_VER)
@@ -393,7 +391,6 @@
"""
os_status = "FAIL"
kernel_status = "FAIL"
- os_message = "Unsupported platform version"
os_name = "Unknown OS"
kernel_message = ''
self.rc = True
@@ -741,8 +738,6 @@
return self.rc
def _addNetwork(self, vdcName, vdcPort):
- fReturn = True
-
#add management bridge
try:
fReturn = deployUtil.makeBridge(
@@ -859,9 +854,6 @@
# TODO remove legacy
if deployUtil.getBootstrapInterfaceVersion() == 1 and \
engine_ssh_key is None:
- vdcAddress = None
- vdcPort = None
-
vdcAddress, vdcPort = deployUtil.getAddress(url)
if vdcAddress is not None:
strKey = deployUtil.getAuthKeysFile(vdcAddress, vdcPort)
diff --git a/vds_bootstrap/vds_bootstrap_complete.py b/vds_bootstrap/vds_bootstrap_complete.py
index fd18847..07c3610 100755
--- a/vds_bootstrap/vds_bootstrap_complete.py
+++ b/vds_bootstrap/vds_bootstrap_complete.py
@@ -101,7 +101,6 @@
except:
arg = 1
- res = True
try:
res = deployUtil.instCert(rnum, VDSM_CONF_FILE)
if res:
diff --git a/vdsm/storage/hsm.py b/vdsm/storage/hsm.py
index 4579763..322ee8b 100644
--- a/vdsm/storage/hsm.py
+++ b/vdsm/storage/hsm.py
@@ -1095,7 +1095,7 @@
misc.validateN(hostID, 'hostID')
# already disconnected/or pool is just unknown - return OK
try:
- pool = self.getPool(spUUID)
+ _ = self.getPool(spUUID)
except se.StoragePoolUnknown:
self.log.warning("disconnect sp: %s failed. Known pools %s",
spUUID, self.pools)
@@ -1861,7 +1861,7 @@
self.log.info("spUUID=%s master=%s", spUUID, masterDom)
try:
- pool = self.getPool(spUUID)
+ _ = self.getPool(spUUID)
except se.StoragePoolUnknown:
pool = sp.StoragePool(spUUID, self.domainMonitor, self.taskMng)
else:
diff --git a/vdsm/storage/iscsi.py b/vdsm/storage/iscsi.py
index 7da94ab..9976026 100644
--- a/vdsm/storage/iscsi.py
+++ b/vdsm/storage/iscsi.py
@@ -415,7 +415,7 @@
log.debug("Performing SCSI scan, this will take up to %s seconds",
maxTimeout)
time.sleep(minTimeout)
- for i in xrange(maxTimeout - minTimeout):
+ for _ in xrange(maxTimeout - minTimeout):
for p in processes[:]:
(hba, proc) = p
if proc.wait(0):
@@ -429,7 +429,7 @@
time.sleep(1)
else:
log.warning("Still waiting for scsi scan of hbas: %s",
- tuple(hba for p in processes))
+ tuple(hba for _ in processes))
def devIsiSCSI(dev):
diff --git a/vdsm/storage/misc.py b/vdsm/storage/misc.py
index fc13a9c..5245264 100644
--- a/vdsm/storage/misc.py
+++ b/vdsm/storage/misc.py
@@ -484,7 +484,6 @@
log.debug("dir: %s, prefixName: %s, versions: %s" %
(directory, prefixName, gen))
gen = int(gen)
- files = os.listdir(directory)
files = glob.glob("%s*" % prefixName)
fd = {}
for fname in files:
@@ -614,7 +613,6 @@
return self.acquire(True)
def acquire(self, exclusive):
- currentEvent = None
currentThread = threading.currentThread()
# Handle reacquiring lock in the same thread
@@ -1081,7 +1079,7 @@
maxthreads -= 1
# waiting for rest threads to end
- for i in xrange(threadsCount):
+ for _ in xrange(threadsCount):
yield respQueue.get()
diff --git a/vdsm/storage/remoteFileHandler.py b/vdsm/storage/remoteFileHandler.py
index 5b24053..accf51c 100644
--- a/vdsm/storage/remoteFileHandler.py
+++ b/vdsm/storage/remoteFileHandler.py
@@ -275,7 +275,7 @@
def __init__(self, numOfHandlers):
self._numOfHandlers = numOfHandlers
self.handlers = [None] * numOfHandlers
- self.occupied = [Lock() for i in xrange(numOfHandlers)]
+ self.occupied = [Lock() for _ in xrange(numOfHandlers)]
def _isHandlerAvailable(self, poolHandler):
if poolHandler is None:
diff --git a/vdsm/storage/resourceManager.py b/vdsm/storage/resourceManager.py
index 14049dc..486ea18 100644
--- a/vdsm/storage/resourceManager.py
+++ b/vdsm/storage/resourceManager.py
@@ -926,7 +926,7 @@
return req.wait(timeout)
# req not found - check that it is not granted
- for fullName in self.resources:
+ for _ in self.resources:
return True
# Note that there is a risk of another thread that is racing with us
diff --git a/vdsm/storage/sp.py b/vdsm/storage/sp.py
index 38cd453..db66662 100644
--- a/vdsm/storage/sp.py
+++ b/vdsm/storage/sp.py
@@ -1326,7 +1326,6 @@
self.log.info("spUUID=%s sdUUID=%s", self.spUUID, sdUUID)
vms = self._getVMsPath(sdUUID)
# We should exclude 'masterd' link from IMG_METAPATTERN globing
- vmUUID = ovf = imgList = ''
for vm in vmList:
if not vm:
continue
diff --git a/vdsm/storage/task.py b/vdsm/storage/task.py
index 4eff5c1..0532b02 100644
--- a/vdsm/storage/task.py
+++ b/vdsm/storage/task.py
@@ -872,10 +872,7 @@
def _runJobs(self):
result = ""
- code = 100
- message = "Unknown Error"
i = 0
- j = None
try:
if self.aborting():
raise se.TaskAborted("shutting down")
@@ -891,7 +888,6 @@
if result is None:
result = ""
i += 1
- j = None
self._updateResult(0, "%s jobs completed successfully" % i, result)
self._updateState(State.finished)
self.log.debug('Task.run: exit - success: result %s' % result)
diff --git a/vdsm/vm.py b/vdsm/vm.py
index 0c12334..5e1c7f1 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -3058,7 +3058,7 @@
self._dom.attachDevice(nicXml)
except libvirt.libvirtError as e:
self.log.error("Hotplug failed", exc_info=True)
- nicXml = hooks.after_nic_hotplug_fail(
+ _ = hooks.after_nic_hotplug_fail(
nicXml, self.conf, params=customProps)
if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
return errCode['noVM']
@@ -3760,7 +3760,7 @@
"trying again without it (%s)", e)
try:
self._dom.snapshotCreateXML(snapxml, snapFlags)
- except Exception as e:
+ except Exception:
self.log.error("Unable to take snapshot", exc_info=True)
if memoryParams:
self.cif.teardownVolumePath(memoryVol)
diff --git a/vdsm_api/Bridge.py b/vdsm_api/Bridge.py
index b9fdaf8..4812354 100644
--- a/vdsm_api/Bridge.py
+++ b/vdsm_api/Bridge.py
@@ -34,7 +34,6 @@
def dispatch(self, name, argobj):
methodName = name.replace('.', '_')
- result = None
try:
fn = getattr(self, methodName)
except AttributeError:
diff --git a/vdsm_api/process-schema.py b/vdsm_api/process-schema.py
index c4bda0d..307d498 100755
--- a/vdsm_api/process-schema.py
+++ b/vdsm_api/process-schema.py
@@ -255,12 +255,12 @@
# Union member types
names = strip_stars(s.get('data', []))
types = filter_types(names)
- details = [None for n in names]
+ details = [None for _ in names]
attr_table('Types', names, types, details)
elif 'enum' in s:
# Enum values
names = strip_stars(s.get('data', []))
- types = [None for n in names]
+ types = [None for _ in names]
details = [s['info_data'][n] for n in names]
attr_table('Values', names, types, details)
elif 'map' in s:
diff --git a/vdsm_api/vdsmapi.py b/vdsm_api/vdsmapi.py
index db29c13..c38ff01 100644
--- a/vdsm_api/vdsmapi.py
+++ b/vdsm_api/vdsmapi.py
@@ -92,7 +92,6 @@
def parse_schema(fp):
exprs = []
expr = ''
- expr_eval = None
for line in fp:
if line.startswith('#') or line == '\n':
--
To view, visit http://gerrit.ovirt.org/20535
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Ib81c292f900154819e8852c21ae389c323034999
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Antoni Segura Puimedon <asegurap(a)redhat.com>
9 years, 6 months
Change in vdsm[master]: vm: migration: exponential downtime increment
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: vm: migration: exponential downtime increment
......................................................................
vm: migration: exponential downtime increment
Migration downtime is calculated using an interpolation
using the current downtime step (up to a configurable
maximum) and the maximum downtime.
The downtime is incremented at each downtime step until
it reaches the maximum, or the migration is finished.
This patch changes the interpolation formula from linear
to exponential, being the new one:
d = c ** s
where d is the downtime, s is the step and c is the
exponentiation coefficient computed in such a way
such that when s = S (number of steps to perform),
then we will have d = D (selected downtime).
Please note that this patch still preserves a linear
increment of the wait time.
Change-Id: I6401772f52ea28144452e67198bddff18f6703eb
Bug-Url: https://bugzilla.redhat.com/show_bug.cgi?id=1004101
Signed-off-by: Francesco Romani <fromani@redhat.com>
---
M vdsm/vm.py
1 file changed, 2 insertions(+), 1 deletion(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/20/25820/1
diff --git a/vdsm/vm.py b/vdsm/vm.py
index c53f1d4..1d7b643 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -722,6 +722,7 @@
delay_per_gib = config.getint('vars', 'migration_downtime_delay')
memSize = int(vm.conf['memSize'])
self._wait = (delay_per_gib * max(memSize, 2048) + 1023) / 1024
+ self._coeff = self._downtime ** (1 / float(self.DOWNTIME_STEPS))
self.daemon = True
self.start()
@@ -735,7 +736,7 @@
if self._stop.isSet():
break
- downtime = self._downtime * (i + 1) / self.DOWNTIME_STEPS
+ downtime = int(self._coeff ** i)
self._vm.log.debug('setting migration downtime to %d', downtime)
self._vm._dom.migrateSetMaxDowntime(downtime, 0)
--
To view, visit http://gerrit.ovirt.org/25820
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I6401772f52ea28144452e67198bddff18f6703eb
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani@redhat.com>
9 years, 6 months
Change in vdsm[master]: virt: stats: move VmStatsThread away from vm.py
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: virt: stats: move VmStatsThread away from vm.py
......................................................................
virt: stats: move VmStatsThread away from vm.py
This patch moves VmStatsThread from vm.py to sampling.py
where it fits better.
isVdsmImage is moved to utils.py to accommodate the needs
of both sampling.py and vm.py.
Change-Id: Icd18288b94c7593ddd2a5e6a1314b6be7a7d8f92
Signed-off-by: Francesco Romani <fromani@redhat.com>
---
M lib/vdsm/utils.py
M tests/vmTests.py
M vdsm/virt/sampling.py
M vdsm/virt/vm.py
4 files changed, 283 insertions(+), 271 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/92/26792/1
diff --git a/lib/vdsm/utils.py b/lib/vdsm/utils.py
index cd178da..c80341f 100644
--- a/lib/vdsm/utils.py
+++ b/lib/vdsm/utils.py
@@ -105,6 +105,18 @@
return stat.S_ISBLK(os.stat(path).st_mode)
+def isVdsmImage(drive):
+ """
+ Tell if drive looks like a vdsm image
+
+ :param drive: drive to check
+ :type drive: dict or vm.Drive
+ :return: bool
+ """
+ required = ('domainID', 'imageID', 'poolID', 'volumeID')
+ return all(k in drive for k in required)
+
+
def touchFile(filePath):
"""
http://www.unix.com/man-page/POSIX/1posix/touch/
diff --git a/tests/vmTests.py b/tests/vmTests.py
index 17967c2..44ef984 100644
--- a/tests/vmTests.py
+++ b/tests/vmTests.py
@@ -25,6 +25,7 @@
import tempfile
import xml.etree.ElementTree as ET
+from virt import sampling
from virt import vm
from virt import vmexitreason
from vdsm import constants
@@ -860,7 +861,7 @@
GBPS = 10 ** 9 / 8
MAC = '52:54:00:59:F5:3F'
with FakeVM() as fake:
- mock_stats_thread = vm.VmStatsThread(fake)
+ mock_stats_thread = sampling.VmStatsThread(fake)
res = mock_stats_thread._getNicStats(
name='vnettest', model='virtio', mac=MAC,
start_sample=(2 ** 64 - 15 * GBPS, 1, 2, 3, 0, 4, 5, 6),
diff --git a/vdsm/virt/sampling.py b/vdsm/virt/sampling.py
index 389bb65..7df9b36 100644
--- a/vdsm/virt/sampling.py
+++ b/vdsm/virt/sampling.py
@@ -33,10 +33,14 @@
import errno
import ethtool
+import libvirt
+
from vdsm import utils
from vdsm import netinfo
-from vdsm.ipwrapper import getLinks
+from vdsm.config import config
from vdsm.constants import P_VDSM_RUN
+from vdsm.ipwrapper import getLinks
+from vdsm.utils import isVdsmImage
_THP_STATE_PATH = '/sys/kernel/mm/transparent_hugepage/enabled'
if not os.path.exists(_THP_STATE_PATH):
@@ -523,3 +527,259 @@
stats['txDropped'] = txDropped
return stats
+
+
+class VmStatsThread(AdvancedStatsThread):
+ MBPS_TO_BPS = 10 ** 6 / 8
+
+ def __init__(self, vm):
+ AdvancedStatsThread.__init__(self, log=vm.log, daemon=True)
+ self._vm = vm
+
+ self.highWrite = (
+ AdvancedStatsFunction(
+ self._highWrite,
+ config.getint('vars', 'vm_watermark_interval')))
+ self.updateVolumes = (
+ AdvancedStatsFunction(
+ self._updateVolumes,
+ config.getint('irs', 'vol_size_sample_interval')))
+
+ self.sampleCpu = (
+ AdvancedStatsFunction(
+ self._sampleCpu,
+ config.getint('vars', 'vm_sample_cpu_interval'),
+ config.getint('vars', 'vm_sample_cpu_window')))
+ self.sampleDisk = (
+ AdvancedStatsFunction(
+ self._sampleDisk,
+ config.getint('vars', 'vm_sample_disk_interval'),
+ config.getint('vars', 'vm_sample_disk_window')))
+ self.sampleDiskLatency = (
+ AdvancedStatsFunction(
+ self._sampleDiskLatency,
+ config.getint('vars', 'vm_sample_disk_latency_interval'),
+ config.getint('vars', 'vm_sample_disk_latency_window')))
+ self.sampleNet = (
+ AdvancedStatsFunction(
+ self._sampleNet,
+ config.getint('vars', 'vm_sample_net_interval'),
+ config.getint('vars', 'vm_sample_net_window')))
+
+ self.addStatsFunction(
+ self.highWrite, self.updateVolumes, self.sampleCpu,
+ self.sampleDisk, self.sampleDiskLatency, self.sampleNet)
+
+ def _highWrite(self):
+ if not self._vm.isDisksStatsCollectionEnabled():
+ # Avoid queries from storage during recovery process
+ return
+ self._vm.extendDrivesIfNeeded()
+
+ def _updateVolumes(self):
+ if not self._vm.isDisksStatsCollectionEnabled():
+ # Avoid queries from storage during recovery process
+ return
+
+ for vmDrive in self._vm.getDiskDevices():
+ self._vm.updateDriveVolume(vmDrive)
+
+ def _sampleCpu(self):
+ cpuStats = self._vm._dom.getCPUStats(True, 0)
+ return cpuStats[0]
+
+ def _sampleDisk(self):
+ diskSamples = {}
+ # Avoid queries from storage during recovery process
+ if self._vm.isDisksStatsCollectionEnabled():
+ for vmDrive in self._vm.getDiskDevices():
+ diskSamples[vmDrive.name] = self._vm._dom.blockStats(
+ vmDrive.name)
+ return diskSamples
+
+ def _sampleDiskLatency(self):
+ # {'wr_total_times': 0L, 'rd_operations': 9638L,
+ # 'flush_total_times': 0L,'rd_total_times': 7622718001L,
+ # 'rd_bytes': 85172430L, 'flush_operations': 0L,
+ # 'wr_operations': 0L, 'wr_bytes': 0L}
+ diskLatency = {}
+ # Avoid queries from storage during recovery process
+ if self._vm.isDisksStatsCollectionEnabled():
+ for vmDrive in self._vm.getDiskDevices():
+ diskLatency[vmDrive.name] = self._vm._dom.blockStatsFlags(
+ vmDrive.name, flags=libvirt.VIR_TYPED_PARAM_STRING_OKAY)
+ return diskLatency
+
+ def _sampleNet(self):
+ netSamples = {}
+ for nic in self._vm.getNicDevices():
+ netSamples[nic.name] = self._vm._dom.interfaceStats(nic.name)
+ return netSamples
+
+ def _diff(self, prev, curr, val):
+ return prev[val] - curr[val]
+
+ def _usagePercentage(self, val, sampleInterval):
+ return 100 * val / sampleInterval / 1000 ** 3
+
+ def _getCpuStats(self, stats):
+ try:
+ sInfo, eInfo, sampleInterval = self.sampleCpu.getStats()
+ except NotEnoughSamplesError:
+ return
+
+ try:
+ stats['cpuSys'] = self._usagePercentage(
+ self._diff(eInfo, sInfo, 'user_time') +
+ self._diff(eInfo, sInfo, 'system_time'),
+ sampleInterval)
+ stats['cpuUser'] = self._usagePercentage(
+ self._diff(eInfo, sInfo, 'cpu_time')
+ - self._diff(eInfo, sInfo, 'user_time')
+ - self._diff(eInfo, sInfo, 'system_time'),
+ sampleInterval)
+
+ except (TypeError, ZeroDivisionError) as e:
+ self._log.debug("CPU stats not available: %s", e)
+ stats['cpuUser'] = 0.0
+ stats['cpuSys'] = 0.0
+
+ @classmethod
+ def _getNicStats(cls, name, model, mac,
+ start_sample, end_sample, interval):
+ ifSpeed = [100, 1000][model in ('e1000', 'virtio')]
+
+ ifStats = {'macAddr': mac,
+ 'name': name,
+ 'speed': str(ifSpeed),
+ 'state': 'unknown'}
+
+ ifStats['rxErrors'] = str(end_sample[2])
+ ifStats['rxDropped'] = str(end_sample[3])
+ ifStats['txErrors'] = str(end_sample[6])
+ ifStats['txDropped'] = str(end_sample[7])
+
+ ifRxBytes = (100.0 *
+ ((end_sample[0] - start_sample[0]) % 2 ** 32) /
+ interval / ifSpeed / cls.MBPS_TO_BPS)
+ ifTxBytes = (100.0 *
+ ((end_sample[4] - start_sample[4]) % 2 ** 32) /
+ interval / ifSpeed / cls.MBPS_TO_BPS)
+
+ ifStats['rxRate'] = '%.1f' % ifRxBytes
+ ifStats['txRate'] = '%.1f' % ifTxBytes
+
+ return ifStats
+
+ def _getNetworkStats(self, stats):
+ stats['network'] = {}
+ try:
+ sInfo, eInfo, sampleInterval = self.sampleNet.getStats()
+ except NotEnoughSamplesError:
+ return
+
+ for nic in self._vm.getNicDevices():
+ if nic.name.startswith('hostdev'):
+ continue
+
+ # may happen if nic is a new hot-plugged one
+ if nic.name not in sInfo or nic.name not in eInfo:
+ continue
+
+ stats['network'][nic.name] = self._getNicStats(
+ nic.name, nic.nicModel, nic.macAddr,
+ sInfo[nic.name], eInfo[nic.name], sampleInterval)
+
+ def _getDiskStats(self, stats):
+ try:
+ sInfo, eInfo, sampleInterval = self.sampleDisk.getStats()
+ except NotEnoughSamplesError:
+ return
+
+ for vmDrive in self._vm.getDiskDevices():
+ dName = vmDrive.name
+ dStats = {}
+ try:
+ dStats = {'truesize': str(vmDrive.truesize),
+ 'apparentsize': str(vmDrive.apparentsize)}
+ if isVdsmImage(vmDrive):
+ dStats['imageID'] = vmDrive.imageID
+ elif "GUID" in vmDrive:
+ dStats['lunGUID'] = vmDrive.GUID
+ dStats['readRate'] = ((eInfo[dName][1] - sInfo[dName][1]) /
+ sampleInterval)
+ dStats['writeRate'] = ((eInfo[dName][3] - sInfo[dName][3]) /
+ sampleInterval)
+ except (AttributeError, KeyError, TypeError, ZeroDivisionError):
+ self._log.debug("Disk %s stats not available", dName)
+
+ stats[dName] = dStats
+
+ def _getDiskLatency(self, stats):
+ try:
+ sInfo, eInfo, sampleInterval = self.sampleDiskLatency.getStats()
+ except NotEnoughSamplesError:
+ return
+
+ def _avgLatencyCalc(sData, eData):
+ readLatency = (0 if not (eData['rd_operations'] -
+ sData['rd_operations'])
+ else (eData['rd_total_times'] -
+ sData['rd_total_times']) /
+ (eData['rd_operations'] -
+ sData['rd_operations']))
+ writeLatency = (0 if not (eData['wr_operations'] -
+ sData['wr_operations'])
+ else (eData['wr_total_times'] -
+ sData['wr_total_times']) /
+ (eData['wr_operations'] -
+ sData['wr_operations']))
+ flushLatency = (0 if not (eData['flush_operations'] -
+ sData['flush_operations'])
+ else (eData['flush_total_times'] -
+ sData['flush_total_times']) /
+ (eData['flush_operations'] -
+ sData['flush_operations']))
+ return str(readLatency), str(writeLatency), str(flushLatency)
+
+        for vmDrive in self._vm.getDiskDevices():
+ dName = vmDrive.name
+ dLatency = {'readLatency': '0',
+ 'writeLatency': '0',
+ 'flushLatency': '0'}
+ try:
+ (dLatency['readLatency'],
+ dLatency['writeLatency'],
+ dLatency['flushLatency']) = _avgLatencyCalc(sInfo[dName],
+ eInfo[dName])
+ except (KeyError, TypeError):
+ self._log.debug("Disk %s latency not available", dName)
+ else:
+ stats[dName].update(dLatency)
+
+ def get(self):
+ stats = {}
+
+ try:
+ stats['statsAge'] = time.time() - self.getLastSampleTime()
+ except TypeError:
+ self._log.debug("Stats age not available")
+ stats['statsAge'] = -1.0
+
+ self._getCpuStats(stats)
+ self._getNetworkStats(stats)
+ self._getDiskStats(stats)
+ self._getDiskLatency(stats)
+
+ return stats
+
+ def handleStatsException(self, ex):
+ # We currently handle only libvirt exceptions
+ if not hasattr(ex, "get_error_code"):
+ return False
+
+ # We currently handle only the missing domain exception
+ if ex.get_error_code() != libvirt.VIR_ERR_NO_DOMAIN:
+ return False
+
+ return True
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 42025c0..92a179e 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -44,6 +44,7 @@
from vdsm.config import config
from vdsm.define import ERROR, NORMAL, doneCode, errCode
from vdsm.netinfo import DUMMY_BRIDGE
+from vdsm.utils import isVdsmImage
from storage import outOfProcess as oop
from storage import sd
from storage import fileUtils
@@ -85,18 +86,6 @@
CONSOLE_DEVICES = 'console'
SMARTCARD_DEVICES = 'smartcard'
TPM_DEVICES = 'tpm'
-
-
-def isVdsmImage(drive):
- """
- Tell if drive looks like a vdsm image
-
- :param drive: drive to check
- :type drive: dict or vm.Drive
- :return: bool
- """
- required = ('domainID', 'imageID', 'poolID', 'volumeID')
- return all(k in drive for k in required)
def _filterSnappableDiskDevices(diskDeviceXmlElements):
@@ -165,262 +154,6 @@
class UpdatePortMirroringError(Exception):
pass
-
-
-class VmStatsThread(sampling.AdvancedStatsThread):
- MBPS_TO_BPS = 10 ** 6 / 8
-
- def __init__(self, vm):
- sampling.AdvancedStatsThread.__init__(self, log=vm.log, daemon=True)
- self._vm = vm
-
- self.highWrite = (
- sampling.AdvancedStatsFunction(
- self._highWrite,
- config.getint('vars', 'vm_watermark_interval')))
- self.updateVolumes = (
- sampling.AdvancedStatsFunction(
- self._updateVolumes,
- config.getint('irs', 'vol_size_sample_interval')))
-
- self.sampleCpu = (
- sampling.AdvancedStatsFunction(
- self._sampleCpu,
- config.getint('vars', 'vm_sample_cpu_interval'),
- config.getint('vars', 'vm_sample_cpu_window')))
- self.sampleDisk = (
- sampling.AdvancedStatsFunction(
- self._sampleDisk,
- config.getint('vars', 'vm_sample_disk_interval'),
- config.getint('vars', 'vm_sample_disk_window')))
- self.sampleDiskLatency = (
- sampling.AdvancedStatsFunction(
- self._sampleDiskLatency,
- config.getint('vars', 'vm_sample_disk_latency_interval'),
- config.getint('vars', 'vm_sample_disk_latency_window')))
- self.sampleNet = (
- sampling.AdvancedStatsFunction(
- self._sampleNet,
- config.getint('vars', 'vm_sample_net_interval'),
- config.getint('vars', 'vm_sample_net_window')))
-
- self.addStatsFunction(
- self.highWrite, self.updateVolumes, self.sampleCpu,
- self.sampleDisk, self.sampleDiskLatency, self.sampleNet)
-
- def _highWrite(self):
- if not self._vm.isDisksStatsCollectionEnabled():
- # Avoid queries from storage during recovery process
- return
- self._vm.extendDrivesIfNeeded()
-
- def _updateVolumes(self):
- if not self._vm.isDisksStatsCollectionEnabled():
- # Avoid queries from storage during recovery process
- return
-
- for vmDrive in self._vm._devices[DISK_DEVICES]:
- self._vm.updateDriveVolume(vmDrive)
-
- def _sampleCpu(self):
- cpuStats = self._vm._dom.getCPUStats(True, 0)
- return cpuStats[0]
-
- def _sampleDisk(self):
- diskSamples = {}
- # Avoid queries from storage during recovery process
- if self._vm.isDisksStatsCollectionEnabled():
- for vmDrive in self._vm._devices[DISK_DEVICES]:
- diskSamples[vmDrive.name] = self._vm._dom.blockStats(
- vmDrive.name)
- return diskSamples
-
- def _sampleDiskLatency(self):
- # {'wr_total_times': 0L, 'rd_operations': 9638L,
- # 'flush_total_times': 0L,'rd_total_times': 7622718001L,
- # 'rd_bytes': 85172430L, 'flush_operations': 0L,
- # 'wr_operations': 0L, 'wr_bytes': 0L}
- diskLatency = {}
- # Avoid queries from storage during recovery process
- if self._vm.isDisksStatsCollectionEnabled():
- for vmDrive in self._vm._devices[DISK_DEVICES]:
- diskLatency[vmDrive.name] = self._vm._dom.blockStatsFlags(
- vmDrive.name, flags=libvirt.VIR_TYPED_PARAM_STRING_OKAY)
- return diskLatency
-
- def _sampleNet(self):
- netSamples = {}
- for nic in self._vm._devices[NIC_DEVICES]:
- netSamples[nic.name] = self._vm._dom.interfaceStats(nic.name)
- return netSamples
-
- def _diff(self, prev, curr, val):
- return prev[val] - curr[val]
-
- def _usagePercentage(self, val, sampleInterval):
- return 100 * val / sampleInterval / 1000 ** 3
-
- def _getCpuStats(self, stats):
- try:
- sInfo, eInfo, sampleInterval = self.sampleCpu.getStats()
- except sampling.NotEnoughSamplesError:
- return
-
- try:
- stats['cpuSys'] = self._usagePercentage(
- self._diff(eInfo, sInfo, 'user_time') +
- self._diff(eInfo, sInfo, 'system_time'),
- sampleInterval)
- stats['cpuUser'] = self._usagePercentage(
- self._diff(eInfo, sInfo, 'cpu_time')
- - self._diff(eInfo, sInfo, 'user_time')
- - self._diff(eInfo, sInfo, 'system_time'),
- sampleInterval)
-
- except (TypeError, ZeroDivisionError) as e:
- self._log.debug("CPU stats not available: %s", e)
- stats['cpuUser'] = 0.0
- stats['cpuSys'] = 0.0
-
- @classmethod
- def _getNicStats(cls, name, model, mac,
- start_sample, end_sample, interval):
- ifSpeed = [100, 1000][model in ('e1000', 'virtio')]
-
- ifStats = {'macAddr': mac,
- 'name': name,
- 'speed': str(ifSpeed),
- 'state': 'unknown'}
-
- ifStats['rxErrors'] = str(end_sample[2])
- ifStats['rxDropped'] = str(end_sample[3])
- ifStats['txErrors'] = str(end_sample[6])
- ifStats['txDropped'] = str(end_sample[7])
-
- ifRxBytes = (100.0 *
- ((end_sample[0] - start_sample[0]) % 2 ** 32) /
- interval / ifSpeed / cls.MBPS_TO_BPS)
- ifTxBytes = (100.0 *
- ((end_sample[4] - start_sample[4]) % 2 ** 32) /
- interval / ifSpeed / cls.MBPS_TO_BPS)
-
- ifStats['rxRate'] = '%.1f' % ifRxBytes
- ifStats['txRate'] = '%.1f' % ifTxBytes
-
- return ifStats
-
- def _getNetworkStats(self, stats):
- stats['network'] = {}
- try:
- sInfo, eInfo, sampleInterval = self.sampleNet.getStats()
- except sampling.NotEnoughSamplesError:
- return
-
- for nic in self._vm._devices[NIC_DEVICES]:
- if nic.name.startswith('hostdev'):
- continue
-
- # may happen if nic is a new hot-plugged one
- if nic.name not in sInfo or nic.name not in eInfo:
- continue
-
- stats['network'][nic.name] = self._getNicStats(
- nic.name, nic.nicModel, nic.macAddr,
- sInfo[nic.name], eInfo[nic.name], sampleInterval)
-
- def _getDiskStats(self, stats):
- try:
- sInfo, eInfo, sampleInterval = self.sampleDisk.getStats()
- except sampling.NotEnoughSamplesError:
- return
-
- for vmDrive in self._vm._devices[DISK_DEVICES]:
- dName = vmDrive.name
- dStats = {}
- try:
- dStats = {'truesize': str(vmDrive.truesize),
- 'apparentsize': str(vmDrive.apparentsize)}
- if isVdsmImage(vmDrive):
- dStats['imageID'] = vmDrive.imageID
- elif "GUID" in vmDrive:
- dStats['lunGUID'] = vmDrive.GUID
- dStats['readRate'] = ((eInfo[dName][1] - sInfo[dName][1]) /
- sampleInterval)
- dStats['writeRate'] = ((eInfo[dName][3] - sInfo[dName][3]) /
- sampleInterval)
- except (AttributeError, KeyError, TypeError, ZeroDivisionError):
- self._log.debug("Disk %s stats not available", dName)
-
- stats[dName] = dStats
-
- def _getDiskLatency(self, stats):
- try:
- sInfo, eInfo, sampleInterval = self.sampleDiskLatency.getStats()
- except sampling.NotEnoughSamplesError:
- return
-
- def _avgLatencyCalc(sData, eData):
- readLatency = (0 if not (eData['rd_operations'] -
- sData['rd_operations'])
- else (eData['rd_total_times'] -
- sData['rd_total_times']) /
- (eData['rd_operations'] -
- sData['rd_operations']))
- writeLatency = (0 if not (eData['wr_operations'] -
- sData['wr_operations'])
- else (eData['wr_total_times'] -
- sData['wr_total_times']) /
- (eData['wr_operations'] -
- sData['wr_operations']))
- flushLatency = (0 if not (eData['flush_operations'] -
- sData['flush_operations'])
- else (eData['flush_total_times'] -
- sData['flush_total_times']) /
- (eData['flush_operations'] -
- sData['flush_operations']))
- return str(readLatency), str(writeLatency), str(flushLatency)
-
- for vmDrive in self._vm._devices[DISK_DEVICES]:
- dName = vmDrive.name
- dLatency = {'readLatency': '0',
- 'writeLatency': '0',
- 'flushLatency': '0'}
- try:
- (dLatency['readLatency'],
- dLatency['writeLatency'],
- dLatency['flushLatency']) = _avgLatencyCalc(sInfo[dName],
- eInfo[dName])
- except (KeyError, TypeError):
- self._log.debug("Disk %s latency not available", dName)
- else:
- stats[dName].update(dLatency)
-
- def get(self):
- stats = {}
-
- try:
- stats['statsAge'] = time.time() - self.getLastSampleTime()
- except TypeError:
- self._log.debug("Stats age not available")
- stats['statsAge'] = -1.0
-
- self._getCpuStats(stats)
- self._getNetworkStats(stats)
- self._getDiskStats(stats)
- self._getDiskLatency(stats)
-
- return stats
-
- def handleStatsException(self, ex):
- # We currently handle only libvirt exceptions
- if not hasattr(ex, "get_error_code"):
- return False
-
- # We currently handle only the missing domain exception
- if ex.get_error_code() != libvirt.VIR_ERR_NO_DOMAIN:
- return False
-
- return True
class TimeoutError(libvirt.libvirtError):
@@ -2654,7 +2387,7 @@
return domxml.toxml()
def _initVmStats(self):
- self._vmStats = VmStatsThread(self)
+ self._vmStats = sampling.VmStatsThread(self)
self._vmStats.start()
self._guestEventTime = self._startTime
@@ -4923,6 +4656,12 @@
hooks.before_vm_migrate_destination(srcDomXML, self.conf)
return True
+ def getDiskDevices(self):
+ return self._devices[DISK_DEVICES]
+
+ def getNicDevices(self):
+ return self._devices[NIC_DEVICES]
+
# A little unrelated hack to make xml.dom.minidom.Document.toprettyxml()
# not wrap Text node with whitespace.
--
To view, visit http://gerrit.ovirt.org/26792
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: Icd18288b94c7593ddd2a5e6a1314b6be7a7d8f92
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
9 years, 6 months
Change in vdsm[master]: vdsm: Refactoring of retrieving device info from xml
by Vinzenz Feenstra
Vinzenz Feenstra has uploaded a new change for review.
Change subject: vdsm: Refactoring of retrieving device info from xml
......................................................................
vdsm: Refactoring of retrieving device info from xml
Reworked a bit the retrieval of device info from the libvirt domain xml.
Now VDSM won't parse the code in lastXmlDesc every time and the retrieval
of elements from the domain xml has been a bit abstracted.
Additionally the retrieval of an alias has been moved into a separate
function call to make the readability a bit better.
Change-Id: I7e106b2f2d3f4160d4e882f1a2880cb1b52fbb22
Signed-off-by: Vinzenz Feenstra <vfeenstr(a)redhat.com>
---
M vdsm/vm.py
1 file changed, 63 insertions(+), 76 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/94/17694/1
diff --git a/vdsm/vm.py b/vdsm/vm.py
index dc52909..e51050e 100644
--- a/vdsm/vm.py
+++ b/vdsm/vm.py
@@ -1698,6 +1698,7 @@
self._guestSocketFile = self._makeChannelPath(_VMCHANNEL_DEVICE_NAME)
self._qemuguestSocketFile = self._makeChannelPath(_QEMU_GA_DEVICE_NAME)
self._lastXMLDesc = '<domain><uuid>%s</uuid></domain>' % self.id
+ self._lastParsedXmlDesc = _domParseStr(self._lastXMLDesc)
self._devXmlHash = '0'
self._released = False
self._releaseLock = threading.Lock()
@@ -2722,24 +2723,30 @@
self._guestCpuRunning = (self._dom.info()[0] ==
libvirt.VIR_DOMAIN_RUNNING)
+ def _getDevicesXml(self, parsedXml=None):
+ parsedXml = parsedXml or self._lastParsedXmlDesc
+ return parsedXml.childNodes[0].getElementsByTagName('devices')[0]
+
def _getUnderlyingVmDevicesInfo(self):
"""
Obtain underlying vm's devices info from libvirt.
"""
- self._getUnderlyingNetworkInterfaceInfo()
- self._getUnderlyingDriveInfo()
- self._getUnderlyingDisplayPort()
- self._getUnderlyingSoundDeviceInfo()
- self._getUnderlyingVideoDeviceInfo()
- self._getUnderlyingControllerDeviceInfo()
- self._getUnderlyingBalloonDeviceInfo()
- self._getUnderlyingWatchdogDeviceInfo()
- self._getUnderlyingSmartcardDeviceInfo()
- self._getUnderlyingConsoleDeviceInfo()
+ devicesXml = self._getDevicesXml(parsedXml=self._lastParsedXmlDesc)
+ self._getUnderlyingNetworkInterfaceInfo(devicesXml=devicesXml)
+ self._getUnderlyingDriveInfo(devicesXml=devicesXml)
+ self._getUnderlyingDisplayPort(xml=self._lastParsedXmlDesc)
+ self._getUnderlyingSoundDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingVideoDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingControllerDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingBalloonDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingWatchdogDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingSmartcardDeviceInfo(devicesXml=devicesXml)
+ self._getUnderlyingConsoleDeviceInfo(devicesXml=devicesXml)
# Obtain info of all unknown devices. Must be last!
- self._getUnderlyingUnknownDeviceInfo()
+ self._getUnderlyingUnknownDeviceInfo(devicesXml=devicesXml)
+ self._updateAgentChannels(devicesXml=devicesXml)
- def _updateAgentChannels(self):
+ def _updateAgentChannels(self, devicesXml):
"""
We moved the naming of guest agent channel sockets. To keep backwards
compatability we need to make symlinks from the old channel sockets, to
@@ -2747,9 +2754,7 @@
This is necessary to prevent incoming migrations, restoring of VMs and
the upgrade of VDSM with running VMs to fail on this.
"""
- agentChannelXml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]. \
- getElementsByTagName('channel')
+ agentChannelXml = devicesXml.getElementsByTagName('channel')
for channel in agentChannelXml:
try:
name = channel.getElementsByTagName('target')[0].\
@@ -2781,7 +2786,6 @@
self._getUnderlyingVmInfo()
self._getUnderlyingVmDevicesInfo()
- self._updateAgentChannels()
#Currently there is no protection agains mirroring a network twice,
for nic in self._devices[NIC_DEVICES]:
@@ -2937,9 +2941,8 @@
or revert to snapshot.
"""
parsedSrcDomXML = _domParseStr(srcDomXML)
-
- allDiskDeviceXmlElements = parsedSrcDomXML.childNodes[0]. \
- getElementsByTagName('devices')[0].getElementsByTagName('disk')
+ devicesXml = self._getDevicesXml(parsedXml=parsedSrcDomXML)
+ allDiskDeviceXmlElements = devicesXml.getElementsByTagName('disk')
snappableDiskDeviceXmlElements = \
_filterSnappableDiskDevices(allDiskDeviceXmlElements)
@@ -3008,7 +3011,8 @@
with self._confLock:
self.conf['devices'].append(nicParams)
self.saveState()
- self._getUnderlyingNetworkInterfaceInfo()
+ self._getUnderlyingNetworkInterfaceInfo(
+ devicesXml=self._getDevicesXml())
hooks.after_nic_hotplug(nicXml, self.conf,
params=customProps)
@@ -3264,7 +3268,7 @@
with self._confLock:
self.conf['devices'].append(diskParams)
self.saveState()
- self._getUnderlyingDriveInfo()
+ self._getUnderlyingDriveInfo(devicesXml=self._getDevicesXml())
hooks.after_disk_hotplug(driveXml, self.conf,
params=customProps)
@@ -4181,8 +4185,8 @@
def _getUnderlyingVmInfo(self):
self._lastXMLDesc = self._dom.XMLDesc(0)
- devxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]
+ self._lastParsedXmlDesc = _domParseStr(self._lastXMLDesc)
+ devxml = self._getDevicesXml()
self._devXmlHash = str(hash(devxml.toxml()))
return self._lastXMLDesc
@@ -4331,6 +4335,9 @@
self.saveState()
return {'status': doneCode}
+ def _getUnderlyingDeviceAliasName(self, devXml):
+ return devXml.getElementsByTagName('alias')[0].getAttribute('name')
+
def _getUnderlyingDeviceAddress(self, devXml):
"""
Obtain device's address from libvirt
@@ -4347,7 +4354,7 @@
return address
- def _getUnderlyingUnknownDeviceInfo(self):
+ def _getUnderlyingUnknownDeviceInfo(self, devicesXml):
"""
Obtain unknown devices info from libvirt.
@@ -4360,16 +4367,13 @@
return True
return False
- devsxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]
-
- for x in devsxml.childNodes:
+ for x in devicesXml.childNodes:
# Ignore empty nodes and devices without address
if (x.nodeName == '#text' or
not x.getElementsByTagName('address')):
continue
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
if not isKnownDevice(alias):
address = self._getUnderlyingDeviceAddress(x)
# I general case we assume that device has attribute 'type',
@@ -4381,18 +4385,16 @@
'address': address}
self.conf['devices'].append(newDev)
- def _getUnderlyingControllerDeviceInfo(self):
+ def _getUnderlyingControllerDeviceInfo(self, devicesXml):
"""
Obtain controller devices info from libvirt.
"""
- ctrlsxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]. \
- getElementsByTagName('controller')
+ ctrlsxml = devicesXml.getElementsByTagName('controller')
for x in ctrlsxml:
# Ignore controller devices without address
if not x.getElementsByTagName('address'):
continue
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
device = x.getAttribute('type')
# Get model and index. Relevant for USB controllers.
model = x.getAttribute('model')
@@ -4428,20 +4430,18 @@
'address': address,
'alias': alias})
- def _getUnderlyingBalloonDeviceInfo(self):
+ def _getUnderlyingBalloonDeviceInfo(self, devicesXml):
"""
Obtain balloon device info from libvirt.
"""
- balloonxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]. \
- getElementsByTagName('memballoon')
+ balloonxml = devicesXml.getElementsByTagName('memballoon')
for x in balloonxml:
# Ignore balloon devices without address.
if not x.getElementsByTagName('address'):
address = None
else:
address = self._getUnderlyingDeviceAddress(x)
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
for dev in self._devices[BALLOON_DEVICES]:
if address and not hasattr(dev, 'address'):
@@ -4456,16 +4456,14 @@
if not dev.get('alias'):
dev['alias'] = alias
- def _getUnderlyingConsoleDeviceInfo(self):
+ def _getUnderlyingConsoleDeviceInfo(self, devicesXml):
"""
Obtain the alias for the console device from libvirt
"""
- consolexml = _domParseStr(self._lastXMLDesc).childNodes[0].\
- getElementsByTagName('devices')[0].\
- getElementsByTagName('console')
+ consolexml = devicesXml.getElementsByTagName('console')
for x in consolexml:
# All we care about is the alias
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
for dev in self._devices[CONSOLE_DEVICES]:
if not hasattr(dev, 'alias'):
dev.alias = alias
@@ -4475,19 +4473,17 @@
not dev.get('alias'):
dev['alias'] = alias
- def _getUnderlyingSmartcardDeviceInfo(self):
+ def _getUnderlyingSmartcardDeviceInfo(self, devicesXml):
"""
Obtain smartcard device info from libvirt.
"""
- smartcardxml = _domParseStr(self._lastXMLDesc).childNodes[0].\
- getElementsByTagName('devices')[0].\
- getElementsByTagName('smartcard')
+ smartcardxml = devicesXml.getElementsByTagName('smartcard')
for x in smartcardxml:
if not x.getElementsByTagName('address'):
continue
address = self._getUnderlyingDeviceAddress(x)
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
for dev in self._devices[SMARTCARD_DEVICES]:
if not hasattr(dev, 'address'):
@@ -4500,19 +4496,17 @@
dev['address'] = address
dev['alias'] = alias
- def _getUnderlyingWatchdogDeviceInfo(self):
+ def _getUnderlyingWatchdogDeviceInfo(self, devicesXml):
"""
Obtain watchdog device info from libvirt.
"""
- watchdogxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]. \
- getElementsByTagName('watchdog')
+ watchdogxml = devicesXml.getElementsByTagName('watchdog')
for x in watchdogxml:
# PCI watchdog has "address" different from ISA watchdog
if x.getElementsByTagName('address'):
address = self._getUnderlyingDeviceAddress(x)
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
for wd in self._devices[WATCHDOG_DEVICES]:
if not hasattr(wd, 'address') or not hasattr(wd, 'alias'):
@@ -4525,14 +4519,13 @@
dev['address'] = address
dev['alias'] = alias
- def _getUnderlyingVideoDeviceInfo(self):
+ def _getUnderlyingVideoDeviceInfo(self, devicesXml):
"""
Obtain video devices info from libvirt.
"""
- videosxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0].getElementsByTagName('video')
+ videosxml = devicesXml.getElementsByTagName('video')
for x in videosxml:
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
# Get video card address
address = self._getUnderlyingDeviceAddress(x)
@@ -4553,14 +4546,13 @@
dev['alias'] = alias
break
- def _getUnderlyingSoundDeviceInfo(self):
+ def _getUnderlyingSoundDeviceInfo(self, devicesXml):
"""
Obtain sound devices info from libvirt.
"""
- soundsxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0].getElementsByTagName('sound')
+ soundsxml = devicesXml.getElementsByTagName('sound')
for x in soundsxml:
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
# Get sound card address
address = self._getUnderlyingDeviceAddress(x)
@@ -4581,12 +4573,11 @@
dev['alias'] = alias
break
- def _getUnderlyingDriveInfo(self):
+ def _getUnderlyingDriveInfo(self, devicesXml):
"""
Obtain block devices info from libvirt.
"""
- disksxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0].getElementsByTagName('disk')
+ disksxml = devicesXml.getElementsByTagName('disk')
# FIXME! We need to gather as much info as possible from the libvirt.
# In the future we can return this real data to management instead of
# vm's conf
@@ -4600,7 +4591,7 @@
target = x.getElementsByTagName('target')
name = target[0].getAttribute('dev') if target else ''
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
readonly = bool(x.getElementsByTagName('readonly'))
boot = x.getElementsByTagName('boot')
bootOrder = boot[0].getAttribute('order') if boot else ''
@@ -4646,12 +4637,11 @@
diskDev['bootOrder'] = bootOrder
self.conf['devices'].append(diskDev)
- def _getUnderlyingDisplayPort(self):
+ def _getUnderlyingDisplayPort(self, xml):
"""
Obtain display port info from libvirt.
"""
- graphics = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('graphics')[0]
+ graphics = xml.childNodes[0].getElementsByTagName('graphics')[0]
port = graphics.getAttribute('port')
if port:
self.conf['displayPort'] = port
@@ -4659,18 +4649,16 @@
if port:
self.conf['displaySecurePort'] = port
- def _getUnderlyingNetworkInterfaceInfo(self):
+ def _getUnderlyingNetworkInterfaceInfo(self, devicesXml):
"""
Obtain network interface info from libvirt.
"""
# TODO use xpath instead of parseString (here and elsewhere)
- ifsxml = _domParseStr(self._lastXMLDesc).childNodes[0]. \
- getElementsByTagName('devices')[0]. \
- getElementsByTagName('interface')
+ ifsxml = devicesXml.getElementsByTagName('interface')
for x in ifsxml:
devType = x.getAttribute('type')
mac = x.getElementsByTagName('mac')[0].getAttribute('address')
- alias = x.getElementsByTagName('alias')[0].getAttribute('name')
+ alias = self._getUnderlyingDeviceAliasName(x)
if devType == 'hostdev':
name = alias
model = 'passthrough'
@@ -4802,8 +4790,7 @@
"during migration at destination host" %
devType)
- devices = _domParseStr(xml).childNodes[0]. \
- getElementsByTagName('devices')[0]
+ devices = self._getDevicesXml(parsedXml=_domParseStr(xml))
for deviceXML in devices.childNodes:
if deviceXML.nodeType != Node.ELEMENT_NODE:
--
To view, visit http://gerrit.ovirt.org/17694
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7e106b2f2d3f4160d4e882f1a2880cb1b52fbb22
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Vinzenz Feenstra <vfeenstr(a)redhat.com>
9 years, 6 months
Change in vdsm[master]: open: Change file() to open()
by dkuznets@redhat.com
Dima Kuznetsov has uploaded a new change for review.
Change subject: open: Change file() to open()
......................................................................
open: Change file() to open()
Update file calls to open calls due to file's deprecation.
Change-Id: I80007044be1c648ebb668704731eae8b83366025
Signed-off-by: Dima Kuznetsov <dkuznets(a)redhat.com>
---
M vdsm/API.py
M vdsm/hooks.py
M vdsm/storage/fileUtils.py
M vdsm/storage/hba.py
M vdsm/virt/sampling.py
M vdsm_hooks/nestedvt/before_vm_start.py
6 files changed, 13 insertions(+), 13 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/76/26776/1
diff --git a/vdsm/API.py b/vdsm/API.py
index 94b39b6..6724559 100644
--- a/vdsm/API.py
+++ b/vdsm/API.py
@@ -185,8 +185,8 @@
# parmas were stored.
fname = self._cif.prepareVolumePath(paramFilespec)
try:
- with file(fname) as f:
- pickledMachineParams = pickle.load(f)
+ with open(fname) as file:
+ pickledMachineParams = pickle.load(file)
if type(pickledMachineParams) == dict:
self.log.debug('loaded pickledMachineParams ' +
diff --git a/vdsm/hooks.py b/vdsm/hooks.py
index 75f33f9..ba7bad7 100644
--- a/vdsm/hooks.py
+++ b/vdsm/hooks.py
@@ -341,8 +341,8 @@
def _getScriptInfo(path):
try:
- with file(path) as f:
- md5 = hashlib.md5(f.read()).hexdigest()
+ with open(path) as file:
+ md5 = hashlib.md5(file.read()).hexdigest()
except:
md5 = ''
return {'md5': md5}
diff --git a/vdsm/storage/fileUtils.py b/vdsm/storage/fileUtils.py
index 56dc1ef..4ffe113 100644
--- a/vdsm/storage/fileUtils.py
+++ b/vdsm/storage/fileUtils.py
@@ -401,6 +401,6 @@
def padToBlockSize(path):
- with file(path, 'a') as f:
- size = os.fstat(f.fileno()).st_size
- os.ftruncate(f.fileno(), 512 * ((size + 511) / 512))
+ with open(path, 'a') as file:
+ size = os.fstat(file.fileno()).st_size
+ os.ftruncate(file.fileno(), 512 * ((size + 511) / 512))
diff --git a/vdsm/storage/hba.py b/vdsm/storage/hba.py
index da3feef..b649f6c 100644
--- a/vdsm/storage/hba.py
+++ b/vdsm/storage/hba.py
@@ -43,8 +43,8 @@
"""
hbas = []
try:
- with file(ISCSI_INITIATOR_NAME) as f:
- for line in f:
+ with open(ISCSI_INITIATOR_NAME) as file:
+ for line in file:
if line.startswith(INITIATOR_NAME):
hba = {'InitiatorName': line.split("=")[1].strip()}
hbas.append(hba)
diff --git a/vdsm/virt/sampling.py b/vdsm/virt/sampling.py
index 991c969..ed8f58e 100644
--- a/vdsm/virt/sampling.py
+++ b/vdsm/virt/sampling.py
@@ -189,8 +189,8 @@
self.cpuLoad = '0.0'
self.diskStats = self._getDiskStats()
try:
- with file(_THP_STATE_PATH) as f:
- s = f.read()
+ with open(_THP_STATE_PATH) as file:
+ s = file.read()
self.thpState = s[s.index('[') + 1:s.index(']')]
except:
self.thpState = 'never'
diff --git a/vdsm_hooks/nestedvt/before_vm_start.py b/vdsm_hooks/nestedvt/before_vm_start.py
index 03f0f6f..d4fb806 100755
--- a/vdsm_hooks/nestedvt/before_vm_start.py
+++ b/vdsm_hooks/nestedvt/before_vm_start.py
@@ -29,8 +29,8 @@
for kvm_mod in ("kvm_intel", "kvm_amd"):
kvm_mod_path = "/sys/module/%s/parameters/nested" % kvm_mod
try:
- with file(kvm_mod_path) as f:
- if f.readline().strip() in ("Y", "1"):
+ with open(kvm_mod_path) as file:
+ if file.readline().strip() in ("Y", "1"):
break
except IOError:
pass
--
To view, visit http://gerrit.ovirt.org/26776
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I80007044be1c648ebb668704731eae8b83366025
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Dima Kuznetsov <dkuznets(a)redhat.com>
9 years, 7 months
Change in vdsm[master]: drop dead ksmtuned-related code
by Dan Kenigsberg
Dan Kenigsberg has uploaded a new change for review.
Change subject: drop dead ksmtuned-related code
......................................................................
drop dead ksmtuned-related code
mom has replaced ksmtuned completely, so we can safely remove the unused
code.
Change-Id: I511fa1754e55076084529834fa9758c2fd139761
Signed-off-by: Dan Kenigsberg <danken(a)redhat.com>
---
M vdsm/ksm.py
M vdsm/sudoers.vdsm.in
M vdsm/virt/vm.py
3 files changed, 1 insertion(+), 29 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/26/27026/1
diff --git a/vdsm/ksm.py b/vdsm/ksm.py
index 7ace217..c9cb3b1 100644
--- a/vdsm/ksm.py
+++ b/vdsm/ksm.py
@@ -45,7 +45,6 @@
self.setDaemon(True)
self._cif = cif
self.state, self.pages = False, 0
- self._lock = threading.Lock()
if config.getboolean('ksm', 'ksm_monitor_thread'):
pids = utils.execCmd([constants.EXT_PGREP, '-xf', 'ksmd'],
raw=False)[1]
@@ -78,18 +77,6 @@
except:
self._cif.log.error("Error monitoring KSM", exc_info=True)
- def adjust(self):
- """Adjust ksm's vigor
-
- Recalculate how hard should ksm work, according to configuration and
- current memory stress.
- Return whether ksm is running"""
-
- with self._lock:
- utils.execCmd([constants.EXT_SERVICE, 'ksmtuned', 'retune'],
- sudo=True)
- return running()
-
def memsharing(self):
return _readProcFSInt('/sys/kernel/mm/ksm/pages_sharing')
@@ -101,11 +88,6 @@
def npages():
return _readProcFSInt('/sys/kernel/mm/ksm/pages_to_scan')
-
-
-def start():
- utils.execCmd([constants.EXT_SERVICE, 'ksmtuned', 'start'], sudo=True)
- utils.execCmd([constants.EXT_SERVICE, 'ksm', 'start'], sudo=True)
def tune(params):
diff --git a/vdsm/sudoers.vdsm.in b/vdsm/sudoers.vdsm.in
index 584807d..2b745d3 100644
--- a/vdsm/sudoers.vdsm.in
+++ b/vdsm/sudoers.vdsm.in
@@ -1,8 +1,6 @@
Cmnd_Alias VDSM_LIFECYCLE = \
@DMIDECODE_PATH@, \
- @VDSMDIR@/mk_sysprep_floppy, \
- @SERVICE_PATH@ ksmtuned *, \
- @SERVICE_PATH@ ksm *
+ @VDSMDIR@/mk_sysprep_floppy
Cmnd_Alias VDSM_STORAGE = @MOUNT_PATH@, @UMOUNT_PATH@, \
@FSCK_PATH@ -p *, \
@TUNE2FS_PATH@ -j *, \
diff --git a/vdsm/virt/vm.py b/vdsm/virt/vm.py
index 79ff40b..0bb1e85 100644
--- a/vdsm/virt/vm.py
+++ b/vdsm/virt/vm.py
@@ -1925,12 +1925,6 @@
self._vmCreationEvent.set()
try:
self._run()
- if self.lastStatus != vmstatus.DOWN and not self.recovering \
- and not self.cif.mom:
- # If MOM is available, we needn't tell it to adjust KSM
- # behaviors on VM start/destroy, because the tuning can be
- # done automatically according to its statistical data.
- self.cif.ksmMonitor.adjust()
except Exception:
if not self.recovering:
raise
@@ -4273,8 +4267,6 @@
else:
self.log.warn("VM %s is not running", self.conf['vmId'])
- if not self.cif.mom:
- self.cif.ksmMonitor.adjust()
self._cleanup()
self.cif.irs.inappropriateDevices(self.id)
--
To view, visit http://gerrit.ovirt.org/27026
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I511fa1754e55076084529834fa9758c2fd139761
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Dan Kenigsberg <danken(a)redhat.com>
9 years, 8 months
Change in vdsm[master]: virt: migration: decouple monitoring from thread
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: virt: migration: decouple monitoring from thread
......................................................................
virt: migration: decouple monitoring from thread
Decouple the monitor logic from the monitor
thread/scheduler in order to improve testability.
Change-Id: I364a9eeb72e3b4213278adff352f3eade19548a3
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M vdsm/virt/migration.py
1 file changed, 38 insertions(+), 26 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/79/26279/1
diff --git a/vdsm/virt/migration.py b/vdsm/virt/migration.py
index 5d00120..8ac25e0 100644
--- a/vdsm/virt/migration.py
+++ b/vdsm/virt/migration.py
@@ -343,6 +343,32 @@
class MonitorThread(threading.Thread):
+ def __init__(self, vm, startTime, downTime):
+ super(MonitorThread, self).__init__()
+ self._stop = threading.Event()
+ self.daemon = True
+ self._mon = Monitor(vm, startTime, downTime)
+
+ def run(self):
+ self._vm.log.debug('starting migration monitor thread')
+
+ step = 1
+ done = False
+
+ while not self._stop.isSet() and not done:
+ self._stop.wait(1.0)
+ done = (self._mon.monitor_migration(step) and
+ self._mon.monitor_downtime(step))
+ step += 1
+
+ self._vm.log.debug('migration monitor thread exiting')
+
+ def stop(self):
+ self._vm.log.debug('stopping migration monitor thread')
+ self._stop.set()
+
+
+class Monitor(object):
_MONITOR_INTERVAL = config.getint(
'vars', 'migration_monitor_interval') # seconds
_MAX_TIME_PER_GIB = config.getint(
@@ -355,11 +381,8 @@
'vars', 'migration_downtime_steps')
def __init__(self, vm, startTime, downTime):
- super(MonitorThread, self).__init__()
- self._stop = threading.Event()
self._vm = vm
self._startTime = startTime
- self.daemon = True
self.progress = 0
memSize = int(self._vm.conf['memSize'])
@@ -371,28 +394,20 @@
self._DELAY_PER_GIB * max(memSize, 2048) + 1023) / 1024
self._downtimeInterval = self._wait / self._DOWNTIME_STEPS
self._downtimeStep = 0
+ self._lowmark = None
+ self._lastProgressTime = None
@property
def enabled(self):
return MonitorThread._MIGRATION_MONITOR_INTERVAL > 0
- def run(self):
- self._vm.log.debug('starting migration monitor thread')
-
- step = 1
- self._lastProgressTime = time.time()
- self._lowmark = None
-
- while not self._stop.isSet():
- self._stop.wait(1.0)
- if self.enabled:
- self.monitor_migration(step)
- self.monitor_downtime(step)
- step += 1
-
- self._vm.log.debug('migration monitor thread exiting')
-
def monitor_migration(self, step):
+ if not self.enabled:
+ return False
+
+ if self._lastProgressTime is None:
+ self._lastProgressTime = time.time()
+
def calculateProgress(remaining, total):
if remaining == 0:
return 100
@@ -428,8 +443,7 @@
if abort:
self._vm._dom.abortJob()
- self.stop()
- return
+ return abort
if dataRemaining > self._lowmark:
self._vm.log.warn(
@@ -439,13 +453,14 @@
dataRemaining / Mbytes, self._lowmark / Mbytes)
if jobType == 0:
- return
+ return False
self.progress = calculateProgress(dataRemaining, dataTotal)
self._vm.log.info('Migration Progress: %s seconds elapsed, %s%% of'
' data processed' %
(timeElapsed / 1000, self.progress))
+ return False
def update_downtime(self, i):
return self._downtime * (i + 1) / self._DOWNTIME_STEPS
@@ -457,7 +472,4 @@
self._vm.log.debug('setting migration downtime to %d', downtime)
self._vm._dom.migrateSetMaxDowntime(downtime, 0)
self._downtimeStep += 1
-
- def stop(self):
- self._vm.log.debug('stopping migration monitor thread')
- self._stop.set()
+ return False
--
To view, visit http://gerrit.ovirt.org/26279
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I364a9eeb72e3b4213278adff352f3eade19548a3
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
9 years, 8 months
Change in vdsm[master]: virt: migration: merge monitor and downtime thread
by fromani@redhat.com
Francesco Romani has uploaded a new change for review.
Change subject: virt: migration: merge monitor and downtime thread
......................................................................
virt: migration: merge monitor and downtime thread
This patch merges the MigrationDowntimeThread
into the MigrationMonitorThread.
The benefits are
* less code
* less threads
* better (and simpler) integration between
migration progress tracking and downtime setting.
Change-Id: I7ac66331b44435a9cffeb9de1454db6843245979
Signed-off-by: Francesco Romani <fromani(a)redhat.com>
---
M vdsm/migration.py
1 file changed, 22 insertions(+), 41 deletions(-)
git pull ssh://gerrit.ovirt.org:29418/vdsm refs/changes/77/25977/1
diff --git a/vdsm/migration.py b/vdsm/migration.py
index 1234471..8c33f1e 100644
--- a/vdsm/migration.py
+++ b/vdsm/migration.py
@@ -293,10 +293,9 @@
self._vm.log.debug('starting migration to %s '
'with miguri %s', duri, muri)
- t = MigrationDowntimeThread(self._vm, int(self._downtime))
-
self._monitorThread = MigrationMonitorThread(self._vm,
- startTime)
+ startTime,
+ int(self._downtime))
self._monitorThread.start()
try:
@@ -325,7 +324,6 @@
self._raiseAbortError()
finally:
- t.cancel()
self._monitorThread.stop()
def stop(self):
@@ -339,53 +337,24 @@
raise
-class MigrationDowntimeThread(threading.Thread):
- def __init__(self, vm, downtime):
- super(MigrationDowntimeThread, self).__init__()
- self.DOWNTIME_STEPS = config.getint('vars', 'migration_downtime_steps')
-
- self._vm = vm
- self._downtime = downtime
- self._stop = threading.Event()
-
- delay_per_gib = config.getint('vars', 'migration_downtime_delay')
- memSize = int(vm.conf['memSize'])
- self._wait = (delay_per_gib * max(memSize, 2048) + 1023) / 1024
-
- self.daemon = True
- self.start()
-
- def run(self):
- self._vm.log.debug('migration downtime thread started')
-
- for i in range(self.DOWNTIME_STEPS):
- self._stop.wait(self._wait / self.DOWNTIME_STEPS)
-
- if self._stop.isSet():
- break
-
- downtime = self._downtime * (i + 1) / self.DOWNTIME_STEPS
- self._vm.log.debug('setting migration downtime to %d', downtime)
- self._vm._dom.migrateSetMaxDowntime(downtime, 0)
-
- self._vm.log.debug('migration downtime thread exiting')
-
- def cancel(self):
- self._vm.log.debug('canceling migration downtime thread')
- self._stop.set()
-
-
class MigrationMonitorThread(threading.Thread):
_MIGRATION_MONITOR_INTERVAL = config.getint(
'vars', 'migration_monitor_interval') # seconds
- def __init__(self, vm, startTime):
+ def __init__(self, vm, startTime, downTime):
super(MigrationMonitorThread, self).__init__()
self._stop = threading.Event()
self._vm = vm
self._startTime = startTime
self.daemon = True
self.progress = 0
+
+ self._downtime = downTime
+ delay_per_gib = config.getint('vars', 'migration_downtime_delay')
+ memSize = int(vm.conf['memSize'])
+ self._wait = (delay_per_gib * max(memSize, 2048) + 1023) / 1024
+ self._downtime_interval = self._wait / self.DOWNTIME_STEPS
+ self._downtime_step = 0
@property
def enabled(self):
@@ -402,6 +371,7 @@
self._stop.wait(1.0)
if self.enabled:
self.monitor_migration(step)
+ self.monitor_downtime(step)
step += 1
self._vm.log.debug('migration monitor thread exiting')
@@ -467,6 +437,17 @@
' data processed' %
(timeElapsed / 1000, self.progress))
+ def update_downtime(self, i):
+ return self._downtime * (i + 1) / self.DOWNTIME_STEPS
+
+ def monitor_downtime(self, step):
+ if self._downtime_step < self.DOWNTIME_STEPS and \
+ step % self._downtime_interval == 0:
+ downtime = self.update_downtime(self._downtime_step)
+ self._vm.log.debug('setting migration downtime to %d', downtime)
+ self._vm._dom.migrateSetMaxDowntime(downtime, 0)
+ self._downtime_step += 1
+
def stop(self):
self._vm.log.debug('stopping migration monitor thread')
self._stop.set()
--
To view, visit http://gerrit.ovirt.org/25977
To unsubscribe, visit http://gerrit.ovirt.org/settings
Gerrit-MessageType: newchange
Gerrit-Change-Id: I7ac66331b44435a9cffeb9de1454db6843245979
Gerrit-PatchSet: 1
Gerrit-Project: vdsm
Gerrit-Branch: master
Gerrit-Owner: Francesco Romani <fromani(a)redhat.com>
9 years, 8 months