Fix inconsistent indentation problems
Signed-off-by: John Kacur <jkacur(a)redhat.com>
---
tuna-cmd.py | 1232 +++++++++++++++++++++++++++++-----------------------------
tuna/tuna.py | 1174 +++++++++++++++++++++++++++----------------------------
2 files changed, 1203 insertions(+), 1203 deletions(-)
diff --git a/tuna-cmd.py b/tuna-cmd.py
index 5c413cd0a061..7c4e26c42742 100755
--- a/tuna-cmd.py
+++ b/tuna-cmd.py
@@ -22,16 +22,16 @@ import locale
from functools import reduce
try:
- import inet_diag
- have_inet_diag = True
+ import inet_diag
+ have_inet_diag = True
except:
- have_inet_diag = False
+ have_inet_diag = False
try:
- set
+ set
except NameError:
- # In python < 2.4, "set" is not the first class citizen.
- from sets import Set as set
+ # In python < 2.4, "set" is not the first class citizen.
+ from sets import Set as set
# FIXME: ETOOMANYGLOBALS, we need a class!
@@ -41,671 +41,671 @@ irqs = None
version = "0.13.1"
def usage():
- print(_('Usage: tuna [OPTIONS]'))
- fmt = '\t%-40s %s'
- print(fmt % ('-h, --help', _('Give this help list')))
- print(fmt % ('-a, --config_file_apply=profilename', _('Apply changes
described in profile')))
- print(fmt % ('-l, --config_file_list', _('List preloaded
profiles')))
- print(fmt % ('-g, --gui', _('Start the GUI')))
- print(fmt % ('-G, --cgroup', _('Display the processes with the type of
cgroups they are in')))
- print(fmt % ('-c, --cpus=' + _('CPU-LIST'), _('%(cpulist)s affected
by commands') % \
- {"cpulist": _('CPU-LIST')}))
- print(fmt % ('-C, --affect_children', _('Operation will affect children
threads')))
- print(fmt % ('-f, --filter', _('Display filter the selected
entities')))
- print(fmt % ('-i, --isolate', _('Move all threads away from
%(cpulist)s') % \
- {"cpulist": _('CPU-LIST')}))
- print(fmt % ('-I, --include', _('Allow all threads to run on
%(cpulist)s') % \
- {"cpulist": _('CPU-LIST')}))
- print(fmt % ('-K, --no_kthreads', _('Operations will not affect kernel
threads')))
- print(fmt % ('-m, --move', _('Move selected entities to
%(cpulist)s') % \
- {"cpulist": _('CPU-LIST')}))
- print(fmt % ('-N, --nohz_full', _('CPUs in nohz_full= kernel command
line will be affected by operations')))
- if have_inet_diag:
- print(fmt % ('-n, --show_sockets', _('Show network sockets in use by
threads')))
- print(fmt % ('-p, --priority=[' +
- _('POLICY') + ':]' +
- _('RTPRIO'), _('Set thread scheduler tunables: %(policy)s and
%(rtprio)s') % \
- {"policy": _('POLICY'), "rtprio":
_('RTPRIO')}))
- print(fmt % ('-P, --show_threads', _('Show thread list')))
- print(fmt % ('-Q, --show_irqs', _('Show IRQ list')))
- print(fmt % ('-q, --irqs=' + _('IRQ-LIST'), _('%(irqlist)s affected
by commands') %
- {"irqlist": _('IRQ-LIST')}))
- print(fmt % ('-r, --run=' + _('COMMAND'), _('fork a new process
and run the %(command)s') % \
- {"command": _('COMMAND')}))
- print(fmt % ('-s, --save=' + _('FILENAME'), _('Save kthreads sched
tunables to %(filename)s') % \
- {"filename": _('FILENAME')}))
- print(fmt % ('-S, --sockets=' +
- _('CPU-SOCKET-LIST'), _('%(cpusocketlist)s affected by
commands') % \
- {"cpusocketlist": _('CPU-SOCKET-LIST')}))
- print(fmt % ('-t, --threads=' +
- _('THREAD-LIST'), _('%(threadlist)s affected by commands') %
\
- {"threadlist": _('THREAD-LIST')}))
- print(fmt % ('-U, --no_uthreads', _('Operations will not affect user
threads')))
- print(fmt % ('-v, --version', _('Show version')))
- print(fmt % ('-W, --what_is', _('Provides help about selected
entities')))
- print(fmt % ('-x, --spread', _('Spread selected entities over
%(cpulist)s') % \
- {"cpulist": _('CPU-LIST')}))
+ print(_('Usage: tuna [OPTIONS]'))
+ fmt = '\t%-40s %s'
+ print(fmt % ('-h, --help', _('Give this help
list')))
+ print(fmt % ('-a, --config_file_apply=profilename',
_('Apply changes described in profile')))
+ print(fmt % ('-l, --config_file_list', _('List preloaded
profiles')))
+ print(fmt % ('-g, --gui', _('Start the GUI')))
+ print(fmt % ('-G, --cgroup', _('Display the processes
with the type of cgroups they are in')))
+ print(fmt % ('-c, --cpus=' + _('CPU-LIST'), _('%(cpulist)s
affected by commands') % \
+ {"cpulist":
_('CPU-LIST')}))
+ print(fmt % ('-C, --affect_children', _('Operation will affect
children threads')))
+ print(fmt % ('-f, --filter', _('Display filter the
selected entities')))
+ print(fmt % ('-i, --isolate', _('Move all threads away
from %(cpulist)s') % \
+ {"cpulist":
_('CPU-LIST')}))
+ print(fmt % ('-I, --include', _('Allow all threads to
run on %(cpulist)s') % \
+ {"cpulist":
_('CPU-LIST')}))
+ print(fmt % ('-K, --no_kthreads', _('Operations will not
affect kernel threads')))
+ print(fmt % ('-m, --move', _('Move selected entities
to %(cpulist)s') % \
+ {"cpulist":
_('CPU-LIST')}))
+ print(fmt % ('-N, --nohz_full', _('CPUs in nohz_full=
kernel command line will be affected by operations')))
+ if have_inet_diag:
+ print(fmt % ('-n, --show_sockets', _('Show network sockets
in use by threads')))
+ print(fmt % ('-p, --priority=[' +
+ _('POLICY') + ':]' +
+ _('RTPRIO'), _('Set thread scheduler
tunables: %(policy)s and %(rtprio)s') % \
+ {"policy":
_('POLICY'), "rtprio": _('RTPRIO')}))
+ print(fmt % ('-P, --show_threads', _('Show thread
list')))
+ print(fmt % ('-Q, --show_irqs', _('Show IRQ list')))
+ print(fmt % ('-q, --irqs=' + _('IRQ-LIST'), _('%(irqlist)s
affected by commands') %
+ {"irqlist":
_('IRQ-LIST')}))
+ print(fmt % ('-r, --run=' + _('COMMAND'), _('fork a new
process and run the %(command)s') % \
+ {"command":
_('COMMAND')}))
+ print(fmt % ('-s, --save=' + _('FILENAME'), _('Save kthreads
sched tunables to %(filename)s') % \
+ {"filename":
_('FILENAME')}))
+ print(fmt % ('-S, --sockets=' +
+ _('CPU-SOCKET-LIST'), _('%(cpusocketlist)s
affected by commands') % \
+ {"cpusocketlist":
_('CPU-SOCKET-LIST')}))
+ print(fmt % ('-t, --threads=' +
+ _('THREAD-LIST'), _('%(threadlist)s
affected by commands') % \
+ {"threadlist":
_('THREAD-LIST')}))
+ print(fmt % ('-U, --no_uthreads', _('Operations will not
affect user threads')))
+ print(fmt % ('-v, --version', _('Show version')))
+ print(fmt % ('-W, --what_is', _('Provides help about
selected entities')))
+ print(fmt % ('-x, --spread', _('Spread selected
entities over %(cpulist)s') % \
+ {"cpulist":
_('CPU-LIST')}))
def get_nr_cpus():
- global nr_cpus
- if nr_cpus:
- return nr_cpus
- nr_cpus = procfs.cpuinfo().nr_cpus
- return nr_cpus
+ global nr_cpus
+ if nr_cpus:
+ return nr_cpus
+ nr_cpus = procfs.cpuinfo().nr_cpus
+ return nr_cpus
nics = None
def get_nics():
- global nics
- if nics:
- return nics
- nics = ethtool.get_active_devices()
- return nics
+ global nics
+ if nics:
+ return nics
+ nics = ethtool.get_active_devices()
+ return nics
def thread_help(tid):
- global ps
- if not ps:
- ps = procfs.pidstats()
+ global ps
+ if not ps:
+ ps = procfs.pidstats()
- if tid not in ps:
- print("tuna: " + _("thread %d doesn't exists!") % tid)
- return
+ if tid not in ps:
+ print("tuna: " + _("thread %d doesn't exists!") %
tid)
+ return
- pinfo = ps[tid]
- cmdline = procfs.process_cmdline(pinfo)
- help, title = tuna.kthread_help_plain_text(tid, cmdline)
- print("%s\n\n%s" % (title, _(help)))
+ pinfo = ps[tid]
+ cmdline = procfs.process_cmdline(pinfo)
+ help, title = tuna.kthread_help_plain_text(tid, cmdline)
+ print("%s\n\n%s" % (title, _(help)))
def save(cpu_list, thread_list, filename):
- kthreads = tuna.get_kthread_sched_tunings()
- for name in list(kthreads.keys()):
- kt = kthreads[name]
- if (cpu_list and not set(kt.affinity).intersection(set(cpu_list))) or \
- (thread_list and kt.pid not in thread_list) :
- del kthreads[name]
- tuna.generate_rtgroups(filename, kthreads, get_nr_cpus())
+ kthreads = tuna.get_kthread_sched_tunings()
+ for name in list(kthreads.keys()):
+ kt = kthreads[name]
+ if (cpu_list and not set(kt.affinity).intersection(set(cpu_list))) or \
+ (thread_list and kt.pid not in thread_list) :
+ del kthreads[name]
+ tuna.generate_rtgroups(filename, kthreads, get_nr_cpus())
def ps_show_header(has_ctxt_switch_info,cgroups = False):
- print("%7s %6s %5s %7s %s" % \
- (" ", " ", " ", _("thread"),
- has_ctxt_switch_info and "ctxt_switches" or ""))
- print("%7s %6s %5s %7s%s %15s" % \
- ("pid", "SCHED_", "rtpri", "affinity",
- has_ctxt_switch_info and " %9s %12s" % ("voluntary",
"nonvoluntary") or "",
- "cmd"), end=' ')
- if cgroups:
- print(" %7s" % ("cgroup"))
- else:
- print("")
+ print("%7s %6s %5s %7s %s" % \
+ (" ", " ", " ", _("thread"),
+ has_ctxt_switch_info and "ctxt_switches" or ""))
+ print("%7s %6s %5s %7s%s %15s" % \
+ ("pid", "SCHED_", "rtpri",
"affinity",
+ has_ctxt_switch_info and " %9s %12s" % ("voluntary",
"nonvoluntary") or "",
+ "cmd"), end=' ')
+ if cgroups:
+ print(" %7s" % ("cgroup"))
+ else:
+ print("")
def ps_show_sockets(pid, ps, inodes, inode_re, indent = 0):
- header_printed = False
- dirname = "/proc/%s/fd" % pid
- try:
- filenames = os.listdir(dirname)
- except: # Process died
- return
- sindent = " " * indent
- for filename in filenames:
- pathname = os.path.join(dirname, filename)
- try:
- linkto = os.readlink(pathname)
- except: # Process died
- continue
- inode_match = inode_re.match(linkto)
- if not inode_match:
- continue
- inode = int(inode_match.group(1))
- if inode not in inodes:
- continue
- if not header_printed:
- print("%s%-10s %-6s %-6s %15s:%-5s %15s:%-5s" % \
- (sindent, "State", "Recv-Q", "Send-Q",
- "Local Address", "Port",
- "Peer Address", "Port"))
- header_printed = True
- s = inodes[inode]
- print("%s%-10s %-6d %-6d %15s:%-5d %15s:%-5d" % \
- (sindent, s.state(),
- s.receive_queue(), s.write_queue(),
- s.saddr(), s.sport(), s.daddr(), s.dport()))
+ header_printed = False
+ dirname = "/proc/%s/fd" % pid
+ try:
+ filenames = os.listdir(dirname)
+ except: # Process died
+ return
+ sindent = " " * indent
+ for filename in filenames:
+ pathname = os.path.join(dirname, filename)
+ try:
+ linkto = os.readlink(pathname)
+ except: # Process died
+ continue
+ inode_match = inode_re.match(linkto)
+ if not inode_match:
+ continue
+ inode = int(inode_match.group(1))
+ if inode not in inodes:
+ continue
+ if not header_printed:
+ print("%s%-10s %-6s %-6s %15s:%-5s %15s:%-5s" % \
+ (sindent, "State", "Recv-Q",
"Send-Q",
+ "Local Address", "Port",
+ "Peer Address", "Port"))
+ header_printed = True
+ s = inodes[inode]
+ print("%s%-10s %-6d %-6d %15s:%-5d %15s:%-5d" % \
+ (sindent, s.state(),
+ s.receive_queue(), s.write_queue(),
+ s.saddr(), s.sport(), s.daddr(), s.dport()))
def format_affinity(affinity):
- if len(affinity) <= 4:
- return ",".join(str(a) for a in affinity)
+ if len(affinity) <= 4:
+ return ",".join(str(a) for a in affinity)
- return ",".join(str(hex(a)) for a in procfs.hexbitmask(affinity,
get_nr_cpus()))
+ return ",".join(str(hex(a)) for a in procfs.hexbitmask(affinity,
get_nr_cpus()))
def ps_show_thread(pid, affect_children, ps,
- has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups):
- global irqs
- try:
- affinity = format_affinity(schedutils.get_affinity(pid))
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- return
- raise e
-
- sched = schedutils.schedstr(schedutils.get_scheduler(pid))[6:]
- rtprio = int(ps[pid]["stat"]["rt_priority"])
- cgout = ps[pid]["cgroups"]
- cmd = ps[pid]["stat"]["comm"]
- users = ""
- if tuna.is_irq_thread(cmd):
- try:
- if not irqs:
- irqs = procfs.interrupts()
- if cmd[:4] == "IRQ-":
- users = irqs[tuna.irq_thread_number(cmd)]["users"]
- for u in users:
- if u in get_nics():
- users[users.index(u)] = "%s(%s)" % (u, ethtool.get_module(u))
- users = ",".join(users)
- else:
- u = cmd[cmd.find('-') + 1:]
- if u in get_nics():
- users = ethtool.get_module(u)
- except:
- users = "Not found in /proc/interrupts!"
-
- ctxt_switch_info = ""
- if has_ctxt_switch_info:
- voluntary_ctxt_switches =
int(ps[pid]["status"]["voluntary_ctxt_switches"])
- nonvoluntary_ctxt_switches =
int(ps[pid]["status"]["nonvoluntary_ctxt_switches"])
- ctxt_switch_info = " %9d %12s" % (voluntary_ctxt_switches,
- nonvoluntary_ctxt_switches)
-
- if affect_children:
- print(" %-5d " % pid, end=' ')
- else:
- print(" %-5d" % pid, end=' ')
- print("%6s %5d %8s%s %15s %s" % (sched, rtprio, affinity,
- ctxt_switch_info, cmd, users), end=' ')
- if cgroups:
- print(" %9s" % cgout, end=' ')
- print("")
- if sock_inodes:
- ps_show_sockets(pid, ps, sock_inodes, sock_inode_re,
- affect_children and 3 or 4)
- if affect_children and "threads" in ps[pid]:
- for tid in list(ps[pid]["threads"].keys()):
- ps_show_thread(tid, False, ps[pid]["threads"],
- has_ctxt_switch_info,
- sock_inodes, sock_inode_re, cgroups)
-
+ has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups):
+ global irqs
+ try:
+ affinity = format_affinity(schedutils.get_affinity(pid))
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
+ if e[0] == errno.ESRCH:
+ return
+ raise e
+
+ sched = schedutils.schedstr(schedutils.get_scheduler(pid))[6:]
+ rtprio = int(ps[pid]["stat"]["rt_priority"])
+ cgout = ps[pid]["cgroups"]
+ cmd = ps[pid]["stat"]["comm"]
+ users = ""
+ if tuna.is_irq_thread(cmd):
+ try:
+ if not irqs:
+ irqs = procfs.interrupts()
+ if cmd[:4] == "IRQ-":
+ users =
irqs[tuna.irq_thread_number(cmd)]["users"]
+ for u in users:
+ if u in get_nics():
+ users[users.index(u)] =
"%s(%s)" % (u, ethtool.get_module(u))
+ users = ",".join(users)
+ else:
+ u = cmd[cmd.find('-') + 1:]
+ if u in get_nics():
+ users = ethtool.get_module(u)
+ except:
+ users = "Not found in /proc/interrupts!"
+
+ ctxt_switch_info = ""
+ if has_ctxt_switch_info:
+ voluntary_ctxt_switches =
int(ps[pid]["status"]["voluntary_ctxt_switches"])
+ nonvoluntary_ctxt_switches =
int(ps[pid]["status"]["nonvoluntary_ctxt_switches"])
+ ctxt_switch_info = " %9d %12s" % (voluntary_ctxt_switches,
+ nonvoluntary_ctxt_switches)
+
+ if affect_children:
+ print(" %-5d " % pid, end=' ')
+ else:
+ print(" %-5d" % pid, end=' ')
+ print("%6s %5d %8s%s %15s %s" % (sched, rtprio, affinity,
+ ctxt_switch_info, cmd, users), end=' ')
+ if cgroups:
+ print(" %9s" % cgout, end=' ')
+ print("")
+ if sock_inodes:
+ ps_show_sockets(pid, ps, sock_inodes, sock_inode_re,
+ affect_children and 3 or 4)
+ if affect_children and "threads" in ps[pid]:
+ for tid in list(ps[pid]["threads"].keys()):
+ ps_show_thread(tid, False, ps[pid]["threads"],
+ has_ctxt_switch_info,
+ sock_inodes, sock_inode_re, cgroups)
+
def ps_show(ps, affect_children, thread_list, cpu_list,
- irq_list_numbers, show_uthreads, show_kthreads,
- has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups):
-
- ps_list = []
- for pid in list(ps.keys()):
- iskth = tuna.iskthread(pid)
- if not show_uthreads and not iskth:
- continue
- if not show_kthreads and iskth:
- continue
- in_irq_list = False
- if irq_list_numbers:
- if tuna.is_hardirq_handler(ps, pid):
- try:
- irq = int(ps[pid]["stat"]["comm"][4:])
- if irq not in irq_list_numbers:
- if not thread_list:
- continue
- else:
- in_irq_list = True
- except:
- pass
- elif not thread_list:
- continue
- if not in_irq_list and thread_list and pid not in thread_list:
- continue
- try:
- affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if cpu_list and not set(cpu_list).intersection(set(affinity)):
- continue
- ps_list.append(pid)
-
- ps_list.sort()
-
- for pid in ps_list:
- ps_show_thread(pid, affect_children, ps,
- has_ctxt_switch_info, sock_inodes,
- sock_inode_re, cgroups)
+ irq_list_numbers, show_uthreads, show_kthreads,
+ has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups):
+
+ ps_list = []
+ for pid in list(ps.keys()):
+ iskth = tuna.iskthread(pid)
+ if not show_uthreads and not iskth:
+ continue
+ if not show_kthreads and iskth:
+ continue
+ in_irq_list = False
+ if irq_list_numbers:
+ if tuna.is_hardirq_handler(ps, pid):
+ try:
+ irq =
int(ps[pid]["stat"]["comm"][4:])
+ if irq not in irq_list_numbers:
+ if not thread_list:
+ continue
+ else:
+ in_irq_list = True
+ except:
+ pass
+ elif not thread_list:
+ continue
+ if not in_irq_list and thread_list and pid not in thread_list:
+ continue
+ try:
+ affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly
raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if cpu_list and not set(cpu_list).intersection(set(affinity)):
+ continue
+ ps_list.append(pid)
+
+ ps_list.sort()
+
+ for pid in ps_list:
+ ps_show_thread(pid, affect_children, ps,
+ has_ctxt_switch_info, sock_inodes,
+ sock_inode_re, cgroups)
def load_socktype(socktype, inodes):
- idiag = inet_diag.create(socktype = socktype)
- while True:
- try:
- s = idiag.get()
- except:
- break
- inodes[s.inode()] = s
+ idiag = inet_diag.create(socktype = socktype)
+ while True:
+ try:
+ s = idiag.get()
+ except:
+ break
+ inodes[s.inode()] = s
def load_sockets():
- inodes = {}
- for socktype in (inet_diag.TCPDIAG_GETSOCK,
- inet_diag.DCCPDIAG_GETSOCK):
- load_socktype(socktype, inodes)
- return inodes
+ inodes = {}
+ for socktype in (inet_diag.TCPDIAG_GETSOCK,
+ inet_diag.DCCPDIAG_GETSOCK):
+ load_socktype(socktype, inodes)
+ return inodes
def do_ps(thread_list, cpu_list, irq_list, show_uthreads,
- show_kthreads, affect_children, show_sockets, cgroups):
- ps = procfs.pidstats()
- if affect_children:
- ps.reload_threads()
-
- sock_inodes = None
- sock_inode_re = None
- if show_sockets:
- sock_inodes = load_sockets()
- sock_inode_re = re.compile(r"socket:\[(\d+)\]")
-
- has_ctxt_switch_info = "voluntary_ctxt_switches" in ps[1]["status"]
- try:
- if sys.stdout.isatty():
- ps_show_header(has_ctxt_switch_info, cgroups)
- ps_show(ps, affect_children, thread_list,
- cpu_list, irq_list, show_uthreads, show_kthreads,
- has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups)
- except IOError:
- # 'tuna -P | head' for instance
- pass
+ show_kthreads, affect_children, show_sockets, cgroups):
+ ps = procfs.pidstats()
+ if affect_children:
+ ps.reload_threads()
+
+ sock_inodes = None
+ sock_inode_re = None
+ if show_sockets:
+ sock_inodes = load_sockets()
+ sock_inode_re = re.compile(r"socket:\[(\d+)\]")
+
+ has_ctxt_switch_info = "voluntary_ctxt_switches" in
ps[1]["status"]
+ try:
+ if sys.stdout.isatty():
+ ps_show_header(has_ctxt_switch_info, cgroups)
+ ps_show(ps, affect_children, thread_list,
+ cpu_list, irq_list, show_uthreads, show_kthreads,
+ has_ctxt_switch_info, sock_inodes, sock_inode_re, cgroups)
+ except IOError:
+ # 'tuna -P | head' for instance
+ pass
def find_drivers_by_users(users):
- nics = get_nics()
- drivers = []
- for u in users:
- try:
- idx = u.index('-')
- u = u[:idx]
- except:
- pass
- if u in nics:
- driver = ethtool.get_module(u)
- if driver not in drivers:
- drivers.append(driver)
-
- return drivers
+ nics = get_nics()
+ drivers = []
+ for u in users:
+ try:
+ idx = u.index('-')
+ u = u[:idx]
+ except:
+ pass
+ if u in nics:
+ driver = ethtool.get_module(u)
+ if driver not in drivers:
+ drivers.append(driver)
+
+ return drivers
def show_irqs(irq_list, cpu_list):
- global irqs
- if not irqs:
- irqs = procfs.interrupts()
-
- if sys.stdout.isatty():
- print("%4s %-16s %8s" % ("#", _("users"),
_("affinity"),))
- sorted_irqs = []
- for k in list(irqs.keys()):
- try:
- irqn = int(k)
- affinity = irqs[irqn]["affinity"]
- except:
- continue
- if irq_list and irqn not in irq_list:
- continue
-
- if cpu_list and not set(cpu_list).intersection(set(affinity)):
- continue
- sorted_irqs.append(irqn)
-
- sorted_irqs.sort()
- for irq in sorted_irqs:
- affinity = format_affinity(irqs[irq]["affinity"])
- users = irqs[irq]["users"]
- print("%4d %-16s %8s" % (irq, ",".join(users), affinity), end='
')
- drivers = find_drivers_by_users(users)
- if drivers:
- print(" %s" % ",".join(drivers))
- else:
- print()
+ global irqs
+ if not irqs:
+ irqs = procfs.interrupts()
+
+ if sys.stdout.isatty():
+ print("%4s %-16s %8s" % ("#", _("users"),
_("affinity"),))
+ sorted_irqs = []
+ for k in list(irqs.keys()):
+ try:
+ irqn = int(k)
+ affinity = irqs[irqn]["affinity"]
+ except:
+ continue
+ if irq_list and irqn not in irq_list:
+ continue
+
+ if cpu_list and not set(cpu_list).intersection(set(affinity)):
+ continue
+ sorted_irqs.append(irqn)
+
+ sorted_irqs.sort()
+ for irq in sorted_irqs:
+ affinity = format_affinity(irqs[irq]["affinity"])
+ users = irqs[irq]["users"]
+ print("%4d %-16s %8s" % (irq, ",".join(users),
affinity), end=' ')
+ drivers = find_drivers_by_users(users)
+ if drivers:
+ print(" %s" % ",".join(drivers))
+ else:
+ print()
def do_list_op(op, current_list, op_list):
- if not current_list:
- current_list = []
- if op == '+':
- return list(set(current_list + op_list))
- if op == '-':
- return list(set(current_list) - set(op_list))
- return list(set(op_list))
+ if not current_list:
+ current_list = []
+ if op == '+':
+ return list(set(current_list + op_list))
+ if op == '-':
+ return list(set(current_list) - set(op_list))
+ return list(set(op_list))
def thread_mapper(s):
- global ps
- try:
- return [ int(s), ]
- except:
- pass
+ global ps
+ try:
+ return [ int(s), ]
+ except:
+ pass
- ps = procfs.pidstats()
+ ps = procfs.pidstats()
- try:
- return ps.find_by_regex(re.compile(fnmatch.translate(s)))
- except:
- return ps.find_by_name(s)
+ try:
+ return ps.find_by_regex(re.compile(fnmatch.translate(s)))
+ except:
+ return ps.find_by_name(s)
def irq_mapper(s):
- global irqs
- try:
- return [ int(s), ]
- except:
- pass
- if not irqs:
- irqs = procfs.interrupts()
-
- irq_list_str = irqs.find_by_user_regex(re.compile(fnmatch.translate(s)))
- irq_list = []
- for i in irq_list_str:
- try:
- irq_list.append(int(i))
- except:
- pass
-
- return irq_list
+ global irqs
+ try:
+ return [ int(s), ]
+ except:
+ pass
+ if not irqs:
+ irqs = procfs.interrupts()
+
+ irq_list_str = irqs.find_by_user_regex(re.compile(fnmatch.translate(s)))
+ irq_list = []
+ for i in irq_list_str:
+ try:
+ irq_list.append(int(i))
+ except:
+ pass
+
+ return irq_list
def pick_op(argument):
if argument == "":
return (None, argument)
- if argument[0] in ('+', '-'):
- return (argument[0], argument[1:])
- return (None, argument)
+ if argument[0] in ('+', '-'):
+ return (argument[0], argument[1:])
+ return (None, argument)
def i18n_init():
- (app, localedir) = ('tuna', '/usr/share/locale')
- locale.setlocale(locale.LC_ALL, '')
- gettext.bindtextdomain(app, localedir)
- gettext.textdomain(app)
- gettext.install(app, localedir)
+ (app, localedir) = ('tuna', '/usr/share/locale')
+ locale.setlocale(locale.LC_ALL, '')
+ gettext.bindtextdomain(app, localedir)
+ gettext.textdomain(app)
+ gettext.install(app, localedir)
def apply_config(filename):
- from tuna.config import Config
- config = Config()
- if os.path.exists(filename):
- config.config['root'] = os.getcwd() + "/"
- filename = os.path.basename(filename)
- else:
- if not os.path.exists(config.config['root']+filename):
- print(filename + _(" not found!"))
- exit(-1)
- if config.loadTuna(filename):
- exit(1)
- ctrl = 0
- values = {}
- values['toapply'] = {}
- for index in range(len(config.ctlParams)):
- for opt in config.ctlParams[index]:
- values['toapply'][ctrl] = {}
- values['toapply'][ctrl]['label'] = opt
- values['toapply'][ctrl]['value'] = config.ctlParams[index][opt]
- ctrl = ctrl + 1
- config.applyChanges(values)
+ from tuna.config import Config
+ config = Config()
+ if os.path.exists(filename):
+ config.config['root'] = os.getcwd() + "/"
+ filename = os.path.basename(filename)
+ else:
+ if not os.path.exists(config.config['root']+filename):
+ print(filename + _(" not found!"))
+ exit(-1)
+ if config.loadTuna(filename):
+ exit(1)
+ ctrl = 0
+ values = {}
+ values['toapply'] = {}
+ for index in range(len(config.ctlParams)):
+ for opt in config.ctlParams[index]:
+ values['toapply'][ctrl] = {}
+ values['toapply'][ctrl]['label'] = opt
+ values['toapply'][ctrl]['value'] =
config.ctlParams[index][opt]
+ ctrl = ctrl + 1
+ config.applyChanges(values)
def list_config():
- from tuna.config import Config
- config = Config()
- print(_("Preloaded config files:"))
- for value in config.populate():
- print(value)
- exit(1)
+ from tuna.config import Config
+ config = Config()
+ print(_("Preloaded config files:"))
+ for value in config.populate():
+ print(value)
+ exit(1)
def main():
- global ps
-
- i18n_init()
- try:
- short = "a:c:CfgGhiIKlmNp:PQq:r:s:S:t:UvWx"
- long = ["cpus=", "affect_children", "filter",
"gui", "help",
- "isolate", "include", "no_kthreads", "move",
"nohz_full",
- "show_sockets", "priority=", "show_threads",
- "show_irqs", "irqs=",
- "save=", "sockets=", "threads=",
"no_uthreads",
- "version", "what_is",
"spread","cgroup","config_file_apply=","config_file_list=",
- "run=" ]
- if have_inet_diag:
- short += "n"
- int.append("show_sockets")
- opts, args = getopt.getopt(sys.argv[1:], short, int)
- except getopt.GetoptError as err:
- usage()
- print(str(err))
- sys.exit(2)
-
- run_gui = not opts
- kthreads = True
- uthreads = True
- cgroups = False
- cpu_list = None
- irq_list = None
- irq_list_str = None
- rtprio = None
- policy = None
- thread_list = []
- thread_list_str = None
- filter = False
- affect_children = False
- show_sockets = False
- p_waiting_action = False
-
- for o, a in opts:
- if o in ("-h", "--help"):
- usage()
- return
- elif o in ("-a", "--config_file_apply"):
- apply_config(a)
- elif o in ("-l", "--config_file_list"):
- list_config()
- elif o in ("-c", "--cpus"):
- (op, a) = pick_op(a)
+ global ps
+
+ i18n_init()
+ try:
+ short = "a:c:CfgGhiIKlmNp:PQq:r:s:S:t:UvWx"
+ long = ["cpus=", "affect_children",
"filter", "gui", "help",
+ "isolate", "include",
"no_kthreads", "move", "nohz_full",
+ "show_sockets", "priority=",
"show_threads",
+ "show_irqs", "irqs=",
+ "save=", "sockets=", "threads=",
"no_uthreads",
+ "version", "what_is",
"spread","cgroup","config_file_apply=","config_file_list=",
+ "run=" ]
+ if have_inet_diag:
+ short += "n"
+ int.append("show_sockets")
+ opts, args = getopt.getopt(sys.argv[1:], short, int)
+ except getopt.GetoptError as err:
+ usage()
+ print(str(err))
+ sys.exit(2)
+
+ run_gui = not opts
+ kthreads = True
+ uthreads = True
+ cgroups = False
+ cpu_list = None
+ irq_list = None
+ irq_list_str = None
+ rtprio = None
+ policy = None
+ thread_list = []
+ thread_list_str = None
+ filter = False
+ affect_children = False
+ show_sockets = False
+ p_waiting_action = False
+
+ for o, a in opts:
+ if o in ("-h", "--help"):
+ usage()
+ return
+ elif o in ("-a", "--config_file_apply"):
+ apply_config(a)
+ elif o in ("-l", "--config_file_list"):
+ list_config()
+ elif o in ("-c", "--cpus"):
+ (op, a) = pick_op(a)
try:
- op_list = tuna.cpustring_to_list(a)
+ op_list = tuna.cpustring_to_list(a)
except ValueError:
- usage()
- return
- cpu_list = do_list_op(op, cpu_list, op_list)
- elif o in ("-N", "--nohz_full"):
- try:
- cpu_list = tuna.nohz_full_list()
- except:
- print("tuna: --nohz_full " + _(" needs nohz_full=cpulist on the kernel
command line"))
- sys.exit(2)
- elif o in ("-C", "--affect_children"):
- affect_children = True
- elif o in ("-G", "--cgroup"):
- cgroups = True
- elif o in ("-t", "--threads"):
- # The -t - will reset thread list
- if a == '-':
- thread_list = []
- thread_list_str = ''
- else:
- (op, a) = pick_op(a)
- op_list = reduce(lambda i, j: i + j,
- list(map(thread_mapper, a.split(","))))
- op_list = list(set(op_list))
- thread_list = do_list_op(op, thread_list, op_list)
- # Check if a process name was especified and no
- # threads was found, which would result in an empty
- # thread list, i.e. we would print all the threads
- # in the system when we should print nothing.
- if not op_list and type(a) == type(''):
- thread_list_str = do_list_op(op, thread_list_str,
- a.split(","))
- if not op:
- irq_list = None
- elif o in ("-f", "--filter"):
- filter = True
- elif o in ("-g", "--gui"):
- run_gui = True
- elif o in ("-i", "--isolate"):
- if not cpu_list:
- print("tuna: --isolate " + _("requires a cpu list!"))
- sys.exit(2)
- tuna.isolate_cpus(cpu_list, get_nr_cpus())
- elif o in ("-I", "--include"):
- if not cpu_list:
- print("tuna: --include " + _("requires a cpu list!"))
- sys.exit(2)
- tuna.include_cpus(cpu_list, get_nr_cpus())
- elif o in ("-p", "--priority"):
- # Save policy and rtprio for future Actions (e.g. --run).
- (policy, rtprio) = tuna.get_policy_and_rtprio(a)
- if not thread_list:
- # For backward compatibility
- p_waiting_action = True
- else:
- try:
- tuna.threads_set_priority(thread_list, a, affect_children)
- except (SystemError, OSError) as err: # old python-schedutils incorrectly raised
SystemError
- print("tuna: %s" % err)
- sys.exit(2)
- elif o in ("-P", "--show_threads"):
- # If the user specified process names that weren't
- # resolved to pids, don't show all threads.
- if not thread_list and not irq_list:
- if thread_list_str or irq_list_str:
- continue
- do_ps(thread_list, cpu_list, irq_list, uthreads,
- kthreads, affect_children, show_sockets, cgroups)
- elif o in ("-Q", "--show_irqs"):
- # If the user specified IRQ names that weren't
- # resolved to IRQs, don't show all IRQs.
- if not irq_list and irq_list_str:
- continue
- show_irqs(irq_list, cpu_list)
- elif o in ("-n", "--show_sockets"):
- show_sockets = True
- elif o in ("-m", "--move", "-x", "--spread"):
- if not cpu_list:
- print("tuna: --move " + _("requires a cpu list!"))
- sys.exit(2)
- if not (thread_list or irq_list):
- print("tuna: --move " + _("requires a list of threads/irqs!"))
- sys.exit(2)
-
- spread = o in ("-x", "--spread")
-
- if thread_list:
- tuna.move_threads_to_cpu(cpu_list, thread_list,
- spread = spread)
-
- if irq_list:
- tuna.move_irqs_to_cpu(cpu_list, irq_list,
- spread = spread)
- elif o in ("-s", "--save"):
- save(cpu_list, thread_list, a)
- elif o in ("-S", "--sockets"):
- (op, a) = pick_op(a)
- sockets = [socket for socket in a.split(",")]
-
- if not cpu_list:
- cpu_list = []
-
- cpu_info = sysfs.cpus()
- op_list = []
- for socket in sockets:
- if socket not in cpu_info.sockets:
- print("tuna: %s" % \
- (_("invalid socket %(socket)s sockets available: %(available)s") %
\
- {"socket": socket,
- "available": ",".join(list(cpu_info.sockets.keys()))}))
- sys.exit(2)
- op_list += [ int(cpu.name[3:]) for cpu in cpu_info.sockets[socket] ]
- cpu_list = do_list_op(op, cpu_list, op_list)
- elif o in ("-K", "--no_kthreads"):
- kthreads = False
- elif o in ("-q", "--irqs"):
- (op, a) = pick_op(a)
- op_list = reduce(lambda i, j: i + j,
- list(map(irq_mapper, list(set(a.split(","))))))
- irq_list = do_list_op(op, irq_list, op_list)
- # See comment above about thread_list_str
- if not op_list and type(a) == type(''):
- irq_list_str = do_list_op(op, irq_list_str,
- a.split(","))
- if not op:
- thread_list = []
- if not ps:
- ps = procfs.pidstats()
- if tuna.has_threaded_irqs(ps):
- for irq in irq_list:
- irq_re = tuna.threaded_irq_re(irq)
- irq_threads = ps.find_by_regex(irq_re)
- if irq_threads:
- # Change the affinity of the thread too
- # as we can't rely on changing the irq
- # affinity changing the affinity of the
- # thread or vice versa. We need to change
- # both.
- thread_list += irq_threads
-
- elif o in ("-U", "--no_uthreads"):
- uthreads = False
- elif o in ("-v", "--version"):
- print(version)
- elif o in ("-W", "--what_is"):
- if not thread_list:
- print("tuna: --what_is " + _("requires a thread list!"))
- sys.exit(2)
- for tid in thread_list:
- thread_help(tid)
- elif o in ("-r", "--run"):
- # If -p is set, it will be consumed. So, no backward compatible
- # error handling action must be taken.
- p_waiting_action = False
-
- # pick_op() before run the command: to remove the prefix
- # + or - from command line.
- (op, a) = pick_op(a)
-
- # In order to include the new process, it must run
- # the command first, and then get the list of pids,
- tuna.run_command(a, policy, rtprio, cpu_list)
-
- op_list = reduce(lambda i, j: i + j,
- list(map(thread_mapper, a.split(","))))
- op_list = list(set(op_list))
- thread_list = do_list_op(op, thread_list, op_list)
-
- # Check if a process name was especified and no
- # threads was found, which would result in an empty
- # thread list, i.e. we would print all the threads
- # in the system when we should print nothing.
- if not op_list and type(a) == type(''):
- thread_list_str = do_list_op(op, thread_list_str,
- a.split(","))
- if not op:
- irq_list = None
-
- # For backward compatibility: when -p used to be only an Action, it
- # used to exit(2) if no action was taken (i.e. if no threads_list
- # was set).
- if p_waiting_action:
- print(("tuna: -p ") + _("requires a thread list!"))
- sys.exit(2)
-
- if run_gui:
- try:
- from tuna import tuna_gui
- except ImportError:
- # gui packages not installed
- print(_('tuna: packages needed for the GUI missing.'))
- print(_(' Make sure xauth, pygtk2-libglade are installed.'))
- usage()
- return
- except RuntimeError:
- print("tuna: machine needs to be authorized via xhost or ssh -X?")
- return
-
- try:
- cpus_filtered = filter and cpu_list or []
- app = tuna_gui.main_gui(kthreads, uthreads, cpus_filtered)
- app.run()
- except KeyboardInterrupt:
- pass
+ usage()
+ return
+ cpu_list = do_list_op(op, cpu_list, op_list)
+ elif o in ("-N", "--nohz_full"):
+ try:
+ cpu_list = tuna.nohz_full_list()
+ except:
+ print("tuna: --nohz_full " + _(" needs
nohz_full=cpulist on the kernel command line"))
+ sys.exit(2)
+ elif o in ("-C", "--affect_children"):
+ affect_children = True
+ elif o in ("-G", "--cgroup"):
+ cgroups = True
+ elif o in ("-t", "--threads"):
+ # The -t - will reset thread list
+ if a == '-':
+ thread_list = []
+ thread_list_str = ''
+ else:
+ (op, a) = pick_op(a)
+ op_list = reduce(lambda i, j: i + j,
+ list(map(thread_mapper,
a.split(","))))
+ op_list = list(set(op_list))
+ thread_list = do_list_op(op, thread_list, op_list)
+ # Check if a process name was especified and no
+ # threads was found, which would result in an empty
+ # thread list, i.e. we would print all the threads
+ # in the system when we should print nothing.
+ if not op_list and type(a) == type(''):
+ thread_list_str = do_list_op(op,
thread_list_str,
+
a.split(","))
+ if not op:
+ irq_list = None
+ elif o in ("-f", "--filter"):
+ filter = True
+ elif o in ("-g", "--gui"):
+ run_gui = True
+ elif o in ("-i", "--isolate"):
+ if not cpu_list:
+ print("tuna: --isolate " + _("requires a
cpu list!"))
+ sys.exit(2)
+ tuna.isolate_cpus(cpu_list, get_nr_cpus())
+ elif o in ("-I", "--include"):
+ if not cpu_list:
+ print("tuna: --include " + _("requires a
cpu list!"))
+ sys.exit(2)
+ tuna.include_cpus(cpu_list, get_nr_cpus())
+ elif o in ("-p", "--priority"):
+ # Save policy and rtprio for future Actions (e.g. --run).
+ (policy, rtprio) = tuna.get_policy_and_rtprio(a)
+ if not thread_list:
+ # For backward compatibility
+ p_waiting_action = True
+ else:
+ try:
+ tuna.threads_set_priority(thread_list, a,
affect_children)
+ except (SystemError, OSError) as err: # old
python-schedutils incorrectly raised SystemError
+ print("tuna: %s" % err)
+ sys.exit(2)
+ elif o in ("-P", "--show_threads"):
+ # If the user specified process names that weren't
+ # resolved to pids, don't show all threads.
+ if not thread_list and not irq_list:
+ if thread_list_str or irq_list_str:
+ continue
+ do_ps(thread_list, cpu_list, irq_list, uthreads,
+ kthreads, affect_children, show_sockets, cgroups)
+ elif o in ("-Q", "--show_irqs"):
+ # If the user specified IRQ names that weren't
+ # resolved to IRQs, don't show all IRQs.
+ if not irq_list and irq_list_str:
+ continue
+ show_irqs(irq_list, cpu_list)
+ elif o in ("-n", "--show_sockets"):
+ show_sockets = True
+ elif o in ("-m", "--move", "-x",
"--spread"):
+ if not cpu_list:
+ print("tuna: --move " + _("requires a cpu
list!"))
+ sys.exit(2)
+ if not (thread_list or irq_list):
+ print("tuna: --move " + _("requires a list
of threads/irqs!"))
+ sys.exit(2)
+
+ spread = o in ("-x", "--spread")
+
+ if thread_list:
+ tuna.move_threads_to_cpu(cpu_list, thread_list,
+ spread = spread)
+
+ if irq_list:
+ tuna.move_irqs_to_cpu(cpu_list, irq_list,
+ spread = spread)
+ elif o in ("-s", "--save"):
+ save(cpu_list, thread_list, a)
+ elif o in ("-S", "--sockets"):
+ (op, a) = pick_op(a)
+ sockets = [socket for socket in a.split(",")]
+
+ if not cpu_list:
+ cpu_list = []
+
+ cpu_info = sysfs.cpus()
+ op_list = []
+ for socket in sockets:
+ if socket not in cpu_info.sockets:
+ print("tuna: %s" % \
+ (_("invalid socket %(socket)s sockets
available: %(available)s") % \
+ {"socket": socket,
+ "available":
",".join(list(cpu_info.sockets.keys()))}))
+ sys.exit(2)
+ op_list += [ int(cpu.name[3:]) for cpu in
cpu_info.sockets[socket] ]
+ cpu_list = do_list_op(op, cpu_list, op_list)
+ elif o in ("-K", "--no_kthreads"):
+ kthreads = False
+ elif o in ("-q", "--irqs"):
+ (op, a) = pick_op(a)
+ op_list = reduce(lambda i, j: i + j,
+ list(map(irq_mapper,
list(set(a.split(","))))))
+ irq_list = do_list_op(op, irq_list, op_list)
+ # See comment above about thread_list_str
+ if not op_list and type(a) == type(''):
+ irq_list_str = do_list_op(op, irq_list_str,
+ a.split(","))
+ if not op:
+ thread_list = []
+ if not ps:
+ ps = procfs.pidstats()
+ if tuna.has_threaded_irqs(ps):
+ for irq in irq_list:
+ irq_re = tuna.threaded_irq_re(irq)
+ irq_threads = ps.find_by_regex(irq_re)
+ if irq_threads:
+ # Change the affinity of the thread too
+ # as we can't rely on changing the
irq
+ # affinity changing the affinity of the
+ # thread or vice versa. We need to
change
+ # both.
+ thread_list += irq_threads
+
+ elif o in ("-U", "--no_uthreads"):
+ uthreads = False
+ elif o in ("-v", "--version"):
+ print(version)
+ elif o in ("-W", "--what_is"):
+ if not thread_list:
+ print("tuna: --what_is " + _("requires a
thread list!"))
+ sys.exit(2)
+ for tid in thread_list:
+ thread_help(tid)
+ elif o in ("-r", "--run"):
+ # If -p is set, it will be consumed. So, no backward compatible
+ # error handling action must be taken.
+ p_waiting_action = False
+
+ # pick_op() before run the command: to remove the prefix
+ # + or - from command line.
+ (op, a) = pick_op(a)
+
+ # In order to include the new process, it must run
+ # the command first, and then get the list of pids,
+ tuna.run_command(a, policy, rtprio, cpu_list)
+
+ op_list = reduce(lambda i, j: i + j,
+ list(map(thread_mapper,
a.split(","))))
+ op_list = list(set(op_list))
+ thread_list = do_list_op(op, thread_list, op_list)
+
+ # Check if a process name was especified and no
+ # threads was found, which would result in an empty
+ # thread list, i.e. we would print all the threads
+ # in the system when we should print nothing.
+ if not op_list and type(a) == type(''):
+ thread_list_str = do_list_op(op, thread_list_str,
+ a.split(","))
+ if not op:
+ irq_list = None
+
+ # For backward compatibility: when -p used to be only an Action, it
+ # used to exit(2) if no action was taken (i.e. if no threads_list
+ # was set).
+ if p_waiting_action:
+ print(("tuna: -p ") + _("requires a thread list!"))
+ sys.exit(2)
+
+ if run_gui:
+ try:
+ from tuna import tuna_gui
+ except ImportError:
+ # gui packages not installed
+ print(_('tuna: packages needed for the GUI missing.'))
+ print(_(' Make sure xauth, pygtk2-libglade are
installed.'))
+ usage()
+ return
+ except RuntimeError:
+ print("tuna: machine needs to be authorized via xhost or ssh
-X?")
+ return
+
+ try:
+ cpus_filtered = filter and cpu_list or []
+ app = tuna_gui.main_gui(kthreads, uthreads, cpus_filtered)
+ app.run()
+ except KeyboardInterrupt:
+ pass
if __name__ == '__main__':
main()
diff --git a/tuna/tuna.py b/tuna/tuna.py
index 29248352231e..367f3f7b8eb6 100755
--- a/tuna/tuna.py
+++ b/tuna/tuna.py
@@ -7,651 +7,651 @@ import fnmatch
from procfs import utilist
try:
- set
+ set
except NameError:
- from sets import Set as set
+ from sets import Set as set
try:
- fntable
+ fntable
except NameError:
- fntable = []
+ fntable = []
def kthread_help(key):
- if '/' in key:
- key = key[:key.rfind('/')+1]
- return help.KTHREAD_HELP.get(key, " ")
+ if '/' in key:
+ key = key[:key.rfind('/')+1]
+ return help.KTHREAD_HELP.get(key, " ")
def proc_sys_help(key):
- if not len(fntable):
- regMatch = ['[', '*', '?']
- for value in help.PROC_SYS_HELP:
- for char in regMatch:
- if char in value:
- fntable.append(value)
- temp = help.PROC_SYS_HELP.get(key, "")
- if len(temp):
- return key + ":\n" + temp
- else:
- for value in fntable:
- if fnmatch.fnmatch(key, value):
- return key + ":\n" + help.PROC_SYS_HELP.get(value, "")
- return key
+ if not len(fntable):
+ regMatch = ['[', '*', '?']
+ for value in help.PROC_SYS_HELP:
+ for char in regMatch:
+ if char in value:
+ fntable.append(value)
+ temp = help.PROC_SYS_HELP.get(key, "")
+ if len(temp):
+ return key + ":\n" + temp
+ else:
+ for value in fntable:
+ if fnmatch.fnmatch(key, value):
+ return key + ":\n" +
help.PROC_SYS_HELP.get(value, "")
+ return key
def kthread_help_plain_text(pid, cmdline):
- cmdline = cmdline.split(' ')[0]
- params = {'pid':pid, 'cmdline':cmdline}
+ cmdline = cmdline.split(' ')[0]
+ params = {'pid':pid, 'cmdline':cmdline}
- if iskthread(pid):
- title = _("Kernel Thread %(pid)d (%(cmdline)s):") % params
- help = kthread_help(cmdline)
- else:
- title = _("User Thread %(pid)d (%(cmdline)s):") % params
- help = title
+ if iskthread(pid):
+ title = _("Kernel Thread %(pid)d (%(cmdline)s):") % params
+ help = kthread_help(cmdline)
+ else:
+ title = _("User Thread %(pid)d (%(cmdline)s):") % params
+ help = title
- return help, title
+ return help, title
def iskthread(pid):
- # FIXME: we should leave to the callers to handle all the exceptions,
- # in this function, so that they know that the thread vanished and
- # can act accordingly, removing entries from tree views, etc
- try:
- f = file("/proc/%d/smaps" % pid)
- except IOError:
- # Thread has vanished
- return True
-
- line = f.readline()
- f.close()
- if line:
- return False
- # Zombies also doesn't have smaps entries, so check the
- # state:
- try:
- p = procfs.pidstat(pid)
- except:
- return True
-
- if p["state"] == 'Z':
- return False
- return True
-
+ # FIXME: we should leave to the callers to handle all the exceptions,
+ # in this function, so that they know that the thread vanished and
+ # can act accordingly, removing entries from tree views, etc
+ try:
+ f = file("/proc/%d/smaps" % pid)
+ except IOError:
+ # Thread has vanished
+ return True
+
+ line = f.readline()
+ f.close()
+ if line:
+ return False
+ # Zombies also doesn't have smaps entries, so check the
+ # state:
+ try:
+ p = procfs.pidstat(pid)
+ except:
+ return True
+
+ if p["state"] == 'Z':
+ return False
+ return True
+
def irq_thread_number(cmd):
- if cmd[:4] == "irq/":
- return cmd[4:cmd.find('-')]
- elif cmd[:4] == "IRQ-":
- return cmd[4:]
- else:
- raise LookupError
+ if cmd[:4] == "irq/":
+ return cmd[4:cmd.find('-')]
+ elif cmd[:4] == "IRQ-":
+ return cmd[4:]
+ else:
+ raise LookupError
def is_irq_thread(cmd):
- return cmd[:4] in ("IRQ-", "irq/")
+ return cmd[:4] in ("IRQ-", "irq/")
def threaded_irq_re(irq):
- return re.compile("(irq/%s-.+|IRQ-%s)" % (irq, irq))
+ return re.compile("(irq/%s-.+|IRQ-%s)" % (irq, irq))
# FIXME: Move to python-linux-procfs
def has_threaded_irqs(ps):
- irq_re = re.compile("(irq/[0-9]+-.+|IRQ-[0-9]+)")
- return len(ps.find_by_regex(irq_re)) > 0
+ irq_re = re.compile("(irq/[0-9]+-.+|IRQ-[0-9]+)")
+ return len(ps.find_by_regex(irq_re)) > 0
def set_irq_affinity_filename(filename, bitmasklist):
- pathname="/proc/irq/%s" % filename
- f = file(pathname, "w")
- text = ",".join(["%x" % a for a in bitmasklist])
- f.write("%s\n" % text)
- try:
- f.close()
- except IOError:
- # This happens with IRQ 0, for instance
- return False
- return True
+ pathname="/proc/irq/%s" % filename
+ f = file(pathname, "w")
+ text = ",".join(["%x" % a for a in bitmasklist])
+ f.write("%s\n" % text)
+ try:
+ f.close()
+ except IOError:
+ # This happens with IRQ 0, for instance
+ return False
+ return True
def set_irq_affinity(irq, bitmasklist):
- return set_irq_affinity_filename("%d/smp_affinity" % irq, bitmasklist)
+ return set_irq_affinity_filename("%d/smp_affinity" % irq, bitmasklist)
def cpustring_to_list(cpustr):
- """Convert a string of numbers to an integer list.
+ """Convert a string of numbers to an integer list.
- Given a string of comma-separated numbers and number ranges,
- return a simple sorted list of the integers it represents.
+ Given a string of comma-separated numbers and number ranges,
+ return a simple sorted list of the integers it represents.
- This function will throw exceptions for badly-formatted strings.
+ This function will throw exceptions for badly-formatted strings.
- Returns a list of integers."""
-
- fields = cpustr.strip().split(",")
- cpu_list = []
- for field in fields:
- ends = [ int(a, 0) for a in field.split("-") ]
- if len(ends) > 2:
- raise SyntaxError("Syntax error")
- if len(ends) == 2:
- cpu_list += list(range(ends[0], ends[1] + 1))
- else:
- cpu_list += [ends[0]]
- return list(set(cpu_list))
+ Returns a list of integers."""
+
+ fields = cpustr.strip().split(",")
+ cpu_list = []
+ for field in fields:
+ ends = [ int(a, 0) for a in field.split("-") ]
+ if len(ends) > 2:
+ raise SyntaxError("Syntax error")
+ if len(ends) == 2:
+ cpu_list += list(range(ends[0], ends[1] + 1))
+ else:
+ cpu_list += [ends[0]]
+ return list(set(cpu_list))
def list_to_cpustring(l):
- """Convert a list of integers into a range string.
-
- Consecutive values will be collapsed into ranges.
-
- This should not throw any exceptions as long as the list is all
- positive integers.
-
- Returns a string."""
-
- l = list(set(l))
- strings = []
- inrange = False
- prev = -2
- while len(l):
- i = l.pop(0)
- if i - 1 == prev:
- while len(l):
- j = l.pop(0)
- if j - 1 != i:
- l.insert(0, j)
- break;
- i = j
- t = strings.pop()
- if int(t) + 1 == i:
- strings.append("%s,%u" % (t, i))
- else:
- strings.append("%s-%u" % (t, i))
- else:
- strings.append("%u" % i)
- prev = i
- return ",".join(strings)
+ """Convert a list of integers into a range string.
+
+ Consecutive values will be collapsed into ranges.
+
+ This should not throw any exceptions as long as the list is all
+ positive integers.
+
+ Returns a string."""
+
+ l = list(set(l))
+ strings = []
+ inrange = False
+ prev = -2
+ while len(l):
+ i = l.pop(0)
+ if i - 1 == prev:
+ while len(l):
+ j = l.pop(0)
+ if j - 1 != i:
+ l.insert(0, j)
+ break;
+ i = j
+ t = strings.pop()
+ if int(t) + 1 == i:
+ strings.append("%s,%u" % (t, i))
+ else:
+ strings.append("%s-%u" % (t, i))
+ else:
+ strings.append("%u" % i)
+ prev = i
+ return ",".join(strings)
# FIXME: move to python-linux-procfs
def is_hardirq_handler(self, pid):
- PF_HARDIRQ = 0x08000000
- try:
- return int(self.processes[pid]["stat"]["flags"]) & \
- PF_HARDIRQ and True or False
- except:
- return False
+ PF_HARDIRQ = 0x08000000
+ try:
+ return
int(self.processes[pid]["stat"]["flags"]) & \
+ PF_HARDIRQ and True or False
+ except:
+ return False
# FIXME: move to python-linux-procfs
def cannot_set_affinity(self, pid):
- PF_NO_SETAFFINITY = 0x04000000
- try:
- return int(self.processes[pid]["stat"]["flags"]) & \
- PF_NO_SETAFFINITY and True or False
- except:
- return True
+ PF_NO_SETAFFINITY = 0x04000000
+ try:
+ return
int(self.processes[pid]["stat"]["flags"]) & \
+ PF_NO_SETAFFINITY and True or False
+ except:
+ return True
# FIXME: move to python-linux-procfs
def cannot_set_thread_affinity(self, pid, tid):
- PF_NO_SETAFFINITY = 0x04000000
- try:
- return int(self.processes[pid].threads[tid]["stat"]["flags"])
& \
- PF_NO_SETAFFINITY and True or False
- except:
- return True
+ PF_NO_SETAFFINITY = 0x04000000
+ try:
+ return
int(self.processes[pid].threads[tid]["stat"]["flags"]) & \
+ PF_NO_SETAFFINITY and True or False
+ except:
+ return True
def move_threads_to_cpu(cpus, pid_list, set_affinity_warning = None,
- spread = False):
- changed = False
-
- ps = procfs.pidstats()
- cpu_idx = 0
- nr_cpus = len(cpus)
- new_affinity = cpus
- last_cpu = max(cpus) + 1
- for pid in pid_list:
- if spread:
- new_affinity = [cpus[cpu_idx]]
- cpu_idx += 1
- if cpu_idx == nr_cpus:
- cpu_idx = 0
-
- try:
- try:
- curr_affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- curr_affinity = None
- raise e
- if set(curr_affinity) != set(new_affinity):
- try:
- schedutils.set_affinity(pid, new_affinity)
- curr_affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- curr_affinity == None
- raise e
- if set(curr_affinity) == set(new_affinity):
- changed = True
- if is_hardirq_handler(ps, pid):
- try:
- irq = int(ps[pid]["stat"]["comm"][4:])
- bitmasklist = procfs.hexbitmask(new_affinity, last_cpu)
- set_irq_affinity(irq, bitmasklist)
- except:
- pass
- elif set_affinity_warning:
- set_affinity_warning(pid, new_affinity)
- else:
- print("move_threads_to_cpu: %s " % \
- (_("could not change %(pid)d affinity to %(new_affinity)s") % \
- {'pid':pid, 'new_affinity':new_affinity}))
-
- # See if this is the thread group leader
- if pid not in ps:
- continue
-
- threads = procfs.pidstats("/proc/%d/task" % pid)
- for tid in list(threads.keys()):
- try:
- curr_affinity = schedutils.get_affinity(tid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if set(curr_affinity) != set(new_affinity):
- try:
- schedutils.set_affinity(tid, new_affinity)
- curr_affinity = schedutils.get_affinity(tid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if set(curr_affinity) == set(new_affinity):
- changed = True
- elif set_affinity_warning:
- set_affinity_warning(tid, new_affinity)
- else:
- print("move_threads_to_cpu: %s " % \
- (_("could not change %(pid)d affinity to %(new_affinity)s") % \
- {'pid':pid, 'new_affinity':new_affinity}))
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- # process died
- continue
+ spread = False):
+ changed = False
+
+ ps = procfs.pidstats()
+ cpu_idx = 0
+ nr_cpus = len(cpus)
+ new_affinity = cpus
+ last_cpu = max(cpus) + 1
+ for pid in pid_list:
+ if spread:
+ new_affinity = [cpus[cpu_idx]]
+ cpu_idx += 1
+ if cpu_idx == nr_cpus:
+ cpu_idx = 0
+
+ try:
+ try:
+ curr_affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ curr_affinity = None
+ raise e
+ if set(curr_affinity) != set(new_affinity):
+ try:
+ schedutils.set_affinity(pid, new_affinity)
+ curr_affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old
python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ curr_affinity == None
+ raise e
+ if set(curr_affinity) == set(new_affinity):
+ changed = True
+ if is_hardirq_handler(ps, pid):
+ try:
+ irq =
int(ps[pid]["stat"]["comm"][4:])
+ bitmasklist =
procfs.hexbitmask(new_affinity, last_cpu)
+ set_irq_affinity(irq,
bitmasklist)
+ except:
+ pass
+ elif set_affinity_warning:
+ set_affinity_warning(pid, new_affinity)
+ else:
+ print("move_threads_to_cpu: %s " % \
+ (_("could not change %(pid)d affinity
to %(new_affinity)s") % \
+ {'pid':pid,
'new_affinity':new_affinity}))
+
+ # See if this is the thread group leader
+ if pid not in ps:
+ continue
+
+ threads = procfs.pidstats("/proc/%d/task" % pid)
+ for tid in list(threads.keys()):
+ try:
+ curr_affinity = schedutils.get_affinity(tid)
+ except (SystemError, OSError) as e: # old
python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if set(curr_affinity) != set(new_affinity):
+ try:
+ schedutils.set_affinity(tid,
new_affinity)
+ curr_affinity =
schedutils.get_affinity(tid)
+ except (SystemError, OSError) as e: # old
python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if set(curr_affinity) == set(new_affinity):
+ changed = True
+ elif set_affinity_warning:
+ set_affinity_warning(tid, new_affinity)
+ else:
+ print("move_threads_to_cpu: %s
" % \
+ (_("could not change %(pid)d
affinity to %(new_affinity)s") % \
+ {'pid':pid,
'new_affinity':new_affinity}))
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly
raised SystemError
+ if e[0] == errno.ESRCH:
+ # process died
+ continue
elif e[0] == errno.EINVAL: # unmovable thread)
- print("thread %(pid)d cannot be moved as requested" %{'pid':pid},
file=stderr)
- continue
- raise e
- return changed
+ print("thread %(pid)d cannot be moved as
requested" %{'pid':pid}, file=stderr)
+ continue
+ raise e
+ return changed
def move_irqs_to_cpu(cpus, irq_list, spread = False):
- changed = 0
- unprocessed = []
-
- cpu_idx = 0
- nr_cpus = len(cpus)
- new_affinity = cpus
- last_cpu = max(cpus) + 1
- irqs = None
- ps = procfs.pidstats()
- for i in irq_list:
- try:
- irq = int(i)
- except:
- if not irqs:
- irqs = procfs.interrupts()
- irq = irqs.find_by_user(i)
- if not irq:
- unprocessed.append(i)
- continue
- try:
- irq = int(irq)
- except:
- unprocessed.append(i)
- continue
-
- if spread:
- new_affinity = [cpus[cpu_idx]]
- cpu_idx += 1
- if cpu_idx == nr_cpus:
- cpu_idx = 0
-
- bitmasklist = procfs.hexbitmask(new_affinity, last_cpu)
- set_irq_affinity(irq, bitmasklist)
- changed += 1
- pid = ps.find_by_name("IRQ-%d" % irq)
- if pid:
- pid = int(pid[0])
- try:
- schedutils.set_affinity(pid, new_affinity)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- unprocessed.append(i)
- changed -= 1
- continue
- raise e
-
- return (changed, unprocessed)
+ changed = 0
+ unprocessed = []
+
+ cpu_idx = 0
+ nr_cpus = len(cpus)
+ new_affinity = cpus
+ last_cpu = max(cpus) + 1
+ irqs = None
+ ps = procfs.pidstats()
+ for i in irq_list:
+ try:
+ irq = int(i)
+ except:
+ if not irqs:
+ irqs = procfs.interrupts()
+ irq = irqs.find_by_user(i)
+ if not irq:
+ unprocessed.append(i)
+ continue
+ try:
+ irq = int(irq)
+ except:
+ unprocessed.append(i)
+ continue
+
+ if spread:
+ new_affinity = [cpus[cpu_idx]]
+ cpu_idx += 1
+ if cpu_idx == nr_cpus:
+ cpu_idx = 0
+
+ bitmasklist = procfs.hexbitmask(new_affinity, last_cpu)
+ set_irq_affinity(irq, bitmasklist)
+ changed += 1
+ pid = ps.find_by_name("IRQ-%d" % irq)
+ if pid:
+ pid = int(pid[0])
+ try:
+ schedutils.set_affinity(pid, new_affinity)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ unprocessed.append(i)
+ changed -= 1
+ continue
+ raise e
+
+ return (changed, unprocessed)
def affinity_remove_cpus(affinity, cpus, nr_cpus):
- # If the cpu being isolated was the only one in the current affinity
- affinity = list(set(affinity) - set(cpus))
- if not affinity:
- affinity = list(range(nr_cpus))
- affinity = list(set(affinity) - set(cpus))
- return affinity
+ # If the cpu being isolated was the only one in the current affinity
+ affinity = list(set(affinity) - set(cpus))
+ if not affinity:
+ affinity = list(range(nr_cpus))
+ affinity = list(set(affinity) - set(cpus))
+ return affinity
# Shound be moved to python_linux_procfs.interrupts, shared with
interrupts.parse_affinity, etc.
def parse_irq_affinity_filename(filename, nr_cpus):
- f = file("/proc/irq/%s" % filename)
- line = f.readline()
- f.close()
- return utilist.bitmasklist(line, nr_cpus)
+ f = file("/proc/irq/%s" % filename)
+ line = f.readline()
+ f.close()
+ return utilist.bitmasklist(line, nr_cpus)
def isolate_cpus(cpus, nr_cpus):
fname = sys._getframe( ).f_code.co_name # Function name
- ps = procfs.pidstats()
- ps.reload_threads()
- previous_pid_affinities = {}
- for pid in list(ps.keys()):
- if cannot_set_affinity(ps, pid):
- continue
- try:
- affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
+ ps = procfs.pidstats()
+ ps.reload_threads()
+ previous_pid_affinities = {}
+ for pid in list(ps.keys()):
+ if cannot_set_affinity(ps, pid):
+ continue
+ try:
+ affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly
raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
elif e[0] == errno.EINVAL:
print("Function:", fname, ",",
e.strerror, file=sys.stderr)
sys.exit(2)
- raise e
- if set(affinity).intersection(set(cpus)):
- previous_pid_affinities[pid] = copy.copy(affinity)
- affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
- try:
- schedutils.set_affinity(pid, affinity)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
+ raise e
+ if set(affinity).intersection(set(cpus)):
+ previous_pid_affinities[pid] = copy.copy(affinity)
+ affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
+ try:
+ schedutils.set_affinity(pid, affinity)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
elif e[0] == errno.EINVAL:
print("Function:", fname, ",",
e.strerror, file=sys.stderr)
sys.exit(2)
- raise e
-
- if "threads" not in ps[pid]:
- continue
- threads = ps[pid]["threads"]
- for tid in list(threads.keys()):
- if cannot_set_thread_affinity(ps, pid, tid):
- continue
- try:
- affinity = schedutils.get_affinity(tid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
+ raise e
+
+ if "threads" not in ps[pid]:
+ continue
+ threads = ps[pid]["threads"]
+ for tid in list(threads.keys()):
+ if cannot_set_thread_affinity(ps, pid, tid):
+ continue
+ try:
+ affinity = schedutils.get_affinity(tid)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
elif e[0] == errno.EINVAL:
print("Function:", fname, ",",
e.strerror, file=sys.stderr)
sys.exit(2)
- raise e
- if set(affinity).intersection(set(cpus)):
- previous_pid_affinities[tid] = copy.copy(affinity)
- affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
- try:
- schedutils.set_affinity(tid, affinity)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
+ raise e
+ if set(affinity).intersection(set(cpus)):
+ previous_pid_affinities[tid] = copy.copy(affinity)
+ affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
+ try:
+ schedutils.set_affinity(tid, affinity)
+ except (SystemError, OSError) as e: # old
python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
elif e[0] == errno.EINVAL:
print("Function:", fname,
",", e.strerror, file=sys.stderr)
sys.exit(2)
- raise e
-
- del ps
-
- # Now isolate it from IRQs too
- irqs = procfs.interrupts()
- previous_irq_affinities = {}
- for irq in list(irqs.keys()):
- # LOC, NMI, TLB, etc
- if "affinity" not in irqs[irq]:
- continue
- affinity = irqs[irq]["affinity"]
- if set(affinity).intersection(set(cpus)):
- previous_irq_affinities[irq] = copy.copy(affinity)
- affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
- set_irq_affinity(int(irq),
- procfs.hexbitmask(affinity,
- nr_cpus))
-
- affinity = parse_irq_affinity_filename("default_smp_affinity", nr_cpus)
- affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
- set_irq_affinity_filename("default_smp_affinity", procfs.hexbitmask(affinity,
nr_cpus))
-
- return (previous_pid_affinities, previous_irq_affinities)
+ raise e
+
+ del ps
+
+ # Now isolate it from IRQs too
+ irqs = procfs.interrupts()
+ previous_irq_affinities = {}
+ for irq in list(irqs.keys()):
+ # LOC, NMI, TLB, etc
+ if "affinity" not in irqs[irq]:
+ continue
+ affinity = irqs[irq]["affinity"]
+ if set(affinity).intersection(set(cpus)):
+ previous_irq_affinities[irq] = copy.copy(affinity)
+ affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
+ set_irq_affinity(int(irq),
+ procfs.hexbitmask(affinity,
+ nr_cpus))
+
+ affinity = parse_irq_affinity_filename("default_smp_affinity",
nr_cpus)
+ affinity = affinity_remove_cpus(affinity, cpus, nr_cpus)
+ set_irq_affinity_filename("default_smp_affinity",
procfs.hexbitmask(affinity, nr_cpus))
+
+ return (previous_pid_affinities, previous_irq_affinities)
def include_cpus(cpus, nr_cpus):
- ps = procfs.pidstats()
- ps.reload_threads()
- previous_pid_affinities = {}
- for pid in list(ps.keys()):
- if cannot_set_affinity(ps, pid):
- continue
- try:
- affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if set(affinity).intersection(set(cpus)) != set(cpus):
- previous_pid_affinities[pid] = copy.copy(affinity)
- affinity = list(set(affinity + cpus))
- try:
- schedutils.set_affinity(pid, affinity)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
-
- if "threads" not in ps[pid]:
- continue
- threads = ps[pid]["threads"]
- for tid in list(threads.keys()):
- if cannot_set_thread_affinity(ps, pid, tid):
- continue
- try:
- affinity = schedutils.get_affinity(tid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if set(affinity).intersection(set(cpus)) != set(cpus):
- previous_pid_affinities[tid] = copy.copy(affinity)
- affinity = list(set(affinity + cpus))
- try:
- schedutils.set_affinity(tid, affinity)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised
SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
-
- del ps
-
- # Now include it in IRQs too
- irqs = procfs.interrupts()
- previous_irq_affinities = {}
- for irq in list(irqs.keys()):
- # LOC, NMI, TLB, etc
- if "affinity" not in irqs[irq]:
- continue
- affinity = irqs[irq]["affinity"]
- if set(affinity).intersection(set(cpus)) != set(cpus):
- previous_irq_affinities[irq] = copy.copy(affinity)
- affinity = list(set(affinity + cpus))
- set_irq_affinity(int(irq),
- procfs.hexbitmask(affinity, nr_cpus))
-
- affinity = parse_irq_affinity_filename("default_smp_affinity", nr_cpus)
- affinity = list(set(affinity + cpus))
- set_irq_affinity_filename("default_smp_affinity", procfs.hexbitmask(affinity,
nr_cpus))
-
- return (previous_pid_affinities, previous_irq_affinities)
+ ps = procfs.pidstats()
+ ps.reload_threads()
+ previous_pid_affinities = {}
+ for pid in list(ps.keys()):
+ if cannot_set_affinity(ps, pid):
+ continue
+ try:
+ affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly
raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if set(affinity).intersection(set(cpus)) != set(cpus):
+ previous_pid_affinities[pid] = copy.copy(affinity)
+ affinity = list(set(affinity + cpus))
+ try:
+ schedutils.set_affinity(pid, affinity)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+
+ if "threads" not in ps[pid]:
+ continue
+ threads = ps[pid]["threads"]
+ for tid in list(threads.keys()):
+ if cannot_set_thread_affinity(ps, pid, tid):
+ continue
+ try:
+ affinity = schedutils.get_affinity(tid)
+ except (SystemError, OSError) as e: # old python-schedutils
incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if set(affinity).intersection(set(cpus)) != set(cpus):
+ previous_pid_affinities[tid] = copy.copy(affinity)
+ affinity = list(set(affinity + cpus))
+ try:
+ schedutils.set_affinity(tid, affinity)
+ except (SystemError, OSError) as e: # old
python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+
+ del ps
+
+ # Now include it in IRQs too
+ irqs = procfs.interrupts()
+ previous_irq_affinities = {}
+ for irq in list(irqs.keys()):
+ # LOC, NMI, TLB, etc
+ if "affinity" not in irqs[irq]:
+ continue
+ affinity = irqs[irq]["affinity"]
+ if set(affinity).intersection(set(cpus)) != set(cpus):
+ previous_irq_affinities[irq] = copy.copy(affinity)
+ affinity = list(set(affinity + cpus))
+ set_irq_affinity(int(irq),
+ procfs.hexbitmask(affinity, nr_cpus))
+
+ affinity = parse_irq_affinity_filename("default_smp_affinity",
nr_cpus)
+ affinity = list(set(affinity + cpus))
+ set_irq_affinity_filename("default_smp_affinity",
procfs.hexbitmask(affinity, nr_cpus))
+
+ return (previous_pid_affinities, previous_irq_affinities)
def get_irq_users(irqs, irq, nics = None):
- if not nics:
- nics = ethtool.get_active_devices()
- users = irqs[irq]["users"]
- for u in users:
- if u in nics:
- try:
- users[users.index(u)] = "%s(%s)" % (u, ethtool.get_module(u))
- except IOError:
- # Old kernel, doesn't implement ETHTOOL_GDRVINFO
- pass
- return users
+ if not nics:
+ nics = ethtool.get_active_devices()
+ users = irqs[irq]["users"]
+ for u in users:
+ if u in nics:
+ try:
+ users[users.index(u)] = "%s(%s)" % (u, ethtool.get_module(u))
+ except IOError:
+ # Old kernel, doesn't implement ETHTOOL_GDRVINFO
+ pass
+ return users
def get_irq_affinity_text(irqs, irq):
- affinity_list = irqs[irq]["affinity"]
- try:
- return list_to_cpustring(affinity_list)
- except:
- # needs root prio to read /proc/irq/<NUM>/smp_affinity
- return ""
+ affinity_list = irqs[irq]["affinity"]
+ try:
+ return list_to_cpustring(affinity_list)
+ except:
+ # needs root prio to read /proc/irq/<NUM>/smp_affinity
+ return ""
def get_policy_and_rtprio(parm):
- parms = parm.split(":")
- rtprio = 0
- policy = None
- if parms[0].upper() in ["OTHER", "BATCH", "IDLE", "FIFO", "RR"]:
- policy = schedutils.schedfromstr("SCHED_%s" % parms[0].upper())
- if len(parms) > 1:
- rtprio = int(parms[1])
- elif parms[0].upper() in ["FIFO", "RR"]:
- rtprio = 1
- elif parms[0].isdigit():
- rtprio = int(parms[0])
- else:
- raise ValueError
- return (policy, rtprio)
+ parms = parm.split(":")
+ rtprio = 0
+ policy = None
+ if parms[0].upper() in ["OTHER", "BATCH", "IDLE", "FIFO", "RR"]:
+ policy = schedutils.schedfromstr("SCHED_%s" % parms[0].upper())
+ if len(parms) > 1:
+ rtprio = int(parms[1])
+ elif parms[0].upper() in ["FIFO", "RR"]:
+ rtprio = 1
+ elif parms[0].isdigit():
+ rtprio = int(parms[0])
+ else:
+ raise ValueError
+ return (policy, rtprio)
def thread_filtered(tid, cpus_filtered, show_kthreads, show_uthreads):
- if cpus_filtered:
- try:
- affinity = schedutils.get_affinity(tid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
- if e[0] == errno.ESRCH:
- return False
- raise e
+ if cpus_filtered:
+ try:
+ affinity = schedutils.get_affinity(tid)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ return False
+ raise e
- if set(cpus_filtered + affinity) == set(cpus_filtered):
- return True
+ if set(cpus_filtered + affinity) == set(cpus_filtered):
+ return True
- if not (show_kthreads and show_uthreads):
- kthread = iskthread(tid)
- if ((not show_kthreads) and kthread) or \
- ((not show_uthreads) and not kthread):
- return True
+ if not (show_kthreads and show_uthreads):
+ kthread = iskthread(tid)
+ if ((not show_kthreads) and kthread) or \
+ ((not show_uthreads) and not kthread):
+ return True
- return False
+ return False
def irq_filtered(irq, irqs, cpus_filtered, is_root):
- if cpus_filtered and is_root:
- affinity = irqs[irq]["affinity"]
- if set(cpus_filtered + affinity) == set(cpus_filtered):
- return True
+ if cpus_filtered and is_root:
+ affinity = irqs[irq]["affinity"]
+ if set(cpus_filtered + affinity) == set(cpus_filtered):
+ return True
- return False
+ return False
def thread_set_priority(tid, policy, rtprio):
- if not policy and policy != 0:
- policy = schedutils.get_scheduler(tid)
- schedutils.set_scheduler(tid, policy, rtprio)
+ if not policy and policy != 0:
+ policy = schedutils.get_scheduler(tid)
+ schedutils.set_scheduler(tid, policy, rtprio)
def threads_set_priority(tids, parm, affect_children = False):
- try:
- (policy, rtprio) = get_policy_and_rtprio(parm)
- except ValueError:
- print("tuna: " + _("\"%s\" is unsupported priority value!") % parms[0])
- return
-
- for tid in tids:
- try:
- thread_set_priority(tid, policy, rtprio)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- if affect_children:
- for child in [int (a) for a in os.listdir("/proc/%d/task" % tid)]:
- if child != tid:
- try:
- thread_set_priority(child, policy, rtprio)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
+ try:
+ (policy, rtprio) = get_policy_and_rtprio(parm)
+ except ValueError:
+ print("tuna: " + _("\"%s\" is unsupported priority value!") % parms[0])
+ return
+
+ for tid in tids:
+ try:
+ thread_set_priority(tid, policy, rtprio)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ if affect_children:
+ for child in [int (a) for a in os.listdir("/proc/%d/task" % tid)]:
+ if child != tid:
+ try:
+ thread_set_priority(child, policy, rtprio)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
class sched_tunings:
- def __init__(self, name, pid, policy, rtprio, affinity, percpu):
- self.name = name
- self.pid = pid
- self.policy = policy
- self.rtprio = int(rtprio)
- self.affinity = affinity
- self.percpu = percpu
+ def __init__(self, name, pid, policy, rtprio, affinity, percpu):
+ self.name = name
+ self.pid = pid
+ self.policy = policy
+ self.rtprio = int(rtprio)
+ self.affinity = affinity
+ self.percpu = percpu
def get_kthread_sched_tunings(proc = None):
- if not proc:
- proc = procfs.pidstats()
-
- kthreads = {}
- for pid in list(proc.keys()):
- name = proc[pid]["stat"]["comm"]
- # Trying to set the priority of the migration threads will
- # fail, at least on 3.6.0-rc1 and doesn't make sense anyway
- # and this function is only used to save those priorities
- # to reset them using tools like rtctl, skip those to
- # avoid sched_setscheduler/chrt to fail
- if iskthread(pid) and not name.startswith("migration/"):
- rtprio = int(proc[pid]["stat"]["rt_priority"])
- try:
- policy = schedutils.get_scheduler(pid)
- affinity = schedutils.get_affinity(pid)
- except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
- if e[0] == errno.ESRCH:
- continue
- raise e
- percpu = iskthread(pid) and \
- proc.is_bound_to_cpu(pid)
- kthreads[name] = sched_tunings(name, pid, policy,
- rtprio, affinity,
- percpu)
-
- return kthreads
+ if not proc:
+ proc = procfs.pidstats()
+
+ kthreads = {}
+ for pid in list(proc.keys()):
+ name = proc[pid]["stat"]["comm"]
+ # Trying to set the priority of the migration threads will
+ # fail, at least on 3.6.0-rc1 and doesn't make sense anyway
+ # and this function is only used to save those priorities
+ # to reset them using tools like rtctl, skip those to
+ # avoid sched_setscheduler/chrt to fail
+ if iskthread(pid) and not name.startswith("migration/"):
+ rtprio = int(proc[pid]["stat"]["rt_priority"])
+ try:
+ policy = schedutils.get_scheduler(pid)
+ affinity = schedutils.get_affinity(pid)
+ except (SystemError, OSError) as e: # old python-schedutils incorrectly raised SystemError
+ if e[0] == errno.ESRCH:
+ continue
+ raise e
+ percpu = iskthread(pid) and \
+ proc.is_bound_to_cpu(pid)
+ kthreads[name] = sched_tunings(name, pid, policy,
+ rtprio, affinity,
+ percpu)
+
+ return kthreads
def run_command(cmd, policy, rtprio, cpu_list):
- newpid = os.fork()
- if newpid == 0:
- cmd_list = shlex.split(cmd)
- pid = os.getpid()
- if rtprio:
- try:
- thread_set_priority(pid, policy, rtprio)
- except (SystemError, OSError) as err:
- print("tuna: %s" % err)
- sys.exit(2)
- if cpu_list:
- try:
- schedutils.set_affinity(pid, cpu_list)
- except (SystemError, OSError) as err:
- print("tuna: %s" % err)
- sys.exit(2)
-
- try:
- os.execvp(cmd_list[0], cmd_list)
- except (SystemError, OSError) as err:
- print("tuna: %s" % err)
- sys.exit(2)
- else:
- os.waitpid(newpid, 0);
+ newpid = os.fork()
+ if newpid == 0:
+ cmd_list = shlex.split(cmd)
+ pid = os.getpid()
+ if rtprio:
+ try:
+ thread_set_priority(pid, policy, rtprio)
+ except (SystemError, OSError) as err:
+ print("tuna: %s" % err)
+ sys.exit(2)
+ if cpu_list:
+ try:
+ schedutils.set_affinity(pid, cpu_list)
+ except (SystemError, OSError) as err:
+ print("tuna: %s" % err)
+ sys.exit(2)
+
+ try:
+ os.execvp(cmd_list[0], cmd_list)
+ except (SystemError, OSError) as err:
+ print("tuna: %s" % err)
+ sys.exit(2)
+ else:
+ os.waitpid(newpid, 0);
def generate_rtgroups(filename, kthreads, nr_cpus):
- f = file(filename, "w")
- f.write('''# Generated by tuna
+ f = file(filename, "w")
+ f.write('''# Generated by tuna
#
# Use it with rtctl:
#
@@ -675,44 +675,44 @@ def generate_rtgroups(filename, kthreads, nr_cpus):
# The regex is matched against process names as printed by "ps -eo cmd".
''' % filename)
- f.write("kthreads:*:1:*:\[.*\]$\n\n")
-
- per_cpu_kthreads = []
- names = list(kthreads.keys())
- names.sort()
- for name in names:
- kt = kthreads[name]
- try:
- idx = name.index("/")
- common = name[:idx]
- if common in per_cpu_kthreads:
- continue
- per_cpu_kthreads.append(common)
- name = common
- if common[:5] == "sirq-":
- common = "(sirq|softirq)" + common[4:]
- elif common[:8] == "softirq-":
- common = "(sirq|softirq)" + common[7:]
- name = "s" + name[4:]
- regex = common + "\/.*"
- except:
- idx = 0
- regex = name
- pass
- if kt.percpu or idx != 0 or name == "posix_cpu_timer":
- # Don't mess with workqueues, etc
- # posix_cpu_timer is too long and doesn't
- # have PF_THREAD_BOUND in its per process
- # flags...
- mask = "*"
- else:
- mask = ",".join([hex(a) for a in \
- procfs.hexbitmask(kt.affinity, nr_cpus)])
- f.write("%s:%c:%d:%s:\[%s\]$\n" % (name,
- schedutils.schedstr(kt.policy)[6].lower(),
- kt.rtprio, mask, regex))
- f.close()
+ f.write("kthreads:*:1:*:\[.*\]$\n\n")
+
+ per_cpu_kthreads = []
+ names = list(kthreads.keys())
+ names.sort()
+ for name in names:
+ kt = kthreads[name]
+ try:
+ idx = name.index("/")
+ common = name[:idx]
+ if common in per_cpu_kthreads:
+ continue
+ per_cpu_kthreads.append(common)
+ name = common
+ if common[:5] == "sirq-":
+ common = "(sirq|softirq)" + common[4:]
+ elif common[:8] == "softirq-":
+ common = "(sirq|softirq)" + common[7:]
+ name = "s" + name[4:]
+ regex = common + "\/.*"
+ except:
+ idx = 0
+ regex = name
+ pass
+ if kt.percpu or idx != 0 or name == "posix_cpu_timer":
+ # Don't mess with workqueues, etc
+ # posix_cpu_timer is too long and doesn't
+ # have PF_THREAD_BOUND in its per process
+ # flags...
+ mask = "*"
+ else:
+ mask = ",".join([hex(a) for a in \
+ procfs.hexbitmask(kt.affinity, nr_cpus)])
+ f.write("%s:%c:%d:%s:\[%s\]$\n" % (name,
+ schedutils.schedstr(kt.policy)[6].lower(),
+ kt.rtprio, mask, regex))
+ f.close()
def nohz_full_list():
- return [ int(cpu) for cpu in procfs.cmdline().options["nohz_full"].split(",") ]
+ return [ int(cpu) for cpu in procfs.cmdline().options["nohz_full"].split(",") ]
--
2.14.3