diff --git a/.pybuild/cpython3_3.12_linux-procfs/.pydistutils.cfg b/.pybuild/cpython3_3.12_linux-procfs/.pydistutils.cfg new file mode 100644 index 0000000..a223dbe --- /dev/null +++ b/.pybuild/cpython3_3.12_linux-procfs/.pydistutils.cfg @@ -0,0 +1,10 @@ +[clean] +all=1 +[build] +build_lib=/home/xzl/work/python-linux-procfs/python-linux-procfs/.pybuild/cpython3_3.12_linux-procfs/build +[install] +force=1 +install_layout=deb +install_scripts=$base/bin +install_lib=/usr/lib/python3.12/dist-packages +prefix=/usr diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__init__.py b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__init__.py new file mode 100644 index 0000000..6deedf4 --- /dev/null +++ b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__init__.py @@ -0,0 +1,17 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2008, 2009 Red Hat, Inc. +# +""" +Copyright (c) 2008, 2009 Red Hat Inc. + +Abstractions to extract information from the Linux kernel /proc files. +""" +__author__ = "Arnaldo Carvalho de Melo " +__license__ = "GPLv2 License" + +from .procfs import * +from .utilist import * diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/__init__.cpython-312.pyc b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/__init__.cpython-312.pyc new file mode 100644 index 0000000..e2abaa4 Binary files /dev/null and b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/__init__.cpython-312.pyc differ diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/procfs.cpython-312.pyc b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/procfs.cpython-312.pyc new file mode 100644 index 0000000..10180a7 Binary files /dev/null and b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/procfs.cpython-312.pyc differ diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/utilist.cpython-312.pyc b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/utilist.cpython-312.pyc new file mode 100644 index 0000000..6379987 Binary files /dev/null and b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/__pycache__/utilist.cpython-312.pyc differ diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/procfs.py b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/procfs.py new file mode 100644 index 0000000..3fcc45c --- /dev/null +++ b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/procfs.py @@ -0,0 +1,1110 @@ +#!/usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2007-2015 Red Hat, Inc. +# + +import os +import platform +import re +import time +from functools import reduce +from procfs.utilist import bitmasklist + +VERSION = "0.7.3" + + +def is_s390(): + """ Return True if running on s390 or s390x """ + machine = platform.machine() + return bool(re.search('s390', machine)) + + +def process_cmdline(pid_info): + """ + Returns the process command line, if available in the given `process' class, + if not available, falls back to using the comm (short process name) in its + pidstat key. + """ + if pid_info["cmdline"]: + return reduce(lambda a, b: a + " %s" % b, pid_info["cmdline"]).strip() + + try: + """ If a pid disappears before we query it, return None """ + return pid_info["stat"]["comm"] + except: + return None + + +class pidstat: + """ + Provides a dictionary to access the fields in the + per process /proc/PID/stat files. 
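
As an aside, a minimal usage sketch for process_cmdline() together with the process wrapper defined further down in this file; it assumes the procfs package is importable on a Linux host and that pid 1 is readable (pid 1 is only an example):

import procfs

# Wrap an arbitrary pid (1 is used here purely as an example).
p = procfs.process(1)

# process_cmdline() prefers /proc/1/cmdline and falls back to the short
# "comm" name from /proc/1/stat when the cmdline is empty (kernel threads).
print(procfs.process_cmdline(p))
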
+ + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> p = procfs.pidstat(1) + >>> print p.keys() + ['majflt', 'rss', 'cnswap', 'cstime', 'pid', 'session', 'startstack', 'startcode', 'cmajflt', 'blocked', 'exit_signal', 'minflt', 'nswap', 'environ', 'priority', 'state', 'delayacct_blkio_ticks', 'policy', 'rt_priority', 'ppid', 'nice', 'cutime', 'endcode', 'wchan', 'num_threads', 'sigcatch', 'comm', 'stime', 'sigignore', 'tty_nr', 'kstkeip', 'utime', 'tpgid', 'itrealvalue', 'kstkesp', 'rlim', 'signal', 'pgrp', 'flags', 'starttime', 'cminflt', 'vsize', 'processor'] + + And then access the various process properties using it as a dictionary: + + >>> print p['comm'] + systemd + >>> print p['priority'] + 20 + >>> print p['state'] + S + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + """ + + # Entries with the same value, the one with a comment after it is the + # more recent, having replaced the other name in v4.1-rc kernel times. + + PF_ALIGNWARN = 0x00000001 + PF_STARTING = 0x00000002 + PF_EXITING = 0x00000004 + PF_EXITPIDONE = 0x00000008 + PF_VCPU = 0x00000010 + PF_WQ_WORKER = 0x00000020 # /* I'm a workqueue worker */ + PF_FORKNOEXEC = 0x00000040 + PF_MCE_PROCESS = 0x00000080 # /* process policy on mce errors */ + PF_SUPERPRIV = 0x00000100 + PF_DUMPCORE = 0x00000200 + PF_SIGNALED = 0x00000400 + PF_MEMALLOC = 0x00000800 + # /* set_user noticed that RLIMIT_NPROC was exceeded */ + PF_NPROC_EXCEEDED = 0x00001000 + PF_FLUSHER = 0x00001000 + PF_USED_MATH = 0x00002000 + PF_USED_ASYNC = 0x00004000 # /* used async_schedule*(), used by module init */ + PF_NOFREEZE = 0x00008000 + PF_FROZEN = 0x00010000 + PF_FSTRANS = 0x00020000 + PF_KSWAPD = 0x00040000 + PF_MEMALLOC_NOIO = 0x00080000 # /* Allocating memory without IO involved */ + PF_SWAPOFF = 0x00080000 + PF_LESS_THROTTLE = 0x00100000 + PF_KTHREAD = 0x00200000 + PF_RANDOMIZE = 0x00400000 + PF_SWAPWRITE = 0x00800000 + PF_SPREAD_PAGE = 0x01000000 + PF_SPREAD_SLAB = 0x02000000 + PF_THREAD_BOUND = 0x04000000 + # /* Userland is not allowed to meddle with cpus_allowed */ + PF_NO_SETAFFINITY = 0x04000000 + PF_MCE_EARLY = 0x08000000 # /* Early kill for mce process policy */ + PF_MEMPOLICY = 0x10000000 + PF_MUTEX_TESTER = 0x20000000 + PF_FREEZER_SKIP = 0x40000000 + PF_FREEZER_NOSIG = 0x80000000 + # /* this thread called freeze_processes and should not be frozen */ + PF_SUSPEND_TASK = 0x80000000 + + proc_stat_fields = ["pid", "comm", "state", "ppid", "pgrp", "session", + "tty_nr", "tpgid", "flags", "minflt", "cminflt", + "majflt", "cmajflt", "utime", "stime", "cutime", + "cstime", "priority", "nice", "num_threads", + "itrealvalue", "starttime", "vsize", "rss", + "rlim", "startcode", "endcode", "startstack", + "kstkesp", "kstkeip", "signal", "blocked", + "sigignore", "sigcatch", "wchan", "nswap", + "cnswap", "exit_signal", "processor", + "rt_priority", "policy", + "delayacct_blkio_ticks", "environ"] + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + try: + self.load(basedir) + except FileNotFoundError: + # The file representing the pid has disappeared + # propagate the error to the user to handle + raise + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return 
self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + try: + f = open(f"{basedir}/{self.pid}/stat") + except FileNotFoundError: + # The pid has disappeared, propagate the error + raise + fields = f.readline().strip().split(') ') + f.close() + fields = fields[0].split(' (') + fields[1].split() + self.fields = {} + nr_fields = min(len(fields), len(self.proc_stat_fields)) + for i in range(nr_fields): + attrname = self.proc_stat_fields[i] + value = fields[i] + if attrname == "comm": + self.fields["comm"] = value.strip('()') + else: + try: + self.fields[attrname] = int(value) + except: + self.fields[attrname] = value + + def is_bound_to_cpu(self): + """ + Returns true if this process has a fixed smp affinity mask, + not allowing it to be moved to a different set of CPUs. + """ + return bool(self.fields["flags"] & self.PF_THREAD_BOUND) + + def process_flags(self): + """ + Returns a list with all the process flags known, details depend + on kernel version, declared in the file include/linux/sched.h in + the kernel sources. + + As of v4.2-rc7 these include (from include/linux/sched.h comments): + + PF_EXITING Getting shut down + PF_EXITPIDONE Pi exit done on shut down + PF_VCPU I'm a virtual CPU + PF_WQ_WORKER I'm a workqueue worker + PF_FORKNOEXEC Forked but didn't exec + PF_MCE_PROCESS Process policy on mce errors + PF_SUPERPRIV Used super-user privileges + PF_DUMPCORE Dumped core + PF_SIGNALED Killed by a signal + PF_MEMALLOC Allocating memory + PF_NPROC_EXCEEDED Set_user noticed that RLIMIT_NPROC was exceeded + PF_USED_MATH If unset the fpu must be initialized before use + PF_USED_ASYNC Used async_schedule*(), used by module init + PF_NOFREEZE This thread should not be frozen + PF_FROZEN Frozen for system suspend + PF_FSTRANS Inside a filesystem transaction + PF_KSWAPD I am kswapd + PF_MEMALLOC_NOIO Allocating memory without IO involved + PF_LESS_THROTTLE Throttle me less: I clean memory + PF_KTHREAD I am a kernel thread + PF_RANDOMIZE Randomize virtual address space + PF_SWAPWRITE Allowed to write to swap + PF_NO_SETAFFINITY Userland is not allowed to meddle with cpus_allowed + PF_MCE_EARLY Early kill for mce process policy + PF_MUTEX_TESTER Thread belongs to the rt mutex tester + PF_FREEZER_SKIP Freezer should not count it as freezable + PF_SUSPEND_TASK This thread called freeze_processes and + should not be frozen + + """ + sflags = [] + for attr in dir(self): + if attr[:3] != "PF_": + continue + value = getattr(self, attr) + if value & self.fields["flags"]: + sflags.append(attr) + + return sflags + + +def cannot_set_affinity(self, pid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +def cannot_set_thread_affinity(self, pid, tid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid].threads[tid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +class pidstatus: + """ + Provides a dictionary to access the fields + in the per process /proc/PID/status files. + This provides additional information about processes and threads to + what can be obtained with the procfs.pidstat() class. 
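
A short sketch of how the flag decoding above is used in practice (assuming the package is importable and /proc/1/stat can be read):

import procfs

stat = procfs.pidstat(1)
# process_flags() returns the names of the PF_* bits set in the "flags"
# field of /proc/1/stat; is_bound_to_cpu() tests PF_THREAD_BOUND only.
print(stat["comm"], stat["state"], stat["priority"])
print(stat.process_flags())
print(stat.is_bound_to_cpu())
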
+ + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> import procfs + >>> p = procfs.pidstatus(1) + >>> print p.keys() + ['VmExe', 'CapBnd', 'NSpgid', 'Tgid', 'NSpid', 'VmSize', 'VmPMD', 'ShdPnd', 'State', 'Gid', 'nonvoluntary_ctxt_switches', 'SigIgn', 'VmStk', 'VmData', 'SigCgt', 'CapEff', 'VmPTE', 'Groups', 'NStgid', 'Threads', 'PPid', 'VmHWM', 'NSsid', 'VmSwap', 'Name', 'SigBlk', 'Mems_allowed_list', 'VmPeak', 'Ngid', 'VmLck', 'SigQ', 'VmPin', 'Mems_allowed', 'CapPrm', 'Seccomp', 'VmLib', 'Cpus_allowed', 'Uid', 'SigPnd', 'Pid', 'Cpus_allowed_list', 'TracerPid', 'CapInh', 'voluntary_ctxt_switches', 'VmRSS', 'FDSize'] + >>> print p["Pid"] + 1 + >>> print p["Threads"] + 1 + >>> print p["VmExe"] + 1248 kB + >>> print p["Cpus_allowed"] + f + >>> print p["SigQ"] + 0/30698 + >>> print p["VmPeak"] + 320300 kB + >>> + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + + In the man page there will be references to further documentation, like + referring to the "getrlimit(2)" man page when explaining the "SigQ" + line/field. + """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.load(basedir) + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + self.fields = {} + with open(f"{basedir}/{self.pid}/status") as f: + for line in f.readlines(): + fields = line.split(":") + if len(fields) != 2: + continue + name = fields[0] + value = fields[1].strip() + try: + self.fields[name] = int(value) + except: + self.fields[name] = value + + +class process: + """ + Information about a process with a given pid, provides a dictionary with + two entries, instances of different wrappers for /proc/ process related + meta files: "stat" and "status", see the documentation for procfs.pidstat + and procfs.pidstatus for further info about those classes. 
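
A corresponding sketch for pidstatus, reading /proc/PID/status (same assumptions as above; the exact set of keys depends on the running kernel):

import procfs

status = procfs.pidstatus(1)
# Integer-looking values are converted; everything else, such as
# "1248 kB" or the Cpus_allowed mask, is kept as a string.
print(status["Name"], status["Threads"], status["Cpus_allowed"])
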
+ """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.basedir = basedir + + def __getitem__(self, attr): + if not hasattr(self, attr): + if attr in ("stat", "status"): + if attr == "stat": + sclass = pidstat + else: + sclass = pidstatus + + try: + setattr(self, attr, sclass(self.pid, self.basedir)) + except FileNotFoundError: + # The pid has disappeared, progate the error + raise + elif attr == "cmdline": + self.load_cmdline() + elif attr == "threads": + self.load_threads() + elif attr == "cgroups": + self.load_cgroups() + elif attr == "environ": + self.load_environ() + + return getattr(self, attr) + + def has_key(self, attr): + return hasattr(self, attr) + + def __contains__(self, attr): + return hasattr(self, attr) + + def load_cmdline(self): + try: + with open(f"/proc/{self.pid}/cmdline") as f: + self.cmdline = f.readline().strip().split('\0')[:-1] + except FileNotFoundError: + """ This can happen when a pid disappears """ + self.cmdline = None + except UnicodeDecodeError: + """ TODO - this shouldn't happen, needs to be investigated """ + self.cmdline = None + + def load_threads(self): + self.threads = pidstats(f"/proc/{self.pid}/task/") + # remove thread leader + del self.threads[self.pid] + + def load_cgroups(self): + self.cgroups = "" + with open(f"/proc/{self.pid}/cgroup") as f: + for line in reversed(f.readlines()): + if len(self.cgroups) != 0: + self.cgroups = self.cgroups + "," + line[:-1] + else: + self.cgroups = line[:-1] + + def load_environ(self): + """ + Loads the environment variables for this process. The entries then + become available via the 'environ' member, or via the 'environ' + dict key when accessing as p["environ"]. + + E.g.: + + + >>> all_processes = procfs.pidstats() + >>> firefox_pid = all_processes.find_by_name("firefox") + >>> firefox_process = all_processes[firefox_pid[0]] + >>> print firefox_process["environ"]["PWD"] + /home/acme + >>> print len(firefox_process.environ.keys()) + 66 + >>> print firefox_process["environ"]["SHELL"] + /bin/bash + >>> print firefox_process["environ"]["USERNAME"] + acme + >>> print firefox_process["environ"]["HOME"] + /home/acme + >>> print firefox_process["environ"]["MAIL"] + /var/spool/mail/acme + >>> + """ + self.environ = {} + with open(f"/proc/{self.pid}/environ") as f: + for x in f.readline().split('\0'): + if len(x) > 0: + y = x.split('=') + self.environ[y[0]] = y[1] + + +class pidstats: + """ + Provides access to all the processes in the system, to get a picture of + how many processes there are at any given moment. + + The entries can be accessed as a dictionary, keyed by pid. Also there are + methods to find processes that match a given COMM or regular expression. + """ + + def __init__(self, basedir="/proc"): + self.basedir = basedir + self.processes = {} + self.reload() + + def __getitem__(self, key): + return self.processes[key] + + def __delitem__(self, key): + # not clear on why this can fail, but it can + try: + del self.processes[key] + except: + pass + + def keys(self): + return list(self.processes.keys()) + + def values(self): + return list(self.processes.values()) + + def has_key(self, key): + return key in self.processes + + def items(self): + return self.processes + + def __contains__(self, key): + return key in self.processes + + def reload(self): + """ + This operation will throw away the current dictionary contents, + if any, and read all the pid files from /proc/, instantiating a + 'process' instance for each of them. 
+ + This is a high overhead operation, and should be avoided if the + perf python binding can be used to detect when new threads appear + and existing ones terminate. + + In RHEL it is found in the python-perf rpm package. + + More information about the perf facilities can be found in the + 'perf_event_open' man page. + """ + del self.processes + self.processes = {} + pids = os.listdir(self.basedir) + for spid in pids: + try: + pid = int(spid) + except: + continue + + self.processes[pid] = process(pid, self.basedir) + + def reload_threads(self): + to_remove = [] + for pid in list(self.processes.keys()): + try: + self.processes[pid].load_threads() + except OSError: + # process vanished, remove it + to_remove.append(pid) + for pid in to_remove: + del self.processes[pid] + + def find_by_name(self, name): + name = name[:15] + pids = [] + for pid in list(self.processes.keys()): + try: + if name == self.processes[pid]["stat"]["comm"]: + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + + return pids + + def find_by_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(self.processes[pid]["stat"]["comm"]): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def find_by_cmdline_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(process_cmdline(self.processes[pid])): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def get_per_cpu_rtprios(self, basename): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + name = f"{basename}/{cpu}" + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def get_rtprios(self, name): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def is_bound_to_cpu(self, pid): + """ + Checks if a given pid can't have its SMP affinity mask changed. + """ + return self.processes[pid]["stat"].is_bound_to_cpu() + + +class interrupts: + """ + Information about IRQs in the system. A dictionary keyed by IRQ number + will have as its value another dictionary with "cpu", "type" and "users" + keys, with the SMP affinity mask, type of IRQ and the drivers associated + with each interrupt. 
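
A quick sketch of the lookup helpers above; the process name and regex are arbitrary examples and may match nothing on a given system:

import re
import procfs

ps = procfs.pidstats()
# comm is truncated to 15 characters by the kernel; find_by_name()
# truncates its argument the same way before comparing.
print(ps.find_by_name("systemd"))
# The regex variant matches against the comm field of each process.
print(ps.find_by_regex(re.compile("^kworker")))
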
+ + The information comes from the /proc/interrupts file, documented in + 'man procfs(5)', for instance, the 'cpu' dict is an array with one entry + per CPU present in the sistem, each value being the number of interrupts + that took place per CPU. + + E.g.: + + >>> import procfs + >>> interrupts = procfs.interrupts() + >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt") + >>> print thunderbolt_irq + 34 + >>> thunderbolt = interrupts[thunderbolt_irq] + >>> print thunderbolt + {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']} + >>> + """ + + def __init__(self): + self.interrupts = {} + self.reload() + + def __getitem__(self, key): + return self.interrupts[str(key)] + + def keys(self): + return list(self.interrupts.keys()) + + def values(self): + return list(self.interrupts.values()) + + def has_key(self, key): + return str(key) in self.interrupts + + def items(self): + return self.interrupts + + def __contains__(self, key): + return str(key) in self.interrupts + + def reload(self): + del self.interrupts + self.interrupts = {} + with open("/proc/interrupts") as f: + for line in f.readlines(): + line = line.strip() + fields = line.split() + if fields[0][:3] == "CPU": + self.nr_cpus = len(fields) + continue + irq = fields[0].strip(":") + self.interrupts[irq] = {} + self.interrupts[irq] = self.parse_entry(fields[1:], line) + try: + nirq = int(irq) + except: + continue + self.interrupts[irq]["affinity"] = self.parse_affinity(nirq) + + def parse_entry(self, fields, line): + dict = {} + dict["cpu"] = [] + dict["cpu"].append(int(fields[0])) + nr_fields = len(fields) + if nr_fields >= self.nr_cpus: + dict["cpu"] += [int(i) for i in fields[1:self.nr_cpus]] + if nr_fields > self.nr_cpus: + dict["type"] = fields[self.nr_cpus] + # look if there are users (interrupts 3 and 4 haven't) + if nr_fields > self.nr_cpus + 1: + dict["users"] = [a.strip() + for a in fields[nr_fields - 1].split(',')] + else: + dict["users"] = [] + return dict + + def parse_affinity(self, irq): + try: + with open(f"/proc/irq/{irq}/smp_affinity") as f: + line = f.readline() + return bitmasklist(line, self.nr_cpus) + except IOError: + return [0, ] + + def find_by_user(self, user): + """ + Looks up a interrupt number by the name of one of its users" + + E.g.: + + >>> import procfs + >>> interrupts = procfs.interrupts() + >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt") + >>> print thunderbolt_irq + 34 + >>> thunderbolt = interrupts[thunderbolt_irq] + >>> print thunderbolt + {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']} + >>> + """ + for i in list(self.interrupts.keys()): + if "users" in self.interrupts[i] and \ + user in self.interrupts[i]["users"]: + return i + return None + + def find_by_user_regex(self, regex): + """ + Looks up a interrupt number by a regex that matches names of its users" + + E.g.: + + >>> import procfs + >>> import re + >>> interrupts = procfs.interrupts() + >>> usb_controllers = interrupts.find_by_user_regex(re.compile(".*hcd")) + >>> print usb_controllers + ['22', '23', '31'] + >>> print [ interrupts[irq]["users"] for irq in usb_controllers ] + [['ehci_hcd:usb4'], ['ehci_hcd:usb3'], ['xhci_hcd']] + >>> + """ + irqs = [] + for i in list(self.interrupts.keys()): + if "users" not in self.interrupts[i]: + continue + for user in self.interrupts[i]["users"]: + if regex.match(user): + irqs.append(i) + break + return irqs + + +class cmdline: + """ + Parses the kernel command line (/proc/cmdline), 
turning it into a dictionary." + + Useful to figure out if some kernel boolean knob has been turned on, + as well as to find the value associated to other kernel knobs. + + It can also be used to find out about parameters passed to the + init process, such as 'BOOT_IMAGE', etc. + + E.g.: + >>> import procfs + >>> kcmd = procfs.cmdline() + >>> print kcmd.keys() + ['LANG', 'BOOT_IMAGE', 'quiet', 'rhgb', 'rd.lvm.lv', 'ro', 'root'] + >>> print kcmd["BOOT_IMAGE"] + /vmlinuz-4.3.0-rc1+ + >>> + """ + + def __init__(self): + self.options = {} + self.parse() + + def parse(self): + with open("/proc/cmdline") as f: + for option in f.readline().strip().split(): + fields = option.split("=") + if len(fields) == 1: + self.options[fields[0]] = True + else: + self.options[fields[0]] = fields[1] + + def __getitem__(self, key): + return self.options[key] + + def keys(self): + return list(self.options.keys()) + + def values(self): + return list(self.options.values()) + + def items(self): + return self.options + + +class cpuinfo: + """ + Dictionary with information about CPUs in the system. + + Please refer to 'man procfs(5)' for further information about the + '/proc/cpuinfo' file, that is the source of the information provided + by this class. The 'man lscpu(1)' also has information about a program that + uses the '/proc/cpuinfo' file. + + Using this class one can obtain the number of CPUs in a system: + + >>> cpus = procfs.cpuinfo() + >>> print cpus.nr_cpus + 4 + + It is also possible to figure out aspects of the CPU topology, such as + how many CPU physical sockets exists, i.e. groups of CPUs sharing + components such as CPU memory caches: + + >>> print len(cpus.sockets) + 1 + + Additionally dictionary with information common to all CPUs in the system + is available: + + >>> print cpus["model name"] + Intel(R) Core(TM) i7-3667U CPU @ 2.00GHz + >>> print cpus["cache size"] + 4096 KB + >>> + """ + + def __init__(self, filename="/proc/cpuinfo"): + self.tags = {} + self.nr_cpus = 0 + self.sockets = [] + self.parse(filename) + + def __getitem__(self, key): + return self.tags[key.lower()] + + def keys(self): + return list(self.tags.keys()) + + def values(self): + return list(self.tags.values()) + + def items(self): + return self.tags + + def parse(self, filename): + with open(filename) as f: + for line in f.readlines(): + line = line.strip() + if not line: + continue + fields = line.split(":") + tagname = fields[0].strip().lower() + if tagname == "processor": + self.nr_cpus += 1 + continue + if is_s390() and tagname == "cpu number": + self.nr_cpus += 1 + continue + if tagname == "core id": + continue + self.tags[tagname] = fields[1].strip() + if tagname == "physical id": + socket_id = self.tags[tagname] + if socket_id not in self.sockets: + self.sockets.append(socket_id) + self.nr_sockets = self.sockets and len(self.sockets) or \ + (self.nr_cpus / + ("siblings" in self.tags and int(self.tags["siblings"]) or 1)) + self.nr_cores = ("cpu cores" in self.tags and int( + self.tags["cpu cores"]) or 1) * self.nr_sockets + + +class smaps_lib: + """ + Representation of an mmap in place for a process. Can be used to figure + out which processes have an library mapped, etc. + + The 'perm' member can be used to figure out executable mmaps, + i.e. libraries. + + The 'vm_start' and 'vm_end' in turn can be used when trying to resolve + processor instruction pointer addresses to a symbol name in a library. 
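
A combined sketch for the two classes above, the kernel command line and /proc/cpuinfo; note that the "model name" tag is an x86 assumption, other architectures expose different tags:

import procfs

kcmd = procfs.cmdline()
# Options without a value (e.g. "quiet") are stored as True.
print(kcmd.keys())
print(kcmd.options.get("BOOT_IMAGE"))

cpus = procfs.cpuinfo()
print(cpus.nr_cpus, cpus.nr_sockets)
print(cpus["model name"])   # x86 wording; s390 and ARM differ
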
+ """ + + def __init__(self, lines): + fields = lines[0].split() + self.vm_start, self.vm_end = [int(a, 16) for a in fields[0].split("-")] + self.perms = fields[1] + self.offset = int(fields[2], 16) + self.major, self.minor = fields[3].split(":") + self.inode = int(fields[4]) + if len(fields) > 5: + self.name = fields[5] + else: + self.name = None + self.tags = {} + for line in lines[1:]: + fields = line.split() + tag = fields[0][:-1].lower() + try: + self.tags[tag] = int(fields[1]) + except: + # VmFlags are strings + self.tags[tag] = fields + + def __getitem__(self, key): + return self.tags[key.lower()] + + def keys(self): + return list(self.tags.keys()) + + def values(self): + return list(self.tags.values()) + + def items(self): + return self.tags + + +class smaps: + """ + List of libraries mapped by a process. Parses the lines in + the /proc/PID/smaps file, that is further documented in the + procfs(5) man page. + + Example: Listing the executable maps for the 'sshd' process: + + >>> import procfs + >>> processes = procfs.pidstats() + >>> sshd = processes.find_by_name("sshd") + >>> sshd_maps = procfs.smaps(sshd[0]) + >>> for i in range(len(sshd_maps)): + ... if 'x' in sshd_maps[i].perms: + ... print "%s: %s" % (sshd_maps[i].name, sshd_maps[i].perms) + ... + /usr/sbin/sshd: r-xp + /usr/lib64/libnss_files-2.20.so: r-xp + /usr/lib64/librt-2.20.so: r-xp + /usr/lib64/libkeyutils.so.1.5: r-xp + /usr/lib64/libkrb5support.so.0.1: r-xp + /usr/lib64/libfreebl3.so: r-xp + /usr/lib64/libpthread-2.20.so: r-xp + ... + """ + + def __init__(self, pid): + self.pid = pid + self.entries = [] + self.reload() + + def parse_entry(self, f, line): + lines = [] + if not line: + line = f.readline().strip() + if not line: + return + lines.append(line) + while True: + line = f.readline() + if not line: + break + line = line.strip() + if line.split()[0][-1] == ':': + lines.append(line) + else: + break + self.entries.append(smaps_lib(lines)) + return line + + def __len__(self): + return len(self.entries) + + def __getitem__(self, index): + return self.entries[index] + + def reload(self): + line = None + with open(f"/proc/{self.pid}/smaps") as f: + while True: + line = self.parse_entry(f, line) + if not line: + break + self.nr_entries = len(self.entries) + + def find_by_name_fragment(self, fragment): + result = [] + for i in range(self.nr_entries): + if self.entries[i].name and \ + self.entries[i].name.find(fragment) >= 0: + result.append(self.entries[i]) + + return result + + +class cpustat: + """ + CPU statistics, obtained from a line in the '/proc/stat' file, Please + refer to 'man procfs(5)' for further information about the '/proc/stat' + file, that is the source of the information provided by this class. + """ + + def __init__(self, fields): + self.name = fields[0] + (self.user, + self.nice, + self.system, + self.idle, + self.iowait, + self.irq, + self.softirq) = [int(i) for i in fields[1:8]] + if len(fields) > 7: + self.steal = int(fields[7]) + if len(fields) > 8: + self.guest = int(fields[8]) + + def __repr__(self): + s = f"< user: {self.user}, nice: {self.nice}, system: {self.system}, idle: {self.idle}, iowait: {self.iowait}, irq: {self.irq}, softirq: {self.softirq}" + if hasattr(self, 'steal'): + s += f", steal: {self.steal}" + if hasattr(self, 'guest'): + s += f", guest: {self.guest}" + return s + ">" + + +class cpusstats: + """ + Dictionary with information about CPUs in the system. First entry in the + dictionary gives an aggregate view of all CPUs, each other entry is about + separate CPUs. 
Please refer to 'man procfs(5)' for further information + about the '/proc/stat' file, that is the source of the information provided + by this class. + """ + + def __init__(self, filename="/proc/stat"): + self.entries = {} + self.time = None + self.hertz = os.sysconf(2) + self.filename = filename + self.reload() + + def __iter__(self): + return iter(self.entries) + + def __getitem__(self, key): + return self.entries[key] + + def __len__(self): + return len(list(self.entries.keys())) + + def keys(self): + return list(self.entries.keys()) + + def values(self): + return list(self.entries.values()) + + def items(self): + return self.entries + + def reload(self): + last_entries = self.entries + self.entries = {} + with open(self.filename) as f: + for line in f.readlines(): + fields = line.strip().split() + if fields[0][:3].lower() != "cpu": + continue + c = cpustat(fields) + if c.name == "cpu": + idx = 0 + else: + idx = int(c.name[3:]) + 1 + self.entries[idx] = c + last_time = self.time + self.time = time.time() + if last_entries: + delta_sec = self.time - last_time + interval_hz = delta_sec * self.hertz + for cpu in list(self.entries.keys()): + if cpu not in last_entries: + curr.usage = 0 + continue + curr = self.entries[cpu] + prev = last_entries[cpu] + delta = (curr.user - prev.user) + \ + (curr.nice - prev.nice) + \ + (curr.system - prev.system) + curr.usage = (delta / interval_hz) * 100 + curr.usage = min(curr.usage, 100) + + +if __name__ == '__main__': + import sys + + ints = interrupts() + + for i in list(ints.interrupts.keys()): + print(f"{i}: {ints.interrupts[i]}") + + options = cmdline() + for o in list(options.options.keys()): + print(f"{o}: {options.options[o]}") + + cpu = cpuinfo() + print(f"\ncpuinfo data: {cpu.nr_cpus} processors") + for tag in list(cpu.keys()): + print(f"{tag}={cpu[tag]}") + + print("smaps:\n" + ("-" * 40)) + s = smaps(int(sys.argv[1])) + for i in range(s.nr_entries): + print(f"{s.entries[i].vm_start:#x} {s.entries[i].name}") + print("-" * 40) + for a in s.find_by_name_fragment(sys.argv[2]): + print(a["Size"]) + + ps = pidstats() + print(ps[1]) + + cs = cpusstats() + while True: + time.sleep(1) + cs.reload() + for cpu in cs: + print(f"{cpu}: {cs[cpu]}") + print("-" * 10) diff --git a/.pybuild/cpython3_3.12_linux-procfs/build/procfs/utilist.py b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/utilist.py new file mode 100644 index 0000000..2e260b0 --- /dev/null +++ b/.pybuild/cpython3_3.12_linux-procfs/build/procfs/utilist.py @@ -0,0 +1,40 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2007 Red Hat, Inc. +# + + + +def hexbitmask(l, nr_entries): + hexbitmask = [] + bit = 0 + mask = 0 + for entry in range(nr_entries): + if entry in l: + mask |= (1 << bit) + bit += 1 + if bit == 32: + bit = 0 + hexbitmask.insert(0, mask) + mask = 0 + + if bit < 32 and mask != 0: + hexbitmask.insert(0, mask) + + return hexbitmask + +def bitmasklist(line, nr_entries): + hexmask = line.strip().replace(",", "") + bitmasklist = [] + entry = 0 + bitmask = bin(int(hexmask, 16))[2::] + for i in reversed(bitmask): + if int(i) & 1: + bitmasklist.append(entry) + entry += 1 + if entry == nr_entries: + break + return bitmasklist diff --git a/COPYING b/COPYING new file mode 100644 index 0000000..1699f18 --- /dev/null +++ b/COPYING @@ -0,0 +1,5 @@ +python-linux-procfs is provided under + + SPDX-License-Identifier: GPL-2.0-only + +All contributions to python-linux-procfs are subject to this COPYING file. 
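
A small round-trip check of the two helpers in utilist.py above, run from a Python 3 shell with the package importable; the mask string follows the /proc/irq/N/smp_affinity format:

from procfs import bitmasklist, hexbitmask

# "00000000,00000101" is the affinity mask for CPUs 0 and 8.
print(bitmasklist("00000000,00000101", 32))            # [0, 8]

# And the reverse direction: CPUs 0 and 8 on a 32-CPU machine.
print(["%08x" % w for w in hexbitmask([0, 8], 32)])    # ['00000101']
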
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..6819214 --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +CTAGS_EXTRA := $(shell ctags --version 2>&1 | grep -iq universal && echo extras || echo extra) +.PHONY: tags +tags: + ctags -R --$(CTAGS_EXTRA)=+fq --python-kinds=+cfmvi + +.PHONY: cleantags +cleantags: + rm -f tags + +.PHONY: pyclean +pyclean: + @find . -type f \( -name \*~ -o -name \*.pyc \) -delete + +clean: pyclean cleantags diff --git a/README.md b/README.md deleted file mode 100644 index 9ebb840..0000000 --- a/README.md +++ /dev/null @@ -1 +0,0 @@ -# template-repository \ No newline at end of file diff --git a/bitmasklist_test.py b/bitmasklist_test.py new file mode 100755 index 0000000..af26884 --- /dev/null +++ b/bitmasklist_test.py @@ -0,0 +1,75 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only + +""" Module to test bitmasklist functionality """ + +import sys +from procfs import bitmasklist + +class BitmasklistTest: + """ class to run the bitmasklist test """ + # Assume true (passed) until proven false + # Many tests can be run, but just one failure is recorded overall here + unit_test_result = 0 # Assume true (passed) until proven false + + def __init__(self, line, nr_entries, expected_result): + self.result = 0 # Assume pass + self.line = line + self.nr_entries = nr_entries # Corresponds to the number of cpus + self.expected_result = expected_result + + # A failure in any single test is recorded as an overall failure + def set_unit_test_result(self): + """ set unit_test_result to fail if any test fails """ + if BitmasklistTest.unit_test_result == 1: + return + if self.result == 1: + BitmasklistTest.unit_test_result = 1 + return + + # This is the function that actually runs the test + def bitmasklist_test(self): + """ Run the test """ + print("\n##################\n") + cpu = bitmasklist(self.line, self.nr_entries) + print("Converted : ", self.line, "\nto ", cpu) + if cpu == self.expected_result: + self.result = 0 + print("PASS") + else: + self.result = 1 + print("expected : ", self.expected_result) + print("FAIL") + self.set_unit_test_result() + +# CPU 2 +t = \ + BitmasklistTest("00000000,00000000,00000000,00000000,00000000,00000004", 44, [2]) +t.bitmasklist_test() + +# CPU 34 +t = \ + BitmasklistTest("00000000,00000000,00000000,00000000,00000004,00000000", 44, [34]) +t.bitmasklist_test() + +# CPU 30 +t = \ + BitmasklistTest("00000000,00000000,00000000,00000000,00000000,40000000", 44, [30]) +t.bitmasklist_test() + +# CPU 0, 32 +t = \ + BitmasklistTest("00000000,00000000,00000000,00000000,00000001,00000001", 44, [0, 32]) +t.bitmasklist_test() + +# cpu 0-15 +t = \ + BitmasklistTest("ffff", 44, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) +t.bitmasklist_test() + +#cpu 0-71 +t = \ + BitmasklistTest("ff,ffffffff,ffffffff", 96, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71]) +t.bitmasklist_test() + +sys.exit(BitmasklistTest.unit_test_result) diff --git a/build/scripts-3.12/pflags b/build/scripts-3.12/pflags new file mode 100755 index 0000000..35fa661 --- /dev/null +++ b/build/scripts-3.12/pflags @@ -0,0 +1,86 @@ +#!/usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# print process flags +# Copyright (C) 2015 Red Hat Inc. 
+# Arnaldo Carvalho de Melo +# +# This application is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2. +# +# This application is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. + + +import procfs, re, fnmatch, sys +import argparse +from functools import reduce + +ps = None + +def thread_mapper(s): + global ps + + try: + return [int(s), ] + except: + pass + try: + return ps.find_by_regex(re.compile(fnmatch.translate(s))) + except: + return ps.find_by_name(s) + +def main(argv): + + global ps + ps = procfs.pidstats() + + parser = argparse.ArgumentParser(description='Print process flags') + parser.add_argument('pid', nargs='*', help='a list of pids or names') + args = parser.parse_args() + + if len(argv) > 1: + pids = args.pid + pids = reduce(lambda i, j: i + j, list(map(thread_mapper, pids))) + else: + pids = list(ps.processes.keys()) + + pids.sort() + len_comms = [] + for pid in pids: + if pid in ps: + try: + len(ps[pid]["stat"]["comm"]) + except (TypeError, FileNotFoundError): + continue + len_comms.append(len(ps[pid]["stat"]["comm"])) + + max_comm_len = max(len_comms, default=0) + del len_comms + + for pid in pids: + if pid not in ps: + continue + try: + flags = ps[pid].stat.process_flags() + except AttributeError: + continue + # Remove flags that were superseeded + if "PF_THREAD_BOUND" in flags and "PF_NO_SETAFFINITY" in flags: + flags.remove("PF_THREAD_BOUND") + if "PF_FLUSHER" in flags and "PF_NPROC_EXCEEDED" in flags: + flags.remove("PF_FLUSHER") + if "PF_SWAPOFF" in flags and "PF_MEMALLOC_NOIO" in flags: + flags.remove("PF_SWAPOFF") + if "PF_FREEZER_NOSIG" in flags and "PF_SUSPEND_TASK" in flags: + flags.remove("PF_FREEZER_NOSIG") + comm = ps[pid].stat["comm"] + flags.sort() + sflags = reduce(lambda i, j: "%s|%s" % (i, j), [a[3:] for a in flags]) + print("%6d %*s %s" %(pid, max_comm_len, comm, sflags)) + +if __name__ == '__main__': + main(sys.argv) diff --git a/debian/.debhelper/generated/python3-linux-procfs/dh_installchangelogs.dch.trimmed b/debian/.debhelper/generated/python3-linux-procfs/dh_installchangelogs.dch.trimmed new file mode 100644 index 0000000..3b08628 --- /dev/null +++ b/debian/.debhelper/generated/python3-linux-procfs/dh_installchangelogs.dch.trimmed @@ -0,0 +1,77 @@ +python-linux-procfs (0.7.3-3) unstable; urgency=medium + + * Team upload. + * Patch-out usage of python3-six. No clear way to contact upstream. + + -- Alexandre Detiste Mon, 11 Nov 2024 12:26:35 +0100 + +python-linux-procfs (0.7.3-2) unstable; urgency=medium + + * Team Upload + * Set DPT as Maintainer per new Team Policy + * Update Homepage + + -- Alexandre Detiste Thu, 22 Aug 2024 10:15:36 +0200 + +python-linux-procfs (0.7.3-1) unstable; urgency=medium + + * Team Upload + * New upstream version 0.7.3 + + [ Debian Janitor ] + * Update standards version to 4.6.1, no changes needed. + * Remove constraints unnecessary since buster (oldstable): + + python3-linux-procfs: Drop versioned constraint + on python-linux-procfs in Replaces & Breaks. + + -- Alexandre Detiste Sun, 14 Apr 2024 21:15:20 +0200 + +python-linux-procfs (0.6.3-1) unstable; urgency=medium + + [ Ondřej Nový ] + * d/control: Update Vcs-* fields with new Debian Python Team Salsa + layout. 
+ + [ Sandro Tosi ] + * Use the new Debian Python Team contact name and address + + [ Stewart Ferguson ] + * Imported Upstream version 0.6.3 + * Removing upstream patch + * Bumping Standards-Version: 4.5.1 + + -- Stewart Ferguson Wed, 20 Jan 2021 14:38:46 +0100 + +python-linux-procfs (0.6.2-1) unstable; urgency=medium + + * Imported Upstream version 0.6.2 + * Adding upstream patch to fix failed utilist import + * Bump Standards-Version to 4.5.0 + * Removing unused install override + * Moving man page from patch to d/ + * Removing clean target from rules. Using debian/clean instead + * Bumping compat 13 + * Adding Rules-Requires-Root + * Bumping copyright years + * Now using dh-sequence-python3 instead of --with python3 + + -- Stewart Ferguson Sat, 11 Jul 2020 11:59:58 +0200 + +python-linux-procfs (0.6.1-2) unstable; urgency=medium + + [ Ondřej Nový ] + * Bump Standards-Version to 4.4.0. + + [ Stewart Ferguson ] + * Removing python2 binary package + * Replacing pflags3 with pflags and removing upodate-alternatives + + Breaks python-linux-procfs + * Removing unused .gitignore ignore rule + * Removing superfluous copyright block for COPYING file + * Improving package description + * Removing redundant debhelper build-dep + + -- Stewart Ferguson Mon, 29 Jul 2019 21:44:23 +0200 + +# Older entries have been removed from this changelog. +# To read the complete changelog use `apt changelog python3-linux-procfs`. diff --git a/debian/.debhelper/generated/python3-linux-procfs/installed-by-dh_installdocs b/debian/.debhelper/generated/python3-linux-procfs/installed-by-dh_installdocs new file mode 100644 index 0000000..e69de29 diff --git a/debian/.debhelper/generated/python3-linux-procfs/installed-by-dh_installman b/debian/.debhelper/generated/python3-linux-procfs/installed-by-dh_installman new file mode 100644 index 0000000..248adaa --- /dev/null +++ b/debian/.debhelper/generated/python3-linux-procfs/installed-by-dh_installman @@ -0,0 +1 @@ +./debian/pflags.8 diff --git a/debian/.gitlab-ci.yml b/debian/.gitlab-ci.yml new file mode 100644 index 0000000..79d6bf5 --- /dev/null +++ b/debian/.gitlab-ci.yml @@ -0,0 +1,11 @@ +image: registry.salsa.debian.org/salsa-ci-team/ci-image-git-buildpackage:latest + +pages: + stage: deploy + artifacts: + paths: + - public + script: + - gitlab-ci-git-buildpackage + - gitlab-ci-lintian + - gitlab-ci-aptly diff --git a/debian/changelog b/debian/changelog index bad88e2..297be81 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,5 +1,96 @@ -template-repository (1.0-1) unstable; urgency=medium +python-linux-procfs (0.7.3-3) unstable; urgency=medium - * Initial release + * Team upload. + * Patch-out usage of python3-six. No clear way to contact upstream. - -- Tsic404 Sat, 28 Jan 2023 13:46:49 +0800 + -- Alexandre Detiste Mon, 11 Nov 2024 12:26:35 +0100 + +python-linux-procfs (0.7.3-2) unstable; urgency=medium + + * Team Upload + * Set DPT as Maintainer per new Team Policy + * Update Homepage + + -- Alexandre Detiste Thu, 22 Aug 2024 10:15:36 +0200 + +python-linux-procfs (0.7.3-1) unstable; urgency=medium + + * Team Upload + * New upstream version 0.7.3 + + [ Debian Janitor ] + * Update standards version to 4.6.1, no changes needed. + * Remove constraints unnecessary since buster (oldstable): + + python3-linux-procfs: Drop versioned constraint + on python-linux-procfs in Replaces & Breaks. 
+ + -- Alexandre Detiste Sun, 14 Apr 2024 21:15:20 +0200 + +python-linux-procfs (0.6.3-1) unstable; urgency=medium + + [ Ondřej Nový ] + * d/control: Update Vcs-* fields with new Debian Python Team Salsa + layout. + + [ Sandro Tosi ] + * Use the new Debian Python Team contact name and address + + [ Stewart Ferguson ] + * Imported Upstream version 0.6.3 + * Removing upstream patch + * Bumping Standards-Version: 4.5.1 + + -- Stewart Ferguson Wed, 20 Jan 2021 14:38:46 +0100 + +python-linux-procfs (0.6.2-1) unstable; urgency=medium + + * Imported Upstream version 0.6.2 + * Adding upstream patch to fix failed utilist import + * Bump Standards-Version to 4.5.0 + * Removing unused install override + * Moving man page from patch to d/ + * Removing clean target from rules. Using debian/clean instead + * Bumping compat 13 + * Adding Rules-Requires-Root + * Bumping copyright years + * Now using dh-sequence-python3 instead of --with python3 + + -- Stewart Ferguson Sat, 11 Jul 2020 11:59:58 +0200 + +python-linux-procfs (0.6.1-2) unstable; urgency=medium + + [ Ondřej Nový ] + * Bump Standards-Version to 4.4.0. + + [ Stewart Ferguson ] + * Removing python2 binary package + * Replacing pflags3 with pflags and removing upodate-alternatives + + Breaks python-linux-procfs + * Removing unused .gitignore ignore rule + * Removing superfluous copyright block for COPYING file + * Improving package description + * Removing redundant debhelper build-dep + + -- Stewart Ferguson Mon, 29 Jul 2019 21:44:23 +0200 + +python-linux-procfs (0.6.1-1) unstable; urgency=medium + + * Upstream release 0.6 -> 0.6.1 + * debhelper compat 10 -> 12 + + Build system python_distutils -> pybuild + + Removed old d/compat file + * Standards-Version 4.2.1 -> 4.3.0 + + No changes required + * Removing .gitignore from debian source + * Correcting license from GPL-2+ to GPL-2 + * Adding dh-python to Build-Depends + + -- Stewart Ferguson Sun, 10 Feb 2019 21:32:12 +0100 + +python-linux-procfs (0.6-1) unstable; urgency=medium + + * Initial release (Closes: #912771) + * Python 2 version supplied to satisfy current state of tuna. Tuna's GUI-mode + * still relies on python2. 
+ + -- Stewart Ferguson Sat, 03 Nov 2018 17:32:03 +0100 diff --git a/debian/clean b/debian/clean new file mode 100644 index 0000000..79acad2 --- /dev/null +++ b/debian/clean @@ -0,0 +1,2 @@ +debian/pflags.8 +python_linux_procfs.egg-info/ diff --git a/debian/compat b/debian/compat deleted file mode 100644 index b4de394..0000000 --- a/debian/compat +++ /dev/null @@ -1 +0,0 @@ -11 diff --git a/debian/control b/debian/control index cb7c4a0..856a042 100644 --- a/debian/control +++ b/debian/control @@ -1,15 +1,33 @@ -Source: template-repository -Section: unknown +Source: python-linux-procfs +Section: python Priority: optional -Maintainer: Tsic404 -Build-Depends: debhelper (>= 11) -Standards-Version: 4.1.3 -Homepage: https://github.com/deepin-community/template-repository -#Vcs-Browser: https://salsa.debian.org/debian/deepin-community-template-repository -#Vcs-Git: https://salsa.debian.org/debian/deepin-community-template-repository.git +Maintainer: Debian Python Team +Uploaders: Stewart Ferguson , +Build-Depends: + debhelper-compat (= 13), + dh-sequence-python3, + python3-setuptools, + python3-all-dev, + asciidoc-base, + libxml2-utils, + docbook-xml, + docbook-xsl, + xsltproc +Standards-Version: 4.6.1 +Homepage: https://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git/ +Vcs-Browser: https://salsa.debian.org/python-team/packages/python-linux-procfs +Vcs-Git: https://salsa.debian.org/python-team/packages/python-linux-procfs.git +Rules-Requires-Root: no -Package: template-repository -Architecture: any -Depends: ${shlibs:Depends}, ${misc:Depends} -Description: - +Package: python3-linux-procfs +Architecture: linux-any +Depends: + ${misc:Depends}, + ${python3:Depends}, +Description: Linux /proc abstraction classes in Python + Python abstractions to extract information from the Linux kernel /proc + files. + . + The proc filesystem is a pseudo-filesystem which provides an interface to + kernel data structures. This package provides a means to query that system + from a Python module. diff --git a/debian/copyright b/debian/copyright index f5c805e..a33a9bc 100644 --- a/debian/copyright +++ b/debian/copyright @@ -1,14 +1,25 @@ Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: template-repository -Source: https://github.com/deepin-community/template-repository +Upstream-Name: python-linux-procfs +Upstream-Contact: Jiri Kastner +Source: https://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git/ Files: * -Copyright: 2023 Tsic404 -License: GPL-2+ +Copyright: 2007-2015 Red Hat Inc. +License: GPL-2 +Comment: Content was authored by Arnaldo Carvalho de Melo in + 2007. Maintainership transferred to Jiri Kastner in + 2015. Jiri Kasterner (as of 2018) is the point of contact for all upstream + matters related to this library. The dates of this copyright were deduced from + procfs/procfs.py, a primary file in this library. + +Files: debian/* +Copyright: 2018-2020 Stewart Ferguson +License: GPL-2 + +License: GPL-2 This package is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. + the Free Software Foundation; version 2 of the License . 
This package is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of diff --git a/debian/debhelper-build-stamp b/debian/debhelper-build-stamp new file mode 100644 index 0000000..7421372 --- /dev/null +++ b/debian/debhelper-build-stamp @@ -0,0 +1 @@ +python3-linux-procfs diff --git a/debian/files b/debian/files new file mode 100644 index 0000000..56e2150 --- /dev/null +++ b/debian/files @@ -0,0 +1,2 @@ +python-linux-procfs_0.7.3-3_amd64.buildinfo python optional +python3-linux-procfs_0.7.3-3_amd64.deb python optional diff --git a/debian/patches/remove-six.patch b/debian/patches/remove-six.patch new file mode 100644 index 0000000..3c5e057 --- /dev/null +++ b/debian/patches/remove-six.patch @@ -0,0 +1,38 @@ +--- a/pflags ++++ b/pflags +@@ -18,7 +18,6 @@ + import procfs, re, fnmatch, sys + import argparse + from functools import reduce +-from six.moves import map + + ps = None + +--- a/procfs/procfs.py ++++ b/procfs/procfs.py +@@ -11,7 +11,6 @@ + import re + import time + from functools import reduce +-from six.moves import range + from procfs.utilist import bitmasklist + + VERSION = "0.7.3" +--- a/procfs/utilist.py ++++ b/procfs/utilist.py +@@ -6,7 +6,6 @@ + # Copyright (C) 2007 Red Hat, Inc. + # + +-from six.moves import range + + + def hexbitmask(l, nr_entries): +--- a/setup.py ++++ b/setup.py +@@ -29,5 +29,4 @@ + """, + packages = ["procfs"], + scripts = ['pflags'], +- install_requires = ['six'], + ) diff --git a/debian/patches/series b/debian/patches/series new file mode 100644 index 0000000..6a9f04d --- /dev/null +++ b/debian/patches/series @@ -0,0 +1 @@ +remove-six.patch diff --git a/debian/pflags.8 b/debian/pflags.8 new file mode 100644 index 0000000..9f34edc --- /dev/null +++ b/debian/pflags.8 @@ -0,0 +1,53 @@ +'\" t +.\" Title: pflags +.\" Author: [see the "AUTHORS" section] +.\" Generator: DocBook XSL Stylesheets vsnapshot +.\" Date: 11/11/2024 +.\" Manual: \ \& +.\" Source: \ \& +.\" Language: English +.\" +.TH "PFLAGS" "8" "11/11/2024" "\ \&" "\ \&" +.\" ----------------------------------------------------------------- +.\" * Define some portability stuff +.\" ----------------------------------------------------------------- +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.\" http://bugs.debian.org/507673 +.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html +.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +.ie \n(.g .ds Aq \(aq +.el .ds Aq ' +.\" ----------------------------------------------------------------- +.\" * set default formatting +.\" ----------------------------------------------------------------- +.\" disable hyphenation +.nh +.\" disable justification (adjust text to left margin only) +.ad l +.\" ----------------------------------------------------------------- +.\" * MAIN CONTENT STARTS HERE * +.\" ----------------------------------------------------------------- +.SH "NAME" +pflags \- Print process flags +.SH "SYNOPSIS" +.sp +pflags [PID] [NAME] +.SH "DESCRIPTION" +.sp +This script prints process flags and is written purely in python using the python\-linux\-procfs package +.SH "OPTIONS" +.PP +PID +.RS 4 +Is the process ID to be queried +.RE +.PP +NAME +.RS 4 +Is the name of the process to be queried +.RE +.SH "AUTHORS" +.sp +Arnaldo Carvalho de Melo +.sp +Man page written by Stewart Ferguson diff --git a/debian/pflags.8.asciidoc b/debian/pflags.8.asciidoc new file mode 100644 index 0000000..37be999 --- /dev/null +++ b/debian/pflags.8.asciidoc @@ 
-0,0 +1,35 @@ +pflags(8) +=========== + +NAME +---- +pflags - Print process flags + + +SYNOPSIS +-------- +pflags [PID] [NAME] + + +DESCRIPTION +----------- + +This script prints process flags and is written purely in python +using the python-linux-procfs package + + +OPTIONS +------- + +PID:: +Is the process ID to be queried + +NAME:: +Is the name of the process to be queried + + +AUTHORS +------- +Arnaldo Carvalho de Melo + +Man page written by Stewart Ferguson diff --git a/debian/python3-linux-procfs.debhelper.log b/debian/python3-linux-procfs.debhelper.log new file mode 100644 index 0000000..1108d89 --- /dev/null +++ b/debian/python3-linux-procfs.debhelper.log @@ -0,0 +1 @@ +dh_auto_build diff --git a/debian/python3-linux-procfs.manpages b/debian/python3-linux-procfs.manpages new file mode 100644 index 0000000..d25733c --- /dev/null +++ b/debian/python3-linux-procfs.manpages @@ -0,0 +1 @@ +debian/pflags.8 diff --git a/debian/python3-linux-procfs.postinst.debhelper b/debian/python3-linux-procfs.postinst.debhelper new file mode 100644 index 0000000..57cbc15 --- /dev/null +++ b/debian/python3-linux-procfs.postinst.debhelper @@ -0,0 +1,10 @@ + +# Automatically added by dh_python3 +if command -v py3compile >/dev/null 2>&1; then + py3compile -p python3-linux-procfs:amd64 +fi +if command -v pypy3compile >/dev/null 2>&1; then + pypy3compile -p python3-linux-procfs:amd64 || true +fi + +# End automatically added section diff --git a/debian/python3-linux-procfs.prerm.debhelper b/debian/python3-linux-procfs.prerm.debhelper new file mode 100644 index 0000000..d324ec3 --- /dev/null +++ b/debian/python3-linux-procfs.prerm.debhelper @@ -0,0 +1,10 @@ + +# Automatically added by dh_python3 +if command -v py3clean >/dev/null 2>&1; then + py3clean -p python3-linux-procfs:amd64 +else + dpkg -L python3-linux-procfs:amd64 | sed -En -e '/^(.*)\/(.+)\.py$/s,,rm "\1/__pycache__/\2".*,e' + find /usr/lib/python3/dist-packages/ -type d -name __pycache__ -empty -print0 | xargs --null --no-run-if-empty rmdir +fi + +# End automatically added section diff --git a/debian/python3-linux-procfs.substvars b/debian/python3-linux-procfs.substvars new file mode 100644 index 0000000..13de292 --- /dev/null +++ b/debian/python3-linux-procfs.substvars @@ -0,0 +1,3 @@ +python3:Depends=python3:any +misc:Depends= +misc:Pre-Depends= diff --git a/debian/python3-linux-procfs/DEBIAN/control b/debian/python3-linux-procfs/DEBIAN/control new file mode 100644 index 0000000..d138d46 --- /dev/null +++ b/debian/python3-linux-procfs/DEBIAN/control @@ -0,0 +1,17 @@ +Package: python3-linux-procfs +Source: python-linux-procfs +Version: 0.7.3-3 +Architecture: amd64 +Maintainer: Debian Python Team +Installed-Size: 65 +Depends: python3:any +Section: python +Priority: optional +Homepage: https://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git/ +Description: Linux /proc abstraction classes in Python + Python abstractions to extract information from the Linux kernel /proc + files. + . + The proc filesystem is a pseudo-filesystem which provides an interface to + kernel data structures. This package provides a means to query that system + from a Python module. 
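
For readers unfamiliar with the dh_python3-generated maintainer scripts above: py3compile byte-compiles the installed modules at install time. A rough stand-alone approximation in plain Python follows (a sketch only; py3compile also records per-interpreter bookkeeping that this does not):

import compileall

# Byte-compile the installed package, roughly what the postinst achieves.
# The path assumes the Debian dist-packages layout used by this package.
compileall.compile_dir("/usr/lib/python3/dist-packages/procfs", quiet=1)
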
diff --git a/debian/python3-linux-procfs/DEBIAN/md5sums b/debian/python3-linux-procfs/DEBIAN/md5sums new file mode 100644 index 0000000..b046c6d --- /dev/null +++ b/debian/python3-linux-procfs/DEBIAN/md5sums @@ -0,0 +1,10 @@ +4c7610655610a41cea153e050ed18702 usr/bin/pflags +7a3fd3f40d661912de53b121c7856ffa usr/lib/python3/dist-packages/procfs/__init__.py +0da5faf1bb10b3ed7bd3fc5d1c1aaff8 usr/lib/python3/dist-packages/procfs/procfs.py +45d274f39bbab8fef5b116afe84446fb usr/lib/python3/dist-packages/procfs/utilist.py +e040cb73f88d60fafe9abd2c56c54723 usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/PKG-INFO +68b329da9893e34099c7d8ad5cb9c940 usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/dependency_links.txt +64bba31d8b839dd7a83ad8657965aeef usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/top_level.txt +646972ec72638c2557194e0eb8b1a587 usr/share/doc/python3-linux-procfs/changelog.Debian.gz +d12fc6b3ebffc15b27b7295134930090 usr/share/doc/python3-linux-procfs/copyright +cffda690327637b5a2da63835e4bd271 usr/share/man/man8/pflags.8.gz diff --git a/debian/python3-linux-procfs/DEBIAN/postinst b/debian/python3-linux-procfs/DEBIAN/postinst new file mode 100755 index 0000000..fcf42e6 --- /dev/null +++ b/debian/python3-linux-procfs/DEBIAN/postinst @@ -0,0 +1,12 @@ +#!/bin/sh +set -e + +# Automatically added by dh_python3 +if command -v py3compile >/dev/null 2>&1; then + py3compile -p python3-linux-procfs:amd64 +fi +if command -v pypy3compile >/dev/null 2>&1; then + pypy3compile -p python3-linux-procfs:amd64 || true +fi + +# End automatically added section diff --git a/debian/python3-linux-procfs/DEBIAN/prerm b/debian/python3-linux-procfs/DEBIAN/prerm new file mode 100755 index 0000000..6120461 --- /dev/null +++ b/debian/python3-linux-procfs/DEBIAN/prerm @@ -0,0 +1,12 @@ +#!/bin/sh +set -e + +# Automatically added by dh_python3 +if command -v py3clean >/dev/null 2>&1; then + py3clean -p python3-linux-procfs:amd64 +else + dpkg -L python3-linux-procfs:amd64 | sed -En -e '/^(.*)\/(.+)\.py$/s,,rm "\1/__pycache__/\2".*,e' + find /usr/lib/python3/dist-packages/ -type d -name __pycache__ -empty -print0 | xargs --null --no-run-if-empty rmdir +fi + +# End automatically added section diff --git a/debian/python3-linux-procfs/usr/bin/pflags b/debian/python3-linux-procfs/usr/bin/pflags new file mode 100755 index 0000000..35fa661 --- /dev/null +++ b/debian/python3-linux-procfs/usr/bin/pflags @@ -0,0 +1,86 @@ +#!/usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# print process flags +# Copyright (C) 2015 Red Hat Inc. +# Arnaldo Carvalho de Melo +# +# This application is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2. +# +# This application is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+ + +import procfs, re, fnmatch, sys +import argparse +from functools import reduce + +ps = None + +def thread_mapper(s): + global ps + + try: + return [int(s), ] + except: + pass + try: + return ps.find_by_regex(re.compile(fnmatch.translate(s))) + except: + return ps.find_by_name(s) + +def main(argv): + + global ps + ps = procfs.pidstats() + + parser = argparse.ArgumentParser(description='Print process flags') + parser.add_argument('pid', nargs='*', help='a list of pids or names') + args = parser.parse_args() + + if len(argv) > 1: + pids = args.pid + pids = reduce(lambda i, j: i + j, list(map(thread_mapper, pids))) + else: + pids = list(ps.processes.keys()) + + pids.sort() + len_comms = [] + for pid in pids: + if pid in ps: + try: + len(ps[pid]["stat"]["comm"]) + except (TypeError, FileNotFoundError): + continue + len_comms.append(len(ps[pid]["stat"]["comm"])) + + max_comm_len = max(len_comms, default=0) + del len_comms + + for pid in pids: + if pid not in ps: + continue + try: + flags = ps[pid].stat.process_flags() + except AttributeError: + continue + # Remove flags that were superseeded + if "PF_THREAD_BOUND" in flags and "PF_NO_SETAFFINITY" in flags: + flags.remove("PF_THREAD_BOUND") + if "PF_FLUSHER" in flags and "PF_NPROC_EXCEEDED" in flags: + flags.remove("PF_FLUSHER") + if "PF_SWAPOFF" in flags and "PF_MEMALLOC_NOIO" in flags: + flags.remove("PF_SWAPOFF") + if "PF_FREEZER_NOSIG" in flags and "PF_SUSPEND_TASK" in flags: + flags.remove("PF_FREEZER_NOSIG") + comm = ps[pid].stat["comm"] + flags.sort() + sflags = reduce(lambda i, j: "%s|%s" % (i, j), [a[3:] for a in flags]) + print("%6d %*s %s" %(pid, max_comm_len, comm, sflags)) + +if __name__ == '__main__': + main(sys.argv) diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/__init__.py b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/__init__.py new file mode 100644 index 0000000..6deedf4 --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/__init__.py @@ -0,0 +1,17 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2008, 2009 Red Hat, Inc. +# +""" +Copyright (c) 2008, 2009 Red Hat Inc. + +Abstractions to extract information from the Linux kernel /proc files. +""" +__author__ = "Arnaldo Carvalho de Melo " +__license__ = "GPLv2 License" + +from .procfs import * +from .utilist import * diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/procfs.py b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/procfs.py new file mode 100644 index 0000000..3fcc45c --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/procfs.py @@ -0,0 +1,1110 @@ +#!/usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2007-2015 Red Hat, Inc. +# + +import os +import platform +import re +import time +from functools import reduce +from procfs.utilist import bitmasklist + +VERSION = "0.7.3" + + +def is_s390(): + """ Return True if running on s390 or s390x """ + machine = platform.machine() + return bool(re.search('s390', machine)) + + +def process_cmdline(pid_info): + """ + Returns the process command line, if available in the given `process' class, + if not available, falls back to using the comm (short process name) in its + pidstat key. 
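    E.g., a minimal sketch; the command line printed is illustrative:

    >>> import procfs
    >>> print(procfs.process_cmdline(procfs.pidstats()[1]))
    /usr/lib/systemd/systemd --switched-root --system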
+ """ + if pid_info["cmdline"]: + return reduce(lambda a, b: a + " %s" % b, pid_info["cmdline"]).strip() + + try: + """ If a pid disappears before we query it, return None """ + return pid_info["stat"]["comm"] + except: + return None + + +class pidstat: + """ + Provides a dictionary to access the fields in the + per process /proc/PID/stat files. + + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> p = procfs.pidstat(1) + >>> print p.keys() + ['majflt', 'rss', 'cnswap', 'cstime', 'pid', 'session', 'startstack', 'startcode', 'cmajflt', 'blocked', 'exit_signal', 'minflt', 'nswap', 'environ', 'priority', 'state', 'delayacct_blkio_ticks', 'policy', 'rt_priority', 'ppid', 'nice', 'cutime', 'endcode', 'wchan', 'num_threads', 'sigcatch', 'comm', 'stime', 'sigignore', 'tty_nr', 'kstkeip', 'utime', 'tpgid', 'itrealvalue', 'kstkesp', 'rlim', 'signal', 'pgrp', 'flags', 'starttime', 'cminflt', 'vsize', 'processor'] + + And then access the various process properties using it as a dictionary: + + >>> print p['comm'] + systemd + >>> print p['priority'] + 20 + >>> print p['state'] + S + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + """ + + # Entries with the same value, the one with a comment after it is the + # more recent, having replaced the other name in v4.1-rc kernel times. + + PF_ALIGNWARN = 0x00000001 + PF_STARTING = 0x00000002 + PF_EXITING = 0x00000004 + PF_EXITPIDONE = 0x00000008 + PF_VCPU = 0x00000010 + PF_WQ_WORKER = 0x00000020 # /* I'm a workqueue worker */ + PF_FORKNOEXEC = 0x00000040 + PF_MCE_PROCESS = 0x00000080 # /* process policy on mce errors */ + PF_SUPERPRIV = 0x00000100 + PF_DUMPCORE = 0x00000200 + PF_SIGNALED = 0x00000400 + PF_MEMALLOC = 0x00000800 + # /* set_user noticed that RLIMIT_NPROC was exceeded */ + PF_NPROC_EXCEEDED = 0x00001000 + PF_FLUSHER = 0x00001000 + PF_USED_MATH = 0x00002000 + PF_USED_ASYNC = 0x00004000 # /* used async_schedule*(), used by module init */ + PF_NOFREEZE = 0x00008000 + PF_FROZEN = 0x00010000 + PF_FSTRANS = 0x00020000 + PF_KSWAPD = 0x00040000 + PF_MEMALLOC_NOIO = 0x00080000 # /* Allocating memory without IO involved */ + PF_SWAPOFF = 0x00080000 + PF_LESS_THROTTLE = 0x00100000 + PF_KTHREAD = 0x00200000 + PF_RANDOMIZE = 0x00400000 + PF_SWAPWRITE = 0x00800000 + PF_SPREAD_PAGE = 0x01000000 + PF_SPREAD_SLAB = 0x02000000 + PF_THREAD_BOUND = 0x04000000 + # /* Userland is not allowed to meddle with cpus_allowed */ + PF_NO_SETAFFINITY = 0x04000000 + PF_MCE_EARLY = 0x08000000 # /* Early kill for mce process policy */ + PF_MEMPOLICY = 0x10000000 + PF_MUTEX_TESTER = 0x20000000 + PF_FREEZER_SKIP = 0x40000000 + PF_FREEZER_NOSIG = 0x80000000 + # /* this thread called freeze_processes and should not be frozen */ + PF_SUSPEND_TASK = 0x80000000 + + proc_stat_fields = ["pid", "comm", "state", "ppid", "pgrp", "session", + "tty_nr", "tpgid", "flags", "minflt", "cminflt", + "majflt", "cmajflt", "utime", "stime", "cutime", + "cstime", "priority", "nice", "num_threads", + "itrealvalue", "starttime", "vsize", "rss", + "rlim", "startcode", "endcode", "startstack", + "kstkesp", "kstkeip", "signal", "blocked", + "sigignore", "sigcatch", "wchan", "nswap", + "cnswap", "exit_signal", "processor", + "rt_priority", "policy", + "delayacct_blkio_ticks", "environ"] + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + try: + self.load(basedir) + except FileNotFoundError: + # The file representing the pid has 
disappeared + # propagate the error to the user to handle + raise + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + try: + f = open(f"{basedir}/{self.pid}/stat") + except FileNotFoundError: + # The pid has disappeared, propagate the error + raise + fields = f.readline().strip().split(') ') + f.close() + fields = fields[0].split(' (') + fields[1].split() + self.fields = {} + nr_fields = min(len(fields), len(self.proc_stat_fields)) + for i in range(nr_fields): + attrname = self.proc_stat_fields[i] + value = fields[i] + if attrname == "comm": + self.fields["comm"] = value.strip('()') + else: + try: + self.fields[attrname] = int(value) + except: + self.fields[attrname] = value + + def is_bound_to_cpu(self): + """ + Returns true if this process has a fixed smp affinity mask, + not allowing it to be moved to a different set of CPUs. + """ + return bool(self.fields["flags"] & self.PF_THREAD_BOUND) + + def process_flags(self): + """ + Returns a list with all the process flags known, details depend + on kernel version, declared in the file include/linux/sched.h in + the kernel sources. + + As of v4.2-rc7 these include (from include/linux/sched.h comments): + + PF_EXITING Getting shut down + PF_EXITPIDONE Pi exit done on shut down + PF_VCPU I'm a virtual CPU + PF_WQ_WORKER I'm a workqueue worker + PF_FORKNOEXEC Forked but didn't exec + PF_MCE_PROCESS Process policy on mce errors + PF_SUPERPRIV Used super-user privileges + PF_DUMPCORE Dumped core + PF_SIGNALED Killed by a signal + PF_MEMALLOC Allocating memory + PF_NPROC_EXCEEDED Set_user noticed that RLIMIT_NPROC was exceeded + PF_USED_MATH If unset the fpu must be initialized before use + PF_USED_ASYNC Used async_schedule*(), used by module init + PF_NOFREEZE This thread should not be frozen + PF_FROZEN Frozen for system suspend + PF_FSTRANS Inside a filesystem transaction + PF_KSWAPD I am kswapd + PF_MEMALLOC_NOIO Allocating memory without IO involved + PF_LESS_THROTTLE Throttle me less: I clean memory + PF_KTHREAD I am a kernel thread + PF_RANDOMIZE Randomize virtual address space + PF_SWAPWRITE Allowed to write to swap + PF_NO_SETAFFINITY Userland is not allowed to meddle with cpus_allowed + PF_MCE_EARLY Early kill for mce process policy + PF_MUTEX_TESTER Thread belongs to the rt mutex tester + PF_FREEZER_SKIP Freezer should not count it as freezable + PF_SUSPEND_TASK This thread called freeze_processes and + should not be frozen + + """ + sflags = [] + for attr in dir(self): + if attr[:3] != "PF_": + continue + value = getattr(self, attr) + if value & self.fields["flags"]: + sflags.append(attr) + + return sflags + + +def cannot_set_affinity(self, pid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +def cannot_set_thread_affinity(self, pid, tid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid].threads[tid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +class pidstatus: + """ + Provides a dictionary to access the fields + in the per process /proc/PID/status files. 
+ This provides additional information about processes and threads to + what can be obtained with the procfs.pidstat() class. + + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> import procfs + >>> p = procfs.pidstatus(1) + >>> print p.keys() + ['VmExe', 'CapBnd', 'NSpgid', 'Tgid', 'NSpid', 'VmSize', 'VmPMD', 'ShdPnd', 'State', 'Gid', 'nonvoluntary_ctxt_switches', 'SigIgn', 'VmStk', 'VmData', 'SigCgt', 'CapEff', 'VmPTE', 'Groups', 'NStgid', 'Threads', 'PPid', 'VmHWM', 'NSsid', 'VmSwap', 'Name', 'SigBlk', 'Mems_allowed_list', 'VmPeak', 'Ngid', 'VmLck', 'SigQ', 'VmPin', 'Mems_allowed', 'CapPrm', 'Seccomp', 'VmLib', 'Cpus_allowed', 'Uid', 'SigPnd', 'Pid', 'Cpus_allowed_list', 'TracerPid', 'CapInh', 'voluntary_ctxt_switches', 'VmRSS', 'FDSize'] + >>> print p["Pid"] + 1 + >>> print p["Threads"] + 1 + >>> print p["VmExe"] + 1248 kB + >>> print p["Cpus_allowed"] + f + >>> print p["SigQ"] + 0/30698 + >>> print p["VmPeak"] + 320300 kB + >>> + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + + In the man page there will be references to further documentation, like + referring to the "getrlimit(2)" man page when explaining the "SigQ" + line/field. + """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.load(basedir) + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + self.fields = {} + with open(f"{basedir}/{self.pid}/status") as f: + for line in f.readlines(): + fields = line.split(":") + if len(fields) != 2: + continue + name = fields[0] + value = fields[1].strip() + try: + self.fields[name] = int(value) + except: + self.fields[name] = value + + +class process: + """ + Information about a process with a given pid, provides a dictionary with + two entries, instances of different wrappers for /proc/ process related + meta files: "stat" and "status", see the documentation for procfs.pidstat + and procfs.pidstatus for further info about those classes. 
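    E.g., a minimal sketch; the pid and the values printed are illustrative:

    >>> import procfs
    >>> p = procfs.process(1)
    >>> print(p["stat"]["comm"])
    systemd
    >>> print(p["status"]["Threads"])
    1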
+ """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.basedir = basedir + + def __getitem__(self, attr): + if not hasattr(self, attr): + if attr in ("stat", "status"): + if attr == "stat": + sclass = pidstat + else: + sclass = pidstatus + + try: + setattr(self, attr, sclass(self.pid, self.basedir)) + except FileNotFoundError: + # The pid has disappeared, progate the error + raise + elif attr == "cmdline": + self.load_cmdline() + elif attr == "threads": + self.load_threads() + elif attr == "cgroups": + self.load_cgroups() + elif attr == "environ": + self.load_environ() + + return getattr(self, attr) + + def has_key(self, attr): + return hasattr(self, attr) + + def __contains__(self, attr): + return hasattr(self, attr) + + def load_cmdline(self): + try: + with open(f"/proc/{self.pid}/cmdline") as f: + self.cmdline = f.readline().strip().split('\0')[:-1] + except FileNotFoundError: + """ This can happen when a pid disappears """ + self.cmdline = None + except UnicodeDecodeError: + """ TODO - this shouldn't happen, needs to be investigated """ + self.cmdline = None + + def load_threads(self): + self.threads = pidstats(f"/proc/{self.pid}/task/") + # remove thread leader + del self.threads[self.pid] + + def load_cgroups(self): + self.cgroups = "" + with open(f"/proc/{self.pid}/cgroup") as f: + for line in reversed(f.readlines()): + if len(self.cgroups) != 0: + self.cgroups = self.cgroups + "," + line[:-1] + else: + self.cgroups = line[:-1] + + def load_environ(self): + """ + Loads the environment variables for this process. The entries then + become available via the 'environ' member, or via the 'environ' + dict key when accessing as p["environ"]. + + E.g.: + + + >>> all_processes = procfs.pidstats() + >>> firefox_pid = all_processes.find_by_name("firefox") + >>> firefox_process = all_processes[firefox_pid[0]] + >>> print firefox_process["environ"]["PWD"] + /home/acme + >>> print len(firefox_process.environ.keys()) + 66 + >>> print firefox_process["environ"]["SHELL"] + /bin/bash + >>> print firefox_process["environ"]["USERNAME"] + acme + >>> print firefox_process["environ"]["HOME"] + /home/acme + >>> print firefox_process["environ"]["MAIL"] + /var/spool/mail/acme + >>> + """ + self.environ = {} + with open(f"/proc/{self.pid}/environ") as f: + for x in f.readline().split('\0'): + if len(x) > 0: + y = x.split('=') + self.environ[y[0]] = y[1] + + +class pidstats: + """ + Provides access to all the processes in the system, to get a picture of + how many processes there are at any given moment. + + The entries can be accessed as a dictionary, keyed by pid. Also there are + methods to find processes that match a given COMM or regular expression. + """ + + def __init__(self, basedir="/proc"): + self.basedir = basedir + self.processes = {} + self.reload() + + def __getitem__(self, key): + return self.processes[key] + + def __delitem__(self, key): + # not clear on why this can fail, but it can + try: + del self.processes[key] + except: + pass + + def keys(self): + return list(self.processes.keys()) + + def values(self): + return list(self.processes.values()) + + def has_key(self, key): + return key in self.processes + + def items(self): + return self.processes + + def __contains__(self, key): + return key in self.processes + + def reload(self): + """ + This operation will throw away the current dictionary contents, + if any, and read all the pid files from /proc/, instantiating a + 'process' instance for each of them. 
+ + This is a high overhead operation, and should be avoided if the + perf python binding can be used to detect when new threads appear + and existing ones terminate. + + In RHEL it is found in the python-perf rpm package. + + More information about the perf facilities can be found in the + 'perf_event_open' man page. + """ + del self.processes + self.processes = {} + pids = os.listdir(self.basedir) + for spid in pids: + try: + pid = int(spid) + except: + continue + + self.processes[pid] = process(pid, self.basedir) + + def reload_threads(self): + to_remove = [] + for pid in list(self.processes.keys()): + try: + self.processes[pid].load_threads() + except OSError: + # process vanished, remove it + to_remove.append(pid) + for pid in to_remove: + del self.processes[pid] + + def find_by_name(self, name): + name = name[:15] + pids = [] + for pid in list(self.processes.keys()): + try: + if name == self.processes[pid]["stat"]["comm"]: + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + + return pids + + def find_by_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(self.processes[pid]["stat"]["comm"]): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def find_by_cmdline_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(process_cmdline(self.processes[pid])): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def get_per_cpu_rtprios(self, basename): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + name = f"{basename}/{cpu}" + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def get_rtprios(self, name): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def is_bound_to_cpu(self, pid): + """ + Checks if a given pid can't have its SMP affinity mask changed. + """ + return self.processes[pid]["stat"].is_bound_to_cpu() + + +class interrupts: + """ + Information about IRQs in the system. A dictionary keyed by IRQ number + will have as its value another dictionary with "cpu", "type" and "users" + keys, with the SMP affinity mask, type of IRQ and the drivers associated + with each interrupt. 
+ + The information comes from the /proc/interrupts file, documented in + 'man procfs(5)', for instance, the 'cpu' dict is an array with one entry + per CPU present in the sistem, each value being the number of interrupts + that took place per CPU. + + E.g.: + + >>> import procfs + >>> interrupts = procfs.interrupts() + >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt") + >>> print thunderbolt_irq + 34 + >>> thunderbolt = interrupts[thunderbolt_irq] + >>> print thunderbolt + {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']} + >>> + """ + + def __init__(self): + self.interrupts = {} + self.reload() + + def __getitem__(self, key): + return self.interrupts[str(key)] + + def keys(self): + return list(self.interrupts.keys()) + + def values(self): + return list(self.interrupts.values()) + + def has_key(self, key): + return str(key) in self.interrupts + + def items(self): + return self.interrupts + + def __contains__(self, key): + return str(key) in self.interrupts + + def reload(self): + del self.interrupts + self.interrupts = {} + with open("/proc/interrupts") as f: + for line in f.readlines(): + line = line.strip() + fields = line.split() + if fields[0][:3] == "CPU": + self.nr_cpus = len(fields) + continue + irq = fields[0].strip(":") + self.interrupts[irq] = {} + self.interrupts[irq] = self.parse_entry(fields[1:], line) + try: + nirq = int(irq) + except: + continue + self.interrupts[irq]["affinity"] = self.parse_affinity(nirq) + + def parse_entry(self, fields, line): + dict = {} + dict["cpu"] = [] + dict["cpu"].append(int(fields[0])) + nr_fields = len(fields) + if nr_fields >= self.nr_cpus: + dict["cpu"] += [int(i) for i in fields[1:self.nr_cpus]] + if nr_fields > self.nr_cpus: + dict["type"] = fields[self.nr_cpus] + # look if there are users (interrupts 3 and 4 haven't) + if nr_fields > self.nr_cpus + 1: + dict["users"] = [a.strip() + for a in fields[nr_fields - 1].split(',')] + else: + dict["users"] = [] + return dict + + def parse_affinity(self, irq): + try: + with open(f"/proc/irq/{irq}/smp_affinity") as f: + line = f.readline() + return bitmasklist(line, self.nr_cpus) + except IOError: + return [0, ] + + def find_by_user(self, user): + """ + Looks up a interrupt number by the name of one of its users" + + E.g.: + + >>> import procfs + >>> interrupts = procfs.interrupts() + >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt") + >>> print thunderbolt_irq + 34 + >>> thunderbolt = interrupts[thunderbolt_irq] + >>> print thunderbolt + {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']} + >>> + """ + for i in list(self.interrupts.keys()): + if "users" in self.interrupts[i] and \ + user in self.interrupts[i]["users"]: + return i + return None + + def find_by_user_regex(self, regex): + """ + Looks up a interrupt number by a regex that matches names of its users" + + E.g.: + + >>> import procfs + >>> import re + >>> interrupts = procfs.interrupts() + >>> usb_controllers = interrupts.find_by_user_regex(re.compile(".*hcd")) + >>> print usb_controllers + ['22', '23', '31'] + >>> print [ interrupts[irq]["users"] for irq in usb_controllers ] + [['ehci_hcd:usb4'], ['ehci_hcd:usb3'], ['xhci_hcd']] + >>> + """ + irqs = [] + for i in list(self.interrupts.keys()): + if "users" not in self.interrupts[i]: + continue + for user in self.interrupts[i]["users"]: + if regex.match(user): + irqs.append(i) + break + return irqs + + +class cmdline: + """ + Parses the kernel command line (/proc/cmdline), 
turning it into a dictionary." + + Useful to figure out if some kernel boolean knob has been turned on, + as well as to find the value associated to other kernel knobs. + + It can also be used to find out about parameters passed to the + init process, such as 'BOOT_IMAGE', etc. + + E.g.: + >>> import procfs + >>> kcmd = procfs.cmdline() + >>> print kcmd.keys() + ['LANG', 'BOOT_IMAGE', 'quiet', 'rhgb', 'rd.lvm.lv', 'ro', 'root'] + >>> print kcmd["BOOT_IMAGE"] + /vmlinuz-4.3.0-rc1+ + >>> + """ + + def __init__(self): + self.options = {} + self.parse() + + def parse(self): + with open("/proc/cmdline") as f: + for option in f.readline().strip().split(): + fields = option.split("=") + if len(fields) == 1: + self.options[fields[0]] = True + else: + self.options[fields[0]] = fields[1] + + def __getitem__(self, key): + return self.options[key] + + def keys(self): + return list(self.options.keys()) + + def values(self): + return list(self.options.values()) + + def items(self): + return self.options + + +class cpuinfo: + """ + Dictionary with information about CPUs in the system. + + Please refer to 'man procfs(5)' for further information about the + '/proc/cpuinfo' file, that is the source of the information provided + by this class. The 'man lscpu(1)' also has information about a program that + uses the '/proc/cpuinfo' file. + + Using this class one can obtain the number of CPUs in a system: + + >>> cpus = procfs.cpuinfo() + >>> print cpus.nr_cpus + 4 + + It is also possible to figure out aspects of the CPU topology, such as + how many CPU physical sockets exists, i.e. groups of CPUs sharing + components such as CPU memory caches: + + >>> print len(cpus.sockets) + 1 + + Additionally dictionary with information common to all CPUs in the system + is available: + + >>> print cpus["model name"] + Intel(R) Core(TM) i7-3667U CPU @ 2.00GHz + >>> print cpus["cache size"] + 4096 KB + >>> + """ + + def __init__(self, filename="/proc/cpuinfo"): + self.tags = {} + self.nr_cpus = 0 + self.sockets = [] + self.parse(filename) + + def __getitem__(self, key): + return self.tags[key.lower()] + + def keys(self): + return list(self.tags.keys()) + + def values(self): + return list(self.tags.values()) + + def items(self): + return self.tags + + def parse(self, filename): + with open(filename) as f: + for line in f.readlines(): + line = line.strip() + if not line: + continue + fields = line.split(":") + tagname = fields[0].strip().lower() + if tagname == "processor": + self.nr_cpus += 1 + continue + if is_s390() and tagname == "cpu number": + self.nr_cpus += 1 + continue + if tagname == "core id": + continue + self.tags[tagname] = fields[1].strip() + if tagname == "physical id": + socket_id = self.tags[tagname] + if socket_id not in self.sockets: + self.sockets.append(socket_id) + self.nr_sockets = self.sockets and len(self.sockets) or \ + (self.nr_cpus / + ("siblings" in self.tags and int(self.tags["siblings"]) or 1)) + self.nr_cores = ("cpu cores" in self.tags and int( + self.tags["cpu cores"]) or 1) * self.nr_sockets + + +class smaps_lib: + """ + Representation of an mmap in place for a process. Can be used to figure + out which processes have an library mapped, etc. + + The 'perm' member can be used to figure out executable mmaps, + i.e. libraries. + + The 'vm_start' and 'vm_end' in turn can be used when trying to resolve + processor instruction pointer addresses to a symbol name in a library. 
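    E.g., a minimal sketch; the pid, addresses and mapping printed are illustrative:

    >>> import procfs
    >>> lib = procfs.smaps(1)[0]
    >>> print("%#x-%#x %s %s" % (lib.vm_start, lib.vm_end, lib.perms, lib.name))
    0x55d0a7c00000-0x55d0a7e5b000 r-xp /usr/lib/systemd/systemd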
+ """ + + def __init__(self, lines): + fields = lines[0].split() + self.vm_start, self.vm_end = [int(a, 16) for a in fields[0].split("-")] + self.perms = fields[1] + self.offset = int(fields[2], 16) + self.major, self.minor = fields[3].split(":") + self.inode = int(fields[4]) + if len(fields) > 5: + self.name = fields[5] + else: + self.name = None + self.tags = {} + for line in lines[1:]: + fields = line.split() + tag = fields[0][:-1].lower() + try: + self.tags[tag] = int(fields[1]) + except: + # VmFlags are strings + self.tags[tag] = fields + + def __getitem__(self, key): + return self.tags[key.lower()] + + def keys(self): + return list(self.tags.keys()) + + def values(self): + return list(self.tags.values()) + + def items(self): + return self.tags + + +class smaps: + """ + List of libraries mapped by a process. Parses the lines in + the /proc/PID/smaps file, that is further documented in the + procfs(5) man page. + + Example: Listing the executable maps for the 'sshd' process: + + >>> import procfs + >>> processes = procfs.pidstats() + >>> sshd = processes.find_by_name("sshd") + >>> sshd_maps = procfs.smaps(sshd[0]) + >>> for i in range(len(sshd_maps)): + ... if 'x' in sshd_maps[i].perms: + ... print "%s: %s" % (sshd_maps[i].name, sshd_maps[i].perms) + ... + /usr/sbin/sshd: r-xp + /usr/lib64/libnss_files-2.20.so: r-xp + /usr/lib64/librt-2.20.so: r-xp + /usr/lib64/libkeyutils.so.1.5: r-xp + /usr/lib64/libkrb5support.so.0.1: r-xp + /usr/lib64/libfreebl3.so: r-xp + /usr/lib64/libpthread-2.20.so: r-xp + ... + """ + + def __init__(self, pid): + self.pid = pid + self.entries = [] + self.reload() + + def parse_entry(self, f, line): + lines = [] + if not line: + line = f.readline().strip() + if not line: + return + lines.append(line) + while True: + line = f.readline() + if not line: + break + line = line.strip() + if line.split()[0][-1] == ':': + lines.append(line) + else: + break + self.entries.append(smaps_lib(lines)) + return line + + def __len__(self): + return len(self.entries) + + def __getitem__(self, index): + return self.entries[index] + + def reload(self): + line = None + with open(f"/proc/{self.pid}/smaps") as f: + while True: + line = self.parse_entry(f, line) + if not line: + break + self.nr_entries = len(self.entries) + + def find_by_name_fragment(self, fragment): + result = [] + for i in range(self.nr_entries): + if self.entries[i].name and \ + self.entries[i].name.find(fragment) >= 0: + result.append(self.entries[i]) + + return result + + +class cpustat: + """ + CPU statistics, obtained from a line in the '/proc/stat' file, Please + refer to 'man procfs(5)' for further information about the '/proc/stat' + file, that is the source of the information provided by this class. + """ + + def __init__(self, fields): + self.name = fields[0] + (self.user, + self.nice, + self.system, + self.idle, + self.iowait, + self.irq, + self.softirq) = [int(i) for i in fields[1:8]] + if len(fields) > 7: + self.steal = int(fields[7]) + if len(fields) > 8: + self.guest = int(fields[8]) + + def __repr__(self): + s = f"< user: {self.user}, nice: {self.nice}, system: {self.system}, idle: {self.idle}, iowait: {self.iowait}, irq: {self.irq}, softirq: {self.softirq}" + if hasattr(self, 'steal'): + s += f", steal: {self.steal}" + if hasattr(self, 'guest'): + s += f", guest: {self.guest}" + return s + ">" + + +class cpusstats: + """ + Dictionary with information about CPUs in the system. First entry in the + dictionary gives an aggregate view of all CPUs, each other entry is about + separate CPUs. 
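    E.g., a minimal sketch sampling utilization over one second; the percentages printed are illustrative:

    >>> import procfs, time
    >>> stats = procfs.cpusstats()
    >>> time.sleep(1)
    >>> stats.reload()
    >>> print("all: %d%%, cpu0: %d%%" % (stats[0].usage, stats[1].usage))
    all: 4%, cpu0: 2%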
Please refer to 'man procfs(5)' for further information + about the '/proc/stat' file, that is the source of the information provided + by this class. + """ + + def __init__(self, filename="/proc/stat"): + self.entries = {} + self.time = None + self.hertz = os.sysconf(2) + self.filename = filename + self.reload() + + def __iter__(self): + return iter(self.entries) + + def __getitem__(self, key): + return self.entries[key] + + def __len__(self): + return len(list(self.entries.keys())) + + def keys(self): + return list(self.entries.keys()) + + def values(self): + return list(self.entries.values()) + + def items(self): + return self.entries + + def reload(self): + last_entries = self.entries + self.entries = {} + with open(self.filename) as f: + for line in f.readlines(): + fields = line.strip().split() + if fields[0][:3].lower() != "cpu": + continue + c = cpustat(fields) + if c.name == "cpu": + idx = 0 + else: + idx = int(c.name[3:]) + 1 + self.entries[idx] = c + last_time = self.time + self.time = time.time() + if last_entries: + delta_sec = self.time - last_time + interval_hz = delta_sec * self.hertz + for cpu in list(self.entries.keys()): + if cpu not in last_entries: + curr.usage = 0 + continue + curr = self.entries[cpu] + prev = last_entries[cpu] + delta = (curr.user - prev.user) + \ + (curr.nice - prev.nice) + \ + (curr.system - prev.system) + curr.usage = (delta / interval_hz) * 100 + curr.usage = min(curr.usage, 100) + + +if __name__ == '__main__': + import sys + + ints = interrupts() + + for i in list(ints.interrupts.keys()): + print(f"{i}: {ints.interrupts[i]}") + + options = cmdline() + for o in list(options.options.keys()): + print(f"{o}: {options.options[o]}") + + cpu = cpuinfo() + print(f"\ncpuinfo data: {cpu.nr_cpus} processors") + for tag in list(cpu.keys()): + print(f"{tag}={cpu[tag]}") + + print("smaps:\n" + ("-" * 40)) + s = smaps(int(sys.argv[1])) + for i in range(s.nr_entries): + print(f"{s.entries[i].vm_start:#x} {s.entries[i].name}") + print("-" * 40) + for a in s.find_by_name_fragment(sys.argv[2]): + print(a["Size"]) + + ps = pidstats() + print(ps[1]) + + cs = cpusstats() + while True: + time.sleep(1) + cs.reload() + for cpu in cs: + print(f"{cpu}: {cs[cpu]}") + print("-" * 10) diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/utilist.py b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/utilist.py new file mode 100644 index 0000000..2e260b0 --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/procfs/utilist.py @@ -0,0 +1,40 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2007 Red Hat, Inc. 
+# + + + +def hexbitmask(l, nr_entries): + hexbitmask = [] + bit = 0 + mask = 0 + for entry in range(nr_entries): + if entry in l: + mask |= (1 << bit) + bit += 1 + if bit == 32: + bit = 0 + hexbitmask.insert(0, mask) + mask = 0 + + if bit < 32 and mask != 0: + hexbitmask.insert(0, mask) + + return hexbitmask + +def bitmasklist(line, nr_entries): + hexmask = line.strip().replace(",", "") + bitmasklist = [] + entry = 0 + bitmask = bin(int(hexmask, 16))[2::] + for i in reversed(bitmask): + if int(i) & 1: + bitmasklist.append(entry) + entry += 1 + if entry == nr_entries: + break + return bitmasklist diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/PKG-INFO b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/PKG-INFO new file mode 100644 index 0000000..15f662b --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/PKG-INFO @@ -0,0 +1,11 @@ +Metadata-Version: 2.1 +Name: python-linux-procfs +Version: 0.7.3 +Summary: Linux /proc abstraction classes +Home-page: http://userweb.kernel.org/python-linux-procfs +Author: Arnaldo Carvalho de Melo +Author-email: acme@redhat.com +License: GPLv2 +License-File: COPYING + +Abstractions to extract information from the Linux kernel /proc files. diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/dependency_links.txt b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/top_level.txt b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/top_level.txt new file mode 100644 index 0000000..8b71071 --- /dev/null +++ b/debian/python3-linux-procfs/usr/lib/python3/dist-packages/python_linux_procfs-0.7.3.egg-info/top_level.txt @@ -0,0 +1 @@ +procfs diff --git a/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/changelog.Debian.gz b/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/changelog.Debian.gz new file mode 100644 index 0000000..f72bb01 Binary files /dev/null and b/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/changelog.Debian.gz differ diff --git a/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/copyright b/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/copyright new file mode 100644 index 0000000..a33a9bc --- /dev/null +++ b/debian/python3-linux-procfs/usr/share/doc/python3-linux-procfs/copyright @@ -0,0 +1,33 @@ +Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: python-linux-procfs +Upstream-Contact: Jiri Kastner +Source: https://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git/ + +Files: * +Copyright: 2007-2015 Red Hat Inc. +License: GPL-2 +Comment: Content was authored by Arnaldo Carvalho de Melo in + 2007. Maintainership transferred to Jiri Kastner in + 2015. Jiri Kasterner (as of 2018) is the point of contact for all upstream + matters related to this library. The dates of this copyright were deduced from + procfs/procfs.py, a primary file in this library. 
+ +Files: debian/* +Copyright: 2018-2020 Stewart Ferguson +License: GPL-2 + +License: GPL-2 + This package is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License + . + This package is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + . + You should have received a copy of the GNU General Public License + along with this program. If not, see + . + On Debian systems, the complete text of the GNU General + Public License version 2 can be found in "/usr/share/common-licenses/GPL-2". diff --git a/debian/python3-linux-procfs/usr/share/man/man8/pflags.8.gz b/debian/python3-linux-procfs/usr/share/man/man8/pflags.8.gz new file mode 100644 index 0000000..9d7354b Binary files /dev/null and b/debian/python3-linux-procfs/usr/share/man/man8/pflags.8.gz differ diff --git a/debian/rules b/debian/rules index 2d33f6a..7c9800a 100755 --- a/debian/rules +++ b/debian/rules @@ -1,4 +1,12 @@ #!/usr/bin/make -f +# Uncomment this to turn on verbose mode. +#export DH_VERBOSE=1 +export PYBUILD_NAME=linux-procfs + +override_dh_auto_build: + dh_auto_build + a2x -d manpage -f manpage debian/pflags.8.asciidoc + %: - dh $@ + dh $@ --buildsystem pybuild diff --git a/debian/watch b/debian/watch new file mode 100644 index 0000000..c67f76c --- /dev/null +++ b/debian/watch @@ -0,0 +1,2 @@ +version=4 +https://git.kernel.org/pub/scm/libs/python/python-linux-procfs/python-linux-procfs.git /pub/scm/libs/python/python-linux-procfs/python-linux-procfs\.git/snapshot/python-linux-procfs-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) diff --git a/pflags b/pflags new file mode 100755 index 0000000..46d396c --- /dev/null +++ b/pflags @@ -0,0 +1,87 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# print process flags +# Copyright (C) 2015 Red Hat Inc. +# Arnaldo Carvalho de Melo +# +# This application is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2. +# +# This application is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. 
+ + +import procfs, re, fnmatch, sys +import argparse +from functools import reduce +from six.moves import map + +ps = None + +def thread_mapper(s): + global ps + + try: + return [int(s), ] + except: + pass + try: + return ps.find_by_regex(re.compile(fnmatch.translate(s))) + except: + return ps.find_by_name(s) + +def main(argv): + + global ps + ps = procfs.pidstats() + + parser = argparse.ArgumentParser(description='Print process flags') + parser.add_argument('pid', nargs='*', help='a list of pids or names') + args = parser.parse_args() + + if len(argv) > 1: + pids = args.pid + pids = reduce(lambda i, j: i + j, list(map(thread_mapper, pids))) + else: + pids = list(ps.processes.keys()) + + pids.sort() + len_comms = [] + for pid in pids: + if pid in ps: + try: + len(ps[pid]["stat"]["comm"]) + except (TypeError, FileNotFoundError): + continue + len_comms.append(len(ps[pid]["stat"]["comm"])) + + max_comm_len = max(len_comms, default=0) + del len_comms + + for pid in pids: + if pid not in ps: + continue + try: + flags = ps[pid].stat.process_flags() + except AttributeError: + continue + # Remove flags that were superseeded + if "PF_THREAD_BOUND" in flags and "PF_NO_SETAFFINITY" in flags: + flags.remove("PF_THREAD_BOUND") + if "PF_FLUSHER" in flags and "PF_NPROC_EXCEEDED" in flags: + flags.remove("PF_FLUSHER") + if "PF_SWAPOFF" in flags and "PF_MEMALLOC_NOIO" in flags: + flags.remove("PF_SWAPOFF") + if "PF_FREEZER_NOSIG" in flags and "PF_SUSPEND_TASK" in flags: + flags.remove("PF_FREEZER_NOSIG") + comm = ps[pid].stat["comm"] + flags.sort() + sflags = reduce(lambda i, j: "%s|%s" % (i, j), [a[3:] for a in flags]) + print("%6d %*s %s" %(pid, max_comm_len, comm, sflags)) + +if __name__ == '__main__': + main(sys.argv) diff --git a/procfs/__init__.py b/procfs/__init__.py new file mode 100644 index 0000000..6deedf4 --- /dev/null +++ b/procfs/__init__.py @@ -0,0 +1,17 @@ +#! /usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2008, 2009 Red Hat, Inc. +# +""" +Copyright (c) 2008, 2009 Red Hat Inc. + +Abstractions to extract information from the Linux kernel /proc files. +""" +__author__ = "Arnaldo Carvalho de Melo " +__license__ = "GPLv2 License" + +from .procfs import * +from .utilist import * diff --git a/procfs/procfs.py b/procfs/procfs.py new file mode 100755 index 0000000..7cc7371 --- /dev/null +++ b/procfs/procfs.py @@ -0,0 +1,1111 @@ +#!/usr/bin/python3 +# -*- python -*- +# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-only +# +# Copyright (C) 2007-2015 Red Hat, Inc. +# + +import os +import platform +import re +import time +from functools import reduce +from six.moves import range +from procfs.utilist import bitmasklist + +VERSION = "0.7.3" + + +def is_s390(): + """ Return True if running on s390 or s390x """ + machine = platform.machine() + return bool(re.search('s390', machine)) + + +def process_cmdline(pid_info): + """ + Returns the process command line, if available in the given `process' class, + if not available, falls back to using the comm (short process name) in its + pidstat key. + """ + if pid_info["cmdline"]: + return reduce(lambda a, b: a + " %s" % b, pid_info["cmdline"]).strip() + + try: + """ If a pid disappears before we query it, return None """ + return pid_info["stat"]["comm"] + except: + return None + + +class pidstat: + """ + Provides a dictionary to access the fields in the + per process /proc/PID/stat files. 
+ + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> p = procfs.pidstat(1) + >>> print p.keys() + ['majflt', 'rss', 'cnswap', 'cstime', 'pid', 'session', 'startstack', 'startcode', 'cmajflt', 'blocked', 'exit_signal', 'minflt', 'nswap', 'environ', 'priority', 'state', 'delayacct_blkio_ticks', 'policy', 'rt_priority', 'ppid', 'nice', 'cutime', 'endcode', 'wchan', 'num_threads', 'sigcatch', 'comm', 'stime', 'sigignore', 'tty_nr', 'kstkeip', 'utime', 'tpgid', 'itrealvalue', 'kstkesp', 'rlim', 'signal', 'pgrp', 'flags', 'starttime', 'cminflt', 'vsize', 'processor'] + + And then access the various process properties using it as a dictionary: + + >>> print p['comm'] + systemd + >>> print p['priority'] + 20 + >>> print p['state'] + S + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + """ + + # Entries with the same value, the one with a comment after it is the + # more recent, having replaced the other name in v4.1-rc kernel times. + + PF_ALIGNWARN = 0x00000001 + PF_STARTING = 0x00000002 + PF_EXITING = 0x00000004 + PF_EXITPIDONE = 0x00000008 + PF_VCPU = 0x00000010 + PF_WQ_WORKER = 0x00000020 # /* I'm a workqueue worker */ + PF_FORKNOEXEC = 0x00000040 + PF_MCE_PROCESS = 0x00000080 # /* process policy on mce errors */ + PF_SUPERPRIV = 0x00000100 + PF_DUMPCORE = 0x00000200 + PF_SIGNALED = 0x00000400 + PF_MEMALLOC = 0x00000800 + # /* set_user noticed that RLIMIT_NPROC was exceeded */ + PF_NPROC_EXCEEDED = 0x00001000 + PF_FLUSHER = 0x00001000 + PF_USED_MATH = 0x00002000 + PF_USED_ASYNC = 0x00004000 # /* used async_schedule*(), used by module init */ + PF_NOFREEZE = 0x00008000 + PF_FROZEN = 0x00010000 + PF_FSTRANS = 0x00020000 + PF_KSWAPD = 0x00040000 + PF_MEMALLOC_NOIO = 0x00080000 # /* Allocating memory without IO involved */ + PF_SWAPOFF = 0x00080000 + PF_LESS_THROTTLE = 0x00100000 + PF_KTHREAD = 0x00200000 + PF_RANDOMIZE = 0x00400000 + PF_SWAPWRITE = 0x00800000 + PF_SPREAD_PAGE = 0x01000000 + PF_SPREAD_SLAB = 0x02000000 + PF_THREAD_BOUND = 0x04000000 + # /* Userland is not allowed to meddle with cpus_allowed */ + PF_NO_SETAFFINITY = 0x04000000 + PF_MCE_EARLY = 0x08000000 # /* Early kill for mce process policy */ + PF_MEMPOLICY = 0x10000000 + PF_MUTEX_TESTER = 0x20000000 + PF_FREEZER_SKIP = 0x40000000 + PF_FREEZER_NOSIG = 0x80000000 + # /* this thread called freeze_processes and should not be frozen */ + PF_SUSPEND_TASK = 0x80000000 + + proc_stat_fields = ["pid", "comm", "state", "ppid", "pgrp", "session", + "tty_nr", "tpgid", "flags", "minflt", "cminflt", + "majflt", "cmajflt", "utime", "stime", "cutime", + "cstime", "priority", "nice", "num_threads", + "itrealvalue", "starttime", "vsize", "rss", + "rlim", "startcode", "endcode", "startstack", + "kstkesp", "kstkeip", "signal", "blocked", + "sigignore", "sigcatch", "wchan", "nswap", + "cnswap", "exit_signal", "processor", + "rt_priority", "policy", + "delayacct_blkio_ticks", "environ"] + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + try: + self.load(basedir) + except FileNotFoundError: + # The file representing the pid has disappeared + # propagate the error to the user to handle + raise + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return 
self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + try: + f = open(f"{basedir}/{self.pid}/stat") + except FileNotFoundError: + # The pid has disappeared, propagate the error + raise + fields = f.readline().strip().split(') ') + f.close() + fields = fields[0].split(' (') + fields[1].split() + self.fields = {} + nr_fields = min(len(fields), len(self.proc_stat_fields)) + for i in range(nr_fields): + attrname = self.proc_stat_fields[i] + value = fields[i] + if attrname == "comm": + self.fields["comm"] = value.strip('()') + else: + try: + self.fields[attrname] = int(value) + except: + self.fields[attrname] = value + + def is_bound_to_cpu(self): + """ + Returns true if this process has a fixed smp affinity mask, + not allowing it to be moved to a different set of CPUs. + """ + return bool(self.fields["flags"] & self.PF_THREAD_BOUND) + + def process_flags(self): + """ + Returns a list with all the process flags known, details depend + on kernel version, declared in the file include/linux/sched.h in + the kernel sources. + + As of v4.2-rc7 these include (from include/linux/sched.h comments): + + PF_EXITING Getting shut down + PF_EXITPIDONE Pi exit done on shut down + PF_VCPU I'm a virtual CPU + PF_WQ_WORKER I'm a workqueue worker + PF_FORKNOEXEC Forked but didn't exec + PF_MCE_PROCESS Process policy on mce errors + PF_SUPERPRIV Used super-user privileges + PF_DUMPCORE Dumped core + PF_SIGNALED Killed by a signal + PF_MEMALLOC Allocating memory + PF_NPROC_EXCEEDED Set_user noticed that RLIMIT_NPROC was exceeded + PF_USED_MATH If unset the fpu must be initialized before use + PF_USED_ASYNC Used async_schedule*(), used by module init + PF_NOFREEZE This thread should not be frozen + PF_FROZEN Frozen for system suspend + PF_FSTRANS Inside a filesystem transaction + PF_KSWAPD I am kswapd + PF_MEMALLOC_NOIO Allocating memory without IO involved + PF_LESS_THROTTLE Throttle me less: I clean memory + PF_KTHREAD I am a kernel thread + PF_RANDOMIZE Randomize virtual address space + PF_SWAPWRITE Allowed to write to swap + PF_NO_SETAFFINITY Userland is not allowed to meddle with cpus_allowed + PF_MCE_EARLY Early kill for mce process policy + PF_MUTEX_TESTER Thread belongs to the rt mutex tester + PF_FREEZER_SKIP Freezer should not count it as freezable + PF_SUSPEND_TASK This thread called freeze_processes and + should not be frozen + + """ + sflags = [] + for attr in dir(self): + if attr[:3] != "PF_": + continue + value = getattr(self, attr) + if value & self.fields["flags"]: + sflags.append(attr) + + return sflags + + +def cannot_set_affinity(self, pid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +def cannot_set_thread_affinity(self, pid, tid): + PF_NO_SETAFFINITY = 0x04000000 + try: + return bool(int(self.processes[pid].threads[tid]["stat"]["flags"]) & + PF_NO_SETAFFINITY) + except: + return True + + +class pidstatus: + """ + Provides a dictionary to access the fields + in the per process /proc/PID/status files. + This provides additional information about processes and threads to + what can be obtained with the procfs.pidstat() class. 
+ + One can obtain the available fields by asking for the keys of the + dictionary, e.g.: + + >>> import procfs + >>> p = procfs.pidstatus(1) + >>> print p.keys() + ['VmExe', 'CapBnd', 'NSpgid', 'Tgid', 'NSpid', 'VmSize', 'VmPMD', 'ShdPnd', 'State', 'Gid', 'nonvoluntary_ctxt_switches', 'SigIgn', 'VmStk', 'VmData', 'SigCgt', 'CapEff', 'VmPTE', 'Groups', 'NStgid', 'Threads', 'PPid', 'VmHWM', 'NSsid', 'VmSwap', 'Name', 'SigBlk', 'Mems_allowed_list', 'VmPeak', 'Ngid', 'VmLck', 'SigQ', 'VmPin', 'Mems_allowed', 'CapPrm', 'Seccomp', 'VmLib', 'Cpus_allowed', 'Uid', 'SigPnd', 'Pid', 'Cpus_allowed_list', 'TracerPid', 'CapInh', 'voluntary_ctxt_switches', 'VmRSS', 'FDSize'] + >>> print p["Pid"] + 1 + >>> print p["Threads"] + 1 + >>> print p["VmExe"] + 1248 kB + >>> print p["Cpus_allowed"] + f + >>> print p["SigQ"] + 0/30698 + >>> print p["VmPeak"] + 320300 kB + >>> + + Please refer to the 'procfs(5)' man page, by using: + + $ man 5 procfs + + To see information for each of the above fields, it is part of the + 'man-pages' RPM package. + + In the man page there will be references to further documentation, like + referring to the "getrlimit(2)" man page when explaining the "SigQ" + line/field. + """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.load(basedir) + + def __getitem__(self, fieldname): + return self.fields[fieldname] + + def keys(self): + return list(self.fields.keys()) + + def values(self): + return list(self.fields.values()) + + def has_key(self, fieldname): + return fieldname in self.fields + + def items(self): + return self.fields + + def __contains__(self, fieldname): + return fieldname in self.fields + + def load(self, basedir="/proc"): + self.fields = {} + with open(f"{basedir}/{self.pid}/status") as f: + for line in f.readlines(): + fields = line.split(":") + if len(fields) != 2: + continue + name = fields[0] + value = fields[1].strip() + try: + self.fields[name] = int(value) + except: + self.fields[name] = value + + +class process: + """ + Information about a process with a given pid, provides a dictionary with + two entries, instances of different wrappers for /proc/ process related + meta files: "stat" and "status", see the documentation for procfs.pidstat + and procfs.pidstatus for further info about those classes. 
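    E.g., a minimal sketch reading the "status" wrapper; the pid and the values printed are illustrative:

    >>> import procfs
    >>> p = procfs.process(1)
    >>> print(p["status"]["Cpus_allowed_list"])
    0-3
    >>> print(p["status"]["VmRSS"])
    11392 kB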
+ """ + + def __init__(self, pid, basedir="/proc"): + self.pid = pid + self.basedir = basedir + + def __getitem__(self, attr): + if not hasattr(self, attr): + if attr in ("stat", "status"): + if attr == "stat": + sclass = pidstat + else: + sclass = pidstatus + + try: + setattr(self, attr, sclass(self.pid, self.basedir)) + except FileNotFoundError: + # The pid has disappeared, progate the error + raise + elif attr == "cmdline": + self.load_cmdline() + elif attr == "threads": + self.load_threads() + elif attr == "cgroups": + self.load_cgroups() + elif attr == "environ": + self.load_environ() + + return getattr(self, attr) + + def has_key(self, attr): + return hasattr(self, attr) + + def __contains__(self, attr): + return hasattr(self, attr) + + def load_cmdline(self): + try: + with open(f"/proc/{self.pid}/cmdline") as f: + self.cmdline = f.readline().strip().split('\0')[:-1] + except FileNotFoundError: + """ This can happen when a pid disappears """ + self.cmdline = None + except UnicodeDecodeError: + """ TODO - this shouldn't happen, needs to be investigated """ + self.cmdline = None + + def load_threads(self): + self.threads = pidstats(f"/proc/{self.pid}/task/") + # remove thread leader + del self.threads[self.pid] + + def load_cgroups(self): + self.cgroups = "" + with open(f"/proc/{self.pid}/cgroup") as f: + for line in reversed(f.readlines()): + if len(self.cgroups) != 0: + self.cgroups = self.cgroups + "," + line[:-1] + else: + self.cgroups = line[:-1] + + def load_environ(self): + """ + Loads the environment variables for this process. The entries then + become available via the 'environ' member, or via the 'environ' + dict key when accessing as p["environ"]. + + E.g.: + + + >>> all_processes = procfs.pidstats() + >>> firefox_pid = all_processes.find_by_name("firefox") + >>> firefox_process = all_processes[firefox_pid[0]] + >>> print firefox_process["environ"]["PWD"] + /home/acme + >>> print len(firefox_process.environ.keys()) + 66 + >>> print firefox_process["environ"]["SHELL"] + /bin/bash + >>> print firefox_process["environ"]["USERNAME"] + acme + >>> print firefox_process["environ"]["HOME"] + /home/acme + >>> print firefox_process["environ"]["MAIL"] + /var/spool/mail/acme + >>> + """ + self.environ = {} + with open(f"/proc/{self.pid}/environ") as f: + for x in f.readline().split('\0'): + if len(x) > 0: + y = x.split('=') + self.environ[y[0]] = y[1] + + +class pidstats: + """ + Provides access to all the processes in the system, to get a picture of + how many processes there are at any given moment. + + The entries can be accessed as a dictionary, keyed by pid. Also there are + methods to find processes that match a given COMM or regular expression. + """ + + def __init__(self, basedir="/proc"): + self.basedir = basedir + self.processes = {} + self.reload() + + def __getitem__(self, key): + return self.processes[key] + + def __delitem__(self, key): + # not clear on why this can fail, but it can + try: + del self.processes[key] + except: + pass + + def keys(self): + return list(self.processes.keys()) + + def values(self): + return list(self.processes.values()) + + def has_key(self, key): + return key in self.processes + + def items(self): + return self.processes + + def __contains__(self, key): + return key in self.processes + + def reload(self): + """ + This operation will throw away the current dictionary contents, + if any, and read all the pid files from /proc/, instantiating a + 'process' instance for each of them. 
+ + This is a high overhead operation, and should be avoided if the + perf python binding can be used to detect when new threads appear + and existing ones terminate. + + In RHEL it is found in the python-perf rpm package. + + More information about the perf facilities can be found in the + 'perf_event_open' man page. + """ + del self.processes + self.processes = {} + pids = os.listdir(self.basedir) + for spid in pids: + try: + pid = int(spid) + except: + continue + + self.processes[pid] = process(pid, self.basedir) + + def reload_threads(self): + to_remove = [] + for pid in list(self.processes.keys()): + try: + self.processes[pid].load_threads() + except OSError: + # process vanished, remove it + to_remove.append(pid) + for pid in to_remove: + del self.processes[pid] + + def find_by_name(self, name): + name = name[:15] + pids = [] + for pid in list(self.processes.keys()): + try: + if name == self.processes[pid]["stat"]["comm"]: + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + + return pids + + def find_by_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(self.processes[pid]["stat"]["comm"]): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def find_by_cmdline_regex(self, regex): + pids = [] + for pid in list(self.processes.keys()): + try: + if regex.match(process_cmdline(self.processes[pid])): + pids.append(pid) + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + return pids + + def get_per_cpu_rtprios(self, basename): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + name = f"{basename}/{cpu}" + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def get_rtprios(self, name): + cpu = 0 + priorities = "" + processed_pids = [] + while True: + pids = self.find_by_name(name) + if not pids or len([n for n in pids if n not in processed_pids]) == 0: + break + for pid in pids: + try: + priorities += f'{self.processes[pid]["stat"]["rt_priority"]}' + except IOError: + # We're doing lazy loading of /proc files + # So if we get this exception is because the + # process vanished, remove it + del self.processes[pid] + processed_pids += pids + cpu += 1 + + priorities = priorities.strip(',') + return priorities + + def is_bound_to_cpu(self, pid): + """ + Checks if a given pid can't have its SMP affinity mask changed. + """ + return self.processes[pid]["stat"].is_bound_to_cpu() + + +class interrupts: + """ + Information about IRQs in the system. A dictionary keyed by IRQ number + will have as its value another dictionary with "cpu", "type" and "users" + keys, with the SMP affinity mask, type of IRQ and the drivers associated + with each interrupt. 
+
+    The information comes from the /proc/interrupts file, documented in
+    'man procfs(5)'. For instance, the 'cpu' entry is a list with one element
+    per CPU present in the system, each value being the number of interrupts
+    that took place on that CPU.
+
+    E.g.:
+
+    >>> import procfs
+    >>> interrupts = procfs.interrupts()
+    >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt")
+    >>> print(thunderbolt_irq)
+    34
+    >>> thunderbolt = interrupts[thunderbolt_irq]
+    >>> print(thunderbolt)
+    {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']}
+    >>>
+    """
+
+    def __init__(self):
+        self.interrupts = {}
+        self.reload()
+
+    def __getitem__(self, key):
+        return self.interrupts[str(key)]
+
+    def keys(self):
+        return list(self.interrupts.keys())
+
+    def values(self):
+        return list(self.interrupts.values())
+
+    def has_key(self, key):
+        return str(key) in self.interrupts
+
+    def items(self):
+        return self.interrupts
+
+    def __contains__(self, key):
+        return str(key) in self.interrupts
+
+    def reload(self):
+        del self.interrupts
+        self.interrupts = {}
+        with open("/proc/interrupts") as f:
+            for line in f.readlines():
+                line = line.strip()
+                fields = line.split()
+                if fields[0][:3] == "CPU":
+                    self.nr_cpus = len(fields)
+                    continue
+                irq = fields[0].strip(":")
+                self.interrupts[irq] = self.parse_entry(fields[1:], line)
+                try:
+                    nirq = int(irq)
+                except ValueError:
+                    # named lines such as NMI, LOC, etc. have no /proc/irq dir
+                    continue
+                self.interrupts[irq]["affinity"] = self.parse_affinity(nirq)
+
+    def parse_entry(self, fields, line):
+        dict = {}
+        dict["cpu"] = []
+        dict["cpu"].append(int(fields[0]))
+        nr_fields = len(fields)
+        if nr_fields >= self.nr_cpus:
+            dict["cpu"] += [int(i) for i in fields[1:self.nr_cpus]]
+            if nr_fields > self.nr_cpus:
+                dict["type"] = fields[self.nr_cpus]
+                # look if there are users (interrupts 3 and 4 haven't)
+                if nr_fields > self.nr_cpus + 1:
+                    dict["users"] = [a.strip()
+                                     for a in fields[nr_fields - 1].split(',')]
+                else:
+                    dict["users"] = []
+        return dict
+
+    def parse_affinity(self, irq):
+        try:
+            with open(f"/proc/irq/{irq}/smp_affinity") as f:
+                line = f.readline()
+                return bitmasklist(line, self.nr_cpus)
+        except IOError:
+            return [0, ]
+
+    def find_by_user(self, user):
+        """
+        Looks up an interrupt number by the name of one of its users.
+
+        E.g.:
+
+        >>> import procfs
+        >>> interrupts = procfs.interrupts()
+        >>> thunderbolt_irq = interrupts.find_by_user("thunderbolt")
+        >>> print(thunderbolt_irq)
+        34
+        >>> thunderbolt = interrupts[thunderbolt_irq]
+        >>> print(thunderbolt)
+        {'affinity': [0, 1, 2, 3], 'type': 'PCI-MSI', 'cpu': [3495, 0, 81, 0], 'users': ['thunderbolt']}
+        >>>
+        """
+        for i in list(self.interrupts.keys()):
+            if "users" in self.interrupts[i] and \
+               user in self.interrupts[i]["users"]:
+                return i
+        return None
+
+    def find_by_user_regex(self, regex):
+        """
+        Looks up interrupt numbers by a regex that matches the names of
+        their users.
+
+        E.g.:
+
+        >>> import procfs
+        >>> import re
+        >>> interrupts = procfs.interrupts()
+        >>> usb_controllers = interrupts.find_by_user_regex(re.compile(".*hcd"))
+        >>> print(usb_controllers)
+        ['22', '23', '31']
+        >>> print([interrupts[irq]["users"] for irq in usb_controllers])
+        [['ehci_hcd:usb4'], ['ehci_hcd:usb3'], ['xhci_hcd']]
+        >>>
+        """
+        irqs = []
+        for i in list(self.interrupts.keys()):
+            if "users" not in self.interrupts[i]:
+                continue
+            for user in self.interrupts[i]["users"]:
+                if regex.match(user):
+                    irqs.append(i)
+                    break
+        return irqs
+
+
+class cmdline:
+    """
+    Parses the kernel command line (/proc/cmdline), turning it into a
+    dictionary.
+
+    Useful to figure out if some kernel boolean knob has been turned on,
+    as well as to find the value associated to other kernel knobs.
+
+    It can also be used to find out about parameters passed to the
+    init process, such as 'BOOT_IMAGE', etc.
+
+    E.g.:
+    >>> import procfs
+    >>> kcmd = procfs.cmdline()
+    >>> print(kcmd.keys())
+    ['LANG', 'BOOT_IMAGE', 'quiet', 'rhgb', 'rd.lvm.lv', 'ro', 'root']
+    >>> print(kcmd["BOOT_IMAGE"])
+    /vmlinuz-4.3.0-rc1+
+    >>>
+    """
+
+    def __init__(self):
+        self.options = {}
+        self.parse()
+
+    def parse(self):
+        with open("/proc/cmdline") as f:
+            for option in f.readline().strip().split():
+                fields = option.split("=")
+                if len(fields) == 1:
+                    self.options[fields[0]] = True
+                else:
+                    self.options[fields[0]] = fields[1]
+
+    def __getitem__(self, key):
+        return self.options[key]
+
+    def keys(self):
+        return list(self.options.keys())
+
+    def values(self):
+        return list(self.options.values())
+
+    def items(self):
+        return self.options
+
+
+class cpuinfo:
+    """
+    Dictionary with information about CPUs in the system.
+
+    Please refer to 'man procfs(5)' for further information about the
+    '/proc/cpuinfo' file, that is the source of the information provided
+    by this class. The 'lscpu(1)' man page also has information about a
+    program that uses the '/proc/cpuinfo' file.
+
+    Using this class one can obtain the number of CPUs in a system:
+
+    >>> cpus = procfs.cpuinfo()
+    >>> print(cpus.nr_cpus)
+    4
+
+    It is also possible to figure out aspects of the CPU topology, such as
+    how many CPU physical sockets exist, i.e. groups of CPUs sharing
+    components such as CPU memory caches:
+
+    >>> print(len(cpus.sockets))
+    1
+
+    Additionally, a dictionary with information common to all CPUs in the
+    system is available:
+
+    >>> print(cpus["model name"])
+    Intel(R) Core(TM) i7-3667U CPU @ 2.00GHz
+    >>> print(cpus["cache size"])
+    4096 KB
+    >>>
+    """
+
+    def __init__(self, filename="/proc/cpuinfo"):
+        self.tags = {}
+        self.nr_cpus = 0
+        self.sockets = []
+        self.parse(filename)
+
+    def __getitem__(self, key):
+        return self.tags[key.lower()]
+
+    def keys(self):
+        return list(self.tags.keys())
+
+    def values(self):
+        return list(self.tags.values())
+
+    def items(self):
+        return self.tags
+
+    def parse(self, filename):
+        with open(filename) as f:
+            for line in f.readlines():
+                line = line.strip()
+                if not line:
+                    continue
+                fields = line.split(":")
+                tagname = fields[0].strip().lower()
+                if tagname == "processor":
+                    self.nr_cpus += 1
+                    continue
+                if is_s390() and tagname == "cpu number":
+                    self.nr_cpus += 1
+                    continue
+                if tagname == "core id":
+                    continue
+                self.tags[tagname] = fields[1].strip()
+                if tagname == "physical id":
+                    socket_id = self.tags[tagname]
+                    if socket_id not in self.sockets:
+                        self.sockets.append(socket_id)
+        self.nr_sockets = self.sockets and len(self.sockets) or \
+            (self.nr_cpus /
+             ("siblings" in self.tags and int(self.tags["siblings"]) or 1))
+        self.nr_cores = ("cpu cores" in self.tags and int(
+            self.tags["cpu cores"]) or 1) * self.nr_sockets
+
+
+class smaps_lib:
+    """
+    Representation of an mmap in place for a process. Can be used to figure
+    out which processes have a library mapped, etc.
+
+    The 'perms' member can be used to figure out executable mmaps,
+    i.e. libraries.
+
+    The 'vm_start' and 'vm_end' in turn can be used when trying to resolve
+    processor instruction pointer addresses to a symbol name in a library.
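+
+    Illustrative example (the addresses, permissions and file name below are
+    made up, and reading /proc/PID/smaps for a process you do not own usually
+    requires the appropriate privileges):
+
+    >>> maps = procfs.smaps(1)
+    >>> first = maps[0]
+    >>> print("%#x-%#x %s %s" % (first.vm_start, first.vm_end,
+    ...                          first.perms, first.name))
+    0x55e32c400000-0x55e32c426000 r--p /usr/lib/systemd/systemd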
+    """
+
+    def __init__(self, lines):
+        fields = lines[0].split()
+        self.vm_start, self.vm_end = [int(a, 16) for a in fields[0].split("-")]
+        self.perms = fields[1]
+        self.offset = int(fields[2], 16)
+        self.major, self.minor = fields[3].split(":")
+        self.inode = int(fields[4])
+        if len(fields) > 5:
+            self.name = fields[5]
+        else:
+            self.name = None
+        self.tags = {}
+        for line in lines[1:]:
+            fields = line.split()
+            tag = fields[0][:-1].lower()
+            try:
+                self.tags[tag] = int(fields[1])
+            except ValueError:
+                # VmFlags are strings
+                self.tags[tag] = fields
+
+    def __getitem__(self, key):
+        return self.tags[key.lower()]
+
+    def keys(self):
+        return list(self.tags.keys())
+
+    def values(self):
+        return list(self.tags.values())
+
+    def items(self):
+        return self.tags
+
+
+class smaps:
+    """
+    List of libraries mapped by a process. Parses the lines in
+    the /proc/PID/smaps file, that is further documented in the
+    procfs(5) man page.
+
+    Example: Listing the executable maps for the 'sshd' process:
+
+    >>> import procfs
+    >>> processes = procfs.pidstats()
+    >>> sshd = processes.find_by_name("sshd")
+    >>> sshd_maps = procfs.smaps(sshd[0])
+    >>> for i in range(len(sshd_maps)):
+    ...     if 'x' in sshd_maps[i].perms:
+    ...         print("%s: %s" % (sshd_maps[i].name, sshd_maps[i].perms))
+    ...
+    /usr/sbin/sshd: r-xp
+    /usr/lib64/libnss_files-2.20.so: r-xp
+    /usr/lib64/librt-2.20.so: r-xp
+    /usr/lib64/libkeyutils.so.1.5: r-xp
+    /usr/lib64/libkrb5support.so.0.1: r-xp
+    /usr/lib64/libfreebl3.so: r-xp
+    /usr/lib64/libpthread-2.20.so: r-xp
+    ...
+    """
+
+    def __init__(self, pid):
+        self.pid = pid
+        self.entries = []
+        self.reload()
+
+    def parse_entry(self, f, line):
+        lines = []
+        if not line:
+            line = f.readline().strip()
+        if not line:
+            return
+        lines.append(line)
+        while True:
+            line = f.readline()
+            if not line:
+                break
+            line = line.strip()
+            if line.split()[0][-1] == ':':
+                lines.append(line)
+            else:
+                break
+        self.entries.append(smaps_lib(lines))
+        return line
+
+    def __len__(self):
+        return len(self.entries)
+
+    def __getitem__(self, index):
+        return self.entries[index]
+
+    def reload(self):
+        line = None
+        with open(f"/proc/{self.pid}/smaps") as f:
+            while True:
+                line = self.parse_entry(f, line)
+                if not line:
+                    break
+        self.nr_entries = len(self.entries)
+
+    def find_by_name_fragment(self, fragment):
+        result = []
+        for i in range(self.nr_entries):
+            if self.entries[i].name and \
+               self.entries[i].name.find(fragment) >= 0:
+                result.append(self.entries[i])
+
+        return result
+
+
+class cpustat:
+    """
+    CPU statistics, obtained from a line in the '/proc/stat' file. Please
+    refer to 'man procfs(5)' for further information about the '/proc/stat'
+    file, that is the source of the information provided by this class.
+    """
+
+    def __init__(self, fields):
+        self.name = fields[0]
+        (self.user,
+         self.nice,
+         self.system,
+         self.idle,
+         self.iowait,
+         self.irq,
+         self.softirq) = [int(i) for i in fields[1:8]]
+        # steal and guest are the 8th and 9th value columns, when present
+        if len(fields) > 8:
+            self.steal = int(fields[8])
+        if len(fields) > 9:
+            self.guest = int(fields[9])
+
+    def __repr__(self):
+        s = f"< user: {self.user}, nice: {self.nice}, system: {self.system}, idle: {self.idle}, iowait: {self.iowait}, irq: {self.irq}, softirq: {self.softirq}"
+        if hasattr(self, 'steal'):
+            s += f", steal: {self.steal}"
+        if hasattr(self, 'guest'):
+            s += f", guest: {self.guest}"
+        return s + ">"
+
+
+class cpusstats:
+    """
+    Dictionary with information about CPUs in the system. First entry in the
+    dictionary gives an aggregate view of all CPUs, each other entry is about
+    separate CPUs. Please refer to 'man procfs(5)' for further information
+    about the '/proc/stat' file, that is the source of the information provided
+    by this class.
+    """
+
+    def __init__(self, filename="/proc/stat"):
+        self.entries = {}
+        self.time = None
+        self.hertz = os.sysconf("SC_CLK_TCK")
+        self.filename = filename
+        self.reload()
+
+    def __iter__(self):
+        return iter(self.entries)
+
+    def __getitem__(self, key):
+        return self.entries[key]
+
+    def __len__(self):
+        return len(list(self.entries.keys()))
+
+    def keys(self):
+        return list(self.entries.keys())
+
+    def values(self):
+        return list(self.entries.values())
+
+    def items(self):
+        return self.entries
+
+    def reload(self):
+        last_entries = self.entries
+        self.entries = {}
+        with open(self.filename) as f:
+            for line in f.readlines():
+                fields = line.strip().split()
+                if fields[0][:3].lower() != "cpu":
+                    continue
+                c = cpustat(fields)
+                if c.name == "cpu":
+                    idx = 0
+                else:
+                    idx = int(c.name[3:]) + 1
+                self.entries[idx] = c
+        last_time = self.time
+        self.time = time.time()
+        if last_entries:
+            delta_sec = self.time - last_time
+            interval_hz = delta_sec * self.hertz
+            for cpu in list(self.entries.keys()):
+                curr = self.entries[cpu]
+                if cpu not in last_entries:
+                    # a CPU that showed up since the last reload has no
+                    # previous sample to compare against
+                    curr.usage = 0
+                    continue
+                prev = last_entries[cpu]
+                delta = (curr.user - prev.user) + \
+                    (curr.nice - prev.nice) + \
+                    (curr.system - prev.system)
+                curr.usage = (delta / interval_hz) * 100
+                curr.usage = min(curr.usage, 100)
+
+
+if __name__ == '__main__':
+    import sys
+
+    ints = interrupts()
+
+    for i in list(ints.interrupts.keys()):
+        print(f"{i}: {ints.interrupts[i]}")
+
+    options = cmdline()
+    for o in list(options.options.keys()):
+        print(f"{o}: {options.options[o]}")
+
+    cpu = cpuinfo()
+    print(f"\ncpuinfo data: {cpu.nr_cpus} processors")
+    for tag in list(cpu.keys()):
+        print(f"{tag}={cpu[tag]}")
+
+    print("smaps:\n" + ("-" * 40))
+    s = smaps(int(sys.argv[1]))
+    for i in range(s.nr_entries):
+        print(f"{s.entries[i].vm_start:#x} {s.entries[i].name}")
+    print("-" * 40)
+    for a in s.find_by_name_fragment(sys.argv[2]):
+        print(a["Size"])
+
+    ps = pidstats()
+    print(ps[1])
+
+    cs = cpusstats()
+    while True:
+        time.sleep(1)
+        cs.reload()
+        for cpu in cs:
+            print(f"{cpu}: {cs[cpu]}")
+        print("-" * 10)
diff --git a/procfs/utilist.py b/procfs/utilist.py
new file mode 100755
index 0000000..e6314f0
--- /dev/null
+++ b/procfs/utilist.py
@@ -0,0 +1,41 @@
+#! /usr/bin/python3
+# -*- python -*-
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright (C) 2007 Red Hat, Inc.
+# + +from six.moves import range + + +def hexbitmask(l, nr_entries): + hexbitmask = [] + bit = 0 + mask = 0 + for entry in range(nr_entries): + if entry in l: + mask |= (1 << bit) + bit += 1 + if bit == 32: + bit = 0 + hexbitmask.insert(0, mask) + mask = 0 + + if bit < 32 and mask != 0: + hexbitmask.insert(0, mask) + + return hexbitmask + +def bitmasklist(line, nr_entries): + hexmask = line.strip().replace(",", "") + bitmasklist = [] + entry = 0 + bitmask = bin(int(hexmask, 16))[2::] + for i in reversed(bitmask): + if int(i) & 1: + bitmasklist.append(entry) + entry += 1 + if entry == nr_entries: + break + return bitmasklist diff --git a/python_linux_procfs.egg-info/PKG-INFO b/python_linux_procfs.egg-info/PKG-INFO new file mode 100644 index 0000000..15f662b --- /dev/null +++ b/python_linux_procfs.egg-info/PKG-INFO @@ -0,0 +1,11 @@ +Metadata-Version: 2.1 +Name: python-linux-procfs +Version: 0.7.3 +Summary: Linux /proc abstraction classes +Home-page: http://userweb.kernel.org/python-linux-procfs +Author: Arnaldo Carvalho de Melo +Author-email: acme@redhat.com +License: GPLv2 +License-File: COPYING + +Abstractions to extract information from the Linux kernel /proc files. diff --git a/python_linux_procfs.egg-info/SOURCES.txt b/python_linux_procfs.egg-info/SOURCES.txt new file mode 100644 index 0000000..119bebd --- /dev/null +++ b/python_linux_procfs.egg-info/SOURCES.txt @@ -0,0 +1,10 @@ +COPYING +pflags +setup.py +procfs/__init__.py +procfs/procfs.py +procfs/utilist.py +python_linux_procfs.egg-info/PKG-INFO +python_linux_procfs.egg-info/SOURCES.txt +python_linux_procfs.egg-info/dependency_links.txt +python_linux_procfs.egg-info/top_level.txt \ No newline at end of file diff --git a/python_linux_procfs.egg-info/dependency_links.txt b/python_linux_procfs.egg-info/dependency_links.txt new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/python_linux_procfs.egg-info/dependency_links.txt @@ -0,0 +1 @@ + diff --git a/python_linux_procfs.egg-info/top_level.txt b/python_linux_procfs.egg-info/top_level.txt new file mode 100644 index 0000000..8b71071 --- /dev/null +++ b/python_linux_procfs.egg-info/top_level.txt @@ -0,0 +1 @@ +procfs diff --git a/setup.py b/setup.py new file mode 100755 index 0000000..144e07e --- /dev/null +++ b/setup.py @@ -0,0 +1,33 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-only + +import os +from os.path import isfile, relpath +import sysconfig +from setuptools import setup + +if isfile("MANIFEST"): + os.unlink("MANIFEST") + +SCHEME = 'rpm_prefix' +if not SCHEME in sysconfig.get_scheme_names(): + SCHEME = 'posix_prefix' + +# Get PYTHONLIB with no prefix so --prefix installs work. +PYTHONLIB = relpath(sysconfig.get_path('platlib', SCHEME), '/usr') + +setup(name="python-linux-procfs", + version = "0.7.3", + description = "Linux /proc abstraction classes", + author = "Arnaldo Carvalho de Melo", + author_email = "acme@redhat.com", + url = "http://userweb.kernel.org/python-linux-procfs", + license = "GPLv2", + long_description = +"""\ +Abstractions to extract information from the Linux kernel /proc files. +""", + packages = ["procfs"], + scripts = ['pflags'], + install_requires = ['six'], +)
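
Usage sketch (an editor's illustration, not part of the diff above). It assumes
the package has been installed, e.g. with 'pip install .' from the source tree;
the PIDs, IRQ numbers and mask values it prints depend on the running system.

    #!/usr/bin/python3
    # Exercise a few of the /proc abstractions provided by the package.
    import re
    import time

    import procfs
    from procfs.utilist import bitmasklist, hexbitmask

    ps = procfs.pidstats()               # one 'process' object per /proc/<pid>
    for pid in ps.find_by_name("bash"):  # COMM names are truncated to 15 chars
        print(pid, procfs.process_cmdline(ps[pid]))

    ints = procfs.interrupts()           # /proc/interrupts + per-IRQ smp_affinity
    for irq in ints.find_by_user_regex(re.compile(".*hcd")):
        print(irq, ints[irq]["users"], ints[irq]["affinity"])

    cs = procfs.cpusstats()              # /proc/stat; entry 0 aggregates all CPUs
    time.sleep(1)
    cs.reload()                          # 'usage' is computed on the second read
    print(f"cpu usage: {cs[0].usage:.1f}%")

    print(bitmasklist("f", 8))                           # -> [0, 1, 2, 3]
    print([f"{m:x}" for m in hexbitmask([0, 1, 5], 8)])  # -> ['23']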