author     Ponnuvel Palaniyappan <pponnuvel@gmail.com>  2024-02-25 09:01:16 +0000
committer  Jake Hunsaker <jacob.r.hunsaker@gmail.com>   2024-03-03 15:28:51 -0500
commit     ab01c8e3b3308081a62ec1f8de305766457c9447 (patch)
tree       85f6669acbff6f4e60f35ce6dcbe2ffb5f810df5
parent     4255d1309ec097c7b44f4d3838812e7d9ebe6c98 (diff)
download   sos-ab01c8e3b3308081a62ec1f8de305766457c9447.tar.gz
[plugins] Fix Pylint and PEP8 issues
Continuation of #3530.

Signed-off-by: Ponnuvel Palaniyappan <pponnuvel@gmail.com>
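The changes below are mechanical and repeat a handful of patterns across the plugins: zero-argument super(), context managers with an explicit encoding= on open(), iterating dicts with .items(), CamelCase class names, and declaring attributes at class level instead of first assigning them in setup(). A minimal before/after-style sketch of these patterns follows; it is illustrative only, and ExamplePlugin and /etc/example.conf are hypothetical stand-ins, not code from any plugin touched by this commit.

# Illustrative sketch only: 'ExamplePlugin' and '/etc/example.conf' are
# hypothetical stand-ins, not code from any sos plugin.

class BasePlugin:
    """Minimal stand-in for the sos Plugin base class."""
    def setup(self):
        pass


class ExamplePlugin(BasePlugin):      # CamelCase class name (was e.g. 'example_plugin')
    conf_path = None                  # attribute declared on the class, not only in setup()

    def setup(self):
        super().setup()               # was: super(ExamplePlugin, self).setup()
        self.conf_path = '/etc/example.conf'
        settings = {}
        try:
            # context manager plus explicit encoding instead of bare open()/close()
            with open(self.conf_path, 'r', encoding='UTF-8') as cfile:
                for line in cfile.read().splitlines():
                    if '=' in line:
                        key, val = line.split('=', 1)
                        settings[key.strip()] = val.strip()
        except IOError:
            pass
        for key, val in settings.items():   # was: for key in settings: ... settings[key]
            print(f"{key}={val}")


if __name__ == '__main__':
    ExamplePlugin().setup()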
-rw-r--r--  sos/report/plugins/fcoe.py                2
-rw-r--r--  sos/report/plugins/filesys.py             2
-rw-r--r--  sos/report/plugins/firewall_tables.py    10
-rw-r--r--  sos/report/plugins/foreman.py            39
-rw-r--r--  sos/report/plugins/foreman_installer.py   6
-rw-r--r--  sos/report/plugins/foreman_proxy.py       1
-rw-r--r--  sos/report/plugins/gcp.py                24
-rw-r--r--  sos/report/plugins/gluster.py            12
-rw-r--r--  sos/report/plugins/gluster_block.py       4
-rw-r--r--  sos/report/plugins/grafana.py             7
-rw-r--r--  sos/report/plugins/grub2.py               4
-rw-r--r--  sos/report/plugins/haproxy.py             5
-rw-r--r--  sos/report/plugins/hpssm.py               3
-rw-r--r--  sos/report/plugins/infiniband.py         42
-rw-r--r--  sos/report/plugins/ipa.py                37
-rw-r--r--  sos/report/plugins/iprconfig.py          11
-rw-r--r--  sos/report/plugins/iscsi.py               7
-rw-r--r--  sos/report/plugins/iscsitarget.py         4
-rw-r--r--  sos/report/plugins/jars.py               22
-rw-r--r--  sos/report/plugins/kata_containers.py     5
-rw-r--r--  sos/report/plugins/kdump.py              26
-rw-r--r--  sos/report/plugins/kernel.py              5
-rw-r--r--  sos/report/plugins/kernelrt.py            4
-rw-r--r--  sos/report/plugins/kpatch.py              4
-rw-r--r--  sos/report/plugins/krb5.py                1
-rw-r--r--  sos/report/plugins/kubernetes.py         86
-rw-r--r--  sos/report/plugins/landscape.py           4
-rw-r--r--  sos/report/plugins/ldap.py               14
-rw-r--r--  sos/report/plugins/libraries.py           6
-rw-r--r--  sos/report/plugins/libvirt.py             9
-rw-r--r--  sos/report/plugins/logs.py               13
-rw-r--r--  sos/report/plugins/lustre.py              2
-rw-r--r--  sos/report/plugins/maas.py                8
-rw-r--r--  sos/report/plugins/manageiq.py            2
-rw-r--r--  sos/report/plugins/mellanox_firmware.py  65
-rw-r--r--  sos/report/plugins/memcached.py           4
-rw-r--r--  sos/report/plugins/microshift.py         12
-rw-r--r--  sos/report/plugins/microshift_ovn.py      6
-rw-r--r--  sos/report/plugins/mongodb.py             8
-rw-r--r--  sos/report/plugins/monit.py               3
-rw-r--r--  sos/report/plugins/mssql.py               8
-rw-r--r--  sos/report/plugins/mvcli.py               2
-rw-r--r--  sos/report/plugins/mysql.py               8
-rw-r--r--  sos/report/plugins/networkmanager.py      4
44 files changed, 277 insertions, 274 deletions
diff --git a/sos/report/plugins/fcoe.py b/sos/report/plugins/fcoe.py
index 6b6bbccf..991bb638 100644
--- a/sos/report/plugins/fcoe.py
+++ b/sos/report/plugins/fcoe.py
@@ -9,7 +9,7 @@
from sos.report.plugins import Plugin, RedHatPlugin
-class fcoe(Plugin, RedHatPlugin):
+class FCoE(Plugin, RedHatPlugin):
short_desc = 'Fibre Channel over Ethernet'
diff --git a/sos/report/plugins/filesys.py b/sos/report/plugins/filesys.py
index 92f3027e..781d03b5 100644
--- a/sos/report/plugins/filesys.py
+++ b/sos/report/plugins/filesys.py
@@ -107,7 +107,7 @@ class Filesys(Plugin, DebianPlugin, UbuntuPlugin, CosPlugin):
class RedHatFilesys(Filesys, RedHatPlugin):
def setup(self):
- super(RedHatFilesys, self).setup()
+ super().setup()
self.add_cmd_output("ls -ltradZ /tmp")
# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/firewall_tables.py b/sos/report/plugins/firewall_tables.py
index ec97fdf0..f923d911 100644
--- a/sos/report/plugins/firewall_tables.py
+++ b/sos/report/plugins/firewall_tables.py
@@ -9,7 +9,7 @@
from sos.report.plugins import (Plugin, IndependentPlugin, SoSPredicate)
-class firewall_tables(Plugin, IndependentPlugin):
+class FirewallTables(Plugin, IndependentPlugin):
"""Collects information about local firewall tables, such as iptables,
and nf_tables (via nft). Note that this plugin does _not_ collect firewalld
information, which is handled by a separate plugin.
@@ -70,14 +70,15 @@ class firewall_tables(Plugin, IndependentPlugin):
for line in nft_lines.splitlines():
words = line.split()[0:3]
if len(words) == 3 and words[0] == 'table' and \
- words[1] in nft_ip_tables.keys():
+ words[1] in nft_ip_tables:
nft_ip_tables[words[1]].append(words[2])
# collect iptables -t for any existing table, if we can't read the
# tables, collect 2 default ones (mangle, filter)
# do collect them only when relevant nft list ruleset exists
default_ip_tables = "mangle\nfilter\nnat\n"
try:
- with open('/proc/net/ip_tables_names', 'r') as ifile:
+ proc_net_ip_tables = '/proc/net/ip_tables_names'
+ with open(proc_net_ip_tables, 'r', encoding='UTF-8') as ifile:
ip_tables_names = ifile.read()
except IOError:
ip_tables_names = default_ip_tables
@@ -86,7 +87,8 @@ class firewall_tables(Plugin, IndependentPlugin):
self.collect_iptable(table)
# collect the same for ip6tables
try:
- with open('/proc/net/ip6_tables_names', 'r') as ipfile:
+ proc_net_ip6_tables = '/proc/net/ip6_tables_names'
+ with open(proc_net_ip6_tables, 'r', encoding='UTF-8') as ipfile:
ip_tables_names = ipfile.read()
except IOError:
ip_tables_names = default_ip_tables
diff --git a/sos/report/plugins/foreman.py b/sos/report/plugins/foreman.py
index 0ae84b8a..3b13fe65 100644
--- a/sos/report/plugins/foreman.py
+++ b/sos/report/plugins/foreman.py
@@ -9,10 +9,10 @@
#
# See the LICENSE file in the source distribution for further information.
+from re import match
+from shlex import quote
from sos.report.plugins import (Plugin, RedHatPlugin, SCLPlugin,
DebianPlugin, UbuntuPlugin, PluginOpt)
-from pipes import quote
-from re import match
from sos.utilities import is_executable
@@ -24,6 +24,10 @@ class Foreman(Plugin):
plugin_timeout = 1800
profiles = ('sysmgmt',)
packages = ('foreman',)
+ apachepkg = None
+ dbhost = "localhost"
+ dbpasswd = ""
+ env = {"PGPASSWORD": ""}
option_list = [
PluginOpt('days', default=14,
desc='number of days for dynflow output'),
@@ -40,10 +44,9 @@ class Foreman(Plugin):
# ..
# host: some.hostname
production_scope = False
- self.dbhost = "localhost"
- self.dbpasswd = ""
try:
- with open('/etc/foreman/database.yml', 'r') as dfile:
+ foreman_db = '/etc/foreman/database.yml'
+ with open(foreman_db, 'r', encoding='UTF-8') as dfile:
foreman_lines = dfile.read().splitlines()
for line in foreman_lines:
# skip empty lines and lines with comments
@@ -176,7 +179,11 @@ class Foreman(Plugin):
)
self.add_cmd_output(_cmd, suggest_filename='foreman_db_tables_sizes',
env=self.env)
+ self.collect_foreman_db()
+ self.collect_proxies()
+ def collect_foreman_db(self):
+ """ Collect foreman db and dynflow data """
days = '%s days' % self.get_option('days')
# Construct the DB queries, using the days option to limit the range
@@ -187,11 +194,6 @@ class Foreman(Plugin):
"'%(pass|key|secret)%'"
)
- authcmd = (
- 'select id,type,name,host,port,account,base_dn,attr_login,'
- 'onthefly_register,tls from auth_sources'
- )
-
dyncmd = (
'select dynflow_execution_plans.* from foreman_tasks_tasks join '
'dynflow_execution_plans on (foreman_tasks_tasks.external_id = '
@@ -231,7 +233,9 @@ class Foreman(Plugin):
foremandb = {
'foreman_settings_table': scmd,
'foreman_schema_migrations': 'select * from schema_migrations',
- 'foreman_auth_table': authcmd,
+ 'foreman_auth_table': 'select id,type,name,host,port,account,'
+ 'base_dn,attr_login,onthefly_register,tls '
+ 'from auth_sources',
'dynflow_schema_info': 'select * from dynflow_schema_info',
'audits_table_count': 'select count(*) from audits',
'logs_table_count': 'select count(*) from logs',
@@ -249,8 +253,8 @@ class Foreman(Plugin):
'dynflow_steps': dstepscmd,
}
- for table in foremandb:
- _cmd = self.build_query_cmd(foremandb[table])
+ for table, val in foremandb.items():
+ _cmd = self.build_query_cmd(val)
self.add_cmd_output(_cmd, suggest_filename=table, timeout=600,
sizelimit=100, env=self.env)
@@ -258,15 +262,16 @@ class Foreman(Plugin):
# case, psql-msgpack-decode wrapper tool from dynflow-utils (any
# version) must be used instead of plain psql command
dynutils = self.is_installed('dynflow-utils')
- for dyn in foremancsv:
+ for dyn, val in foremancsv.items():
binary = "psql"
if dyn != 'foreman_tasks_tasks' and dynutils:
binary = "/usr/libexec/psql-msgpack-decode"
- _cmd = self.build_query_cmd(foremancsv[dyn], csv=True,
- binary=binary)
+ _cmd = self.build_query_cmd(val, csv=True, binary=binary)
self.add_cmd_output(_cmd, suggest_filename=dyn, timeout=600,
sizelimit=100, env=self.env)
+ def collect_proxies(self):
+ """ Collect foreman proxies """
if self.get_option('proxyfeatures'):
# get a list of proxy names and URLs, and query for their features
# store results in smart_proxies_features subdirectory
@@ -331,7 +336,7 @@ class RedHatForeman(Foreman, SCLPlugin, RedHatPlugin):
if self.policy.dist_version() == 7 and is_executable('scl'):
self.pumactl = "scl enable tfm '%s'" % self.pumactl
- super(RedHatForeman, self).setup()
+ super().setup()
self.add_cmd_output('gem list')
diff --git a/sos/report/plugins/foreman_installer.py b/sos/report/plugins/foreman_installer.py
index f04a9750..d5a7c245 100644
--- a/sos/report/plugins/foreman_installer.py
+++ b/sos/report/plugins/foreman_installer.py
@@ -89,12 +89,12 @@ class RedHatForemanInstaller(ForemanInstaller, RedHatPlugin):
self.add_file_tags({
'/var/log/foreman-installer/satellite.log':
- ['foreman_satellite_log' 'satellite_installer_log'],
+ ['foreman_satellite_log', 'satellite_installer_log'],
'/var/log/foreman-installer/capsule.log':
- ['capsule_log' 'capsule_installer_log'],
+ ['capsule_log', 'capsule_installer_log'],
})
- super(RedHatForemanInstaller, self).setup()
+ super().setup()
# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/foreman_proxy.py b/sos/report/plugins/foreman_proxy.py
index d0a3fbd5..5f684be2 100644
--- a/sos/report/plugins/foreman_proxy.py
+++ b/sos/report/plugins/foreman_proxy.py
@@ -19,6 +19,7 @@ class ForemanProxy(Plugin):
plugin_name = 'foreman_proxy'
profiles = ('sysmgmt',)
packages = ('foreman-proxy',)
+ apachepkg = None
def setup(self):
self.add_file_tags({
diff --git a/sos/report/plugins/gcp.py b/sos/report/plugins/gcp.py
index 8ed0ec8f..fb040bc3 100644
--- a/sos/report/plugins/gcp.py
+++ b/sos/report/plugins/gcp.py
@@ -31,6 +31,7 @@ class GCP(Plugin, IndependentPlugin):
METADATA_QUERY = "http://metadata.google.internal/computeMetadata/v1/" \
"?recursive=true"
REDACTED = "[--REDACTED--]"
+ metadata = None
# A line we will be looking for in the dmesg output. If it's there,
# that means we're running on a Google Cloud Compute instance.
@@ -89,15 +90,17 @@ class GCP(Plugin, IndependentPlugin):
"""
try:
req = request.Request(url, headers={'Metadata-Flavor': 'Google'})
- response = request.urlopen(req)
+ with request.urlopen(req) as response:
+ if response.code != 200:
+ raise RuntimeError(
+ f"Failed to communicate with Metadata Server "
+ f"(code: {response.code}): " +
+ response.read().decode())
+ return response
except URLError as err:
raise RuntimeError(
- "Failed to communicate with Metadata Server: " + str(err))
- if response.code != 200:
- raise RuntimeError(
- f"Failed to communicate with Metadata Server "
- f"(code: {response.code}): " + response.read().decode())
- return response
+ "Failed to communicate with Metadata Server: " + str(err)) \
+ from err
def scrub_metadata(self):
"""
@@ -122,12 +125,12 @@ class GCP(Plugin, IndependentPlugin):
# tokens, but you can't be too careful.
data['token'] = self.REDACTED
return {scrub(k): scrub(v) for k, v in data.items()}
- elif isinstance(data, list):
+ if isinstance(data, list):
return [scrub(value) for value in data]
- elif isinstance(data, str):
+ if isinstance(data, str):
return data.replace(project_number, self.REDACTED)\
.replace(project_id, self.REDACTED)
- elif isinstance(data, int):
+ if isinstance(data, int):
return self.REDACTED if data == project_number_int else data
return data
@@ -140,5 +143,6 @@ class GCP(Plugin, IndependentPlugin):
@classmethod
def safe_redact_key(cls, dict_obj: dict, key: str):
+ """ Redact keys """
if key in dict_obj:
dict_obj[key] = cls.REDACTED
diff --git a/sos/report/plugins/gluster.py b/sos/report/plugins/gluster.py
index eab26953..58c3ea13 100644
--- a/sos/report/plugins/gluster.py
+++ b/sos/report/plugins/gluster.py
@@ -6,10 +6,9 @@
#
# See the LICENSE file in the source distribution for further information.
-import time
-import os
import glob
-import string
+import os
+import time
from sos.report.plugins import Plugin, RedHatPlugin, PluginOpt
@@ -29,6 +28,7 @@ class Gluster(Plugin, RedHatPlugin):
]
def wait_for_statedump(self, name_dir):
+ """ Wait until state dump is done """
statedumps_present = 0
statedump_entries = [
f for f in self.listdir(name_dir) if self.path_isfile(f)
@@ -38,9 +38,9 @@ class Gluster(Plugin, RedHatPlugin):
_spath = self.path_join(name_dir, statedump_file)
ret = -1
while ret == -1:
- with open(_spath, 'r') as sfile:
+ with open(_spath, 'r', encoding='UTF-8') as sfile:
last_line = sfile.readlines()[-1]
- ret = string.count(last_line, 'DUMP_END_TIME')
+ ret = last_line.count('DUMP_END_TIME')
def postproc(self):
if self.get_option("dump"):
@@ -104,7 +104,7 @@ class Gluster(Plugin, RedHatPlugin):
"glusterd processes")
else:
self.soslog.warning("Unable to generate statedumps, no such "
- "directory: %s" % self.statedump_dir)
+ "directory: %s", self.statedump_dir)
state = self.exec_cmd("gluster get-state")
if state['status'] == 0:
state_file = state['output'].split()[-1]
diff --git a/sos/report/plugins/gluster_block.py b/sos/report/plugins/gluster_block.py
index 0fa65a16..cfc26425 100644
--- a/sos/report/plugins/gluster_block.py
+++ b/sos/report/plugins/gluster_block.py
@@ -28,7 +28,7 @@ class GlusterBlock(Plugin, RedHatPlugin):
limit = 0
if limit:
- for f in glob.glob("/var/log/gluster-block/*.log"):
- self.add_copy_spec(f, limit)
+ for file in glob.glob("/var/log/gluster-block/*.log"):
+ self.add_copy_spec(file, limit)
else:
self.add_copy_spec("/var/log/gluster-block")
diff --git a/sos/report/plugins/grafana.py b/sos/report/plugins/grafana.py
index 0bc51fd4..4343ecd6 100644
--- a/sos/report/plugins/grafana.py
+++ b/sos/report/plugins/grafana.py
@@ -18,6 +18,7 @@ class Grafana(Plugin, IndependentPlugin):
profiles = ('services', 'openstack', 'openstack_controller')
packages = ('grafana',)
+ is_snap = False
def _is_snap_installed(self):
grafana_pkg = self.policy.package_manager.pkg_by_name('grafana')
@@ -26,8 +27,8 @@ class Grafana(Plugin, IndependentPlugin):
return False
def setup(self):
- self._is_snap = self._is_snap_installed()
- if self._is_snap:
+ self.is_snap = self._is_snap_installed()
+ if self.is_snap:
grafana_cli = "grafana.grafana-cli"
log_path = "/var/snap/grafana/common/data/log/"
config_path = "/var/snap/grafana/current/conf/grafana.ini"
@@ -62,7 +63,7 @@ class Grafana(Plugin, IndependentPlugin):
]
inifile = (
"/var/snap/grafana/current/conf/grafana.ini"
- if self._is_snap
+ if self.is_snap
else "/etc/grafana/grafana.ini"
)
diff --git a/sos/report/plugins/grub2.py b/sos/report/plugins/grub2.py
index 5fb2a965..1c500d12 100644
--- a/sos/report/plugins/grub2.py
+++ b/sos/report/plugins/grub2.py
@@ -46,8 +46,8 @@ class Grub2(Plugin, IndependentPlugin):
# to prevent removing of extra args in $kernel_opts, and (only) if so,
# call the command with this argument
grub_cmd = 'grub2-mkconfig'
- co = {'cmd': '%s --help' % grub_cmd, 'output': '--no-grubenv-update'}
- if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=co)):
+ out = {'cmd': '%s --help' % grub_cmd, 'output': '--no-grubenv-update'}
+ if self.test_predicate(self, pred=SoSPredicate(self, cmd_outputs=out)):
grub_cmd += ' --no-grubenv-update'
self.add_cmd_output(grub_cmd, env={'GRUB_DISABLE_OS_PROBER': 'true'},
pred=SoSPredicate(self, kmods=['dm_mod']))
diff --git a/sos/report/plugins/haproxy.py b/sos/report/plugins/haproxy.py
index 5d71e223..2ca7bf93 100644
--- a/sos/report/plugins/haproxy.py
+++ b/sos/report/plugins/haproxy.py
@@ -8,8 +8,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin
from re import match
+from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin
try:
from urllib.parse import urlparse
@@ -48,7 +48,8 @@ class HAProxy(Plugin, RedHatPlugin, DebianPlugin):
matched = None
provision_ip = None
try:
- with open("/etc/haproxy/haproxy.cfg", 'r') as hfile:
+ _haproxy_file = "/etc/haproxy/haproxy.cfg"
+ with open(_haproxy_file, 'r', encoding='UTF-8') as hfile:
for line in hfile.read().splitlines():
if matched:
provision_ip = line.split()[1]
diff --git a/sos/report/plugins/hpssm.py b/sos/report/plugins/hpssm.py
index 9a33eafd..b59770c6 100644
--- a/sos/report/plugins/hpssm.py
+++ b/sos/report/plugins/hpssm.py
@@ -6,8 +6,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt
import re
+from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt
class Hpssm(Plugin, IndependentPlugin):
@@ -75,6 +75,7 @@ class Hpssm(Plugin, IndependentPlugin):
self.do_debug(logpath)
def do_debug(self, logpath):
+ """ Collect debug logs """
self.add_cmd_output(
'ilorest serverlogs --selectlog=AHS --directorypath=%s' % logpath,
runat=logpath, suggest_filename='ilorest.log'
diff --git a/sos/report/plugins/infiniband.py b/sos/report/plugins/infiniband.py
index dad8c4a7..a5aae196 100644
--- a/sos/report/plugins/infiniband.py
+++ b/sos/report/plugins/infiniband.py
@@ -43,40 +43,36 @@ class Infiniband(Plugin, IndependentPlugin):
"sminfo",
"perfquery"
]
- IB_SYS_DIR = "/sys/class/infiniband/"
- ibs = self.listdir(IB_SYS_DIR) if self.path_isdir(IB_SYS_DIR) else []
- for ib in ibs:
- """
- Skip OPA hardware, as infiniband-diags tools does not understand
- OPA specific MAD sent by opa-fm. Intel provides OPA specific tools
- for OPA fabric diagnose.
- """
- if ib.startswith("hfi"):
+ ib_sysdir = "/sys/class/infiniband/"
+ ib_devs = self.listdir(ib_sysdir) if self.path_isdir(ib_sysdir) else []
+ for ibdev in ib_devs:
+ # Skip OPA hardware, as infiniband-diags tools does not understand
+ # OPA specific MAD sent by opa-fm. Intel provides OPA specific
+ # tools for OPA fabric diagnose.
+ if ibdev.startswith("hfi"):
continue
- for port in self.listdir(IB_SYS_DIR + ib + "/ports"):
+ for port in self.listdir(ib_sysdir + ibdev + "/ports"):
# skip IWARP and RoCE devices
+ lfile = ib_sysdir + ibdev + "/ports/" + port + "/link_layer"
try:
- p = open(IB_SYS_DIR + ib + "/ports/" + port +
- "/link_layer")
+ with open(lfile, 'r', encoding='UTF-8') as link_fp:
+ link_layer = link_fp.readline()
+ if link_layer != "InfiniBand\n":
+ continue
except IOError:
continue
- link_layer = p.readline()
- p.close()
- if link_layer != "InfiniBand\n":
- continue
+ sfile = ib_sysdir + ibdev + "/ports/" + port + "/state"
try:
- s = open(IB_SYS_DIR + ib + "/ports/" + port + "/state")
+ with open(sfile, 'r', encoding='UTF-8') as state_fp:
+ state = state_fp.readline()
+ if not state.endswith(": ACTIVE\n"):
+ continue
except IOError:
continue
- state = s.readline()
- s.close()
-
- if not state.endswith(": ACTIVE\n"):
- continue
- opts = "-C %s -P %s" % (ib, port)
+ opts = "-C %s -P %s" % (ibdev, port)
self.add_cmd_output(["%s %s" % (c, opts) for c in ports_cmds])
# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/ipa.py b/sos/report/plugins/ipa.py
index 7f5ec9c3..c6a76a48 100644
--- a/sos/report/plugins/ipa.py
+++ b/sos/report/plugins/ipa.py
@@ -8,8 +8,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
from glob import glob
+from sos.report.plugins import Plugin, RedHatPlugin, SoSPredicate
class Ipa(Plugin, RedHatPlugin):
@@ -25,17 +25,24 @@ class Ipa(Plugin, RedHatPlugin):
files = ('/etc/ipa',)
packages = ('ipa-server', 'ipa-client', 'freeipa-server', 'freeipa-client')
+ pki_tomcat_dir_v4 = None
+ pki_tomcat_dir_v3 = None
+ pki_tomcat_conf_dir_v4 = None
+ pki_tomcat_conf_dir_v3 = None
+
def check_ipa_server_version(self):
+ """ Get IPA server version """
if self.is_installed("pki-server") \
or self.path_exists("/var/lib/pki") \
or self.path_exists("/usr/share/doc/ipa-server-4.2.0"):
return "v4"
- elif self.is_installed("pki-common") \
+ if self.is_installed("pki-common") \
or self.path_exists("/var/lib/pki-ca/"):
return "v3"
return None
def ca_installed(self):
+ """ Check if any CA is installed """
# Follow the same checks as IPA CA installer code
return any(
self.path_exists(path) for path in [
@@ -45,11 +52,13 @@ class Ipa(Plugin, RedHatPlugin):
)
def ipa_server_installed(self):
+ """ Check if IPA server is installed """
return any(
self.is_installed(pkg) for pkg in ['ipa-server', 'freeipa-server']
)
- def retrieve_pki_logs(self, ipa_version):
+ def collect_pki_logs(self, ipa_version):
+ """ Collect PKI logs """
if ipa_version == "v4":
self.add_copy_spec([
"/var/log/pki/pki-tomcat/ca/debug*",
@@ -99,7 +108,7 @@ class Ipa(Plugin, RedHatPlugin):
if self.ca_installed():
self._log_debug("CA is installed: retrieving PKI logs")
- self.retrieve_pki_logs(ipa_version)
+ self.collect_pki_logs(ipa_version)
self.add_copy_spec([
"/var/log/ipaclient-install.log",
@@ -130,14 +139,14 @@ class Ipa(Plugin, RedHatPlugin):
# Make sure to use the right PKI config and NSS DB folders
if ipa_version == "v4":
- self.pki_tomcat_dir = self.pki_tomcat_dir_v4
- self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v4
+ pki_tomcat_dir = self.pki_tomcat_dir_v4
+ pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v4
else:
- self.pki_tomcat_dir = self.pki_tomcat_dir_v3
- self.pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v3
+ pki_tomcat_dir = self.pki_tomcat_dir_v3
+ pki_tomcat_conf_dir = self.pki_tomcat_conf_dir_v3
- self.add_cmd_output("certutil -L -d %s/alias" % self.pki_tomcat_dir)
- self.add_copy_spec("%s/CS.cfg" % self.pki_tomcat_conf_dir)
+ self.add_cmd_output("certutil -L -d %s/alias" % pki_tomcat_dir)
+ self.add_copy_spec("%s/CS.cfg" % pki_tomcat_conf_dir)
self.add_forbidden_path([
"/etc/pki/nssdb/key*",
@@ -149,9 +158,9 @@ class Ipa(Plugin, RedHatPlugin):
"/etc/httpd/alias/pin.txt",
"/etc/httpd/alias/pwdfile.txt",
"/etc/named.keytab",
- "%s/alias/key*" % self.pki_tomcat_dir,
- "%s/flatfile.txt" % self.pki_tomcat_conf_dir,
- "%s/password.conf" % self.pki_tomcat_conf_dir,
+ "%s/alias/key*" % pki_tomcat_dir,
+ "%s/flatfile.txt" % pki_tomcat_conf_dir,
+ "%s/password.conf" % pki_tomcat_conf_dir,
])
self.add_cmd_output([
@@ -178,8 +187,6 @@ class Ipa(Plugin, RedHatPlugin):
"freeipa_healthcheck_log"
})
- return
-
def postproc(self):
match = r"(\s*arg \"password )[^\"]*"
subst = r"\1********"
diff --git a/sos/report/plugins/iprconfig.py b/sos/report/plugins/iprconfig.py
index a304107f..126b8ddb 100644
--- a/sos/report/plugins/iprconfig.py
+++ b/sos/report/plugins/iprconfig.py
@@ -44,11 +44,11 @@ class IprConfig(Plugin, IndependentPlugin):
devices = []
if show_ioas['output']:
- p = re.compile('sg')
+ dev = re.compile('sg')
for line in show_ioas['output'].splitlines():
temp = line.split(' ')
# temp[0] holds the device name
- if p.search(temp[0]):
+ if dev.search(temp[0]):
devices.append(temp[0])
for device in devices:
@@ -79,10 +79,7 @@ class IprConfig(Plugin, IndependentPlugin):
show_alt_config = "iprconfig -c show-alt-config"
altconfig = self.collect_cmd_output(show_alt_config)
- if not (altconfig['status'] == 0):
- return
-
- if not altconfig['output']:
+ if (altconfig['status'] != 0) or not altconfig['output']:
return
# iprconfig -c show-alt-config
@@ -102,7 +99,7 @@ class IprConfig(Plugin, IndependentPlugin):
if "Enclosure" in line:
temp = re.split(r'\s+', line)
# temp[1] holds the PCI/SCSI location
- pci, scsi = temp[1].split('/')
+ _, scsi = temp[1].split('/')
for alt_line in altconfig['output'].splitlines():
if scsi in alt_line:
temp = alt_line.split(' ')
diff --git a/sos/report/plugins/iscsi.py b/sos/report/plugins/iscsi.py
index 3a93aa84..eda383d4 100644
--- a/sos/report/plugins/iscsi.py
+++ b/sos/report/plugins/iscsi.py
@@ -56,18 +56,13 @@ class Iscsi(Plugin):
class RedHatIscsi(Iscsi, RedHatPlugin):
+ """ RedHatPlugin's setup() will be invoked """
packages = ('iscsi-initiator-utils',)
- def setup(self):
- super(RedHatIscsi, self).setup()
-
class DebianIscsi(Iscsi, DebianPlugin, UbuntuPlugin):
packages = ('open-iscsi',)
- def setup(self):
- super(DebianIscsi, self).setup()
-
# vim: set et ts=4 sw=4 :
diff --git a/sos/report/plugins/iscsitarget.py b/sos/report/plugins/iscsitarget.py
index 6efe50bf..9c334f12 100644
--- a/sos/report/plugins/iscsitarget.py
+++ b/sos/report/plugins/iscsitarget.py
@@ -25,7 +25,7 @@ class RedHatIscsiTarget(IscsiTarget, RedHatPlugin):
packages = ('scsi-target-utils',)
def setup(self):
- super(RedHatIscsiTarget, self).setup()
+ super().setup()
self.add_copy_spec("/etc/tgt/targets.conf")
self.add_cmd_output("tgtadm --lld iscsi --op show --mode target")
@@ -35,7 +35,7 @@ class DebianIscsiTarget(IscsiTarget, DebianPlugin, UbuntuPlugin):
packages = ('iscsitarget',)
def setup(self):
- super(DebianIscsiTarget, self).setup()
+ super().setup()
self.add_copy_spec([
"/etc/iet",
"/etc/sysctl.d/30-iscsitarget.conf",
diff --git a/sos/report/plugins/jars.py b/sos/report/plugins/jars.py
index 4b98684e..9c33c513 100644
--- a/sos/report/plugins/jars.py
+++ b/sos/report/plugins/jars.py
@@ -90,8 +90,8 @@ class Jars(Plugin, RedHatPlugin):
"""
if os.path.isfile(path) and zipfile.is_zipfile(path):
try:
- with zipfile.ZipFile(path) as f:
- if "META-INF/MANIFEST.MF" in f.namelist():
+ with zipfile.ZipFile(path) as file:
+ if "META-INF/MANIFEST.MF" in file.namelist():
return True
except (IOError, zipfile.BadZipfile):
pass
@@ -107,12 +107,12 @@ class Jars(Plugin, RedHatPlugin):
"""
props = {}
try:
- with zipfile.ZipFile(jar_path) as f:
- r = re.compile("META-INF/maven/[^/]+/[^/]+/pom.properties$")
- result = [x for x in f.namelist() if r.match(x)]
+ with zipfile.ZipFile(jar_path) as file:
+ rgx = re.compile("META-INF/maven/[^/]+/[^/]+/pom.properties$")
+ result = [x for x in file.namelist() if rgx.match(x)]
if len(result) != 1:
return None
- with f.open(result[0]) as props_f:
+ with file.open(result[0]) as props_f:
for line in props_f.readlines():
line = line.strip()
if not line.startswith(b"#"):
@@ -135,11 +135,11 @@ class Jars(Plugin, RedHatPlugin):
"""
jar_id = ""
try:
- with open(jar_path, mode="rb") as f:
- m = hashlib.sha1()
- for buf in iter(partial(f.read, 4096), b''):
- m.update(buf)
- jar_id = m.hexdigest()
+ with open(jar_path, mode="rb") as file:
+ digest = hashlib.sha1()
+ for buf in iter(partial(file.read, 4096), b''):
+ digest.update(buf)
+ jar_id = digest.hexdigest()
except IOError:
pass
return jar_id
diff --git a/sos/report/plugins/kata_containers.py b/sos/report/plugins/kata_containers.py
index 21b254f4..306df6c0 100644
--- a/sos/report/plugins/kata_containers.py
+++ b/sos/report/plugins/kata_containers.py
@@ -18,11 +18,6 @@ class KataContainers(Plugin, IndependentPlugin):
packages = ('kata-containers',)
def setup(self):
- self.limit = self.get_option('log_size')
-
- if self.get_option('all_logs'):
- # no limit on amount of data recorded
- self.limit = None
self.add_cmd_output('kata-runtime kata-env')
diff --git a/sos/report/plugins/kdump.py b/sos/report/plugins/kdump.py
index bedb6316..e31e9408 100644
--- a/sos/report/plugins/kdump.py
+++ b/sos/report/plugins/kdump.py
@@ -43,27 +43,31 @@ class RedHatKDump(KDump, RedHatPlugin):
packages = ('kexec-tools',)
def fstab_parse_fs(self, device):
- with open(self.path_join('/etc/fstab'), 'r') as fp:
- for line in fp:
+ """ Parse /etc/fstab file """
+ fstab = self.path_join('/etc/fstab')
+ with open(fstab, 'r', encoding='UTF-8') as file:
+ for line in file:
if line.startswith((device)):
return line.split()[1].rstrip('/')
return ""
def read_kdump_conffile(self):
- fs = ""
+ """ Parse /etc/kdump file """
+ fsys = ""
path = "/var/crash"
- with open(self.path_join('/etc/kdump.conf'), 'r') as fp:
- for line in fp:
+ kdump = '/etc/kdump.conf'
+ with open(kdump, 'r', encoding='UTF-8') as file:
+ for line in file:
if line.startswith("path"):
path = line.split()[1]
elif line.startswith(("ext2", "ext3", "ext4", "xfs")):
device = line.split()[1]
- fs = self.fstab_parse_fs(device)
- return fs + path
+ fsys = self.fstab_parse_fs(device)
+ return fsys + path
def setup(self):
- super(RedHatKDump, self).setup()
+ super().setup()
initramfs_img = "/boot/initramfs-" + platform.release() \
+ "kdump.img"
@@ -80,7 +84,7 @@ class RedHatKDump(KDump, RedHatPlugin):
tags="vmcore_dmesg")
try:
path = self.read_kdump_conffile()
- except Exception:
+ except Exception: # pylint: disable=broad-except
# set no filesystem and default path
path = "/var/crash"
@@ -94,7 +98,7 @@ class DebianKDump(KDump, DebianPlugin, UbuntuPlugin):
packages = ('kdump-tools',)
def setup(self):
- super(DebianKDump, self).setup()
+ super().setup()
initramfs_img = "/var/lib/kdump/initrd.img-" + platform.release()
if self.path_exists(initramfs_img):
@@ -115,7 +119,7 @@ class CosKDump(KDump, CosPlugin):
]
def setup(self):
- super(CosKDump, self).setup()
+ super().setup()
self.add_cmd_output('ls -alRh /var/kdump*')
if self.get_option("collect-kdumps"):
self.add_copy_spec(["/var/kdump-*"])
diff --git a/sos/report/plugins/kernel.py b/sos/report/plugins/kernel.py
index f9c6dfeb..d09d1176 100644
--- a/sos/report/plugins/kernel.py
+++ b/sos/report/plugins/kernel.py
@@ -6,9 +6,9 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt
-from sos.policies.distros.redhat import RedHatPolicy
import glob
+from sos.policies.distros.redhat import RedHatPolicy
+from sos.report.plugins import Plugin, IndependentPlugin, PluginOpt
class Kernel(Plugin, IndependentPlugin):
@@ -82,7 +82,6 @@ class Kernel(Plugin, IndependentPlugin):
clocksource_path = "/sys/devices/system/clocksource/clocksource0/"
- # FIXME: provide a a long-term solution for #1299
self.add_forbidden_path([
'/sys/kernel/debug/tracing/trace_pipe',
'/sys/kernel/debug/tracing/README',
diff --git a/sos/report/plugins/kernelrt.py b/sos/report/plugins/kernelrt.py
index 6d14507c..b867ec5b 100644
--- a/sos/report/plugins/kernelrt.py
+++ b/sos/report/plugins/kernelrt.py
@@ -36,9 +36,9 @@ class KernelRT(Plugin, RedHatPlugin):
# note: rhbz#1059685 'tuna - NameError: global name 'cgroups' is not
# defined this command throws an exception on versions prior to
# 0.10.4-5.
- co = {'cmd': 'tuna --help', 'output': '-P'}
+ cout = {'cmd': 'tuna --help', 'output': '-P'}
option_present = self.test_predicate(
- self, pred=SoSPredicate(self, cmd_outputs=co)
+ self, pred=SoSPredicate(self, cmd_outputs=cout)
)
self.add_cmd_output(
f"tuna {'-CP' if option_present else 'show_threads -C'}"
diff --git a/sos/report/plugins/kpatch.py b/sos/report/plugins/kpatch.py
index 0c12991b..ee3987f5 100644
--- a/sos/report/plugins/kpatch.py
+++ b/sos/report/plugins/kpatch.py
@@ -8,8 +8,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin
import re
+from sos.report.plugins import Plugin, RedHatPlugin
class Kpatch(Plugin, RedHatPlugin):
@@ -28,7 +28,7 @@ class Kpatch(Plugin, RedHatPlugin):
for patch in kpatches:
if not re.match(r"^kpatch-.*\(.*\)", patch):
continue
- (module, version) = patch.split()
+ (module, _) = patch.split()
self.add_cmd_output("kpatch info " + module)
diff --git a/sos/report/plugins/krb5.py b/sos/report/plugins/krb5.py
index c7daac2f..7612652b 100644
--- a/sos/report/plugins/krb5.py
+++ b/sos/report/plugins/krb5.py
@@ -23,6 +23,7 @@ class Krb5(Plugin):
short_desc = 'Kerberos authentication'
plugin_name = 'krb5'
profiles = ('identity', 'system')
+ kdcdir = None
def setup(self):
self.add_copy_spec([
diff --git a/sos/report/plugins/kubernetes.py b/sos/report/plugins/kubernetes.py
index 4f0c1c6f..12b8a776 100644
--- a/sos/report/plugins/kubernetes.py
+++ b/sos/report/plugins/kubernetes.py
@@ -9,10 +9,10 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin,
- UbuntuPlugin, PluginOpt)
from fnmatch import translate
import re
+from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin,
+ UbuntuPlugin, PluginOpt)
class Kubernetes(Plugin):
@@ -34,9 +34,22 @@ class Kubernetes(Plugin):
]
kube_cmd = "kubectl"
+ resources = [
+ 'deployments',
+ 'ingresses',
+ 'limitranges',
+ 'pods',
+ 'policies',
+ 'pvc',
+ 'rc',
+ 'resourcequotas',
+ 'routes',
+ 'services'
+ ]
def check_is_master(self):
- return any([self.path_exists(f) for f in self.files])
+ """ Check if this is the master node """
+ return any(self.path_exists(f) for f in self.files)
def setup(self):
self.add_copy_spec("/etc/kubernetes")
@@ -69,29 +82,9 @@ class Kubernetes(Plugin):
if not self.check_is_master():
return
- kube_get_cmd = "get -o json "
for subcmd in ['version', 'config view']:
self.add_cmd_output('%s %s' % (self.kube_cmd, subcmd))
- # get all namespaces in use
- kn = self.collect_cmd_output('%s get namespaces' % self.kube_cmd)
- # namespace is the 1st word on line, until the line has spaces only
- kn_output = kn['output'].splitlines()[1:]
- knsps = [n.split()[0] for n in kn_output if n and len(n.split())]
-
- resources = [
- 'deployments',
- 'ingresses',
- 'limitranges',
- 'pods',
- 'policies',
- 'pvc',
- 'rc',
- 'resourcequotas',
- 'routes',
- 'services'
- ]
-
# these are not namespaced, must pull separately.
global_resources = [
'namespaces',
@@ -121,40 +114,51 @@ class Kubernetes(Plugin):
# CNV is not part of the base installation, but can be added
if self.is_installed('kubevirt-virtctl'):
- resources.extend(['vms', 'vmis'])
+ self.resources.extend(['vms', 'vmis'])
self.add_cmd_output('virtctl version')
- for n in knsps:
- knsp = '--namespace=%s' % n
+ self.collect_per_resource_details()
+ self.collect_all_resources()
+
+ def collect_per_resource_details(self):
+ """ Collect details about each resource in all namespaces """
+ # get all namespaces in use
+ kns = self.collect_cmd_output('%s get namespaces' % self.kube_cmd)
+ # namespace is the 1st word on line, until the line has spaces only
+ kn_output = kns['output'].splitlines()[1:]
+ knsps = [n.split()[0] for n in kn_output if n and len(n.split())]
+
+ for nspace in knsps:
+ knsp = '--namespace=%s' % nspace
if self.get_option('all'):
- k_cmd = '%s %s %s' % (self.kube_cmd, kube_get_cmd, knsp)
+ k_cmd = '%s %s %s' % (self.kube_cmd, "get -o json", knsp)
self.add_cmd_output('%s events' % k_cmd)
- for res in resources:
+ for res in self.resources:
self.add_cmd_output('%s %s' % (k_cmd, res), subdir=res)
if self.get_option('describe'):
# need to drop json formatting for this
k_cmd = '%s %s' % (self.kube_cmd, knsp)
- for res in resources:
- r = self.exec_cmd('%s get %s' % (k_cmd, res))
- if r['status'] == 0:
+ for res in self.resources:
+ ret = self.exec_cmd('%s get %s' % (k_cmd, res))
+ if ret['status'] == 0:
k_list = [k.split()[0] for k in
- r['output'].splitlines()[1:]]
- for k in k_list:
+ ret['output'].splitlines()[1:]]
+ for item in k_list:
k_cmd = '%s %s' % (self.kube_cmd, knsp)
self.add_cmd_output(
- '%s describe %s %s' % (k_cmd, res, k),
+ '%s describe %s %s' % (k_cmd, res, item),
subdir=res
)
if self.get_option('podlogs'):
k_cmd = '%s %s' % (self.kube_cmd, knsp)
- r = self.exec_cmd('%s get pods' % k_cmd)
- if r['status'] == 0:
+ ret = self.exec_cmd('%s get pods' % k_cmd)
+ if ret['status'] == 0:
pods = [p.split()[0] for p in
- r['output'].splitlines()[1:]]
+ ret['output'].splitlines()[1:]]
# allow shell-style regex
reg = (translate(self.get_option('podlogs-filter')) if
self.get_option('podlogs-filter') else None)
@@ -164,9 +168,11 @@ class Kubernetes(Plugin):
self.add_cmd_output('%s logs %s' % (k_cmd, pod),
subdir='pods')
+ def collect_all_resources(self):
+ """ Collect details about all resources """
if not self.get_option('all'):
k_cmd = '%s get --all-namespaces=true' % self.kube_cmd
- for res in resources:
+ for res in self.resources:
self.add_cmd_output('%s %s' % (k_cmd, res), subdir=res)
def postproc(self):
@@ -203,7 +209,7 @@ class RedHatKubernetes(Kubernetes, RedHatPlugin):
# other changes the `oc` binary may implement
if self.path_exists('/etc/origin/master/admin.kubeconfig'):
self.kube_cmd = 'oc'
- super(RedHatKubernetes, self).setup()
+ super().setup()
class UbuntuKubernetes(Kubernetes, UbuntuPlugin, DebianPlugin):
@@ -232,7 +238,7 @@ class UbuntuKubernetes(Kubernetes, UbuntuPlugin, DebianPlugin):
if self.is_installed('microk8s'):
self.kube_cmd = 'microk8s kubectl'
- super(UbuntuKubernetes, self).setup()
+ super().setup()
# vim: et ts=5 sw=4
diff --git a/sos/report/plugins/landscape.py b/sos/report/plugins/landscape.py
index 1fd53ceb..cd401e64 100644
--- a/sos/report/plugins/landscape.py
+++ b/sos/report/plugins/landscape.py
@@ -6,8 +6,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, UbuntuPlugin
import os
+from sos.report.plugins import Plugin, UbuntuPlugin
class Landscape(Plugin, UbuntuPlugin):
@@ -27,7 +27,7 @@ class Landscape(Plugin, UbuntuPlugin):
'LANDSCAPE_API_URI',
]]
- if not (all(vars_all)):
+ if not all(vars_all):
self.soslog.warning("Not all environment variables set. "
"Source the environment file for the user "
"intended to connect to the Landscape "
diff --git a/sos/report/plugins/ldap.py b/sos/report/plugins/ldap.py
index 00373b04..f1b63a3a 100644
--- a/sos/report/plugins/ldap.py
+++ b/sos/report/plugins/ldap.py
@@ -18,7 +18,7 @@ class Ldap(Plugin):
ldap_conf = "/etc/openldap/ldap.conf"
def setup(self):
- super(Ldap, self).setup()
+ super().setup()
self.add_copy_spec("/etc/ldap.conf")
def postproc(self):
@@ -31,7 +31,7 @@ class RedHatLdap(Ldap, RedHatPlugin):
files = ('/etc/ldap.conf', '/etc/pam_ldap.conf')
def setup(self):
- super(RedHatLdap, self).setup()
+ super().setup()
self.add_forbidden_path([
"/etc/openldap/certs/password",
"/etc/openldap/certs/pwfile.txt",
@@ -50,10 +50,10 @@ class RedHatLdap(Ldap, RedHatPlugin):
self.add_cmd_output("certutil -L -d /etc/openldap")
def postproc(self):
- super(RedHatLdap, self).postproc()
- for f in ["/etc/nslcd.conf", "/etc/pam_ldap.conf"]:
+ super().postproc()
+ for file in ["/etc/nslcd.conf", "/etc/pam_ldap.conf"]:
self.do_file_sub(
- f,
+ file,
r"(\s*bindpw\s*)\S+",
r"\1********"
)
@@ -65,7 +65,7 @@ class DebianLdap(Ldap, DebianPlugin, UbuntuPlugin):
packages = ('slapd', 'ldap-utils')
def setup(self):
- super(DebianLdap, self).setup()
+ super().setup()
ldap_search = "ldapsearch -Q -LLL -Y EXTERNAL -H ldapi:/// "
@@ -88,7 +88,7 @@ class DebianLdap(Ldap, DebianPlugin, UbuntuPlugin):
suggest_filename="access_control_lists")
def postproc(self):
- super(DebianLdap, self).postproc()
+ super().postproc()
self.do_file_sub(
"/etc/nslcd.conf",
r"(\s*bindpw\s*)\S+",
diff --git a/sos/report/plugins/libraries.py b/sos/report/plugins/libraries.py
index 206cdfb6..015b3777 100644
--- a/sos/report/plugins/libraries.py
+++ b/sos/report/plugins/libraries.py
@@ -38,10 +38,10 @@ class Libraries(Plugin, IndependentPlugin):
# Collect library directories from ldconfig's cache
dirs = set()
for lib in ldconfig['output'].splitlines():
- s = lib.split(" => ", 2)
- if len(s) != 2:
+ fqlib = lib.split(" => ", 2)
+ if len(fqlib) != 2:
continue
- dirs.add(s[1].rsplit('/', 1)[0])
+ dirs.add(fqlib[1].rsplit('/', 1)[0])
if dirs:
self.add_cmd_output("ls -lanH %s" % " ".join(dirs),
diff --git a/sos/report/plugins/libvirt.py b/sos/report/plugins/libvirt.py
index 1404514e..cec249ad 100644
--- a/sos/report/plugins/libvirt.py
+++ b/sos/report/plugins/libvirt.py
@@ -6,8 +6,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, IndependentPlugin
import glob
+from sos.report.plugins import Plugin, IndependentPlugin
class Libvirt(Plugin, IndependentPlugin):
@@ -78,10 +78,11 @@ class Libvirt(Plugin, IndependentPlugin):
# get details of processes of KVM hosts
for pidfile in glob.glob("/run/libvirt/*/*.pid"):
- with open(pidfile, 'r') as pfile:
+ with open(pidfile, 'r', encoding='UTF-8') as pfile:
pid = pfile.read().splitlines()[0]
- for pf in ["environ", "cgroup", "maps", "numa_maps", "limits"]:
- self.add_copy_spec("/proc/%s/%s" % (pid, pf))
+ pr_files = ["environ", "cgroup", "maps", "numa_maps", "limits"]
+ for file in pr_files:
+ self.add_copy_spec("/proc/%s/%s" % (pid, file))
self.add_file_tags({
"/run/libvirt/qemu/*.xml": "var_qemu_xml",
diff --git a/sos/report/plugins/logs.py b/sos/report/plugins/logs.py
index f36dbae3..7487efdb 100644
--- a/sos/report/plugins/logs.py
+++ b/sos/report/plugins/logs.py
@@ -18,13 +18,14 @@ class LogsBase(Plugin):
profiles = ('system', 'hardware', 'storage')
def setup(self):
- confs = ['/etc/syslog.conf', '/etc/rsyslog.conf']
+ rsyslog = '/etc/rsyslog.conf'
+ confs = ['/etc/syslog.conf', rsyslog]
logs = []
since = self.get_option("since")
- if self.path_exists('/etc/rsyslog.conf'):
- with open(self.path_join('/etc/rsyslog.conf'), 'r') as conf:
+ if self.path_exists(rsyslog):
+ with open(self.path_join(rsyslog), 'r', encoding='UTF-8') as conf:
for line in conf.readlines():
if line.startswith('$IncludeConfig'):
confs += glob.glob(line.split()[1])
@@ -61,8 +62,8 @@ class LogsBase(Plugin):
# - there is some data present, either persistent or runtime only
# - systemd-journald service exists
# otherwise fallback to collecting few well known logfiles directly
- journal = any([self.path_exists(self.path_join(p, "log/journal/"))
- for p in ["/var", "/run"]])
+ journal = any(self.path_exists(self.path_join(p, "log/journal/"))
+ for p in ["/var", "/run"])
if journal and self.is_service("systemd-journald"):
self.add_journal(since=since, tags=['journal_full', 'journal_all'],
priority=100)
@@ -126,7 +127,7 @@ class CosLogs(LogsBase, CosPlugin):
]
def setup(self):
- super(CosLogs, self).setup()
+ super().setup()
if self.get_option("all_logs"):
self.add_cmd_output("journalctl -o export")
else:
diff --git a/sos/report/plugins/lustre.py b/sos/report/plugins/lustre.py
index a2aeb0ef..d3ad0139 100644
--- a/sos/report/plugins/lustre.py
+++ b/sos/report/plugins/lustre.py
@@ -6,8 +6,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin
import re
+from sos.report.plugins import Plugin, RedHatPlugin
class Lustre(Plugin, RedHatPlugin):
diff --git a/sos/report/plugins/maas.py b/sos/report/plugins/maas.py
index ef05d590..1718f7b9 100644
--- a/sos/report/plugins/maas.py
+++ b/sos/report/plugins/maas.py
@@ -47,6 +47,8 @@ class Maas(Plugin, UbuntuPlugin):
desc='Credentials, or the API key')
]
+ is_snap = False
+
def _has_login_options(self):
return self.get_option("url") and self.get_option("credentials") \
and self.get_option("profile-name")
@@ -69,8 +71,8 @@ class Maas(Plugin, UbuntuPlugin):
return False
def setup(self):
- self._is_snap = self._is_snap_installed()
- if self._is_snap:
+ self.is_snap = self._is_snap_installed()
+ if self.is_snap:
self.add_cmd_output([
'snap info maas',
'maas status'
@@ -130,7 +132,7 @@ class Maas(Plugin, UbuntuPlugin):
"Cannot login into MAAS remote API with provided creds.")
def postproc(self):
- if self._is_snap:
+ if self.is_snap:
regiond_path = "/var/snap/maas/current/maas/regiond.conf"
else:
regiond_path = "/etc/maas/regiond.conf"
diff --git a/sos/report/plugins/manageiq.py b/sos/report/plugins/manageiq.py
index e20c4a2a..e22e44b8 100644
--- a/sos/report/plugins/manageiq.py
+++ b/sos/report/plugins/manageiq.py
@@ -11,9 +11,9 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin
from os import environ
import os.path
+from sos.report.plugins import Plugin, RedHatPlugin
class ManageIQ(Plugin, RedHatPlugin):
diff --git a/sos/report/plugins/mellanox_firmware.py b/sos/report/plugins/mellanox_firmware.py
index c56c65a4..e68543b6 100644
--- a/sos/report/plugins/mellanox_firmware.py
+++ b/sos/report/plugins/mellanox_firmware.py
@@ -8,9 +8,9 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, IndependentPlugin
import os
import time
+from sos.report.plugins import Plugin, IndependentPlugin
class MellanoxFirmware(Plugin, IndependentPlugin):
@@ -39,24 +39,22 @@ class MellanoxFirmware(Plugin, IndependentPlugin):
"enable this collection.")
return
- """
- Run only if mft package is installed.
- flint is available from the mft package.
- """
- co = self.exec_cmd('flint --version')
- if co['status'] != 0:
+ # Run only if mft package is installed.
+ # flint is available from the mft package.
+ cout = self.exec_cmd('flint --version')
+ if cout['status'] != 0:
return
- co = self.collect_cmd_output('mst start')
- if co['status'] != 0:
+ cout = self.collect_cmd_output('mst start')
+ if cout['status'] != 0:
return
self.collect_cmd_output('mst cable add')
self.collect_cmd_output("mst status -v", timeout=10)
self.collect_cmd_output("mlxcables", timeout=10)
- co = os.listdir("/dev/mst")
+ cout = os.listdir("/dev/mst")
mlxcables = []
- for device in co:
+ for device in cout:
if 'cable' in device:
mlxcables.append(device)
for mlxcable in mlxcables:
@@ -70,39 +68,30 @@ class MellanoxFirmware(Plugin, IndependentPlugin):
# Get all devices which have the vendor Mellanox Technologies
devices = []
device_list = self.collect_cmd_output('lspci -D -d 15b3::0200')
- """
- Will return a string of the following format:
- 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892
- Family
- """
+ # Will return a string of the following format:
+ # 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892 Family
if device_list['status'] != 0:
# bail out if there no Mellanox PCI devices
return
for line in device_list["output"].splitlines():
- """
- Should return 0000:08:00.0
- from the following string
- 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892
- Family
- """
+ # Should return 0000:08:00.0
+ # from the following string
+ # 0000:08:00.0 Ethernet controller: Mellanox Technologies MT2892
+ # Family
devices.append(line[0:8]+'00.0')
devices = set(devices)
- """
# Mft package is present if OFED is installed
# mstflint package is part of the distro and can be installed.
- """
commands = []
# mft package is installed if flint command is available
- co = self.exec_cmd('flint --version')
- if co['status'] != 0:
- """
- mstflint package commands
- the commands do not support position independent arguments
- """
+ cout = self.exec_cmd('flint --version')
+ if cout['status'] != 0:
+ # mstflint package commands
+ # the commands do not support position independent arguments
commands = [
["mstconfig -d ", " -e q"],
["mstflint -d ", " dc"],
@@ -111,10 +100,8 @@ class MellanoxFirmware(Plugin, IndependentPlugin):
["mstlink -d ", ""],
]
else:
- """
- mft package commands
- the commands do not support position independent arguments
- """
+ # mft package commands
+ # the commands do not support position independent arguments
commands = [
["mlxdump -d ", " pcie_uc --all"],
["mstconfig -d ", " -e q"],
@@ -129,12 +116,10 @@ class MellanoxFirmware(Plugin, IndependentPlugin):
self.add_cmd_output(f"{command[0]} {device} "
f"{command[1]}", timeout=30)
- """
- Dump the output of the mstdump command three times
- waiting for one second. This output is useful to check
- if certain registers changed
- """
- for i in range(3):
+ # Dump the output of the mstdump command three times
+ # waiting for one second. This output is useful to check
+ # if certain registers changed
+ for _ in range(3):
self.add_cmd_output(f"mstdump {device}")
time.sleep(1)
diff --git a/sos/report/plugins/memcached.py b/sos/report/plugins/memcached.py
index ab262243..1b02baa4 100644
--- a/sos/report/plugins/memcached.py
+++ b/sos/report/plugins/memcached.py
@@ -25,7 +25,7 @@ class RedHatMemcached(Memcached, RedHatPlugin):
files = ('/etc/sysconfig/memcached',)
def setup(self):
- super(RedHatMemcached, self).setup()
+ super().setup()
self.add_copy_spec("/etc/sysconfig/memcached",
tags="sysconfig_memcached")
@@ -35,7 +35,7 @@ class DebianMemcached(Memcached, DebianPlugin, UbuntuPlugin):
files = ('/etc/default/memcached',)
def setup(self):
- super(DebianMemcached, self).setup()
+ super().setup()
self.add_copy_spec([
"/etc/memcached.conf",
"/etc/default/memcached"
diff --git a/sos/report/plugins/microshift.py b/sos/report/plugins/microshift.py
index 13bbed66..a6605e84 100644
--- a/sos/report/plugins/microshift.py
+++ b/sos/report/plugins/microshift.py
@@ -7,8 +7,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin, PluginOpt
import re
+from sos.report.plugins import Plugin, RedHatPlugin, PluginOpt
class Microshift(Plugin, RedHatPlugin):
@@ -47,7 +47,7 @@ class Microshift(Plugin, RedHatPlugin):
allow for end users to specify namespace regexes of their own.
"""
if self.get_option('only-namespaces'):
- return [n for n in self.get_option('only-namespaces').split(':')]
+ return list(self.get_option('only-namespaces').split(':'))
collect_regexes = [
r'^openshift\-.+$',
@@ -68,21 +68,21 @@ class Microshift(Plugin, RedHatPlugin):
:param nsps list: Namespace names from oc output
"""
- def _match_namespace(namespace):
+ def _match_namespace(namespace, regexes):
"""Match a particular namespace for inclusion (or not) in the
collection phases
:param namespace str: The name of a namespace
"""
- for regex in self.collect_regexes:
+ for regex in regexes:
if re.match(regex, namespace):
return True
return False
- self.collect_regexes = self._setup_namespace_regexes()
+ regexes = self._setup_namespace_regexes()
- return list(set([n for n in nsps if _match_namespace(n)]))
+ return list(set(n for n in nsps if _match_namespace(n, regexes)))
def _get_namespaces(self):
res = self.exec_cmd(
diff --git a/sos/report/plugins/microshift_ovn.py b/sos/report/plugins/microshift_ovn.py
index 2725eda5..68cf0405 100644
--- a/sos/report/plugins/microshift_ovn.py
+++ b/sos/report/plugins/microshift_ovn.py
@@ -33,13 +33,13 @@ class MicroshiftOVN(Plugin, RedHatPlugin):
'memory/show',
'ovsdb-server/sync-status'
]
- for file, db in [('ovnnb_db.ctl', 'OVN_Northbound'),
- ('ovnsb_db.ctl', 'OVN_Southbound')]:
+ for file, dbn in [('ovnnb_db.ctl', 'OVN_Northbound'),
+ ('ovnsb_db.ctl', 'OVN_Southbound')]:
self.add_cmd_output(
[f"{_ovs_cmd}{file} {cmd}" for cmd in _subcmds],
timeout=MicroshiftOVN.plugin_timeout)
self.add_cmd_output(
- f"{_ovs_cmd}{file} ovsdb-server/get-db-storage-status {db}",
+ f"{_ovs_cmd}{file} ovsdb-server/get-db-storage-status {dbn}",
timeout=MicroshiftOVN.plugin_timeout)
self.add_cmd_output(
diff --git a/sos/report/plugins/mongodb.py b/sos/report/plugins/mongodb.py
index a21ad769..c7379677 100644
--- a/sos/report/plugins/mongodb.py
+++ b/sos/report/plugins/mongodb.py
@@ -37,10 +37,10 @@ class MongoDb(Plugin, DebianPlugin, UbuntuPlugin):
self.add_cmd_output("du -sh /var/lib/mongodb/")
def postproc(self):
- for f in ["/etc/mongodb.conf",
- self.var_puppet_gen + "/etc/mongodb.conf"]:
+ for file in ["/etc/mongodb.conf",
+ self.var_puppet_gen + "/etc/mongodb.conf"]:
self.do_file_sub(
- f,
+ file,
r"(mms-token)\s*=\s*(.*)",
r"\1 = ********"
)
@@ -56,7 +56,7 @@ class RedHatMongoDb(MongoDb, RedHatPlugin):
)
def setup(self):
- super(RedHatMongoDb, self).setup()
+ super().setup()
self.add_copy_spec([
"/etc/sysconfig/mongodb",
"/etc/rh-mongodb*-mongo*.conf",
diff --git a/sos/report/plugins/monit.py b/sos/report/plugins/monit.py
index 26776f4b..0c061f63 100644
--- a/sos/report/plugins/monit.py
+++ b/sos/report/plugins/monit.py
@@ -9,8 +9,8 @@
#
# See the LICENSE file in the source distribution for further information.
-from sos.report.plugins import Plugin, RedHatPlugin
from glob import glob
+from sos.report.plugins import Plugin, RedHatPlugin
class Monit(Plugin, RedHatPlugin):
@@ -21,7 +21,6 @@ class Monit(Plugin, RedHatPlugin):
plugin_name = 'monit'
# Define configuration files
- # FIXME: direct globs will fail in container environments.
monit_conf = glob("/etc/monit.d/*")
monit_conf.append("/etc/monit.conf")
monit_conf.append("/etc/monitrc")
diff --git a/sos/report/plugins/mssql.py b/sos/report/plugins/mssql.py
index 9a4d643c..58f5aef5 100644
--- a/sos/report/plugins/mssql.py
+++ b/sos/report/plugins/mssql.py
@@ -43,7 +43,7 @@ class MsSQL(Plugin, RedHatPlugin):
sqlagent_errorlogfile = '/var/opt/mssql/log/sqlagentstartup.log'
kerberoskeytabfile = None
try:
- with open(mssql_conf, 'r') as mfile:
+ with open(mssql_conf, 'r', encoding='UTF-8') as mfile:
for line in mfile.read().splitlines():
if line.startswith('['):
section = line
@@ -54,9 +54,9 @@ class MsSQL(Plugin, RedHatPlugin):
errorlogfile = words[1].strip()
elif section == '[sqlagent]':
sqlagent_errorlogfile = words[1].strip()
- elif words[0].strip() == 'kerberoskeytabfile':
- if section == '[network]':
- kerberoskeytabfile = words[1].strip()
+ elif (words[0].strip() == 'kerberoskeytabfile') and \
+ (section == '[network]'):
+ kerberoskeytabfile = words[1].strip()
except IOError as ex:
self._log_error('Could not open conf file %s: %s' %
(mssql_conf, ex))
diff --git a/sos/report/plugins/mvcli.py b/sos/report/plugins/mvcli.py
index 93d81a8c..f5d1feee 100644
--- a/sos/report/plugins/mvcli.py
+++ b/sos/report/plugins/mvcli.py
@@ -13,7 +13,7 @@
from sos.report.plugins import Plugin, IndependentPlugin
-class mvCLI(Plugin, IndependentPlugin):
+class MvCLI(Plugin, IndependentPlugin):
short_desc = 'mvCLI Integrated RAID adapter information'
diff --git a/sos/report/plugins/mysql.py b/sos/report/plugins/mysql.py
index 8c4d534a..ac30d16b 100644
--- a/sos/report/plugins/mysql.py
+++ b/sos/report/plugins/mysql.py
@@ -6,9 +6,9 @@
#
# See the LICENSE file in the source distribution for further information.
+import os
from sos.report.plugins import (Plugin, RedHatPlugin, DebianPlugin,
UbuntuPlugin, PluginOpt)
-import os
class Mysql(Plugin):
@@ -30,7 +30,7 @@ class Mysql(Plugin):
]
def setup(self):
- super(Mysql, self).setup()
+ super().setup()
self.add_copy_spec([
self.mysql_cnf,
@@ -96,7 +96,7 @@ class RedHatMysql(Mysql, RedHatPlugin):
)
def setup(self):
- super(RedHatMysql, self).setup()
+ super().setup()
self.add_copy_spec([
"/etc/ld.so.conf.d/mysql-*.conf",
"/etc/ld.so.conf.d/mariadb-*.conf",
@@ -116,7 +116,7 @@ class DebianMysql(Mysql, DebianPlugin, UbuntuPlugin):
)
def setup(self):
- super(DebianMysql, self).setup()
+ super().setup()
self.add_copy_spec([
"/etc/mysql/",
"/var/log/mysql/error.log",
diff --git a/sos/report/plugins/networkmanager.py b/sos/report/plugins/networkmanager.py
index e91d77fd..5d0dcd99 100644
--- a/sos/report/plugins/networkmanager.py
+++ b/sos/report/plugins/networkmanager.py
@@ -38,7 +38,7 @@ class NetworkManager(Plugin, RedHatPlugin, UbuntuPlugin):
# NetworkManager >= 0.9.9 will use the long names of
# "nmcli" objects.
- # All versions conform to the following templates with differnt
+ # All versions conform to the following templates with different
# strings for the object being operated on.
nmcli_con_details_template = "nmcli con %s id"
nmcli_dev_details_template = "nmcli dev %s"
@@ -91,7 +91,7 @@ class NetworkManager(Plugin, RedHatPlugin, UbuntuPlugin):
# nm names may contain embedded quotes (" and '). These
# will cause an exception in shlex.split() if the quotes
# are unbalanced. This may happen with names like:
- # "Foobar's Wireless Network". Although the problen will
+ # "Foobar's Wireless Network". Although the problem will
# occur for both single and double quote characters the
# former is considerably more likely in object names since
# it is syntactically valid in many human languages.