author     astokes <astokes@ef72aa8b-4018-0410-8976-d6e080ef94d8>  2008-10-23 13:31:29 +0000
committer  astokes <astokes@ef72aa8b-4018-0410-8976-d6e080ef94d8>  2008-10-23 13:31:29 +0000
commit     8d443ca846f670e971f09235ca792e45239f6f81 (patch)
tree       dc9caa0c9414fe876707162f53b4722b54f3cdf7 /src/lib
parent     c5bf8bbac0c3c672d6bb22d0c54c9ab479abfde3 (diff)
download   sos-8d443ca846f670e971f09235ca792e45239f6f81.tar.gz

sync
git-svn-id: svn+ssh://svn.fedorahosted.org/svn/sos/trunk@532 ef72aa8b-4018-0410-8976-d6e080ef94d8
Diffstat (limited to 'src/lib')
-rwxr-xr-x  src/lib/sos/helpers.py              123
-rw-r--r--  src/lib/sos/plugins/autofs.py        36
-rw-r--r--  src/lib/sos/plugins/cluster.py      351
-rw-r--r--  src/lib/sos/plugins/devicemapper.py  18
-rw-r--r--  src/lib/sos/plugins/filesys.py        9
-rw-r--r--  src/lib/sos/plugins/general.py       11
-rw-r--r--  src/lib/sos/plugins/hardware.py      17
-rw-r--r--  src/lib/sos/plugins/initrd.py         4
-rw-r--r--  src/lib/sos/plugins/kernel.py        57
-rw-r--r--  src/lib/sos/plugins/ldap.py          25
-rw-r--r--  src/lib/sos/plugins/named.py         25
-rw-r--r--  src/lib/sos/plugins/networking.py    14
-rw-r--r--  src/lib/sos/plugins/nfsserver.py      5
-rw-r--r--  src/lib/sos/plugins/pam.py            2
-rw-r--r--  src/lib/sos/plugins/process.py        3
-rw-r--r--  src/lib/sos/plugins/rpm.py           14
-rw-r--r--  src/lib/sos/plugins/s390.py           6
-rw-r--r--  src/lib/sos/plugins/selinux.py       24
-rw-r--r--  src/lib/sos/plugins/squid.py          6
-rw-r--r--  src/lib/sos/plugins/startup.py        1
-rw-r--r--  src/lib/sos/plugins/systemtap.py      9
-rw-r--r--  src/lib/sos/plugins/x11.py           13
-rw-r--r--  src/lib/sos/plugins/xen.py           17
-rw-r--r--  src/lib/sos/plugins/yum.py           21
-rw-r--r--  src/lib/sos/plugintools.py          323
-rwxr-xr-x  src/lib/sos/policyredhat.py         329
26 files changed, 847 insertions, 616 deletions
diff --git a/src/lib/sos/helpers.py b/src/lib/sos/helpers.py
index bcdee6fb..77f85b01 100755
--- a/src/lib/sos/helpers.py
+++ b/src/lib/sos/helpers.py
@@ -25,9 +25,29 @@
"""
helper functions used by sosreport and plugins
"""
-import os, popen2, fcntl, select, itertools, sys, commands
-from time import time
-from tempfile import mkdtemp
+import os, popen2, fcntl, select, sys, commands, signal
+from time import time, sleep
+
+if sys.version_info[0] <= 2 and sys.version_info[1] <= 2:
+ # it's RHEL3, activate work-arounds
+ #
+ import sos.rhel3_logging
+ logging = sos.rhel3_logging
+
+ def mkdtemp(suffix = "", prefix = "temp_"):
+ import random
+ while True:
+ tempdir = "/tmp/%s_%d%s" % (prefix, random.randint(1,9999999), suffix)
+ if not os.path.exists(tempdir): break
+ os.mkdir(tempdir)
+ return tempdir
+
+ os.path.sep = "/"
+ os.path.pardir = ".."
+else:
+ # RHEL4+, business as usual
+ import logging
+ from tempfile import mkdtemp
def importPlugin(pluginname, name):
""" Import a plugin to extend capabilities of sosreport
@@ -57,17 +77,72 @@ def makeNonBlocking(afd):
fcntl.fcntl(afd, fcntl.F_SETFL, fl | os.FNDELAY)
-def sosGetCommandOutput(command):
+def sosGetCommandOutput(command, timeout = 300):
""" Execute a command and gather stdin, stdout, and return status.
"""
- stime = time()
- inpipe, pipe = os.popen4(command, 'r')
- inpipe.close()
- text = pipe.read()
- sts = pipe.close()
- if sts is None: sts = 0
- if text[-1:] == '\n': text = text[:-1]
- return (sts, text, time()-stime)
+ soslog = logging.getLogger('sos')
+
+ # Log if binary is not runnable or does not exist
+ for path in os.environ["PATH"].split(":"):
+ cmdfile = command.strip("(").split()[0]
+ # handle both absolute or relative paths
+ if ( ( not os.path.isabs(cmdfile) and os.access(os.path.join(path,cmdfile), os.X_OK) ) or \
+ ( os.path.isabs(cmdfile) and os.access(cmdfile, os.X_OK) ) ):
+ break
+ else:
+ soslog.log(logging.VERBOSE, "binary '%s' does not exist or is not runnable" % cmdfile)
+ return (127, "", 0)
+
+ # these are file descriptors, not file objects
+ r, w = os.pipe()
+
+ pid = os.fork()
+
+ if pid:
+ # we are the parent
+ os.close(w) # use os.close() to close a file descriptor
+ r_fd = os.fdopen(r) # turn r into a file object
+ stime=time()
+ txt = ""
+ sts = -1
+ soslog.log(logging.VERBOSE2, 'forked command "%s" with pid %d, timeout is %d' % (command, pid, timeout) )
+ while True:
+ # read output from pipe
+ ready = select.select([r], [], [], 1)
+ if r in ready[0]:
+ txt = txt + r_fd.read()
+ # is child still running ?
+ try: os.waitpid(pid, os.WNOHANG)
+ except:
+ # not running, make sure the child process gets cleaned up
+ try: sts = os.waitpid(pid, 0)[1]
+ except: pass
+ break
+ # has timeout passed ?
+ if time() - stime > timeout:
+ soslog.log(logging.VERBOSE, 'killing hung child with pid %s after %d seconds (command was "%s")' % (pid,timeout,command) )
+ try: os.kill(pid, signal.SIGKILL)
+ except: pass
+ break
+ if txt[-1:] == '\n': txt = txt[:-1]
+ return (sts, txt, time()-stime)
+ else:
+ # we are the child
+ os.dup2(r, 0)
+ os.dup2(w, 1)
+ os.dup2(w, 2)
+
+ import resource
+ maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
+ if not hasattr(resource, "RLIM_INFINITY"):
+ resource.RLIM_INFINITY = -1L
+ if (maxfd == resource.RLIM_INFINITY):
+ maxfd = MAXFD
+ for fd in range(3, maxfd):
+ try: os.close(fd)
+ except OSError: pass
+ os.execl("/bin/sh", "/bin/sh", "-c", command)
+ os._exit(127)
# FIXME: this needs to be made clean and moved to the plugin tools, so
# that it prints nice color output like sysreport if the progress bar
@@ -92,23 +167,11 @@ def allEqual(elements):
return False
return True
-
-def commonPrefix(*sequences):
+def commonPrefix(l1, l2, common = []):
''' return a list of common elements at the start of all sequences,
then a list of lists that are the unique tails of each sequence. '''
- # if there are no sequences at all, we're done
- if not sequences:
- return [], []
- # loop in parallel on the sequences
- common = []
- for elements in itertools.izip(*sequences):
- # unless all elements are equal, bail out of the loop
- if not allEqual(elements):
- break
- # got one more common element, append it and keep looping
- common.append(elements[0])
- # return the common prefix and unique tails
- return common, [ sequence[len(common):] for sequence in sequences ]
+ if len(l1) < 1 or len(l2) < 1 or l1[0] != l2[0]: return common, [l1, l2]
+ return commonPrefix(l1[1:], l2[1:], common+[l1[0]])
def sosRelPath(path1, path2, sep=os.path.sep, pardir=os.path.pardir):
''' return a relative path from path1 equivalent to path path2.
@@ -124,3 +187,9 @@ def sosRelPath(path1, path2, sep=os.path.sep, pardir=os.path.pardir):
return path2 # leave path absolute if nothing at all in common
return sep.join( [pardir]*len(u1) + u2 )
+def sosReadFile(fname):
+ ''' reads a file and returns its contents'''
+ fp = open(fname,"r")
+ content = fp.read()
+ fp.close()
+ return content
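
The new sosGetCommandOutput() above replaces os.popen4() with a fork/exec that polls the child's output and kills it once a timeout expires. The following condensed, standalone sketch shows that pattern; it is not part of the commit, and the function name run_with_timeout() is purely illustrative.

import os, select, signal, time

def run_with_timeout(command, timeout=300):
    r, w = os.pipe()                       # raw file descriptors, not file objects
    pid = os.fork()
    if pid == 0:
        # child: send stdout/stderr down the pipe and exec the command
        os.close(r)
        os.dup2(w, 1)
        os.dup2(w, 2)
        os.execl("/bin/sh", "/bin/sh", "-c", command)
        os._exit(127)                      # only reached if exec fails
    # parent: read until EOF or until the timeout expires
    os.close(w)
    output, start = "", time.time()
    while True:
        ready = select.select([r], [], [], 1)[0]
        if ready:
            chunk = os.read(r, 4096)
            if not chunk:                  # EOF: the child closed its end
                break
            output += chunk
        if time.time() - start > timeout:
            os.kill(pid, signal.SIGKILL)   # give up on a hung command
            break
    status = os.waitpid(pid, 0)[1]         # reap the child
    os.close(r)
    return status, output, time.time() - start

For example, run_with_timeout("/bin/sleep 600", timeout=5) returns after roughly five seconds with a non-zero status instead of blocking the whole report.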
diff --git a/src/lib/sos/plugins/autofs.py b/src/lib/sos/plugins/autofs.py
index 48dd57b4..16a8d3fb 100644
--- a/src/lib/sos/plugins/autofs.py
+++ b/src/lib/sos/plugins/autofs.py
@@ -15,7 +15,7 @@
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sos.plugintools
-import os, re
+import os
class autofs(sos.plugintools.PluginBase):
"""autofs server-related information
@@ -24,38 +24,8 @@ class autofs(sos.plugintools.PluginBase):
if self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService("autofs"):
return True
return False
-
- def checkdebug(self):
- """ testing if autofs debug has been enabled anywhere
- """
- # Global debugging
- optlist=[]
- opt = self.fileGrep(r"^(DEFAULT_LOGGING|DAEMONOPTIONS)=(.*)", "/etc/sysconfig/autofs")
- for opt1 in opt:
- for opt2 in opt1.split(" "):
- optlist.append(opt2)
- for dtest in optlist:
- if dtest == "--debug" or dtest == "debug":
- return True
-
- def getdaemondebug(self):
- """ capture daemon debug output
- """
- debugout = self.fileGrep(r"^(daemon.*)\s+(\/var\/log\/.*)", "/etc/sysconfig/autofs")
- for i in debugout:
- return i[1]
-
+
def setup(self):
self.addCopySpec("/etc/auto*")
self.addCopySpec("/etc/sysconfig/autofs")
- self.addCopySpec("/etc/init.d/autofs")
- self.collectExtOutput("/bin/rpm -qV autofs")
- self.collectExtOutput("/etc/init.d/autofs status")
- self.collectExtOutput("ps auxwww | grep automount")
- self.collectExtOutput("/bin/egrep -e 'automount|pid.*nfs' /proc/mounts")
- self.collectExtOutput("/bin/mount | egrep -e 'automount|pid.*nfs'")
- self.collectExtOutput("/sbin/chkconfig --list autofs")
- if self.checkdebug():
- self.addCopySpec(self.getdaemondebug())
- return
-
+ self.addCopySpec("/etc/rc.d/init.d/autofs")
diff --git a/src/lib/sos/plugins/cluster.py b/src/lib/sos/plugins/cluster.py
index f6169ddb..6d4de2a0 100644
--- a/src/lib/sos/plugins/cluster.py
+++ b/src/lib/sos/plugins/cluster.py
@@ -25,139 +25,178 @@ class cluster(sos.plugintools.PluginBase):
('taskdump', 'trigger 3 sysrq+t dumps every 5 seconds (dangerous)', 'slow', False)]
def checkenabled(self):
- # enable if any related package is installed
- for pkg in [ "rgmanager", "luci", "ricci", "system-config-cluster",
- "gfs-utils", "gnbd", "kmod-gfs", "kmod-gnbd", "lvm2-cluster" ]:
- if self.cInfo["policy"].pkgByName(pkg) != None:
- return True
-
- # enable if any related file is present
- for fname in [ "/etc/cluster/cluster.conf", "/proc/cluster" ]:
- try: os.stat(fname)
- except:pass
- else: return True
-
- # no data related to RHCS/GFS exists
- return False
+ rhelver = self.cInfo["policy"].rhelVersion()
+ if rhelver == 4:
+ self.packages = [ "ccs", "cman", "cman-kernel", "magma", "magma-plugins",
+ "rgmanager", "fence", "dlm", "dlm-kernel", "gulm",
+ "GFS", "GFS-kernel", "lvm2-cluster" ]
+ elif rhelver == 5:
+ self.packages = [ "rgmanager", "luci", "ricci", "system-config-cluster",
+ "gfs-utils", "gnbd", "kmod-gfs", "kmod-gnbd", "lvm2-cluster" ]
+
+ self.files = [ "/etc/cluster/cluster.conf", "/proc/cluster" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
def has_gfs(self):
- fp = open("/proc/mounts","r")
- for line in fp.readlines():
- mntline = line.split(" ")
- if mntline[2] == "gfs":
- return True
- fp.close()
- return False
+ try:
+ if len(self.doRegexFindAll(r'^\S+\s+\S+\s+gfs\s+.*$', "/etc/mtab")):
+ return True
+ except:
+ return False
def diagnose(self):
- try: rhelver = self.cInfo["policy"].pkgByName("redhat-release")[1]
- except: rhelver = None
-
- # FIXME: we should only run tests specific for the version, now just do them all regardless
- if rhelver.startswith("5"):
- # check that kernel module packages are installed for
- # running kernel version
- pkgs_check = [ ]
- if self.has_gfs(): pkgs_check.append("kmod-gfs")
-
- for pkgname in pkgs_check:
- if not self.cInfo["policy"].pkgByName(pkgname):
- self.addDiagnose("required package is missing: %s" % pkgname)
-
- # check if the minimum set of packages is installed
- # for RHEL4 RHCS(ccs, cman, cman-kernel, magma, magma-plugins, (dlm, dlm-kernel) || gulm, perl-Net-Telnet, rgmanager, fence)
- # RHEL4 GFS (GFS, GFS-kernel, ccs, lvm2-cluster, fence)
-
- for pkg in [ "cman", "perl-Net-Telnet", "rgmanager" ]:
- if self.cInfo["policy"].pkgByName(pkg) == None:
- self.addDiagnose("required package is missing: %s" % pkg)
-
- # let's make modules sure are loaded
- mods_check = [ "dlm" ]
- if self.has_gfs(): mods_check.append("gfs")
- for module in mods_check:
- if len(self.fileGrep("^%s " % module, "/proc/modules")) == 0:
- self.addDiagnose("required module is not loaded: %s" % module)
-
- # check if all the needed daemons are active at sosreport time
- # check if they are started at boot time in RHEL5 RHCS (rgmanager, cman)
- # and GFS (gfs, ccsd, clvmd, fenced)
- checkserv = [ "cman", "rgmanager" ]
- if self.has_gfs(): checkserv.extend( ["gfs", "clvmd"] )
- for service in checkserv:
- status, output = commands.getstatusoutput("/sbin/service %s status" % service)
- if status:
- self.addDiagnose("service %s is not running" % service)
-
- if not self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService(service):
- self.addDiagnose("service %s is not started in default runlevel" % service)
-
- # FIXME: any cman service whose state != run ?
- # Fence Domain: "default" 2 2 run -
-
- # is cluster quorate
- if not self.is_cluster_quorate():
- self.addDiagnose("cluster node is not quorate")
-
- # if there is no cluster.conf, diagnose() finishes here.
- try:
- os.stat("/etc/cluster/cluster.conf")
- except:
- self.addDiagnose("/etc/cluster/cluster.conf is missing")
- return
-
- # setup XML xpath context
- xml = libxml2.parseFile("/etc/cluster/cluster.conf")
- xpathContext = xml.xpathNewContext()
-
- # check fencing (warn on no fencing)
- if len(xpathContext.xpathEval("/cluster/clusternodes/clusternode[not(fence/method/device)]")):
- if self.has_gfs():
- self.addDiagnose("one or more nodes have no fencing agent configured: fencing is required for GFS to work")
- else:
- self.addDiagnose("one or more nodes have no fencing agent configured: the cluster infrastructure might not work as intended")
-
- # check fencing (warn on manual)
- if len(xpathContext.xpathEval("/cluster/clusternodes/clusternode[/cluster/fencedevices/fencedevice[@agent='fence_manual']/@name=fence/method/device/@name]")):
- self.addDiagnose("one or more nodes have manual fencing agent configured (data integrity is not guaranteed)")
-
- # if fence_ilo or fence_drac, make sure acpid is not running
- hostname = commands.getoutput("/bin/uname -n").split(".")[0]
- if len(xpathContext.xpathEval('/cluster/clusternodes/clusternode[@name = "%s" and /cluster/fencedevices/fencedevice[@agent="fence_rsa" or @agent="fence_drac"]/@name=fence/method/device/@name]' % hostname )):
- status, output = commands.getstatusoutput("/sbin/service acpid status")
- if status == 0 or self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService("acpid"):
- self.addDiagnose("acpid is enabled, this may cause problems with your fencing method.")
-
- # check for fs exported via nfs without nfsid attribute
- if len(xpathContext.xpathEval("/cluster/rm/service//fs[not(@fsid)]/nfsexport")):
- self.addDiagnose("one or more nfs export do not have a fsid attribute set.")
-
- # cluster.conf file version and the in-memory cluster configuration version matches
- status, cluster_version = commands.getstatusoutput("cman_tool status | grep 'Config version'")
- if not status: cluster_version = cluster_version[16:]
- else: cluster_version = None
- conf_version = xpathContext.xpathEval("/cluster/@config_version")[0].content
-
- if status == 0 and conf_version != cluster_version:
- self.addDiagnose("cluster.conf and in-memory configuration version differ (%s != %s)" % (conf_version, cluster_version) )
-
- # make sure the first part of the lock table matches the cluster name
- # and that the locking protocol is sane
- cluster_name = xpathContext.xpathEval("/cluster/@name")[0].content
-
- for fs in self.fileGrep(r'^[^#][/\w]*\W*[/\w]*\W*gfs', "/etc/fstab"):
- # for each gfs entry
- fs = fs.split()
-
- lockproto = self.get_gfs_sb_field(fs[0], "sb_lockproto")
- if lockproto and lockproto != self.get_locking_proto():
- self.addDiagnose("gfs mountpoint (%s) is using the wrong locking protocol (%s)" % (fs[0], lockproto) )
-
- locktable = self.get_gfs_sb_field(fs[0], "sb_locktable")
- try: locktable = locktable.split(":")[0]
- except: continue
- if locktable != cluster_name:
- self.addDiagnose("gfs mountpoint (%s) is using the wrong locking table" % fs[0])
+ rhelver = self.cInfo["policy"].rhelVersion()
+
+ # check if the minimum set of packages is installed
+ # for RHEL4 RHCS(ccs, cman, cman-kernel, magma, magma-plugins, (dlm, dlm-kernel) || gulm, perl-Net-Telnet, rgmanager, fence)
+ # RHEL4 GFS (GFS, GFS-kernel, ccs, lvm2-cluster, fence)
+
+ pkgs_check = []
+ mods_check = []
+ serv_check = []
+
+ if rhelver == 4:
+ pkgs_check.extend( [ "ccs", "cman", "magma", "magma-plugins", "perl-Net-Telnet", "rgmanager", "fence" ] )
+ mods_check.extend( [ "cman", "dlm" ] )
+ if self.has_gfs():
+ mods_check.append("gfs")
+ serv_check.extend( [ "cman", "ccsd", "rgmanager", "fenced" ] )
+ if self.has_gfs():
+ serv_check.extend( ["gfs", "clvmd"] )
+ elif rhelver == 5:
+ pkgs_check.extend ( [ "cman", "perl-Net-Telnet", "rgmanager" ] )
+ mods_check.extend( [ "dlm" ] )
+ if self.has_gfs():
+ mods_check.extend( ["gfs", "gfs2"] )
+ serv_check.extend( [ "cman", "rgmanager" ] )
+ if self.has_gfs():
+ serv_check.extend( ["gfs", "clvmd"] )
+
+ # check that kernel module packages are installed for
+ # running kernel version
+
+ for modname in mods_check:
+ found = 0
+
+ if self.cInfo["policy"].allPkgsByNameRegex( "^" + modname ):
+ found = 1
+
+ status, output = commands.getstatusoutput('/sbin/modinfo -F vermagic ' + modname)
+
+ if status == 0:
+ found = 2
+
+ if len(self.fileGrep("^%s\s+" % modname, "/proc/modules")) > 0:
+ found = 3
+
+ if found == 0:
+ self.addDiagnose("required kernel module is missing: %s" % modname)
+ elif found == 1:
+ self.addDiagnose("required module is not available for current kernel: %s" % modname)
+ elif found == 2:
+ self.addDiagnose("required module is available but not loaded: %s" % modname)
+
+ for pkg in pkgs_check:
+ if self.cInfo["policy"].pkgByName(pkg) == None:
+ self.addDiagnose("required package is missing: %s" % pkg)
+
+ if rhelver == "4":
+ # (dlm, dlm-kernel) || gulm
+ if not ((self.cInfo["policy"].pkgByName("dlm") and self.cInfo["policy"].pkgByName("dlm-kernel")) or self.cInfo["policy"].pkgByName("gulm")):
+ self.addDiagnose("required packages are missing: (dlm, dlm-kernel) || gulm")
+
+ # check if all the needed daemons are active at sosreport time
+ # check if they are started at boot time in RHEL4 RHCS (cman, ccsd, rgmanager, fenced)
+ # and GFS (gfs, ccsd, clvmd, fenced)
+
+ for service in serv_check:
+ status, output = commands.getstatusoutput("/sbin/service %s status &> /dev/null" % service)
+ if status != 0:
+ self.addDiagnose("service %s is not running" % service)
+
+ if not self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService(service):
+ self.addDiagnose("service %s is not started in default runlevel" % service)
+
+ # FIXME: missing important cman services
+ # FIXME: any cman service whose state != run ?
+ # Fence Domain: "default" 2 2 run -
+
+ # is cluster quorate
+ if not self.is_cluster_quorate():
+ self.addDiagnose("cluster node is not quorate")
+
+ # if there is no cluster.conf, diagnose() finishes here.
+ try:
+ os.stat("/etc/cluster/cluster.conf")
+ except:
+ self.addDiagnose("/etc/cluster/cluster.conf is missing")
+ return
+
+ # setup XML xpath context
+ xml = libxml2.parseFile("/etc/cluster/cluster.conf")
+ xpathContext = xml.xpathNewContext()
+
+ # make sure that the node names are valid according to RFC 2181
+ for hostname in xpathContext.xpathEval('/cluster/clusternodes/clusternode/@name'):
+ if not re.match('^[a-zA-Z]([a-zA-Z0-9-]*[a-zA-Z0-9])?(\.[a-zA-Z]([a-zA-Z0-9-]*[a-zA-Z0-9])?)*$', hostname.content):
+ self.addDiagnose("node name (%s) contains invalid characters" % hostname.content)
+
+ # do not rely on DNS to resolve node names, must have them in /etc/hosts
+ for hostname in xpathContext.xpathEval('/cluster/clusternodes/clusternode/@name'):
+ if len(self.fileGrep(r'^.*\W+%s' % hostname.content , "/etc/hosts")) == 0:
+ self.addDiagnose("node %s is not defined in /etc/hosts" % hostname.content)
+
+ # check fencing (warn on no fencing)
+ if len(xpathContext.xpathEval("/cluster/clusternodes/clusternode[not(fence/method/device)]")):
+ if self.has_gfs():
+ self.addDiagnose("one or more nodes have no fencing agent configured: fencing is required for GFS to work")
+ else:
+ self.addDiagnose("one or more nodes have no fencing agent configured: the cluster infrastructure might not work as intended")
+
+ # check fencing (warn on manual)
+ if len(xpathContext.xpathEval("/cluster/clusternodes/clusternode[/cluster/fencedevices/fencedevice[@agent='fence_manual']/@name=fence/method/device/@name]")):
+ self.addDiagnose("one or more nodes have manual fencing agent configured (data integrity is not guaranteed)")
+
+ # if fence_ilo or fence_drac, make sure acpid is not running
+ hostname = commands.getoutput("/bin/uname -n").split(".")[0]
+ if len(xpathContext.xpathEval('/cluster/clusternodes/clusternode[@name = "%s" and /cluster/fencedevices/fencedevice[@agent="fence_rsa" or @agent="fence_drac"]/@name=fence/method/device/@name]' % hostname )):
+ status, output = commands.getstatusoutput("/sbin/service acpid status")
+ if status == 0 or self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService("acpid"):
+ self.addDiagnose("acpid is enabled, this may cause problems with your fencing method.")
+
+ # check for fs exported via nfs without nfsid attribute
+ if len(xpathContext.xpathEval("/cluster/rm/service//fs[not(@fsid)]/nfsexport")):
+ self.addDiagnose("one or more nfs export do not have a fsid attribute set.")
+
+ # cluster.conf file version and the in-memory cluster configuration version matches
+ status, cluster_version = commands.getstatusoutput("cman_tool status | grep 'Config version'")
+ if not status: cluster_version = cluster_version[16:]
+ else: cluster_version = None
+ conf_version = xpathContext.xpathEval("/cluster/@config_version")[0].content
+
+ if status == 0 and conf_version != cluster_version:
+ self.addDiagnose("cluster.conf and in-memory configuration version differ (%s != %s)" % (conf_version, cluster_version) )
+
+ status, output = commands.getstatusoutput("/usr/sbin/rg_test test /etc/cluster/cluster.conf")
+ if output.find("Error: ") > 0:
+ self.addDiagnose("configuration errors are present according to rg_test")
+
+ # make sure the first part of the lock table matches the cluster name
+ # and that the locking protocol is sane
+ cluster_name = xpathContext.xpathEval("/cluster/@name")[0].content
+
+ for fs in self.fileGrep(r'^[^#][/\w]*\W*[/\w]*\W*gfs', "/etc/fstab"):
+ # for each gfs entry
+ fs = fs.split()
+ lockproto = self.get_gfs_sb_field(fs[0], "sb_lockproto")
+ if lockproto and lockproto != self.get_locking_proto():
+ self.addDiagnose("gfs mountpoint (%s) is using the wrong locking protocol (%s)" % (fs[0], lockproto) )
+
+ locktable = self.get_gfs_sb_field(fs[0], "sb_locktable")
+ try: locktable = locktable.split(":")[0]
+ except: continue
+ if locktable != cluster_name:
+ self.addDiagnose("gfs mountpoint (%s) is using the wrong locking table" % fs[0])
def setup(self):
self.collectExtOutput("/sbin/fdisk -l")
@@ -175,9 +214,9 @@ class cluster(sos.plugintools.PluginBase):
self.collectExtOutput("/sbin/ipvsadm -L")
- if self.isOptionEnabled('gfslockdump'): self.do_gfslockdump()
- if self.isOptionEnabled('lockdump'): self.do_lockdump()
- if self.isOptionEnabled('taskdump'): self.do_taskdump()
+ if self.getOption('gfslockdump'): self.do_gfslockdump()
+ if self.getOption('lockdump'): self.do_lockdump()
+ if self.getOption('taskdump'): self.do_taskdump()
return
@@ -194,20 +233,24 @@ class cluster(sos.plugintools.PluginBase):
self.addCopySpec("/var/log/messages")
def do_lockdump(self):
- try:
- fp = open("/proc/cluster/services","r")
- except:
- return
- for line in fp.readlines():
- if line[0:14] == "DLM Lock Space":
- try:
- lockspace = line.split('"')[1]
- except:
- pass
- else:
- commands.getstatusoutput("echo %s > /proc/cluster/dlm_locks" % lockspace)
- self.collectOutputNow("cat /proc/cluster/dlm_locks", root_symlink = "dlm_locks_%s" % lockspace)
- fp.close()
+ status, output = commands.getstatusoutput("cman_tool services")
+ if status:
+ # command somehow failed
+ return False
+
+ import re
+
+ rhelver = self.get_redhat_release()
+
+ if rhelver == "4":
+ regex = r'^DLM Lock Space:\s*"([^"]*)".*$'
+ elif rhelver == "5Server" or rhelver == "5Client":
+ regex = r'^dlm\s+[^\s]+\s+([^\s]+)\s.*$'
+
+ reg=re.compile(regex,re.MULTILINE)
+ for lockspace in reg.findall(output):
+ commands.getstatusoutput("echo %s > /proc/cluster/dlm_locks" % lockspace)
+ self.collectOutputNow("cat /proc/cluster/dlm_locks", root_symlink = "dlm_locks_%s" % lockspace)
def get_locking_proto(self):
# FIXME: what's the best way to find out ?
@@ -215,15 +258,11 @@ class cluster(sos.plugintools.PluginBase):
return "lock_gulm"
def do_gfslockdump(self):
- fp = open("/proc/mounts","r")
- for line in fp.readlines():
- mntline = line.split(" ")
- if mntline[2] == "gfs":
- self.collectExtOutput("/sbin/gfs_tool lockdump %s" % mntline[1], root_symlink = "gfs_lockdump_" + self.mangleCommand(mntline[1]) )
- fp.close()
-
- def do_rgmgr_bt(self):
- # FIXME: threads backtrace
+ for mntpoint in self.doRegexFindAll(r'^\S+\s+([^\s]+)\s+gfs\s+.*$', "/proc/mounts"):
+ self.collectExtOutput("/sbin/gfs_tool lockdump %s" % mntpoint, root_symlink = "gfs_lockdump_" + self.mangleCommand(mntpoint) )
+
+ def do_rgmanager_bt(self):
+ # FIXME: threads backtrace via SIGALRM
return
def postproc(self):
@@ -233,7 +272,7 @@ class cluster(sos.plugintools.PluginBase):
def is_cluster_quorate(self):
output = commands.getoutput("cman_tool status | grep '^Membership state: '")
try:
- if output.split(":")[1].strip() == "Cluster-Member":
+ if output[18:] == "Cluster-Member":
return True
else:
return False
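
The rewritten do_lockdump() above parses "cman_tool services" output with a release-specific regular expression instead of reading /proc/cluster/services line by line. The fragment below is not part of the commit and uses invented sample output; it only shows the RHEL4-style extraction in isolation.

import re

sample = ('Fence Domain:    "default"    1   2 run       -\n'
          'DLM Lock Space:  "clvmd"      2   3 run       -\n'
          'DLM Lock Space:  "rgmanager"  4   5 run       -\n')

regex = r'^DLM Lock Space:\s*"([^"]*)".*$'          # RHEL4-style cman_tool output
for lockspace in re.compile(regex, re.MULTILINE).findall(sample):
    print lockspace                                 # clvmd, then rgmanager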
diff --git a/src/lib/sos/plugins/devicemapper.py b/src/lib/sos/plugins/devicemapper.py
index 38009689..5a1b63c2 100644
--- a/src/lib/sos/plugins/devicemapper.py
+++ b/src/lib/sos/plugins/devicemapper.py
@@ -22,11 +22,6 @@ class devicemapper(sos.plugintools.PluginBase):
optionList = [("lvmdump", 'collect raw metadata from PVs', 'slow', False)]
- def do_lvmdump(self):
- """Collects raw metadata directly from the PVs using dd
- """
- sosGetCommandOutput("lvmdump -d %s" % os.path.join(self.cInfo['dstroot'],"lvmdump"))
-
def setup(self):
self.collectExtOutput("/sbin/dmsetup info -c")
self.collectExtOutput("/sbin/dmsetup table")
@@ -45,17 +40,18 @@ class devicemapper(sos.plugintools.PluginBase):
self.addCopySpec("/var/lib/multipath/bindings")
self.collectExtOutput("/sbin/multipath -v4 -ll")
- self.collectExtOutput("/usr/bin/systool -v -c -b scsi")
+ self.collectExtOutput("/usr/bin/systool -v -C -b scsi")
self.collectExtOutput("/bin/ls -laR /dev")
self.collectExtOutput("/bin/ls -laR /sys/block")
if self.getOption('lvmdump'):
- self.do_lvmdump()
+ sosGetCommandOutput("lvmdump -d %s" % os.path.join(self.cInfo['dstroot'],"lvmdump"))
- for disk in os.listdir("/sys/block"):
- if disk in [ ".", ".." ] or disk.startswith("ram"):
- continue
- self.collectExtOutput("/usr/bin/udevinfo -ap /sys/block/%s" % (disk))
+ if os.path.isdir("/sys/block"):
+ for disk in os.listdir("/sys/block"):
+ if disk in [ ".", ".." ] or disk.startswith("ram"):
+ continue
+ self.collectExtOutput("/usr/bin/udevinfo -ap /sys/block/%s" % (disk))
return
diff --git a/src/lib/sos/plugins/filesys.py b/src/lib/sos/plugins/filesys.py
index bec046f2..93645314 100644
--- a/src/lib/sos/plugins/filesys.py
+++ b/src/lib/sos/plugins/filesys.py
@@ -13,7 +13,7 @@
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sos.plugintools
-import commands
+import os
class filesys(sos.plugintools.PluginBase):
"""information on filesystems
@@ -29,12 +29,11 @@ class filesys(sos.plugintools.PluginBase):
self.addCopySpec("/etc/mdadm.conf")
self.collectExtOutput("/bin/df -al", root_symlink = "df")
- self.collectExtOutput("/usr/sbin/lsof -b +M -n -l", root_symlink = "lsof")
self.collectExtOutput("/sbin/blkid")
-
+
self.collectExtOutput("/sbin/fdisk -l", root_symlink = "fdisk-l")
for extfs in self.doRegexFindAll(r"^(/dev/.+) on .+ type ext.\s+", mounts):
- self.collectExtOutput("/sbin/dumpe2fs %s" % (extfs))
- return
+ self.collectExtOutput("/sbin/tune2fs -l %s" % (extfs))
+ return
diff --git a/src/lib/sos/plugins/general.py b/src/lib/sos/plugins/general.py
index 648dee75..434bfdaa 100644
--- a/src/lib/sos/plugins/general.py
+++ b/src/lib/sos/plugins/general.py
@@ -20,25 +20,26 @@ class general(sos.plugintools.PluginBase):
"""basic system information
"""
- optionList = [("syslogsize", "max size (MiB) to collect per syslog file", "", 15)]
+ optionList = [("syslogsize", "max size (MiB) to collect per syslog file", "", 15),
+ ("all_logs", "collect all log files defined in syslog.conf", "", False)]
def setup(self):
self.addCopySpec("/etc/redhat-release")
self.addCopySpec("/etc/fedora-release")
+ self.addCopySpec("/etc/inittab")
+ self.addCopySpec("/etc/sos.conf")
self.addCopySpec("/etc/sysconfig")
self.addCopySpec("/proc/stat")
self.addCopySpec("/var/log/dmesg")
self.addCopySpec("/var/log/messages")
- self.addCopySpecLimit("/var/log/messages.*", sizelimit = self.isOptionEnabled("syslogsize"))
+ self.addCopySpecLimit("/var/log/messages.*", sizelimit = self.getOption("syslogsize"))
self.addCopySpec("/var/log/secure")
- self.addCopySpecLimit("/var/log/secure.*", sizelimit = self.isOptionEnabled("syslogsize"))
+ self.addCopySpecLimit("/var/log/secure.*", sizelimit = self.getOption("syslogsize"))
self.addCopySpec("/var/log/sa")
self.addCopySpec("/var/log/up2date")
- self.addCopySpec("/etc/exports")
self.collectExtOutput("/bin/hostname", root_symlink = "hostname")
self.collectExtOutput("/bin/date", root_symlink = "date")
self.collectExtOutput("/usr/bin/uptime", root_symlink = "uptime")
- self.addCopySpec("/root/anaconda-ks.cfg")
self.collectExtOutput("/bin/env")
if self.getOption('all_logs'):
diff --git a/src/lib/sos/plugins/hardware.py b/src/lib/sos/plugins/hardware.py
index ba10a356..ad4a5537 100644
--- a/src/lib/sos/plugins/hardware.py
+++ b/src/lib/sos/plugins/hardware.py
@@ -36,23 +36,12 @@ class hardware(sos.plugintools.PluginBase):
self.addCopySpec("/proc/dasd")
self.addCopySpec("/proc/s390dbf/tape")
self.collectExtOutput("/usr/share/rhn/up2dateclient/hardware.py")
- self.collectExtOutput("""/bin/echo "lspci:" ; /bin/echo ; /sbin/lspci ; /bin/echo ; /bin/echo "lspci -nvv:" ; /bin/echo ; /sbin/lspci -nvv""", suggest_filename = "lspci", root_symlink = "lspci")
+ self.collectExtOutput("""/bin/echo -e "lspci:\n" ; /sbin/lspci ; /bin/echo -e "\nlspci -nvv:\n" ; /sbin/lspci -nvv ; /bin/echo -e "\nlspci -tv:\n" ; /sbin/lspci -tv""", suggest_filename = "lspci", root_symlink = "lspci")
self.collectExtOutput("/usr/sbin/dmidecode", root_symlink = "dmidecode")
- # FIXME: if arch == i386:
-# self.collectExtOutput("/usr/sbin/x86info -a")
-
- # FIXME: what is this for?
- self.collectExtOutput("/bin/dmesg | /bin/grep -e 'e820.' -e 'agp.'")
-
- # FIXME: what is this for?
- tmpreg = ""
- for hwmodule in commands.getoutput('cat /lib/modules/$(uname -r)/modules.pcimap | cut -d " " -f 1 | grep "[[:alpha:]]" | sort -u').split("\n"):
- hwmodule = hwmodule.strip()
- if len(hwmodule):
- tmpreg = tmpreg + "|" + hwmodule
- self.collectExtOutput("/bin/dmesg | /bin/egrep '(%s)'" % tmpreg[1:])
+ if self.cInfo["policy"].getArch().endswith("386"):
+ self.collectExtOutput("/usr/sbin/x86info -a")
self.collectExtOutput("/sbin/lsusb")
self.collectExtOutput("/usr/bin/lshal")
diff --git a/src/lib/sos/plugins/initrd.py b/src/lib/sos/plugins/initrd.py
index 83356548..7bd7e1f3 100644
--- a/src/lib/sos/plugins/initrd.py
+++ b/src/lib/sos/plugins/initrd.py
@@ -20,9 +20,9 @@ class initrd(sos.plugintools.PluginBase):
"""
def setup(self):
for initrd in glob.glob('/boot/initrd-*.img'):
- self.collectExtOutput("/bin/zcat "+initrd+" | /bin/cpio "+
+ self.collectExtOutput("/bin/zcat "+initrd+" | /bin/cpio "+
"--extract --to-stdout init" )
return
def defaultenabled(self):
- return False
+ return False
diff --git a/src/lib/sos/plugins/kernel.py b/src/lib/sos/plugins/kernel.py
index 8ca53b66..4fb0a864 100644
--- a/src/lib/sos/plugins/kernel.py
+++ b/src/lib/sos/plugins/kernel.py
@@ -18,7 +18,7 @@ import commands, os, re
class kernel(sos.plugintools.PluginBase):
"""kernel related information
"""
- optionList = [("modinfo", 'gathers module information on all modules', 'fast', True),
+ optionList = [("modinfo", 'gathers information on all kernel modules', 'fast', True),
('sysrq', 'trigger sysrq+[m,p,t] dumps', 'fast', False)]
moduleFile = ""
taintList = [
@@ -28,7 +28,7 @@ class kernel(sos.plugintools.PluginBase):
{'regex':'vxportal*', 'description':'Veritas module'},
{'regex':'vxdmp*', 'description':'Veritas dynamic multipathing module'},
{'regex':'vxio*', 'description':'Veritas module'},
- {'regex':'vxspec*"', 'description':'Veritas module'},
+ {'regex':'vxspec*', 'description':'Veritas module'},
{'regex':'dcd*', 'description':'Dell OpenManage Server Administrator module'},
{'regex':'ocfs', 'description':'Oracle cluster filesystem module'},
{'regex':'oracle*', 'description':'Oracle module'},
@@ -49,22 +49,22 @@ class kernel(sos.plugintools.PluginBase):
def setup(self):
self.collectExtOutput("/bin/uname -a", root_symlink = "uname")
self.moduleFile = self.collectOutputNow("/sbin/lsmod", root_symlink = "lsmod")
- if self.isOptionEnabled('modinfo'):
- runcmd = ""
- for kmod in commands.getoutput('/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null').split('\n'):
- if '' != kmod.strip():
- runcmd = runcmd + " " + kmod
- if len(runcmd):
- self.collectExtOutput("/sbin/modinfo " + runcmd)
+
+ if self.getOption('modinfo'):
+ runcmd = ""
+ for kmod in commands.getoutput('/sbin/lsmod | /bin/cut -f1 -d" " 2>/dev/null | /bin/grep -v Module 2>/dev/null').split('\n'):
+ if '' != kmod.strip():
+ runcmd = runcmd + " " + kmod
+ if len(runcmd):
+ self.collectExtOutput("/sbin/modinfo " + runcmd)
+
self.collectExtOutput("/sbin/sysctl -a")
self.collectExtOutput("/sbin/ksyms")
self.addCopySpec("/sys/module/*/parameters")
self.addCopySpec("/proc/filesystems")
self.addCopySpec("/proc/ksyms")
self.addCopySpec("/proc/slabinfo")
- kver = commands.getoutput('/bin/uname -r')
- depfile = "/lib/modules/%s/modules.dep" % (kver,)
- self.addCopySpec(depfile)
+ self.addCopySpec("/lib/modules/%s/modules.dep" % self.cInfo["policy"].kernelVersion())
self.addCopySpec("/etc/conf.modules")
self.addCopySpec("/etc/modules.conf")
self.addCopySpec("/etc/modprobe.conf")
@@ -72,25 +72,18 @@ class kernel(sos.plugintools.PluginBase):
self.addCopySpec("/proc/cmdline")
self.addCopySpec("/proc/driver")
self.addCopySpec("/proc/sys/kernel/tainted")
- # FIXME: both RHEL4 and RHEL5 don't need sysrq to be enabled to trigger via sysrq-trigger
- if self.isOptionEnabled('sysrq') and os.access("/proc/sysrq-trigger", os.W_OK) and os.access("/proc/sys/kernel/sysrq", os.R_OK):
- sysrq_state = commands.getoutput("/bin/cat /proc/sys/kernel/sysrq")
- commands.getoutput("/bin/echo 1 > /proc/sys/kernel/sysrq")
- for key in ['m', 'p', 't']:
- commands.getoutput("/bin/echo %s > /proc/sysrq-trigger" % (key,))
- commands.getoutput("/bin/echo %s > /proc/sys/kernel/sysrq" % (sysrq_state,))
- # No need to grab syslog here if we can't trigger sysrq, so keep this
- # inside the if
- self.addCopySpec("/var/log/messages")
-
+
+ if self.getOption('sysrq') and os.access("/proc/sysrq-trigger", os.W_OK):
+ for key in ['m', 'p', 't']:
+ commands.getoutput("/bin/echo %s > /proc/sysrq-trigger" % (key,))
+ self.addCopySpec("/var/log/messages")
+
return
- def analyze(self):
- infd = open("/proc/modules", "r")
- modules = infd.readlines()
- infd.close()
+ def diagnose(self):
- for modname in modules:
+ infd = open("/proc/modules", "r")
+ for modname in infd.readlines():
modname=modname.split(" ")[0]
modinfo_srcver = commands.getoutput("/sbin/modinfo -F srcversion %s" % modname)
if not os.access("/sys/module/%s/srcversion" % modname, os.R_OK):
@@ -99,13 +92,17 @@ class kernel(sos.plugintools.PluginBase):
sys_srcver = infd.read().strip("\n")
infd.close()
if modinfo_srcver != sys_srcver:
- self.addAlert("Loaded module %s differs from the one present on the file-system")
+ self.addDiagnose("loaded module %s differs from the one present on the file-system" % modname)
# this would be a good moment to check the module's signature
# but at the moment there's no easy way to do that outside of
# the kernel. i will probably need to write a C lib (derived from
# the kernel sources to do this verification.
+ infd.close()
+
+ def analyze(self):
+
savedtaint = os.path.join(self.cInfo['dstroot'], "/proc/sys/kernel/tainted")
infd = open(savedtaint, "r")
line = infd.read()
@@ -114,12 +111,10 @@ class kernel(sos.plugintools.PluginBase):
if (line != "0"):
self.addAlert("Kernel taint flag is <%s>\n" % line)
-
infd = open(self.moduleFile, "r")
modules = infd.readlines()
infd.close()
- #print(modules)
for tainter in self.taintList:
p = re.compile(tainter['regex'])
for line in modules:
diff --git a/src/lib/sos/plugins/ldap.py b/src/lib/sos/plugins/ldap.py
index 318a3ba9..b1a48420 100644
--- a/src/lib/sos/plugins/ldap.py
+++ b/src/lib/sos/plugins/ldap.py
@@ -13,14 +13,37 @@
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import sos.plugintools
+import os
class ldap(sos.plugintools.PluginBase):
"""LDAP related information
"""
+ def checkenabled(self):
+ self.packages = [ "openldap" ]
+ self.files = [ "/etc/openldap/ldap.conf" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
+
+ def get_ldap_opts(self):
+ # capture /etc/openldap/ldap.conf options in dict
+ # FIXME: possibly not hardcode these options in?
+ ldapopts=["URI","BASE","TLS_CACERTDIR"]
+ results={}
+ tmplist=[]
+ for i in ldapopts:
+ t=self.doRegexFindAll(r"^(%s)\s+(.*)" % i,"/etc/openldap/ldap.conf")
+ for x in t:
+ results[x[0]]=x[1].rstrip("\n")
+ return results
+
+ def diagnose(self):
+ # Validate ldap client options
+ ldapopts=self.get_ldap_opts()
+ if ldapopts.has_key("TLS_CACERTDIR") and not os.path.exists(ldapopts["TLS_CACERTDIR"]):
+ self.addDiagnose("%s does not exist and can cause connection issues involving TLS" % ldapopts["TLS_CACERTDIR"])
+
def setup(self):
self.addCopySpec("/etc/ldap.conf")
self.addCopySpec("/etc/openldap")
- return
def postproc(self):
self.doRegexSub("/etc/ldap.conf", r"(\s*bindpw\s*)\S+", r"\1***")
diff --git a/src/lib/sos/plugins/named.py b/src/lib/sos/plugins/named.py
index 421143cb..bb0d079e 100644
--- a/src/lib/sos/plugins/named.py
+++ b/src/lib/sos/plugins/named.py
@@ -20,21 +20,14 @@ class named(sos.plugintools.PluginBase):
"""named related information
"""
def checkenabled(self):
- if self.cInfo["policy"].pkgByName("bind") or os.path.exists("/etc/named.conf") or os.path.exists("/etc/sysconfig/named"):
- return True
- return False
+ self.files = [ "/etc/named.conf", "/etc/sysconfig/named" ]
+ self.packages = [ "bind", "bind-chroot" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
def setup(self):
- dnsdir = ""
- self.addCopySpec("/etc/named.boot")
- self.addCopySpec("/etc/named.conf")
- self.addCopySpec("/etc/sysconfig/named")
- if os.access("/etc/named.conf", os.R_OK):
- dnsdir = commands.getoutput("/bin/grep -i directory /etc/named.conf | /bin/gawk '{print $2}' | /bin/sed 's/\\\"//g' | /bin/sed 's/\;//g'")
- if os.access("/etc/named.boot", os.R_OK):
- dnsdir = commands.getoutput("/bin/grep -i directory /etc/named.boot | /bin/gawk '{print $2}' | /bin/sed 's/\\\"//g' | /bin/sed 's/\;//g'")
- if '' != dnsdir.strip():
- self.addCopySpec(dnsdir)
- self.addForbiddenPath('/var/named/chroot/proc')
- self.addForbiddenPath('/var/named/chroot/dev')
- return
+ self.addCopySpec("/etc/named.boot")
+ self.addCopySpec("/etc/named.conf")
+ self.addCopySpec("/etc/sysconfig/named")
+ self.addCopySpec("/var/named")
+ self.addForbiddenPath('/var/named/chroot/proc')
+ self.addForbiddenPath('/var/named/chroot/dev')
diff --git a/src/lib/sos/plugins/networking.py b/src/lib/sos/plugins/networking.py
index 080ae694..164ed43a 100644
--- a/src/lib/sos/plugins/networking.py
+++ b/src/lib/sos/plugins/networking.py
@@ -24,14 +24,7 @@ class networking(sos.plugintools.PluginBase):
"""Return a dictionary for which key are interface name according to the
output of ifconifg-a stored in ifconfigFile.
"""
- out={}
- if(os.path.isfile(ifconfigFile)):
- f=open(ifconfigFile,'r')
- content=f.read()
- f.close()
- reg=re.compile(r"^(eth\d+)\D",re.MULTILINE)
- for name in reg.findall(content):
- out[name]=1
+ out=self.doRegexFindAll(r"^(eth\d+)\D", ifconfigFile)
return out
def collectIPTable(self,tablename):
@@ -60,20 +53,19 @@ class networking(sos.plugintools.PluginBase):
self.addCopySpec("/etc/resolv.conf")
ifconfigFile=self.collectOutputNow("/sbin/ifconfig -a", root_symlink = "ifconfig")
self.collectExtOutput("/sbin/route -n", root_symlink = "route")
- self.collectExtOutput("/sbin/ipchains -nvL")
self.collectIPTable("filter")
self.collectIPTable("nat")
self.collectIPTable("mangle")
self.collectExtOutput("/bin/netstat -s")
self.collectExtOutput("/bin/netstat -neopa", root_symlink = "netstat")
- # FIXME: we should collect "ip route table <tablename>" for all tables (from "ip rule")
+ self.collectExtOutput("/sbin/ip route show table all")
self.collectExtOutput("/sbin/ip link")
self.collectExtOutput("/sbin/ip address")
self.collectExtOutput("/sbin/ifenslave -a")
if ifconfigFile:
for eth in self.get_interface_name(ifconfigFile):
self.collectExtOutput("/sbin/ethtool "+eth)
- if self.isOptionEnabled("traceroute"):
+ if self.getOption("traceroute"):
self.collectExtOutput("/bin/traceroute -n rhn.redhat.com")
return
diff --git a/src/lib/sos/plugins/nfsserver.py b/src/lib/sos/plugins/nfsserver.py
index 9564aa22..1e352032 100644
--- a/src/lib/sos/plugins/nfsserver.py
+++ b/src/lib/sos/plugins/nfsserver.py
@@ -35,7 +35,10 @@ class nfsserver(sos.plugintools.PluginBase):
def setup(self):
self.addCopySpec("/etc/exports")
+ self.addCopySpec("/var/lib/nfs/etab")
+ self.addCopySpec("/var/lib/nfs/xtab")
+ self.addCopySpec("/var/lib/nfs/rmtab")
self.collectExtOutput("/usr/sbin/rpcinfo -p localhost")
- self.collectExtOutput("/usr/sbin/nfsstat")
+ self.collectExtOutput("/usr/sbin/nfsstat -a")
return
diff --git a/src/lib/sos/plugins/pam.py b/src/lib/sos/plugins/pam.py
index 8164bba3..1a50811b 100644
--- a/src/lib/sos/plugins/pam.py
+++ b/src/lib/sos/plugins/pam.py
@@ -20,6 +20,6 @@ class pam(sos.plugintools.PluginBase):
def setup(self):
self.addCopySpec("/etc/pam.d")
self.addCopySpec("/etc/security")
- self.collectExtOutput("/bin/ls -laF /lib/security/pam_*so")
+ self.collectExtOutput("/bin/ls -laF /lib/security")
return
diff --git a/src/lib/sos/plugins/process.py b/src/lib/sos/plugins/process.py
index 47e37edf..0d89e06b 100644
--- a/src/lib/sos/plugins/process.py
+++ b/src/lib/sos/plugins/process.py
@@ -25,6 +25,7 @@ class process(sos.plugintools.PluginBase):
self.collectExtOutput("/bin/ps auxwwwm")
self.collectExtOutput("/bin/ps alxwww")
self.collectExtOutput("/usr/bin/pstree", root_symlink = "pstree")
+ self.collectExtOutput("/usr/sbin/lsof -b +M -n -l", root_symlink = "lsof")
return
def find_mountpoint(s):
@@ -40,7 +41,7 @@ class process(sos.plugintools.PluginBase):
line = line.split()
if line[0] == "D":
# keep an eye on the process to see if the stat changes
- for inc in range(1,5):
+ for inc in range(1,10):
try:
if len(self.fileGrep("^State: D", " /proc/%d/status" % int(line[1]))) == 0:
# status is not D, good. let's get out of the loop.
diff --git a/src/lib/sos/plugins/rpm.py b/src/lib/sos/plugins/rpm.py
index d9f1e79b..f1c09e33 100644
--- a/src/lib/sos/plugins/rpm.py
+++ b/src/lib/sos/plugins/rpm.py
@@ -22,12 +22,12 @@ class rpm(sos.plugintools.PluginBase):
def setup(self):
self.addCopySpec("/var/log/rpmpkgs")
-
- if self.isOptionEnabled("rpmq"):
- self.collectExtOutput("/bin/rpm -qa --qf=\"%{NAME}-%{VERSION}-%{RELEASE}-%{ARCH}~~%{INSTALLTIME:date}\n\"|/bin/awk -F ~~ '{printf \"%-60s%s\\n\",$1,$2}'", root_symlink = "installed-rpms")
-
- if self.isOptionEnabled("rpmva"):
- self.eta_weight += 800 # this plugins takes 200x longer (for ETA)
- self.collectExtOutput("/bin/rpm -Va", root_symlink = "rpm-Va")
+
+ if self.getOption("rpmq"):
+ self.collectExtOutput("/bin/rpm -qa --qf \"%{NAME}-%{VERSION}-%{RELEASE}-%{ARCH}\n\"", root_symlink = "installed-rpms")
+
+ if self.getOption("rpmva"):
+ self.eta_weight += 1500 # this plugin takes 200x longer (for ETA)
+ self.collectExtOutput("/bin/rpm -Va", root_symlink = "rpm-Va", timeout = 3600)
return
diff --git a/src/lib/sos/plugins/s390.py b/src/lib/sos/plugins/s390.py
index 39c16c07..e9bfbb6f 100644
--- a/src/lib/sos/plugins/s390.py
+++ b/src/lib/sos/plugins/s390.py
@@ -38,10 +38,8 @@ class s390(sos.plugintools.PluginBase):
self.addCopySpec("/proc/crypto")
self.addCopySpec("/proc/dasd/devices")
self.addCopySpec("/proc/dasd/statistics")
- self.addCopySpec("/proc/misc")
self.addCopySpec("/proc/qeth")
self.addCopySpec("/proc/qeth_perf")
- self.addCopySpec("/proc/qeth_ipa_takeover")
self.addCopySpec("/proc/sys/appldata/*")
self.addCopySpec("/proc/sys/kernel/hz_timer")
self.addCopySpec("/proc/sysinfo")
@@ -53,15 +51,11 @@ class s390(sos.plugintools.PluginBase):
self.addCopySpec("/etc/zfcp.conf")
self.addCopySpec("/etc/sysconfig/dumpconf")
self.addCopySpec("/etc/src_vipa.conf")
- self.addCopySpec("/etc/ccwgroup.conf")
- self.addCopySpec("/etc/chandev.conf")
self.collectExtOutput("/sbin/lscss")
self.collectExtOutput("/sbin/lsdasd")
self.collectExtOutput("/sbin/lsqeth")
self.collectExtOutput("/sbin/lszfcp")
self.collectExtOutput("/sbin/lstape")
- self.collectExtOutput("find /sys -type -f")
- self.collectExtOutput("find /proc/s390dbf -type -f")
self.collectExtOutput("/sbin/qethconf list_all")
dasdDev = commands.getoutput("ls /dev/dasd?")
for x in dasdDev.split('\n'):
diff --git a/src/lib/sos/plugins/selinux.py b/src/lib/sos/plugins/selinux.py
index 897c3991..5f0c1f40 100644
--- a/src/lib/sos/plugins/selinux.py
+++ b/src/lib/sos/plugins/selinux.py
@@ -19,18 +19,32 @@ class selinux(sos.plugintools.PluginBase):
"""selinux related information
"""
def setup(self):
- self.addCopySpec("/etc/selinux/*")
+ # sestatus is always collected in checkenabled()
+ self.addCopySpec("/etc/selinux")
self.collectExtOutput("/usr/bin/selinuxconfig")
- self.collectExtOutput("/usr/sbin/sestatus", root_symlink = "sestatus")
- self.collectExtOutput("/bin/rpm -q -V selinux-policy-targeted")
- self.collectExtOutput("/bin/rpm -q -V selinux-policy-strict")
+ self.eta_weight += 120 # this plugins takes 120x longer (for ETA)
+ self.collectExtOutput("/sbin/fixfiles check")
+
+ self.addForbiddenPath("/etc/selinux/targeted")
+
return
def checkenabled(self):
# is selinux enabled ?
try:
- if commands.getoutput("/usr/sbin/sestatus").split(":")[1].strip() == "disabled":
+ if self.collectOutputNow("/usr/sbin/sestatus", root_symlink = "sestatus").split(":")[1].strip() == "disabled":
return False
except:
pass
return True
+
+ def analyze(self):
+ # Check for SELinux denials and capture raw output from sealert
+ if self.cInfo["policy"].runlevelDefault() in self.cInfo["policy"].runlevelByService("setroubleshoot"):
+ # TODO: fixup regex for more precise matching
+ sealert=doRegexFindAll(r"^.*setroubleshoot:.*(sealert\s-l\s.*)","/var/log/messages")
+ if sealert:
+ for i in sealert:
+ self.collectExtOutput("%s" % i)
+ self.addAlert("There are numerous selinux errors present and "+
+ "possible fixes stated in the sealert output.")
diff --git a/src/lib/sos/plugins/squid.py b/src/lib/sos/plugins/squid.py
index fdd3b8cf..3e9b3d8b 100644
--- a/src/lib/sos/plugins/squid.py
+++ b/src/lib/sos/plugins/squid.py
@@ -19,9 +19,9 @@ class squid(sos.plugintools.PluginBase):
"""squid related information
"""
def checkenabled(self):
- if self.cInfo["policy"].pkgByName("squid") != None or os.path.exists("/etc/squid/squid.conf"):
- return True
- return False
+ self.files = [ "/etc/squid/squid.conf" ]
+ self.packages = [ "squid" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
def setup(self):
self.addCopySpec("/etc/squid/squid.conf")
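
Several plugins in this commit (cluster, ldap, named, squid, and further down systemtap and yum) switch to the same checkenabled() convention: declare trigger files and packages, then defer to the base class. The sketch below illustrates the pattern; the PluginBase body shown is an assumption about its behaviour (the corresponding plugintools.py hunk is not visible here) and is included only to make the example self-contained.

import os

class PluginBase(object):
    # assumed behaviour: enable the plugin if any trigger package is
    # installed or any trigger file exists on the system
    files = []
    packages = []
    def __init__(self, pkg_is_installed=lambda name: False):
        self.pkg_is_installed = pkg_is_installed
    def checkenabled(self):
        for pkg in self.packages:
            if self.pkg_is_installed(pkg):
                return True
        for fname in self.files:
            if os.path.exists(fname):
                return True
        return False

class squid(PluginBase):
    def checkenabled(self):
        self.files = ["/etc/squid/squid.conf"]
        self.packages = ["squid"]
        return PluginBase.checkenabled(self)

A plugin only fills in the two lists; the enable/disable logic lives in one place instead of being re-implemented in every checkenabled().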
diff --git a/src/lib/sos/plugins/startup.py b/src/lib/sos/plugins/startup.py
index 59014aaa..a0d3e400 100644
--- a/src/lib/sos/plugins/startup.py
+++ b/src/lib/sos/plugins/startup.py
@@ -19,7 +19,6 @@ class startup(sos.plugintools.PluginBase):
"""
def setup(self):
self.addCopySpec("/etc/rc.d")
-
self.collectExtOutput("/sbin/chkconfig --list", root_symlink = "chkconfig")
self.collectExtOutput("/sbin/runlevel")
return
diff --git a/src/lib/sos/plugins/systemtap.py b/src/lib/sos/plugins/systemtap.py
index b99ce0cf..beab832c 100644
--- a/src/lib/sos/plugins/systemtap.py
+++ b/src/lib/sos/plugins/systemtap.py
@@ -17,13 +17,18 @@
import sos.plugintools
class systemtap(sos.plugintools.PluginBase):
- """SystemTap pre-requisites information
+ """SystemTap information
"""
+ def checkenabled(self):
+ self.files = [ "/usr/bin/stap" ]
+ self.packages = [ "systemtap", "systemtap-runtime" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
+
def setup(self):
# requires systemtap, systemtap-runtime, kernel-devel,
# kernel-debuginfo, kernel-debuginfo-common
+ # FIXME: do not use rpm -qa
self.collectExtOutput("/bin/rpm -qa | /bin/egrep -e kernel.*`uname -r` -e systemtap -e elfutils | sort")
self.collectExtOutput("/usr/bin/stap -V 2")
- self.collectExtOutput("/bin/uname -r")
return
diff --git a/src/lib/sos/plugins/x11.py b/src/lib/sos/plugins/x11.py
index 4abd8782..755352ef 100644
--- a/src/lib/sos/plugins/x11.py
+++ b/src/lib/sos/plugins/x11.py
@@ -19,16 +19,19 @@ class x11(sos.plugintools.PluginBase):
"""X related information
"""
def checkenabled(self):
- try: os.stat("/etc/X11")
- except: pass
- else: return True
-
- return False
+ try:os.stat("/etc/X11")
+ except:pass
+ else:return True
+ return False
def setup(self):
self.addCopySpec("/etc/X11")
self.addCopySpec("/var/log/Xorg.*.log")
self.addCopySpec("/var/log/XFree86.*.log")
self.collectExtOutput("/bin/dmesg | grep -e 'agpgart.'")
+
+ self.addForbiddenPath("/etc/X11/X")
+ self.addForbiddenPath("/etc/X11/fontpath.d")
+
return
diff --git a/src/lib/sos/plugins/xen.py b/src/lib/sos/plugins/xen.py
index 28c0ed43..d6daec63 100644
--- a/src/lib/sos/plugins/xen.py
+++ b/src/lib/sos/plugins/xen.py
@@ -38,11 +38,6 @@ class xen(sos.plugintools.PluginBase):
return False
return True
- def is_running_xenstored(self):
- xs_pid = os.popen("pidof xenstored").read()
- xs_pidnum = re.split('\n$',xs_pid)[0]
- return xs_pidnum.isdigit()
-
def domCollectProc(self):
self.addCopySpec("/proc/xen/balloon")
self.addCopySpec("/proc/xen/capabilities")
@@ -73,17 +68,7 @@ class xen(sos.plugintools.PluginBase):
self.collectExtOutput("/usr/sbin/xm info")
self.collectExtOutput("/usr/sbin/brctl show")
self.domCollectProc()
- self.addCopySpec("/sys/hypervisor/version")
- self.addCopySpec("/sys/hypervisor/compilation")
- self.addCopySpec("/sys/hypervisor/properties")
- self.addCopySpec("/sys/hypervisor/type")
- if is_xenstored_running():
- self.addCopySpec("/sys/hypervisor/uuid")
- self.collectExtOutput("/usr/bin/xenstore-ls")
- else:
- # we need tdb instead of xenstore-ls if cannot get it.
- self.addCopySpec("/var/lib/xenstored/tdb")
-
+ self.addCopySpec("/sys/hypervisor")
# FIXME: we *might* want to collect things in /sys/bus/xen*,
# /sys/class/xen*, /sys/devices/xen*, /sys/modules/blk*,
# /sys/modules/net*, but I've never heard of them actually being
diff --git a/src/lib/sos/plugins/yum.py b/src/lib/sos/plugins/yum.py
index 1f389d30..672451ee 100644
--- a/src/lib/sos/plugins/yum.py
+++ b/src/lib/sos/plugins/yum.py
@@ -22,22 +22,17 @@ class yum(sos.plugintools.PluginBase):
optionList = [("yumlist", "list repositories and packages", "slow", False)]
def checkenabled(self):
- if self.cInfo["policy"].pkgByName("yum") or os.path.exists("/etc/yum.conf"):
- return True
- return False
+ self.files = [ "/etc/yum.conf" ]
+ self.packages = [ "yum" ]
+ return sos.plugintools.PluginBase.checkenabled(self)
- def diagnose(self):
- # FIXME: diagnose should only report actual problems, disabling this for now.
- return True
+ def analyze(self):
# repo sanity checking
# TODO: elaborate/validate actual repo files, however this directory should
# be empty on RHEL 5+ systems.
- try: rhelver = self.cInfo["policy"].pkgDictByName("redhat-release")[0]
- except: rhelver = None
-
- if rhelver == "5" or True:
- if len(os.listdir('/etc/yum.repos.d/')):
- self.addDiagnose("/etc/yum.repos.d/ contains additional repository "+
+ if self.cInfo["policy"].rhelVersion() == 5:
+ if len(os.listdir("/etc/yum.repos.d/")):
+ self.addAlert("/etc/yum.repos.d/ contains additional repository "+
"information and can cause rpm conflicts.")
def setup(self):
@@ -47,7 +42,7 @@ class yum(sos.plugintools.PluginBase):
self.addCopySpec("/etc/yum.conf")
self.addCopySpec("/var/log/yum.log")
- if self.isOptionEnabled("yumlist"):
+ if self.getOption("yumlist"):
# Get a list of channels the machine is subscribed to.
self.collectExtOutput("/bin/echo \"repo list\" | /usr/bin/yum shell")
# List various information about available packages
diff --git a/src/lib/sos/plugintools.py b/src/lib/sos/plugintools.py
index fc4d4b97..f9e10ae1 100644
--- a/src/lib/sos/plugintools.py
+++ b/src/lib/sos/plugintools.py
@@ -30,11 +30,31 @@ This is the base class for sosreport plugins
"""
from sos.helpers import *
from threading import Thread, activeCount
-import os, os.path, sys, string, itertools, glob, re, traceback
-import logging
+import os, os.path, sys, string, glob, re, traceback
+import shutil
from stat import *
from time import time
+# RHEL3 doesn't have a logging module, activate work-around
+try:
+ import logging
+except ImportError:
+ import sos.rhel3_logging
+ logging = sos.rhel3_logging
+
+# python < 2.4 (RHEL3 and RHEL4) doesn't have format_exc, activate work-around
+if sys.version_info[0] <= 2 and sys.version_info[1] < 4:
+ def format_exc():
+ import StringIO
+
+ output = StringIO.StringIO()
+ traceback.print_exc(file = output)
+ toret = output.getvalue()
+ output.close()
+ return toret
+
+ traceback.format_exc = format_exc
+
class PluginBase:
"""
Base class for plugins
@@ -65,6 +85,11 @@ class PluginBase:
self.time_start = None
self.time_stop = None
+ self.packages = []
+ self.files = []
+
+ self.must_exit = False
+
self.soslog = logging.getLogger('sos')
# get the option list into a dictionary
@@ -94,10 +119,10 @@ class PluginBase:
except KeyboardInterrupt:
raise KeyboardInterrupt
except Exception, e:
- self.soslog.log(logging.VERBOSE, "Problem at path %s (%s)" % (abspath,e))
+ self.soslog.log(logging.VERBOSE, "problem at path %s (%s)" % (abspath,e))
break
return False
-
+
def doRegexFindAll(self,regex,fname):
''' Return a list of all non overlapping matches in the string(s)
'''
@@ -109,7 +134,7 @@ class PluginBase:
for i in reg.findall(content):
out.append(i)
return out
-
+
# Methods for copying files and shelling out
def doCopyFileOrDir(self, srcpath):
# pylint: disable-msg = R0912
@@ -126,113 +151,103 @@ class PluginBase:
if copyProhibited:
return ''
+ if not os.path.exists(srcpath):
+ self.soslog.debug("file or directory %s does not exist" % srcpath)
+ return
+
if os.path.islink(srcpath):
# This is a symlink - We need to also copy the file that it points to
- # file and dir symlinks ar ehandled the same
+
+ # FIXME: ignore directories for now
+ if os.path.isdir(srcpath):
+ return
+
link = os.readlink(srcpath)
+ # What's the name of the symlink on the dest tree?
+ dstslname = os.path.join(self.cInfo['dstroot'], srcpath.lstrip(os.path.sep))
+
if os.path.isabs(link):
# the link was an absolute path, and will not point to the new
# tree. We must adjust it.
-
- # What's the name of the symlink on the dest tree?
- dstslname = os.path.join(self.cInfo['dstroot'], srcpath.lstrip(os.path.sep))
-
- # make sure the dst dir exists
- if not (os.path.exists(os.path.dirname(dstslname)) and os.path.isdir(os.path.dirname(dstslname))):
- # create the directory
- os.makedirs(os.path.dirname(dstslname))
-
- dstsldir = os.path.join(self.cInfo['dstroot'], link.lstrip(os.path.sep))
- # Create the symlink on the dst tree
- rpth = sosRelPath(os.path.dirname(dstslname), dstsldir)
- os.symlink(rpth, dstslname)
+ rpth = sosRelPath(os.path.dirname(dstslname), os.path.join(self.cInfo['dstroot'], link.lstrip(os.path.sep)))
else:
# no adjustment, symlink is the relative path
- dstslname = link
+ rpth = link
- if os.path.isdir(srcpath): # symlink to a directory
- # FIXME: don't recurse symlinks until vicious loops are detected
+ # make sure the link doesn't already exist
+ if os.path.exists(dstslname):
+ self.soslog.log(logging.DEBUG, "skipping symlink creation: already exists (%s)" % dstslname)
return
- abslink = os.path.abspath(os.path.dirname(srcpath) + "/" + link)
- self.soslog.log(logging.VERBOSE2, "DIRLINK %s to %s [%s]" % (srcpath,link,abslink))
- for tmplink in self.copiedDirs:
- if tmplink["srcpath"] == abslink or tmplink["pointsto"] == abslink:
- self.soslog.log(logging.VERBOSE2, "already copied [%s]" % srcpath)
- return
+ # make sure the dst dir exists
+ if not (os.path.exists(os.path.dirname(dstslname)) and os.path.isdir(os.path.dirname(dstslname))):
+ os.makedirs(os.path.dirname(dstslname))
- for afile in os.listdir(srcpath):
- if afile == '.' or afile == '..':
- pass
- else:
- self.soslog.log(logging.VERBOSE2, "copying (file or dir) %s" % srcpath+'/'+afile)
- try:
- abspath = self.doCopyFileOrDir(srcpath+'/'+afile)
- except SystemExit:
- raise SystemExit
- except KeyboardInterrupt:
- raise KeyboardInterrupt
- except Exception, e:
- self.soslog.verbose(traceback.format_exc())
-
- # if on forbidden list, abspath is null
- if not abspath == '':
- dstslname = sosRelPath(self.cInfo['rptdir'], abspath)
- self.copiedDirs.append({'srcpath':srcpath, 'dstpath':dstslname, 'symlink':"yes", 'pointsto':os.path.abspath(srcpath+'/'+afile) })
- else:
- self.soslog.log(logging.VERBOSE2, "copying symlink %s" % srcpath)
- try:
- dstslname, abspath = self.__copyFile(srcpath)
- self.copiedFiles.append({'srcpath':srcpath, 'dstpath':dstslname, 'symlink':"yes", 'pointsto':link})
- except SystemExit:
- raise SystemExit
- except KeyboardInterrupt:
- raise KeyboardInterrupt
- except Exception, e:
- self.soslog.log(logging.VERBOSE, "Problem at path %s (%s)" % (srcpath, e))
-
- return abspath
+ self.soslog.log(logging.VERBOSE3, "creating symlink %s -> %s" % (dstslname, rpth))
+ os.symlink(rpth, dstslname)
+ self.copiedFiles.append({'srcpath':srcpath, 'dstpath':rpth, 'symlink':"yes", 'pointsto':link})
+ self.doCopyFileOrDir(link)
+ return
else: # not a symlink
- if not os.path.exists(srcpath):
- self.soslog.debug("File or directory %s does not exist\n" % srcpath)
- elif os.path.isdir(srcpath):
+ if os.path.isdir(srcpath):
for afile in os.listdir(srcpath):
if afile == '.' or afile == '..':
pass
else:
self.doCopyFileOrDir(srcpath+'/'+afile)
- else:
- # This is not a directory or a symlink
- tdstpath, abspath = self.__copyFile(srcpath)
- self.copiedFiles.append({'srcpath':srcpath, 'dstpath':tdstpath, 'symlink':"no"}) # save in our list
- return abspath
+ return
+
+ # if we get here, it's definitely a regular file (not a symlink or dir)
+
+ self.soslog.log(logging.VERBOSE3, "copying file %s" % srcpath)
+ try:
+ tdstpath, abspath = self.__copyFile(srcpath)
+ except "AlreadyExists":
+ self.soslog.log(logging.DEBUG, "error copying file %s (already exists)" % (srcpath))
+ return
+ except IOError:
+ self.soslog.log(logging.VERBOSE2, "error copying file %s (IOError)" % (srcpath))
+ return
+ except Exception, e:
+ self.soslog.log(logging.VERBOSE2, "error copying file %s (%s)" % (srcpath, e))
+ return
+
+ self.copiedFiles.append({'srcpath':srcpath, 'dstpath':tdstpath, 'symlink':"no"}) # save in our list
+
+ return abspath
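
The symlink branch above recreates each link inside the destination tree and, for absolute targets, rewrites the target as a path relative to the link's own directory; that is the job of the module's sosRelPath helper. A minimal stand-alone sketch of the same computation, using os.path.relpath (Python 2.6+) as a stand-in for sosRelPath, with purely illustrative paths:

    import os

    def relative_link_target(dstroot, srcpath, link_target):
        # location the symlink will have inside the copied tree
        dstslname = os.path.join(dstroot, srcpath.lstrip(os.path.sep))
        # location the (absolute) target will have inside the copied tree
        dsttarget = os.path.join(dstroot, link_target.lstrip(os.path.sep))
        # express the target relative to the link's directory
        return os.path.relpath(dsttarget, os.path.dirname(dstslname))

    # relative_link_target("/tmp/root", "/etc/alternatives/mta", "/usr/sbin/sendmail.postfix")
    # -> "../../usr/sbin/sendmail.postfix"
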
def __copyFile(self, src):
""" call cp to copy a file, collect return status and output. Returns the
destination file name.
"""
- try:
- # pylint: disable-msg = W0612
- status, shout, runtime = sosGetCommandOutput("/bin/cp --parents -P --preserve=mode,ownership,timestamps,links " + src +" " + self.cInfo['dstroot'])
- if status:
- self.soslog.debug(shout)
- abspath = os.path.join(self.cInfo['dstroot'], src.lstrip(os.path.sep))
- relpath = sosRelPath(self.cInfo['rptdir'], abspath)
- return relpath, abspath
- except SystemExit:
- raise SystemExit
- except KeyboardInterrupt:
- raise KeyboardInterrupt
- except Exception,e:
- self.soslog.warning("Problem copying file %s (%s)" % (src, e))
+ rel_dir = os.path.dirname(src).lstrip(os.path.sep)
+# if rel_dir[0] == "/": rel_dir = rel_dir[1:]
+ new_dir = os.path.join(self.cInfo['dstroot'], rel_dir)
+ new_fname = os.path.join(new_dir, os.path.basename(src))
+
+ if not os.path.exists(new_fname):
+ if not os.path.isdir(new_dir):
+ os.makedirs(new_dir)
+
+ if os.path.islink(src):
+ linkto = os.readlink(src)
+ os.symlink(linkto, new_fname)
+ else:
+ shutil.copy2(src, new_dir)
+ else:
+ raise "AlreadyExists"
+
+ abspath = os.path.join(self.cInfo['dstroot'], src.lstrip(os.path.sep))
+ relpath = sosRelPath(self.cInfo['rptdir'], abspath)
+ return (relpath, abspath)
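
__copyFile() now does in pure Python what the old "cp --parents ... --preserve" invocation did: rebuild the source's directory structure under dstroot, then copy the file with its metadata. A reduced sketch of that behaviour, leaving out the symlink and already-exists handling shown above:

    import os, shutil

    def copy_with_parents(src, dstroot):
        new_dir = os.path.join(dstroot, os.path.dirname(src).lstrip(os.path.sep))
        if not os.path.isdir(new_dir):
            os.makedirs(new_dir)            # recreate the parent directories
        shutil.copy2(src, new_dir)          # copy data plus mode and timestamps
        return os.path.join(new_dir, os.path.basename(src))
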
def addForbiddenPath(self, forbiddenPath):
"""Specify a path to not copy, even if it's part of a copyPaths[] entry.
- Note: do NOT use globs here.
"""
- self.forbiddenPaths.append(forbiddenPath)
+ # Glob case handling is such that a valid non-glob is a reduced glob
+ for filespec in glob.glob(forbiddenPath):
+ self.forbiddenPaths.append(filespec)
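
The "reduced glob" comment relies on glob.glob() returning a literal path unchanged when it exists and contains no wildcard characters, so plain paths and patterns can share one code path. For example (paths are illustrative):

    import glob

    glob.glob("/etc/hosts")            # ['/etc/hosts'] if it exists, [] otherwise
    glob.glob("/var/log/messages*")    # the rotated copies as well
    glob.glob("/no/such/path")         # [] -- nothing gets appended
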
def getAllOptions(self):
"""
@@ -240,14 +255,22 @@ class PluginBase:
"""
return (self.optNames, self.optParms)
- def setOption(self, optionname, enable):
- ''' enable or disable the named option.
+ def setOption(self, optionname, value):
+ ''' set the named option to value.
'''
for name, parms in zip(self.optNames, self.optParms):
if name == optionname:
- parms['enabled'] = enable
+ parms['enabled'] = value
+ return True
+ else:
+ return False
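
setOption() and getOption() work against two parallel lists, one of option names and one of parameter dictionaries, walked with zip(). A self-contained sketch of that lookup (the option names here are made up):

    optNames = ["syslogsize", "all_logs"]
    optParms = [{"desc": "max size in MiB", "enabled": 15},
                {"desc": "collect rotated logs", "enabled": False}]

    def set_option(optionname, value):
        for name, parms in zip(optNames, optParms):
            if name == optionname:
                parms["enabled"] = value
                return True
        return False

    set_option("all_logs", True)    # True; a getOption("all_logs") would now return True
    set_option("bogus", 1)          # False; unknown options are ignored
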
def isOptionEnabled(self, optionname):
+ ''' Deprecated, use getOption() instead
+ '''
+ return self.getOption(optionname)
+
+ def getOption(self, optionname):
''' see whether the named option is enabled.
'''
for name, parms in zip(self.optNames, self.optParms):
@@ -259,6 +282,9 @@ class PluginBase:
def addCopySpecLimit(self,fname,sizelimit = None):
"""Add a file specification (with limits)
"""
+ if not ( fname and len(fname) ):
+ self.soslog.warning("invalid file path")
+ return False
files = glob.glob(fname)
files.sort()
cursize = 0
@@ -272,57 +298,26 @@ class PluginBase:
""" Add a file specification (can be file, dir,or shell glob) to be
copied into the sosreport by this module
"""
+ if not ( copyspec and len(copyspec) ):
+ self.soslog.warning("invalid file path")
+ return False
# Glob case handling is such that a valid non-glob is a reduced glob
for filespec in glob.glob(copyspec):
self.copyPaths.append(filespec)
- def copyFileGlob(self, srcglob):
- """ Deprecated - please modify modules to use addCopySpec()
- """
- sys.stderr.write("Warning: thecopyFileGlob() function has been deprecated. Please")
- sys.stderr.write("use addCopySpec() instead. Calling addCopySpec() now.")
- self.addCopySpec(srcglob)
-
- def copyFileOrDir(self, srcpath):
- """ Deprecated - please modify modules to use addCopySpec()
- """
- sys.stderr.write("Warning: the copyFileOrDir() function has been deprecated. Please\n")
- sys.stderr.write("use addCopySpec() instead. Calling addCopySpec() now.\n")
- raise ValueError
- #self.addCopySpec(srcpath)
-
- def runExeInd(self, exe):
- """ Deprecated - use callExtProg()
- """
- sys.stderr.write("Warning: the runExeInd() function has been deprecated. Please use\n")
- sys.stderr.write("the callExtProg() function. This should only be called\n")
- sys.stderr.write("if collect() is overridden.")
- pass
-
def callExtProg(self, prog):
""" Execute a command independantly of the output gathering part of
sosreport
- """
- # Log if binary is not runnable or does not exist
- if not os.access(prog.split()[0], os.X_OK):
- self.soslog.log(logging.VERBOSE, "binary '%s' does not exist or is not runnable" % prog.split()[0])
-
+ """
# pylint: disable-msg = W0612
- status, shout, runtime = sosGetCommandOutput(prog)
+ status, shout, runtime = sosGetCommandOutput(prog)
return status
-
- def runExe(self, exe):
- """ Deprecated - use collectExtOutput()
- """
- sys.stderr.write("Warning: the runExe() function has been deprecated. Please use\n")
- sys.stderr.write("the collectExtOutput() function.\n")
- pass
- def collectExtOutput(self, exe, suggest_filename = None, root_symlink = None):
+ def collectExtOutput(self, exe, suggest_filename = None, root_symlink = None, timeout = 300):
"""
Run a program and collect the output
"""
- self.collectProgs.append( (exe,suggest_filename,root_symlink) )
+ self.collectProgs.append( (exe, suggest_filename, root_symlink, timeout) )
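
collectExtOutput() now records a per-command timeout which is later handed down to the command runner. The real helper lives in sos.helpers; a rough present-day sketch of the idea -- run a command but give up and kill it once the deadline passes -- could look like this (subprocess-based, so it assumes Python 2.6 or newer):

    import subprocess, time

    def run_with_timeout(command, timeout=300):
        start = time.time()
        proc = subprocess.Popen(command, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        while proc.poll() is None:
            if time.time() - start > timeout:
                proc.kill()                 # do not wait forever on a hung command
                proc.wait()
                break
            time.sleep(0.1)
        output = proc.stdout.read()         # simplification: huge output could fill the pipe first
        return proc.returncode, output, time.time() - start
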
def fileGrep(self, regexp, fname):
results = []
@@ -358,18 +353,13 @@ class PluginBase:
return outfn
- def collectOutputNow(self, exe, suggest_filename = None, root_symlink = False):
+ def collectOutputNow(self, exe, suggest_filename = None, root_symlink = False, timeout = 300):
""" Execute a command and save the output to a file for inclusion in
the report
"""
- # First check to make sure the binary exists and is runnable.
- if not os.access(exe.split()[0], os.X_OK):
- self.soslog.log(logging.VERBOSE, "binary '%s' does not exist or is not runnable, trying anyways" % exe.split()[0])
-
- # FIXME: we should have a timeout or we may end waiting forever
# pylint: disable-msg = W0612
- status, shout, runtime = sosGetCommandOutput(exe)
+ status, shout, runtime = sosGetCommandOutput(exe, timeout = timeout)
if suggest_filename:
outfn = self.makeCommandFilename(suggest_filename)
@@ -379,7 +369,7 @@ class PluginBase:
if not os.path.isdir(os.path.dirname(outfn)):
os.mkdir(os.path.dirname(outfn))
- if not (status == 127 or status == 32512):
+ if not (status == 127 or status == 32512): # if not command_not_found
outfd = open(outfn, "w")
if len(shout): outfd.write(shout+"\n")
outfd.close()
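
The two magic numbers in the status check are the same condition seen through two encodings: the shell reports "command not found" as exit code 127, and a wait()-style status word carries that code in its high byte, giving 32512.

    import os

    assert 127 << 8 == 32512
    # on platforms using the wait() encoding:
    # os.WIFEXITED(32512) is True and os.WEXITSTATUS(32512) == 127
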
@@ -387,7 +377,8 @@ class PluginBase:
if root_symlink:
curdir = os.getcwd()
os.chdir(self.cInfo['dstroot'])
- os.symlink(outfn[len(self.cInfo['dstroot'])+1:], root_symlink.strip("/."))
+ try: os.symlink(outfn[len(self.cInfo['dstroot'])+1:], root_symlink.strip("/."))
+ except: pass
os.chdir(curdir)
outfn_strip = outfn[len(self.cInfo['cmddir'])+1:]
@@ -476,6 +467,10 @@ class PluginBase:
if semaphore: semaphore.acquire()
+ if self.must_exit:
+ if semaphore: semaphore.release()
+ return
+
self.soslog.log(logging.VERBOSE, "starting threaded plugin %s" % self.piName)
self.time_start = time()
@@ -486,33 +481,29 @@ class PluginBase:
try:
self.doCopyFileOrDir(path)
except SystemExit:
- if threaded:
- return SystemExit
- else:
- raise SystemExit
+ if semaphore: semaphore.release()
+ if threaded: return SystemExit
+ else: raise SystemExit
except KeyboardInterrupt:
- if threaded:
- return KeyboardInterrupt
- else:
- raise KeyboardInterrupt
+ if semaphore: semaphore.release()
+ if threaded: return KeyboardInterrupt
+ else: raise KeyboardInterrupt
except Exception, e:
self.soslog.log(logging.VERBOSE2, "error copying from pathspec %s (%s), traceback follows:" % (path,e))
self.soslog.log(logging.VERBOSE2, traceback.format_exc())
- for (prog,suggest_filename,root_symlink) in self.collectProgs:
+ for (prog, suggest_filename, root_symlink, timeout) in self.collectProgs:
self.soslog.debug("collecting output of '%s'" % prog)
try:
- self.collectOutputNow(prog, suggest_filename, root_symlink)
+ self.collectOutputNow(prog, suggest_filename, root_symlink, timeout)
except SystemExit:
- if threaded:
- return SystemExit
- else:
- raise SystemExit
+ if semaphore: semaphore.release()
+ if threaded: return SystemExit
+ else: raise SystemExit
except KeyboardInterrupt:
- if threaded:
- return KeyboardInterrupt
- else:
- raise KeyboardInterrupt
- except:
+ if semaphore: semaphore.release()
+ if threaded: return KeyboardInterrupt
+ else: raise KeyboardInterrupt
+ except Exception, e:
                self.soslog.log(logging.VERBOSE2, "error collecting output of '%s', traceback follows:" % prog)
self.soslog.log(logging.VERBOSE2, traceback.format_exc())
@@ -521,6 +512,10 @@ class PluginBase:
if semaphore: semaphore.release()
self.soslog.log(logging.VERBOSE, "plugin %s returning" % self.piName)
+ def exit_please(self):
+ """ This function tells the plugin that it should exit ASAP"""
+ self.must_exit = True
+
def get_description(self):
""" This function will return the description for the plugin"""
try:
@@ -532,7 +527,17 @@ class PluginBase:
""" This function can be overidden to let the plugin decide whether
it should run or not.
"""
- return True
+ # some files or packages have been specified for this plugin
+ if len(self.files) or len(self.packages):
+ for fname in self.files:
+ if os.path.exists(fname):
+ return True
+ for pkgname in self.packages:
+ if self.cInfo["policy"].pkgByName(pkgname):
+ return True
+ return False
+
+ return True
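
With this change a plugin can opt out automatically on systems where its subsystem is absent, simply by listing trigger files and packages as class attributes. A hypothetical plugin using the mechanism (the class name, file and package names are only examples):

    import sos.plugintools

    class nfsserver(sos.plugintools.PluginBase):
        """NFS server information"""
        files = ("/etc/exports",)          # enable if this file exists ...
        packages = ("nfs-utils",)          # ... or if this package is installed

        def setup(self):
            self.addCopySpec("/etc/exports")
            self.collectExtOutput("/usr/sbin/exportfs -v")
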
def defaultenabled(self):
"""This devices whether a plugin should be automatically loaded or
@@ -604,11 +609,11 @@ class PluginBase:
html = html + "<p>Commands Executed:<br><ul>\n"
# convert file name to relative path from our root
for cmd in self.executedCommands:
- if cmd["file"] and len(cmd["file"]):
- cmdOutRelPath = sosRelPath(self.cInfo['rptdir'], self.cInfo['cmddir'] + "/" + cmd['file'])
- html = html + '<li><a href="%s">%s</a></li>\n' % (cmdOutRelPath, cmd['exe'])
- else:
- html = html + '<li>%s</li>\n' % (cmd['exe'])
+ if cmd["file"] and len(cmd["file"]):
+ cmdOutRelPath = sosRelPath(self.cInfo['rptdir'], self.cInfo['cmddir'] + "/" + cmd['file'])
+ html = html + '<li><a href="%s">%s</a></li>\n' % (cmdOutRelPath, cmd['exe'])
+ else:
+ html = html + '<li>%s</li>\n' % (cmd['exe'])
html = html + "</ul></p>\n"
# Alerts
@@ -624,5 +629,3 @@ class PluginBase:
html = html + self.customText + "</p>\n"
return html
-
-
diff --git a/src/lib/sos/policyredhat.py b/src/lib/sos/policyredhat.py
index c25a9d55..a10e7dfb 100755
--- a/src/lib/sos/policyredhat.py
+++ b/src/lib/sos/policyredhat.py
@@ -26,22 +26,45 @@ from sos.helpers import *
import random
import re
import md5
-
-SOME_PATH = "/tmp/SomePath"
+import rpm
+import time
+
+sys.path.insert(0, "/usr/share/rhn/")
+try:
+ from up2date_client import up2dateAuth
+ from up2date_client import config
+ from rhn import rpclib
+except:
+ # might fail if non-RHEL
+ pass
#class SosError(Exception):
# def __init__(self, code, message):
# self.code = code
# self.message = message
-#
+#
# def __str__(self):
# return 'Sos Error %s: %s' % (self.code, self.message)
+def memoized(function):
+ ''' function decorator to allow caching of return values
+ '''
+ function.cache={}
+ def f(*args):
+ try:
+ return function.cache[args]
+ except KeyError:
+ result = function.cache[args] = function(*args)
+ return result
+ return f
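
The memoized decorator above caches one return value per distinct argument tuple on the original function object. Used with the Python 2.4+ decorator syntax (the decorated function is only an illustration):

    @memoized
    def query_installed(name):
        # stands in for an expensive lookup, e.g. an rpm database query
        return "looked up %s" % name

    query_installed("bash")    # body runs once, result is cached
    query_installed("bash")    # answered from the cache without re-running the body
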
class SosPolicy:
"This class implements various policies for sos"
def __init__(self):
- #print "Policy init"
+ self.report_file = ""
+ self.report_md5 = ""
+ self.reportName = ""
+ self.ticketNumber = ""
return
def setCommons(self, commons):
@@ -55,35 +78,45 @@ class SosPolicy:
#print "validating %s" % pluginpath
return True
+ def pkgProvides(self, name):
+ pkg = self.pkgByName(name)
+ return pkg['providename']
+
def pkgRequires(self, name):
- # FIXME: we're relying on rpm to sort the output list
- cmd = "/bin/rpm -q --requires %s" % (name)
- return [requires[:-1].split() for requires in os.popen(cmd).readlines()]
+ pkg = self.pkgByName(name)
+ return pkg['requirename']
def allPkgsByName(self, name):
- # FIXME: we're relying on rpm to sort the output list
- cmd = "/bin/rpm --qf '%%{N} %%{V} %%{R} %%{ARCH}\n' -q %s" % (name,)
- pkgs = os.popen(cmd).readlines()
- return [pkg[:-1].split() for pkg in pkgs if pkg.startswith(name)]
+ return self.allPkgs("name", name)
+
+ def allPkgsByNameRegex(self, regex_name):
+ reg = re.compile(regex_name)
+ return [pkg for pkg in self.allPkgs() if reg.match(pkg['name'])]
def pkgByName(self, name):
# TODO: do a full NEVRA compare and return newest version, best arch
try:
# lame attempt at locating newest
- pkg = self.allPkgsByName(name)[-1]
- except IndexError:
- pkg = None
-
- return pkg
-
- def pkgDictByName(self, name):
- # FIXME: what does this do again ??
- pkgName = self.pkgByName(name)
- print pkgName
- if pkgName and len(pkgName) > len(name):
- return pkgName[len(name)+1:].split("-")
+ return self.allPkgsByName(name)[-1]
+ except:
+ pass
+ return None
+
+ def allPkgs(self, ds = None, value = None):
+ # if possible return the cached values
+ try: return self._cache_rpm[ "%s-%s" % (ds,value) ]
+ except AttributeError: self._cache_rpm = {}
+ except KeyError: pass
+
+ ts = rpm.TransactionSet()
+ if ds and value:
+ mi = ts.dbMatch(ds, value)
else:
- return None
+ mi = ts.dbMatch()
+
+ self._cache_rpm[ "%s-%s" % (ds,value) ] = [pkg for pkg in mi]
+ del mi, ts
+ return self._cache_rpm[ "%s-%s" % (ds,value) ]
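
allPkgs() replaces the earlier "rpm -q" pipelines with direct queries through the rpm Python bindings, keeping the resulting header lists in a per-instance cache. The query style itself, in isolation:

    import rpm

    ts = rpm.TransactionSet()
    for hdr in ts.dbMatch("name", "kernel"):    # or ts.dbMatch() for every installed package
        print("%s-%s-%s.%s" % (hdr["name"], hdr["version"],
                               hdr["release"], hdr["arch"]))
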
def runlevelByService(self, name):
ret = []
@@ -91,7 +124,7 @@ class SosPolicy:
for tabs in commands.getoutput("/sbin/chkconfig --list %s" % name).split():
try:
(runlevel, onoff) = tabs.split(":", 1)
- except ValueError:
+ except:
pass
else:
if onoff == "on":
@@ -102,7 +135,7 @@ class SosPolicy:
def runlevelDefault(self):
try:
- reg=self.fileGrep(r"^id:(\d{1}):initdefault:", "/etc/inittab")
+ reg=self.doRegexFindAll(r"^id:(\d{1}):initdefault:", "/etc/inittab")
for initlevel in reg:
return initlevel
except:
@@ -111,9 +144,36 @@ class SosPolicy:
def kernelVersion(self):
return commands.getoutput("/bin/uname -r").strip("\n")
+ def hostName(self):
+ return commands.getoutput("/bin/hostname").split(".")[0]
+
+ def rhelVersion(self):
+ try:
+ pkgname = self.pkgByName("redhat-release")["version"]
+ if pkgname[0] == "4":
+ return 4
+ elif pkgname in [ "5Server", "5Client" ]:
+ return 5
+ except: pass
+ return False
+
+ def rhnUsername(self):
+ try:
+ cfg = config.initUp2dateConfig()
+
+ return rpclib.xmlrpclib.loads(up2dateAuth.getSystemId())[0][0]['username']
+ except:
+ # ignore any exception and return an empty username
+ return ""
+
def isKernelSMP(self):
- if self.kernelVersion()[-3:]=="smp": return True
- else: return False
+ if commands.getoutput("/bin/uname -v").split()[1] == "SMP":
+ return True
+ else:
+ return False
+
+ def getArch(self):
+ return commands.getoutput("/bin/uname -m").strip()
def pkgNVRA(self, pkg):
fields = pkg.split("-")
@@ -121,85 +181,188 @@ class SosPolicy:
name = "-".join(fields[:-3])
return (name, version, release, arch)
- def preWork(self, name="", ticket=""):
+ def getDstroot(self):
+ """Find a temp directory to form the root for our gathered information
+ and reports.
+ """
+ dstroot = "/tmp/%s-%s" % (self.hostName(), time.strftime("%Y%m%d%H%M%S"))
+ try:
+ os.mkdir(dstroot, 0700)
+ except:
+ return False
+ return dstroot
+
+ def preWork(self):
# this method will be called before the gathering begins
- localname = commands.getoutput("/bin/uname -n").split(".")[0]
+ localname = self.rhnUsername()
+ if len(localname) == 0: localname = self.hostName()
- try:
- if len(name) == 0:
+ if not self.cInfo['cmdlineopts'].batch:
+ try:
self.reportName = raw_input(_("Please enter your first initial and last name [%s]: ") % localname)
- else:
- self.reportName = str(name)
- self.reportName = re.sub(r"[^a-zA-Z.0-9]", "", self.reportName)
-
- if len(self.reportName) == 0:
- self.reportName = localname
-
- if len(ticket) == 0:
- self.ticketNumber = raw_input(_("Please enter the case number that you are generating this report for: "))
- else:
- self.ticketNumber = str(ticket)
- self.ticketNumber = re.sub(r"[^0-9]", "", self.ticketNumber)
+ self.reportName = re.sub(r"[^a-zA-Z.0-9]", "", self.reportName)
- print
- except KeyboardInterrupt:
- print
- sys.exit(0)
+ self.ticketNumber = raw_input(_("Please enter the case number that you are generating this report for: "))
+ self.ticketNumber = re.sub(r"[^0-9]", "", self.ticketNumber)
+ print
+ except:
+ print
+ sys.exit(0)
- def packageResults(self):
+ if len(self.reportName) == 0:
+ self.reportName = localname
- if len(self.ticketNumber):
- namestr = self.reportName + "." + self.ticketNumber
- else:
- namestr = self.reportName
+ return
- ourtempdir = gettempdir()
- tarballName = os.path.join(ourtempdir, "sosreport-" + namestr + ".tar.bz2")
+ def renameResults(self, newName):
+ newName = os.path.join(gettempdir(), newName)
+ if len(self.report_file) and os.path.isfile(self.report_file):
+ try: os.rename(self.report_file, newName)
+ except: return False
+ self.report_file = newName
- namestr = namestr + "-" + str(random.randint(1, 999999))
+ def packageResults(self):
- aliasdir = os.path.join(ourtempdir, namestr)
+ self.renameResults("sosreport-%s-%s.tar.bz2" % (self.reportName, time.strftime("%Y%m%d%H%M%S")))
- tarcmd = "/bin/tar -jcf %s %s" % (tarballName, namestr)
+ tarcmd = "/bin/tar -jcf %s %s" % (self.report_file, os.path.basename(self.cInfo['dstroot']))
print _("Creating compressed archive...")
- if not os.access(string.split(tarcmd)[0], os.X_OK):
- print "Unable to create tarball"
- return
- # FIXME: gotta be a better way...
- os.system("/bin/mv %s %s" % (self.cInfo['dstroot'], aliasdir))
curwd = os.getcwd()
- os.chdir(ourtempdir)
+ os.chdir(os.path.dirname(self.cInfo['dstroot']))
oldmask = os.umask(077)
- # pylint: disable-msg = W0612
- status, shout, runtime = sosGetCommandOutput(tarcmd)
+ status, shout = commands.getstatusoutput(tarcmd)
os.umask(oldmask)
os.chdir(curwd)
- # FIXME: use python internal command
- os.system("/bin/mv %s %s" % (aliasdir, self.cInfo['dstroot']))
- # add last 6 chars from md5sum to file name
- fp = open(tarballName, "r")
- md5out = md5.new(fp.read()).hexdigest()
+ return
+
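
packageResults() still shells out to /bin/tar for the bzip2 archive. A pure-Python alternative with the standard tarfile module could look like the sketch below; this is only an illustration of the option, not what the patch does:

    import os, tarfile

    def make_archive(report_file, dstroot):
        tar = tarfile.open(report_file, "w:bz2")    # bzip2-compressed, like "tar -j"
        try:
            tar.add(dstroot, arcname=os.path.basename(dstroot))
        finally:
            tar.close()
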
+ def cleanDstroot(self):
+ if not os.path.isdir(os.path.join(self.cInfo['dstroot'],"sos_commands")):
+ # doesn't look like a dstroot, refusing to clean
+ return False
+ os.system("/bin/rm -rf %s" % self.cInfo['dstroot'])
+
+ def encryptResults(self):
+ # make sure a report exists
+ if not self.report_file:
+ return False
+
+ print _("Encrypting archive...")
+ gpgname = self.report_file + ".gpg"
+
+ try:
+ keyring = self.cInfo['config'].get("general", "gpg_keyring")
+ except:
+ keyring = "/usr/share/sos/rhsupport.pub"
+
+ try:
+ recipient = self.cInfo['config'].get("general", "gpg_recipient")
+ except:
+ recipient = "support@redhat.com"
+
+ status, output = commands.getstatusoutput("""/usr/bin/gpg --trust-model always --batch --keyring "%s" --no-default-keyring --compress-level 0 --encrypt --recipient "%s" --output "%s" "%s" """ % (keyring, recipient, gpgname, self.report_file))
+ if status == 0:
+ os.unlink(self.report_file)
+ self.report_file = gpgname
+ else:
+ print _("There was a problem encrypting your report.")
+ sys.exit(1)
+
+ def displayResults(self):
+ # make sure a report exists
+ if not self.report_file:
+ return False
+
+ # calculate md5
+ fp = open(self.report_file, "r")
+ self.report_md5 = md5.new(fp.read()).hexdigest()
fp.close()
- oldtarballName = tarballName
- tarballName = os.path.join(ourtempdir, "sosreport-%s-%s.tar.bz2" % (namestr, md5out[-6:]) )
- os.system("/bin/mv %s %s" % (oldtarballName, tarballName) )
- # store md5 to a file
- fp = open(tarballName + ".md5", "w")
- fp.write(md5out + "\n")
+
+ self.renameResults("sosreport-%s-%s-%s.tar.bz2" % (self.reportName, time.strftime("%Y%m%d%H%M%S"), self.report_md5[-4:]))
+
+ # store md5 into file
+ fp = open(self.report_file + ".md5", "w")
+ fp.write(self.report_md5 + "\n")
fp.close()
- sys.stdout.write("\n")
- print _("Your sosreport has been generated and saved in:\n %s") % tarballName
print
- if md5out:
- print _("The md5sum is: ") + md5out
+ print _("Your sosreport has been generated and saved in:\n %s") % self.report_file
+ print
+ if len(self.report_md5):
+ print _("The md5sum is: ") + self.report_md5
print
print _("Please send this file to your support representative.")
- sys.stdout.write("\n")
+ print
- return
+ def uploadResults(self):
+ # make sure a report exists
+ if not self.report_file:
+ return False
+
+ print
+ # make sure it's readable
+ try:
+ fp = open(self.report_file, "r")
+ except:
+ return False
+
+ # read ftp URL from configuration
+ try:
+ upload_url = self.cInfo['config'].get("general", "upload_url")
+ except:
+ upload_url = "ftp://dropbox.redhat.com/incoming"
+
+ from urlparse import urlparse
+ url = urlparse(upload_url)
+
+ if url[0] != "ftp":
+ print _("Cannot upload to specified URL.")
+ return
+
+ # extract username and password from URL, if present
+ if url[1].find("@") > 0:
+ username, host = url[1].split("@", 1)
+ if username.find(":") > 0:
+ username, passwd = username.split(":", 1)
+ else:
+ passwd = None
+ else:
+ username, passwd, host = None, None, url[1]
+
+ # extract port, if present
+ if host.find(":") > 0:
+ host, port = host.split(":", 1)
+ port = int(port)
+ else:
+ port = 21
+
+ path = url[2]
+
+ try:
+ from ftplib import FTP
+ upload_name = os.path.basename(self.report_file)
+
+ ftp = FTP()
+ ftp.connect(host, port)
+ if username and passwd:
+ ftp.login(username, passwd)
+ else:
+ ftp.login()
+ ftp.cwd(path)
+ ftp.set_pasv(True)
+ ftp.storbinary('STOR %s' % upload_name, fp)
+ ftp.quit()
+ except:
+ print _("There was a problem uploading your report to Red Hat support.")
+ else:
+ print _("Your report was successfully uploaded to Red Hat's ftp server with name:")
+ print " " + upload_name
+ print
+ print _("Please communicate this name to your support representative.")
+ print
+
+ fp.close()