about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--sos/plugins/gluster.py67
1 file changed, 30 insertions, 37 deletions
diff --git a/sos/plugins/gluster.py b/sos/plugins/gluster.py
index abf0b402..101b0da2 100644
--- a/sos/plugins/gluster.py
+++ b/sos/plugins/gluster.py
@@ -20,7 +20,7 @@ class Gluster(Plugin, RedHatPlugin):
plugin_name = 'gluster'
profiles = ('storage', 'virt')
- statedump_dir = '/tmp/glusterfs-statedumps'
+ statedump_dir = '/var/run/gluster'
packages = ["glusterfs", "glusterfs-core"]
files = ["/etc/glusterd", "/var/lib/glusterd"]
@@ -40,17 +40,6 @@ class Gluster(Plugin, RedHatPlugin):
fp.close()
return out
- def make_preparations(self, name_dir):
- try:
- os.mkdir(name_dir)
- except OSError:
- pass
- fp = open('/tmp/glusterdump.options', 'w')
- data = 'path=' + name_dir + '\n'
- fp.write(data)
- fp.write('all=yes')
- fp.close()
-
def wait_for_statedump(self, name_dir):
statedumps_present = 0
statedump_entries = os.listdir(name_dir)
@@ -76,7 +65,11 @@ class Gluster(Plugin, RedHatPlugin):
def setup(self):
self.add_forbidden_path("/var/lib/glusterd/geo-replication/secret.pem")
- self.add_cmd_output("gluster peer status")
+ self.add_cmd_output([
+ "gluster peer status",
+ "gluster pool list",
+ "gluster volume status"
+ ])
self.add_copy_spec([
"/etc/redhat-storage-release",
@@ -91,41 +84,44 @@ class Gluster(Plugin, RedHatPlugin):
"/var/run/gluster/shared_storage/nfs-ganesha/"
] + glob.glob('/var/run/gluster/*tier-dht/*'))
- # collect logs - apply log_size for any individual file
- # all_logs takes precedence over logsize
if not self.get_option("all_logs"):
- limit = self.get_option("log_size")
- else:
- limit = 0
-
- if limit:
- for f in (glob.glob("/var/log/glusterfs/*log") +
- glob.glob("/var/log/glusterfs/*/*log") +
- glob.glob("/var/log/glusterfs/geo-replication/*/*log")):
- self.add_copy_spec(f, limit)
+ self.add_copy_spec([
+ "/var/log/glusterfs/*log",
+ "/var/log/glusterfs/*/*log",
+ "/var/log/glusterfs/geo-replication/*/*log"
+ ])
else:
self.add_copy_spec("/var/log/glusterfs")
if self.get_option("dump"):
- self.make_preparations(self.statedump_dir)
- if self.check_ext_prog("killall -USR1 glusterfs glusterfsd"):
- # let all the processes catch the signal and create
- # statedump file entries.
- time.sleep(1)
- self.wait_for_statedump(self.statedump_dir)
- self.add_copy_spec('/tmp/glusterdump.options')
- self.add_copy_spec(self.statedump_dir)
+ if os.path.exists(self.statedump_dir):
+ if self.check_ext_prog(
+ "killall -USR1 glusterfs glusterfsd glusterd"):
+ # let all the processes catch the signal and create
+ # statedump file entries.
+ time.sleep(1)
+ self.wait_for_statedump(self.statedump_dir)
+ self.add_copy_spec(self.statedump_dir)
+ else:
+ self.soslog.info("could not send SIGUSR1 to glusterfs/"
+ "glusterd processes")
else:
- self.soslog.info("could not send SIGUSR1 to glusterfs \
- processes")
+ self.soslog.warn("Unable to generate statedumps, no such "
+ "directory: %s" % self.statedump_dir)
+ state = self.get_command_output("gluster get-state")
+ if state['status'] == 0:
+ state_file = state['output'].split()[-1]
+ self.add_copy_spec(state_file)
volume_file = self.get_cmd_output_now("gluster volume info")
if volume_file:
for volname in self.get_volume_names(volume_file):
self.add_cmd_output([
+ "gluster volume get %s all" % volname,
"gluster volume geo-replication %s status" % volname,
"gluster volume heal %s info" % volname,
"gluster volume heal %s info split-brain" % volname,
+ "gluster volume status %s clients" % volname,
"gluster snapshot list %s" % volname,
"gluster volume quota %s list" % volname,
"gluster volume rebalance %s status" % volname,
@@ -133,7 +129,4 @@ class Gluster(Plugin, RedHatPlugin):
"gluster snapshot status %s" % volname
])
- self.add_cmd_output("gluster pool list")
- self.add_cmd_output("gluster volume status")
-
# vim: set et ts=4 sw=4 :