Diffstat

-rw-r--r--  .cirrus.yml                          |   2
-rw-r--r--  README.md                            |   8
-rw-r--r--  docs/index.rst                       |   4
-rw-r--r--  man/en/sos-collect.1                 |  24
-rw-r--r--  man/en/sos.1                         |   2
-rw-r--r--  man/en/sos.conf.5                    |   2
-rw-r--r--  plugins_overview.py                  |   2
-rw-r--r--  sos.conf                             |   2
-rw-r--r--  sos/collector/__init__.py            | 101
-rw-r--r--  sos/collector/clusters/__init__.py   |  33
-rw-r--r--  sos/collector/clusters/ocp.py        |  12
-rw-r--r--  sos/collector/clusters/ovirt.py      |  14
-rw-r--r--  sos/collector/clusters/satellite.py  |   2
-rw-r--r--  sos/collector/sosnode.py             |  14
-rw-r--r--  tests/test_data/etc/sos/sos.conf     |   2

15 files changed, 102 insertions, 122 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 56f18e94..38f10d42 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -31,7 +31,7 @@ env:
 # Default task timeout
 timeout_in: 30m
 
-# enable auto cancelling concurrent builds on master when multiple PRs are
+# enable auto cancelling concurrent builds on main when multiple PRs are
 # merged at once
 auto_cancellation: true
 
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-[![Build Status](https://api.cirrus-ci.com/github/sosreport/sos.svg?branch=master)](https://cirrus-ci.com/github/sosreport/sos) [![Documentation Status](https://readthedocs.org/projects/sos/badge/?version=master)](https://sos.readthedocs.io/en/master/?badge=master) [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/sosreport/sos.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sosreport/sos/context:python)
+[![Build Status](https://api.cirrus-ci.com/github/sosreport/sos.svg?branch=main)](https://cirrus-ci.com/github/sosreport/sos) [![Documentation Status](https://readthedocs.org/projects/sos/badge/?version=main)](https://sos.readthedocs.io/en/main/?badge=main) [![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/sosreport/sos.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/sosreport/sos/context:python)
 
 # SoS
 
@@ -12,7 +12,7 @@ This project is hosted at:
 For the latest version, to contribute, and for more information, please visit
 the project pages or join the mailing list.
 
-To clone the current master (development) branch run:
+To clone the current main (development) branch run:
 
 ```
 git clone git://github.com/sosreport/sos.git
@@ -42,9 +42,9 @@ discussion. Patch submissions and reviews are welcome too.
 ## Patches and pull requests
 
 Patches can be submitted via the mailing list or as GitHub pull requests. If
-using GitHub please make sure your branch applies to the current master as a
+using GitHub please make sure your branch applies to the current main as a
 'fast forward' merge (i.e. without creating a merge commit). Use the `git
-rebase` command to update your branch to the current master if necessary.
+rebase` command to update your branch to the current main if necessary.
 
 Please refer to the [contributor guidelines][0] for guidance on formatting
 patches and commit messages.
diff --git a/docs/index.rst b/docs/index.rst
index bb4904b6..16f765a2 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -13,7 +13,7 @@ https://github.com/sosreport/sos
 For the latest version, to contribute, and for more information, please visit
 the project pages or join the mailing list.
 
-To clone the current master (development) branch run:
+To clone the current main (development) branch run:
 
 .. code::
 
@@ -32,7 +32,7 @@ Mailing list
 Patches and pull requests
 ^^^^^^^^^^^^^^^^^^^^^^^^^
 
-Patches can be submitted via the mailing list or as GitHub pull requests. If using GitHub please make sure your branch applies to the current master as a 'fast forward' merge (i.e. without creating a merge commit). Use the git rebase command to update your branch to the current master if necessary.
+Patches can be submitted via the mailing list or as GitHub pull requests. If using GitHub please make sure your branch applies to the current main branch as a 'fast forward' merge (i.e. without creating a merge commit). Use the git rebase command to update your branch to the current main branch if necessary.
 
 Documentation
 =============
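The contributing docs above ask that branches apply to main as a fast-forward merge. A minimal shell sketch of that rebase workflow; the remote name `upstream` and the branch name are assumptions, substitute your own:

```
# Rebase a feature branch onto the current main before opening/updating a PR.
# "upstream" is assumed to point at github.com/sosreport/sos.
git fetch upstream
git checkout my-feature-branch
git rebase upstream/main
# the rebase rewrites history, so the PR branch needs a (safe) force-push:
git push --force-with-lease origin my-feature-branch
```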
diff --git a/man/en/sos-collect.1 b/man/en/sos-collect.1
index e0e23504..e930023e 100644
--- a/man/en/sos-collect.1
+++ b/man/en/sos-collect.1
@@ -25,7 +25,7 @@ sos collect \- Collect sosreports from multiple (cluster) nodes
     [\-\-nodes NODES]
     [\-\-no\-pkg\-check]
     [\-\-no\-local]
-    [\-\-master MASTER]
+    [\-\-primary PRIMARY]
     [\-\-image IMAGE]
     [\-\-force-pull-image]
     [\-\-registry-user USER]
@@ -54,7 +54,7 @@ collect is an sos subcommand to collect sosreports from multiple nodes and package
 them in a single useful tar archive.
 
 sos collect can be run either on a workstation that has SSH key authentication setup
-for the nodes in a given cluster, or from a "master" node in a cluster that has SSH
+for the nodes in a given cluster, or from a "primary" node in a cluster that has SSH
 keys configured for the other nodes.
 
 Some sosreport options are supported by sos-collect and are passed directly to
@@ -99,7 +99,7 @@ Sosreport option. Specifies a case number identifier.
 \fB\-\-cluster\-type\fR CLUSTER_TYPE
 When run by itself, sos collect will attempt to identify the type of cluster at play.
 This is done by checking package or configuration information against the localhost, or
-the master node if \fB"--master"\fR is supplied.
+the primary node if \fB"--primary"\fR is supplied.
 
 Setting \fB--cluster-type\fR skips this step and forcibly sets a particular profile.
@@ -152,10 +152,10 @@ rather than key-pair encryption.
 \fB\-\-group\fR GROUP
 Specify an existing host group definition to use.
 
-Host groups are pre-defined settings for the cluster-type, master, and nodes options
+Host groups are pre-defined settings for the cluster-type, primary node, and nodes options
 saved in JSON-formatted files under /var/lib/sos collect/<GROUP>.
 
-If cluster_type and/or master are set in the group, sos collect behaves as if
+If cluster_type and/or primary are set in the group, sos collect behaves as if
 these values were specified on the command-line.
 
 If nodes is defined, sos collect \fBextends\fR the \fB\-\-nodes\fR option, if set,
@@ -171,7 +171,7 @@ to none.
 Save the results of this run of sos collect to a host group definition.
 
 sos-collector will write a JSON-formatted file with name GROUP to /var/lib/sos collect/
-with the settings for cluster-type, master, and the node list as discovered by cluster enumeration.
+with the settings for cluster-type, primary, and the node list as discovered by cluster enumeration.
 Note that this means regexes are not directly saved to host groups, but the results of
 matching against those regexes are.
 .TP
@@ -234,20 +234,20 @@ names/addresses and regex strings may be provided at the same time.
 Do not perform package checks. Most cluster profiles check against installed packages to determine
 if the cluster profile should be applied or not.
 
-Use this with \fB\-\-cluster-type\fR if there are rpm or apt issues on the master/local node.
+Use this with \fB\-\-cluster-type\fR if there are rpm or apt issues on the primary/local node.
 .TP
 \fB\-\-no\-local\fR
 Do not collect a sosreport from the local system.
 
-If \fB--master\fR is not supplied, it is assumed that the host running sosreport is part of
+If \fB--primary\fR is not supplied, it is assumed that the host running sosreport is part of
 the cluster that is to be collected. Use this option to skip collection of a local sosreport.
 
-This option is NOT needed if \fB--master\fR is provided.
+This option is NOT needed if \fB--primary\fR is provided.
 .TP
-\fB\-\-master\fR MASTER
-Specify a master node for the cluster.
+\fB\-\-primary\fR PRIMARY
+Specify a primary node IP address or hostname for the cluster.
 
-If provided, then sos collect will check the master node, not localhost, for determining
+If provided, then sos collect will check the primary node, not localhost, for determining
 the type of cluster in use.
 .TP
 \fB\-\-image IMAGE\fR
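As a usage illustration of the renamed option documented above; the hostnames and cluster type below are placeholders, not values from the commit:

```
# Enumerate and collect from a cluster via its primary node instead of localhost:
sos collect --primary=primary0.example.com --cluster-type=ovirt

# Extend the enumerated node list with explicitly named nodes:
sos collect --primary=primary0.example.com --nodes=node1.example.com,node2.example.com
```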
diff --git a/man/en/sos.1 b/man/en/sos.1
index 79f9a130..ce4918f9 100644
--- a/man/en/sos.1
+++ b/man/en/sos.1
@@ -37,7 +37,7 @@ May also be invoked via the alias \fBrep\fR or the deprecated command \fBsosreport\fR.
 .B collect
 Collect is used to capture reports on multiple systems simultaneously. These
 systems can either be defined by the user at the command line and/or defined by
-clustering software that exists either on the local system or on a "master" system
+clustering software that exists either on the local system or on a "primary" system
 that is able to inform about other nodes in the cluster.
 
 When running collect, sos report will be run on the remote nodes, and then the
diff --git a/man/en/sos.conf.5 b/man/en/sos.conf.5
index 442b699d..8bc17b46 100644
--- a/man/en/sos.conf.5
+++ b/man/en/sos.conf.5
@@ -54,7 +54,7 @@ Expected content of an extras file is as follows:
 \fBgroups.d\fP
 This directory is used to store host group configuration files for \fBsos collect\fP.
 
-These files can specify any/all of the \fBmaster\fP, \fBnodes\fP, and \fBcluster-type\fP
+These files can specify any/all of the \fBprimary\fP, \fBnodes\fP, and \fBcluster-type\fP
 options.
 
 Users may create their own private host groups in $HOME/.config/sos/groups.d/. If
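The groups.d entries described above are small JSON documents. A sketch of one, using exactly the keys that sos collect itself persists (see the cfg dict in the sos/collector/__init__.py hunks below); the values are placeholders:

```
{
    "name": "mygroup",
    "primary": "primary0.example.com",
    "cluster_type": "ovirt",
    "nodes": ["node1.example.com", "node2.example.com"]
}
```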
diff --git a/plugins_overview.py b/plugins_overview.py
index 26c3d38f..46bcbf5a 100644
--- a/plugins_overview.py
+++ b/plugins_overview.py
@@ -73,7 +73,7 @@ for plugfile in sorted(os.listdir(PLUGDIR)):
 #    if plugname != 'bcache':
 #        continue
     plugs_data[plugname] = {
-            'sourcecode': 'https://github.com/sosreport/sos/blob/master/sos/report/plugins/%s.py' % plugname,
+            'sourcecode': 'https://github.com/sosreport/sos/blob/main/sos/report/plugins/%s.py' % plugname,
             'distros': [],
             'profiles': [],
             'packages': [],
diff --git a/sos.conf b/sos.conf
--- a/sos.conf
+++ b/sos.conf
@@ -19,7 +19,7 @@
 # Options that will apply to any `sos collect` run should be listed here.
 # Note that the option names *must* be the long-form name as seen in --help
 # output. Use a comma for list delimitations
-#master = myhost.example.com
+#primary = myhost.example.com
 #ssh-key = /home/user/.ssh/mykey
 #password = true
 
diff --git a/sos/collector/__init__.py b/sos/collector/__init__.py
index 70b7a69e..a9d4c09c 100644
--- a/sos/collector/__init__.py
+++ b/sos/collector/__init__.py
@@ -72,7 +72,6 @@ class SoSCollector(SoSComponent):
         'list_options': False,
         'log_size': 0,
         'map_file': '/etc/sos/cleaner/default_mapping',
-        'master': '',
         'primary': '',
         'namespaces': None,
         'nodes': [],
@@ -118,7 +117,7 @@ class SoSCollector(SoSComponent):
         os.umask(0o77)
         self.client_list = []
         self.node_list = []
-        self.master = False
+        self.primary = False
         self.retrieved = 0
         self.cluster = None
         self.cluster_type = None
@@ -350,8 +349,6 @@ class SoSCollector(SoSComponent):
                                  help='List options available for profiles')
         collect_grp.add_argument('--label',
                                  help='Assign a label to the archives')
-        collect_grp.add_argument('--master',
-                                 help='DEPRECATED: Specify a master node')
         collect_grp.add_argument('--primary', '--manager', '--controller',
                                  dest='primary', default='',
                                  help='Specify a primary node for cluster '
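The surviving add_argument() call above registers several long option strings against one destination, so the cluster-specific spellings all set the same value. A standalone sketch of that argparse pattern; the script scaffolding is illustrative, not from the commit:

```
import argparse

# Multiple option strings, one destination: '--manager' and '--controller'
# act as aliases of '--primary', so opts.primary is set whichever spelling
# the user types on the command line.
parser = argparse.ArgumentParser()
parser.add_argument('--primary', '--manager', '--controller',
                    dest='primary', default='',
                    help='Specify a primary node for cluster enumeration')

opts = parser.parse_args(['--manager', 'engine.example.com'])
print(opts.primary)   # -> engine.example.com
```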
""" cfg = { 'name': self.opts.save_group, - 'primary': self.opts.master, + 'primary': self.opts.primary, 'cluster_type': self.cluster.cluster_type[0], 'nodes': [n for n in self.node_list] } @@ -722,7 +719,7 @@ class SoSCollector(SoSComponent): self.ui_log.info(self._fmt_msg(msg)) if ((self.opts.password or (self.opts.password_per_node and - self.opts.master)) + self.opts.primary)) and not self.opts.batch): self.log_debug('password specified, not using SSH keys') msg = ('Provide the SSH password for user %s: ' @@ -769,8 +766,8 @@ class SoSCollector(SoSComponent): self.policy.pre_work() - if self.opts.master: - self.connect_to_master() + if self.opts.primary: + self.connect_to_primary() self.opts.no_local = True else: try: @@ -797,9 +794,9 @@ class SoSCollector(SoSComponent): self.ui_log.info(skip_local_msg) can_run_local = False self.opts.no_local = True - self.master = SosNode('localhost', self.commons, - local_sudo=local_sudo, - load_facts=can_run_local) + self.primary = SosNode('localhost', self.commons, + local_sudo=local_sudo, + load_facts=can_run_local) except Exception as err: self.log_debug("Unable to determine local installation: %s" % err) @@ -807,11 +804,11 @@ class SoSCollector(SoSComponent): '--no-local option if localhost should not be ' 'included.\nAborting...\n', 1) - self.collect_md.add_field('primary', self.master.address) + self.collect_md.add_field('primary', self.primary.address) self.collect_md.add_section('nodes') - self.collect_md.nodes.add_section(self.master.address) - self.master.set_node_manifest(getattr(self.collect_md.nodes, - self.master.address)) + self.collect_md.nodes.add_section(self.primary.address) + self.primary.set_node_manifest(getattr(self.collect_md.nodes, + self.primary.address)) if self.opts.cluster_type: if self.opts.cluster_type == 'none': @@ -819,7 +816,7 @@ class SoSCollector(SoSComponent): else: self.cluster = self.clusters[self.opts.cluster_type] self.cluster_type = self.opts.cluster_type - self.cluster.master = self.master + self.cluster.primary = self.primary else: self.determine_cluster() @@ -835,7 +832,7 @@ class SoSCollector(SoSComponent): self.cluster_type = 'none' self.collect_md.add_field('cluster_type', self.cluster_type) if self.cluster: - self.master.cluster = self.cluster + self.primary.cluster = self.cluster self.cluster.setup() if self.cluster.cluster_ssh_key: if not self.opts.ssh_key: @@ -858,15 +855,15 @@ class SoSCollector(SoSComponent): """ self.ui_log.info('') - if not self.node_list and not self.master.connected: + if not self.node_list and not self.primary.connected: self.exit('No nodes were detected, or nodes do not have sos ' 'installed.\nAborting...') self.ui_log.info('The following is a list of nodes to collect from:') - if self.master.connected and self.master.hostname is not None: - if not (self.master.local and self.opts.no_local): + if self.primary.connected and self.primary.hostname is not None: + if not (self.primary.local and self.opts.no_local): self.ui_log.info('\t%-*s' % (self.commons['hostlen'], - self.master.hostname)) + self.primary.hostname)) for node in sorted(self.node_list): self.ui_log.info("\t%-*s" % (self.commons['hostlen'], node)) @@ -919,17 +916,17 @@ class SoSCollector(SoSComponent): self.commons['sos_cmd'] = self.sos_cmd self.collect_md.add_field('initial_sos_cmd', self.sos_cmd) - def connect_to_master(self): - """If run with --master, we will run cluster checks again that + def connect_to_primary(self): + """If run with --primary, we will run cluster checks again that instead of the 
 
     def determine_cluster(self):
         """This sets the cluster type and loads that cluster's cluster.
@@ -943,7 +940,7 @@
         checks = list(self.clusters.values())
         for cluster in self.clusters.values():
             checks.remove(cluster)
-            cluster.master = self.master
+            cluster.primary = self.primary
             if cluster.check_enabled():
                 cname = cluster.__class__.__name__
                 self.log_debug("Installation matches %s, checking for layered "
@@ -954,7 +951,7 @@
                         self.log_debug("Layered profile %s found. "
                                        "Checking installation"
                                        % rname)
-                        remaining.master = self.master
+                        remaining.primary = self.primary
                         if remaining.check_enabled():
                             self.log_debug("Installation matches both layered "
                                            "profile %s and base profile %s, "
@@ -978,18 +975,18 @@
         return []
 
     def reduce_node_list(self):
-        """Reduce duplicate entries of the localhost and/or master node
+        """Reduce duplicate entries of the localhost and/or primary node
         if applicable"""
         if (self.hostname in self.node_list and self.opts.no_local):
             self.node_list.remove(self.hostname)
         for i in self.ip_addrs:
             if i in self.node_list:
                 self.node_list.remove(i)
-        # remove the master node from the list, since we already have
+        # remove the primary node from the list, since we already have
        # an open session to it.
-        if self.master:
+        if self.primary:
             for n in self.node_list:
-                if n == self.master.hostname or n == self.opts.master:
+                if n == self.primary.hostname or n == self.opts.primary:
                     self.node_list.remove(n)
         self.node_list = list(set(n for n in self.node_list if n))
         self.log_debug('Node list reduced to %s' % self.node_list)
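A distilled, standalone version of the dedup logic in reduce_node_list() above; the real method mutates self.node_list in place, and the function/parameter names here are simplified stand-ins:

```
# Drop the local host (when --no-local applies), local IPs, and the primary
# node -- open sessions to those already exist -- then de-duplicate the rest.
def reduce_node_list(node_list, hostname, ip_addrs, primary_names,
                     no_local=False):
    nodes = list(node_list)
    if no_local and hostname in nodes:
        nodes.remove(hostname)
    nodes = [n for n in nodes
             if n and n not in ip_addrs and n not in primary_names]
    return list(set(nodes))

print(reduce_node_list(
    ['node1', 'node2', 'node1', 'primary0', '10.0.0.5', ''],
    hostname='node2', ip_addrs=['10.0.0.5'],
    primary_names=['primary0']))
# -> ['node1', 'node2'] (set ordering varies)
```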
@@ -1010,9 +1007,9 @@
 
     def get_nodes(self):
         """ Sets the list of nodes to collect sosreports from """
-        if not self.master and not self.cluster:
+        if not self.primary and not self.cluster:
             msg = ('Could not determine a cluster type and no list of '
-                   'nodes or master node was provided.\nAborting...'
+                   'nodes or primary node was provided.\nAborting...'
                    )
             self.exit(msg)
 
@@ -1041,7 +1038,7 @@
                     self.log_debug("Force adding %s to node list" % node)
                     self.node_list.append(node)
 
-        if not self.master:
+        if not self.primary:
             host = self.hostname.split('.')[0]
             # trust the local hostname before the node report from cluster
             for node in self.node_list:
@@ -1052,7 +1049,7 @@
         try:
             self.commons['hostlen'] = len(max(self.node_list, key=len))
         except (TypeError, ValueError):
-            self.commons['hostlen'] = len(self.opts.master)
+            self.commons['hostlen'] = len(self.opts.primary)
 
     def _connect_to_node(self, node):
         """Try to connect to the node, and if we can add to the client list to
@@ -1099,12 +1096,6 @@ this utility or remote systems that it connects to.
         intro_msg = self._fmt_msg(disclaimer % self.tmpdir)
         self.ui_log.info(intro_msg)
 
-        if self.opts.master:
-            self.ui_log.info(
-                "NOTE: Use of '--master' is DEPRECATED and will be removed in "
-                "a future release.\nUse '--primary', '--manager', or "
-                "'--controller' instead.")
-
         prompt = "\nPress ENTER to continue, or CTRL-C to quit\n"
         if not self.opts.batch:
             try:
@@ -1126,12 +1117,6 @@ this utility or remote systems that it connects to.
 
         self.intro()
 
-        if self.opts.primary:
-            # for now, use the new option name and simply override the existing
-            # value that the rest of the component references. Full conversion
-            # of master -> primary is a 4.3 item.
-            self.opts.master = self.opts.primary
-
         self.configure_sos_cmd()
         self.prep()
         self.display_nodes()
@@ -1147,11 +1132,11 @@ this utility or remote systems that it connects to.
     def collect(self):
         """ For each node, start a collection thread and then tar all
         collected sosreports """
-        if self.master.connected:
-            self.client_list.append(self.master)
+        if self.primary.connected:
+            self.client_list.append(self.primary)
 
         self.ui_log.info("\nConnecting to nodes...")
-        filters = [self.master.address, self.master.hostname]
+        filters = [self.primary.address, self.primary.hostname]
         nodes = [(n, None) for n in self.node_list if n not in filters]
 
         if self.opts.password_per_node:
@@ -1206,10 +1191,10 @@ this utility or remote systems that it connects to.
                 os._exit(1)
 
         if hasattr(self.cluster, 'run_extra_cmd'):
-            self.ui_log.info('Collecting additional data from master node...')
+            self.ui_log.info('Collecting additional data from primary node...')
             files = self.cluster._run_extra_cmd()
             if files:
-                self.master.collect_extra_cmd(files)
+                self.primary.collect_extra_cmd(files)
 
         msg = '\nSuccessfully captured %s of %s sosreports'
         self.log_info(msg % (self.retrieved, self.report_num))
         self.close_all_connections()
diff --git a/sos/collector/clusters/__init__.py b/sos/collector/clusters/__init__.py
index bb728bc0..2b5d7018 100644
--- a/sos/collector/clusters/__init__.py
+++ b/sos/collector/clusters/__init__.py
@@ -58,7 +58,7 @@ class Cluster():
     cluster_name = None
 
     def __init__(self, commons):
-        self.master = None
+        self.primary = None
         self.cluster_ssh_key = None
         self.tmpdir = commons['tmpdir']
         self.opts = commons['cmdlineopts']
@@ -135,7 +135,7 @@ class Cluster():
         key rather than prompting the user for one or a password.
 
         Note this will only function if collector is being run locally on the
-        master node.
+        primary node.
         """
         self.cluster_ssh_key = key
 
@@ -149,31 +149,26 @@ class Cluster():
         """
         pass
 
-    def set_master_options(self, node):
+    def set_primary_options(self, node):
         """If there is a need to set specific options in the sos command being
-        run on the cluster's master nodes, override this method in the cluster
+        run on the cluster's primary nodes, override this method in the cluster
         profile and do that here.
 
-        :param node: The master node
+        :param node: The primary node
         :type node: ``SoSNode``
         """
         pass
 
-    def check_node_is_master(self, node):
-        """In the event there are multiple masters, or if the collect command
+    def check_node_is_primary(self, node):
+        """In the event there are multiple primaries, or if the collect command
         is being run from a system that is technically capable of enumerating
-        nodes but the cluster profiles needs to specify master-specific options
-        for other nodes, override this method in the cluster profile
+        nodes but the cluster profiles needs to specify primary-specific
+        options for other nodes, override this method in the cluster profile
 
         :param node: The node for the cluster to check
         :type node: ``SoSNode``
         """
-        return node.address == self.master.address
-
-    def exec_master_cmd(self, cmd, need_root=False):
-        self.log_debug("Use of exec_master_cmd() is deprecated and will be "
-                       "removed. Use exec_primary_cmd() instead")
-        return self.exec_primary_cmd(cmd, need_root)
+        return node.address == self.primary.address
 
     def exec_primary_cmd(self, cmd, need_root=False):
         """Used to retrieve command output from a (primary) node in a cluster
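The renamed hooks above form the extension surface for cluster profiles. A minimal hypothetical profile wired to them; everything named 'mycluster' (class, package, CLI command) is invented for illustration, while the hook names and the exec_primary_cmd() return dict follow the code shown in this diff:

```
from sos.collector.clusters import Cluster


class mycluster(Cluster):

    cluster_name = 'Example Clustered Product'
    packages = ('mycluster-server',)  # check_enabled() probes these on the
                                      # primary node via is_installed()

    def get_nodes(self):
        # enumerate members by running an (invented) CLI on the primary node;
        # exec_primary_cmd() returns a dict with 'status' and 'stdout'
        res = self.exec_primary_cmd('mycluster-cli list-nodes')
        if res['status'] == 0:
            return res['stdout'].splitlines()
        return []

    def set_primary_options(self, node):
        # called only for nodes that check_node_is_primary() matches
        node.enable_plugins.append('mycluster')
```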
@@ -187,7 +182,7 @@ class Cluster():
         :returns: The output and status of `cmd`
         :rtype: ``dict``
         """
-        res = self.master.run_command(cmd, get_pty=True, need_root=need_root)
+        res = self.primary.run_command(cmd, get_pty=True, need_root=need_root)
         if res['stdout']:
             res['stdout'] = res['stdout'].replace('Password:', '')
         return res
@@ -214,7 +209,7 @@ class Cluster():
         :rtype: ``bool``
         """
         for pkg in self.packages:
-            if self.master.is_installed(pkg):
+            if self.primary.is_installed(pkg):
                 return True
         return False
@@ -255,8 +250,8 @@ class Cluster():
     def set_node_label(self, node):
         """This may be overridden by clusters profiles subclassing this class
 
-        If there is a distinction between masters and nodes, or types of nodes,
-        then this can be used to label the sosreport archive differently.
+        If there is a distinction between primaries and nodes, or types of
+        nodes, then this can be used to label the sosreport archive differently
         """
         return ''
diff --git a/sos/collector/clusters/ocp.py b/sos/collector/clusters/ocp.py
index ddff84a4..5479417d 100644
--- a/sos/collector/clusters/ocp.py
+++ b/sos/collector/clusters/ocp.py
@@ -53,11 +53,11 @@ class ocp(Cluster):
         if self.token:
             self._attempt_oc_login()
         _who = self.fmt_oc_cmd('whoami')
-        return self.exec_master_cmd(_who)['status'] == 0
+        return self.exec_primary_cmd(_who)['status'] == 0
 
     def _build_dict(self, nodelist):
         """From the output of get_nodes(), construct an easier-to-reference
-        dict of nodes that will be used in determining labels, master status,
+        dict of nodes that will be used in determining labels, primary status,
         etc...
 
         :param nodelist: The split output of `oc get nodes`
@@ -90,7 +90,7 @@ class ocp(Cluster):
         if self.get_option('label'):
             labels = ','.join(self.get_option('label').split(':'))
             cmd += " -l %s" % quote(labels)
-        res = self.exec_master_cmd(self.fmt_oc_cmd(cmd))
+        res = self.exec_primary_cmd(self.fmt_oc_cmd(cmd))
         if res['status'] == 0:
             roles = [r for r in self.get_option('role').split(':')]
             self.node_dict = self._build_dict(res['stdout'].splitlines())
@@ -104,7 +104,7 @@ class ocp(Cluster):
         else:
             msg = "'oc' command failed"
             if 'Missing or incomplete' in res['stdout']:
-                msg = ("'oc' failed due to missing kubeconfig on master node."
+                msg = ("'oc' failed due to missing kubeconfig on primary node."
                        " Specify one via '-c ocp.kubeconfig=<path>'")
             raise Exception(msg)
         return nodes
@@ -117,12 +117,12 @@ class ocp(Cluster):
                 return label
         return ''
 
-    def check_node_is_master(self, sosnode):
+    def check_node_is_primary(self, sosnode):
         if sosnode.address not in self.node_dict:
             return False
         return 'master' in self.node_dict[sosnode.address]['roles']
 
-    def set_master_options(self, node):
+    def set_primary_options(self, node):
         node.enable_plugins.append('openshift')
         if self.api_collect_enabled:
             # a primary has already been enabled for API collection, disable
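Note that check_node_is_primary() above still tests for the literal role string 'master', since that is the label OpenShift itself reports. A standalone sketch of the kind of parsing _build_dict() performs; the `oc get nodes` column layout (NAME STATUS ROLES ...) is an assumption, and build_node_dict is an invented helper name:

```
# Parse `oc get nodes` output into the {address: {'roles': [...]}} shape
# that check_node_is_primary() consults.
def build_node_dict(nodelist):
    nodes = {}
    for line in nodelist[1:]:              # skip the header row
        fields = line.split()
        nodes[fields[0]] = {'roles': fields[2].split(',')}
    return nodes

sample = [
    "NAME       STATUS   ROLES    AGE   VERSION",
    "master-0   Ready    master   10d   v1.21.1",
    "worker-0   Ready    worker   10d   v1.21.1",
]
node_dict = build_node_dict(sample)
print('master' in node_dict['master-0']['roles'])   # True
```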
" Specify one via '-c ocp.kubeconfig=<path>'") raise Exception(msg) return nodes @@ -117,12 +117,12 @@ class ocp(Cluster): return label return '' - def check_node_is_master(self, sosnode): + def check_node_is_primary(self, sosnode): if sosnode.address not in self.node_dict: return False return 'master' in self.node_dict[sosnode.address]['roles'] - def set_master_options(self, node): + def set_primary_options(self, node): node.enable_plugins.append('openshift') if self.api_collect_enabled: # a primary has already been enabled for API collection, disable diff --git a/sos/collector/clusters/ovirt.py b/sos/collector/clusters/ovirt.py index 0382e393..079a122e 100644 --- a/sos/collector/clusters/ovirt.py +++ b/sos/collector/clusters/ovirt.py @@ -32,7 +32,7 @@ class ovirt(Cluster): def _run_db_query(self, query): ''' - Wrapper for running DB queries on the master. Any scrubbing of the + Wrapper for running DB queries on the manager. Any scrubbing of the query should be done _before_ passing the query to this method. ''' cmd = "%s %s" % (self.db_exec, quote(query)) @@ -62,10 +62,10 @@ class ovirt(Cluster): This only runs if we're locally on the RHV-M, *and* if no ssh-keys are called out on the command line, *and* no --password option is given. ''' - if self.master.local: + if self.primary.local: if not any([self.opts.ssh_key, self.opts.password, self.opts.password_per_node]): - if self.master.file_exists(ENGINE_KEY): + if self.primary.file_exists(ENGINE_KEY): self.add_default_ssh_key(ENGINE_KEY) self.log_debug("Found engine SSH key. User command line" " does not specify a key or password, using" @@ -144,8 +144,8 @@ class ovirt(Cluster): for line in db_sos['stdout'].splitlines(): if fnmatch.fnmatch(line, '*sosreport-*tar*'): _pg_dump = line.strip() - self.master.manifest.add_field('postgresql_dump', - _pg_dump.split('/')[-1]) + self.primary.manifest.add_field('postgresql_dump', + _pg_dump.split('/')[-1]) return _pg_dump self.log_error('Failed to gather database dump') return False @@ -158,7 +158,7 @@ class rhv(ovirt): sos_preset = 'rhv' def set_node_label(self, node): - if node.address == self.master.address: + if node.address == self.primary.address: return 'manager' if node.is_installed('ovirt-node-ng-nodectl'): return 'rhvh' @@ -174,7 +174,7 @@ class rhhi_virt(rhv): sos_preset = 'rhv' def check_enabled(self): - return (self.master.is_installed('rhvm') and self._check_for_rhhiv()) + return (self.primary.is_installed('rhvm') and self._check_for_rhhiv()) def _check_for_rhhiv(self): ret = self._run_db_query('SELECT count(server_id) FROM gluster_server') diff --git a/sos/collector/clusters/satellite.py b/sos/collector/clusters/satellite.py index 4a5de31b..e123c8a3 100644 --- a/sos/collector/clusters/satellite.py +++ b/sos/collector/clusters/satellite.py @@ -35,6 +35,6 @@ class satellite(Cluster): return [] def set_node_label(self, node): - if node.address == self.master.address: + if node.address == self.primary.address: return 'satellite' return 'capsule' diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py index 5d05c297..4b1ee109 100644 --- a/sos/collector/sosnode.py +++ b/sos/collector/sosnode.py @@ -85,7 +85,7 @@ class SosNode(): self.need_sudo = os.getuid() != 0 # load the host policy now, even if we don't want to load further # host information. 
diff --git a/sos/collector/clusters/satellite.py b/sos/collector/clusters/satellite.py
index 4a5de31b..e123c8a3 100644
--- a/sos/collector/clusters/satellite.py
+++ b/sos/collector/clusters/satellite.py
@@ -35,6 +35,6 @@ class satellite(Cluster):
         return []
 
     def set_node_label(self, node):
-        if node.address == self.master.address:
+        if node.address == self.primary.address:
             return 'satellite'
         return 'capsule'
diff --git a/sos/collector/sosnode.py b/sos/collector/sosnode.py
index 5d05c297..4b1ee109 100644
--- a/sos/collector/sosnode.py
+++ b/sos/collector/sosnode.py
@@ -85,7 +85,7 @@ class SosNode():
         self.need_sudo = os.getuid() != 0
         # load the host policy now, even if we don't want to load further
         # host information. This is necessary if we're running locally on the
-        # cluster master but do not want a local report as we still need to do
+        # cluster primary but do not want a local report as we still need to do
         # package checks in that instance
         self.host = self.determine_host_policy()
         self.get_hostname()
@@ -314,8 +314,8 @@ class SosNode():
         if self.sos_info['version']:
             self.log_info('sos version is %s' % self.sos_info['version'])
         else:
-            if not self.address == self.opts.master:
-                # in the case where the 'master' enumerates nodes but is not
+            if not self.address == self.opts.primary:
+                # in the case where the 'primary' enumerates nodes but is not
                 # intended for collection (bastions), don't worry about sos not
                 # being present
                 self.log_error('sos is not installed on this node')
@@ -672,10 +672,10 @@ class SosNode():
                                               self.cluster.sos_plugin_options[opt])
                     self.plugopts.append(option)
 
-        # set master-only options
-        if self.cluster.check_node_is_master(self):
+        # set primary-only options
+        if self.cluster.check_node_is_primary(self):
             with self.cluster.lock:
-                self.cluster.set_master_options(self)
+                self.cluster.set_primary_options(self)
         else:
             with self.cluster.lock:
                 self.cluster.set_node_options(self)
@@ -1023,7 +1023,7 @@ class SosNode():
             else:
                 self.log_error("Unable to retrieve file %s" % filename)
         except Exception as e:
-            msg = 'Error collecting additional data from master: %s' % e
+            msg = 'Error collecting additional data from primary: %s' % e
             self.log_error(msg)
 
     def make_archive_readable(self, filepath):
diff --git a/tests/test_data/etc/sos/sos.conf b/tests/test_data/etc/sos/sos.conf
index bb38e248..45bdf3fe 100644
--- a/tests/test_data/etc/sos/sos.conf
+++ b/tests/test_data/etc/sos/sos.conf
@@ -5,7 +5,7 @@
 #skip-plugins = rpm,selinux,dovecot
 
 [collect]
-#master = myhost.example.com
+#primary = myhost.example.com
 
 [clean]
 keywords = shibboleth
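After this change, a working [collect] section simply uncomments the renamed defaults shown in the sos.conf hunks above; the host and key path below are placeholders:

```
# Hypothetical /etc/sos/sos.conf fragment using the renamed option
[collect]
primary = myhost.example.com
ssh-key = /home/user/.ssh/mykey
password = true
```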