diff --git a/api-guide/source/conf.py b/api-guide/source/conf.py
index 8774e047d1..75f28b17d5 100644
--- a/api-guide/source/conf.py
+++ b/api-guide/source/conf.py
@@ -46,8 +46,8 @@
 # The encoding of source files.
 # source_encoding = 'utf-8-sig'
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 
 # General information about the project.
 project = u'Compute API Guide'
 bug_tag = u'api-guide'
diff --git a/api-ref/source/conf.py b/api-ref/source/conf.py
index 23fd173a81..27c6a7ee2b 100644
--- a/api-ref/source/conf.py
+++ b/api-ref/source/conf.py
@@ -36,8 +36,8 @@
 # The suffix of source filenames.
 source_suffix = '.rst'
 
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 
 # General information about the project.
 copyright = u'2010-present, OpenStack Foundation'
diff --git a/doc/source/conf.py b/doc/source/conf.py
index ecfc6f5fb3..8590452f38 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -65,8 +65,8 @@
 todo_include_todos = True
 
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 
 # General information about the project.
 copyright = u'2010-present, OpenStack Foundation'
 
diff --git a/nova/api/openstack/compute/server_groups.py b/nova/api/openstack/compute/server_groups.py
index 14fdd485f4..bc39533f54 100644
--- a/nova/api/openstack/compute/server_groups.py
+++ b/nova/api/openstack/compute/server_groups.py
@@ -55,7 +55,7 @@ def _get_not_deleted(context, uuids):
     cell_mappings = {}
     found_inst_uuids = []
 
-    # Get a master list of cell mappings, and a list of instance
+    # Get a main list of cell mappings, and a list of instance
     # uuids organized by cell
     for im in mappings:
         if not im.cell_mapping:
diff --git a/nova/compute/api.py b/nova/compute/api.py
index aebe650439..e42e94f260 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -3486,7 +3486,7 @@ def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
 
         # Check whether host exists or not.
         node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
-            context, host_name, use_slave=True)
+            context, host_name, use_subordinate=True)
 
         self._check_auto_disk_config(instance, **extra_instance_updates)
 
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6bcfb05719..c55f6d5377 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -637,7 +637,7 @@ def _get_instances_on_driver(self, context, filters=None):
                 return objects.InstanceList()
             filters['uuid'] = driver_uuids
             local_instances = objects.InstanceList.get_by_filters(
-                context, filters, use_slave=True)
+                context, filters, use_subordinate=True)
             return local_instances
         except NotImplementedError:
             pass
@@ -649,7 +649,7 @@ def _get_instances_on_driver(self, context, filters=None):
         # Without this all instance data would be fetched from db.
filters['host'] = self.host instances = objects.InstanceList.get_by_filters(context, filters, - use_slave=True) + use_subordinate=True) name_map = {instance.name: instance for instance in instances} local_instances = [] for driver_instance in driver_instances: @@ -1477,7 +1477,7 @@ def _check_instance_build_time(self, context): 'host': self.host} building_insts = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) for instance in building_insts: if timeutils.is_older_than(instance.created_at, timeout): @@ -1789,7 +1789,7 @@ def _sync_scheduler_instance_info(self, context): context = context.elevated() instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) uuids = [instance.uuid for instance in instances] self.query_client.sync_instance_info(context, self.host, uuids) @@ -7524,7 +7524,7 @@ def _heal_instance_info_cache(self, context): # The list of instances to heal is empty so rebuild it LOG.debug('Rebuilding the list of instances to heal') db_instances = objects.InstanceList.get_by_host( - context, self.host, expected_attrs=[], use_slave=True) + context, self.host, expected_attrs=[], use_subordinate=True) for inst in db_instances: # We don't want to refresh the cache for instances # which are building or deleting so don't put them @@ -7555,7 +7555,7 @@ def _heal_instance_info_cache(self, context): context, instance_uuids.pop(0), expected_attrs=['system_metadata', 'info_cache', 'flavor'], - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # Instance is gone. Try to grab another. continue @@ -7616,7 +7616,7 @@ def _poll_rebooting_instances(self, context): task_states.REBOOT_PENDING], 'host': self.host} rebooting = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=[], use_slave=True) + context, filters, expected_attrs=[], use_subordinate=True) to_poll = [] for instance in rebooting: @@ -7633,7 +7633,7 @@ def _poll_rescued_instances(self, context): 'host': self.host} rescued_instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=["system_metadata"], - use_slave=True) + use_subordinate=True) to_unrescue = [] for instance in rescued_instances: @@ -7651,7 +7651,7 @@ def _poll_unconfirmed_resizes(self, context): migrations = objects.MigrationList.get_unconfirmed_by_dest_compute( context, CONF.resize_confirm_window, self.host, - use_slave=True) + use_subordinate=True) migrations_info = dict(migration_count=len(migrations), confirm_window=CONF.resize_confirm_window) @@ -7679,7 +7679,7 @@ def _set_migration_to_error(migration, reason, **kwargs): try: instance = objects.Instance.get_by_uuid(context, instance_uuid, expected_attrs=expected_attrs, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: reason = (_("Instance %s not found") % instance_uuid) @@ -7739,7 +7739,7 @@ def _poll_shelved_instances(self, context): 'host': self.host} shelved_instances = objects.InstanceList.get_by_filters( context, filters=filters, expected_attrs=['system_metadata'], - use_slave=True) + use_subordinate=True) to_gc = [] for instance in shelved_instances: @@ -7772,7 +7772,7 @@ def _instance_usage_audit(self, context): context, begin, end, host=self.host, expected_attrs=['system_metadata', 'info_cache', 'metadata', 'flavor'], - use_slave=True) + use_subordinate=True) num_instances = len(instances) errors = 0 successes = 0 @@ -7826,7 +7826,7 @@ def 
_poll_bandwidth_usage(self, context): instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=True) + use_subordinate=True) try: bw_counters = self.driver.get_all_bw_counters(instances) except NotImplementedError: @@ -7851,7 +7851,7 @@ def _poll_bandwidth_usage(self, context): last_ctr_out = None usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], - start_period=start_time, use_slave=True) + start_period=start_time, use_subordinate=True) if usage: bw_in = usage.bw_in bw_out = usage.bw_out @@ -7861,7 +7861,7 @@ def _poll_bandwidth_usage(self, context): usage = (objects.BandwidthUsage. get_by_instance_uuid_and_mac( context, bw_ctr['uuid'], bw_ctr['mac_address'], - start_period=prev_time, use_slave=True)) + start_period=prev_time, use_subordinate=True)) if usage: last_ctr_in = usage.last_ctr_in last_ctr_out = usage.last_ctr_out @@ -7890,14 +7890,14 @@ def _poll_bandwidth_usage(self, context): start_period=start_time, last_refreshed=refreshed) - def _get_host_volume_bdms(self, context, use_slave=False): + def _get_host_volume_bdms(self, context, use_subordinate=False): """Return all block device mappings on a compute host.""" compute_host_bdms = [] instances = objects.InstanceList.get_by_host(context, self.host, - use_slave=use_slave) + use_subordinate=use_subordinate) for instance in instances: bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=use_slave) + context, instance.uuid, use_subordinate=use_subordinate) instance_bdms = [bdm for bdm in bdms if bdm.is_volume] compute_host_bdms.append(dict(instance=instance, instance_bdms=instance_bdms)) @@ -7930,7 +7930,7 @@ def _poll_volume_usage(self, context): return compute_host_bdms = self._get_host_volume_bdms(context, - use_slave=True) + use_subordinate=True) if not compute_host_bdms: return @@ -7956,7 +7956,7 @@ def _sync_power_states(self, context): """ db_instances = objects.InstanceList.get_by_host(context, self.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) try: num_vm_instances = self.driver.get_num_instances() @@ -8022,7 +8022,7 @@ def _query_driver_power_state_and_sync(self, context, db_instance): self._sync_instance_power_state(context, db_instance, vm_power_state, - use_slave=True) + use_subordinate=True) except exception.InstanceNotFound: # NOTE(hanlind): If the instance gets deleted during sync, # silently ignore. @@ -8067,7 +8067,7 @@ def _stop_unexpected_shutdown_instance(self, context, vm_state, instance=db_instance) def _sync_instance_power_state(self, context, db_instance, vm_power_state, - use_slave=False): + use_subordinate=False): """Align instance power state between the database and hypervisor. If the instance is not found on the hypervisor, but is in the database, @@ -8076,7 +8076,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state, # We re-query the DB to get the latest instance info to minimize # (not eliminate) race condition. 
- db_instance.refresh(use_slave=use_slave) + db_instance.refresh(use_subordinate=use_subordinate) db_power_state = db_instance.power_state vm_state = db_instance.vm_state @@ -8216,7 +8216,7 @@ def _reclaim_queued_deletes(self, context): instances = objects.InstanceList.get_by_filters( context, filters, expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS, - use_slave=True) + use_subordinate=True) for instance in instances: if self._deleted_old_enough(instance, interval): bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( @@ -8289,7 +8289,7 @@ def update_available_resource(self, context, startup=False): """ compute_nodes_in_db = self._get_compute_nodes_in_db(context, - use_slave=True, + use_subordinate=True, startup=startup) try: nodenames = set(self.driver.get_available_nodes()) @@ -8316,11 +8316,11 @@ def update_available_resource(self, context, startup=False): self._update_available_resource_for_node(context, nodename, startup=startup) - def _get_compute_nodes_in_db(self, context, use_slave=False, + def _get_compute_nodes_in_db(self, context, use_subordinate=False, startup=False): try: return objects.ComputeNodeList.get_all_by_host(context, self.host, - use_slave=use_slave) + use_subordinate=use_subordinate) except exception.NotFound: if startup: LOG.warning( @@ -8394,7 +8394,7 @@ def _cleanup_running_deleted_instances(self, context): "DELETED but still present on host.", instance.name, instance=instance) bdms = objects.BlockDeviceMappingList.get_by_instance_uuid( - context, instance.uuid, use_slave=True) + context, instance.uuid, use_subordinate=True) self.instance_events.clear_events_for_instance(instance) try: self._shutdown_instance(context, instance, bdms, @@ -8486,11 +8486,11 @@ def _error_out_instance_on_exception(self, context, instance, self._set_instance_obj_error_state(context, instance) @wrap_exception() - def add_aggregate_host(self, context, aggregate, host, slave_info): + def add_aggregate_host(self, context, aggregate, host, subordinate_info): """Notify hypervisor of change (for hypervisor pools).""" try: self.driver.add_to_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'add_aggregate_host') @@ -8502,11 +8502,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info): aggregate, host) @wrap_exception() - def remove_aggregate_host(self, context, host, slave_info, aggregate): + def remove_aggregate_host(self, context, host, subordinate_info, aggregate): """Removes a host from a physical hypervisor pool.""" try: self.driver.remove_from_aggregate(context, aggregate, host, - slave_info=slave_info) + subordinate_info=subordinate_info) except NotImplementedError: LOG.debug('Hypervisor driver does not support ' 'remove_aggregate_host') @@ -8709,7 +8709,7 @@ def _run_image_cache_manager_pass(self, context): 'soft_deleted': True, 'host': nodes} filtered_instances = objects.InstanceList.get_by_filters(context, - filters, expected_attrs=[], use_slave=True) + filters, expected_attrs=[], use_subordinate=True) self.driver.manage_image_cache(context, filtered_instances) @@ -8724,7 +8724,7 @@ def _run_pending_deletes(self, context): attrs = ['system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, filters, expected_attrs=attrs, use_slave=True) + context, filters, expected_attrs=attrs, use_subordinate=True) LOG.debug('There are %d instances to clean', 
len(instances)) for instance in instances: @@ -8768,7 +8768,7 @@ def _cleanup_incomplete_migrations(self, context): attrs = ['info_cache', 'security_groups', 'system_metadata'] with utils.temporary_mutation(context, read_deleted='yes'): instances = objects.InstanceList.get_by_filters( - context, inst_filters, expected_attrs=attrs, use_slave=True) + context, inst_filters, expected_attrs=attrs, use_subordinate=True) for instance in instances: if instance.host != CONF.host: diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py index 2106ff5458..2ee8d33668 100644 --- a/nova/compute/rpcapi.py +++ b/nova/compute/rpcapi.py @@ -156,7 +156,7 @@ class ComputeAPI(object): * 2.0 - Remove 1.x backwards compat * 2.1 - Adds orig_sys_metadata to rebuild_instance() - * 2.2 - Adds slave_info parameter to add_aggregate_host() and + * 2.2 - Adds subordinate_info parameter to add_aggregate_host() and remove_aggregate_host() * 2.3 - Adds volume_id to reserve_block_device_name() * 2.4 - Add bdms to terminate_instance @@ -479,7 +479,7 @@ def get_client(self, target, version_cap, serializer): call_monitor_timeout=cmt) def add_aggregate_host(self, ctxt, host, aggregate, host_param, - slave_info=None): + subordinate_info=None): '''Add aggregate host. :param ctxt: request context @@ -493,7 +493,7 @@ def add_aggregate_host(self, ctxt, host, aggregate, host_param, server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): version = '5.0' @@ -818,7 +818,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files, **msg_args) def remove_aggregate_host(self, ctxt, host, aggregate, host_param, - slave_info=None): + subordinate_info=None): '''Remove aggregate host. 
:param ctxt: request context @@ -832,7 +832,7 @@ def remove_aggregate_host(self, ctxt, host, aggregate, host_param, server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, - slave_info=slave_info) + subordinate_info=subordinate_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): version = '5.0' diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py index 82b125cd45..cbf15ec039 100644 --- a/nova/conductor/manager.py +++ b/nova/conductor/manager.py @@ -938,7 +938,7 @@ def _allocate_for_evacuate_dest_host(self, context, instance, host, context, instance.host, instance.node) dest_node = ( objects.ComputeNode.get_first_node_by_host_for_old_compat( - context, host, use_slave=True)) + context, host, use_subordinate=True)) except exception.ComputeHostNotFound as ex: with excutils.save_and_reraise_exception(): self._set_vm_state_and_notify( diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py index 5efb069be8..9478ca0e78 100644 --- a/nova/conductor/rpcapi.py +++ b/nova/conductor/rpcapi.py @@ -124,7 +124,7 @@ class ConductorAPI(object): * 1.62 - Added object_backport() * 1.63 - Changed the format of values['stats'] from a dict to a JSON string in compute_node_update() - * 1.64 - Added use_slave to instance_get_all_filters() + * 1.64 - Added use_subordinate to instance_get_all_filters() - Remove instance_type_get() - Remove aggregate_get() - Remove aggregate_get_by_host() diff --git a/nova/conf/database.py b/nova/conf/database.py index 90e88b81cb..0793a8a52a 100644 --- a/nova/conf/database.py +++ b/nova/conf/database.py @@ -49,7 +49,7 @@ cfg.BoolOpt('sqlite_synchronous', default=True, help=''), - cfg.StrOpt('slave_connection', + cfg.StrOpt('subordinate_connection', secret=True, help=''), cfg.StrOpt('mysql_sql_mode', diff --git a/nova/conf/network.py b/nova/conf/network.py index 0e588b68ac..f4ccef24f1 100644 --- a/nova/conf/network.py +++ b/nova/conf/network.py @@ -1012,19 +1012,19 @@ nova-network is deprecated, as are any related configuration options. """, help="Bind user's password for LDAP server"), - cfg.StrOpt('ldap_dns_soa_hostmaster', - default='hostmaster@example.org', + cfg.StrOpt('ldap_dns_soa_hostmain', + default='hostmain@example.org', deprecated_for_removal=True, deprecated_since='16.0.0', deprecated_reason=""" nova-network is deprecated, as are any related configuration options. """, help=""" -Hostmaster for LDAP DNS driver Statement of Authority +Hostmain for LDAP DNS driver Statement of Authority Possible values: -* Any valid string representing LDAP DNS hostmaster. +* Any valid string representing LDAP DNS hostmain. """), cfg.MultiStrOpt('ldap_dns_servers', default=['dns.example.org'], @@ -1062,7 +1062,7 @@ help=""" Refresh interval (in seconds) for LDAP DNS driver Start of Authority -Time interval, a secondary/slave DNS server waits before requesting for +Time interval, a secondary/subordinate DNS server waits before requesting for primary DNS server's current SOA record. If the records are different, secondary DNS server will request a zone transfer from primary. @@ -1078,7 +1078,7 @@ help=""" Retry interval (in seconds) for LDAP DNS driver Start of Authority -Time interval, a secondary/slave DNS server should wait, if an +Time interval, a secondary/subordinate DNS server should wait, if an attempt to transfer zone failed during the previous refresh interval. 
"""), cfg.IntOpt('ldap_dns_soa_expiry', @@ -1091,7 +1091,7 @@ help=""" Expiry interval (in seconds) for LDAP DNS driver Start of Authority -Time interval, a secondary/slave DNS server holds the information +Time interval, a secondary/subordinate DNS server holds the information before it is no longer considered authoritative. """), cfg.IntOpt('ldap_dns_soa_minimum', diff --git a/nova/conf/xvp.py b/nova/conf/xvp.py index fe83484ce2..605520f8dc 100644 --- a/nova/conf/xvp.py +++ b/nova/conf/xvp.py @@ -40,7 +40,7 @@ cfg.StrOpt('console_xvp_pid', default='/var/run/xvp.pid', deprecated_group='DEFAULT', - help='XVP master process pid file'), + help='XVP main process pid file'), cfg.StrOpt('console_xvp_log', default='/var/log/xvp.log', deprecated_group='DEFAULT', diff --git a/nova/db/api.py b/nova/db/api.py index e60fefca55..f2f9f4a624 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -85,8 +85,8 @@ def create_context_manager(connection): def select_db_reader_mode(f): """Decorator to select synchronous or asynchronous reader mode. - The kwarg argument 'use_slave' defines reader mode. Asynchronous reader - will be used if 'use_slave' is True and synchronous reader otherwise. + The kwarg argument 'use_subordinate' defines reader mode. Asynchronous reader + will be used if 'use_subordinate' is True and synchronous reader otherwise. """ return IMPL.select_db_reader_mode(f) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index fe623f534e..73b3623a79 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -135,14 +135,14 @@ def get_context_manager(context): return _context_manager_from_context(context) or main_context_manager -def get_engine(use_slave=False, context=None): +def get_engine(use_subordinate=False, context=None): """Get a database engine object. - :param use_slave: Whether to use the slave connection + :param use_subordinate: Whether to use the subordinate connection :param context: The request context that can contain a context manager """ ctxt_mgr = get_context_manager(context) - if use_slave: + if use_subordinate: return ctxt_mgr.reader.get_engine() return ctxt_mgr.writer.get_engine() @@ -183,9 +183,9 @@ def wrapper(*args, **kwargs): def select_db_reader_mode(f): """Decorator to select synchronous or asynchronous reader mode. - The kwarg argument 'use_slave' defines reader mode. Asynchronous reader - will be used if 'use_slave' is True and synchronous reader otherwise. - If 'use_slave' is not specified default value 'False' will be used. + The kwarg argument 'use_subordinate' defines reader mode. Asynchronous reader + will be used if 'use_subordinate' is True and synchronous reader otherwise. + If 'use_subordinate' is not specified default value 'False' will be used. Wrapped function must have a context in the arguments. 
""" @@ -196,9 +196,9 @@ def wrapper(*args, **kwargs): keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs) context = keyed_args['context'] - use_slave = keyed_args.get('use_slave', False) + use_subordinate = keyed_args.get('use_subordinate', False) - if use_slave: + if use_subordinate: reader_mode = get_context_manager(context).async_ else: reader_mode = get_context_manager(context).reader @@ -5538,7 +5538,7 @@ def archive_deleted_rows(max_rows=None, before=None): table_to_rows_archived = {} deleted_instance_uuids = [] total_rows_archived = 0 - meta = MetaData(get_engine(use_slave=True)) + meta = MetaData(get_engine(use_subordinate=True)) meta.reflect() # Reverse sort the tables so we get the leaf nodes first for processing. for table in reversed(meta.sorted_tables): diff --git a/nova/network/ldapdns.py b/nova/network/ldapdns.py index 047d0042ac..4c279dcf94 100644 --- a/nova/network/ldapdns.py +++ b/nova/network/ldapdns.py @@ -116,7 +116,7 @@ def _soa(cls): date = time.strftime('%Y%m%d%H%M%S') soa = '%s %s %s %d %d %d %d' % ( CONF.ldap_dns_servers[0], - CONF.ldap_dns_soa_hostmaster, + CONF.ldap_dns_soa_hostmain, date, CONF.ldap_dns_soa_refresh, CONF.ldap_dns_soa_retry, diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 6fbef8cb9b..99defa5b9e 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -1375,7 +1375,7 @@ def ensure_bridge(bridge, interface, net_attrs=None, gateway=True, out, err = _execute('brctl', 'addif', bridge, interface, check_exit_code=False, run_as_root=True) if (err and err != "device %s is already a member of a bridge; " - "can't enslave it to bridge %s.\n" % (interface, bridge)): + "can't ensubordinate it to bridge %s.\n" % (interface, bridge)): msg = _('Failed to add interface: %s') % err raise exception.NovaException(msg) diff --git a/nova/objects/bandwidth_usage.py b/nova/objects/bandwidth_usage.py index 809e3c63af..684cb54b93 100644 --- a/nova/objects/bandwidth_usage.py +++ b/nova/objects/bandwidth_usage.py @@ -18,7 +18,7 @@ @base.NovaObjectRegistry.register class BandwidthUsage(base.NovaPersistentObject, base.NovaObject): # Version 1.0: Initial version - # Version 1.1: Add use_slave to get_by_instance_uuid_and_mac + # Version 1.1: Add use_subordinate to get_by_instance_uuid_and_mac # Version 1.2: Add update_cells to create VERSION = '1.2' @@ -46,17 +46,17 @@ def _from_db_object(context, bw_usage, db_bw_usage): @staticmethod @db.select_db_reader_mode - def _db_bw_usage_get(context, uuid, start_period, mac, use_slave=False): + def _db_bw_usage_get(context, uuid, start_period, mac, use_subordinate=False): return db.bw_usage_get(context, uuid=uuid, start_period=start_period, mac=mac) @base.serialize_args @base.remotable_classmethod def get_by_instance_uuid_and_mac(cls, context, instance_uuid, mac, - start_period=None, use_slave=False): + start_period=None, use_subordinate=False): db_bw_usage = cls._db_bw_usage_get(context, uuid=instance_uuid, start_period=start_period, mac=mac, - use_slave=use_slave) + use_subordinate=use_subordinate) if db_bw_usage: return cls._from_db_object(context, cls(), db_bw_usage) @@ -76,7 +76,7 @@ def create(self, uuid, mac, bw_in, bw_out, last_ctr_in, @base.NovaObjectRegistry.register class BandwidthUsageList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version - # Version 1.1: Add use_slave to get_by_uuids + # Version 1.1: Add use_subordinate to get_by_uuids # Version 1.2: BandwidthUsage <= version 1.2 VERSION = '1.2' fields = { @@ -86,14 +86,14 @@ class 
BandwidthUsageList(base.ObjectListBase, base.NovaObject): @staticmethod @db.select_db_reader_mode def _db_bw_usage_get_by_uuids(context, uuids, start_period, - use_slave=False): + use_subordinate=False): return db.bw_usage_get_by_uuids(context, uuids=uuids, start_period=start_period) @base.serialize_args @base.remotable_classmethod - def get_by_uuids(cls, context, uuids, start_period=None, use_slave=False): + def get_by_uuids(cls, context, uuids, start_period=None, use_subordinate=False): db_bw_usages = cls._db_bw_usage_get_by_uuids(context, uuids=uuids, start_period=start_period, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(), BandwidthUsage, db_bw_usages) diff --git a/nova/objects/block_device.py b/nova/objects/block_device.py index f81fc0dec1..8b4893aceb 100644 --- a/nova/objects/block_device.py +++ b/nova/objects/block_device.py @@ -130,8 +130,8 @@ def _create_uuid(context, bdm_id): # non-nullable in a future release. # NOTE(mdbooth): We wrap this method in a retry loop because it can - # fail (safely) on multi-master galera if concurrent updates happen on - # different masters. It will never fail on single-master. We can only + # fail (safely) on multi-main galera if concurrent updates happen on + # different mains. It will never fail on single-main. We can only # ever need one retry. uuid = uuidutils.generate_uuid() @@ -335,7 +335,7 @@ def obj_load_attr(self, attrname): class BlockDeviceMappingList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Version 1.1: BlockDeviceMapping <= version 1.1 - # Version 1.2: Added use_slave to get_by_instance_uuid + # Version 1.2: Added use_subordinate to get_by_instance_uuid # Version 1.3: BlockDeviceMapping <= version 1.2 # Version 1.4: BlockDeviceMapping <= version 1.3 # Version 1.5: BlockDeviceMapping <= version 1.4 @@ -373,28 +373,28 @@ def bdms_by_instance_uuid(cls, context, instance_uuids): @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance_uuids( - context, instance_uuids, use_slave=False): + context, instance_uuids, use_subordinate=False): return db.block_device_mapping_get_all_by_instance_uuids( context, instance_uuids) @base.remotable_classmethod - def get_by_instance_uuids(cls, context, instance_uuids, use_slave=False): + def get_by_instance_uuids(cls, context, instance_uuids, use_subordinate=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance_uuids( - context, instance_uuids, use_slave=use_slave) + context, instance_uuids, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) @staticmethod @db.select_db_reader_mode def _db_block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=False): + context, instance_uuid, use_subordinate=False): return db.block_device_mapping_get_all_by_instance( context, instance_uuid) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_bdms = cls._db_block_device_mapping_get_all_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list( context, cls(), objects.BlockDeviceMapping, db_bdms or []) diff --git a/nova/objects/compute_node.py b/nova/objects/compute_node.py index b7f7f966b9..1f6403da41 100644 --- a/nova/objects/compute_node.py +++ b/nova/objects/compute_node.py @@ -285,8 
+285,8 @@ def get_by_nodename(cls, context, hypervisor_hostname): # TODO(pkholkin): Remove this method in the next major version bump @base.remotable_classmethod def get_first_node_by_host_for_old_compat(cls, context, host, - use_slave=False): - computes = ComputeNodeList.get_all_by_host(context, host, use_slave) + use_subordinate=False): + computes = ComputeNodeList.get_all_by_host(context, host, use_subordinate) # FIXME(sbauza): Ironic deployments can return multiple # nodes per host, we should return all the nodes and modify the callers # instead. @@ -389,7 +389,7 @@ class ComputeNodeList(base.ObjectListBase, base.NovaObject): # Version 1.2 Add get_by_service() # Version 1.3 ComputeNode version 1.4 # Version 1.4 ComputeNode version 1.5 - # Version 1.5 Add use_slave to get_by_service + # Version 1.5 Add use_subordinate to get_by_service # Version 1.6 ComputeNode version 1.6 # Version 1.7 ComputeNode version 1.7 # Version 1.8 ComputeNode version 1.8 + add get_all_by_host() @@ -438,7 +438,7 @@ def get_by_hypervisor(cls, context, hypervisor_match): # NOTE(hanlind): This is deprecated and should be removed on the next # major version bump @base.remotable_classmethod - def _get_by_service(cls, context, service_id, use_slave=False): + def _get_by_service(cls, context, service_id, use_subordinate=False): try: db_computes = db.compute_nodes_get_by_service_id( context, service_id) @@ -451,13 +451,13 @@ def _get_by_service(cls, context, service_id, use_slave=False): @staticmethod @db.select_db_reader_mode - def _db_compute_node_get_all_by_host(context, host, use_slave=False): + def _db_compute_node_get_all_by_host(context, host, use_subordinate=False): return db.compute_node_get_all_by_host(context, host) @base.remotable_classmethod - def get_all_by_host(cls, context, host, use_slave=False): + def get_all_by_host(cls, context, host, use_subordinate=False): db_computes = cls._db_compute_node_get_all_by_host(context, host, - use_slave=use_slave) + use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.ComputeNode, db_computes) diff --git a/nova/objects/instance.py b/nova/objects/instance.py index edebf84cf5..7c0be15de4 100644 --- a/nova/objects/instance.py +++ b/nova/objects/instance.py @@ -491,17 +491,17 @@ def _extra_attributes_from_db_object(instance, db_inst, @staticmethod @db.select_db_reader_mode def _db_instance_get_by_uuid(context, uuid, columns_to_join, - use_slave=False): + use_subordinate=False): return db.instance_get_by_uuid(context, uuid, columns_to_join=columns_to_join) @base.remotable_classmethod - def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False): + def get_by_uuid(cls, context, uuid, expected_attrs=None, use_subordinate=False): if expected_attrs is None: expected_attrs = ['info_cache', 'security_groups'] columns_to_join = _expected_cols(expected_attrs) db_inst = cls._db_instance_get_by_uuid(context, uuid, columns_to_join, - use_slave=use_slave) + use_subordinate=use_subordinate) return cls._from_db_object(context, cls(), db_inst, expected_attrs) @@ -813,12 +813,12 @@ def save(self, expected_vm_state=None, self.obj_reset_changes() @base.remotable - def refresh(self, use_slave=False): + def refresh(self, use_subordinate=False): extra = [field for field in INSTANCE_OPTIONAL_ATTRS if self.obj_attr_is_set(field)] current = self.__class__.get_by_uuid(self._context, uuid=self.uuid, expected_attrs=extra, - use_slave=use_slave) + use_subordinate=use_subordinate) # NOTE(danms): We orphan the instance copy so we do not unexpectedly # 
trigger a lazy-load (which would mean we failed to calculate the # expected_attrs properly) @@ -1230,7 +1230,7 @@ class InstanceList(base.ObjectListBase, base.NovaObject): @db.select_db_reader_mode def _get_by_filters_impl(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False, + marker=None, expected_attrs=None, use_subordinate=False, sort_keys=None, sort_dirs=None): if sort_keys or sort_dirs: db_inst_list = db.instance_get_all_by_filters_sort( @@ -1246,12 +1246,12 @@ def _get_by_filters_impl(cls, context, filters, @base.remotable_classmethod def get_by_filters(cls, context, filters, sort_key='created_at', sort_dir='desc', limit=None, - marker=None, expected_attrs=None, use_slave=False, + marker=None, expected_attrs=None, use_subordinate=False, sort_keys=None, sort_dirs=None): db_inst_list = cls._get_by_filters_impl( context, filters, sort_key=sort_key, sort_dir=sort_dir, limit=limit, marker=marker, expected_attrs=expected_attrs, - use_slave=use_slave, sort_keys=sort_keys, sort_dirs=sort_dirs) + use_subordinate=use_subordinate, sort_keys=sort_keys, sort_dirs=sort_dirs) # NOTE(melwitt): _make_instance_list could result in joined objects' # (from expected_attrs) _from_db_object methods being called during # Instance._from_db_object, each of which might choose to perform @@ -1263,15 +1263,15 @@ def get_by_filters(cls, context, filters, @staticmethod @db.select_db_reader_mode def _db_instance_get_all_by_host(context, host, columns_to_join, - use_slave=False): + use_subordinate=False): return db.instance_get_all_by_host(context, host, columns_to_join=columns_to_join) @base.remotable_classmethod - def get_by_host(cls, context, host, expected_attrs=None, use_slave=False): + def get_by_host(cls, context, host, expected_attrs=None, use_subordinate=False): db_inst_list = cls._db_instance_get_all_by_host( context, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave) + use_subordinate=use_subordinate) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @@ -1329,7 +1329,7 @@ def get_hung_in_rebooting(cls, context, reboot_window, @db.select_db_reader_mode def _db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join, - use_slave=False, limit=None, marker=None): + use_subordinate=False, limit=None, marker=None): return db.instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=columns_to_join, limit=limit, marker=marker) @@ -1337,7 +1337,7 @@ def _db_instance_get_active_by_window_joined( @base.remotable_classmethod def _get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None, use_slave=False, + expected_attrs=None, use_subordinate=False, limit=None, marker=None): # NOTE(mriedem): We need to convert the begin/end timestamp strings # to timezone-aware datetime objects for the DB API call. 
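# Illustrative sketch (not part of the patch): how a caller opts into the
# asynchronous (replica) reader through the renamed kwarg. The
# InstanceList.get_by_host signature comes from the hunks above; the
# surrounding helper function is hypothetical and shown only for context.
from nova import objects

def _list_instance_uuids_for_host(context, host):
    # Replica lag is tolerable for this read-only scan, so route the query
    # to the asynchronous reader instead of the writer connection.
    instances = objects.InstanceList.get_by_host(
        context, host, expected_attrs=[], use_subordinate=True)
    return [inst.uuid for inst in instances]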
@@ -1346,14 +1346,14 @@ def _get_active_by_window_joined(cls, context, begin, end=None, db_inst_list = cls._db_instance_get_active_by_window_joined( context, begin, end, project_id, host, columns_to_join=_expected_cols(expected_attrs), - use_slave=use_slave, limit=limit, marker=marker) + use_subordinate=use_subordinate, limit=limit, marker=marker) return _make_instance_list(context, cls(), db_inst_list, expected_attrs) @classmethod def get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None, use_slave=False, + expected_attrs=None, use_subordinate=False, limit=None, marker=None): """Get instances and joins active during a certain time window. @@ -1364,7 +1364,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, :param:host: used to filter instances on a given compute host :param:expected_attrs: list of related fields that can be joined in the database layer when querying for instances - :param use_slave if True, ship this query off to a DB slave + :param use_subordinate if True, ship this query off to a DB subordinate :param limit: maximum number of instances to return per page :param marker: last instance uuid from the previous page :returns: InstanceList @@ -1377,7 +1377,7 @@ def get_active_by_window_joined(cls, context, begin, end=None, return cls._get_active_by_window_joined(context, begin, end, project_id, host, expected_attrs, - use_slave=use_slave, + use_subordinate=use_subordinate, limit=limit, marker=marker) @base.remotable_classmethod diff --git a/nova/objects/migration.py b/nova/objects/migration.py index bf1911ecaf..b6884c5255 100644 --- a/nova/objects/migration.py +++ b/nova/objects/migration.py @@ -193,7 +193,7 @@ def is_same_host(self): class MigrationList(base.ObjectListBase, base.NovaObject): # Version 1.0: Initial version # Migration <= 1.1 - # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute + # Version 1.1: Added use_subordinate to get_unconfirmed_by_dest_compute # Version 1.2: Migration version 1.2 # Version 1.3: Added a new function to get in progress migrations # for an instance. 
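# Illustrative sketch (not part of the patch): the renamed 'use_subordinate'
# kwarg is consumed by the select_db_reader_mode decorator whose hunks appear
# earlier in this diff (nova/db/sqlalchemy/api.py); it is condensed here for
# reference. The real wrapper also unwraps stacked decorators before
# inspecting the call arguments.
import functools
import inspect

from nova.db.sqlalchemy.api import get_context_manager

def select_db_reader_mode_sketch(f):
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        keyed_args = inspect.getcallargs(f, *args, **kwargs)
        context = keyed_args['context']
        use_subordinate = keyed_args.get('use_subordinate', False)
        # Asynchronous reader (may lag behind the writer) vs. synchronous one.
        if use_subordinate:
            reader_mode = get_context_manager(context).async_
        else:
            reader_mode = get_context_manager(context).reader
        with reader_mode.using(context):
            return f(*args, **kwargs)
    return wrapper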
@@ -208,15 +208,15 @@ class MigrationList(base.ObjectListBase, base.NovaObject): @staticmethod @db.select_db_reader_mode def _db_migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=False): + context, confirm_window, dest_compute, use_subordinate=False): return db.migration_get_unconfirmed_by_dest_compute( context, confirm_window, dest_compute) @base.remotable_classmethod def get_unconfirmed_by_dest_compute(cls, context, confirm_window, - dest_compute, use_slave=False): + dest_compute, use_subordinate=False): db_migrations = cls._db_migration_get_unconfirmed_by_dest_compute( - context, confirm_window, dest_compute, use_slave=use_slave) + context, confirm_window, dest_compute, use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.Migration, db_migrations) diff --git a/nova/objects/service.py b/nova/objects/service.py index 98d49669f8..f66dd0def1 100644 --- a/nova/objects/service.py +++ b/nova/objects/service.py @@ -164,7 +164,7 @@ class Service(base.NovaPersistentObject, base.NovaObject, # Version 1.1: Added compute_node nested object # Version 1.2: String attributes updated to support unicode # Version 1.3: ComputeNode version 1.5 - # Version 1.4: Added use_slave to get_by_compute_host + # Version 1.4: Added use_subordinate to get_by_compute_host # Version 1.5: ComputeNode version 1.6 # Version 1.6: ComputeNode version 1.7 # Version 1.7: ComputeNode version 1.8 @@ -340,13 +340,13 @@ def get_by_host_and_binary(cls, context, host, binary): @staticmethod @db.select_db_reader_mode - def _db_service_get_by_compute_host(context, host, use_slave=False): + def _db_service_get_by_compute_host(context, host, use_subordinate=False): return db.service_get_by_compute_host(context, host) @base.remotable_classmethod - def get_by_compute_host(cls, context, host, use_slave=False): + def get_by_compute_host(cls, context, host, use_subordinate=False): db_service = cls._db_service_get_by_compute_host(context, host, - use_slave=use_slave) + use_subordinate=use_subordinate) return cls._from_db_object(context, cls(), db_service) # NOTE(ndipanov): This is deprecated and should be removed on the next @@ -441,11 +441,11 @@ def clear_min_version_cache(cls): @staticmethod @db.select_db_reader_mode - def _db_service_get_minimum_version(context, binaries, use_slave=False): + def _db_service_get_minimum_version(context, binaries, use_subordinate=False): return db.service_get_minimum_version(context, binaries) @base.remotable_classmethod - def get_minimum_version_multi(cls, context, binaries, use_slave=False): + def get_minimum_version_multi(cls, context, binaries, use_subordinate=False): if not all(binary.startswith('nova-') for binary in binaries): LOG.warning('get_minimum_version called with likely-incorrect ' 'binaries `%s\'', ','.join(binaries)) @@ -456,7 +456,7 @@ def get_minimum_version_multi(cls, context, binaries, use_slave=False): any(binary not in cls._MIN_VERSION_CACHE for binary in binaries)): min_versions = cls._db_service_get_minimum_version( - context, binaries, use_slave=use_slave) + context, binaries, use_subordinate=use_subordinate) if min_versions: min_versions = {binary: version or 0 for binary, version in @@ -477,9 +477,9 @@ def get_minimum_version_multi(cls, context, binaries, use_slave=False): return version @base.remotable_classmethod - def get_minimum_version(cls, context, binary, use_slave=False): + def get_minimum_version(cls, context, binary, use_subordinate=False): return cls.get_minimum_version_multi(context, 
[binary], - use_slave=use_slave) + use_subordinate=use_subordinate) def get_minimum_version_all_cells(context, binaries, require_all=False): diff --git a/nova/objects/virtual_interface.py b/nova/objects/virtual_interface.py index b44eeb83ef..3d70a678d3 100644 --- a/nova/objects/virtual_interface.py +++ b/nova/objects/virtual_interface.py @@ -139,13 +139,13 @@ def get_all(cls, context): @staticmethod @db.select_db_reader_mode def _db_virtual_interface_get_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): return db.virtual_interface_get_by_instance(context, instance_uuid) @base.remotable_classmethod - def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False): + def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False): db_vifs = cls._db_virtual_interface_get_by_instance( - context, instance_uuid, use_slave=use_slave) + context, instance_uuid, use_subordinate=use_subordinate) return base.obj_make_list(context, cls(context), objects.VirtualInterface, db_vifs) diff --git a/nova/pci/manager.py b/nova/pci/manager.py index 3084643f5e..a7e75fc567 100644 --- a/nova/pci/manager.py +++ b/nova/pci/manager.py @@ -40,7 +40,7 @@ class PciDevTracker(object): devices to/from instances, and to update the available pci passthrough device information from the hypervisor periodically. - The `pci_devs` attribute of this class is the in-memory "master copy" of + The `pci_devs` attribute of this class is the in-memory "main copy" of all devices on each compute host, and all data changes that happen when claiming/allocating/freeing devices HAVE TO be made against instances contained in `pci_devs` list, because they are periodically flushed to the diff --git a/nova/tests/functional/api_sample_tests/test_volumes.py b/nova/tests/functional/api_sample_tests/test_volumes.py index 353660cb6a..5063765e43 100644 --- a/nova/tests/functional/api_sample_tests/test_volumes.py +++ b/nova/tests/functional/api_sample_tests/test_volumes.py @@ -212,7 +212,7 @@ def _get_tags_per_volume(self): def _stub_db_bdms_get_all_by_instance(self, server_id): def fake_bdms_get_all_by_instance(context, instance_uuid, - use_slave=False): + use_subordinate=False): bdms = [ fake_block_device.FakeDbBlockDeviceDict( {'id': 1, 'volume_id': self.OLD_VOLUME_ID, diff --git a/nova/tests/unit/api/openstack/compute/test_server_actions.py b/nova/tests/unit/api/openstack/compute/test_server_actions.py index b9c9685da6..2c581bbb06 100644 --- a/nova/tests/unit/api/openstack/compute/test_server_actions.py +++ b/nova/tests/unit/api/openstack/compute/test_server_actions.py @@ -963,7 +963,7 @@ def _fake_id(x): delete_on_termination=False)] def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', @@ -1074,7 +1074,7 @@ def _fake_id(x): image_service = glance.get_default_image_service() def fake_block_device_mapping_get_all_by_instance(context, inst_id, - use_slave=False): + use_subordinate=False): return [fake_block_device.FakeDbBlockDeviceDict( {'volume_id': _fake_id('a'), 'source_type': 'snapshot', diff --git a/nova/tests/unit/api/openstack/compute/test_server_metadata.py b/nova/tests/unit/api/openstack/compute/test_server_metadata.py index df2340b464..f18a082ffe 100644 --- a/nova/tests/unit/api/openstack/compute/test_server_metadata.py +++ b/nova/tests/unit/api/openstack/compute/test_server_metadata.py @@ -78,7 +78,7 @@ def 
stub_max_server_metadata(): def return_server_nonexistent(context, server_id, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): raise exception.InstanceNotFound(instance_id=server_id) diff --git a/nova/tests/unit/api/openstack/compute/test_serversV21.py b/nova/tests/unit/api/openstack/compute/test_serversV21.py index b1040a013f..06e6061acb 100644 --- a/nova/tests/unit/api/openstack/compute/test_serversV21.py +++ b/nova/tests/unit/api/openstack/compute/test_serversV21.py @@ -115,7 +115,7 @@ def fake_start_stop_invalid_state(self, context, instance): def fake_instance_get_by_uuid_not_found(context, uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): raise exception.InstanceNotFound(instance_id=uuid) diff --git a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py index e41e671fff..8d5718a8dd 100644 --- a/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py +++ b/nova/tests/unit/api/openstack/compute/test_simple_tenant_usage.py @@ -115,7 +115,7 @@ def _fake_instance_deleted_flavorless(context, start, end, instance_id, @classmethod def fake_get_active_deleted_flavorless(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None, use_slave=False, + expected_attrs=None, use_subordinate=False, limit=None, marker=None): # First get some normal instances to have actual usage instances = [ @@ -135,7 +135,7 @@ def fake_get_active_deleted_flavorless(cls, context, begin, end=None, @classmethod def fake_get_active_by_window_joined(cls, context, begin, end=None, project_id=None, host=None, - expected_attrs=None, use_slave=False, + expected_attrs=None, use_subordinate=False, limit=None, marker=None): return objects.InstanceList(objects=[ _fake_instance(START, STOP, x, @@ -246,12 +246,12 @@ def _get_tenant_usages(self, detailed=''): def fake_get_active_by_window_joined(context, begin, end=None, project_id=None, host=None, - expected_attrs=None, use_slave=False, + expected_attrs=None, use_subordinate=False, limit=None, marker=None): self.assertEqual(['flavor'], expected_attrs) return orig_get_active_by_window_joined(context, begin, end, project_id, host, - expected_attrs, use_slave) + expected_attrs, use_subordinate) with mock.patch.object(objects.InstanceList, 'get_active_by_window_joined', diff --git a/nova/tests/unit/api/openstack/fakes.py b/nova/tests/unit/api/openstack/fakes.py index 8279bcd814..2875a63c2a 100644 --- a/nova/tests/unit/api/openstack/fakes.py +++ b/nova/tests/unit/api/openstack/fakes.py @@ -356,7 +356,7 @@ def get_fake_uuid(token=0): def fake_instance_get(**kwargs): - def _return_server(context, uuid, columns_to_join=None, use_slave=False): + def _return_server(context, uuid, columns_to_join=None, use_subordinate=False): if 'project_id' not in kwargs: kwargs['project_id'] = 'fake' return stub_instance(1, **kwargs) @@ -387,8 +387,8 @@ def _return_servers(context, *args, **kwargs): if 'columns_to_join' in kwargs: kwargs.pop('columns_to_join') - if 'use_slave' in kwargs: - kwargs.pop('use_slave') + if 'use_subordinate' in kwargs: + kwargs.pop('use_subordinate') if 'sort_keys' in kwargs: kwargs.pop('sort_keys') @@ -693,7 +693,7 @@ def stub_snapshot_get_all(self, context): def stub_bdm_get_all_by_instance_uuids(context, instance_uuids, - use_slave=False): + use_subordinate=False): i = 1 result = [] for instance_uuid in instance_uuids: diff --git a/nova/tests/unit/compute/test_compute.py 
b/nova/tests/unit/compute/test_compute.py index 29a80bf6d4..78044d10d5 100644 --- a/nova/tests/unit/compute/test_compute.py +++ b/nova/tests/unit/compute/test_compute.py @@ -796,7 +796,7 @@ def test_poll_bandwidth_usage_not_implemented(self, mock_get_counter, mock_get_counter.assert_called_once_with([]) mock_last.assert_called_once_with() mock_get_host.assert_called_once_with(ctxt, 'fake-mini', - use_slave=True) + use_subordinate=True) @mock.patch.object(objects.InstanceList, 'get_by_host') @mock.patch.object(objects.BlockDeviceMappingList, @@ -815,10 +815,10 @@ def test_get_host_volume_bdms(self, mock_get_by_inst, mock_get_by_host): got_host_bdms = self.compute._get_host_volume_bdms('fake-context') mock_get_by_host.assert_called_once_with('fake-context', self.compute.host, - use_slave=False) + use_subordinate=False) mock_get_by_inst.assert_called_once_with('fake-context', uuids.volume_instance, - use_slave=False) + use_subordinate=False) self.assertEqual(expected_host_bdms, got_host_bdms) @mock.patch.object(utils, 'last_completed_audit_period') @@ -844,7 +844,7 @@ def test_poll_volume_usage_returns_no_vols(self, mock_get_usage, self.flags(volume_usage_poll_interval=10) self.compute._poll_volume_usage(ctxt) - mock_get_bdms.assert_called_once_with(ctxt, use_slave=True) + mock_get_bdms.assert_called_once_with(ctxt, use_subordinate=True) @mock.patch.object(compute_utils, 'notify_about_volume_usage') @mock.patch.object(compute_manager.ComputeManager, '_get_host_volume_bdms') @@ -864,7 +864,7 @@ def test_poll_volume_usage_with_data(self, mock_get_usage, mock_get_bdms, self.flags(volume_usage_poll_interval=10) self.compute._poll_volume_usage(self.context) - mock_get_bdms.assert_called_once_with(self.context, use_slave=True) + mock_get_bdms.assert_called_once_with(self.context, use_subordinate=True) mock_notify.assert_called_once_with( self.context, test.MatchType(objects.VolumeUsage), self.compute.host) @@ -983,7 +983,7 @@ def fake_get_volume_encryption_metadata(self, context, volume_id): mock_get.assert_called_once_with(self.context, uuids.volume_id, instance.uuid) mock_stats.assert_called_once_with(instance, 'vdb') - mock_get_bdms.assert_called_once_with(self.context, use_slave=True) + mock_get_bdms.assert_called_once_with(self.context, use_subordinate=True) mock_get_all(self.context, host_volume_bdms) mock_exists.assert_called_once_with(mock.ANY) @@ -7048,8 +7048,8 @@ def test_cleanup_running_deleted_instances_reap(self, mock_get_uuid, mock_cleanup.assert_called_once_with(ctxt, inst2, bdms, detach=False) mock_get_uuid.assert_has_calls([ - mock.call(ctxt, inst1.uuid, use_slave=True), - mock.call(ctxt, inst2.uuid, use_slave=True)]) + mock.call(ctxt, inst1.uuid, use_subordinate=True), + mock.call(ctxt, inst2.uuid, use_subordinate=True)]) mock_get_inst.assert_called_once_with(ctxt, {'deleted': True, 'soft_deleted': False}) @@ -7241,13 +7241,13 @@ def _heal_instance_info_cache(self, 'require_nw_info': 0, 'setup_network': 0} def fake_instance_get_all_by_host(context, host, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): call_info['get_all_by_host'] += 1 self.assertEqual([], columns_to_join) return instances[:] def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join, use_slave=False): + columns_to_join, use_subordinate=False): if instance_uuid not in instance_map: raise exception.InstanceNotFound(instance_id=instance_uuid) call_info['get_by_uuid'] += 1 @@ -7421,7 +7421,7 @@ def test_poll_rescued_instances(self, unrescue, get): def 
fake_instance_get_all_by_filters(context, filters, expected_attrs=None, - use_slave=False): + use_subordinate=False): self.assertEqual(["system_metadata"], expected_attrs) return instances @@ -7471,7 +7471,7 @@ def test_poll_rebooting_instances(self, get): task_states.REBOOTING, task_states.REBOOT_STARTED, task_states.REBOOT_PENDING]} get.assert_called_once_with(ctxt, filters, - expected_attrs=[], use_slave=True) + expected_attrs=[], use_subordinate=True) def test_poll_unconfirmed_resizes(self): instances = [ @@ -7523,7 +7523,7 @@ def test_poll_unconfirmed_resizes(self): migrations.append(fake_mig) def fake_instance_get_by_uuid(context, instance_uuid, - columns_to_join=None, use_slave=False): + columns_to_join=None, use_subordinate=False): self.assertIn('metadata', columns_to_join) self.assertIn('system_metadata', columns_to_join) # raise InstanceNotFound exception for non-existing instance @@ -7535,7 +7535,7 @@ def fake_instance_get_by_uuid(context, instance_uuid, return instance def fake_migration_get_unconfirmed_by_dest_compute(context, - resize_confirm_window, dest_compute, use_slave=False): + resize_confirm_window, dest_compute, use_subordinate=False): self.assertEqual(dest_compute, CONF.host) return migrations @@ -8040,7 +8040,7 @@ def test_reclaim_queued_deletes_continue_on_error(self, mock_delete_inst, mock_get_filter.assert_called_once_with(ctxt, mock.ANY, expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS, - use_slave=True) + use_subordinate=True) mock_delete_old.assert_has_calls([mock.call(instance1, 3600), mock.call(instance2, 3600)]) mock_get_uuid.assert_has_calls([mock.call(ctxt, instance1.uuid), @@ -8070,9 +8070,9 @@ def test_sync_power_states(self, mock_sync, mock_get): mock_get.assert_has_calls([mock.call(mock.ANY), mock.call(mock.ANY), mock.call(mock.ANY)]) mock_sync.assert_has_calls([ - mock.call(ctxt, mock.ANY, power_state.NOSTATE, use_slave=True), - mock.call(ctxt, mock.ANY, power_state.RUNNING, use_slave=True), - mock.call(ctxt, mock.ANY, power_state.SHUTDOWN, use_slave=True)]) + mock.call(ctxt, mock.ANY, power_state.NOSTATE, use_subordinate=True), + mock.call(ctxt, mock.ANY, power_state.RUNNING, use_subordinate=True), + mock.call(ctxt, mock.ANY, power_state.SHUTDOWN, use_subordinate=True)]) @mock.patch.object(compute_manager.ComputeManager, '_get_power_state') @mock.patch.object(compute_manager.ComputeManager, @@ -12401,7 +12401,7 @@ def fake_driver_add_to_aggregate(self, context, aggregate, host, fake_driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="host", - aggregate=self.aggr, slave_info=None) + aggregate=self.aggr, subordinate_info=None) self.assertTrue(fake_driver_add_to_aggregate.called) def test_remove_aggregate_host(self): @@ -12414,35 +12414,35 @@ def fake_driver_remove_from_aggregate(cls, context, aggregate, host, fake_driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, - aggregate=self.aggr, host="host", slave_info=None) + aggregate=self.aggr, host="host", subordinate_info=None) self.assertTrue(fake_driver_remove_from_aggregate.called) - def test_add_aggregate_host_passes_slave_info_to_driver(self): + def test_add_aggregate_host_passes_subordinate_info_to_driver(self): def driver_add_to_aggregate(cls, context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate.id, self.aggr.id) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) 
self.stub_out("nova.virt.fake.FakeDriver.add_to_aggregate", driver_add_to_aggregate) self.compute.add_aggregate_host(self.context, host="the_host", - slave_info="SLAVE_INFO", aggregate=self.aggr) + subordinate_info="SLAVE_INFO", aggregate=self.aggr) - def test_remove_from_aggregate_passes_slave_info_to_driver(self): + def test_remove_from_aggregate_passes_subordinate_info_to_driver(self): def driver_remove_from_aggregate(cls, context, aggregate, host, **kwargs): self.assertEqual(self.context, context) self.assertEqual(aggregate.id, self.aggr.id) self.assertEqual(host, "the_host") - self.assertEqual("SLAVE_INFO", kwargs.get("slave_info")) + self.assertEqual("SLAVE_INFO", kwargs.get("subordinate_info")) self.stub_out("nova.virt.fake.FakeDriver.remove_from_aggregate", driver_remove_from_aggregate) self.compute.remove_aggregate_host(self.context, - aggregate=self.aggr, host="the_host", slave_info="SLAVE_INFO") + aggregate=self.aggr, host="the_host", subordinate_info="SLAVE_INFO") class DisabledInstanceTypesTestCase(BaseTestCase): diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py index 05237fbbb9..4c0f3d3077 100644 --- a/nova/tests/unit/compute/test_compute_mgr.py +++ b/nova/tests/unit/compute/test_compute_mgr.py @@ -320,7 +320,7 @@ def test_update_available_resource(self, get_db_nodes, get_avail_nodes, get_db_nodes.return_value = db_nodes get_avail_nodes.return_value = avail_nodes self.compute.update_available_resource(self.context, startup=True) - get_db_nodes.assert_called_once_with(self.context, use_slave=True, + get_db_nodes.assert_called_once_with(self.context, use_subordinate=True, startup=True) update_mock.has_calls( [mock.call(self.context, node, startup=True) @@ -380,7 +380,7 @@ def test_get_compute_nodes_in_db_on_startup(self, mock_log, self.assertEqual([], self.compute._get_compute_nodes_in_db( self.context, startup=True)) get_all_by_host.assert_called_once_with( - self.context, self.compute.host, use_slave=False) + self.context, self.compute.host, use_subordinate=False) self.assertTrue(mock_log.warning.called) self.assertFalse(mock_log.error.called) @@ -1678,7 +1678,7 @@ def _make_instance_list(db_list): [x['uuid'] for x in result]) expected_filters = {'uuid': driver_uuids} mock_instance_list.assert_called_with(self.context, expected_filters, - use_slave=True) + use_subordinate=True) @mock.patch('nova.objects.InstanceList.get_by_filters') def test_get_instances_on_driver_empty(self, mock_instance_list): @@ -1733,7 +1733,7 @@ def _make_instance_list(db_list): [x['uuid'] for x in result]) expected_filters = {'host': self.compute.host} mock_instance_list.assert_called_with(self.context, expected_filters, - use_slave=True) + use_subordinate=True) @mock.patch.object(compute_utils, 'notify_usage_exists') @mock.patch.object(objects.TaskLog, 'end_task') @@ -1773,7 +1773,7 @@ def test_sync_power_states(self, mock_get): self.compute._sync_power_states(mock.sentinel.context) mock_get.assert_called_with(mock.sentinel.context, self.compute.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) mock_spawn.assert_called_once_with(mock.ANY, instance) @mock.patch('nova.objects.InstanceList.get_by_host', new=mock.Mock()) @@ -1807,7 +1807,7 @@ def test_sync_instance_power_state_match(self, mock_refresh): vm_states.ACTIVE) self.compute._sync_instance_power_state(self.context, instance, power_state.RUNNING) - mock_refresh.assert_called_once_with(use_slave=False) + mock_refresh.assert_called_once_with(use_subordinate=False) 
@mock.patch.object(fake_driver.FakeDriver, 'get_info') @mock.patch.object(objects.Instance, 'refresh') @@ -1822,7 +1822,7 @@ def test_sync_instance_power_state_running_stopped(self, mock_save, self.compute._sync_instance_power_state(self.context, instance, power_state.SHUTDOWN) self.assertEqual(instance.power_state, power_state.SHUTDOWN) - mock_refresh.assert_called_once_with(use_slave=False) + mock_refresh.assert_called_once_with(use_subordinate=False) self.assertTrue(mock_save.called) mock_get_info.assert_called_once_with(instance, use_cache=False) @@ -1857,7 +1857,7 @@ def _test_sync_to_stop(self, vm_power_state, vm_state, driver_power_state, power_state.CRASHED)): mock_get_info.assert_called_once_with(instance, use_cache=False) - mock_refresh.assert_called_once_with(use_slave=False) + mock_refresh.assert_called_once_with(use_subordinate=False) self.assertTrue(mock_save.called) def test_sync_instance_power_state_to_stop(self): @@ -1914,7 +1914,7 @@ def test_query_driver_power_state_and_sync_not_found_driver( mock_sync_power_state.assert_called_once_with(self.context, db_instance, power_state.NOSTATE, - use_slave=True) + use_subordinate=True) @mock.patch.object(virt_driver.ComputeDriver, 'delete_instance_files') @mock.patch.object(objects.InstanceList, 'get_by_filters') @@ -1934,13 +1934,13 @@ def __getitem__(self, name): def save(self): pass - def _fake_get(ctx, filter, expected_attrs, use_slave): + def _fake_get(ctx, filter, expected_attrs, use_subordinate): mock_get.assert_called_once_with( {'read_deleted': 'yes'}, {'deleted': True, 'soft_deleted': False, 'host': 'fake-mini', 'cleaned': False}, expected_attrs=['system_metadata'], - use_slave=True) + use_subordinate=True) return [a, b, c] a = FakeInstance(uuids.instanceA, 'apple', {'clean_attempts': '100'}) @@ -4645,7 +4645,7 @@ def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac, self.compute._poll_bandwidth_usage(self.context) get_by_uuid_mac.assert_called_once_with(self.context, uuids.instance, 'fake-mac', - start_period=0, use_slave=True) + start_period=0, use_subordinate=True) # NOTE(sdague): bw_usage_update happens at some time in # the future, so what last_refreshed is irrelevant. 
bw_usage_update.assert_called_once_with(self.context, @@ -4750,7 +4750,7 @@ def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host, self.compute._sync_scheduler_instance_info(self.context) mock_get_by_host.assert_called_once_with( fake_elevated, self.compute.host, expected_attrs=[], - use_slave=True) + use_subordinate=True) mock_sync.assert_called_once_with(fake_elevated, self.compute.host, exp_uuids) diff --git a/nova/tests/unit/compute/test_compute_xen.py b/nova/tests/unit/compute/test_compute_xen.py index 6fbab5e6eb..368c2d06b4 100644 --- a/nova/tests/unit/compute/test_compute_xen.py +++ b/nova/tests/unit/compute/test_compute_xen.py @@ -60,10 +60,10 @@ def do_test(mock_compute_sync_powerstate, self.compute._sync_power_states(ctxt) mock_instance_list_get_by_host.assert_called_once_with( - ctxt, self.compute.host, expected_attrs=[], use_slave=True) + ctxt, self.compute.host, expected_attrs=[], use_subordinate=True) mock_compute_get_num_instances.assert_called_once_with() mock_compute_sync_powerstate.assert_called_once_with( - ctxt, instance, power_state.NOSTATE, use_slave=True) + ctxt, instance, power_state.NOSTATE, use_subordinate=True) mock_vm_utils_lookup.assert_called_once_with( self.compute.driver._session, instance['name'], False) diff --git a/nova/tests/unit/compute/test_rpcapi.py b/nova/tests/unit/compute/test_rpcapi.py index 68da4d20fb..842c14e20e 100644 --- a/nova/tests/unit/compute/test_rpcapi.py +++ b/nova/tests/unit/compute/test_rpcapi.py @@ -186,7 +186,7 @@ def _test_compute_api(self, method, rpc_method, def test_add_aggregate_host(self): self._test_compute_api('add_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) def test_add_fixed_ip_to_instance(self): self._test_compute_api('add_fixed_ip_to_instance', 'cast', @@ -425,7 +425,7 @@ def test_refresh_instance_security_rules(self): def test_remove_aggregate_host(self): self._test_compute_api('remove_aggregate_host', 'cast', aggregate={'id': 'fake_id'}, host_param='host', host='host', - slave_info={}) + subordinate_info={}) def test_remove_fixed_ip_from_instance(self): self._test_compute_api('remove_fixed_ip_from_instance', 'cast', diff --git a/nova/tests/unit/conductor/test_conductor.py b/nova/tests/unit/conductor/test_conductor.py index 31bbb5d8fd..2e7858f484 100644 --- a/nova/tests/unit/conductor/test_conductor.py +++ b/nova/tests/unit/conductor/test_conductor.py @@ -3198,7 +3198,7 @@ def test_allocate_for_evacuate_dest_host_dest_node_not_found_reqspec( get_source_node.assert_called_once_with( self.ctxt, instance.host, instance.node) get_dest_node.assert_called_once_with( - self.ctxt, 'dest-host', use_slave=True) + self.ctxt, 'dest-host', use_subordinate=True) notify.assert_called_once_with( self.ctxt, instance.uuid, 'rebuild_server', {'vm_state': instance.vm_state, 'task_state': None}, ex, reqspec) @@ -3230,7 +3230,7 @@ def test_allocate_for_evacuate_dest_host_claim_fails( get_source_node.assert_called_once_with( self.ctxt, instance.host, instance.node) get_dest_node.assert_called_once_with( - self.ctxt, 'dest-host', use_slave=True) + self.ctxt, 'dest-host', use_subordinate=True) claim.assert_called_once_with( self.ctxt, self.conductor.report_client, instance, get_source_node.return_value, get_dest_node.return_value) diff --git a/nova/tests/unit/db/test_db_api.py b/nova/tests/unit/db/test_db_api.py index 68c32650db..76d00d1c77 100644 --- a/nova/tests/unit/db/test_db_api.py +++ b/nova/tests/unit/db/test_db_api.py @@ -244,7 +244,7 @@ 
def test_require_deadlock_retry_wraps_functions_properly(self): def test_select_db_reader_mode_select_sync(self, mock_clone, mock_using): @db.select_db_reader_mode - def func(self, context, value, use_slave=False): + def func(self, context, value, use_subordinate=False): pass mock_clone.return_value = enginefacade._TransactionContextManager( @@ -261,21 +261,21 @@ def func(self, context, value, use_slave=False): def test_select_db_reader_mode_select_async(self, mock_clone, mock_using): @db.select_db_reader_mode - def func(self, context, value, use_slave=False): + def func(self, context, value, use_subordinate=False): pass mock_clone.return_value = enginefacade._TransactionContextManager( mode=enginefacade._ASYNC_READER) ctxt = context.get_admin_context() value = 'some_value' - func(self, ctxt, value, use_slave=True) + func(self, ctxt, value, use_subordinate=True) mock_clone.assert_called_once_with(mode=enginefacade._ASYNC_READER) mock_using.assert_called_once_with(ctxt) @mock.patch.object(enginefacade._TransactionContextManager, 'using') @mock.patch.object(enginefacade._TransactionContextManager, '_clone') - def test_select_db_reader_mode_no_use_slave_select_sync(self, mock_clone, + def test_select_db_reader_mode_no_use_subordinate_select_sync(self, mock_clone, mock_using): @db.select_db_reader_mode @@ -738,8 +738,8 @@ def test_get_engine(self, mock_ctxt_mgr): mock_ctxt_mgr.writer.get_engine.assert_called_once_with() @mock.patch.object(sqlalchemy_api, 'main_context_manager') - def test_get_engine_use_slave(self, mock_ctxt_mgr): - sqlalchemy_api.get_engine(use_slave=True) + def test_get_engine_use_subordinate(self, mock_ctxt_mgr): + sqlalchemy_api.get_engine(use_subordinate=True) mock_ctxt_mgr.reader.get_engine.assert_called_once_with() def test_get_db_conf_with_connection(self): diff --git a/nova/tests/unit/network/test_api.py b/nova/tests/unit/network/test_api.py index ba73d4c89a..27295e7261 100644 --- a/nova/tests/unit/network/test_api.py +++ b/nova/tests/unit/network/test_api.py @@ -135,7 +135,7 @@ def fake_associate(*args, **kwargs): def fake_instance_get_by_uuid(context, instance_uuid, columns_to_join=None, - use_slave=None): + use_subordinate=None): if instance_uuid == orig_instance_uuid: self.assertIn('extra.flavor', columns_to_join) return fake_instance.fake_db_instance(uuid=instance_uuid) diff --git a/nova/tests/unit/network/test_linux_net.py b/nova/tests/unit/network/test_linux_net.py index 6ce993f4fb..209dade395 100644 --- a/nova/tests/unit/network/test_linux_net.py +++ b/nova/tests/unit/network/test_linux_net.py @@ -359,7 +359,7 @@ def setUp(self): self.context = context.RequestContext('testuser', 'testproject', is_admin=True) - def get_vifs(_context, instance_uuid, use_slave): + def get_vifs(_context, instance_uuid, use_subordinate): return [vif for vif in vifs if vif['instance_uuid'] == instance_uuid] diff --git a/nova/tests/unit/objects/test_instance.py b/nova/tests/unit/objects/test_instance.py index 2fc7a1bf22..26ece098ef 100644 --- a/nova/tests/unit/objects/test_instance.py +++ b/nova/tests/unit/objects/test_instance.py @@ -427,7 +427,7 @@ def test_refresh_does_not_recurse(self, mock_get): inst.refresh() mock_get.assert_called_once_with(self.context, uuid=inst.uuid, - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) @mock.patch.object(notifications, 'send_update') @mock.patch.object(db, 'instance_info_cache_update') @@ -515,7 +515,7 @@ def test_save_rename_sends_notification(self, mock_send, mock_get, 
mock_update_and_get.return_value = (old_ref, new_ref) inst = objects.Instance.get_by_uuid(self.context, old_ref['uuid'], - use_slave=False) + use_subordinate=False) self.assertEqual('hello', inst.display_name) inst.display_name = 'goodbye' inst.save() @@ -1592,7 +1592,7 @@ def test_get_all_by_filters(self, mock_get_all): inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -1609,7 +1609,7 @@ def test_get_all_by_filters_sorted(self, mock_get_all): inst_list = objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, expected_attrs=['metadata'], - use_slave=False, sort_keys=['uuid'], sort_dirs=['asc']) + use_subordinate=False, sort_keys=['uuid'], sort_dirs=['asc']) for i in range(0, len(fakes)): self.assertIsInstance(inst_list.objects[i], instance.Instance) @@ -1630,7 +1630,7 @@ def test_get_all_by_filters_calls_non_sort(self, # Single sort key/direction is set, call non-sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, sort_key='key', sort_dir='dir', - limit=100, marker='uuid', use_slave=True) + limit=100, marker='uuid', use_subordinate=True) mock_get_by_filters.assert_called_once_with( self.context, {'foo': 'bar'}, 'key', 'dir', limit=100, marker='uuid', columns_to_join=None) @@ -1645,7 +1645,7 @@ def test_get_all_by_filters_calls_sort(self, # Multiple sort keys/directions are set, call sorted DB function objects.InstanceList.get_by_filters( self.context, {'foo': 'bar'}, limit=100, marker='uuid', - use_slave=True, sort_keys=['key1', 'key2'], + use_subordinate=True, sort_keys=['key1', 'key2'], sort_dirs=['dir1', 'dir2']) mock_get_by_filters_sort.assert_called_once_with( self.context, {'foo': 'bar'}, limit=100, @@ -1663,7 +1663,7 @@ def test_get_all_by_filters_works_for_cleaned(self, mock_get_all): inst_list = objects.InstanceList.get_by_filters( self.context, {'deleted': True, 'cleaned': False}, 'uuid', 'asc', - expected_attrs=['metadata'], use_slave=False) + expected_attrs=['metadata'], use_subordinate=False) self.assertEqual(1, len(inst_list)) self.assertIsInstance(inst_list.objects[0], instance.Instance) @@ -1793,7 +1793,7 @@ def test_with_fault(self, mock_get_all, mock_fault_get): instances = objects.InstanceList.get_by_host(self.context, 'host', expected_attrs=['fault'], - use_slave=False) + use_subordinate=False) self.assertEqual(2, len(instances)) self.assertEqual(fake_faults['fake-uuid'][0], dict(instances[0].fault)) diff --git a/nova/tests/unit/objects/test_migration.py b/nova/tests/unit/objects/test_migration.py index 6e5a5ff35d..0aec47dc9b 100644 --- a/nova/tests/unit/objects/test_migration.py +++ b/nova/tests/unit/objects/test_migration.py @@ -186,7 +186,7 @@ def test_get_unconfirmed_by_dest_compute(self, mock_get): mock_get.return_value = db_migrations migrations = ( migration.MigrationList.get_unconfirmed_by_dest_compute( - ctxt, 'window', 'foo', use_slave=False)) + ctxt, 'window', 'foo', use_subordinate=False)) self.assertEqual(2, len(migrations)) for index, db_migration in enumerate(db_migrations): self.compare_obj(migrations[index], db_migration) diff --git a/nova/tests/unit/test_fixtures.py b/nova/tests/unit/test_fixtures.py index d6f2aaa9a6..666291673a 100644 --- a/nova/tests/unit/test_fixtures.py +++ b/nova/tests/unit/test_fixtures.py @@ -451,7 +451,7 @@ def test_services_current(self, 
mock_db): self.assertEqual(123, service_obj.Service.get_minimum_version( None, 'nova-compute')) mock_db.assert_called_once_with(None, ['nova-compute'], - use_slave=False) + use_subordinate=False) mock_db.reset_mock() compute_rpcapi.LAST_VERSION = 123 self.useFixture(fixtures.AllServicesCurrent()) diff --git a/nova/tests/unit/virt/libvirt/test_driver.py b/nova/tests/unit/virt/libvirt/test_driver.py index 22d850d518..d1e14140d9 100644 --- a/nova/tests/unit/virt/libvirt/test_driver.py +++ b/nova/tests/unit/virt/libvirt/test_driver.py @@ -15087,7 +15087,7 @@ def get_info(cfg, block_device_info): self.assertEqual(2, mock_info.call_count) filters = {'uuid': instance_uuids} - mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) + mock_get.assert_called_once_with(mock.ANY, filters, use_subordinate=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains") @@ -15197,7 +15197,7 @@ def side_effect(cfg, block_device_info): mock_list.assert_called_once_with(only_running=False) self.assertEqual(5, get_disk_info.call_count) filters = {'uuid': instance_uuids} - mock_get.assert_called_once_with(mock.ANY, filters, use_slave=True) + mock_get.assert_called_once_with(mock.ANY, filters, use_subordinate=True) mock_bdms.assert_called_with(mock.ANY, instance_uuids) @mock.patch.object(host.Host, "list_instance_domains") diff --git a/nova/tests/unit/virt/libvirt/test_imagecache.py b/nova/tests/unit/virt/libvirt/test_imagecache.py index 66297d11b6..4f41d8af26 100644 --- a/nova/tests/unit/virt/libvirt/test_imagecache.py +++ b/nova/tests/unit/virt/libvirt/test_imagecache.py @@ -662,7 +662,7 @@ def fake_instances(ctxt): 'soft_deleted': True, } mock_instance_list.assert_called_once_with( - ctxt, filters, expected_attrs=[], use_slave=True) + ctxt, filters, expected_attrs=[], use_subordinate=True) def test_store_swap_image(self): image_cache_manager = imagecache.ImageCacheManager() diff --git a/nova/tests/unit/virt/xenapi/test_xenapi.py b/nova/tests/unit/virt/xenapi/test_xenapi.py index 176d64d73d..9978faa698 100644 --- a/nova/tests/unit/virt/xenapi/test_xenapi.py +++ b/nova/tests/unit/virt/xenapi/test_xenapi.py @@ -3173,7 +3173,7 @@ def setUp(self): self.aggr = objects.Aggregate(context=self.context, id=1, **values) self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI', - 'master_compute': 'host', + 'main_compute': 'host', 'availability_zone': 'fake_zone', pool_states.KEY: pool_states.ACTIVE, 'host': xenapi_fake.get_record('host', @@ -3183,15 +3183,15 @@ def setUp(self): @mock.patch('nova.virt.xenapi.pool.ResourcePool.add_to_aggregate') def test_pool_add_to_aggregate_called_by_driver( self, mock_add_to_aggregate): - def pool_add_to_aggregate(context, aggregate, host, slave_info=None): + def pool_add_to_aggregate(context, aggregate, host, subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) mock_add_to_aggregate.side_effect = pool_add_to_aggregate self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertTrue(mock_add_to_aggregate.called) @@ -3199,14 +3199,14 @@ def pool_add_to_aggregate(context, aggregate, host, slave_info=None): def test_pool_remove_from_aggregate_called_by_driver( self, mock_remove_from_aggregate): def pool_remove_from_aggregate(context, aggregate, host, - slave_info=None): + 
subordinate_info=None): self.assertEqual("CONTEXT", context) self.assertEqual("AGGREGATE", aggregate) self.assertEqual("HOST", host) - self.assertEqual("SLAVEINFO", slave_info) + self.assertEqual("SLAVEINFO", subordinate_info) mock_remove_from_aggregate.side_effect = pool_remove_from_aggregate self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST", - slave_info="SLAVEINFO") + subordinate_info="SLAVEINFO") self.assertTrue(mock_remove_from_aggregate.called) @@ -3220,9 +3220,9 @@ def test_add_to_aggregate_for_first_host_sets_metadata( self.assertThat(self.fake_metadata, matchers.DictMatches(result.metadata)) - @mock.patch('nova.virt.xenapi.pool.ResourcePool._join_slave') - def test_join_slave(self, mock_join_slave): - # Ensure join_slave gets called when the request gets to master. + @mock.patch('nova.virt.xenapi.pool.ResourcePool._join_subordinate') + def test_join_subordinate(self, mock_join_subordinate): + # Ensure join_subordinate gets called when the request gets to main. aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) self.conn._pool.add_to_aggregate(self.context, aggregate, "host2", @@ -3231,7 +3231,7 @@ def test_join_slave(self, mock_join_slave): user='fake_user', passwd='fake_pass', xenhost_uuid='fake_uuid')) - self.assertTrue(mock_join_slave.called) + self.assertTrue(mock_join_subordinate.called) @mock.patch.object(xenapi_fake.SessionBase, 'pool_set_name_label') def test_add_to_aggregate_first_host(self, mock_pool_set_name_label): @@ -3263,17 +3263,17 @@ def test_remove_from_empty_aggregate(self): self.conn._pool.remove_from_aggregate, self.context, result, "test_host") - @mock.patch('nova.virt.xenapi.pool.ResourcePool._eject_slave') - def test_remove_slave(self, mock_eject_slave): - # Ensure eject slave gets called. + @mock.patch('nova.virt.xenapi.pool.ResourcePool._eject_subordinate') + def test_remove_subordinate(self, mock_eject_subordinate): + # Ensure eject subordinate gets called. self.fake_metadata['host2'] = 'fake_host2_uuid' aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2") - self.assertTrue(mock_eject_slave.called) + self.assertTrue(mock_eject_subordinate.called) @mock.patch('nova.virt.xenapi.pool.ResourcePool._clear_pool') - def test_remove_master_solo(self, mock_clear_pool): + def test_remove_main_solo(self, mock_clear_pool): # Ensure metadata are cleared after removal. aggregate = self._aggregate_setup(metadata=self.fake_metadata) self.conn._pool.remove_from_aggregate(self.context, aggregate, "host") @@ -3284,8 +3284,8 @@ def test_remove_master_solo(self, mock_clear_pool): pool_states.KEY: pool_states.ACTIVE}, matchers.DictMatches(result.metadata)) - def test_remote_master_non_empty_pool(self): - # Ensure AggregateError is raised if removing the master. + def test_remote_main_non_empty_pool(self): + # Ensure AggregateError is raised if removing the main. 
aggregate = self._aggregate_setup(hosts=['host', 'host2'], metadata=self.fake_metadata) @@ -3403,7 +3403,7 @@ def test_add_aggregate_host_raise_err(self): self.compute.add_aggregate_host, self.context, host="fake_host", aggregate=self.aggr, - slave_info=None) + subordinate_info=None) self.assertEqual(self.aggr.metadata[pool_states.KEY], pool_states.ERROR) self.assertEqual(self.aggr.hosts, ['fake_host']) @@ -3414,16 +3414,16 @@ def __init__(self): self._mock_calls = [] def add_aggregate_host(self, ctxt, aggregate, - host_param, host, slave_info): + host_param, host, subordinate_info): self._mock_calls.append(( self.add_aggregate_host, ctxt, aggregate, - host_param, host, slave_info)) + host_param, host, subordinate_info)) def remove_aggregate_host(self, ctxt, host, aggregate_id, host_param, - slave_info): + subordinate_info): self._mock_calls.append(( self.remove_aggregate_host, ctxt, host, aggregate_id, - host_param, slave_info)) + host_param, subordinate_info)) class StubDependencies(object): @@ -3438,10 +3438,10 @@ def _is_hv_pool(self, *_ignore): def _get_metadata(self, *_ignore): return { pool_states.KEY: {}, - 'master_compute': 'master' + 'main_compute': 'main' } - def _create_slave_info(self, *ignore): + def _create_subordinate_info(self, *ignore): return "SLAVE_INFO" @@ -3455,33 +3455,33 @@ class HypervisorPoolTestCase(test.NoDBTestCase): 'id': 98, 'hosts': [], 'metadata': { - 'master_compute': 'master', + 'main_compute': 'main', pool_states.POOL_FLAG: '', pool_states.KEY: '' } } fake_aggregate = objects.Aggregate(**fake_aggregate) - def test_slave_asks_master_to_add_slave_to_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_add_subordinate_to_pool(self): + subordinate = ResourcePoolWithStubs() - slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.add_to_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.add_aggregate_host, - "CONTEXT", "slave", self.fake_aggregate, - "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + (subordinate.compute_rpcapi.add_aggregate_host, + "CONTEXT", "subordinate", self.fake_aggregate, + "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) - def test_slave_asks_master_to_remove_slave_from_pool(self): - slave = ResourcePoolWithStubs() + def test_subordinate_asks_main_to_remove_subordinate_from_pool(self): + subordinate = ResourcePoolWithStubs() - slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave") + subordinate.remove_from_aggregate("CONTEXT", self.fake_aggregate, "subordinate") self.assertIn( - (slave.compute_rpcapi.remove_aggregate_host, - "CONTEXT", "slave", 98, "master", "SLAVE_INFO"), - slave.compute_rpcapi._mock_calls) + (subordinate.compute_rpcapi.remove_aggregate_host, + "CONTEXT", "subordinate", 98, "main", "SLAVE_INFO"), + subordinate.compute_rpcapi._mock_calls) class SwapXapiHostTestCase(test.NoDBTestCase): diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 436b7d5c53..3c8dae66da 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -8747,7 +8747,7 @@ def _get_disk_over_committed_size_total(self): # in _update_available_resource method for calculating usages based # on instance utilization. local_instance_list = objects.InstanceList.get_by_filters( - ctx, filters, use_slave=True) + ctx, filters, use_subordinate=True) # Convert instance list to dictionary with instance uuid as key. 
local_instances = {inst.uuid: inst for inst in local_instance_list} diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index df2a78286c..b1e77cfc11 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -124,7 +124,7 @@ def create_host(name_label, hostname='fake_name', address='fake_addr', # Create a pool if we don't have one already if len(_db_content['pool']) == 0: pool_ref = _create_pool('') - _db_content['pool'][pool_ref]['master'] = host_ref + _db_content['pool'][pool_ref]['main'] = host_ref _db_content['pool'][pool_ref]['default-SR'] = host_default_sr_ref _db_content['pool'][pool_ref]['suspend-image-SR'] = host_default_sr_ref @@ -938,7 +938,7 @@ def __getattr__(self, name): return self._session elif name == 'xenapi': return _Dispatcher(self.xenapi_request, None) - elif name.startswith('login') or name.startswith('slave_local'): + elif name.startswith('login') or name.startswith('subordinate_local'): return lambda *params: self._login(name, params) elif name.startswith('Async'): return lambda *params: self._async(name, params) diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index 5487d4fb8f..671b344155 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -58,7 +58,7 @@ def undo_aggregate_operation(self, context, op, aggregate, 'state during operation on %(host)s'), {'aggregate_id': aggregate.id, 'host': host}) - def add_to_aggregate(self, context, aggregate, host, slave_info=None): + def add_to_aggregate(self, context, aggregate, host, subordinate_info=None): """Add a compute host to an aggregate.""" if not pool_states.is_hv_pool(aggregate.metadata): return @@ -80,38 +80,38 @@ def add_to_aggregate(self, context, aggregate, host, slave_info=None): if (aggregate.metadata[pool_states.KEY] == pool_states.CREATED): aggregate.update_metadata({pool_states.KEY: pool_states.CHANGING}) if len(aggregate.hosts) == 1: - # this is the first host of the pool -> make it master + # this is the first host of the pool -> make it main self._init_pool(aggregate.id, aggregate.name) - # save metadata so that we can find the master again - metadata = {'master_compute': host, + # save metadata so that we can find the main again + metadata = {'main_compute': host, host: self._host_uuid, pool_states.KEY: pool_states.ACTIVE} aggregate.update_metadata(metadata) else: # the pool is already up and running, we need to figure out # whether we can serve the request from this host or not. - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> do a pool-join - # To this aim, nova compute on the slave has to go down. + main_compute = aggregate.metadata['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> do a pool-join + # To this aim, nova compute on the subordinate has to go down. 
# NOTE: it is assumed that ONLY nova compute is running now - self._join_slave(aggregate.id, host, - slave_info.get('compute_uuid'), - slave_info.get('url'), slave_info.get('user'), - slave_info.get('passwd')) - metadata = {host: slave_info.get('xenhost_uuid'), } + self._join_subordinate(aggregate.id, host, + subordinate_info.get('compute_uuid'), + subordinate_info.get('url'), subordinate_info.get('user'), + subordinate_info.get('passwd')) + metadata = {host: subordinate_info.get('xenhost_uuid'), } aggregate.update_metadata(metadata) - elif master_compute and master_compute != host: - # send rpc cast to master, asking to add the following + elif main_compute and main_compute != host: + # send rpc cast to main, asking to add the following # host with specified credentials. - slave_info = self._create_slave_info() + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.add_aggregate_host( - context, host, aggregate, master_compute, slave_info) + context, host, aggregate, main_compute, subordinate_info) - def remove_from_aggregate(self, context, aggregate, host, slave_info=None): + def remove_from_aggregate(self, context, aggregate, host, subordinate_info=None): """Remove a compute host from an aggregate.""" - slave_info = slave_info or dict() + subordinate_info = subordinate_info or dict() if not pool_states.is_hv_pool(aggregate.metadata): return @@ -123,19 +123,19 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): aggregate_id=aggregate.id, reason=invalid[aggregate.metadata[pool_states.KEY]]) - master_compute = aggregate.metadata['master_compute'] - if master_compute == CONF.host and master_compute != host: - # this is the master -> instruct it to eject a host from the pool + main_compute = aggregate.metadata['main_compute'] + if main_compute == CONF.host and main_compute != host: + # this is the main -> instruct it to eject a host from the pool host_uuid = aggregate.metadata[host] - self._eject_slave(aggregate.id, - slave_info.get('compute_uuid'), host_uuid) + self._eject_subordinate(aggregate.id, + subordinate_info.get('compute_uuid'), host_uuid) aggregate.update_metadata({host: None}) - elif master_compute == host: - # Remove master from its own pool -> destroy pool only if the - # master is on its own, otherwise raise fault. Destroying a - # pool made only by master is fictional + elif main_compute == host: + # Remove main from its own pool -> destroy pool only if the + # main is on its own, otherwise raise fault. Destroying a + # pool made only by main is fictional if len(aggregate.hosts) > 1: - # NOTE: this could be avoided by doing a master + # NOTE: this could be avoided by doing a main # re-election, but this is simpler for now. 
raise exception.InvalidAggregateActionDelete( aggregate_id=aggregate.id, @@ -143,32 +143,32 @@ def remove_from_aggregate(self, context, aggregate, host, slave_info=None): 'from the pool; pool not empty') % host) self._clear_pool(aggregate.id) - aggregate.update_metadata({'master_compute': None, host: None}) - elif master_compute and master_compute != host: - # A master exists -> forward pool-eject request to master - slave_info = self._create_slave_info() + aggregate.update_metadata({'main_compute': None, host: None}) + elif main_compute and main_compute != host: + # A main exists -> forward pool-eject request to main + subordinate_info = self._create_subordinate_info() self.compute_rpcapi.remove_aggregate_host( - context, host, aggregate.id, master_compute, slave_info) + context, host, aggregate.id, main_compute, subordinate_info) else: # this shouldn't have happened raise exception.AggregateError(aggregate_id=aggregate.id, action='remove_from_aggregate', reason=_('Unable to eject %s ' - 'from the pool; No master found') + 'from the pool; No main found') % host) - def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): - """Joins a slave into a XenServer resource pool.""" + def _join_subordinate(self, aggregate_id, host, compute_uuid, url, user, passwd): + """Joins a subordinate into a XenServer resource pool.""" try: args = {'compute_uuid': compute_uuid, 'url': url, 'user': user, 'password': passwd, 'force': jsonutils.dumps(CONF.xenserver.use_join_force), - 'master_addr': self._host_addr, - 'master_user': CONF.xenserver.connection_username, - 'master_pass': CONF.xenserver.connection_password, } + 'main_addr': self._host_addr, + 'main_user': CONF.xenserver.connection_username, + 'main_pass': CONF.xenserver.connection_password, } self._session.call_plugin('xenhost.py', 'host_join', args) except self._session.XenAPI.Failure as e: LOG.error("Pool-Join failed: %s", e) @@ -177,8 +177,8 @@ def _join_slave(self, aggregate_id, host, compute_uuid, url, user, passwd): reason=_('Unable to join %s ' 'in the pool') % host) - def _eject_slave(self, aggregate_id, compute_uuid, host_uuid): - """Eject a slave from a XenServer resource pool.""" + def _eject_subordinate(self, aggregate_id, compute_uuid, host_uuid): + """Eject a subordinate from a XenServer resource pool.""" try: # shutdown nova-compute; if there are other VMs running, e.g. # guest instances, the eject will fail. That's a precaution @@ -217,7 +217,7 @@ def _clear_pool(self, aggregate_id): action='remove_from_aggregate', reason=six.text_type(e.details)) - def _create_slave_info(self): + def _create_subordinate_info(self): """XenServer specific info needed to join the hypervisor pool.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. xenapi diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py index ae431ddecb..f4acdf541b 100644 --- a/nova/virt/xenapi/pool_states.py +++ b/nova/virt/xenapi/pool_states.py @@ -25,7 +25,7 @@ A 'created' pool becomes 'changing' during the first request of adding a host. During a 'changing' status no other requests will be accepted; this is to allow the hypervisor layer to instantiate the underlying pool -without any potential race condition that may incur in master/slave-based +without any potential race condition that may incur in main/subordinate-based configurations. The pool goes into the 'active' state when the underlying pool has been correctly instantiated. All other operations (e.g. 
add/remove hosts) that succeed will keep the diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py index 55d4348188..c718572960 100644 --- a/releasenotes/source/conf.py +++ b/releasenotes/source/conf.py @@ -37,8 +37,8 @@ # The encoding of source files. #source_encoding = 'utf-8-sig' -# The master toctree document. -master_doc = 'index' +# The main toctree document. +main_doc = 'index' # General information about the project. copyright = u'2015, Nova developers' diff --git a/tools/db/schema_diff.py b/tools/db/schema_diff.py index bb389532f0..3f46120378 100755 --- a/tools/db/schema_diff.py +++ b/tools/db/schema_diff.py @@ -33,12 +33,12 @@ MYSQL: ./tools/db/schema_diff.py mysql+pymysql://root@localhost \ - master:latest my_branch:82 + main:latest my_branch:82 POSTGRESQL: ./tools/db/schema_diff.py postgresql://localhost \ - master:latest my_branch:82 + main:latest my_branch:82 """ @@ -229,12 +229,12 @@ def parse_options(): try: orig_branch, orig_version = sys.argv[2].split(':') except IndexError: - usage('original branch and version required (e.g. master:82)') + usage('original branch and version required (e.g. main:82)') try: new_branch, new_version = sys.argv[3].split(':') except IndexError: - usage('new branch and version required (e.g. master:82)') + usage('new branch and version required (e.g. main:82)') return db_url, orig_branch, orig_version, new_branch, new_version