api-guide/source/conf.py (2 additions, 2 deletions)

@@ -46,8 +46,8 @@
 # The encoding of source files.
 # source_encoding = 'utf-8-sig'
 
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 # General information about the project.
 project = u'Compute API Guide'
 bug_tag = u'api-guide'
api-ref/source/conf.py (2 additions, 2 deletions)

@@ -36,8 +36,8 @@
 # The suffix of source filenames.
 source_suffix = '.rst'
 
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 
 # General information about the project.
 copyright = u'2010-present, OpenStack Foundation'
doc/source/conf.py (2 additions, 2 deletions)

@@ -65,8 +65,8 @@
 
 todo_include_todos = True
 
-# The master toctree document.
-master_doc = 'index'
+# The main toctree document.
+main_doc = 'index'
 
 # General information about the project.
 copyright = u'2010-present, OpenStack Foundation'
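Note on the three conf.py hunks above: Sphinx resolves the toctree root through the `master_doc` setting (with `root_doc` added as an alias in Sphinx 4.0), so a bare `main_doc` assignment only takes effect if the build tooling maps it back. A minimal, illustrative conf.py sketch that keeps the new name while staying buildable on stock Sphinx; the alias line is an assumption, not part of this change:

```python
# Root document of the toctree; 'index' refers to index.rst.
main_doc = 'index'

# Hypothetical shim: mirror the renamed setting onto the name that
# stock Sphinx actually reads (master_doc; root_doc since Sphinx 4.0).
master_doc = main_doc
```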
nova/api/openstack/compute/server_groups.py (1 addition, 1 deletion)

@@ -55,7 +55,7 @@ def _get_not_deleted(context, uuids):
     cell_mappings = {}
     found_inst_uuids = []
 
-    # Get a master list of cell mappings, and a list of instance
+    # Get a main list of cell mappings, and a list of instance
     # uuids organized by cell
     for im in mappings:
         if not im.cell_mapping:
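The hunk above is the start of a grouping pass: collect one mapping per cell, plus the requested instance uuids bucketed by the cell they live in. A self-contained sketch of that pattern, using stand-in classes for nova's InstanceMapping and CellMapping objects (the field shapes are assumptions for illustration only):

```python
from dataclasses import dataclass
from typing import Optional

@dataclass
class CellMapping:            # stand-in for nova.objects.CellMapping
    uuid: str

@dataclass
class InstanceMapping:        # stand-in for nova.objects.InstanceMapping
    instance_uuid: str
    cell_mapping: Optional[CellMapping]

def group_by_cell(mappings):
    """Bucket instance uuids by the cell they are mapped to."""
    cell_mappings = {}        # cell uuid -> CellMapping
    uuids_by_cell = {}        # cell uuid -> [instance uuid, ...]
    for im in mappings:
        if not im.cell_mapping:
            # Instances that were never scheduled have no cell yet.
            continue
        cell_uuid = im.cell_mapping.uuid
        cell_mappings.setdefault(cell_uuid, im.cell_mapping)
        uuids_by_cell.setdefault(cell_uuid, []).append(im.instance_uuid)
    return cell_mappings, uuids_by_cell
```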
nova/compute/api.py (1 addition, 1 deletion)

@@ -3486,7 +3486,7 @@ def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
 
             # Check whether host exists or not.
             node = objects.ComputeNode.get_first_node_by_host_for_old_compat(
-                context, host_name, use_slave=True)
+                context, host_name, use_subordinate=True)
 
         self._check_auto_disk_config(instance, **extra_instance_updates)
 
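One mechanical point this hunk illustrates: keyword arguments are matched by name at call time, so call sites and definitions have to be renamed in the same change or callers break immediately. A standalone repro of the failure mode, with a stand-in for the pre-rename signature:

```python
def get_first_node_by_host_for_old_compat(context, host_name,
                                          use_slave=False):
    """Stand-in for the old, pre-rename signature."""
    return 'node'

# A caller updated to the new keyword, against the old definition:
try:
    get_first_node_by_host_for_old_compat('ctx', 'host1',
                                          use_subordinate=True)
except TypeError as exc:
    # "... got an unexpected keyword argument 'use_subordinate'"
    print(exc)
```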
nova/compute/manager.py (35 additions, 35 deletions)

@@ -637,7 +637,7 @@ def _get_instances_on_driver(self, context, filters=None):
                 return objects.InstanceList()
             filters['uuid'] = driver_uuids
             local_instances = objects.InstanceList.get_by_filters(
-                context, filters, use_slave=True)
+                context, filters, use_subordinate=True)
             return local_instances
         except NotImplementedError:
             pass
@@ -649,7 +649,7 @@ def _get_instances_on_driver(self, context, filters=None):
         # Without this all instance data would be fetched from db.
         filters['host'] = self.host
         instances = objects.InstanceList.get_by_filters(context, filters,
-                                                        use_slave=True)
+                                                        use_subordinate=True)
         name_map = {instance.name: instance for instance in instances}
         local_instances = []
         for driver_instance in driver_instances:
@@ -1477,7 +1477,7 @@ def _check_instance_build_time(self, context):
                    'host': self.host}
 
         building_insts = objects.InstanceList.get_by_filters(context,
-            filters, expected_attrs=[], use_slave=True)
+            filters, expected_attrs=[], use_subordinate=True)
 
         for instance in building_insts:
             if timeutils.is_older_than(instance.created_at, timeout):
@@ -1789,7 +1789,7 @@ def _sync_scheduler_instance_info(self, context):
         context = context.elevated()
         instances = objects.InstanceList.get_by_host(context, self.host,
                                                      expected_attrs=[],
-                                                     use_slave=True)
+                                                     use_subordinate=True)
         uuids = [instance.uuid for instance in instances]
         self.query_client.sync_instance_info(context, self.host, uuids)
 
@@ -7524,7 +7524,7 @@ def _heal_instance_info_cache(self, context):
             # The list of instances to heal is empty so rebuild it
             LOG.debug('Rebuilding the list of instances to heal')
             db_instances = objects.InstanceList.get_by_host(
-                context, self.host, expected_attrs=[], use_slave=True)
+                context, self.host, expected_attrs=[], use_subordinate=True)
             for inst in db_instances:
                 # We don't want to refresh the cache for instances
                 # which are building or deleting so don't put them
@@ -7555,7 +7555,7 @@ def _heal_instance_info_cache(self, context):
                         context, instance_uuids.pop(0),
                         expected_attrs=['system_metadata', 'info_cache',
                                         'flavor'],
-                        use_slave=True)
+                        use_subordinate=True)
                 except exception.InstanceNotFound:
                     # Instance is gone. Try to grab another.
                     continue
@@ -7616,7 +7616,7 @@ def _poll_rebooting_instances(self, context):
                                   task_states.REBOOT_PENDING],
                    'host': self.host}
         rebooting = objects.InstanceList.get_by_filters(
-            context, filters, expected_attrs=[], use_slave=True)
+            context, filters, expected_attrs=[], use_subordinate=True)
 
         to_poll = []
         for instance in rebooting:
@@ -7633,7 +7633,7 @@ def _poll_rescued_instances(self, context):
                    'host': self.host}
         rescued_instances = objects.InstanceList.get_by_filters(
             context, filters, expected_attrs=["system_metadata"],
-            use_slave=True)
+            use_subordinate=True)
 
         to_unrescue = []
         for instance in rescued_instances:
@@ -7651,7 +7651,7 @@ def _poll_unconfirmed_resizes(self, context):
 
         migrations = objects.MigrationList.get_unconfirmed_by_dest_compute(
             context, CONF.resize_confirm_window, self.host,
-            use_slave=True)
+            use_subordinate=True)
 
         migrations_info = dict(migration_count=len(migrations),
                                confirm_window=CONF.resize_confirm_window)
@@ -7679,7 +7679,7 @@ def _set_migration_to_error(migration, reason, **kwargs):
             try:
                 instance = objects.Instance.get_by_uuid(context,
                         instance_uuid, expected_attrs=expected_attrs,
-                        use_slave=True)
+                        use_subordinate=True)
             except exception.InstanceNotFound:
                 reason = (_("Instance %s not found") %
                           instance_uuid)
@@ -7739,7 +7739,7 @@ def _poll_shelved_instances(self, context):
                    'host': self.host}
         shelved_instances = objects.InstanceList.get_by_filters(
             context, filters=filters, expected_attrs=['system_metadata'],
-            use_slave=True)
+            use_subordinate=True)
 
         to_gc = []
         for instance in shelved_instances:
@@ -7772,7 +7772,7 @@ def _instance_usage_audit(self, context):
                 context, begin, end, host=self.host,
                 expected_attrs=['system_metadata', 'info_cache', 'metadata',
                                 'flavor'],
-                use_slave=True)
+                use_subordinate=True)
             num_instances = len(instances)
             errors = 0
             successes = 0
@@ -7826,7 +7826,7 @@ def _poll_bandwidth_usage(self, context):
 
         instances = objects.InstanceList.get_by_host(context,
                                                      self.host,
-                                                     use_slave=True)
+                                                     use_subordinate=True)
         try:
             bw_counters = self.driver.get_all_bw_counters(instances)
         except NotImplementedError:
@@ -7851,7 +7851,7 @@ def _poll_bandwidth_usage(self, context):
             last_ctr_out = None
             usage = objects.BandwidthUsage.get_by_instance_uuid_and_mac(
                 context, bw_ctr['uuid'], bw_ctr['mac_address'],
-                start_period=start_time, use_slave=True)
+                start_period=start_time, use_subordinate=True)
             if usage:
                 bw_in = usage.bw_in
                 bw_out = usage.bw_out
@@ -7861,7 +7861,7 @@ def _poll_bandwidth_usage(self, context):
                 usage = (objects.BandwidthUsage.
                          get_by_instance_uuid_and_mac(
                     context, bw_ctr['uuid'], bw_ctr['mac_address'],
-                    start_period=prev_time, use_slave=True))
+                    start_period=prev_time, use_subordinate=True))
                 if usage:
                     last_ctr_in = usage.last_ctr_in
                     last_ctr_out = usage.last_ctr_out
@@ -7890,14 +7890,14 @@ def _poll_bandwidth_usage(self, context):
                               start_period=start_time,
                               last_refreshed=refreshed)
 
-    def _get_host_volume_bdms(self, context, use_slave=False):
+    def _get_host_volume_bdms(self, context, use_subordinate=False):
         """Return all block device mappings on a compute host."""
         compute_host_bdms = []
         instances = objects.InstanceList.get_by_host(context, self.host,
-                                                     use_slave=use_slave)
+                                                     use_subordinate=use_subordinate)
         for instance in instances:
             bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
-                context, instance.uuid, use_slave=use_slave)
+                context, instance.uuid, use_subordinate=use_subordinate)
             instance_bdms = [bdm for bdm in bdms if bdm.is_volume]
             compute_host_bdms.append(dict(instance=instance,
                                           instance_bdms=instance_bdms))
@@ -7930,7 +7930,7 @@ def _poll_volume_usage(self, context):
             return
 
         compute_host_bdms = self._get_host_volume_bdms(context,
-                                                       use_slave=True)
+                                                       use_subordinate=True)
         if not compute_host_bdms:
             return
 
@@ -7956,7 +7956,7 @@ def _sync_power_states(self, context):
         """
         db_instances = objects.InstanceList.get_by_host(context, self.host,
                                                         expected_attrs=[],
-                                                        use_slave=True)
+                                                        use_subordinate=True)
 
         try:
             num_vm_instances = self.driver.get_num_instances()
@@ -8022,7 +8022,7 @@ def _query_driver_power_state_and_sync(self, context, db_instance):
             self._sync_instance_power_state(context,
                                             db_instance,
                                             vm_power_state,
-                                            use_slave=True)
+                                            use_subordinate=True)
         except exception.InstanceNotFound:
             # NOTE(hanlind): If the instance gets deleted during sync,
             # silently ignore.
@@ -8067,7 +8067,7 @@ def _stop_unexpected_shutdown_instance(self, context, vm_state,
                       instance=db_instance)
 
     def _sync_instance_power_state(self, context, db_instance, vm_power_state,
-                                   use_slave=False):
+                                   use_subordinate=False):
         """Align instance power state between the database and hypervisor.
 
         If the instance is not found on the hypervisor, but is in the database,
@@ -8076,7 +8076,7 @@ def _sync_instance_power_state(self, context, db_instance, vm_power_state,
 
         # We re-query the DB to get the latest instance info to minimize
         # (not eliminate) race condition.
-        db_instance.refresh(use_slave=use_slave)
+        db_instance.refresh(use_subordinate=use_subordinate)
         db_power_state = db_instance.power_state
         vm_state = db_instance.vm_state
 
@@ -8216,7 +8216,7 @@ def _reclaim_queued_deletes(self, context):
         instances = objects.InstanceList.get_by_filters(
             context, filters,
             expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,
-            use_slave=True)
+            use_subordinate=True)
         for instance in instances:
             if self._deleted_old_enough(instance, interval):
                 bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
@@ -8289,7 +8289,7 @@ def update_available_resource(self, context, startup=False):
         """
 
         compute_nodes_in_db = self._get_compute_nodes_in_db(context,
-                                                            use_slave=True,
+                                                            use_subordinate=True,
                                                             startup=startup)
         try:
             nodenames = set(self.driver.get_available_nodes())
@@ -8316,11 +8316,11 @@ def update_available_resource(self, context, startup=False):
             self._update_available_resource_for_node(context, nodename,
                                                      startup=startup)
 
-    def _get_compute_nodes_in_db(self, context, use_slave=False,
+    def _get_compute_nodes_in_db(self, context, use_subordinate=False,
                                  startup=False):
         try:
             return objects.ComputeNodeList.get_all_by_host(context, self.host,
-                                                           use_slave=use_slave)
+                                                           use_subordinate=use_subordinate)
         except exception.NotFound:
             if startup:
                 LOG.warning(
@@ -8394,7 +8394,7 @@ def _cleanup_running_deleted_instances(self, context):
                              "DELETED but still present on host.",
                              instance.name, instance=instance)
                     bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
-                        context, instance.uuid, use_slave=True)
+                        context, instance.uuid, use_subordinate=True)
                     self.instance_events.clear_events_for_instance(instance)
                     try:
                         self._shutdown_instance(context, instance, bdms,
@@ -8486,11 +8486,11 @@ def _error_out_instance_on_exception(self, context, instance,
             self._set_instance_obj_error_state(context, instance)
 
     @wrap_exception()
-    def add_aggregate_host(self, context, aggregate, host, slave_info):
+    def add_aggregate_host(self, context, aggregate, host, subordinate_info):
         """Notify hypervisor of change (for hypervisor pools)."""
         try:
             self.driver.add_to_aggregate(context, aggregate, host,
-                                         slave_info=slave_info)
+                                         subordinate_info=subordinate_info)
         except NotImplementedError:
             LOG.debug('Hypervisor driver does not support '
                       'add_aggregate_host')
@@ -8502,11 +8502,11 @@ def add_aggregate_host(self, context, aggregate, host, slave_info):
                                                aggregate, host)
 
     @wrap_exception()
-    def remove_aggregate_host(self, context, host, slave_info, aggregate):
+    def remove_aggregate_host(self, context, host, subordinate_info, aggregate):
         """Removes a host from a physical hypervisor pool."""
         try:
             self.driver.remove_from_aggregate(context, aggregate, host,
-                                              slave_info=slave_info)
+                                              subordinate_info=subordinate_info)
         except NotImplementedError:
             LOG.debug('Hypervisor driver does not support '
                       'remove_aggregate_host')
@@ -8709,7 +8709,7 @@ def _run_image_cache_manager_pass(self, context):
                    'soft_deleted': True,
                    'host': nodes}
         filtered_instances = objects.InstanceList.get_by_filters(context,
-                 filters, expected_attrs=[], use_slave=True)
+                 filters, expected_attrs=[], use_subordinate=True)
 
         self.driver.manage_image_cache(context, filtered_instances)
 
@@ -8724,7 +8724,7 @@ def _run_pending_deletes(self, context):
         attrs = ['system_metadata']
         with utils.temporary_mutation(context, read_deleted='yes'):
             instances = objects.InstanceList.get_by_filters(
-                context, filters, expected_attrs=attrs, use_slave=True)
+                context, filters, expected_attrs=attrs, use_subordinate=True)
         LOG.debug('There are %d instances to clean', len(instances))
 
         for instance in instances:
@@ -8768,7 +8768,7 @@ def _cleanup_incomplete_migrations(self, context):
         attrs = ['info_cache', 'security_groups', 'system_metadata']
         with utils.temporary_mutation(context, read_deleted='yes'):
             instances = objects.InstanceList.get_by_filters(
-                context, inst_filters, expected_attrs=attrs, use_slave=True)
+                context, inst_filters, expected_attrs=attrs, use_subordinate=True)
 
         for instance in instances:
             if instance.host != CONF.host:
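All of the use_subordinate=True call sites above share one idea: periodic housekeeping tasks can tolerate replication lag, so their reads may be served by an asynchronously updated database replica, while writes and read-modify-write paths (such as the refresh in _sync_instance_power_state) go back to the primary. A toy, self-contained illustration of the flag's semantics; the two dicts stand in for the primary and replica databases and are not nova's actual DB layer:

```python
PRIMARY = {'vm-1': 'ACTIVE'}   # authoritative rows
REPLICA = {'vm-1': 'BUILD'}    # may lag behind the primary

def get_power_state(uuid, use_subordinate=False):
    """Route the read to the replica only when the caller opts in."""
    db = REPLICA if use_subordinate else PRIMARY
    return db[uuid]

# A periodic task accepts possibly stale data to offload the primary...
assert get_power_state('vm-1', use_subordinate=True) == 'BUILD'
# ...but re-reads authoritative state before acting on it.
assert get_power_state('vm-1') == 'ACTIVE'
```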
nova/compute/rpcapi.py (5 additions, 5 deletions)

@@ -156,7 +156,7 @@ class ComputeAPI(object):
 
     * 2.0 - Remove 1.x backwards compat
     * 2.1 - Adds orig_sys_metadata to rebuild_instance()
-    * 2.2 - Adds slave_info parameter to add_aggregate_host() and
+    * 2.2 - Adds subordinate_info parameter to add_aggregate_host() and
             remove_aggregate_host()
     * 2.3 - Adds volume_id to reserve_block_device_name()
     * 2.4 - Add bdms to terminate_instance
@@ -479,7 +479,7 @@ def get_client(self, target, version_cap, serializer):
                                       call_monitor_timeout=cmt)
 
     def add_aggregate_host(self, ctxt, host, aggregate, host_param,
-                           slave_info=None):
+                           subordinate_info=None):
         '''Add aggregate host.
 
         :param ctxt: request context
@@ -493,7 +493,7 @@ def add_aggregate_host(self, ctxt, host, aggregate, host_param,
                                    server=host, version=version)
         cctxt.cast(ctxt, 'add_aggregate_host',
                    aggregate=aggregate, host=host_param,
-                   slave_info=slave_info)
+                   subordinate_info=subordinate_info)
 
     def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
         version = '5.0'
@@ -818,7 +818,7 @@ def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
                    **msg_args)
 
     def remove_aggregate_host(self, ctxt, host, aggregate, host_param,
-                              slave_info=None):
+                              subordinate_info=None):
         '''Remove aggregate host.
 
         :param ctxt: request context
@@ -832,7 +832,7 @@ def remove_aggregate_host(self, ctxt, host, aggregate, host_param,
                                    server=host, version=version)
         cctxt.cast(ctxt, 'remove_aggregate_host',
                    aggregate=aggregate, host=host_param,
-                   slave_info=slave_info)
+                   subordinate_info=subordinate_info)
 
     def remove_fixed_ip_from_instance(self, ctxt, instance, address):
         version = '5.0'
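A hedged usage sketch of the renamed RPC parameter, following the add_aggregate_host() signature above: the client casts (fire-and-forget) to the compute service on `host`, and subordinate_info carries driver-specific pool data that historically only hypervisor-pool drivers consumed. The surrounding objects (ctxt, aggregate, pool_info) are assumed to exist and are illustrative:

```python
from nova.compute import rpcapi as compute_rpcapi

rpcapi = compute_rpcapi.ComputeAPI()
# host: compute service that receives the cast
# host_param: host being added to the aggregate
# subordinate_info: opaque pool data; ignored by drivers without
#                   hypervisor-pool support
rpcapi.add_aggregate_host(ctxt, host='pool-controller',
                          aggregate=aggregate, host_param='new-node',
                          subordinate_info=pool_info)
```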
nova/conductor/manager.py (1 addition, 1 deletion)

@@ -938,7 +938,7 @@ def _allocate_for_evacuate_dest_host(self, context, instance, host,
                 context, instance.host, instance.node)
             dest_node = (
                 objects.ComputeNode.get_first_node_by_host_for_old_compat(
-                    context, host, use_slave=True))
+                    context, host, use_subordinate=True))
         except exception.ComputeHostNotFound as ex:
             with excutils.save_and_reraise_exception():
                 self._set_vm_state_and_notify(