mirror of https://github.com/gryf/openstack.git (synced 2025-12-17 11:30:24 +01:00)
Added fixes and readme for the feature
@@ -0,0 +1,215 @@
From 0f820a60994586debef47a59ebf8d9eef225b69c Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Wed, 27 Dec 2017 13:51:25 +0100
Subject: [PATCH 1/8] allow compute nodes to be associated with host agg

This is basically an Ocata backport patch from Jay Pipes:
https://review.openstack.org/#/c/526753
---
 nova/compute/api.py                                 |  36 +++++-
 nova/tests/functional/compute/__init__.py           |   0
 .../tests/functional/compute/test_aggregate_api.py  | 127 +++++++++++++++++++++
 3 files changed, 159 insertions(+), 4 deletions(-)
 create mode 100644 nova/tests/functional/compute/__init__.py
 create mode 100644 nova/tests/functional/compute/test_aggregate_api.py

diff --git a/nova/compute/api.py b/nova/compute/api.py
index 6f1371b45f..39437e6c16 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -4548,6 +4548,31 @@ class AggregateAPI(base.Base):
         availability_zones.update_host_availability_zone_cache(context,
                                                                 host_name)
 
+    def _service_or_compute_node_exists(self, ctx, host_or_node):
+        """Returns True if a service host or compute node record could be
+        found for the supplied host_or_node string. We first check to see if a
+        service record can be found with the host matching the host_or_node
+        parameter by looking at the host mapping records in the API database.
+        If we don't find a service record there, we then ask all cell
+        databases to find a compute node with a hypervisor_hostname matching
+        the supplied host_or_node parameter.
+        """
+        # NOTE(gryf): we don't handle cells in Ocata yet
+        try:
+            objects.Service.get_by_compute_host(ctx, host_or_node)
+            return True
+        except exception.ComputeHostNotFound:
+            pass
+
+        found_nodes = len(objects.ComputeNodeList.get_by_hypervisor(
+            ctx, host_or_node))
+
+        if found_nodes > 1:
+            LOG.debug("Searching for compute nodes matching %s "
+                      "found %d results but expected 1 result.",
+                      host_or_node, found_nodes)
+        return found_nodes == 1
+
     @wrap_exception()
     def add_host_to_aggregate(self, context, aggregate_id, host_name):
         """Adds the host to an aggregate."""
@@ -4556,8 +4581,9 @@ class AggregateAPI(base.Base):
         compute_utils.notify_about_aggregate_update(context,
                                                     "addhost.start",
                                                     aggregate_payload)
-        # validates the host; ComputeHostNotFound is raised if invalid
-        objects.Service.get_by_compute_host(context, host_name)
+
+        if not self._service_or_compute_node_exists(context, host_name):
+            raise exception.ComputeHostNotFound(host=host_name)
 
         aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
         self.is_safe_to_update_az(context, aggregate.metadata,
@@ -4583,8 +4609,10 @@ class AggregateAPI(base.Base):
         compute_utils.notify_about_aggregate_update(context,
                                                     "removehost.start",
                                                     aggregate_payload)
-        # validates the host; ComputeHostNotFound is raised if invalid
-        objects.Service.get_by_compute_host(context, host_name)
+
+        if not self._service_or_compute_node_exists(context, host_name):
+            raise exception.ComputeHostNotFound(host=host_name)
+
         aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
         aggregate.delete_host(host_name)
         self.scheduler_client.update_aggregates(context, [aggregate])
diff --git a/nova/tests/functional/compute/__init__.py b/nova/tests/functional/compute/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/nova/tests/functional/compute/test_aggregate_api.py b/nova/tests/functional/compute/test_aggregate_api.py
new file mode 100644
index 0000000000..7946fddcfe
--- /dev/null
+++ b/nova/tests/functional/compute/test_aggregate_api.py
@@ -0,0 +1,127 @@
|
||||
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
+# not use this file except in compliance with the License. You may obtain
|
||||
+# a copy of the License at
|
||||
+#
|
||||
+# http://www.apache.org/licenses/LICENSE-2.0
|
||||
+#
|
||||
+# Unless required by applicable law or agreed to in writing, software
|
||||
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
+# License for the specific language governing permissions and limitations
|
||||
+# under the License.
|
||||
+
|
||||
+from nova.compute import api as compute_api
|
||||
+from nova import context
|
||||
+from nova import exception
|
||||
+from nova import objects
|
||||
+from nova import test
|
||||
+from nova.tests import fixtures as nova_fixtures
|
||||
+from nova.tests import uuidsentinel as uuids
|
||||
+
|
||||
+
|
||||
+class ComputeAggregateAPIMultiCellTestCase(test.NoDBTestCase):
|
||||
+ """Tests for the AggregateAPI with multiple cells allowing either service
|
||||
+ hosts or compute nodes to be associated with an aggregate.
|
||||
+ """
|
||||
+
|
||||
+ USES_DB_SELF = True
|
||||
+
|
||||
+ def setUp(self):
|
||||
+ super(ComputeAggregateAPIMultiCellTestCase, self).setUp()
|
||||
+ self.agg_api = compute_api.AggregateAPI()
|
||||
+ self.useFixture(nova_fixtures.Database(database='api'))
|
||||
+ celldbs = nova_fixtures.CellDatabases()
|
||||
+ celldbs.add_cell_database(objects.CellMapping.CELL0_UUID, default=True)
|
||||
+ self.useFixture(celldbs)
|
||||
+
|
||||
+ self.ctxt = context.get_admin_context()
|
||||
+ cell0 = objects.CellMapping(
|
||||
+ context=self.ctxt, uuid=objects.CellMapping.CELL0_UUID,
|
||||
+ database_connection=objects.CellMapping.CELL0_UUID,
|
||||
+ transport_url='none:///')
|
||||
+ cell0.create()
|
||||
+ self.cell_mappings = (cell0,)
|
||||
+
|
||||
+ # create two Ironic nodes
|
||||
+ for id_ in (1, 2):
|
||||
+ hostname = 'ironic_host_%s' % id_
|
||||
+ with context.target_cell(self.ctxt, cell0) as cctxt:
|
||||
+ svc = objects.Service(cctxt, host=hostname,
|
||||
+ binary='nova-compute',
|
||||
+ topic='nova-compute')
|
||||
+ svc.create()
|
||||
+
|
||||
+ nodename = 'ironic_node_%s' % id_
|
||||
+ compute_node_uuid = getattr(uuids, nodename)
|
||||
+ node = objects.ComputeNode(
|
||||
+ cctxt, uuid=compute_node_uuid, host=hostname,
|
||||
+ vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,
|
||||
+ memory_mb_used=0, local_gb_used=0, cpu_info='{}',
|
||||
+ hypervisor_type='ironic', hypervisor_version=10,
|
||||
+ hypervisor_hostname=nodename)
|
||||
+ node.create()
|
||||
+
|
||||
+ # create a compute node for VMs along with a corresponding nova-compute
|
||||
+ # service host in cell1
|
||||
+ with context.target_cell(self.ctxt, cell0) as cctxt:
|
||||
+ hostname = 'vm_host_1'
|
||||
+ svc = objects.Service(cctxt, host=hostname,
|
||||
+ binary='nova-compute',
|
||||
+ topic='nova-compute')
|
||||
+ svc.create()
|
||||
+ compute_node_uuid = getattr(uuids, hostname)
|
||||
+ node = objects.ComputeNode(
|
||||
+ cctxt, uuid=compute_node_uuid, host=hostname,
|
||||
+ vcpus=2, memory_mb=2048, local_gb=128, vcpus_used=0,
|
||||
+ memory_mb_used=0, local_gb_used=0, cpu_info='{}',
|
||||
+ hypervisor_type='libvirt', hypervisor_version=10,
|
||||
+ hypervisor_hostname=hostname)
|
||||
+ node.create()
|
||||
+
|
||||
+ def test_service_hostname(self):
|
||||
+ """Test to make sure we can associate and disassociate an aggregate
|
||||
+ with a service host.
|
||||
+ """
|
||||
+ agg = objects.Aggregate(self.ctxt, name="rack1_baremetal")
|
||||
+ agg.create()
|
||||
+
|
||||
+ agg_id = agg.id
|
||||
+
|
||||
+ # There is no such service host called unknown_host_cell1, so should
|
||||
+ # get back a ComputeHostNotFound
|
||||
+ self.assertRaises(exception.ComputeHostNotFound,
|
||||
+ self.agg_api.add_host_to_aggregate, self.ctxt,
|
||||
+ agg_id, 'unknown_host_cell1')
|
||||
+ self.assertRaises(exception.ComputeHostNotFound,
|
||||
+ self.agg_api.remove_host_from_aggregate, self.ctxt,
|
||||
+ agg_id, 'unknown_host_cell1')
|
||||
+
|
||||
+ hosts = ('ironic_host_1', 'vm_host_1')
|
||||
+ for service_host in hosts:
|
||||
+ self.agg_api.add_host_to_aggregate(self.ctxt, agg_id, service_host)
|
||||
+ self.agg_api.remove_host_from_aggregate(self.ctxt, agg_id,
|
||||
+ service_host)
|
||||
+
|
||||
+ def test_compute_nodename(self):
|
||||
+ """Test to make sure we can associate and disassociate an aggregate
|
||||
+ with a compute node by its hypervisor_hostname.
|
||||
+ """
|
||||
+ agg = objects.Aggregate(self.ctxt, name="rack1_baremetal")
|
||||
+ agg.create()
|
||||
+
|
||||
+ agg_id = agg.id
|
||||
+
|
||||
+ # There is no such compute node called unknown_host_cell1, so should
|
||||
+ # get back a ComputeHostNotFound
|
||||
+ self.assertRaises(exception.ComputeHostNotFound,
|
||||
+ self.agg_api.add_host_to_aggregate, self.ctxt,
|
||||
+ agg_id, getattr(uuids, 'unknown_node_cell1'))
|
||||
+ self.assertRaises(exception.ComputeHostNotFound,
|
||||
+ self.agg_api.remove_host_from_aggregate, self.ctxt,
|
||||
+ agg_id, getattr(uuids, 'unknown_host_cell1'))
|
||||
+
|
||||
+ nodenames = ('ironic_node_2', 'ironic_node_1', 'vm_host_1')
|
||||
+ for nodename in nodenames:
|
||||
+ self.agg_api.add_host_to_aggregate(self.ctxt, agg_id, nodename)
|
||||
+ self.agg_api.remove_host_from_aggregate(self.ctxt, agg_id,
|
||||
+ nodename)
|
||||
--
|
||||
2.16.1
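
For reference, a hedged usage sketch of what PATCH 1/8 enables, mirroring the functional test above (the aggregate and node names are illustrative): AggregateAPI.add_host_to_aggregate() and remove_host_from_aggregate() now accept either a nova-compute service host or a compute node's hypervisor_hostname, such as an Ironic node.

    from nova.compute import api as compute_api
    from nova import context
    from nova import objects

    ctxt = context.get_admin_context()
    agg_api = compute_api.AggregateAPI()

    # Create an aggregate to hold bare-metal nodes (illustrative name).
    agg = objects.Aggregate(ctxt, name='rack1_baremetal')
    agg.create()

    # 'ironic_node_1' is a hypervisor_hostname, not a service host; before
    # this patch the call below would raise ComputeHostNotFound.
    agg_api.add_host_to_aggregate(ctxt, agg.id, 'ironic_node_1')
    agg_api.remove_host_from_aggregate(ctxt, agg.id, 'ironic_node_1')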
@@ -0,0 +1,241 @@
From f5e23e436d341a44dafe5a18876cfcadc809b46b Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Mon, 8 Jan 2018 14:33:45 +0100
Subject: [PATCH 2/8] Remove server group sched filter support caching

Backport of https://review.openstack.org/#/c/529200 by Jay Pipes to
Ocata.
---
 nova/scheduler/utils.py                           | 91 ++++++++++-------
 nova/tests/functional/test_server_group.py        | 19 -----
 nova/tests/unit/scheduler/test_scheduler_utils.py | 16 ++--
 3 files changed, 46 insertions(+), 80 deletions(-)

diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
|
||||
index 5e9222d14e..682b4cc199 100644
|
||||
--- a/nova/scheduler/utils.py
|
||||
+++ b/nova/scheduler/utils.py
|
||||
@@ -250,23 +250,31 @@ def parse_options(opts, sep='=', converter=str, name=""):
|
||||
return good
|
||||
|
||||
|
||||
-def validate_filter(filter):
|
||||
- """Validates that the filter is configured in the default filters."""
|
||||
- return filter in CONF.filter_scheduler.enabled_filters
|
||||
+def _validate_filter(filter):
|
||||
+ """Validates that the filter is configured in the defaults.
|
||||
|
||||
+ :param filter: Either short or long module-scoped name of a filter
|
||||
+ """
|
||||
+ for enabled_filter in CONF.filter_scheduler.enabled_filters:
|
||||
+ if filter in enabled_filter:
|
||||
+ return True
|
||||
+ return False
|
||||
+
|
||||
+
|
||||
+def _validate_weigher(weigher):
|
||||
+ """Validates that the weigher is configured in the defaults.
|
||||
+
|
||||
+ :param weigher: Either short or long module-scoped name of a weigher
|
||||
+ """
|
||||
|
||||
-def validate_weigher(weigher):
|
||||
- """Validates that the weigher is configured in the default weighers."""
|
||||
weight_classes = CONF.filter_scheduler.weight_classes
|
||||
if 'nova.scheduler.weights.all_weighers' in weight_classes:
|
||||
return True
|
||||
- return weigher in weight_classes
|
||||
-
|
||||
|
||||
-_SUPPORTS_AFFINITY = None
|
||||
-_SUPPORTS_ANTI_AFFINITY = None
|
||||
-_SUPPORTS_SOFT_AFFINITY = None
|
||||
-_SUPPORTS_SOFT_ANTI_AFFINITY = None
|
||||
+ for enabled_weigher in weight_classes:
|
||||
+ if weigher in enabled_weigher:
|
||||
+ return True
|
||||
+ return False
|
||||
|
||||
|
||||
def _get_group_details(context, instance_uuid, user_group_hosts=None):
|
||||
@@ -279,23 +287,6 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
|
||||
|
||||
:returns: None or namedtuple GroupDetails
|
||||
"""
|
||||
- global _SUPPORTS_AFFINITY
|
||||
- if _SUPPORTS_AFFINITY is None:
|
||||
- _SUPPORTS_AFFINITY = validate_filter(
|
||||
- 'ServerGroupAffinityFilter')
|
||||
- global _SUPPORTS_ANTI_AFFINITY
|
||||
- if _SUPPORTS_ANTI_AFFINITY is None:
|
||||
- _SUPPORTS_ANTI_AFFINITY = validate_filter(
|
||||
- 'ServerGroupAntiAffinityFilter')
|
||||
- global _SUPPORTS_SOFT_AFFINITY
|
||||
- if _SUPPORTS_SOFT_AFFINITY is None:
|
||||
- _SUPPORTS_SOFT_AFFINITY = validate_weigher(
|
||||
- 'nova.scheduler.weights.affinity.ServerGroupSoftAffinityWeigher')
|
||||
- global _SUPPORTS_SOFT_ANTI_AFFINITY
|
||||
- if _SUPPORTS_SOFT_ANTI_AFFINITY is None:
|
||||
- _SUPPORTS_SOFT_ANTI_AFFINITY = validate_weigher(
|
||||
- 'nova.scheduler.weights.affinity.'
|
||||
- 'ServerGroupSoftAntiAffinityWeigher')
|
||||
|
||||
if not instance_uuid:
|
||||
return
|
||||
@@ -306,31 +297,25 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
|
||||
except exception.InstanceGroupNotFound:
|
||||
return
|
||||
|
||||
- policies = set(('anti-affinity', 'affinity', 'soft-affinity',
|
||||
- 'soft-anti-affinity'))
|
||||
- if any((policy in policies) for policy in group.policies):
|
||||
- if not _SUPPORTS_AFFINITY and 'affinity' in group.policies:
|
||||
- msg = _("ServerGroupAffinityFilter not configured")
|
||||
- LOG.error(msg)
|
||||
- raise exception.UnsupportedPolicyException(reason=msg)
|
||||
- if not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' in group.policies:
|
||||
- msg = _("ServerGroupAntiAffinityFilter not configured")
|
||||
- LOG.error(msg)
|
||||
- raise exception.UnsupportedPolicyException(reason=msg)
|
||||
- if (not _SUPPORTS_SOFT_AFFINITY
|
||||
- and 'soft-affinity' in group.policies):
|
||||
- msg = _("ServerGroupSoftAffinityWeigher not configured")
|
||||
- LOG.error(msg)
|
||||
- raise exception.UnsupportedPolicyException(reason=msg)
|
||||
- if (not _SUPPORTS_SOFT_ANTI_AFFINITY
|
||||
- and 'soft-anti-affinity' in group.policies):
|
||||
- msg = _("ServerGroupSoftAntiAffinityWeigher not configured")
|
||||
- LOG.error(msg)
|
||||
- raise exception.UnsupportedPolicyException(reason=msg)
|
||||
- group_hosts = set(group.get_hosts())
|
||||
- user_hosts = set(user_group_hosts) if user_group_hosts else set()
|
||||
- return GroupDetails(hosts=user_hosts | group_hosts,
|
||||
- policies=group.policies, members=group.members)
|
||||
+ # NOTE(jaypipes): There's only a single policy for the server group
|
||||
+ group_policy = group.policies[0]
|
||||
+ checks = {'affinity': (_validate_filter, 'ServerGroupAffinityFilter'),
|
||||
+ 'anti-affinity': (_validate_filter,
|
||||
+ 'ServerGroupAntiAffinityFilter'),
|
||||
+ 'soft-affinity': (_validate_weigher,
|
||||
+ 'ServerGroupSoftAffinityWeigher'),
|
||||
+ 'soft-anti-affinity': (_validate_weigher,
|
||||
+ 'ServerGroupSoftAntiAffinityWeigher')}
|
||||
+
|
||||
+ check_fn, class_name = checks[group_policy]
|
||||
+ if not check_fn(class_name):
|
||||
+ msg = _("%s not configured") % class_name
|
||||
+ LOG.error(msg)
|
||||
+ raise exception.UnsupportedPolicyException(reason=msg)
|
||||
+ group_hosts = set(group.get_hosts())
|
||||
+ user_hosts = set(user_group_hosts) if user_group_hosts else set()
|
||||
+ return GroupDetails(hosts=user_hosts | group_hosts,
|
||||
+ policies=group.policies, members=group.members)
|
||||
|
||||
|
||||
def setup_instance_group(context, request_spec, filter_properties):
|
||||
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
|
||||
index cc7bec29f7..5e52088c14 100644
|
||||
--- a/nova/tests/functional/test_server_group.py
|
||||
+++ b/nova/tests/functional/test_server_group.py
|
||||
@@ -15,7 +15,6 @@
|
||||
|
||||
import time
|
||||
|
||||
-import mock
|
||||
from oslo_config import cfg
|
||||
|
||||
from nova import context
|
||||
@@ -496,7 +495,6 @@ class ServerGroupAffinityConfTest(ServerGroupTestBase):
|
||||
# Load only anti-affinity filter so affinity will be missing
|
||||
_enabled_filters = ['ServerGroupAntiAffinityFilter']
|
||||
|
||||
- @mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None)
|
||||
def test_affinity_no_filter(self):
|
||||
created_group = self.api.post_server_groups(self.affinity)
|
||||
|
||||
@@ -513,7 +511,6 @@ class ServerGroupAntiAffinityConfTest(ServerGroupTestBase):
|
||||
# Load only affinity filter so anti-affinity will be missing
|
||||
_enabled_filters = ['ServerGroupAffinityFilter']
|
||||
|
||||
- @mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None)
|
||||
def test_anti_affinity_no_filter(self):
|
||||
created_group = self.api.post_server_groups(self.anti_affinity)
|
||||
|
||||
@@ -536,7 +533,6 @@ class ServerGroupSoftAffinityConfTest(ServerGroupTestBase):
|
||||
return ['nova.scheduler.weights.affinity.'
|
||||
'ServerGroupSoftAntiAffinityWeigher']
|
||||
|
||||
- @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None)
|
||||
def test_soft_affinity_no_filter(self):
|
||||
created_group = self.api.post_server_groups(self.soft_affinity)
|
||||
|
||||
@@ -559,7 +555,6 @@ class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase):
|
||||
return ['nova.scheduler.weights.affinity.'
|
||||
'ServerGroupSoftAffinityWeigher']
|
||||
|
||||
- @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None)
|
||||
def test_soft_anti_affinity_no_filter(self):
|
||||
created_group = self.api.post_server_groups(self.soft_anti_affinity)
|
||||
|
||||
@@ -580,20 +575,6 @@ class ServerGroupTestV215(ServerGroupTestV21):
|
||||
soft_affinity = {'name': 'fake-name-4',
|
||||
'policies': ['soft-affinity']}
|
||||
|
||||
- def setUp(self):
|
||||
- super(ServerGroupTestV215, self).setUp()
|
||||
-
|
||||
- soft_affinity_patcher = mock.patch(
|
||||
- 'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY')
|
||||
- soft_anti_affinity_patcher = mock.patch(
|
||||
- 'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY')
|
||||
- self.addCleanup(soft_affinity_patcher.stop)
|
||||
- self.addCleanup(soft_anti_affinity_patcher.stop)
|
||||
- self.mock_soft_affinity = soft_affinity_patcher.start()
|
||||
- self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start()
|
||||
- self.mock_soft_affinity.return_value = None
|
||||
- self.mock_soft_anti_affinity.return_value = None
|
||||
-
|
||||
def _get_weight_classes(self):
|
||||
return ['nova.scheduler.weights.affinity.'
|
||||
'ServerGroupSoftAffinityWeigher',
|
||||
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
|
||||
index 1893a7e212..63035e742a 100644
|
||||
--- a/nova/tests/unit/scheduler/test_scheduler_utils.py
|
||||
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
|
||||
@@ -244,25 +244,25 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
|
||||
def test_validate_filters_configured(self):
|
||||
self.flags(enabled_filters='FakeFilter1,FakeFilter2',
|
||||
group='filter_scheduler')
|
||||
- self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
|
||||
- self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
|
||||
- self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
|
||||
+ self.assertTrue(scheduler_utils._validate_filter('FakeFilter1'))
|
||||
+ self.assertTrue(scheduler_utils._validate_filter('FakeFilter2'))
|
||||
+ self.assertFalse(scheduler_utils._validate_filter('FakeFilter3'))
|
||||
|
||||
def test_validate_weighers_configured(self):
|
||||
self.flags(weight_classes=[
|
||||
'ServerGroupSoftAntiAffinityWeigher', 'FakeFilter1'],
|
||||
group='filter_scheduler')
|
||||
|
||||
- self.assertTrue(scheduler_utils.validate_weigher(
|
||||
+ self.assertTrue(scheduler_utils._validate_weigher(
|
||||
'ServerGroupSoftAntiAffinityWeigher'))
|
||||
- self.assertTrue(scheduler_utils.validate_weigher('FakeFilter1'))
|
||||
- self.assertFalse(scheduler_utils.validate_weigher(
|
||||
+ self.assertTrue(scheduler_utils._validate_weigher('FakeFilter1'))
|
||||
+ self.assertFalse(scheduler_utils._validate_weigher(
|
||||
'ServerGroupSoftAffinityWeigher'))
|
||||
|
||||
def test_validate_weighers_configured_all_weighers(self):
|
||||
- self.assertTrue(scheduler_utils.validate_weigher(
|
||||
+ self.assertTrue(scheduler_utils._validate_weigher(
|
||||
'ServerGroupSoftAffinityWeigher'))
|
||||
- self.assertTrue(scheduler_utils.validate_weigher(
|
||||
+ self.assertTrue(scheduler_utils._validate_weigher(
|
||||
'ServerGroupSoftAntiAffinityWeigher'))
|
||||
|
||||
def _create_server_group(self, policy='anti-affinity'):
|
||||
--
|
||||
2.16.1
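
For reference, a minimal standalone sketch of the matching rule that replaces the removed _SUPPORTS_* caching in PATCH 2/8 (the configuration values below are assumed, for illustration only): a short or module-scoped filter/weigher name is accepted if it is contained in any configured entry.

    enabled_filters = ['ServerGroupAntiAffinityFilter',
                       'ServerGroupAffinityFilter']
    weight_classes = ['nova.scheduler.weights.affinity.'
                      'ServerGroupSoftAffinityWeigher']

    def validate(name, configured):
        # Same containment test _validate_filter()/_validate_weigher() use.
        return any(name in entry for entry in configured)

    assert validate('ServerGroupAffinityFilter', enabled_filters)
    assert validate('ServerGroupSoftAffinityWeigher', weight_classes)
    assert not validate('ServerGroupSoftAntiAffinityWeigher', weight_classes)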
@@ -0,0 +1,254 @@
From 69d0e023edfc2edc123fd5ed29b79ebbd3abe97f Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Wed, 10 Jan 2018 10:37:54 +0100
Subject: [PATCH 3/8] get instance group's aggregate associations

Ocata backport for patch from Jay Pipes:
https://review.openstack.org/#/c/531243/
---
 nova/objects/instance_group.py                   |  36 +++++++-
 nova/tests/functional/db/test_instance_group.py  | 116 ++++++++++++++++++++++
 nova/tests/unit/objects/test_instance_group.py   |  21 +++++
 nova/tests/unit/objects/test_objects.py          |   2 +-
 4 files changed, 173 insertions(+), 2 deletions(-)

diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py
|
||||
index 670813b77e..2be47278b2 100644
|
||||
--- a/nova/objects/instance_group.py
|
||||
+++ b/nova/objects/instance_group.py
|
||||
@@ -17,6 +17,7 @@ import copy
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_utils import uuidutils
|
||||
from oslo_utils import versionutils
|
||||
+import sqlalchemy as sa
|
||||
from sqlalchemy.orm import contains_eager
|
||||
from sqlalchemy.orm import joinedload
|
||||
|
||||
@@ -122,7 +123,8 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
# Version 1.8: Add count_members_by_user()
|
||||
# Version 1.9: Add get_by_instance_uuid()
|
||||
# Version 1.10: Add hosts field
|
||||
- VERSION = '1.10'
|
||||
+ # Version 1.11: Add get_aggregate_uuids()
|
||||
+ VERSION = '1.11'
|
||||
|
||||
fields = {
|
||||
'id': fields.IntegerField(),
|
||||
@@ -454,6 +456,38 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
return list(set([instance.host for instance in instances
|
||||
if instance.host]))
|
||||
|
||||
+ @base.remotable
|
||||
+ def get_aggregate_uuids(self, exclude=None):
|
||||
+ """Returns a set of aggregate UUIDs associated with all compute nodes
|
||||
+ that are housing all non-deleted instances in the group
|
||||
+ """
|
||||
+ filter_uuids = self.members
|
||||
+ if exclude:
|
||||
+ filter_uuids = set(filter_uuids) - set(exclude)
|
||||
+ filters = {'uuid': filter_uuids, 'deleted': False}
|
||||
+ instances = objects.InstanceList.get_by_filters(self._context,
|
||||
+ filters=filters)
|
||||
+ instance_nodes = set([instance.node for instance in instances
|
||||
+ if instance.node])
|
||||
+ if not instance_nodes:
|
||||
+ return set()
|
||||
+ return self._get_aggregate_uuids(self._context, instance_nodes)
|
||||
+
|
||||
+ @staticmethod
|
||||
+ @db_api.api_context_manager.reader
|
||||
+ def _get_aggregate_uuids(ctx, instance_nodes):
|
||||
+ # Now find the aggregates associated with all those nodes
|
||||
+ agg_tbl = api_models.Aggregate.__table__
|
||||
+ agg_host_tbl = api_models.AggregateHost.__table__
|
||||
+ join = sa.join(agg_tbl, agg_host_tbl,
|
||||
+ agg_tbl.c.id == agg_host_tbl.c.aggregate_id)
|
||||
+ sel = sa.select([agg_tbl.c.uuid]).select_from(join)
|
||||
+ sel = sel.where(agg_host_tbl.c.host.in_(instance_nodes))
|
||||
+ sel = sel.group_by(agg_tbl.c.uuid)
|
||||
+ res = ctx.session.execute(sel)
|
||||
+ agg_uuids = [r[0] for r in res]
|
||||
+ return set(agg_uuids)
|
||||
+
|
||||
@base.remotable
|
||||
def count_members_by_user(self, user_id):
|
||||
"""Count the number of instances in a group belonging to a user."""
|
||||
diff --git a/nova/tests/functional/db/test_instance_group.py b/nova/tests/functional/db/test_instance_group.py
|
||||
index 4c4f627fe2..b4c7ef3fd8 100644
|
||||
--- a/nova/tests/functional/db/test_instance_group.py
|
||||
+++ b/nova/tests/functional/db/test_instance_group.py
|
||||
@@ -18,6 +18,7 @@ from nova.db.sqlalchemy import api as db_api
|
||||
from nova import exception
|
||||
from nova import objects
|
||||
from nova.objects import base
|
||||
+from nova.objects import fields as obj_fields
|
||||
from nova.objects import instance_group
|
||||
from nova import test
|
||||
from nova.tests import uuidsentinel as uuids
|
||||
@@ -238,3 +239,118 @@ class InstanceGroupObjectTestCase(test.TestCase):
|
||||
self.context, 100)
|
||||
self.assertEqual(0, total)
|
||||
self.assertEqual(0, done)
|
||||
+
|
||||
+
|
||||
+class InstanceGroupAggregatesTestCase(test.TestCase):
|
||||
+ def setUp(self):
|
||||
+ super(InstanceGroupAggregatesTestCase, self).setUp()
|
||||
+ self.ctx = context.RequestContext('fake-user', 'fake-project')
|
||||
+
|
||||
+ def _create_compute_node(self, host, node):
|
||||
+ cn = objects.ComputeNode(
|
||||
+ self.ctx,
|
||||
+ host=host,
|
||||
+ vcpus=2,
|
||||
+ memory_mb=2048,
|
||||
+ local_gb=100,
|
||||
+ vcpus_used=2,
|
||||
+ memory_mb_used=2048,
|
||||
+ local_gb_used=100,
|
||||
+ hypervisor_type='ironic',
|
||||
+ hypervisor_version=0,
|
||||
+ hypervisor_hostname=node,
|
||||
+ free_ram_mb=0,
|
||||
+ free_disk_gb=0,
|
||||
+ current_workload=0,
|
||||
+ running_vms=0,
|
||||
+ cpu_info='{}',
|
||||
+ disk_available_least=0,
|
||||
+ host_ip='1.1.1.1',
|
||||
+ supported_hv_specs=[
|
||||
+ objects.HVSpec.from_list([
|
||||
+ obj_fields.Architecture.I686,
|
||||
+ obj_fields.HVType.KVM,
|
||||
+ obj_fields.VMMode.HVM])
|
||||
+ ],
|
||||
+ metrics=None,
|
||||
+ pci_device_pools=None,
|
||||
+ extra_resources=None,
|
||||
+ stats={},
|
||||
+ numa_topology=None,
|
||||
+ cpu_allocation_ratio=1.0,
|
||||
+ ram_allocation_ratio=1.0,
|
||||
+ disk_allocation_ratio=1.0)
|
||||
+ cn.create()
|
||||
+ return cn
|
||||
+
|
||||
+ def test_get_aggregate_uuids(self):
|
||||
+ """Tests that when associating compute nodes to aggregates, and
|
||||
+ creating an instance group with instances on those compute nodes, that
|
||||
+ we are able to retrieve the correct set() of aggregate UUIDs from the
|
||||
+ instance group.
|
||||
+ """
|
||||
+ agg1 = objects.Aggregate(self.ctx, name='agg1')
|
||||
+ agg1.create()
|
||||
+ agg2 = objects.Aggregate(self.ctx, name='agg2')
|
||||
+ agg2.create()
|
||||
+
|
||||
+ # NOTE(gryf): We are passing project_id explicitly, due to not going
|
||||
+ # through all the process, like calling
|
||||
+ # nova.compute.api.API._validate_and_build_base_options(), which
|
||||
+ # return among other things base_options which contain project_id.
|
||||
+ # We could do the i1.update({'project_id': ctx.project_id}) instead,
|
||||
+ # but passing project_id as a parameter during object init is cleaner.
|
||||
+ i1 = objects.Instance(self.ctx, host='host1', node='node1',
|
||||
+ project_id=self.ctx.project_id)
|
||||
+ i1.create()
|
||||
+ i2 = objects.Instance(self.ctx, host='host1', node='node2',
|
||||
+ project_id=self.ctx.project_id)
|
||||
+ i2.create()
|
||||
+ i3 = objects.Instance(self.ctx, host='host2', node='node3',
|
||||
+ project_id=self.ctx.project_id)
|
||||
+ i3.create()
|
||||
+
|
||||
+ all_insts = objects.InstanceList.get_all(self.ctx)
|
||||
+ exp_inst_uuids = set([i1.uuid, i2.uuid, i3.uuid])
|
||||
+ self.assertEqual(exp_inst_uuids,
|
||||
+ set([inst.uuid for inst in all_insts]))
|
||||
+
|
||||
+ # Create a server group with just i1 and i2 and verify no aggregate
|
||||
+ # UUIDs returned from InstanceGroup.get_aggregate_uuids() since the
|
||||
+ # compute nodes have not yet been associated with any aggregates
|
||||
+ g1 = objects.InstanceGroup(self.ctx,
|
||||
+ name='g1',
|
||||
+ user_id=self.ctx.user_id,
|
||||
+ project_id=self.ctx.project_id,
|
||||
+ members=[i1.uuid, i2.uuid],
|
||||
+ policies=['aggregate-affinity'])
|
||||
+ g1.create()
|
||||
+
|
||||
+ # Create a server group with just i3 and verify no aggregate
|
||||
+ # UUIDs returned from InstanceGroup.get_aggregate_uuids() since the
|
||||
+ # compute nodes have not yet been associated with any aggregates
|
||||
+ g2 = objects.InstanceGroup(self.ctx,
|
||||
+ name='g2',
|
||||
+ user_id=self.ctx.user_id,
|
||||
+ project_id=self.ctx.project_id,
|
||||
+ members=[i3.uuid],
|
||||
+ policies=['aggregate-anti-affinity'])
|
||||
+
|
||||
+ g1_agg_uuids = g1.get_aggregate_uuids()
|
||||
+ self.assertEqual(set(), g1_agg_uuids)
|
||||
+
|
||||
+ g2_agg_uuids = g2.get_aggregate_uuids()
|
||||
+ self.assertEqual(set(), g2_agg_uuids)
|
||||
+
|
||||
+ # OK, now associate the compute nodes with various aggregates and
|
||||
+ # verify the aggregate UUIDs returned by each instance group is
|
||||
+ # correct.
|
||||
+ agg1.add_host('node1')
|
||||
+ agg1.add_host('node2')
|
||||
+ agg2.add_host('node3')
|
||||
+
|
||||
+ g1_agg_uuids = g1.get_aggregate_uuids()
|
||||
+ self.assertEqual(set([agg1.uuid]), g1_agg_uuids)
|
||||
+
|
||||
+ g2_agg_uuids = g2.get_aggregate_uuids()
|
||||
+ self.assertEqual(set([agg2.uuid]), g2_agg_uuids)
|
||||
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
|
||||
index d542c18afc..8da6712f6e 100644
|
||||
--- a/nova/tests/unit/objects/test_instance_group.py
|
||||
+++ b/nova/tests/unit/objects/test_instance_group.py
|
||||
@@ -241,6 +241,27 @@ class _TestInstanceGroupObject(object):
|
||||
mock_il_get.assert_called_once_with(self.context,
|
||||
filters=expected_filters)
|
||||
|
||||
+ @mock.patch('nova.objects.InstanceGroup._get_aggregate_uuids')
|
||||
+ @mock.patch('nova.objects.InstanceList.get_by_filters')
|
||||
+ @mock.patch('nova.objects.InstanceGroup._get_from_db_by_uuid',
|
||||
+ return_value=_INST_GROUP_DB)
|
||||
+ def test_get_aggregate_uuids(self, mock_get_db, mock_il_get,
|
||||
+ mock_internal):
|
||||
+ mock_il_get.return_value = [objects.Instance(node='node1'),
|
||||
+ objects.Instance(node='node2'),
|
||||
+ objects.Instance(node=None)]
|
||||
+ obj = objects.InstanceGroup.get_by_uuid(self.context, _DB_UUID)
|
||||
+ obj.get_aggregate_uuids()
|
||||
+ self.assertEqual(['instance_id1', 'instance_id2'], obj.members)
|
||||
+ expected_filters = {
|
||||
+ 'uuid': ['instance_id1', 'instance_id2'],
|
||||
+ 'deleted': False
|
||||
+ }
|
||||
+ mock_il_get.assert_called_once_with(self.context,
|
||||
+ filters=expected_filters)
|
||||
+ exp_nodes = set(['node1', 'node2'])
|
||||
+ mock_internal.assert_called_once_with(self.context, exp_nodes)
|
||||
+
|
||||
def test_obj_make_compatible(self):
|
||||
obj = objects.InstanceGroup(self.context, **_INST_GROUP_DB)
|
||||
obj_primitive = obj.obj_to_primitive()
|
||||
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
|
||||
index 71b919597f..a577820d0c 100644
|
||||
--- a/nova/tests/unit/objects/test_objects.py
|
||||
+++ b/nova/tests/unit/objects/test_objects.py
|
||||
@@ -1106,7 +1106,7 @@ object_data = {
|
||||
'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
|
||||
'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
|
||||
'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
|
||||
- 'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f',
|
||||
+ 'InstanceGroup': '1.11-bdd9fa6ab3c80e92fd43b3ba5393e368',
|
||||
'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873',
|
||||
'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
|
||||
'InstanceList': '2.2-ff71772c7bf6d72f6ef6eee0199fb1c9',
|
||||
--
|
||||
2.16.1
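
For reference, a hedged usage sketch of the InstanceGroup.get_aggregate_uuids() call added in PATCH 3/8 (the context, group UUID and instance objects are assumed to exist; the behaviour shown matches the functional test above):

    group = objects.InstanceGroup.get_by_uuid(ctxt, group_uuid)

    # Empty while none of the nodes hosting the group's instances belong to
    # an aggregate; e.g. {agg1.uuid} after agg1.add_host('node1').
    agg_uuids = group.get_aggregate_uuids()

    # exclude drops the listed instance UUIDs from the calculation, which a
    # later anti-affinity check uses to ignore the instance being placed.
    agg_uuids = group.get_aggregate_uuids(exclude=[instance.uuid])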
@@ -0,0 +1,469 @@
From f69827ff3502552a45a19a50ef2cfad30c41af2d Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Thu, 18 Jan 2018 09:17:04 +0100
Subject: [PATCH 4/8] Support aggregate affinity filters

Jay Pipes' patch for two new policies, aggregate-affinity and
aggregate-anti-affinity, backported to Ocata.

https://review.openstack.org/#/c/529201/
---
 doc/api_samples/versions/v21-version-get-resp.json  |   2 +-
 doc/api_samples/versions/versions-get-resp.json     |   2 +-
 nova/api/openstack/api_version_request.py           |   4 +-
 .../openstack/compute/rest_api_version_history.rst  |  12 ++
 .../api/openstack/compute/schemas/server_groups.py  |   5 +
 nova/api/openstack/compute/server_groups.py         |   3 +-
 nova/compute/manager.py                             |  33 ++++-
 nova/scheduler/filters/affinity_filter.py           |  71 +++++++++++
 nova/scheduler/host_manager.py                      |   7 ++
 nova/scheduler/utils.py                             |  21 ++--
 nova/tests/functional/test_server_group.py          | 140 ++++++++++++++-
 11 files changed, 285 insertions(+), 15 deletions(-)

diff --git a/doc/api_samples/versions/v21-version-get-resp.json b/doc/api_samples/versions/v21-version-get-resp.json
|
||||
index 64120de26c..241212017e 100644
|
||||
--- a/doc/api_samples/versions/v21-version-get-resp.json
|
||||
+++ b/doc/api_samples/versions/v21-version-get-resp.json
|
||||
@@ -19,7 +19,7 @@
|
||||
}
|
||||
],
|
||||
"status": "CURRENT",
|
||||
- "version": "2.42",
|
||||
+ "version": "2.43",
|
||||
"min_version": "2.1",
|
||||
"updated": "2013-07-23T11:33:21Z"
|
||||
}
|
||||
diff --git a/doc/api_samples/versions/versions-get-resp.json b/doc/api_samples/versions/versions-get-resp.json
|
||||
index 27ad9b3703..924b060df1 100644
|
||||
--- a/doc/api_samples/versions/versions-get-resp.json
|
||||
+++ b/doc/api_samples/versions/versions-get-resp.json
|
||||
@@ -22,7 +22,7 @@
|
||||
}
|
||||
],
|
||||
"status": "CURRENT",
|
||||
- "version": "2.42",
|
||||
+ "version": "2.43",
|
||||
"min_version": "2.1",
|
||||
"updated": "2013-07-23T11:33:21Z"
|
||||
}
|
||||
diff --git a/nova/api/openstack/api_version_request.py b/nova/api/openstack/api_version_request.py
|
||||
index dde18537e4..9957afd0af 100644
|
||||
--- a/nova/api/openstack/api_version_request.py
|
||||
+++ b/nova/api/openstack/api_version_request.py
|
||||
@@ -102,6 +102,8 @@ REST_API_VERSION_HISTORY = """REST API Version History:
|
||||
re-introduce the tag attribute that, due to bugs, was lost
|
||||
starting with version 2.33 for block devices and starting with
|
||||
version 2.37 for network interfaces.
|
||||
+ * 2.43 - Add 'aggregate-affinity' and 'aggregate-anti-affinity' server
|
||||
+ group policies
|
||||
"""
|
||||
|
||||
# The minimum and maximum versions of the API supported
|
||||
@@ -110,7 +112,7 @@ REST_API_VERSION_HISTORY = """REST API Version History:
|
||||
# Note(cyeoh): This only applies for the v2.1 API once microversions
|
||||
# support is fully merged. It does not affect the V2 API.
|
||||
_MIN_API_VERSION = "2.1"
|
||||
-_MAX_API_VERSION = "2.42"
|
||||
+_MAX_API_VERSION = "2.43"
|
||||
DEFAULT_API_VERSION = _MIN_API_VERSION
|
||||
|
||||
# Almost all proxy APIs which related to network, images and baremetal
|
||||
diff --git a/nova/api/openstack/compute/rest_api_version_history.rst b/nova/api/openstack/compute/rest_api_version_history.rst
|
||||
index fee83ead14..deccddfebf 100644
|
||||
--- a/nova/api/openstack/compute/rest_api_version_history.rst
|
||||
+++ b/nova/api/openstack/compute/rest_api_version_history.rst
|
||||
@@ -483,3 +483,15 @@ user documentation.
|
||||
2.37 and for block_device_mapping_v2 starting with version 2.33. Microversion
|
||||
2.42 restores the tag parameter to both networks and block_device_mapping_v2,
|
||||
allowing networks and block devices to be tagged again.
|
||||
+
|
||||
+2.43
|
||||
+----
|
||||
+
|
||||
+From this version of the API users can choose `aggregate-affinity` and
|
||||
+`aggregate-anti-affinity` rules for server-groups. The `aggregate-affinity`
|
||||
+policy for a server group indicates that new members of the server group should
|
||||
+be placed on hosts that are in the same aggregate(s) associated with hosts
|
||||
+where other members of the server group are placed. The
|
||||
+`aggregate-anti-affinity` policy enforces the opposite: ensure that instances
|
||||
+launched in a server group land on hosts that are *not* associated with
|
||||
+aggregates that other members of the server group are associated to.
|
||||
diff --git a/nova/api/openstack/compute/schemas/server_groups.py b/nova/api/openstack/compute/schemas/server_groups.py
|
||||
index 52a08413aa..4b274e3251 100644
|
||||
--- a/nova/api/openstack/compute/schemas/server_groups.py
|
||||
+++ b/nova/api/openstack/compute/schemas/server_groups.py
|
||||
@@ -43,3 +43,8 @@ create = {
|
||||
create_v215 = copy.deepcopy(create)
|
||||
policies = create_v215['properties']['server_group']['properties']['policies']
|
||||
policies['items'][0]['enum'].extend(['soft-anti-affinity', 'soft-affinity'])
|
||||
+
|
||||
+create_v243 = copy.deepcopy(create_v215)
|
||||
+policies = create_v243['properties']['server_group']['properties']['policies']
|
||||
+policies['items'][0]['enum'].extend(['aggregate-anti-affinity',
|
||||
+ 'aggregate-affinity'])
|
||||
diff --git a/nova/api/openstack/compute/server_groups.py b/nova/api/openstack/compute/server_groups.py
|
||||
index dfd2380ec2..82429af5a6 100644
|
||||
--- a/nova/api/openstack/compute/server_groups.py
|
||||
+++ b/nova/api/openstack/compute/server_groups.py
|
||||
@@ -132,7 +132,8 @@ class ServerGroupController(wsgi.Controller):
|
||||
@wsgi.Controller.api_version("2.1")
|
||||
@extensions.expected_errors((400, 403))
|
||||
@validation.schema(schema.create, "2.0", "2.14")
|
||||
- @validation.schema(schema.create_v215, "2.15")
|
||||
+ @validation.schema(schema.create_v215, "2.15", "2.42")
|
||||
+ @validation.schema(schema.create_v243, "2.43")
|
||||
def create(self, req, body):
|
||||
"""Creates a new server group."""
|
||||
context = _authorize_context(req, 'create')
|
||||
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
|
||||
index 86c39c190a..10ed9d3df0 100644
|
||||
--- a/nova/compute/manager.py
|
||||
+++ b/nova/compute/manager.py
|
||||
@@ -1294,7 +1294,9 @@ class ComputeManager(manager.Manager):
|
||||
@utils.synchronized(group_hint)
|
||||
def _do_validation(context, instance, group_hint):
|
||||
group = objects.InstanceGroup.get_by_hint(context, group_hint)
|
||||
- if 'anti-affinity' in group.policies:
|
||||
+ # NOTE(jaypipes): A server group only has 1 policy...
|
||||
+ group_policy = group.policies[0]
|
||||
+ if 'anti-affinity' == group_policy:
|
||||
group_hosts = group.get_hosts(exclude=[instance.uuid])
|
||||
if self.host in group_hosts:
|
||||
msg = _("Anti-affinity instance group policy "
|
||||
@@ -1302,14 +1304,39 @@ class ComputeManager(manager.Manager):
|
||||
raise exception.RescheduledException(
|
||||
instance_uuid=instance.uuid,
|
||||
reason=msg)
|
||||
- elif 'affinity' in group.policies:
|
||||
+ elif 'affinity' == group_policy:
|
||||
group_hosts = group.get_hosts(exclude=[instance.uuid])
|
||||
if group_hosts and self.host not in group_hosts:
|
||||
msg = _("Affinity instance group policy was violated.")
|
||||
raise exception.RescheduledException(
|
||||
instance_uuid=instance.uuid,
|
||||
reason=msg)
|
||||
-
|
||||
+ elif 'aggregate' in group_policy:
|
||||
+ # NOTE(jaypipes): We look up aggregates by the **node** here,
|
||||
+ # not the instance.host. This is because the compute node (not
|
||||
+ # the service record) is expected to be associated with
|
||||
+ # aggregates when the aggregate affinity filters are being
|
||||
+ # used.
|
||||
+ node_aggs = objects.AggregateList.get_by_host(
|
||||
+ context, instance.node)
|
||||
+ node_aggs = set(agg.uuid for agg in node_aggs)
|
||||
+ if 'aggregate-affinity' == group_policy:
|
||||
+ group_aggs = group.get_aggregate_uuids()
|
||||
+ if not node_aggs.issubset(group_aggs):
|
||||
+ msg = _("Aggregate affinity server group policy was "
|
||||
+ "violated.")
|
||||
+ raise exception.RescheduledException(
|
||||
+ instance_uuid=instance.uuid,
|
||||
+ reason=msg)
|
||||
+ else:
|
||||
+ group_aggs = group.get_aggregate_uuids(
|
||||
+ exclude=[instance.uuid])
|
||||
+ if not node_aggs.isdisjoint(group_aggs):
|
||||
+ msg = _("Aggregate anti-affinity server group policy "
|
||||
+ "was violated.")
|
||||
+ raise exception.RescheduledException(
|
||||
+ instance_uuid=instance.uuid,
|
||||
+ reason=msg)
|
||||
_do_validation(context, instance, group_hint)
|
||||
|
||||
def _log_original_error(self, exc_info, instance_uuid):
|
||||
diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
|
||||
index f8aa47ee03..f025df45df 100644
|
||||
--- a/nova/scheduler/filters/affinity_filter.py
|
||||
+++ b/nova/scheduler/filters/affinity_filter.py
|
||||
@@ -145,3 +145,74 @@ class ServerGroupAffinityFilter(_GroupAffinityFilter):
|
||||
def __init__(self):
|
||||
self.policy_name = 'affinity'
|
||||
super(ServerGroupAffinityFilter, self).__init__()
|
||||
+
|
||||
+
|
||||
+class ServerGroupAggregateAffinityFilter(filters.BaseHostFilter):
|
||||
+ """Filter out any host that isn't in the same host aggregates as the
|
||||
+ aggregates associated with the host from another member of the server
|
||||
+ group.
|
||||
+ """
|
||||
+
|
||||
+ RUN_ON_REBUILD = False
|
||||
+ POLICY_NAME = 'aggregate-affinity'
|
||||
+ REVERSE_CHECK = False
|
||||
+
|
||||
+ def host_passes(self, host_state, spec_obj):
|
||||
+ if not spec_obj.instance_group.policies:
|
||||
+ return True
|
||||
+ policy = spec_obj.instance_group.policies[0]
|
||||
+ if self.POLICY_NAME != policy:
|
||||
+ return True
|
||||
+
|
||||
+ host_aggs = set(agg.id for agg in host_state.aggregates)
|
||||
+
|
||||
+ if not host_aggs:
|
||||
+ if not self.REVERSE_CHECK:
|
||||
+ # Clearly, if the host doesn't belong to any aggregates, then
|
||||
+ # it cannot satisfy an aggregate affinity constraint
|
||||
+ return False
|
||||
+ else:
|
||||
+ # And clearly, if the host doesn't belong to any aggregates,
|
||||
+ # then it must always satisfy an aggregate anti-affinity
|
||||
+ # constraint
|
||||
+ return True
|
||||
+
|
||||
+ group_hosts = (spec_obj.instance_group.hosts
|
||||
+ if spec_obj.instance_group else [])
|
||||
+ if not group_hosts:
|
||||
+ # There are no members of the server group yet, so this host meets
|
||||
+ # the aggregate affinity (or anti-affinity) constraint
|
||||
+ return True
|
||||
+
|
||||
+ # TODO(jaypipes): The sets used here contain the autoincremented
|
||||
+ # integer keys for aggregates. :( This means this isn't multi-cell
|
||||
+ # safe. We would need to modify the host_aggregate_map and associated
|
||||
+ # HostState.aggregates sets to contain UUIDs instead of IDs to make
|
||||
+ # this multi-cell safe
|
||||
+
|
||||
+ # Grab all aggregates for all hosts in the server group and ensure we
|
||||
+ # have an intersection with this host's aggregates
|
||||
+ group_aggs = set()
|
||||
+ for group_host in group_hosts:
|
||||
+ group_aggs |= self.host_manager.host_aggregates_map[group_host]
|
||||
+
|
||||
+ LOG.debug("%(policy_name)s: check if %(host_aggs)s "
|
||||
+ "is %(policy_cond)s subset of %(group_aggs)s",
|
||||
+ {
|
||||
+ 'policy_name': self.POLICY_NAME,
|
||||
+ 'policy_cond': 'not a' if self.REVERSE_CHECK else 'a',
|
||||
+ 'group_aggs': group_aggs,
|
||||
+ 'host_aggs': host_aggs})
|
||||
+ if self.REVERSE_CHECK:
|
||||
+ return host_aggs.isdisjoint(group_aggs)
|
||||
+ return host_aggs.issubset(group_aggs)
|
||||
+
|
||||
+
|
||||
+class ServerGroupAggregateAntiAffinityFilter(
|
||||
+ ServerGroupAggregateAffinityFilter):
|
||||
+ """Filter out any host that is in the same host aggregates as the
|
||||
+ aggregates associated with any host from another member of the server
|
||||
+ group.
|
||||
+ """
|
||||
+ POLICY_NAME = 'aggregate-anti-affinity'
|
||||
+ REVERSE_CHECK = True
|
||||
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
|
||||
index 4f66d40913..7347722a94 100644
|
||||
--- a/nova/scheduler/host_manager.py
|
||||
+++ b/nova/scheduler/host_manager.py
|
||||
@@ -341,6 +341,13 @@ class HostManager(object):
|
||||
self.filter_cls_map = {cls.__name__: cls for cls in filter_classes}
|
||||
self.filter_obj_map = {}
|
||||
self.enabled_filters = self._choose_host_filters(self._load_filters())
|
||||
+ # NOTE(jaypipes): This is a total hack because the design of the
|
||||
+ # scheduler and scheduler filters is teh suck. Basically, we are just
|
||||
+ # jamming a pointer to the host manager into each filter object so that
|
||||
+ # the filter objects can have access to things like the host manager's
|
||||
+ # cached aggregate map. Ugly, but it works...
|
||||
+ for f in self.enabled_filters:
|
||||
+ f.host_manager = self
|
||||
self.weight_handler = weights.HostWeightHandler()
|
||||
weigher_classes = self.weight_handler.get_matching_classes(
|
||||
CONF.filter_scheduler.weight_classes)
|
||||
diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
|
||||
index 682b4cc199..57a306e07a 100644
|
||||
--- a/nova/scheduler/utils.py
|
||||
+++ b/nova/scheduler/utils.py
|
||||
@@ -299,13 +299,20 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
|
||||
|
||||
# NOTE(jaypipes): There's only a single policy for the server group
|
||||
group_policy = group.policies[0]
|
||||
- checks = {'affinity': (_validate_filter, 'ServerGroupAffinityFilter'),
|
||||
- 'anti-affinity': (_validate_filter,
|
||||
- 'ServerGroupAntiAffinityFilter'),
|
||||
- 'soft-affinity': (_validate_weigher,
|
||||
- 'ServerGroupSoftAffinityWeigher'),
|
||||
- 'soft-anti-affinity': (_validate_weigher,
|
||||
- 'ServerGroupSoftAntiAffinityWeigher')}
|
||||
+ checks = {
|
||||
+ 'affinity': (
|
||||
+ _validate_filter, 'ServerGroupAffinityFilter'),
|
||||
+ 'anti-affinity': (
|
||||
+ _validate_filter, 'ServerGroupAntiAffinityFilter'),
|
||||
+ 'soft-affinity': (
|
||||
+ _validate_weigher, 'ServerGroupSoftAffinityWeigher'),
|
||||
+ 'soft-anti-affinity': (
|
||||
+ _validate_weigher, 'ServerGroupSoftAntiAffinityWeigher'),
|
||||
+ 'aggregate-affinity': (
|
||||
+ _validate_filter, 'ServerGroupAggregateAffinityFilter'),
|
||||
+ 'aggregate-anti-affinity': (
|
||||
+ _validate_filter, 'ServerGroupAggregateAntiAffinityFilter')
|
||||
+ }
|
||||
|
||||
check_fn, class_name = checks[group_policy]
|
||||
if not check_fn(class_name):
|
||||
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
|
||||
index 5e52088c14..52af7688bb 100644
|
||||
--- a/nova/tests/functional/test_server_group.py
|
||||
+++ b/nova/tests/functional/test_server_group.py
|
||||
@@ -17,6 +17,7 @@ import time
|
||||
|
||||
from oslo_config import cfg
|
||||
|
||||
+from nova.compute import api as compute_api
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova.db.sqlalchemy import api as db_api
|
||||
@@ -46,7 +47,9 @@ class ServerGroupTestBase(test.TestCase,
|
||||
|
||||
_enabled_filters = (CONF.filter_scheduler.enabled_filters
|
||||
+ ['ServerGroupAntiAffinityFilter',
|
||||
- 'ServerGroupAffinityFilter'])
|
||||
+ 'ServerGroupAffinityFilter',
|
||||
+ 'ServerGroupAggregateAntiAffinityFilter',
|
||||
+ 'ServerGroupAggregateAffinityFilter'])
|
||||
|
||||
# Override servicegroup parameters to make the tests run faster
|
||||
_service_down_time = 10
|
||||
@@ -812,3 +815,138 @@ class ServerGroupTestV215(ServerGroupTestV21):
|
||||
|
||||
def test_soft_affinity_not_supported(self):
|
||||
pass
|
||||
+
|
||||
+
|
||||
+class ServerGroupAggregateAffinityConfTest(ServerGroupTestBase):
|
||||
+ api_major_version = 'v2.1'
|
||||
+ group = {
|
||||
+ 'name': 'fake-rack-affinity',
|
||||
+ 'policies': ['aggregate-affinity'],
|
||||
+ }
|
||||
+
|
||||
+ # Load only anti-affinity filter so affinity will be missing
|
||||
+ _enabled_filters = ['ServerGroupAggregateAntiAffinityFilter']
|
||||
+
|
||||
+ def test_aggregate_affinity_no_filter(self):
|
||||
+ # We need to do this because _boot_a_server_to_group() ends up calling
|
||||
+ # the images API, and if we set the test case class's microversion
|
||||
+ # attribute to 2.43, then we will blow up the call to images API (which
|
||||
+ # was removed in 2.35). yay.
|
||||
+ self.api.microversion = '2.43'
|
||||
+ created_group = self.api.post_server_groups(self.group)
|
||||
+ self.api.microversion = None
|
||||
+
|
||||
+ failed_server = self._boot_a_server_to_group(created_group,
|
||||
+ expected_status='ERROR')
|
||||
+ self.assertEqual(
|
||||
+ 'ServerGroup policy is not supported: '
|
||||
+ 'ServerGroupAggregateAffinityFilter not configured',
|
||||
+ failed_server['fault']['message'])
|
||||
+ self.assertEqual(400, failed_server['fault']['code'])
|
||||
+
|
||||
+
|
||||
+class ServerGroupAggregateAntiAffinityConfTest(ServerGroupTestBase):
|
||||
+ api_major_version = 'v2.1'
|
||||
+ group = {
|
||||
+ 'name': 'fake-rack-anti-affinity',
|
||||
+ 'policies': ['aggregate-anti-affinity'],
|
||||
+ }
|
||||
+
|
||||
+ # Load only affinity filter so anti-affinity will be missing
|
||||
+ _enabled_filters = ['ServerGroupAggregateAffinityFilter']
|
||||
+
|
||||
+ def test_aggregate_anti_affinity_no_filter(self):
|
||||
+ # We need to do this because _boot_a_server_to_group() ends up calling
|
||||
+ # the images API, and if we set the test case class's microversion
|
||||
+ # attribute to 2.43, then we will blow up the call to images API (which
|
||||
+ # was removed in 2.35). yay.
|
||||
+ self.api.microversion = '2.43'
|
||||
+ created_group = self.api.post_server_groups(self.group)
|
||||
+ self.api.microversion = None
|
||||
+
|
||||
+ failed_server = self._boot_a_server_to_group(created_group,
|
||||
+ expected_status='ERROR')
|
||||
+ self.assertEqual(
|
||||
+ 'ServerGroup policy is not supported: '
|
||||
+ 'ServerGroupAggregateAntiAffinityFilter not configured',
|
||||
+ failed_server['fault']['message'])
|
||||
+ self.assertEqual(400, failed_server['fault']['code'])
|
||||
+
|
||||
+
|
||||
+def _host_from_instance(instance):
|
||||
+ return instance['OS-EXT-SRV-ATTR:host']
|
||||
+
|
||||
+
|
||||
+class AggregateAffinityTest(ServerGroupTestBase):
|
||||
+
|
||||
+ def setUp(self):
|
||||
+ super(AggregateAffinityTest, self).setUp()
|
||||
+
|
||||
+ self.stub_out('nova.virt.driver.load_compute_driver',
|
||||
+ _fake_load_compute_driver)
|
||||
+
|
||||
+ # Start up two compute nodes, associating each with a different host
|
||||
+ # aggregate
|
||||
+ self.agg_api = compute_api.AggregateAPI()
|
||||
+ fake.set_nodes(['node1'])
|
||||
+ self.compute1 = self.start_service('compute', host='node1')
|
||||
+ fake.set_nodes(['node2'])
|
||||
+ self.compute2 = self.start_service('compute', host='node2')
|
||||
+
|
||||
+ self.addCleanup(fake.restore_nodes)
|
||||
+ fake_network.set_stub_network_methods(self)
|
||||
+
|
||||
+ payload = {'aggregate': {'name': 'rack1'}}
|
||||
+ self.agg1 = self.admin_api.post_aggregate(payload)
|
||||
+ payload = {'aggregate': {'name': 'rack2'}}
|
||||
+ self.agg2 = self.admin_api.post_aggregate(payload)
|
||||
+
|
||||
+ ctxt = context.get_admin_context()
|
||||
+ self.agg_api.add_host_to_aggregate(ctxt, self.agg1['id'], 'node1')
|
||||
+ self.agg_api.add_host_to_aggregate(ctxt, self.agg2['id'], 'node2')
|
||||
+
|
||||
+ def _create_server_group(self, payload):
|
||||
+ # We need to do this because _boot_a_server_to_group() ends up calling
|
||||
+ # the images API, and if we set the test case class's microversion
|
||||
+ # attribute to 2.43, then we will blow up the call to images API (which
|
||||
+ # was removed in 2.35). yay.
|
||||
+ self.api.microversion = '2.43'
|
||||
+ group = self.api.post_server_groups(payload)
|
||||
+ self.api.microversion = None
|
||||
+ return group
|
||||
+
|
||||
+ def test_aggregate_affinity(self):
|
||||
+ """Create a server group with a policy of aggregate-affinity, launch
|
||||
+ one instance into this group. Launch another instance into the same
|
||||
+ group and ensure that the instance ends up on a host with the same
|
||||
+ aggregate as the first instance's destination compute node.
|
||||
+ """
|
||||
+ group_name = 'keep-in-rack'
|
||||
+ group_payload = {
|
||||
+ 'name': group_name,
|
||||
+ 'policies': ['aggregate-affinity'],
|
||||
+ }
|
||||
+ group = self._create_server_group(group_payload)
|
||||
+ inst1 = self._boot_a_server_to_group(group)
|
||||
+ inst2 = self._boot_a_server_to_group(group)
|
||||
+
|
||||
+ self.assertEqual(_host_from_instance(inst1),
|
||||
+ _host_from_instance(inst2))
|
||||
+
|
||||
+ def test_aggregate_anti_affinity(self):
|
||||
+ """Create a server group with a policy of aggregate-anti-affinity,
|
||||
+ launch one instance into this group. Launch another instance into the
|
||||
+ same group and ensure that the instance ends up on a host in a
|
||||
+ different aggregate as the first instance's destination compute node.
|
||||
+ """
|
||||
+ group_name = 'not-in-rack'
|
||||
+ group_payload = {
|
||||
+ 'name': group_name,
|
||||
+ 'policies': ['aggregate-anti-affinity'],
|
||||
+ }
|
||||
+ group = self._create_server_group(group_payload)
|
||||
+ inst1 = self._boot_a_server_to_group(group)
|
||||
+ inst2 = self._boot_a_server_to_group(group)
|
||||
+
|
||||
+ self.assertNotEqual(_host_from_instance(inst1),
|
||||
+ _host_from_instance(inst2))
|
||||
--
|
||||
2.16.1
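
For reference, a minimal sketch of the set membership tests that the new ServerGroupAggregateAffinityFilter / ServerGroupAggregateAntiAffinityFilter and the compute manager's late check in PATCH 4/8 rely on (the aggregate IDs below are illustrative):

    candidate_host_aggs = {101}        # aggregates of the host being filtered
    group_aggs = {101, 102}            # aggregates used by the group's hosts

    # aggregate-affinity: every aggregate of the candidate host must already
    # be used by the server group.
    affinity_ok = candidate_host_aggs.issubset(group_aggs)         # True

    # aggregate-anti-affinity: the candidate host must share no aggregate
    # with the server group.
    anti_affinity_ok = candidate_host_aggs.isdisjoint(group_aggs)  # False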
@@ -0,0 +1,170 @@
From 9014195f11d981da4dc158ab9b9b6bb594c8ea0d Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Fri, 23 Feb 2018 07:26:05 +0100
Subject: [PATCH 5/8] Added node field for InstanceGroup objects

Currently there is only a way to find out which hosts belong to a
certain instance group, where 'host' means the hostname on which the
compute service is running. For bare metal instances there is no way to
learn from the instance group object which Ironic nodes belong to the
group. This patch adds the ability to fetch that information.

The InstanceGroup class gains a new field - nodes - and a corresponding
method, get_nodes(), which gathers the node names from the instance
objects. The request spec object is also updated to reset the new
InstanceGroup nodes field while populating the group.
---
 nova/objects/instance_group.py                  | 34 ++++++++++++++++++-----
 nova/objects/request_spec.py                    |  5 ++--
 nova/tests/functional/db/test_instance_group.py |  2 +-
 nova/tests/unit/objects/test_instance_group.py  |  6 +++--
 nova/tests/unit/objects/test_objects.py         |  2 +-
 5 files changed, 37 insertions(+), 12 deletions(-)

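For reference, a hedged usage sketch of the get_nodes() addition described above (names are illustrative): it mirrors get_hosts(), but returns the instances' node names (e.g. Ironic nodes) rather than their service hosts, and 'nodes' becomes lazy-loadable alongside 'hosts'.

    group = objects.InstanceGroup.get_by_uuid(ctxt, group_uuid)

    nodes = group.get_nodes()                        # e.g. ['ironic_node_1']
    nodes = group.get_nodes(exclude=[instance.uuid])

    # Accessing the field triggers obj_load_attr(), which now loads both
    # 'hosts' and 'nodes'.
    lazy_nodes = group.nodes
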
diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py
|
||||
index 2be47278b2..142fff6128 100644
|
||||
--- a/nova/objects/instance_group.py
|
||||
+++ b/nova/objects/instance_group.py
|
||||
@@ -32,7 +32,7 @@ from nova.objects import base
|
||||
from nova.objects import fields
|
||||
|
||||
|
||||
-LAZY_LOAD_FIELDS = ['hosts']
|
||||
+LAZY_LOAD_FIELDS = ['hosts', 'nodes']
|
||||
|
||||
|
||||
def _instance_group_get_query(context, id_field=None, id=None):
|
||||
@@ -124,7 +124,8 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
# Version 1.9: Add get_by_instance_uuid()
|
||||
# Version 1.10: Add hosts field
|
||||
# Version 1.11: Add get_aggregate_uuids()
|
||||
- VERSION = '1.11'
|
||||
+ # Version 1.12: Add nodes field
|
||||
+ VERSION = '1.12'
|
||||
|
||||
fields = {
|
||||
'id': fields.IntegerField(),
|
||||
@@ -138,6 +139,7 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
'policies': fields.ListOfStringsField(nullable=True),
|
||||
'members': fields.ListOfStringsField(nullable=True),
|
||||
'hosts': fields.ListOfStringsField(nullable=True),
|
||||
+ 'nodes': fields.ListOfStringsField(nullable=True),
|
||||
}
|
||||
|
||||
def obj_make_compatible(self, primitive, target_version):
|
||||
@@ -283,12 +285,13 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
|
||||
def obj_load_attr(self, attrname):
|
||||
# NOTE(sbauza): Only hosts could be lazy-loaded right now
|
||||
- if attrname != 'hosts':
|
||||
+ if attrname not in LAZY_LOAD_FIELDS:
|
||||
raise exception.ObjectActionError(
|
||||
action='obj_load_attr', reason='unable to load %s' % attrname)
|
||||
|
||||
self.hosts = self.get_hosts()
|
||||
- self.obj_reset_changes(['hosts'])
|
||||
+ self.nodes = self.get_nodes()
|
||||
+ self.obj_reset_changes(LAZY_LOAD_FIELDS)
|
||||
|
||||
@base.remotable_classmethod
|
||||
def get_by_uuid(cls, context, uuid):
|
||||
@@ -348,8 +351,9 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
# field explicitly, we prefer to raise an Exception so the developer
|
||||
# knows he has to call obj_reset_changes(['hosts']) right after setting
|
||||
# the field.
|
||||
- if 'hosts' in updates:
|
||||
- raise exception.InstanceGroupSaveException(field='hosts')
|
||||
+ for attribute in LAZY_LOAD_FIELDS:
|
||||
+ if attribute in updates:
|
||||
+ raise exception.InstanceGroupSaveException(field=attribute)
|
||||
|
||||
if not updates:
|
||||
return
|
||||
@@ -456,6 +460,24 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
|
||||
return list(set([instance.host for instance in instances
|
||||
if instance.host]))
|
||||
|
||||
+    @base.remotable
+    def get_nodes(self, exclude=None):
+        """Get a list of nodes for non-deleted instances in the group
+
+        This method allows you to get a list of the (ironic) nodes where
+        instances in this group are currently running. There's also an option
+        to exclude certain instance UUIDs from this calculation.
+
+        """
+        filter_uuids = self.members
+        if exclude:
+            filter_uuids = set(filter_uuids) - set(exclude)
+        filters = {'uuid': filter_uuids, 'deleted': False}
+        instances = objects.InstanceList.get_by_filters(self._context,
+                                                        filters=filters)
+        return list(set([instance.node for instance in instances
+                         if instance.node]))
+
    @base.remotable
    def get_aggregate_uuids(self, exclude=None):
        """Returns a set of aggregate UUIDs associated with all compute nodes
diff --git a/nova/objects/request_spec.py b/nova/objects/request_spec.py
index 9040735153..24eaef9327 100644
--- a/nova/objects/request_spec.py
+++ b/nova/objects/request_spec.py
@@ -200,8 +200,9 @@ class RequestSpec(base.NovaObject):
            self.instance_group = objects.InstanceGroup(policies=policies,
                                                        hosts=hosts,
                                                        members=members)
-            # hosts has to be not part of the updates for saving the object
-            self.instance_group.obj_reset_changes(['hosts'])
+            # hosts and nodes cannot be a part of the updates for saving the
+            # object
+            self.instance_group.obj_reset_changes(['hosts', 'nodes'])
        else:
            # Set the value anyway to avoid any call to obj_attr_is_set for it
            self.instance_group = None
diff --git a/nova/tests/functional/db/test_instance_group.py b/nova/tests/functional/db/test_instance_group.py
index b4c7ef3fd8..3c608b929f 100644
--- a/nova/tests/functional/db/test_instance_group.py
+++ b/nova/tests/functional/db/test_instance_group.py
@@ -221,7 +221,7 @@ class InstanceGroupObjectTestCase(test.TestCase):
        api_models = sorted(api_models, key=key_func)
        orig_main_models = sorted(orig_main_models, key=key_func)
        ignore_fields = ('id', 'hosts', 'deleted', 'deleted_at', 'created_at',
-                        'updated_at')
+                        'updated_at', 'nodes')
        for i in range(len(api_models)):
            for field in instance_group.InstanceGroup.fields:
                if field not in ignore_fields:
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
index 8da6712f6e..37a71b57ce 100644
--- a/nova/tests/unit/objects/test_instance_group.py
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -271,8 +271,10 @@ class _TestInstanceGroupObject(object):

    @mock.patch.object(objects.InstanceList, 'get_by_filters')
    def test_load_hosts(self, mock_get_by_filt):
-        mock_get_by_filt.return_value = [objects.Instance(host='host1'),
-                                         objects.Instance(host='host2')]
+        mock_get_by_filt.return_value = [objects.Instance(host='host1',
+                                                          node='node1'),
+                                         objects.Instance(host='host2',
+                                                          node='node2')]

        obj = objects.InstanceGroup(self.context, members=['uuid1'])
        self.assertEqual(2, len(obj.hosts))
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index a577820d0c..f80182357c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1106,7 +1106,7 @@ object_data = {
    'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
    'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
    'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
-    'InstanceGroup': '1.11-bdd9fa6ab3c80e92fd43b3ba5393e368',
+    'InstanceGroup': '1.12-4eaaffc4d20d0901cd0cfaef9e8a41cd',
    'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873',
    'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
    'InstanceList': '2.2-ff71772c7bf6d72f6ef6eee0199fb1c9',
--
2.16.1

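Note (illustrative, not part of the patch): a minimal sketch of reading the new lazy-loaded nodes field. It assumes the code runs inside a Nova service where the object registry and database access are already set up; GROUP_UUID and INSTANCE_UUID are placeholders.

    from nova import context as nova_context
    from nova import objects

    ctxt = nova_context.get_admin_context()
    group = objects.InstanceGroup.get_by_uuid(ctxt, 'GROUP_UUID')

    # Reading .nodes triggers obj_load_attr(), which now calls get_nodes()
    # and returns the ironic node identifiers of the group's instances.
    print(group.nodes)

    # get_nodes() can also leave out selected instance UUIDs.
    print(group.get_nodes(exclude=['INSTANCE_UUID']))
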
@@ -0,0 +1,57 @@
From 3e4ef01cb6f3fa5545cd3be31d84295d65f73fa7 Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Fri, 23 Feb 2018 09:19:54 +0000
Subject: [PATCH 6/8] Add ability to search aggregate map via ironic node as a
 key in HostManager

With this change it becomes possible to map ironic nodes to aggregates. The
signature of _get_aggregates_info in the scheduler HostManager class is
changed to accept a compute node object as a parameter, so that HostManager
(the base class) keeps looking up the aggregate map by host, while
IronicHostManager (the subclass) looks it up by hypervisor_hostname - the
UUID of the node, which is stored as a member of the aggregate.
---
 nova/scheduler/host_manager.py        | 6 +++---
 nova/scheduler/ironic_host_manager.py | 4 ++++
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 7347722a94..8612a36328 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -631,7 +631,7 @@ class HostManager(object):
            # happening after setting this field for the first time
            host_state.update(compute,
                              dict(service),
-                              self._get_aggregates_info(host),
+                              self._get_aggregates_info(compute),
                              self._get_instance_info(context, compute))

            seen_nodes.add(state_key)
@@ -652,9 +652,9 @@ class HostManager(object):
        return (self.host_state_map[host] for host in seen_nodes
                if host in self.host_state_map)

-    def _get_aggregates_info(self, host):
+    def _get_aggregates_info(self, compute):
        return [self.aggs_by_id[agg_id] for agg_id in
-                self.host_aggregates_map[host]]
+                self.host_aggregates_map[compute.host]]

    def _get_instance_info(self, context, compute):
        """Gets the host instance info from the compute host.
diff --git a/nova/scheduler/ironic_host_manager.py b/nova/scheduler/ironic_host_manager.py
index 5156ed6df9..c703a810a9 100644
--- a/nova/scheduler/ironic_host_manager.py
+++ b/nova/scheduler/ironic_host_manager.py
@@ -123,3 +123,7 @@ class IronicHostManager(host_manager.HostManager):
        else:
            return super(IronicHostManager, self)._get_instance_info(context,
                                                                      compute)
+
+    def _get_aggregates_info(self, compute):
+        return [self.aggs_by_id[agg_id] for agg_id in
+                self.host_aggregates_map[compute.hypervisor_hostname]]
--
2.16.1

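Note (illustrative, not part of the patch): a small unit-style sketch of the new override. The manager is created without running __init__ (which would otherwise load filters and aggregates from configuration), and the maps are filled in by hand.

    import mock

    from nova.scheduler import ironic_host_manager

    mgr = object.__new__(ironic_host_manager.IronicHostManager)
    mgr.aggs_by_id = {1: mock.sentinel.rack_aggregate}
    mgr.host_aggregates_map = {'node-uuid-1': set([1])}

    # The ironic override looks aggregates up by the node UUID
    # (hypervisor_hostname), not by the compute service hostname.
    compute = mock.Mock(hypervisor_hostname='node-uuid-1', host='compute-1')
    assert mgr._get_aggregates_info(compute) == [mock.sentinel.rack_aggregate]
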
@@ -0,0 +1,32 @@
From 6f8af77366402aca0555005abe469b29509d0eb3 Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Fri, 23 Feb 2018 11:28:52 +0000
Subject: [PATCH 7/8] Add nodes to group hosts to be checked against
 aggregation

Currently, only hostnames (originating from the machines on which the
compute services run, and belonging to the requested group) were checked
against host aggregates. This patch also adds instance_group.nodes to the
set of keys used as criteria when searching the aggregates.
---
 nova/scheduler/filters/affinity_filter.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index f025df45df..587293f832 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -177,8 +177,8 @@ class ServerGroupAggregateAffinityFilter(filters.BaseHostFilter):
            # constraint
            return True

-        group_hosts = (spec_obj.instance_group.hosts
-                       if spec_obj.instance_group else [])
+        group_hosts = set(spec_obj.instance_group.nodes +
+                          spec_obj.instance_group.hosts)
        if not group_hosts:
            # There are no members of the server group yet, so this host meets
            # the aggregate affinity (or anti-affinity) constraint
--
2.16.1

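Note (illustrative, not part of the patch): a toy example, with placeholder values, of the combined lookup set that host_passes() now builds; both compute service hostnames and ironic node UUIDs are matched against aggregate membership.

    instance_group_hosts = ['compute-1']            # instances on compute hosts
    instance_group_nodes = ['8e1f9b2c-node-uuid']   # bare metal instances
    group_hosts = set(instance_group_nodes + instance_group_hosts)
    # -> {'compute-1', '8e1f9b2c-node-uuid'}
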
@@ -0,0 +1,25 @@
From 72af3e6b58c3a732549b40fbb24067a41c7065ac Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Fri, 23 Feb 2018 11:37:16 +0000
Subject: [PATCH 8/8] Fix for checking policies in non-existing instance_group

---
 nova/scheduler/filters/affinity_filter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/scheduler/filters/affinity_filter.py b/nova/scheduler/filters/affinity_filter.py
index 587293f832..a316aafbcb 100644
--- a/nova/scheduler/filters/affinity_filter.py
+++ b/nova/scheduler/filters/affinity_filter.py
@@ -158,7 +158,7 @@ class ServerGroupAggregateAffinityFilter(filters.BaseHostFilter):
    REVERSE_CHECK = False

    def host_passes(self, host_state, spec_obj):
-        if not spec_obj.instance_group.policies:
+        if not (spec_obj.instance_group and spec_obj.instance_group.policies):
            return True
        policy = spec_obj.instance_group.policies[0]
        if self.POLICY_NAME != policy:
--
2.16.1

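Note (illustrative, not one of the patches' tests): a minimal mock-based check of the fix; with no instance group in the request spec, host_passes() now returns True instead of raising AttributeError.

    import mock

    from nova.scheduler.filters import affinity_filter

    filt = affinity_filter.ServerGroupAggregateAffinityFilter()
    spec_obj = mock.Mock(instance_group=None)
    assert filt.host_passes(mock.sentinel.host_state, spec_obj) is True
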