mirror of https://github.com/gryf/openstack.git
Added another portion of Jay patches backported to Ocata

@@ -0,0 +1,241 @@
From f5e23e436d341a44dafe5a18876cfcadc809b46b Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Mon, 8 Jan 2018 14:33:45 +0100
Subject: [PATCH 2/3] Remove server group sched filter support caching

Backport of https://review.openstack.org/#/c/529200 by Jay Pipes to
Ocata.
---
 nova/scheduler/utils.py                           | 91 ++++++++++-------------
 nova/tests/functional/test_server_group.py        | 19 -----
 nova/tests/unit/scheduler/test_scheduler_utils.py | 16 ++--
 3 files changed, 46 insertions(+), 80 deletions(-)

diff --git a/nova/scheduler/utils.py b/nova/scheduler/utils.py
index 5e9222d14e..682b4cc199 100644
--- a/nova/scheduler/utils.py
+++ b/nova/scheduler/utils.py
@@ -250,23 +250,31 @@ def parse_options(opts, sep='=', converter=str, name=""):
     return good
 
 
-def validate_filter(filter):
-    """Validates that the filter is configured in the default filters."""
-    return filter in CONF.filter_scheduler.enabled_filters
+def _validate_filter(filter):
+    """Validates that the filter is configured in the defaults.
 
+    :param filter: Either short or long module-scoped name of a filter
+    """
+    for enabled_filter in CONF.filter_scheduler.enabled_filters:
+        if filter in enabled_filter:
+            return True
+    return False
+
+
+def _validate_weigher(weigher):
+    """Validates that the weigher is configured in the defaults.
+
+    :param weigher: Either short or long module-scoped name of a weigher
+    """
 
-def validate_weigher(weigher):
-    """Validates that the weigher is configured in the default weighers."""
     weight_classes = CONF.filter_scheduler.weight_classes
     if 'nova.scheduler.weights.all_weighers' in weight_classes:
         return True
-    return weigher in weight_classes
-
 
-_SUPPORTS_AFFINITY = None
-_SUPPORTS_ANTI_AFFINITY = None
-_SUPPORTS_SOFT_AFFINITY = None
-_SUPPORTS_SOFT_ANTI_AFFINITY = None
+    for enabled_weigher in weight_classes:
+        if weigher in enabled_weigher:
+            return True
+    return False
 
 
 def _get_group_details(context, instance_uuid, user_group_hosts=None):
@@ -279,23 +287,6 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
 
     :returns: None or namedtuple GroupDetails
     """
-    global _SUPPORTS_AFFINITY
-    if _SUPPORTS_AFFINITY is None:
-        _SUPPORTS_AFFINITY = validate_filter(
-            'ServerGroupAffinityFilter')
-    global _SUPPORTS_ANTI_AFFINITY
-    if _SUPPORTS_ANTI_AFFINITY is None:
-        _SUPPORTS_ANTI_AFFINITY = validate_filter(
-            'ServerGroupAntiAffinityFilter')
-    global _SUPPORTS_SOFT_AFFINITY
-    if _SUPPORTS_SOFT_AFFINITY is None:
-        _SUPPORTS_SOFT_AFFINITY = validate_weigher(
-            'nova.scheduler.weights.affinity.ServerGroupSoftAffinityWeigher')
-    global _SUPPORTS_SOFT_ANTI_AFFINITY
-    if _SUPPORTS_SOFT_ANTI_AFFINITY is None:
-        _SUPPORTS_SOFT_ANTI_AFFINITY = validate_weigher(
-            'nova.scheduler.weights.affinity.'
-            'ServerGroupSoftAntiAffinityWeigher')
 
     if not instance_uuid:
         return
@@ -306,31 +297,25 @@ def _get_group_details(context, instance_uuid, user_group_hosts=None):
     except exception.InstanceGroupNotFound:
         return
 
-    policies = set(('anti-affinity', 'affinity', 'soft-affinity',
-                    'soft-anti-affinity'))
-    if any((policy in policies) for policy in group.policies):
-        if not _SUPPORTS_AFFINITY and 'affinity' in group.policies:
-            msg = _("ServerGroupAffinityFilter not configured")
-            LOG.error(msg)
-            raise exception.UnsupportedPolicyException(reason=msg)
-        if not _SUPPORTS_ANTI_AFFINITY and 'anti-affinity' in group.policies:
-            msg = _("ServerGroupAntiAffinityFilter not configured")
-            LOG.error(msg)
-            raise exception.UnsupportedPolicyException(reason=msg)
-        if (not _SUPPORTS_SOFT_AFFINITY
-                and 'soft-affinity' in group.policies):
-            msg = _("ServerGroupSoftAffinityWeigher not configured")
-            LOG.error(msg)
-            raise exception.UnsupportedPolicyException(reason=msg)
-        if (not _SUPPORTS_SOFT_ANTI_AFFINITY
-                and 'soft-anti-affinity' in group.policies):
-            msg = _("ServerGroupSoftAntiAffinityWeigher not configured")
-            LOG.error(msg)
-            raise exception.UnsupportedPolicyException(reason=msg)
-        group_hosts = set(group.get_hosts())
-        user_hosts = set(user_group_hosts) if user_group_hosts else set()
-        return GroupDetails(hosts=user_hosts | group_hosts,
-                            policies=group.policies, members=group.members)
+    # NOTE(jaypipes): There's only a single policy for the server group
+    group_policy = group.policies[0]
+    checks = {'affinity': (_validate_filter, 'ServerGroupAffinityFilter'),
+              'anti-affinity': (_validate_filter,
+                                'ServerGroupAntiAffinityFilter'),
+              'soft-affinity': (_validate_weigher,
+                                'ServerGroupSoftAffinityWeigher'),
+              'soft-anti-affinity': (_validate_weigher,
+                                     'ServerGroupSoftAntiAffinityWeigher')}
+
+    check_fn, class_name = checks[group_policy]
+    if not check_fn(class_name):
+        msg = _("%s not configured") % class_name
+        LOG.error(msg)
+        raise exception.UnsupportedPolicyException(reason=msg)
+    group_hosts = set(group.get_hosts())
+    user_hosts = set(user_group_hosts) if user_group_hosts else set()
+    return GroupDetails(hosts=user_hosts | group_hosts,
+                        policies=group.policies, members=group.members)
 
 
 def setup_instance_group(context, request_spec, filter_properties):
diff --git a/nova/tests/functional/test_server_group.py b/nova/tests/functional/test_server_group.py
index cc7bec29f7..5e52088c14 100644
--- a/nova/tests/functional/test_server_group.py
+++ b/nova/tests/functional/test_server_group.py
@@ -15,7 +15,6 @@
 
 import time
 
-import mock
 from oslo_config import cfg
 
 from nova import context
@@ -496,7 +495,6 @@ class ServerGroupAffinityConfTest(ServerGroupTestBase):
     # Load only anti-affinity filter so affinity will be missing
     _enabled_filters = ['ServerGroupAntiAffinityFilter']
 
-    @mock.patch('nova.scheduler.utils._SUPPORTS_AFFINITY', None)
     def test_affinity_no_filter(self):
         created_group = self.api.post_server_groups(self.affinity)
 
@@ -513,7 +511,6 @@ class ServerGroupAntiAffinityConfTest(ServerGroupTestBase):
     # Load only affinity filter so anti-affinity will be missing
     _enabled_filters = ['ServerGroupAffinityFilter']
 
-    @mock.patch('nova.scheduler.utils._SUPPORTS_ANTI_AFFINITY', None)
     def test_anti_affinity_no_filter(self):
         created_group = self.api.post_server_groups(self.anti_affinity)
 
@@ -536,7 +533,6 @@ class ServerGroupSoftAffinityConfTest(ServerGroupTestBase):
         return ['nova.scheduler.weights.affinity.'
                 'ServerGroupSoftAntiAffinityWeigher']
 
-    @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY', None)
     def test_soft_affinity_no_filter(self):
         created_group = self.api.post_server_groups(self.soft_affinity)
 
@@ -559,7 +555,6 @@ class ServerGroupSoftAntiAffinityConfTest(ServerGroupTestBase):
         return ['nova.scheduler.weights.affinity.'
                 'ServerGroupSoftAffinityWeigher']
 
-    @mock.patch('nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY', None)
     def test_soft_anti_affinity_no_filter(self):
         created_group = self.api.post_server_groups(self.soft_anti_affinity)
 
@@ -580,20 +575,6 @@ class ServerGroupTestV215(ServerGroupTestV21):
     soft_affinity = {'name': 'fake-name-4',
                      'policies': ['soft-affinity']}
 
-    def setUp(self):
-        super(ServerGroupTestV215, self).setUp()
-
-        soft_affinity_patcher = mock.patch(
-            'nova.scheduler.utils._SUPPORTS_SOFT_AFFINITY')
-        soft_anti_affinity_patcher = mock.patch(
-            'nova.scheduler.utils._SUPPORTS_SOFT_ANTI_AFFINITY')
-        self.addCleanup(soft_affinity_patcher.stop)
-        self.addCleanup(soft_anti_affinity_patcher.stop)
-        self.mock_soft_affinity = soft_affinity_patcher.start()
-        self.mock_soft_anti_affinity = soft_anti_affinity_patcher.start()
-        self.mock_soft_affinity.return_value = None
-        self.mock_soft_anti_affinity.return_value = None
-
     def _get_weight_classes(self):
         return ['nova.scheduler.weights.affinity.'
                 'ServerGroupSoftAffinityWeigher',
diff --git a/nova/tests/unit/scheduler/test_scheduler_utils.py b/nova/tests/unit/scheduler/test_scheduler_utils.py
index 1893a7e212..63035e742a 100644
--- a/nova/tests/unit/scheduler/test_scheduler_utils.py
+++ b/nova/tests/unit/scheduler/test_scheduler_utils.py
@@ -244,25 +244,25 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
     def test_validate_filters_configured(self):
         self.flags(enabled_filters='FakeFilter1,FakeFilter2',
                    group='filter_scheduler')
-        self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
-        self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
-        self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
+        self.assertTrue(scheduler_utils._validate_filter('FakeFilter1'))
+        self.assertTrue(scheduler_utils._validate_filter('FakeFilter2'))
+        self.assertFalse(scheduler_utils._validate_filter('FakeFilter3'))
 
     def test_validate_weighers_configured(self):
         self.flags(weight_classes=[
             'ServerGroupSoftAntiAffinityWeigher', 'FakeFilter1'],
             group='filter_scheduler')
 
-        self.assertTrue(scheduler_utils.validate_weigher(
+        self.assertTrue(scheduler_utils._validate_weigher(
             'ServerGroupSoftAntiAffinityWeigher'))
-        self.assertTrue(scheduler_utils.validate_weigher('FakeFilter1'))
-        self.assertFalse(scheduler_utils.validate_weigher(
+        self.assertTrue(scheduler_utils._validate_weigher('FakeFilter1'))
+        self.assertFalse(scheduler_utils._validate_weigher(
             'ServerGroupSoftAffinityWeigher'))
 
     def test_validate_weighers_configured_all_weighers(self):
-        self.assertTrue(scheduler_utils.validate_weigher(
+        self.assertTrue(scheduler_utils._validate_weigher(
             'ServerGroupSoftAffinityWeigher'))
-        self.assertTrue(scheduler_utils.validate_weigher(
+        self.assertTrue(scheduler_utils._validate_weigher(
             'ServerGroupSoftAntiAffinityWeigher'))
 
     def _create_server_group(self, policy='anti-affinity'):
--
2.13.6

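In short, the patch above stops caching filter/weigher support in the _SUPPORTS_* module globals (which the tests then had to reset with mock.patch) and instead maps the group's single policy to the filter or weigher class that backs it, checking that class name against the configured lists on every call. A minimal standalone sketch of that lookup pattern, in plain Python rather than nova code, with ENABLED_FILTERS and WEIGHT_CLASSES as hard-coded stand-ins for the filter_scheduler.enabled_filters and weight_classes options:

# Stand-ins for CONF.filter_scheduler.enabled_filters / weight_classes;
# in nova these values come from oslo.config.
ENABLED_FILTERS = ['RetryFilter', 'ServerGroupAntiAffinityFilter']
WEIGHT_CLASSES = ['nova.scheduler.weights.affinity.'
                  'ServerGroupSoftAffinityWeigher']


def _validate_filter(name):
    # Substring match accepts both short and module-scoped names.
    return any(name in entry for entry in ENABLED_FILTERS)


def _validate_weigher(name):
    if 'nova.scheduler.weights.all_weighers' in WEIGHT_CLASSES:
        return True
    return any(name in entry for entry in WEIGHT_CLASSES)


CHECKS = {'affinity': (_validate_filter, 'ServerGroupAffinityFilter'),
          'anti-affinity': (_validate_filter, 'ServerGroupAntiAffinityFilter'),
          'soft-affinity': (_validate_weigher, 'ServerGroupSoftAffinityWeigher'),
          'soft-anti-affinity': (_validate_weigher,
                                 'ServerGroupSoftAntiAffinityWeigher')}


def check_group_policy(policy):
    # Evaluated on every call; nothing is cached in module globals.
    check_fn, class_name = CHECKS[policy]
    if not check_fn(class_name):
        raise RuntimeError('%s not configured' % class_name)


check_group_policy('anti-affinity')   # passes: the filter is enabled
check_group_policy('soft-affinity')   # passes: the weigher is configured
try:
    check_group_policy('affinity')    # ServerGroupAffinityFilter is missing
except RuntimeError as exc:
    print(exc)
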
@@ -0,0 +1,250 @@
From 1374f28766d56361b9342d77a32c0e52034060f1 Mon Sep 17 00:00:00 2001
From: Roman Dobosz <roman.dobosz@intel.com>
Date: Wed, 10 Jan 2018 10:37:54 +0100
Subject: [PATCH 3/3] get instance group's aggregate associations

Ocata backport for patch from Jay Pipes:
https://review.openstack.org/#/c/531243/
---
 nova/objects/instance_group.py                  |  36 +++++++-
 nova/tests/functional/db/test_instance_group.py | 112 ++++++++++++++++++++++++
 nova/tests/unit/objects/test_instance_group.py  |  21 +++++
 nova/tests/unit/objects/test_objects.py         |   2 +-
 4 files changed, 169 insertions(+), 2 deletions(-)

diff --git a/nova/objects/instance_group.py b/nova/objects/instance_group.py
index 670813b77e..2be47278b2 100644
--- a/nova/objects/instance_group.py
+++ b/nova/objects/instance_group.py
@@ -17,6 +17,7 @@ import copy
 from oslo_db import exception as db_exc
 from oslo_utils import uuidutils
 from oslo_utils import versionutils
+import sqlalchemy as sa
 from sqlalchemy.orm import contains_eager
 from sqlalchemy.orm import joinedload
 
@@ -122,7 +123,8 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
     # Version 1.8: Add count_members_by_user()
     # Version 1.9: Add get_by_instance_uuid()
     # Version 1.10: Add hosts field
-    VERSION = '1.10'
+    # Version 1.11: Add get_aggregate_uuids()
+    VERSION = '1.11'
 
     fields = {
         'id': fields.IntegerField(),
@@ -455,6 +457,38 @@ class InstanceGroup(base.NovaPersistentObject, base.NovaObject,
                          if instance.host]))
 
     @base.remotable
+    def get_aggregate_uuids(self, exclude=None):
+        """Returns a set of aggregate UUIDs associated with all compute nodes
+        that are housing all non-deleted instances in the group
+        """
+        filter_uuids = self.members
+        if exclude:
+            filter_uuids = set(filter_uuids) - set(exclude)
+        filters = {'uuid': filter_uuids, 'deleted': False}
+        instances = objects.InstanceList.get_by_filters(self._context,
+                                                        filters=filters)
+        instance_nodes = set([instance.node for instance in instances
+                              if instance.node])
+        if not instance_nodes:
+            return set()
+        return self._get_aggregate_uuids(self._context, instance_nodes)
+
+    @staticmethod
+    @db_api.api_context_manager.reader
+    def _get_aggregate_uuids(ctx, instance_nodes):
+        # Now find the aggregates associated with all those nodes
+        agg_tbl = api_models.Aggregate.__table__
+        agg_host_tbl = api_models.AggregateHost.__table__
+        join = sa.join(agg_tbl, agg_host_tbl,
+                       agg_tbl.c.id == agg_host_tbl.c.aggregate_id)
+        sel = sa.select([agg_tbl.c.uuid]).select_from(join)
+        sel = sel.where(agg_host_tbl.c.host.in_(instance_nodes))
+        sel = sel.group_by(agg_tbl.c.uuid)
+        res = ctx.session.execute(sel)
+        agg_uuids = [r[0] for r in res]
+        return set(agg_uuids)
+
+    @base.remotable
     def count_members_by_user(self, user_id):
         """Count the number of instances in a group belonging to a user."""
         filter_uuids = self.members
diff --git a/nova/tests/functional/db/test_instance_group.py b/nova/tests/functional/db/test_instance_group.py
index 4c4f627fe2..445ae655cc 100644
--- a/nova/tests/functional/db/test_instance_group.py
+++ b/nova/tests/functional/db/test_instance_group.py
@@ -18,6 +18,7 @@ from nova.db.sqlalchemy import api as db_api
 from nova import exception
 from nova import objects
 from nova.objects import base
+from nova.objects import fields as obj_fields
 from nova.objects import instance_group
 from nova import test
 from nova.tests import uuidsentinel as uuids
@@ -238,3 +239,114 @@ class InstanceGroupObjectTestCase(test.TestCase):
             self.context, 100)
         self.assertEqual(0, total)
         self.assertEqual(0, done)
+
+
+class InstanceGroupAggregatesTestCase(test.TestCase):
+    def setUp(self):
+        super(InstanceGroupAggregatesTestCase, self).setUp()
+        self.ctx = context.RequestContext('fake-user', 'fake-project')
+
+    def _create_compute_node(self, host, node):
+        cn = objects.ComputeNode(
+            self.ctx,
+            host=host,
+            vcpus=2,
+            memory_mb=2048,
+            local_gb=100,
+            vcpus_used=2,
+            memory_mb_used=2048,
+            local_gb_used=100,
+            hypervisor_type='ironic',
+            hypervisor_version=0,
+            hypervisor_hostname=node,
+            free_ram_mb=0,
+            free_disk_gb=0,
+            current_workload=0,
+            running_vms=0,
+            cpu_info='{}',
+            disk_available_least=0,
+            host_ip='1.1.1.1',
+            supported_hv_specs=[
+                objects.HVSpec.from_list([
+                    obj_fields.Architecture.I686,
+                    obj_fields.HVType.KVM,
+                    obj_fields.VMMode.HVM])
+            ],
+            metrics=None,
+            pci_device_pools=None,
+            extra_resources=None,
+            stats={},
+            numa_topology=None,
+            cpu_allocation_ratio=1.0,
+            ram_allocation_ratio=1.0,
+            disk_allocation_ratio=1.0)
+        cn.create()
+        return cn
+
+    def test_get_aggregate_uuids(self):
+        """Tests that when associating compute nodes to aggregates, and
+        creating an instance group with instances on those compute nodes, that
+        we are able to retrieve the correct set() of aggregate UUIDs from the
+        instance group.
+        """
+        agg1 = objects.Aggregate(self.ctx, name='agg1')
+        agg1.create()
+        agg2 = objects.Aggregate(self.ctx, name='agg2')
+        agg2.create()
+
+        # NOTE(gryf): creating bare instances doesn't propagate project_id
+        # for some reason, so that test would fail.
+        i1 = objects.Instance(self.ctx, host='host1', node='node1',
+                              project_id=self.ctx.project_id)
+        i1.create()
+        i2 = objects.Instance(self.ctx, host='host1', node='node2',
+                              project_id=self.ctx.project_id)
+        i2.create()
+        i3 = objects.Instance(self.ctx, host='host2', node='node3',
+                              project_id=self.ctx.project_id)
+        i3.create()
+
+        all_insts = objects.InstanceList.get_all(self.ctx)
+        exp_inst_uuids = set([i1.uuid, i2.uuid, i3.uuid])
+        self.assertEqual(exp_inst_uuids,
+                         set([inst.uuid for inst in all_insts]))
+
+        # Create a server group with just i1 and i2 and verify no aggregate
+        # UUIDs returned from InstanceGroup.get_aggregate_uuids() since the
+        # compute nodes have not yet been associated with any aggregates
+        g1 = objects.InstanceGroup(self.ctx,
+                                   name='g1',
+                                   user_id=self.ctx.user_id,
+                                   project_id=self.ctx.project_id,
+                                   members=[i1.uuid, i2.uuid],
+                                   policies=['aggregate-affinity'])
+        g1.create()
+
+        # Create a second server group with just i3; likewise, no aggregate
+        # UUIDs should be returned from InstanceGroup.get_aggregate_uuids()
+        # since its compute node has not yet been added to any aggregate
+        g2 = objects.InstanceGroup(self.ctx,
+                                   name='g2',
+                                   user_id=self.ctx.user_id,
+                                   project_id=self.ctx.project_id,
+                                   members=[i3.uuid],
+                                   policies=['aggregate-anti-affinity'])
+
+        g1_agg_uuids = g1.get_aggregate_uuids()
+        self.assertEqual(set(), g1_agg_uuids)
+
+        g2_agg_uuids = g2.get_aggregate_uuids()
+        self.assertEqual(set(), g2_agg_uuids)
+
+        # OK, now associate the compute nodes with various aggregates and
+        # verify the aggregate UUIDs returned by each instance group are
+        # correct.
+        agg1.add_host('node1')
+        agg1.add_host('node2')
+        agg2.add_host('node3')
+
+        g1_agg_uuids = g1.get_aggregate_uuids()
+        self.assertEqual(set([agg1.uuid]), g1_agg_uuids)
+
+        g2_agg_uuids = g2.get_aggregate_uuids()
+        self.assertEqual(set([agg2.uuid]), g2_agg_uuids)
diff --git a/nova/tests/unit/objects/test_instance_group.py b/nova/tests/unit/objects/test_instance_group.py
index d542c18afc..8da6712f6e 100644
--- a/nova/tests/unit/objects/test_instance_group.py
+++ b/nova/tests/unit/objects/test_instance_group.py
@@ -241,6 +241,27 @@ class _TestInstanceGroupObject(object):
         mock_il_get.assert_called_once_with(self.context,
                                             filters=expected_filters)
 
+    @mock.patch('nova.objects.InstanceGroup._get_aggregate_uuids')
+    @mock.patch('nova.objects.InstanceList.get_by_filters')
+    @mock.patch('nova.objects.InstanceGroup._get_from_db_by_uuid',
+                return_value=_INST_GROUP_DB)
+    def test_get_aggregate_uuids(self, mock_get_db, mock_il_get,
+                                 mock_internal):
+        mock_il_get.return_value = [objects.Instance(node='node1'),
+                                    objects.Instance(node='node2'),
+                                    objects.Instance(node=None)]
+        obj = objects.InstanceGroup.get_by_uuid(self.context, _DB_UUID)
+        obj.get_aggregate_uuids()
+        self.assertEqual(['instance_id1', 'instance_id2'], obj.members)
+        expected_filters = {
+            'uuid': ['instance_id1', 'instance_id2'],
+            'deleted': False
+        }
+        mock_il_get.assert_called_once_with(self.context,
+                                            filters=expected_filters)
+        exp_nodes = set(['node1', 'node2'])
+        mock_internal.assert_called_once_with(self.context, exp_nodes)
+
     def test_obj_make_compatible(self):
         obj = objects.InstanceGroup(self.context, **_INST_GROUP_DB)
         obj_primitive = obj.obj_to_primitive()
diff --git a/nova/tests/unit/objects/test_objects.py b/nova/tests/unit/objects/test_objects.py
index 71b919597f..a577820d0c 100644
--- a/nova/tests/unit/objects/test_objects.py
+++ b/nova/tests/unit/objects/test_objects.py
@@ -1106,7 +1106,7 @@ object_data = {
     'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
     'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
     'InstanceFaultList': '1.2-6bb72de2872fe49ded5eb937a93f2451',
-    'InstanceGroup': '1.10-1a0c8c7447dc7ecb9da53849430c4a5f',
+    'InstanceGroup': '1.11-bdd9fa6ab3c80e92fd43b3ba5393e368',
     'InstanceGroupList': '1.7-be18078220513316abd0ae1b2d916873',
     'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
     'InstanceList': '2.2-ff71772c7bf6d72f6ef6eee0199fb1c9',
--
2.13.6

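The new InstanceGroup.get_aggregate_uuids() ultimately runs one SQLAlchemy Core query against the API database: join aggregates to aggregate_hosts, keep the rows whose host is one of the nodes housing the group's instances, and reduce to distinct aggregate UUIDs. A self-contained sketch of just that query follows; it is not nova code: the two table definitions and the in-memory SQLite engine are illustrative stand-ins for the API models, and the sa.select([...]) call uses the 1.x-style SQLAlchemy API that the patch itself targets.

import sqlalchemy as sa

metadata = sa.MetaData()
# Illustrative stand-ins for api_models.Aggregate / api_models.AggregateHost.
aggregates = sa.Table(
    'aggregates', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('uuid', sa.String(36), nullable=False))
aggregate_hosts = sa.Table(
    'aggregate_hosts', metadata,
    sa.Column('id', sa.Integer, primary_key=True),
    sa.Column('aggregate_id', sa.Integer, sa.ForeignKey('aggregates.id')),
    sa.Column('host', sa.String(255)))

engine = sa.create_engine('sqlite://')
metadata.create_all(engine)

with engine.connect() as conn:
    conn.execute(aggregates.insert(), [{'id': 1, 'uuid': 'agg1-uuid'},
                                       {'id': 2, 'uuid': 'agg2-uuid'}])
    conn.execute(aggregate_hosts.insert(), [{'aggregate_id': 1, 'host': 'node1'},
                                            {'aggregate_id': 1, 'host': 'node2'},
                                            {'aggregate_id': 2, 'host': 'node3'}])

    # Nodes housing the group's non-deleted instances (cf. get_aggregate_uuids).
    instance_nodes = {'node1', 'node2'}

    join = sa.join(aggregates, aggregate_hosts,
                   aggregates.c.id == aggregate_hosts.c.aggregate_id)
    sel = sa.select([aggregates.c.uuid]).select_from(join)
    sel = sel.where(aggregate_hosts.c.host.in_(instance_nodes))
    sel = sel.group_by(aggregates.c.uuid)

    agg_uuids = set(row[0] for row in conn.execute(sel))
    print(agg_uuids)   # {'agg1-uuid'}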