VYPR
High severity · 8.6 · NVD Advisory · Published Dec 5, 2017 · Updated May 13, 2026

CVE-2017-17051

CVE-2017-17051

Description

An issue was discovered in the default FilterScheduler in OpenStack Nova 16.0.3. By repeatedly rebuilding an instance with new images, an authenticated user may consume untracked resources on a hypervisor host leading to a denial of service, aka doubled resource allocations. This regression was introduced with the fix for OSSA-2017-005 (CVE-2017-16239); however, only Nova stable/pike or later deployments with that fix applied and relying on the default FilterScheduler are affected.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
nova (PyPI)
< 16.0.4 | 16.0.4

Affected products

1

Patches

2
25a1d78e8306

Fix doubling allocations on rebuild

https://github.com/openstack/nova · Dan Smith · Nov 20, 2017 · via GHSA
4 files changed · +42 −17
  • nova/scheduler/filter_scheduler.py (+10 −0, modified)
    @@ -30,6 +30,7 @@
     from nova import rpc
     from nova.scheduler import client
     from nova.scheduler import driver
    +from nova.scheduler import utils
     
     CONF = nova.conf.CONF
     LOG = logging.getLogger(__name__)
    @@ -290,6 +291,15 @@ def _claim_resources(self, ctx, spec_obj, instance_uuid, alloc_reqs):
                                API's PUT /allocations/{consumer_uuid} call to claim
                                resources for the instance
             """
    +
    +        if utils.request_is_rebuild(spec_obj):
    +            # NOTE(danms): This is a rebuild-only scheduling request, so we
    +            # should not be doing any extra claiming
    +            LOG.debug('Not claiming resources in the placement API for '
    +                      'rebuild-only scheduling of instance %(uuid)s',
    +                      {'uuid': instance_uuid})
    +            return True
    +
             LOG.debug("Attempting to claim resources in the placement API for "
                       "instance %s", instance_uuid)
     
    
  • nova/tests/functional/test_servers.py (+3 −16, modified)
    @@ -1170,15 +1170,6 @@ def assertFlavorMatchesAllocation(flavor, allocation):
                 self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
                 self.assertEqual(flavor['disk'], allocation['DISK_GB'])
     
    -        def assertFlavorsMatchAllocation(old_flavor, new_flavor,
    -                                         allocation):
    -            self.assertEqual(old_flavor['vcpus'] + new_flavor['vcpus'],
    -                             allocation['VCPU'])
    -            self.assertEqual(old_flavor['ram'] + new_flavor['ram'],
    -                             allocation['MEMORY_MB'])
    -            self.assertEqual(old_flavor['disk'] + new_flavor['disk'],
    -                             allocation['DISK_GB'])
    -
             nodename = self.compute.manager._get_nodename(None)
             rp_uuid = _get_provider_uuid_by_host(nodename)
             # make sure we start with no usage on the compute node
    @@ -1225,16 +1216,12 @@ def assertFlavorsMatchAllocation(old_flavor, new_flavor,
     
             # The usage and allocations should not have changed.
             rp_usages = _get_provider_usages(rp_uuid)
    -        # FIXME(mriedem): This is a bug where the scheduler doubled up the
    -        # allocations for the instance even though we're just rebuilding
    -        # to the same host. Uncomment this once fixed.
    -        # assertFlavorMatchesAllocation(flavor, rp_usages)
    -        assertFlavorsMatchAllocation(flavor, flavor, rp_usages)
    +        assertFlavorMatchesAllocation(flavor, rp_usages)
    +
             allocs = _get_allocations_by_server_uuid(server['id'])
             self.assertIn(rp_uuid, allocs)
             allocs = allocs[rp_uuid]['resources']
    -        # assertFlavorMatchesAllocation(flavor, allocs)
    -        assertFlavorsMatchAllocation(flavor, flavor, allocs)
    +        assertFlavorMatchesAllocation(flavor, allocs)
     
     
     class ProviderUsageBaseTestCase(test.TestCase,
    
  • nova/tests/unit/scheduler/test_filter_scheduler.py (+11 −1, modified)
    @@ -526,7 +526,7 @@ def test_claim_resources(self):
             instance.
             """
             ctx = mock.Mock(user_id=uuids.user_id)
    -        spec_obj = mock.Mock(project_id=uuids.project_id)
    +        spec_obj = objects.RequestSpec(project_id=uuids.project_id)
             instance_uuid = uuids.instance
             alloc_reqs = [mock.sentinel.alloc_req]
     
    @@ -539,6 +539,16 @@ def test_claim_resources(self):
                 mock.sentinel.alloc_req, uuids.project_id, uuids.user_id)
             self.assertTrue(res)
     
    +    @mock.patch('nova.scheduler.utils.request_is_rebuild')
    +    def test_claim_resouces_for_policy_check(self, mock_policy):
    +        mock_policy.return_value = True
    +        res = self.driver._claim_resources(None, mock.sentinel.spec_obj,
    +                                           mock.sentinel.instance_uuid,
    +                                           [])
    +        self.assertTrue(res)
    +        mock_policy.assert_called_once_with(mock.sentinel.spec_obj)
    +        self.assertFalse(self.placement_client.claim_resources.called)
    +
         def test_add_retry_host(self):
             retry = dict(num_attempts=1, hosts=[])
             filter_properties = dict(retry=retry)
    
  • releasenotes/notes/bug-1732976-doubled-allocations-rebuild-23e4d3b06eb4f43f.yaml (+18 −0, added)
    @@ -0,0 +1,18 @@
    +---
    +security:
    +  - |
    +    `OSSA-2017-006`_: Nova FilterScheduler doubles resource allocations during
    +    rebuild with new image (CVE-2017-17051)
    +
    +    By repeatedly rebuilding an instance with new images, an authenticated user
    +    may consume untracked resources on a hypervisor host leading to a denial of
    +    service. This regression was introduced with the fix for `OSSA-2017-005`_
    +    (CVE-2017-16239), however, only Nova stable/pike or later deployments with
    +    that fix applied and relying on the default FilterScheduler are affected.
    +
    +    The fix is in the `nova-api` and `nova-scheduler` services.
    +
    +    .. note:: The fix for errata in `OSSA-2017-005`_ (CVE-2017-16239) will
    +              need to be applied in addition to this fix.
    +
    +    .. _OSSA-2017-006: https://security.openstack.org/ossa/OSSA-2017-006.html
    
fed660c1189f

Fix doubling allocations on rebuild

https://github.com/openstack/nova · Dan Smith · Nov 20, 2017 · via GHSA
4 files changed · +42 −25
  • nova/scheduler/filter_scheduler.py (+10 −0, modified)
    @@ -30,6 +30,7 @@
     from nova import rpc
     from nova.scheduler import client
     from nova.scheduler import driver
    +from nova.scheduler import utils
     
     CONF = nova.conf.CONF
     LOG = logging.getLogger(__name__)
    @@ -271,6 +272,15 @@ def _claim_resources(self, ctx, spec_obj, instance_uuid, alloc_reqs):
                                PUT /allocations/{consumer_uuid} call to claim
                                resources for the instance
             """
    +
    +        if utils.request_is_rebuild(spec_obj):
    +            # NOTE(danms): This is a rebuild-only scheduling request, so we
    +            # should not be doing any extra claiming
    +            LOG.debug('Not claiming resources in the placement API for '
    +                      'rebuild-only scheduling of instance %(uuid)s',
    +                      {'uuid': instance_uuid})
    +            return True
    +
             LOG.debug("Attempting to claim resources in the placement API for "
                       "instance %s", instance_uuid)
     
    
  • nova/tests/functional/test_servers.py (+3 −24, modified)
    @@ -1065,14 +1065,6 @@ class ServerRebuildTestCase(integrated_helpers._IntegratedTestBase,
         # can use to update image metadata via our compute images proxy API.
         microversion = '2.38'
     
    -    def setUp(self):
    -        # We have to use the MediumFakeDriver for test_rebuild_with_new_image
    -        # since the scheduler doubles the VCPU allocation until the bug is
    -        # fixed.
    -        # TODO(mriedem): Remove this once the bug is fixed.
    -        self.flags(compute_driver='fake.MediumFakeDriver')
    -        super(ServerRebuildTestCase, self).setUp()
    -
         # We need the ImagePropertiesFilter so override the base class setup
         # which configures to use the chance_scheduler.
         def _setup_scheduler_service(self):
    @@ -1182,15 +1174,6 @@ def assertFlavorMatchesAllocation(flavor, allocation):
                 self.assertEqual(flavor['ram'], allocation['MEMORY_MB'])
                 self.assertEqual(flavor['disk'], allocation['DISK_GB'])
     
    -        def assertFlavorsMatchAllocation(old_flavor, new_flavor,
    -                                         allocation):
    -            self.assertEqual(old_flavor['vcpus'] + new_flavor['vcpus'],
    -                             allocation['VCPU'])
    -            self.assertEqual(old_flavor['ram'] + new_flavor['ram'],
    -                             allocation['MEMORY_MB'])
    -            self.assertEqual(old_flavor['disk'] + new_flavor['disk'],
    -                             allocation['DISK_GB'])
    -
             rp_uuid = _get_provider_uuid()
             # make sure we start with no usage on the compute node
             rp_usages = _get_provider_usages(rp_uuid)
    @@ -1236,16 +1219,12 @@ def assertFlavorsMatchAllocation(old_flavor, new_flavor,
     
             # The usage and allocations should not have changed.
             rp_usages = _get_provider_usages(rp_uuid)
    -        # FIXME(mriedem): This is a bug where the scheduler doubled up the
    -        # allocations for the instance even though we're just rebuilding
    -        # to the same host. Uncomment this once fixed.
    -        # assertFlavorMatchesAllocation(flavor, rp_usages)
    -        assertFlavorsMatchAllocation(flavor, flavor, rp_usages)
    +        assertFlavorMatchesAllocation(flavor, rp_usages)
    +
             allocs = _get_allocations_by_server_uuid(server['id'])
             self.assertIn(rp_uuid, allocs)
             allocs = allocs[rp_uuid]['resources']
    -        # assertFlavorMatchesAllocation(flavor, allocs)
    -        assertFlavorsMatchAllocation(flavor, flavor, allocs)
    +        assertFlavorMatchesAllocation(flavor, allocs)
     
     
     class ProviderUsageBaseTestCase(test.TestCase,
    
  • nova/tests/unit/scheduler/test_filter_scheduler.py (+11 −1, modified)
    @@ -482,7 +482,7 @@ def test_claim_resources(self):
             instance.
             """
             ctx = mock.Mock(user_id=uuids.user_id)
    -        spec_obj = mock.Mock(project_id=uuids.project_id)
    +        spec_obj = objects.RequestSpec(project_id=uuids.project_id)
             instance_uuid = uuids.instance
             alloc_reqs = [mock.sentinel.alloc_req]
     
    @@ -495,6 +495,16 @@ def test_claim_resources(self):
                 mock.sentinel.alloc_req, uuids.project_id, uuids.user_id)
             self.assertTrue(res)
     
    +    @mock.patch('nova.scheduler.utils.request_is_rebuild')
    +    def test_claim_resouces_for_policy_check(self, mock_policy):
    +        mock_policy.return_value = True
    +        res = self.driver._claim_resources(None, mock.sentinel.spec_obj,
    +                                           mock.sentinel.instance_uuid,
    +                                           [])
    +        self.assertTrue(res)
    +        mock_policy.assert_called_once_with(mock.sentinel.spec_obj)
    +        self.assertFalse(self.placement_client.claim_resources.called)
    +
         def test_add_retry_host(self):
             retry = dict(num_attempts=1, hosts=[])
             filter_properties = dict(retry=retry)
    
  • releasenotes/notes/bug-1732976-doubled-allocations-rebuild-23e4d3b06eb4f43f.yaml (+18 −0, added)
    @@ -0,0 +1,18 @@
    +---
    +security:
    +  - |
    +    `OSSA-2017-006`_: Nova FilterScheduler doubles resource allocations during
    +    rebuild with new image (CVE-2017-17051)
    +
    +    By repeatedly rebuilding an instance with new images, an authenticated user
    +    may consume untracked resources on a hypervisor host leading to a denial of
    +    service. This regression was introduced with the fix for `OSSA-2017-005`_
    +    (CVE-2017-16239), however, only Nova stable/pike or later deployments with
    +    that fix applied and relying on the default FilterScheduler are affected.
    +
    +    The fix is in the `nova-api` and `nova-scheduler` services.
    +
    +    .. note:: The fix for errata in `OSSA-2017-005`_ (CVE-2017-16239) will
    +              need to be applied in addition to this fix.
    +
    +    .. _OSSA-2017-006: https://security.openstack.org/ossa/OSSA-2017-006.html
    

Vulnerability mechanics

Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.

References

9

News mentions

0

No linked articles in our index yet.