VYPR
Moderate severity · NVD Advisory · Published Aug 25, 2014 · Updated May 6, 2026

CVE-2014-5356

CVE-2014-5356

Description

OpenStack Image Registry and Delivery Service (Glance) before 2013.2.4, 2014.x before 2014.1.3, and Juno before Juno-3, when using the V2 API, does not properly enforce the image_size_cap configuration option, which allows remote authenticated users to cause a denial of service (disk consumption) by uploading a large image.

Affected packages

Versions sourced from the GitHub Security Advisory.

Package | Affected versions | Patched versions
glance (PyPI) | < 11.0.0a0 | 11.0.0a0

Affected products

10
  • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):*:*:*:*:*:*:*:* (+ 8 more)
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):*:*:*:*:*:*:*:*range: <=2013.2.3
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2013.2:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2013.2.1:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2013.2.2:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2014.1:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2014.1.1:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):2014.1.2:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):juno-1:*:*:*:*:*:*:*
    • cpe:2.3:a:openstack:image_registry_and_delivery_service_\(glance\):juno-2:*:*:*:*:*:*:*
  • cpe:2.3:o:canonical:ubuntu_linux:14.04:*:*:*:lts:*:*:*

Patches

3
92ab00fca692

Enforce image_size_cap on v2 upload

https://github.com/openstack/glance · Tom Leaman · May 2, 2014 · via GHSA
6 files changed · +61 −4
  • glance/db/__init__.py+5 0 modified
    @@ -27,6 +27,7 @@
     
     
     CONF = cfg.CONF
    +CONF.import_opt('image_size_cap', 'glance.common.config')
     CONF.import_opt('metadata_encryption_key', 'glance.common.config')
     
     
    @@ -150,6 +151,8 @@ def _format_image_to_db(self, image):
     
         def add(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             # the updated_at value is not set in the _format_image_to_db
             # function since it is specific to image create
             image_values['updated_at'] = image.updated_at
    @@ -161,6 +164,8 @@ def add(self, image):
     
         def save(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             try:
                 new_values = self.db_api.image_update(self.context,
                                                       image.image_id,
    
  • glance/location.py+4 1 modified
    @@ -354,7 +354,10 @@ def set_data(self, data, size=None):
                 size = 0  # NOTE(markwash): zero -> unknown size
             location, size, checksum, loc_meta = self.store_api.add_to_backend(
                 self.context, CONF.default_store,
    -            self.image.image_id, utils.CooperativeReader(data), size)
    +            self.image.image_id,
    +            utils.LimitingReader(utils.CooperativeReader(data),
    +                                 CONF.image_size_cap),
    +            size)
             self.image.locations = [{'url': location, 'metadata': loc_meta,
                                      'status': 'active'}]
             self.image.size = size
    
  • glance/tests/functional/__init__.py+2 0 modified
    @@ -279,6 +279,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
             self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
             self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
             self.log_file = os.path.join(self.test_dir, "api.log")
    +        self.image_size_cap = 1099511627776
             self.s3_store_host = "s3.amazonaws.com"
             self.s3_store_access_key = ""
             self.s3_store_secret_key = ""
    @@ -342,6 +343,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
     registry_host = 127.0.0.1
     registry_port = %(registry_port)s
     log_file = %(log_file)s
    +image_size_cap = %(image_size_cap)d
     s3_store_host = %(s3_store_host)s
     s3_store_access_key = %(s3_store_access_key)s
     s3_store_secret_key = %(s3_store_secret_key)s
    
  • glance/tests/functional/v2/test_images.py+42 0 modified
    @@ -520,6 +520,48 @@ def test_download_policy_when_cache_is_not_enabled(self):
     
             self.stop_servers()
     
    +    def test_image_size_cap(self):
    +        self.api_server.image_size_cap = 128
    +        self.start_servers(**self.__dict__.copy())
    +        # create an image
    +        path = self._url('/v2/images')
    +        headers = self._headers({'content-type': 'application/json'})
    +        data = jsonutils.dumps({'name': 'image-size-cap-test-image',
    +                                'type': 'kernel', 'disk_format': 'aki',
    +                                'container_format': 'aki'})
    +        response = requests.post(path, headers=headers, data=data)
    +        self.assertEqual(201, response.status_code)
    +
    +        image = jsonutils.loads(response.text)
    +        image_id = image['id']
    +
    +        #try to populate it with oversized data
    +        path = self._url('/v2/images/%s/file' % image_id)
    +        headers = self._headers({'Content-Type': 'application/octet-stream'})
    +
    +        class StreamSim(object):
    +            # Using a one-shot iterator to force chunked transfer in the PUT
    +            # request
    +            def __init__(self, size):
    +                self.size = size
    +
    +            def __iter__(self):
    +                yield 'Z' * self.size
    +
    +        response = requests.put(path, headers=headers, data=StreamSim(
    +                                self.api_server.image_size_cap + 1))
    +        self.assertEqual(413, response.status_code)
    +
    +        # hashlib.md5('Z'*129).hexdigest()
    +        #     == '76522d28cb4418f12704dfa7acd6e7ee'
    +        # If the image has this checksum, it means that the whole stream was
    +        # accepted and written to the store, which should not be the case.
    +        path = self._url('/v2/images/{0}'.format(image_id))
    +        headers = self._headers({'content-type': 'application/json'})
    +        response = requests.get(path, headers=headers)
    +        image_checksum = jsonutils.loads(response.text).get('checksum')
    +        self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee')
    +
         def test_permissions(self):
             self.start_servers(**self.__dict__.copy())
             # Create an image that belongs to TENANT1
    
  • glance/tests/unit/test_store_image.py+4 2 modified
    @@ -124,8 +124,10 @@ def fake_get_from_backend(self, context, location):
     
             self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend',
                            fake_get_from_backend)
    -
    -        self.assertEqual(image1.get_data().fd, 'ZZZ')
    +        # This time, image1.get_data() returns the data wrapped in a
    +        # LimitingReader|CooperativeReader pipeline, so peeking under
    +        # the hood of those objects to get at the underlying string.
    +        self.assertEqual(image1.get_data().data.fd, 'ZZZ')
             image1.locations.pop(0)
             self.assertEqual(len(image1.locations), 1)
             image2.delete()
    
  • glance/tests/unit/utils.py+4 1 modified
    @@ -160,7 +160,10 @@ def add_to_backend(self, context, scheme, image_id, data, size):
                 if image_id in location:
                     raise exception.Duplicate()
             if not size:
    -            size = len(data.fd)
    +            # 'data' is a string wrapped in a LimitingReader|CooperativeReader
    +            # pipeline, so peek under the hood of those objects to get at the
    +            # string itself.
    +            size = len(data.data.fd)
             if (current_store_size + size) > store_max_size:
                 raise exception.StorageFull()
             if context.user == USER2:
    
12f43cfed5a4

Enforce image_size_cap on v2 upload

https://github.com/openstack/glance · Tom Leaman · May 2, 2014 · via GHSA
6 files changed · +61 −4
  • glance/db/__init__.py+5 0 modified
    @@ -32,6 +32,7 @@
                          'all DB API calls')
     
     CONF = cfg.CONF
    +CONF.import_opt('image_size_cap', 'glance.common.config')
     CONF.import_opt('metadata_encryption_key', 'glance.common.config')
     CONF.register_opt(db_opt)
     
    @@ -148,6 +149,8 @@ def _format_image_to_db(self, image):
     
         def add(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             # the updated_at value is not set in the _format_image_to_db
             # function since it is specific to image create
             image_values['updated_at'] = image.updated_at
    @@ -159,6 +162,8 @@ def add(self, image):
     
         def save(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             try:
                 new_values = self.db_api.image_update(self.context,
                                                       image.image_id,
    
  • glance/store/__init__.py+4 1 modified
    @@ -646,7 +646,10 @@ def set_data(self, data, size=None):
                 size = 0  # NOTE(markwash): zero -> unknown size
             location, size, checksum, loc_meta = self.store_api.add_to_backend(
                     self.context, CONF.default_store,
    -                self.image.image_id, utils.CooperativeReader(data), size)
    +                self.image.image_id,
    +                utils.LimitingReader(utils.CooperativeReader(data),
    +                                     CONF.image_size_cap),
    +                size)
             self.image.locations = [{'url': location, 'metadata': loc_meta}]
             self.image.size = size
             self.image.checksum = checksum
    
  • glance/tests/functional/__init__.py+2 0 modified
    @@ -279,6 +279,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
             self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
             self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
             self.log_file = os.path.join(self.test_dir, "api.log")
    +        self.image_size_cap = 1099511627776
             self.s3_store_host = "s3.amazonaws.com"
             self.s3_store_access_key = ""
             self.s3_store_secret_key = ""
    @@ -332,6 +333,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
     registry_host = 127.0.0.1
     registry_port = %(registry_port)s
     log_file = %(log_file)s
    +image_size_cap = %(image_size_cap)d
     s3_store_host = %(s3_store_host)s
     s3_store_access_key = %(s3_store_access_key)s
     s3_store_secret_key = %(s3_store_secret_key)s
    
  • glance/tests/functional/v2/test_images.py+42 0 modified
    @@ -259,6 +259,48 @@ def test_image_lifecycle(self):
     
             self.stop_servers()
     
    +    def test_image_size_cap(self):
    +        self.api_server.image_size_cap = 128
    +        self.start_servers(**self.__dict__.copy())
    +        # create an image
    +        path = self._url('/v2/images')
    +        headers = self._headers({'content-type': 'application/json'})
    +        data = json.dumps({'name': 'image-size-cap-test-image',
    +                           'type': 'kernel', 'disk_format': 'aki',
    +                           'container_format': 'aki'})
    +        response = requests.post(path, headers=headers, data=data)
    +        self.assertEqual(201, response.status_code)
    +
    +        image = json.loads(response.text)
    +        image_id = image['id']
    +
    +        #try to populate it with oversized data
    +        path = self._url('/v2/images/%s/file' % image_id)
    +        headers = self._headers({'Content-Type': 'application/octet-stream'})
    +
    +        class StreamSim(object):
    +            # Using a one-shot iterator to force chunked transfer in the PUT
    +            # request
    +            def __init__(self, size):
    +                self.size = size
    +
    +            def __iter__(self):
    +                yield 'Z' * self.size
    +
    +        response = requests.put(path, headers=headers, data=StreamSim(
    +                                self.api_server.image_size_cap + 1))
    +        self.assertEqual(413, response.status_code)
    +
    +        # hashlib.md5('Z'*129).hexdigest()
    +        #     == '76522d28cb4418f12704dfa7acd6e7ee'
    +        # If the image has this checksum, it means that the whole stream was
    +        # accepted and written to the store, which should not be the case.
    +        path = self._url('/v2/images/{0}'.format(image_id))
    +        headers = self._headers({'content-type': 'application/json'})
    +        response = requests.get(path, headers=headers)
    +        image_checksum = json.loads(response.text).get('checksum')
    +        self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee')
    +
         def test_permissions(self):
             # Create an image that belongs to TENANT1
             path = self._url('/v2/images')
    
  • glance/tests/unit/test_store_image.py+4 2 modified
    @@ -126,8 +126,10 @@ def fake_get_from_backend(self, context, location):
     
             self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend',
                            fake_get_from_backend)
    -
    -        self.assertEquals(image1.get_data().fd, 'ZZZ')
    +        # This time, image1.get_data() returns the data wrapped in a
    +        # LimitingReader|CooperativeReader pipeline, so peeking under
    +        # the hood of those objects to get at the underlying string.
    +        self.assertEquals(image1.get_data().data.fd, 'ZZZ')
             image1.locations.pop(0)
             self.assertEquals(len(image1.locations), 1)
             image2.delete()
    
  • glance/tests/unit/utils.py+4 1 modified
    @@ -149,7 +149,10 @@ def add_to_backend(self, context, scheme, image_id, data, size):
                 if image_id in location:
                     raise exception.Duplicate()
             if not size:
    -            size = len(data.fd)
    +            # 'data' is a string wrapped in a LimitingReader|CooperativeReader
    +            # pipeline, so peek under the hood of those objects to get at the
    +            # string itself.
    +            size = len(data.data.fd)
             if (current_store_size + size) > store_max_size:
                 raise exception.StorageFull()
             if context.user == USER2:
    
31a4d1852a0c

Enforce image_size_cap on v2 upload

https://github.com/openstack/glance · Tom Leaman · May 2, 2014 · via GHSA
6 files changed · +61 −4
  • glance/db/__init__.py+5 0 modified
    @@ -27,6 +27,7 @@
     
     
     CONF = cfg.CONF
    +CONF.import_opt('image_size_cap', 'glance.common.config')
     CONF.import_opt('metadata_encryption_key', 'glance.common.config')
     
     
    @@ -150,6 +151,8 @@ def _format_image_to_db(self, image):
     
         def add(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             # the updated_at value is not set in the _format_image_to_db
             # function since it is specific to image create
             image_values['updated_at'] = image.updated_at
    @@ -161,6 +164,8 @@ def add(self, image):
     
         def save(self, image):
             image_values = self._format_image_to_db(image)
    +        if image_values['size'] > CONF.image_size_cap:
    +            raise exception.ImageSizeLimitExceeded
             try:
                 new_values = self.db_api.image_update(self.context,
                                                       image.image_id,
    
  • glance/store/__init__.py+4 1 modified
    @@ -721,7 +721,10 @@ def set_data(self, data, size=None):
                 size = 0  # NOTE(markwash): zero -> unknown size
             location, size, checksum, loc_meta = self.store_api.add_to_backend(
                 self.context, CONF.default_store,
    -            self.image.image_id, utils.CooperativeReader(data), size)
    +            self.image.image_id,
    +            utils.LimitingReader(utils.CooperativeReader(data),
    +                                 CONF.image_size_cap),
    +            size)
             self.image.locations = [{'url': location, 'metadata': loc_meta}]
             self.image.size = size
             self.image.checksum = checksum
    
  • glance/tests/functional/__init__.py+2 0 modified
    @@ -280,6 +280,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
             self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
             self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
             self.log_file = os.path.join(self.test_dir, "api.log")
    +        self.image_size_cap = 1099511627776
             self.s3_store_host = "s3.amazonaws.com"
             self.s3_store_access_key = ""
             self.s3_store_secret_key = ""
    @@ -341,6 +342,7 @@ def __init__(self, test_dir, port, policy_file, delayed_delete=False,
     registry_host = 127.0.0.1
     registry_port = %(registry_port)s
     log_file = %(log_file)s
    +image_size_cap = %(image_size_cap)d
     s3_store_host = %(s3_store_host)s
     s3_store_access_key = %(s3_store_access_key)s
     s3_store_secret_key = %(s3_store_secret_key)s
    
  • glance/tests/functional/v2/test_images.py+42 0 modified
    @@ -451,6 +451,48 @@ def _verify_image_checksum_and_status(checksum, status):
     
             self.stop_servers()
     
    +    def test_image_size_cap(self):
    +        self.api_server.image_size_cap = 128
    +        self.start_servers(**self.__dict__.copy())
    +        # create an image
    +        path = self._url('/v2/images')
    +        headers = self._headers({'content-type': 'application/json'})
    +        data = jsonutils.dumps({'name': 'image-size-cap-test-image',
    +                                'type': 'kernel', 'disk_format': 'aki',
    +                                'container_format': 'aki'})
    +        response = requests.post(path, headers=headers, data=data)
    +        self.assertEqual(201, response.status_code)
    +
    +        image = jsonutils.loads(response.text)
    +        image_id = image['id']
    +
    +        #try to populate it with oversized data
    +        path = self._url('/v2/images/%s/file' % image_id)
    +        headers = self._headers({'Content-Type': 'application/octet-stream'})
    +
    +        class StreamSim(object):
    +            # Using a one-shot iterator to force chunked transfer in the PUT
    +            # request
    +            def __init__(self, size):
    +                self.size = size
    +
    +            def __iter__(self):
    +                yield 'Z' * self.size
    +
    +        response = requests.put(path, headers=headers, data=StreamSim(
    +                                self.api_server.image_size_cap + 1))
    +        self.assertEqual(413, response.status_code)
    +
    +        # hashlib.md5('Z'*129).hexdigest()
    +        #     == '76522d28cb4418f12704dfa7acd6e7ee'
    +        # If the image has this checksum, it means that the whole stream was
    +        # accepted and written to the store, which should not be the case.
    +        path = self._url('/v2/images/{0}'.format(image_id))
    +        headers = self._headers({'content-type': 'application/json'})
    +        response = requests.get(path, headers=headers)
    +        image_checksum = jsonutils.loads(response.text).get('checksum')
    +        self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee')
    +
         def test_permissions(self):
             # Create an image that belongs to TENANT1
             path = self._url('/v2/images')
    
  • glance/tests/unit/test_store_image.py+4 2 modified
    @@ -119,8 +119,10 @@ def fake_get_from_backend(self, context, location):
     
             self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend',
                            fake_get_from_backend)
    -
    -        self.assertEqual(image1.get_data().fd, 'ZZZ')
    +        # This time, image1.get_data() returns the data wrapped in a
    +        # LimitingReader|CooperativeReader pipeline, so peeking under
    +        # the hood of those objects to get at the underlying string.
    +        self.assertEqual(image1.get_data().data.fd, 'ZZZ')
             image1.locations.pop(0)
             self.assertEqual(len(image1.locations), 1)
             image2.delete()
    
  • glance/tests/unit/utils.py+4 1 modified
    @@ -148,7 +148,10 @@ def add_to_backend(self, context, scheme, image_id, data, size):
                 if image_id in location:
                     raise exception.Duplicate()
             if not size:
    -            size = len(data.fd)
    +            # 'data' is a string wrapped in a LimitingReader|CooperativeReader
    +            # pipeline, so peek under the hood of those objects to get at the
    +            # string itself.
    +            size = len(data.data.fd)
             if (current_store_size + size) > store_max_size:
                 raise exception.StorageFull()
             if context.user == USER2:
    

Vulnerability mechanics

Generated by null/stub on May 9, 2026. Inputs: CWE entries + fix-commit diffs from this CVE's patches. Citations validated against bundle.

References

12

News mentions

0

No linked articles in our index yet.