components/openstack/glance/patches/06-CVE-2014-5356.patch
branch s11u2-sru
changeset 3290:e4a964ba3f11
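
The patch below enforces the image_size_cap quota at two points: when image
metadata is saved (glance/db/__init__.py) and while image data streams to the
backend store (glance/store/__init__.py). As orientation before the hunks, a
minimal runnable sketch of the metadata-side guard follows; the exception
class and the cap constant are local stand-ins for glance.common.exception
and the configured image_size_cap option, not Glance's actual code.

    class ImageSizeLimitExceeded(Exception):
        """Stand-in for glance.common.exception.ImageSizeLimitExceeded."""

    IMAGE_SIZE_CAP = 1099511627776  # 1 TiB, the value the test server below uses

    def check_image_size(image_values, cap=IMAGE_SIZE_CAP):
        # Mirrors the guard added to ImageRepo.add() and ImageRepo.save():
        # refuse to persist metadata whose declared size exceeds the cap.
        if image_values['size'] > cap:
            raise ImageSizeLimitExceeded()

    check_image_size({'size': 512})                     # within the cap: no error
    try:
        check_image_size({'size': IMAGE_SIZE_CAP + 1})  # over the cap
    except ImageSizeLimitExceeded:
        print('metadata save rejected')
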
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/components/openstack/glance/patches/06-CVE-2014-5356.patch	Tue Aug 26 15:05:45 2014 -0700
@@ -0,0 +1,187 @@
+Upstream patch to fix CVE-2014-5356.  This fix will be included in the
+upcoming 2013.2.4 and 2014.1.3 releases.
+
+From 12f43cfed5a47cd16f08b7dad2424da0fc362e47 Mon Sep 17 00:00:00 2001
+From: Tom Leaman <[email protected]>
+Date: Fri, 2 May 2014 10:09:20 +0000
+Subject: [PATCH] Enforce image_size_cap on v2 upload
+
+image_size_cap should be checked and enforced on upload
+
+Enforcement is in two places:
+- on image metadata save
+- during image save to backend store
+
+(cherry picked from commit 92ab00fca6926eaf3f7f92a955a5e07140063718)
+Conflicts:
+	glance/location.py
+	glance/tests/functional/v2/test_images.py
+	glance/tests/unit/test_store_image.py
+
+Closes-Bug: 1315321
+Change-Id: I45bfb360703617bc394e9e27fe17adf43b09c0e1
+Co-Author: Manuel Desbonnet <[email protected]>
+---
+ glance/db/__init__.py                     |    5 ++++
+ glance/store/__init__.py                  |    5 +++-
+ glance/tests/functional/__init__.py       |    2 ++
+ glance/tests/functional/v2/test_images.py |   42 +++++++++++++++++++++++++++++
+ glance/tests/unit/test_store_image.py     |    6 +++--
+ glance/tests/unit/utils.py                |    5 +++-
+ 6 files changed, 61 insertions(+), 4 deletions(-)
+
+diff --git a/glance/db/__init__.py b/glance/db/__init__.py
+index 56f4dac..8ac3606 100644
+--- a/glance/db/__init__.py
++++ b/glance/db/__init__.py
+@@ -32,6 +32,7 @@ db_opt = cfg.BoolOpt('use_tpool',
+                      'all DB API calls')
+ 
+ CONF = cfg.CONF
++CONF.import_opt('image_size_cap', 'glance.common.config')
+ CONF.import_opt('metadata_encryption_key', 'glance.common.config')
+ CONF.register_opt(db_opt)
+ 
+@@ -148,6 +149,8 @@ class ImageRepo(object):
+ 
+     def add(self, image):
+         image_values = self._format_image_to_db(image)
++        if image_values['size'] > CONF.image_size_cap:
++            raise exception.ImageSizeLimitExceeded
+         # the updated_at value is not set in the _format_image_to_db
+         # function since it is specific to image create
+         image_values['updated_at'] = image.updated_at
+@@ -159,6 +162,8 @@ class ImageRepo(object):
+ 
+     def save(self, image):
+         image_values = self._format_image_to_db(image)
++        if image_values['size'] > CONF.image_size_cap:
++            raise exception.ImageSizeLimitExceeded
+         try:
+             new_values = self.db_api.image_update(self.context,
+                                                   image.image_id,
+diff --git a/glance/store/__init__.py b/glance/store/__init__.py
+index fa17f4b..fd25e27 100644
+--- a/glance/store/__init__.py
++++ b/glance/store/__init__.py
+@@ -646,7 +646,10 @@ class ImageProxy(glance.domain.proxy.Image):
+             size = 0  # NOTE(markwash): zero -> unknown size
+         location, size, checksum, loc_meta = self.store_api.add_to_backend(
+                 self.context, CONF.default_store,
+-                self.image.image_id, utils.CooperativeReader(data), size)
++                self.image.image_id,
++                utils.LimitingReader(utils.CooperativeReader(data),
++                                     CONF.image_size_cap),
++                size)
+         self.image.locations = [{'url': location, 'metadata': loc_meta}]
+         self.image.size = size
+         self.image.checksum = checksum
+diff --git a/glance/tests/functional/__init__.py b/glance/tests/functional/__init__.py
+index 1256133..18a1a7e 100644
+--- a/glance/tests/functional/__init__.py
++++ b/glance/tests/functional/__init__.py
+@@ -279,6 +279,7 @@ class ApiServer(Server):
+         self.pid_file = pid_file or os.path.join(self.test_dir, "api.pid")
+         self.scrubber_datadir = os.path.join(self.test_dir, "scrubber")
+         self.log_file = os.path.join(self.test_dir, "api.log")
++        self.image_size_cap = 1099511627776
+         self.s3_store_host = "s3.amazonaws.com"
+         self.s3_store_access_key = ""
+         self.s3_store_secret_key = ""
+@@ -332,6 +333,7 @@ metadata_encryption_key = %(metadata_encryption_key)s
+ registry_host = 127.0.0.1
+ registry_port = %(registry_port)s
+ log_file = %(log_file)s
++image_size_cap = %(image_size_cap)d
+ s3_store_host = %(s3_store_host)s
+ s3_store_access_key = %(s3_store_access_key)s
+ s3_store_secret_key = %(s3_store_secret_key)s
+diff --git a/glance/tests/functional/v2/test_images.py b/glance/tests/functional/v2/test_images.py
+index a9f9147..c25d4e2 100644
+--- a/glance/tests/functional/v2/test_images.py
++++ b/glance/tests/functional/v2/test_images.py
+@@ -259,6 +259,48 @@ class TestImages(functional.FunctionalTest):
+ 
+         self.stop_servers()
+ 
++    def test_image_size_cap(self):
++        self.api_server.image_size_cap = 128
++        self.start_servers(**self.__dict__.copy())
++        # create an image
++        path = self._url('/v2/images')
++        headers = self._headers({'content-type': 'application/json'})
++        data = json.dumps({'name': 'image-size-cap-test-image',
++                           'type': 'kernel', 'disk_format': 'aki',
++                           'container_format': 'aki'})
++        response = requests.post(path, headers=headers, data=data)
++        self.assertEqual(201, response.status_code)
++
++        image = json.loads(response.text)
++        image_id = image['id']
++
++        # try to populate it with oversized data
++        path = self._url('/v2/images/%s/file' % image_id)
++        headers = self._headers({'Content-Type': 'application/octet-stream'})
++
++        class StreamSim(object):
++            # Using a one-shot iterator to force chunked transfer in the PUT
++            # request
++            def __init__(self, size):
++                self.size = size
++
++            def __iter__(self):
++                yield 'Z' * self.size
++
++        response = requests.put(path, headers=headers, data=StreamSim(
++                                self.api_server.image_size_cap + 1))
++        self.assertEqual(413, response.status_code)
++
++        # hashlib.md5('Z'*129).hexdigest()
++        #     == '76522d28cb4418f12704dfa7acd6e7ee'
++        # If the image has this checksum, it means that the whole stream was
++        # accepted and written to the store, which should not be the case.
++        path = self._url('/v2/images/{0}'.format(image_id))
++        headers = self._headers({'content-type': 'application/json'})
++        response = requests.get(path, headers=headers)
++        image_checksum = json.loads(response.text).get('checksum')
++        self.assertNotEqual(image_checksum, '76522d28cb4418f12704dfa7acd6e7ee')
++
+     def test_permissions(self):
+         # Create an image that belongs to TENANT1
+         path = self._url('/v2/images')
+diff --git a/glance/tests/unit/test_store_image.py b/glance/tests/unit/test_store_image.py
+index f9f5d85..5bdd51e 100644
+--- a/glance/tests/unit/test_store_image.py
++++ b/glance/tests/unit/test_store_image.py
+@@ -126,8 +126,10 @@ class TestStoreImage(utils.BaseTestCase):
+ 
+         self.stubs.Set(unit_test_utils.FakeStoreAPI, 'get_from_backend',
+                        fake_get_from_backend)
+-
+-        self.assertEquals(image1.get_data().fd, 'ZZZ')
++        # This time, image1.get_data() returns the data wrapped in a
++        # LimitingReader|CooperativeReader pipeline, so we peek under
++        # the hood of those objects to get at the underlying string.
++        self.assertEquals(image1.get_data().data.fd, 'ZZZ')
+         image1.locations.pop(0)
+         self.assertEquals(len(image1.locations), 1)
+         image2.delete()
+diff --git a/glance/tests/unit/utils.py b/glance/tests/unit/utils.py
+index dff87b1..ec62828 100644
+--- a/glance/tests/unit/utils.py
++++ b/glance/tests/unit/utils.py
+@@ -149,7 +149,10 @@ class FakeStoreAPI(object):
+             if image_id in location:
+                 raise exception.Duplicate()
+         if not size:
+-            size = len(data.fd)
++            # 'data' is a string wrapped in a LimitingReader|CooperativeReader
++            # pipeline, so peek under the hood of those objects to get at the
++            # string itself.
++            size = len(data.data.fd)
+         if (current_store_size + size) > store_max_size:
+             raise exception.StorageFull()
+         if context.user == USER2:
+-- 
+1.7.9.5
+
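
For reference, a minimal runnable sketch of the streaming-side check, modeled
on glance.common.utils.LimitingReader as it is used in the
glance/store/__init__.py hunk above. The exception class is a local stand-in
for glance.common.exception.ImageSizeLimitExceeded, and the CooperativeReader
wrapper is elided.

    import io

    class ImageSizeLimitExceeded(Exception):
        """Stand-in for glance.common.exception.ImageSizeLimitExceeded."""

    class LimitingReader(object):
        """Wrap a reader/iterable; fail once more than `limit` bytes pass through."""

        def __init__(self, data, limit):
            self.data = data        # underlying reader, e.g. a CooperativeReader
            self.limit = limit      # image_size_cap in bytes
            self.bytes_read = 0

        def __iter__(self):
            for chunk in self.data:
                self.bytes_read += len(chunk)
                if self.bytes_read > self.limit:
                    raise ImageSizeLimitExceeded()
                yield chunk

        def read(self, length):
            result = self.data.read(length)
            self.bytes_read += len(result)
            if self.bytes_read > self.limit:
                raise ImageSizeLimitExceeded()
            return result

    # A 128-byte cap against 129 bytes of data: the final read must raise,
    # mirroring the 413 the functional test above expects.
    reader = LimitingReader(io.BytesIO(b'Z' * 129), limit=128)
    try:
        while reader.read(64):
            pass
    except ImageSizeLimitExceeded:
        print('upload rejected after %d bytes' % reader.bytes_read)

Counting bytes as they pass through is what lets the v2 API reject an
oversized chunked upload mid-transfer, instead of discovering the overrun
only after the whole stream has been written to the store.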