author    Tomáš Mózes <hydrapolic@gmail.com>    2023-10-18 20:36:03 +0200
committer Tomáš Mózes <hydrapolic@gmail.com>    2023-10-18 20:36:03 +0200
commit    9a4ce2cf2f3f7ac0a5cb7adf0b6ab6bf3ea3301c (patch)
tree      2fe0e12f95912d12b3fc7ea8a9b17fbff71786f1 /0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
parent    Xen 4.16.4-pre-patchset-0 (diff)
Xen 4.16.6-pre-patchset-0
Signed-off-by: Tomáš Mózes <hydrapolic@gmail.com>
Diffstat (limited to '0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch')
-rw-r--r--  0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch b/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
new file mode 100644
index 0000000..8da66a5
--- /dev/null
+++ b/0001-xen-arm-page-Handle-cache-flush-of-an-element-at-the.patch
@@ -0,0 +1,111 @@
+From d720c2310a7ac8878c01fe9d9fdc13f43cb266b3 Mon Sep 17 00:00:00 2001
+From: Stefano Stabellini <stefano.stabellini@amd.com>
+Date: Tue, 5 Sep 2023 14:34:28 +0200
+Subject: [PATCH 01/27] xen/arm: page: Handle cache flush of an element at the
+ top of the address space
+
+The region that needs to be cleaned/invalidated may be at the top
+of the address space. This means that 'end' (i.e. 'p + size') will
+be 0 and therefore nothing will be cleaned/invalidated as the check
+in the loop will always be false.
+
+On Arm64, we only support up to 48-bit virtual address space, so this
+is not a concern there. However, for 32-bit, the mapcache uses the
+last 2GB of the address space, so we may not properly clean/invalidate
+some pages. This could lead to memory corruption or data leakage (the
+scrubbed value may still sit in the cache when the guest can read the
+memory directly and therefore see the old content).
+
+Rework invalidate_dcache_va_range(), clean_dcache_va_range(),
+clean_and_invalidate_dcache_va_range() to handle a cache flush
+with an element at the top of the address space.
+
+This is CVE-2023-34321 / XSA-437.
+
+Reported-by: Julien Grall <jgrall@amazon.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@amd.com>
+Signed-off-by: Julien Grall <jgrall@amazon.com>
+Acked-by: Bertrand Marquis <bertrand.marquis@arm.com>
+master commit: 9a216e92de9f9011097e4f1fb55ff67ba0a21704
+master date: 2023-09-05 14:30:08 +0200
+---
+ xen/include/asm-arm/page.h | 33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/xen/include/asm-arm/page.h b/xen/include/asm-arm/page.h
+index c6f9fb0d4e..eff5883ef8 100644
+--- a/xen/include/asm-arm/page.h
++++ b/xen/include/asm-arm/page.h
+@@ -152,26 +152,25 @@ static inline size_t read_dcache_line_bytes(void)
+
+ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ {
+- const void *end = p + size;
+ size_t cacheline_mask = dcache_line_bytes - 1;
+
+ dsb(sy); /* So the CPU issues all writes to the range */
+
+ if ( (uintptr_t)p & cacheline_mask )
+ {
++ size -= dcache_line_bytes - ((uintptr_t)p & cacheline_mask);
+ p = (void *)((uintptr_t)p & ~cacheline_mask);
+ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+ p += dcache_line_bytes;
+ }
+- if ( (uintptr_t)end & cacheline_mask )
+- {
+- end = (void *)((uintptr_t)end & ~cacheline_mask);
+- asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (end));
+- }
+
+- for ( ; p < end; p += dcache_line_bytes )
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__invalidate_dcache_one(0) : : "r" (p));
+
++ if ( size > 0 )
++ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++
+ dsb(sy); /* So we know the flushes happen before continuing */
+
+ return 0;
+@@ -179,10 +178,14 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+
+ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ {
+- const void *end = p + size;
++ size_t cacheline_mask = dcache_line_bytes - 1;
++
+ dsb(sy); /* So the CPU issues all writes to the range */
+- p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+- for ( ; p < end; p += dcache_line_bytes )
++ size += (uintptr_t)p & cacheline_mask;
++ size = (size + cacheline_mask) & ~cacheline_mask;
++ p = (void *)((uintptr_t)p & ~cacheline_mask);
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__clean_dcache_one(0) : : "r" (p));
+ dsb(sy); /* So we know the flushes happen before continuing */
+ /* ARM callers assume that dcache_* functions cannot fail. */
+@@ -192,10 +195,14 @@ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ static inline int clean_and_invalidate_dcache_va_range
+ (const void *p, unsigned long size)
+ {
+- const void *end = p + size;
++ size_t cacheline_mask = dcache_line_bytes - 1;
++
+ dsb(sy); /* So the CPU issues all writes to the range */
+- p = (void *)((uintptr_t)p & ~(dcache_line_bytes - 1));
+- for ( ; p < end; p += dcache_line_bytes )
++ size += (uintptr_t)p & cacheline_mask;
++ size = (size + cacheline_mask) & ~cacheline_mask;
++ p = (void *)((uintptr_t)p & ~cacheline_mask);
++ for ( ; size >= dcache_line_bytes;
++ p += dcache_line_bytes, size -= dcache_line_bytes )
+ asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
+ dsb(sy); /* So we know the flushes happen before continuing */
+ /* ARM callers assume that dcache_* functions cannot fail. */
+--
+2.42.0
+
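
For illustration only (this is not part of the patch), here is a minimal standalone C sketch of the failure mode and the fix. The helper names (flush_line, flush_range_old, flush_range_fixed) and the 64-byte line size are assumptions for the demo; the fixed variant mirrors the size-countdown loop of the patched clean_dcache_va_range(), without the head/tail special-casing that invalidate_dcache_va_range() needs.

#include <stdint.h>
#include <stdio.h>

#define LINE 64u                        /* assumed cache line size for the demo */

/* Stand-in for the per-line cache maintenance instruction. */
static void flush_line(uintptr_t va)
{
    printf("flush %#llx\n", (unsigned long long)va);
}

/* Old shape: 'end = p + size' wraps to 0 when the range ends at the
   top of the address space, so 'p < end' is never true and no line
   is flushed. */
static void flush_range_old(uintptr_t p, unsigned long size)
{
    uintptr_t end = p + size;           /* wraps to 0 at the very top */

    p &= ~(uintptr_t)(LINE - 1);
    for ( ; p < end; p += LINE )        /* never entered when end == 0 */
        flush_line(p);
}

/* Fixed shape (as in the patched clean_dcache_va_range()): count
   'size' down instead of comparing against a computed end pointer,
   so no pointer wraparound can occur. */
static void flush_range_fixed(uintptr_t p, unsigned long size)
{
    uintptr_t mask = LINE - 1;

    size += p & mask;                   /* cover the unaligned head... */
    size = (size + mask) & ~mask;       /* ...and the unaligned tail */
    p &= ~mask;
    for ( ; size >= LINE; p += LINE, size -= LINE )
        flush_line(p);
}

int main(void)
{
    /* Base of the last cache line in the address space. */
    uintptr_t top = (uintptr_t)-1 & ~(uintptr_t)(LINE - 1);

    flush_range_old(top, LINE);         /* prints nothing: end wrapped to 0 */
    flush_range_fixed(top, LINE);       /* flushes the final line */
    return 0;
}

Note that the patched invalidate_dcache_va_range() additionally clean+invalidates the partial head and tail lines rather than plainly invalidating them, since invalidating a partially covered line could discard unrelated dirty data sharing that line.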