Diffstat (limited to '0002-x86-shadow-account-for-log-dirty-mode-when-pre-alloc.patch')
-rw-r--r--  0002-x86-shadow-account-for-log-dirty-mode-when-pre-alloc.patch | 105
1 file changed, 105 insertions, 0 deletions
diff --git a/0002-x86-shadow-account-for-log-dirty-mode-when-pre-alloc.patch b/0002-x86-shadow-account-for-log-dirty-mode-when-pre-alloc.patch
new file mode 100644
index 0000000..0b5d582
--- /dev/null
+++ b/0002-x86-shadow-account-for-log-dirty-mode-when-pre-alloc.patch
@@ -0,0 +1,105 @@
+From 3a0b7fb38a3e40fcf82c10980775f0fecab667b5 Mon Sep 17 00:00:00 2001
+From: Jan Beulich <jbeulich@suse.com>
+Date: Tue, 21 Mar 2023 12:00:02 +0000
+Subject: [PATCH 02/13] x86/shadow: account for log-dirty mode when
+ pre-allocating
+
+Pre-allocation is intended to ensure that in the course of constructing
+or updating shadows there is no risk of just-made shadows, or shadows
+being acted upon, disappearing under our feet. The number of pages
+pre-allocated, however, needs to account for all possible subsequent
+allocations. While the use in sh_page_fault() accounts for all shadows
+which may need making, so far it didn't account for allocations coming
+from log-dirty tracking (which piggybacks onto the P2M allocation
+functions).
+
+Since shadow_prealloc() takes a count of shadows (or other data
+structures) rather than a count of pages, putting the adjustment at the
+call site of this function won't work very well: we simply can't express
+the correct count that way in all cases. Instead take care of this in
+the function itself, by "snooping" for L1 type requests. (While not
+applicable right now, any future request sites for L1 tables would then
+also be covered right away.)
+
+It is relevant to note here that pre-allocations like the one done from
+shadow_alloc_p2m_page() are benign when they fall in the "scope" of an
+earlier pre-alloc which already included that count: the inner call will
+simply find enough pages available and bail right away.
+
+This is CVE-2022-42332 / XSA-427.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+(cherry picked from commit 91767a71061035ae42be93de495cd976f863a41a)
+---
+ xen/arch/x86/mm/paging.c | 1 +
+ xen/arch/x86/mm/shadow/common.c | 12 +++++++++++-
+ xen/arch/x86/mm/shadow/private.h | 1 +
+ xen/include/asm-x86/paging.h | 4 ++++
+ 4 files changed, 17 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
+index 579d01c161..ab1cdf1e72 100644
+--- a/xen/arch/x86/mm/paging.c
++++ b/xen/arch/x86/mm/paging.c
+@@ -280,6 +280,7 @@ void paging_mark_pfn_dirty(struct domain *d, pfn_t pfn)
+ if ( unlikely(!VALID_M2P(pfn_x(pfn))) )
+ return;
+
++ BUILD_BUG_ON(paging_logdirty_levels() != 4);
+ i1 = L1_LOGDIRTY_IDX(pfn);
+ i2 = L2_LOGDIRTY_IDX(pfn);
+ i3 = L3_LOGDIRTY_IDX(pfn);
+diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
+index e36d49d1fc..e73931573b 100644
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -1014,7 +1014,17 @@ bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
+ if ( unlikely(d->is_dying) )
+ return false;
+
+- ret = _shadow_prealloc(d, shadow_size(type) * count);
++ count *= shadow_size(type);
++ /*
++ * Log-dirty handling may result in allocations when populating its
++ * tracking structures. Tie this to the caller requesting space for L1
++ * shadows.
++ */
++ if ( paging_mode_log_dirty(d) &&
++ ((SHF_L1_ANY | SHF_FL1_ANY) & (1u << type)) )
++ count += paging_logdirty_levels();
++
++ ret = _shadow_prealloc(d, count);
+ if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
+ /*
+ * Failing to allocate memory required for shadow usage can only result in
+diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
+index 3fe0388e7c..1be84fc951 100644
+--- a/xen/arch/x86/mm/shadow/private.h
++++ b/xen/arch/x86/mm/shadow/private.h
+@@ -269,6 +269,7 @@ static inline void sh_terminate_list(struct page_list_head *tmp_list)
+ #define SHF_64 (SHF_L1_64|SHF_FL1_64|SHF_L2_64|SHF_L2H_64|SHF_L3_64|SHF_L4_64)
+
+ #define SHF_L1_ANY (SHF_L1_32|SHF_L1_PAE|SHF_L1_64)
++#define SHF_FL1_ANY (SHF_FL1_32|SHF_FL1_PAE|SHF_FL1_64)
+
+ #if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+ /* Marks a guest L1 page table which is shadowed but not write-protected.
+diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
+index eb23652978..5ec508a351 100644
+--- a/xen/include/asm-x86/paging.h
++++ b/xen/include/asm-x86/paging.h
+@@ -190,6 +190,10 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn);
+ #define L4_LOGDIRTY_IDX(pfn) ((pfn_x(pfn) >> (PAGE_SHIFT + 3 + PAGETABLE_ORDER * 2)) & \
+ (LOGDIRTY_NODE_ENTRIES-1))
+
++#define paging_logdirty_levels() \
++ (DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3), \
++ PAGE_SHIFT - ilog2(sizeof(mfn_t))) + 1)
++
+ #ifdef CONFIG_HVM
+ /* VRAM dirty tracking support */
+ struct sh_dirty_vram {
+--
+2.40.0
+
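
For reference, the paging_logdirty_levels() arithmetic introduced above can
be checked in isolation. The following standalone sketch is not Xen code; it
assumes the usual x86-64 constants (PAGE_SHIFT = 12, PADDR_BITS = 52, and an
8-byte mfn_t, so ilog2(sizeof(mfn_t)) = 3) and reproduces the computation,
confirming the depth of 4 that the BUILD_BUG_ON() in paging_mark_pfn_dirty()
asserts:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT  12   /* 4 KiB pages */
#define PADDR_BITS  52   /* x86-64 physical address width */
#define MFN_SHIFT    3   /* ilog2(sizeof(mfn_t)) for an 8-byte mfn_t */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int logdirty_levels(void)
{
    /*
     * A leaf page of the log-dirty trie is a bitmap (one bit per pfn),
     * so it covers PAGE_SHIFT + 3 bits of pfn space: 2^12 bytes *
     * 8 bits/byte = 2^15 pfns.  Each node level is a page of mfn_t
     * entries, covering a further PAGE_SHIFT - 3 = 9 bits per level.
     */
    return DIV_ROUND_UP(PADDR_BITS - PAGE_SHIFT - (PAGE_SHIFT + 3),
                        PAGE_SHIFT - MFN_SHIFT) + 1;
}

int main(void)
{
    /*
     * 40 pfn bits minus 15 leaf bits leaves 25 bits; DIV_ROUND_UP(25, 9)
     * gives 3 node levels, plus the leaf level makes 4.
     */
    assert(logdirty_levels() == 4);
    printf("log-dirty trie depth: %u levels\n", logdirty_levels());
    return 0;
}

In the worst case, marking a pfn dirty may have to populate up to one page
per level of this trie, which is why shadow_prealloc() now adds
paging_logdirty_levels() pages to the request whenever L1 or FL1 shadows
are asked for while the domain is in log-dirty mode.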