
Commit ecbcf03

sean-jc authored and bonzini committed
KVM: Reject attempts to consume or refresh inactive gfn_to_pfn_cache
Reject kvm_gpc_check() and kvm_gpc_refresh() if the cache is inactive.
Not checking the active flag during refresh is particularly egregious, as
KVM can end up with a valid, inactive cache, which can lead to a variety
of use-after-free bugs, e.g. consuming a NULL kernel pointer or missing
an mmu_notifier invalidation due to the cache not being on the list of
gfns to invalidate.

Note, "active" needs to be set if and only if the cache is on the list
of caches, i.e. is reachable via mmu_notifier events.  If a relevant
mmu_notifier event occurs while the cache is "active" but not on the
list, KVM will not acquire the cache's lock and so will not serialize
the mmu_notifier event with active users and/or kvm_gpc_refresh().

A race between KVM_XEN_ATTR_TYPE_SHARED_INFO and KVM_XEN_HVM_EVTCHN_SEND
can be exploited to trigger the bug.

1. Deactivate shinfo cache:

   kvm_xen_hvm_set_attr
   case KVM_XEN_ATTR_TYPE_SHARED_INFO
     kvm_gpc_deactivate
       kvm_gpc_unmap
         gpc->valid = false
         gpc->khva = NULL
       gpc->active = false

   Result: active = false, valid = false

2. Cause cache refresh:

   kvm_arch_vm_ioctl
   case KVM_XEN_HVM_EVTCHN_SEND
     kvm_xen_hvm_evtchn_send
       kvm_xen_set_evtchn
         kvm_xen_set_evtchn_fast
           kvm_gpc_check
           return -EWOULDBLOCK because !gpc->valid
         kvm_xen_set_evtchn_fast
           return -EWOULDBLOCK
       kvm_gpc_refresh
         hva_to_pfn_retry
           gpc->valid = true
           gpc->khva = not NULL

   Result: active = false, valid = true

3. Race ioctl KVM_XEN_HVM_EVTCHN_SEND against ioctl
   KVM_XEN_ATTR_TYPE_SHARED_INFO:

   kvm_arch_vm_ioctl
   case KVM_XEN_HVM_EVTCHN_SEND
     kvm_xen_hvm_evtchn_send
       kvm_xen_set_evtchn
         kvm_xen_set_evtchn_fast
           read_lock gpc->lock
                                        kvm_xen_hvm_set_attr case
                                        KVM_XEN_ATTR_TYPE_SHARED_INFO
                                          mutex_lock kvm->lock
                                          kvm_xen_shared_info_init
                                            kvm_gpc_activate
                                              gpc->khva = NULL
           kvm_gpc_check
           [ Check passes because gpc->valid is
             still true, even though gpc->khva
             is already NULL. ]
           shinfo = gpc->khva
           pending_bits = shinfo->evtchn_pending
           CRASH: test_and_set_bit(..., pending_bits)

Fixes: 982ed0d ("KVM: Reinstate gfn_to_pfn_cache with invalidation support")
Cc: stable@vger.kernel.org
Reported-by: Michal Luczaj <mhal@rbox.co>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20221013211234.1318131-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
1 parent 52491a3 commit ecbcf03
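
For orientation, here is a minimal sketch of the consumer-side pattern the
commit protects, loosely modeled on the kvm_xen_set_evtchn_fast() path
described above. It is simplified, not the exact kernel code: the function
name evtchn_deliver_sketch is hypothetical, and in the real kernel the
refresh is driven by the outer kvm_xen_set_evtchn() loop on -EWOULDBLOCK.

    /* Sketch (hypothetical helper): consume the cache under its read lock. */
    static int evtchn_deliver_sketch(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
    {
    	struct shared_info *shinfo;
    	unsigned long flags;

    	read_lock_irqsave(&gpc->lock, flags);
    	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE)) {
    		read_unlock_irqrestore(&gpc->lock, flags);

    		/*
    		 * After this commit, refresh fails with -EINVAL once the
    		 * cache has been deactivated, so this loop can no longer
    		 * revalidate a cache that KVM_XEN_ATTR_TYPE_SHARED_INFO
    		 * already tore down.
    		 */
    		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE))
    			return -EWOULDBLOCK;

    		read_lock_irqsave(&gpc->lock, flags);
    	}

    	shinfo = gpc->khva;	/* Non-NULL: the check passed under gpc->lock. */
    	/* ... test_and_set_bit() on shinfo->evtchn_pending, etc. ... */
    	read_unlock_irqrestore(&gpc->lock, flags);
    	return 0;
    }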

File tree

1 file changed: +34 −7 lines


virt/kvm/pfncache.c

@@ -81,6 +81,9 @@ bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
+	if (!gpc->active)
+		return false;
+
 	if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
 		return false;
 
@@ -240,10 +243,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 {
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	unsigned long page_offset = gpa & ~PAGE_MASK;
-	kvm_pfn_t old_pfn, new_pfn;
+	bool unmap_old = false;
 	unsigned long old_uhva;
+	kvm_pfn_t old_pfn;
 	void *old_khva;
-	int ret = 0;
+	int ret;
 
 	/*
 	 * If must fit within a single page. The 'len' argument is
@@ -261,6 +265,11 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 
 	write_lock_irq(&gpc->lock);
 
+	if (!gpc->active) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	old_pfn = gpc->pfn;
 	old_khva = gpc->khva - offset_in_page(gpc->khva);
 	old_uhva = gpc->uhva;
@@ -291,6 +300,7 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		/* If the HVA→PFN mapping was already valid, don't unmap it. */
 		old_pfn = KVM_PFN_ERR_FAULT;
 		old_khva = NULL;
+		ret = 0;
 	}
 
 out:
@@ -305,14 +315,15 @@ int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->khva = NULL;
 	}
 
-	/* Snapshot the new pfn before dropping the lock! */
-	new_pfn = gpc->pfn;
+	/* Detect a pfn change before dropping the lock! */
+	unmap_old = (old_pfn != gpc->pfn);
 
+out_unlock:
 	write_unlock_irq(&gpc->lock);
 
 	mutex_unlock(&gpc->refresh_lock);
 
-	if (old_pfn != new_pfn)
+	if (unmap_old)
 		gpc_unmap_khva(kvm, old_pfn, old_khva);
 
 	return ret;
@@ -366,11 +377,19 @@ int kvm_gpc_activate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
 		gpc->vcpu = vcpu;
 		gpc->usage = usage;
 		gpc->valid = false;
-		gpc->active = true;
 
 		spin_lock(&kvm->gpc_lock);
 		list_add(&gpc->list, &kvm->gpc_list);
 		spin_unlock(&kvm->gpc_lock);
+
+		/*
+		 * Activate the cache after adding it to the list, a concurrent
+		 * refresh must not establish a mapping until the cache is
+		 * reachable by mmu_notifier events.
+		 */
+		write_lock_irq(&gpc->lock);
+		gpc->active = true;
+		write_unlock_irq(&gpc->lock);
 	}
 	return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len);
 }
@@ -379,12 +398,20 @@ EXPORT_SYMBOL_GPL(kvm_gpc_activate);
 void kvm_gpc_deactivate(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 {
 	if (gpc->active) {
+		/*
+		 * Deactivate the cache before removing it from the list, KVM
+		 * must stall mmu_notifier events until all users go away, i.e.
+		 * until gpc->lock is dropped and refresh is guaranteed to fail.
+		 */
+		write_lock_irq(&gpc->lock);
+		gpc->active = false;
+		write_unlock_irq(&gpc->lock);
+
 		spin_lock(&kvm->gpc_lock);
 		list_del(&gpc->list);
 		spin_unlock(&kvm->gpc_lock);
 
 		kvm_gfn_to_pfn_cache_unmap(kvm, gpc);
-		gpc->active = false;
 	}
 }
 EXPORT_SYMBOL_GPL(kvm_gpc_deactivate);
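
The ordering comments in kvm_gpc_activate() and kvm_gpc_deactivate() exist
because the mmu_notifier side discovers caches only by walking kvm->gpc_list.
A rough sketch of that walk, modeled on gfn_to_pfn_cache_invalidate_start()
in this file (simplified; the function name invalidate_walk_sketch is
hypothetical, and the real function also handles blocking and waking users):

    /* Sketch: mmu_notifier-side invalidation walks the cache list. */
    static void invalidate_walk_sketch(struct kvm *kvm, unsigned long start,
    				   unsigned long end)
    {
    	struct gfn_to_pfn_cache *gpc;

    	spin_lock(&kvm->gpc_lock);
    	list_for_each_entry(gpc, &kvm->gpc_list, list) {
    		write_lock_irq(&gpc->lock);

    		/*
    		 * A cache that is "active" but not yet (or no longer) on
    		 * this list would be skipped entirely, leaving a stale
    		 * mapping in place.  Hence activate/deactivate flip the
    		 * flag under gpc->lock only while the cache is listed.
    		 */
    		if (gpc->valid && gpc->uhva >= start && gpc->uhva < end)
    			gpc->valid = false;

    		write_unlock_irq(&gpc->lock);
    	}
    	spin_unlock(&kvm->gpc_lock);
    }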
