KVM: MMU: Selectively set PageDirty when releasing guest memory

Improve dirty bit setting for pages that kvm releases: until now, every page
we released was marked dirty; from now on, only pages that could potentially
have been written to are marked dirty.
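
As an illustration of the split, here is a minimal user-space sketch of the
release pattern; struct page, put_page(), and the helper names below are
stand-ins for the kernel's, not the real API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's struct page and page flags. */
struct page {
        int  refcount;
        bool reserved;  /* PageReserved() stand-in */
        bool dirty;     /* PG_dirty stand-in */
};

static void put_page(struct page *page)
{
        if (--page->refcount == 0)
                printf("freeing page, dirty=%d\n", page->dirty);
}

/* Like kvm_release_page_clean(): just drop the reference. */
static void release_page_clean(struct page *page)
{
        put_page(page);
}

/* Like kvm_release_page_dirty(): mark dirty first, unless reserved. */
static void release_page_dirty(struct page *page)
{
        if (!page->reserved)
                page->dirty = true;     /* SetPageDirty() in the kernel */
        put_page(page);
}

int main(void)
{
        struct page guest_page = { .refcount = 1 };
        bool pte_writable = false;      /* e.g. is_writeble_pte(*spte) */

        /* The pattern rmap_remove() now follows: dirty only if writable. */
        if (pte_writable)
                release_page_dirty(&guest_page);
        else
                release_page_clean(&guest_page);
        return 0;
}

The reserved-page check mirrors the one already in kvm_release_page():
reserved pages are never marked dirty.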

Signed-off-by: Izik Eidus <izike@qumranet.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Author: Izik Eidus, 2007-11-20 11:49:33 +02:00; committed by Avi Kivity
commit b4231d6180 (parent 2065b3727e)
5 changed files with 33 additions and 19 deletions


@@ -393,7 +393,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
                            int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
-void kvm_release_page(struct page *page);
+void kvm_release_page_clean(struct page *page);
+void kvm_release_page_dirty(struct page *page);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                         int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);


@@ -543,13 +543,19 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_page(struct page *page)
+void kvm_release_page_clean(struct page *page)
+{
+        put_page(page);
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_clean);
+
+void kvm_release_page_dirty(struct page *page)
 {
         if (!PageReserved(page))
                 SetPageDirty(page);
         put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page);
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -1055,7 +1061,7 @@ static struct page *kvm_vm_nopage(struct vm_area_struct *vma,
         /* current->mm->mmap_sem is already held so call lockless version */
         page = __gfn_to_page(kvm, pgoff);
         if (is_error_page(page)) {
-                kvm_release_page(page);
+                kvm_release_page_clean(page);
                 return NOPAGE_SIGBUS;
         }
         if (type != NULL)


@@ -420,14 +420,18 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
         struct kvm_rmap_desc *desc;
         struct kvm_rmap_desc *prev_desc;
         struct kvm_mmu_page *page;
+        struct page *release_page;
         unsigned long *rmapp;
         int i;
 
         if (!is_rmap_pte(*spte))
                 return;
         page = page_header(__pa(spte));
-        kvm_release_page(pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >>
-                         PAGE_SHIFT));
+        release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
+        if (is_writeble_pte(*spte))
+                kvm_release_page_dirty(release_page);
+        else
+                kvm_release_page_clean(release_page);
         rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
         if (!*rmapp) {
                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
@@ -893,7 +897,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
 {
         int level = PT32E_ROOT_LEVEL;
         hpa_t table_addr = vcpu->mmu.root_hpa;
+        struct page *page;
 
+        page = pfn_to_page(p >> PAGE_SHIFT);
         for (; ; level--) {
                 u32 index = PT64_INDEX(v, level);
                 u64 *table;
@@ -908,7 +914,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                 pte = table[index];
                 was_rmapped = is_rmap_pte(pte);
                 if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
-                        kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                        kvm_release_page_clean(page);
                         return 0;
                 }
                 mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
@@ -918,7 +924,8 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                 if (!was_rmapped)
                         rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
                 else
-                        kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                        kvm_release_page_clean(page);
+
                 return 0;
         }
@@ -933,7 +940,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
                                              1, 3, &table[index]);
                         if (!new_table) {
                                 pgprintk("nonpaging_map: ENOMEM\n");
-                                kvm_release_page(pfn_to_page(p >> PAGE_SHIFT));
+                                kvm_release_page_clean(page);
                                 return -ENOMEM;
                         }
@@ -1049,8 +1056,8 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
         paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);
 
         if (is_error_hpa(paddr)) {
-                kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                 >> PAGE_SHIFT));
+                kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                       >> PAGE_SHIFT));
                 return 1;
         }
@@ -1580,7 +1587,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                " valid guest gva %lx\n", audit_msg, va);
                         page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
                                            >> PAGE_SHIFT);
-                        kvm_release_page(page);
+                        kvm_release_page_clean(page);
                 }
         }


@@ -212,8 +212,8 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
         if (is_error_hpa(paddr)) {
                 set_shadow_pte(shadow_pte,
                                shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-                kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                 >> PAGE_SHIFT));
+                kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                       >> PAGE_SHIFT));
                 return;
         }
@@ -259,12 +259,12 @@ static void FNAME(set_pte_common)(struct kvm_vcpu *vcpu,
                         page = pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
                                            >> PAGE_SHIFT);
-                        kvm_release_page(page);
+                        kvm_release_page_clean(page);
                 }
         }
         else
-                kvm_release_page(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
-                                 >> PAGE_SHIFT));
+                kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
+                                       >> PAGE_SHIFT));
         if (!ptwrite || !*ptwrite)
                 vcpu->last_pte_updated = shadow_pte;
 }
@@ -503,7 +503,7 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
                 else
                         sp->spt[i] = shadow_notrap_nonpresent_pte;
         kunmap_atomic(gpt, KM_USER0);
-        kvm_release_page(page);
+        kvm_release_page_clean(page);
 }
 
 #undef pt_element_t


@@ -1472,7 +1472,7 @@ static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
         for (i = 0; i < ARRAY_SIZE(vcpu->pio.guest_pages); ++i)
                 if (vcpu->pio.guest_pages[i]) {
-                        kvm_release_page(vcpu->pio.guest_pages[i]);
+                        kvm_release_page_dirty(vcpu->pio.guest_pages[i]);
                         vcpu->pio.guest_pages[i] = NULL;
                 }
 }