/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12
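
/*
 * Tear down the host hash PTE that backs a cached guest mapping.
 * pte->slot and pte->host_vpn were recorded when the entry was
 * installed by kvmppc_mmu_map_page() below.
 */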
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
				     pte->pagesize, pte->pagesize,
				     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
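
/*
 * Look up an existing guest VSID -> shadow (host) VSID mapping.
 * Two slots are probed: the hashed index and its mirror image,
 * matching the two candidate positions used by create_sid_map().
 */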
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
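
/*
 * Install a host hash PTE for one guest PTE that the guest MMU has
 * already translated.  'iswrite' says whether the faulting access was
 * a store; together with the 'writable' result of the pfn lookup it
 * decides whether the host mapping may be writable or must be
 * read-only.  Returns 0 on success and a negative value on failure;
 * -EAGAIN asks the caller to retry because an MMU notifier
 * invalidation raced with us.
 */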
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	kvm_pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
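
	/*
	 * The mmu_notifier_seq snapshot above is checked again with
	 * mmu_notifier_retry() under kvm->mmu_lock before the host HPTE
	 * is installed, so a racing invalidation makes us bail out with
	 * -EAGAIN instead of mapping a stale page.
	 */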
	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
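
	/*
	 * Only allow writing if both the guest PTE and the host page
	 * permit it; otherwise map read-only (PP_RXRX), which may later
	 * surface as a protection fault that is handled by replacing
	 * this mapping.
	 */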
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);
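
	/*
	 * Allocate the hpte_cache entry before taking kvm->mmu_lock;
	 * allocating under the lock could deadlock, so an unused entry
	 * is freed again on the way out.
	 */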
	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}
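
	/*
	 * Try the primary hash group first; on failure flip to the
	 * secondary group (and, on later attempts, evict an existing
	 * entry) until the insert succeeds.
	 */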
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The mmu_hash_ops code may give us a secondary entry even
		 * though we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}
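
		/*
		 * Record where the host HPTE ended up so that
		 * kvmppc_mmu_invalidate_pte() can find it later.
		 */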
		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}
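
/*
 * Flush the host HPTE (if any) that currently backs this guest PTE.
 * Used when an existing mapping has to be replaced, e.g. after a
 * protection fault on a page that was mapped read-only on the host.
 */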
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
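
/*
 * Allocate a new guest VSID -> shadow VSID mapping, alternating
 * between the two candidate hash slots that find_sid_vsid() probes.
 * When the proto-VSID space is exhausted, all shadow state (SID map,
 * cached HPTEs and segments) is flushed and numbering restarts.
 */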
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}
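
/*
 * Pick a shadow SLB slot for a new segment mapping: reuse a slot that
 * already maps this ESID, else an invalidated one, else grow slb_max
 * (flushing all segments first if the SLB is full).
 */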
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}
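
/*
 * Create a shadow SLB entry for the segment containing 'eaddr',
 * translating the guest VSID to a host VSID via the SID map.
 * Returns -ENOENT if the guest has no segment mapping for this
 * address.
 */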
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
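
/* Invalidate the shadow SLB entries covering one guest segment. */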
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}
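
/*
 * Free the per-vcpu shadow MMU state: the HPTE cache and the host MMU
 * context used for shadow VSIDs.
 */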
void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}
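
/*
 * Set up the per-vcpu shadow MMU: allocate a host MMU context whose
 * proto-VSID range backs the guest's shadow VSIDs, and initialise the
 * HPTE cache.
 */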
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}