KVM: x86: Consolidate flags for __linearize()

Upstream commit: 7b0dd9430cf0c1ae19645d2a6608a5fb57faffe4
Conflict: none

Consolidate @write and @fetch of __linearize() into a set of flags so that
additional flags can be added without needing more/new boolean parameters,
to precisely identify the access type.

No functional change intended.
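
As a standalone sketch of the pattern (not the kernel code itself:
BIT() is approximated and kvm_linearize_demo() is an invented stand-in
for __linearize(), modeling only the flags plumbing):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define X86EMUL_F_WRITE		BIT(0)
#define X86EMUL_F_FETCH		BIT(1)

/* Stand-in for __linearize(): a single flag word names the access type. */
static void kvm_linearize_demo(unsigned int flags)
{
	printf("write=%d fetch=%d\n",
	       !!(flags & X86EMUL_F_WRITE), !!(flags & X86EMUL_F_FETCH));
}

int main(void)
{
	int write = 1;

	/* Old "bool write" callers become a conditional flag... */
	kvm_linearize_demo(write ? X86EMUL_F_WRITE : 0);
	/* ...and old "fetch = true" callers pass X86EMUL_F_FETCH directly. */
	kvm_linearize_demo(X86EMUL_F_FETCH);
	return 0;
}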

Intel-SIG: commit 7b0dd9430cf0 KVM: x86: Consolidate flags for __linearize()
Backport KVM Linear Address Masking (LAM) support.

Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Chao Gao <chao.gao@intel.com>
Acked-by: Kai Huang <kai.huang@intel.com>
Tested-by: Xuelian Guo <xuelian.guo@intel.com>
Link: https://lore.kernel.org/r/20230913124227.12574-2-binbin.wu@linux.intel.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
[ Zhiquan Li: amend commit log ]
Signed-off-by: Zhiquan Li <zhiquan1.li@intel.com>
 arch/x86/kvm/emulate.c     | 21 +++++++++++----------
 arch/x86/kvm/kvm_emulate.h |  4 ++++
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size)
 static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 				       struct segmented_address addr,
 				       unsigned *max_size, unsigned size,
-				       bool write, bool fetch,
-				       enum x86emul_mode mode, ulong *linear)
+				       enum x86emul_mode mode, ulong *linear,
+				       unsigned int flags)
 {
 	struct desc_struct desc;
 	bool usable;
@@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		if (!usable)
 			goto bad;
 		/* code segment in protected mode or read-only data segment */
-		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
-					|| !(desc.type & 2)) && write)
+		if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) &&
+		    (flags & X86EMUL_F_WRITE))
 			goto bad;
 		/* unreadable code segment */
-		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+		if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2))
 			goto bad;
 		lim = desc_limit_scaled(&desc);
 		if (!(desc.type & 8) && (desc.type & 4)) {
@@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     ulong *linear)
 {
 	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false,
-			   ctxt->mode, linear);
+	return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear,
+			   write ? X86EMUL_F_WRITE : 0);
 }
 
 static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
@@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
 	if (ctxt->op_bytes != sizeof(unsigned long))
 		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
 
-	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
 	if (rc == X86EMUL_CONTINUE)
 		ctxt->_eip = addr.ea;
 	return rc;
@@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * boundary check itself.  Instead, we use max_size to check
 	 * against op_size.
 	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
-			 &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear,
+			 X86EMUL_F_FETCH);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;

diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -88,6 +88,10 @@ struct x86_instruction_info {
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
 #define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
 
+/* x86-specific emulation flags */
+#define X86EMUL_F_WRITE			BIT(0)
+#define X86EMUL_F_FETCH			BIT(1)
+
 struct x86_emulate_ops {
 	void (*vm_bugged)(struct x86_emulate_ctxt *ctxt);
 	/*
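
The flag word is what lets follow-up patches in this LAM backport add
new access-type bits without touching __linearize()'s signature again.
A hedged sketch of that extension, where X86EMUL_F_EXAMPLE is an
invented placeholder rather than a real kernel flag:

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define X86EMUL_F_WRITE		BIT(0)
#define X86EMUL_F_FETCH		BIT(1)
/* Invented for illustration; later patches define their own bits here. */
#define X86EMUL_F_EXAMPLE	BIT(2)

static int is_fetch(unsigned int flags)
{
	return !!(flags & X86EMUL_F_FETCH);
}

int main(void)
{
	/* A new bit simply ORs in; existing checks keep working unchanged. */
	unsigned int flags = X86EMUL_F_FETCH | X86EMUL_F_EXAMPLE;

	printf("fetch=%d\n", is_fetch(flags));
	return 0;
}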