mirror of https://github.com/l4ka/pistachio.git
Merged IA32's and AMD64's assembler-instruction wrapper functions.
This commit is contained in:
parent 0a0853b958
commit 70cfceaa81
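At a glance: this commit replaces the duplicated per-architecture instruction wrappers (ia32_rdmsr, amd64_wrmsr, ia32_cr4_set, ...) with shared x86_-prefixed equivalents in a new header, arch/x86/instr.h, and updates all call sites. A minimal sketch of the resulting call pattern — EXAMPLE_MSR is a hypothetical constant for illustration, not one defined by this commit:

    #include INC_ARCHX(x86,cpu.h)   /* pulls in arch/x86/instr.h on ia32 and amd64 */

    static bool example_msr_bit63_set(void)
    {
        /* EXAMPLE_MSR is hypothetical; previously ia32_rdmsr() or amd64_rdmsr() */
        u64_t val = x86_rdmsr(EXAMPLE_MSR);
        return (val >> 63) & 1;
    }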
@@ -85,10 +85,10 @@ DECLARE_CMD (cmd_dump_msrs, arch, 'm', "dumpmsrs",
 CMD (cmd_dump_msrs, cg)
 {
-    printf("LASTBRANCH_FROM_IP: %x\n", amd64_rdmsr (AMD64_LASTBRANCHFROMIP));
-    printf("LASTBRANCH_TO_IP: %x\n", amd64_rdmsr (AMD64_LASTBRANCHTOIP));
-    printf("LASTINT_FROM_IP: %x\n", amd64_rdmsr (AMD64_LASTINTFROMIP));
-    printf("LASTINT_TO_IP: %x\n", amd64_rdmsr (AMD64_LASTINTTOIP));
+    printf("LASTBRANCH_FROM_IP: %x\n", x86_rdmsr (AMD64_LASTBRANCHFROMIP));
+    printf("LASTBRANCH_TO_IP: %x\n", x86_rdmsr (AMD64_LASTBRANCHTOIP));
+    printf("LASTINT_FROM_IP: %x\n", x86_rdmsr (AMD64_LASTINTFROMIP));
+    printf("LASTINT_TO_IP: %x\n", x86_rdmsr (AMD64_LASTINTTOIP));
     return CMD_NOQUIT;
 }
 #endif
@@ -262,7 +262,7 @@ CMD(cmd_gdt, cg)
     printf("dpl=%d 64-bit ", tss->x.d.dpl);
     printf("tss\n");

-    printf("FS_MSR = %16x\nGS_MSR = %16x\n", amd64_rdmsr(AMD64_FS_MSR), amd64_rdmsr(AMD64_GS_MSR));
+    printf("FS_MSR = %16x\nGS_MSR = %16x\n", x86_rdmsr(AMD64_FS_MSR), x86_rdmsr(AMD64_GS_MSR));

     return CMD_NOQUIT;
 }
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002, 2003, Karlsruhe University
+ * Copyright (C) 2002, 2003, 2007, Karlsruhe University
  *
  * File path: kdb/arch/ia32/stepping.cc
  * Description: Single stepping for IA-32
@@ -58,7 +58,7 @@ CMD (cmd_branchstep, cg)
     ia32_exceptionframe_t * f = (ia32_exceptionframe_t *) kdb.kdb_param;

     f->eflags |= (1 << 8) + (1 << 16); /* RF + TF */
-    ia32_wrmsr (IA32_DEBUGCTL, ((1 << 0) + (1 << 1))); /* LBR + BTF */
+    x86_wrmsr (IA32_DEBUGCTL, ((1 << 0) + (1 << 1))); /* LBR + BTF */
     ia32_single_step_on_branches = true;

     return CMD_QUIT;
@@ -85,16 +85,16 @@ DECLARE_CMD (cmd_dump_msrs, arch, 'm', "dumpmsrs",
 CMD (cmd_dump_msrs, cg)
 {
 #if defined(CONFIG_CPU_IA32_I686)
-    printf("LASTBRANCH_FROM_IP: %x\n", ia32_rdmsr (IA32_LASTBRANCHFROMIP));
-    printf("LASTBRANCH_TO_IP: %x\n", ia32_rdmsr (IA32_LASTBRANCHTOIP));
-    printf("LASTINT_FROM_IP: %x\n", ia32_rdmsr (IA32_LASTINTFROMIP));
-    printf("LASTINT_TO_IP: %x\n", ia32_rdmsr (IA32_LASTINTTOIP));
+    printf("LASTBRANCH_FROM_IP: %x\n", x86_rdmsr (IA32_LASTBRANCHFROMIP));
+    printf("LASTBRANCH_TO_IP: %x\n", x86_rdmsr (IA32_LASTBRANCHTOIP));
+    printf("LASTINT_FROM_IP: %x\n", x86_rdmsr (IA32_LASTINTFROMIP));
+    printf("LASTINT_TO_IP: %x\n", x86_rdmsr (IA32_LASTINTTOIP));
 #endif

 #if defined(CONFIG_CPU_IA32_P4)
     for (int i = 0; i < 18; i++) {
-        u64_t pmc = ia32_rdmsr (IA32_COUNTER_BASE + i);
-        u64_t cccr = ia32_rdmsr (IA32_CCCR_BASE + i);
+        u64_t pmc = x86_rdmsr (IA32_COUNTER_BASE + i);
+        u64_t cccr = x86_rdmsr (IA32_CCCR_BASE + i);
         printf("PMC/CCCR %02u: 0x%08x%08x/0x%08x%08x\n",
                i,
                (u32_t)(pmc >> 32), (u32_t)pmc,
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002-2006, Karlsruhe University
+ * Copyright (C) 2002-2007, Karlsruhe University
  *
  * File path: kdb/glue/v4-ia32/prepost.cc
  * Description: IA-32 specific handlers for KDB entry and exit
@@ -81,15 +81,15 @@ bool kdb_t::pre()
     if (ia32_single_step_on_branches)
     {
        addr_t last_branch_ip;
-       ia32_wrmsr (IA32_DEBUGCTL, 0);
+       x86_wrmsr (IA32_DEBUGCTL, 0);
        ia32_single_step_on_branches = false;
 #if defined(CONFIG_CPU_IA32_I686)
        last_branch_ip = (addr_t) (word_t)
-           ia32_rdmsr (IA32_LASTBRANCHFROMIP);
+           x86_rdmsr (IA32_LASTBRANCHFROMIP);
 #else
        last_branch_ip = (addr_t) (word_t)
-           (ia32_rdmsr (IA32_LASTBRANCH_0 +
-                        ia32_rdmsr (IA32_LASTBRANCH_TOS)) >> 32);
+           (x86_rdmsr (IA32_LASTBRANCH_0 +
+                       x86_rdmsr (IA32_LASTBRANCH_TOS)) >> 32);
 #endif
        disas_addr (last_branch_ip, "branch to");
        ia32_last_eip = f->eip;
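Context for the hunk above: on I686-class CPUs the last branch source sits in a single MSR, while the #else (P4) path walks the last-branch-record stack — IA32_LASTBRANCH_TOS yields the index of the newest record, IA32_LASTBRANCH_0 plus that index selects the record MSR, and the from-address occupies its upper 32 bits. A sketch of that computation, pulled out of the surrounding kdb code for readability:

    addr_t last_branch_from(void)
    {
        u64_t tos    = x86_rdmsr(IA32_LASTBRANCH_TOS);      /* index of newest record */
        u64_t record = x86_rdmsr(IA32_LASTBRANCH_0 + tos);  /* the record itself */
        return (addr_t)(word_t)(record >> 32);              /* from-IP in the high half */
    }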
@@ -1,10 +1,10 @@
 /*********************************************************************
  *
- * Copyright (C) 2003, 2006, Karlsruhe University
+ * Copyright (C) 2003, 2006-2007, Karlsruhe University
  *
  * File path: arch/amd64/cpu.h
  * Description: X86-64 CPU Specific functions
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
@@ -13,7 +13,7 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -25,133 +25,14 @@
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
  * $Id: cpu.h,v 1.4 2006/09/25 13:30:29 stoess Exp $
  *
  ********************************************************************/
 #ifndef __ARCH__AMD64__CPU_H__
 #define __ARCH__AMD64__CPU_H__

 #include INC_ARCH(amd64.h)

-#ifndef ASSEMBLY
-INLINE u64_t amd64_rdpmc(const int ctrsel)
-{
-    u32_t __eax, __edx;
-
-    __asm__ __volatile__ (
-        "rdpmc"
-        : "=a"(__eax), "=d"(__edx)
-        : "c"(ctrsel));
-
-    return ( (((u64_t) __edx) << 32) | ( (u64_t) __eax));
-}
-
-INLINE u64_t amd64_rdtsc(void)
-{
-    u32_t __eax, __edx;
-
-    __asm__ __volatile__ (
-        "rdtsc"
-        : "=a"(__eax), "=d"(__edx));
-
-    return ( (((u64_t) __edx) << 32) | ( (u64_t) __eax));
-}
-
-INLINE u64_t amd64_rdmsr(const u32_t reg)
-{
-    u32_t __eax, __edx;
-
-    __asm__ __volatile__ (
-        "rdmsr"
-        : "=a"(__eax), "=d"(__edx)
-        : "c"(reg)
-        );
-
-    return ( (((u64_t) __edx) << 32) | ( (u64_t) __eax));
-}
-
-INLINE void amd64_wrmsr(const u32_t reg, const u64_t val)
-{
-    __asm__ __volatile__ (
-        "wrmsr"
-        :
-        : "a"( (u32_t) val), "d" ( (u32_t) (val >> 32)), "c" (reg));
-}
-
-/*
- * The AMD manual tells us that setting the TSC isn't a good idea. We still do
- * it for SMP synchronization; offsetting in software would be the alternative.
- */
-
-INLINE void amd64_settsc(const u64_t val)
-{
-    amd64_wrmsr(0x10, val);
-}
-
-
-INLINE void amd64_wbinvd()
-{
-    __asm__ ("wbinvd\n" : : : "memory");
-}
-
-INLINE int amd64_lsb (word_t w) __attribute__ ((const));
-INLINE int amd64_lsb (word_t w)
-{
-    int bitnum;
-    __asm__ ("bsf %1, %0" : "=r" (bitnum) : "rm" (w));
-    return bitnum;
-}
-
-INLINE int amd64_msb (word_t w) __attribute__ ((const));
-INLINE int amd64_msb (word_t w)
-{
-    int bitnum;
-    __asm__ ("bsr %1, %0" : "=r" (bitnum) : "rm" (w));
-    return bitnum;
-}
-
-INLINE void amd64_cr0_set(const word_t val)
-{
-    word_t tmp;
-    __asm__ __volatile__ ("mov %%cr0, %0 \n"
-                          "or %1, %0 \n"
-                          "mov %0, %%cr0 \n"
-                          : "=r"(tmp)
-                          : "ri"(val));
-}
-
-INLINE void amd64_cr0_mask(const word_t val)
-{
-    word_t tmp;
-    __asm__ __volatile__ ("mov %%cr0, %0 \n"
-                          "and %1, %0 \n"
-                          "mov %0, %%cr0 \n"
-                          : "=r"(tmp)
-                          : "ri"(~val));
-}
-
-INLINE void amd64_cr4_set(const word_t val)
-{
-    word_t tmp;
-    __asm__ __volatile__ ("mov %%cr4, %0 \n"
-                          "or %1, %0 \n"
-                          "mov %0, %%cr4 \n"
-                          : "=r"(tmp)
-                          : "ri"(val));
-}
-
-INLINE void amd64_cr4_mask(const word_t val)
-{
-    word_t tmp;
-    __asm__ __volatile__ ("mov %%cr4, %0 \n"
-                          "and %1, %0 \n"
-                          "mov %0, %%cr4 \n"
-                          : "=r"(tmp)
-                          : "ri"(~val));
-}
-
-#endif /* ASSEMBLY */
-
+#include INC_ARCHX(x86,cpu.h)

 #endif /* !__ARCH__AMD64__CPU_H__ */
@@ -38,14 +38,14 @@ class amd64_fpu_t
 {
 public:
     static void enable()
-        { amd64_cr0_mask(X86_CR0_TS); }
+        { x86_cr0_mask(X86_CR0_TS); }
     static void disable()
-        { amd64_cr0_set(X86_CR0_TS); }
+        { x86_cr0_set(X86_CR0_TS); }

     static void enable_osfxsr()
-        { amd64_cr4_set(X86_CR4_OSFXSR); }
+        { x86_cr4_set(X86_CR4_OSFXSR); }
     static void disable_osfxsr()
-        { amd64_cr4_mask(X86_CR4_OSFXSR); }
+        { x86_cr4_mask(X86_CR4_OSFXSR); }

     static void init()
        { __asm__ __volatile__ ("finit\n"); }
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2004, Karlsruhe University
+ * Copyright (C) 2004, 2007, Karlsruhe University
  *
  * File path: arch/amd64/hwcr.h
  * Description:
@@ -88,218 +88,218 @@ public:

     static bool is_smm_locked()
     {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SMMLOCK);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SMMLOCK);
     }
     static void enable_smmlock()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SMMLOCK);
     }
     static void disable_smmlock()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMMLOCK);
     }


     static bool is_slowfence_enabled()
     {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SLOWFENCE);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SLOWFENCE);
     }
     static void enable_slowfence()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SLOWFENCE);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SLOWFENCE);
     }

     static void disable_slowfence()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMMLOCK);
     }


     static bool is_ptemem_cached()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_TLBCACHEDIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_TLBCACHEDIS);
     }
     static void enable_ptemem_cached()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_TLBCACHEDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_TLBCACHEDIS);
     }
     static void disable_ptemem_cached()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_TLBCACHEDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_TLBCACHEDIS);
     }


     static bool is_invd_wbinvd() {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_INVD_WBINVD);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_INVD_WBINVD);
     }
     static void enable_invd_wbinvd()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_INVD_WBINVD);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_INVD_WBINVD);
     }
     static void disable_invd_wbinvd()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_INVD_WBINVD);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_INVD_WBINVD);
     }


     static bool is_flushfilter_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_FFDIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_FFDIS);
     }

     static void enable_flushfilter()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_FFDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_FFDIS);
     }
     static void disable_flushfilter()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_FFDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_FFDIS);
     }


     static bool is_lockprefix_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_DISLOCK);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_DISLOCK);
     }
     static void enable_lockprefix()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_DISLOCK);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_DISLOCK);
     }
     static void disable_lockprefix()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_DISLOCK);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_DISLOCK);
     }


     static bool is_ignne_emulation_enabled()
     {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_IGNNE_EM);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_IGNNE_EM);
     }
     static void enable_ignne_emulation()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_IGNNE_EM);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_IGNNE_EM);
     }
     static void disable_ignne_emulation()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_IGNNE_EM);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_IGNNE_EM);
     }


     static bool is_hltx_spc_enabled()
     {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_HLTXSPCYCEN);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_HLTXSPCYCEN);
     }
     static void enable_hltx_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_HLTXSPCYCEN);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_HLTXSPCYCEN);
     }
     static void disable_hltx_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_HLTXSPCYCEN);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_HLTXSPCYCEN);
     }


     static bool is_smi_spc_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SMISPCYCDIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SMISPCYCDIS);
     }
     static void enable_smi_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMISPCYCDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SMISPCYCDIS);
     }
     static void disable_smi_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SMISPCYCDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SMISPCYCDIS);
     }


     static bool is_rsm_spc_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_RSMSPCYCDIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_RSMSPCYCDIS);
     }
     static void enable_rsm_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_RSMSPCYCDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_RSMSPCYCDIS);
     }
     static void disable_rsm_spc()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_RSMSPCYCDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_RSMSPCYCDIS);
     }


     static bool is_sse_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SSEDIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_SSEDIS);
     }
     static void enable_sse()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SSEDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_SSEDIS);
     }
     static void disable_sse()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SSEDIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_SSEDIS);
     }



     static bool is_wrap32_enabled()
     {
-        return !(amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_WRAP32DIS);
+        return !(x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_WRAP32DIS);
     }
     static void enable_wrap32()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_WRAP32DIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_WRAP32DIS);
     }

     static void disable_wrap32()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_WRAP32DIS);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_WRAP32DIS);
     }



     static bool is_mci_status_write_enabled()
     {
-        return (amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_MCIS_WREN);
+        return (x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_MCIS_WREN);
     }
     static void enable_mci_status_write()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_MCIS_WREN);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr | AMD64_HWCR_MCIS_WREN);
     }

     static void disable_mci_status_write()
     {
-        u64_t hwcr = amd64_rdmsr(AMD64_HWCR_MSR);
-        amd64_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_MCIS_WREN);
+        u64_t hwcr = x86_rdmsr(AMD64_HWCR_MSR);
+        x86_wrmsr(AMD64_HWCR_MSR, hwcr & ~AMD64_HWCR_MCIS_WREN);
     }

     static u8_t get_startup_fid_status() {
-        return (u8_t) ((amd64_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_START_FID) >> 19);
+        return (u8_t) ((x86_rdmsr(AMD64_HWCR_MSR) & AMD64_HWCR_START_FID) >> 19);
     }

 };
@@ -90,7 +90,7 @@ INLINE void amd64_mmu_t::flush_tlbent(word_t addr){
  * Enables paged mode for X86_64
  */
 INLINE void amd64_mmu_t::enable_paging(){
-    amd64_cr0_set(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE);
+    x86_cr0_set(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE);
     asm("jmp penabled; penabled:");
 }

@@ -98,7 +98,7 @@ INLINE void amd64_mmu_t::enable_paging(){
  * Disable paged mode for X86_64
  */
 INLINE void amd64_mmu_t::disable_paging(){
-    amd64_cr0_mask(X86_CR0_PG);
+    x86_cr0_mask(X86_CR0_PG);
 }

 /**
@@ -106,7 +106,7 @@ INLINE void amd64_mmu_t::disable_paging(){
  * Needed for long and compatibility mode
  */
 INLINE void amd64_mmu_t::enable_pae_mode(){
-    amd64_cr4_set(X86_CR4_PAE);
+    x86_cr4_set(X86_CR4_PAE);
 }

 /**
@@ -142,9 +142,9 @@ INLINE bool amd64_mmu_t::has_long_mode(){
  */
 INLINE void amd64_mmu_t::enable_long_mode(){

-    word_t efer = amd64_rdmsr(AMD64_EFER_MSR);
+    word_t efer = x86_rdmsr(AMD64_EFER_MSR);
     efer |= AMD64_EFER_LME;
-    amd64_wrmsr(AMD64_EFER_MSR, efer);
+    x86_wrmsr(AMD64_EFER_MSR, efer);

 }
@@ -154,7 +154,7 @@ INLINE void amd64_mmu_t::enable_long_mode(){
  */
 INLINE bool amd64_mmu_t::long_mode_active(){

-    word_t efer = amd64_rdmsr(AMD64_EFER_MSR);
+    word_t efer = x86_rdmsr(AMD64_EFER_MSR);
     return (efer & AMD64_EFER_LMA);

 }
@@ -163,7 +163,7 @@ INLINE bool amd64_mmu_t::long_mode_active(){
  * Enables global pages
  */
 INLINE void amd64_mmu_t::enable_global_pages(){
-    amd64_cr4_set(X86_CR4_PGE);
+    x86_cr4_set(X86_CR4_PGE);
 }

 /**
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2003, 2006, Karlsruhe University
+ * Copyright (C) 2003, 2006-2007, Karlsruhe University
  *
  * File path: arch/amd64/segdesc.h
  * Description: paste ia32/segdesc.h, s/ia32/amd64
@@ -95,7 +95,7 @@ INLINE void amd64_segdesc_t::set_seg(u64_t base, segtype_e type, int dpl, mode_e
     if (msr != msr_none && (base >> 32))
     {
        u32_t reg = (msr == msr_fs) ? AMD64_FS_MSR : AMD64_GS_MSR;
-       amd64_wrmsr(reg, base);
+       x86_wrmsr(reg, base);
     }

     x.d.base_low = base & 0xFFFFFF;
@@ -33,6 +33,7 @@
 #define __ARCH__IA32__CPU_H__

 #include INC_ARCH(ia32.h)
+#include INC_ARCHX(x86,cpu.h)

 INLINE bool ia32_has_cpuid()
 {
@@ -83,106 +84,4 @@ INLINE u32_t ia32_get_cpu_features()
     }
 }

-INLINE u64_t ia32_rdpmc(const int ctrsel)
-{
-    u64_t __return;
-
-    __asm__ __volatile__ (
-        "rdpmc"
-        : "=A"(__return)
-        : "c"(ctrsel));
-
-    return __return;
-}
-
-INLINE u64_t ia32_rdtsc(void)
-{
-    u64_t __return;
-
-    __asm__ __volatile__ (
-        "rdtsc"
-        : "=A"(__return));
-
-    return __return;
-}
-
-INLINE u64_t ia32_rdmsr(const u32_t reg)
-{
-    u64_t __return;
-
-    __asm__ __volatile__ (
-        "rdmsr"
-        : "=A"(__return)
-        : "c"(reg)
-        );
-
-    return __return;
-}
-
-INLINE void ia32_wrmsr(const u32_t reg, const u64_t val)
-{
-    __asm__ __volatile__ (
-        "wrmsr"
-        :
-        : "A"(val), "c"(reg));
-}
-
-INLINE void ia32_settsc(const u64_t val)
-{
-    ia32_wrmsr(0x10, val);
-}
-
-INLINE void ia32_wbinvd()
-{
-    __asm__ ("wbinvd\n" : : : "memory");
-}
-
-INLINE int ia32_lsb (u32_t w) __attribute__ ((const));
-INLINE int ia32_lsb (u32_t w)
-{
-    int bitnum;
-    __asm__ ("bsf %1, %0" : "=r" (bitnum) : "rm" (w));
-    return bitnum;
-}
-
-INLINE void ia32_cr0_set(const u32_t val)
-{
-    u32_t tmp;
-    __asm__ __volatile__ ("mov %%cr0, %0 \n"
-                          "orl %1, %0 \n"
-                          "mov %0, %%cr0 \n"
-                          : "=r"(tmp)
-                          : "ri"(val));
-}
-
-INLINE void ia32_cr0_mask(const u32_t val)
-{
-    u32_t tmp;
-    __asm__ __volatile__ ("movl %%cr0, %0 \n"
-                          "andl %1, %0 \n"
-                          "movl %0, %%cr0 \n"
-                          : "=r"(tmp)
-                          : "ri"(~val));
-}
-
-INLINE void ia32_cr4_set(const u32_t val)
-{
-    u32_t tmp;
-    __asm__ __volatile__ ("movl %%cr4, %0 \n"
-                          "orl %1, %0 \n"
-                          "movl %0, %%cr4 \n"
-                          : "=r"(tmp)
-                          : "ri"(val));
-}
-
-INLINE void ia32_cr4_mask(const u32_t val)
-{
-    u32_t tmp;
-    __asm__ __volatile__ ("movl %%cr4, %0 \n"
-                          "andl %1, %0 \n"
-                          "movl %0, %%cr4 \n"
-                          : "=r"(tmp)
-                          : "ri"(~val));
-}
-
 #endif /* !__ARCH__IA32__CPU_H__ */
@@ -38,14 +38,14 @@ class ia32_fpu_t
 {
 public:
     static void enable()
-        { ia32_cr0_mask(X86_CR0_TS); }
+        { x86_cr0_mask(X86_CR0_TS); }
     static void disable()
-        { ia32_cr0_set(X86_CR0_TS); }
+        { x86_cr0_set(X86_CR0_TS); }

     static void enable_osfxsr()
-        { ia32_cr4_set(X86_CR4_OSFXSR); }
+        { x86_cr4_set(X86_CR4_OSFXSR); }
     static void disable_osfxsr()
-        { ia32_cr4_mask(X86_CR4_OSFXSR); }
+        { x86_cr4_mask(X86_CR4_OSFXSR); }

     static void init()
        { __asm__ __volatile__ ("finit\n"); }
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2004-2005, Karlsruhe University
+ * Copyright (C) 2004-2005, 2007, Karlsruhe University
  *
  * File path: arch/ia32/hwcr_k8.h
  * Description:
@@ -88,218 +88,218 @@ public:

     static bool is_smm_locked()
     {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SMMLOCK);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SMMLOCK);
     }
     static void enable_smmlock()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SMMLOCK);
     }
     static void disable_smmlock()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMMLOCK);
     }


     static bool is_slowfence_enabled()
     {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SLOWFENCE);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SLOWFENCE);
     }
     static void enable_slowfence()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SLOWFENCE);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SLOWFENCE);
     }

     static void disable_slowfence()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMMLOCK);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMMLOCK);
     }


     static bool is_ptemem_cached()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_TLBCACHEDIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_TLBCACHEDIS);
     }
     static void enable_ptemem_cached()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_TLBCACHEDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_TLBCACHEDIS);
     }
     static void disable_ptemem_cached()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_TLBCACHEDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_TLBCACHEDIS);
     }


     static bool is_invd_wbinvd() {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_INVD_WBINVD);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_INVD_WBINVD);
     }
     static void enable_invd_wbinvd()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_INVD_WBINVD);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_INVD_WBINVD);
     }
     static void disable_invd_wbinvd()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_INVD_WBINVD);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_INVD_WBINVD);
     }


     static bool is_flushfilter_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_FFDIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_FFDIS);
     }

     static void enable_flushfilter()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_FFDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_FFDIS);
     }
     static void disable_flushfilter()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_FFDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_FFDIS);
     }


     static bool is_lockprefix_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_DISLOCK);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_DISLOCK);
     }
     static void enable_lockprefix()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_DISLOCK);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_DISLOCK);
     }
     static void disable_lockprefix()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_DISLOCK);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_DISLOCK);
     }


     static bool is_ignne_emulation_enabled()
     {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_IGNNE_EM);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_IGNNE_EM);
     }
     static void enable_ignne_emulation()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_IGNNE_EM);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_IGNNE_EM);
     }
     static void disable_ignne_emulation()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_IGNNE_EM);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_IGNNE_EM);
     }


     static bool is_hltx_spc_enabled()
     {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_HLTXSPCYCEN);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_HLTXSPCYCEN);
     }
     static void enable_hltx_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_HLTXSPCYCEN);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_HLTXSPCYCEN);
     }
     static void disable_hltx_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_HLTXSPCYCEN);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_HLTXSPCYCEN);
     }


     static bool is_smi_spc_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SMISPCYCDIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SMISPCYCDIS);
     }
     static void enable_smi_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMISPCYCDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SMISPCYCDIS);
     }
     static void disable_smi_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SMISPCYCDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SMISPCYCDIS);
     }


     static bool is_rsm_spc_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_RSMSPCYCDIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_RSMSPCYCDIS);
     }
     static void enable_rsm_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_RSMSPCYCDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_RSMSPCYCDIS);
     }
     static void disable_rsm_spc()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_RSMSPCYCDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_RSMSPCYCDIS);
     }


     static bool is_sse_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SSEDIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_SSEDIS);
     }
     static void enable_sse()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SSEDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_SSEDIS);
     }
     static void disable_sse()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SSEDIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_SSEDIS);
     }



     static bool is_wrap32_enabled()
     {
-        return !(ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_WRAP32DIS);
+        return !(x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_WRAP32DIS);
     }
     static void enable_wrap32()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_WRAP32DIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_WRAP32DIS);
     }

     static void disable_wrap32()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_WRAP32DIS);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_WRAP32DIS);
     }



     static bool is_mci_status_write_enabled()
     {
-        return (ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_MCIS_WREN);
+        return (x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_MCIS_WREN);
     }
     static void enable_mci_status_write()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_MCIS_WREN);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr | IA32_HWCR_MCIS_WREN);
     }

     static void disable_mci_status_write()
     {
-        u64_t hwcr = ia32_rdmsr(IA32_HWCR_MSR);
-        ia32_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_MCIS_WREN);
+        u64_t hwcr = x86_rdmsr(IA32_HWCR_MSR);
+        x86_wrmsr(IA32_HWCR_MSR, hwcr & ~IA32_HWCR_MCIS_WREN);
     }

     static u8_t get_startup_fid_status() {
-        return (u8_t) ((ia32_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_START_FID) >> 19);
+        return (u8_t) ((x86_rdmsr(IA32_HWCR_MSR) & IA32_HWCR_START_FID) >> 19);
     }

 };
@@ -89,7 +89,7 @@ INLINE void ia32_mmu_t::flush_tlbent(u32_t addr)
  */
 INLINE void ia32_mmu_t::enable_super_pages()
 {
-    ia32_cr4_set(X86_CR4_PSE);
+    x86_cr4_set(X86_CR4_PSE);
 }

 /**
@@ -97,7 +97,7 @@ INLINE void ia32_mmu_t::enable_super_pages()
  */
 INLINE void ia32_mmu_t::enable_global_pages()
 {
-    ia32_cr4_set(X86_CR4_PGE);
+    x86_cr4_set(X86_CR4_PGE);
 }

 /**
@@ -0,0 +1,239 @@
+/*********************************************************************
+ *
+ * Copyright (C) 2001-2004, 2007, Karlsruhe University
+ *
+ * File path: arch/x86/instr.h
+ * Description: x86 helper functions to access special instructions
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ ********************************************************************/
+#ifndef __ARCH__X86__INSTR_H__
+#define __ARCH__X86__INSTR_H__
+
+
+INLINE u64_t x86_rdpmc(const int ctrsel)
+{
+    u32_t eax, edx;
+
+    __asm__ __volatile__ (
+        "rdpmc"
+        : "=a"(eax), "=d"(edx)
+        : "c"(ctrsel));
+
+    return (((u64_t)edx) << 32) | (u64_t)eax;
+}
+
+
+INLINE u64_t x86_rdtsc(void)
+{
+    u32_t eax, edx;
+
+    __asm__ __volatile__ (
+        "rdtsc"
+        : "=a"(eax), "=d"(edx));
+
+    return (((u64_t)edx) << 32) | (u64_t)eax;
+}
+
+
+INLINE u64_t x86_rdmsr(const u32_t reg)
+{
+    u32_t eax, edx;
+
+    __asm__ __volatile__ (
+        "rdmsr"
+        : "=a"(eax), "=d"(edx)
+        : "c"(reg)
+        );
+
+    return (((u64_t)edx) << 32) | (u64_t)eax;
+}
+
+
+INLINE void x86_wrmsr(const u32_t reg, const u64_t val)
+{
+    __asm__ __volatile__ (
+        "wrmsr"
+        :
+        : "a"( (u32_t) val), "d" ( (u32_t) (val >> 32)), "c" (reg));
+}
+
+
+INLINE void x86_settsc(const u64_t val)
+{
+    x86_wrmsr(0x10, val);
+}
+
+
+INLINE void x86_wbinvd(void)
+{
+    __asm__ ("wbinvd\n" : : : "memory");
+}
+
+
+INLINE int x86_lsb (word_t w) __attribute__ ((const));
+INLINE int x86_lsb (word_t w)
+{
+    int bitnum;
+    __asm__ ("bsf %1, %0" : "=r" (bitnum) : "rm" (w));
+    return bitnum;
+}
+
+
+INLINE int x86_msb (word_t w) __attribute__ ((const));
+INLINE int x86_msb (word_t w)
+{
+    int bitnum;
+    __asm__ ("bsr %1, %0" : "=r" (bitnum) : "rm" (w));
+    return bitnum;
+}
+
+
+INLINE word_t x86_cr0_read(void)
+{
+    word_t tmp;
+    __asm__ __volatile__ ("mov %%cr0, %0 \n"
+                          : "=r"(tmp));
+    return tmp;
+}
+
+
+INLINE void x86_cr0_set(const word_t val)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr0, %0 \n"
+        "or %1, %0 \n"
+        "mov %0, %%cr0 \n"
+        : "=r"(tmp)
+        : "ri"(val));
+}
+
+
+INLINE void x86_cr0_mask(const word_t val)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr0, %0 \n"
+        "and %1, %0 \n"
+        "mov %0, %%cr0 \n"
+        : "=r"(tmp)
+        : "ri"(~val));
+}
+
+
+INLINE word_t x86_cr3_read(void)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr3, %0 \n"
+        : "=r"(tmp));
+    return tmp;
+}
+
+
+INLINE void x86_cr3_write(const word_t val)
+{
+    __asm__ __volatile__ (
+        "mov %0, %%cr3 \n"
+        :
+        : "r"(val));
+}
+
+
+INLINE word_t x86_cr4_read(void)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr4, %0 \n"
+        : "=r"(tmp));
+    return tmp;
+}
+
+
+INLINE void x86_cr4_write(const word_t val)
+{
+    __asm__ __volatile__ (
+        "mov %0, %%cr4 \n"
+        :
+        : "r"(val));
+}
+
+
+INLINE void x86_cr4_set(const word_t val)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr4, %0 \n"
+        "or %1, %0 \n"
+        "mov %0, %%cr4 \n"
+        : "=r"(tmp)
+        : "ri"(val));
+}
+
+
+INLINE void x86_cr4_mask(const word_t val)
+{
+    word_t tmp;
+    __asm__ __volatile__ (
+        "mov %%cr4, %0 \n"
+        "and %1, %0 \n"
+        "mov %0, %%cr4 \n"
+        : "=r"(tmp)
+        : "ri"(~val));
+}
+
+
+INLINE void x86_enable_interrupts(void)
+{
+    __asm__ __volatile__ ("sti\n":);
+}
+
+
+INLINE void x86_disable_interrupts(void)
+{
+    __asm__ __volatile__ ("cli\n":);
+}
+
+
+INLINE void x86_sleep(void)
+{
+    __asm__ __volatile__(
+        "sti \n"
+        "hlt \n"
+        "cli \n"
+        :);
+}
+
+
+INLINE void x86_invlpg (word_t addr)
+{
+    __asm__ __volatile__ (
+        "invlpg (%0) \n"
+        :
+        : "r" (addr));
+}
+
+#endif /* !__ARCH__X86__INSTR_H__ */
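One detail worth noting about the new header above: the old ia32_rdmsr used GCC's "=A" constraint, which names the edx:eax register pair and only has that meaning on 32-bit x86, whereas the merged x86_rdmsr requests "=a" and "=d" separately and recombines them with shifts — a form that compiles correctly for both 32-bit and 64-bit targets, since RDMSR always returns the low half in EAX and the high half in EDX. A standalone sketch of that constraint choice (typedefs added here only so it compiles outside the kernel tree):

    typedef unsigned int       u32_t;
    typedef unsigned long long u64_t;

    static inline u64_t rdmsr_sketch(u32_t reg)
    {
        u32_t lo, hi;
        /* low 32 bits arrive in EAX, high 32 bits in EDX, on ia32 and amd64 alike */
        __asm__ __volatile__ ("rdmsr" : "=a"(lo), "=d"(hi) : "c"(reg));
        return ((u64_t)hi << 32) | lo;
    }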
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002-2006, Karlsruhe University
+ * Copyright (C) 2002-2007, Karlsruhe University
  *
  * File path: glue/v4-amd64/exception.cc
  * Description: exception handling
@@ -456,7 +456,7 @@ static bool handle_faulting_instruction (amd64_exceptionframe_t * frame)
     case 0x30:
        /* wrmsr */
        if ( is_privileged_space(space) ) {
-           amd64_wrmsr ((u32_t) frame->rcx, (frame->rdx << 32) | (frame->rax & 0xffffffff));
+           x86_wrmsr ((u32_t) frame->rcx, (frame->rdx << 32) | (frame->rax & 0xffffffff));
            frame->rip += 2;
            return true;
        } break;
@@ -464,7 +464,7 @@ static bool handle_faulting_instruction (amd64_exceptionframe_t * frame)
     case 0x32:
        /* rdmsr */
        if ( is_privileged_space(space) ) {
-           u64_t val = amd64_rdmsr ((u32_t) frame->rcx);
+           u64_t val = x86_rdmsr ((u32_t) frame->rcx);
            frame->rax = val & 0xffffffff;
            frame->rdx = val >> 32;
            frame->rip += 2;
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2003-2006, Karlsruhe University
+ * Copyright (C) 2003-2007, Karlsruhe University
  *
  * File path: glue/v4-amd64/ia32/syscalls.cc
  * Description: syscall dispatcher for 32-bit programs
@@ -116,7 +116,7 @@ extern "C" amd64_sysret_t syscall_dispatcher_32(word_t arg1, /* RDI */
     {
        procdesc_t * pdesc = get_kip()->processor_info.get_procdesc(0);
        ASSERT (pdesc);
-       ret.rax = amd64_rdtsc() / (pdesc->internal_freq / 1000);
+       ret.rax = x86_rdtsc() / (pdesc->internal_freq / 1000);
        ret.rdx = ret.rax >> 32;
        return ret;
     }
@@ -377,33 +377,33 @@ static void setup_msrs()
 {

     /* sysret (63..48) / syscall (47..32) CS/SS MSR */
-    amd64_wrmsr(AMD64_STAR_MSR, ((AMD64_SYSRETCS << 48) | (AMD64_SYSCALLCS << 32)));
+    x86_wrmsr(AMD64_STAR_MSR, ((AMD64_SYSRETCS << 48) | (AMD64_SYSCALLCS << 32)));

     /* long mode syscalls MSR */
-    amd64_wrmsr(AMD64_LSTAR_MSR, (u64_t)(syscall_entry));
+    x86_wrmsr(AMD64_LSTAR_MSR, (u64_t)(syscall_entry));

     /* compatibility mode syscalls MSR */
 #if defined(CONFIG_AMD64_COMPATIBILITY_MODE)
 #if defined(CONFIG_CPU_AMD64_EM64T)
-    amd64_wrmsr(AMD64_SYSENTER_CS_MSR, AMD64_SYSCALLCS);
-    amd64_wrmsr(AMD64_SYSENTER_EIP_MSR, (u64_t)(sysenter_entry_32));
+    x86_wrmsr(AMD64_SYSENTER_CS_MSR, AMD64_SYSCALLCS);
+    x86_wrmsr(AMD64_SYSENTER_EIP_MSR, (u64_t)(sysenter_entry_32));
 #if defined(CONFIG_IO_FLEXPAGES)
-    amd64_wrmsr(AMD64_SYSENTER_ESP_MSR, (u64_t)(TSS_MAPPING) + 4);
+    x86_wrmsr(AMD64_SYSENTER_ESP_MSR, (u64_t)(TSS_MAPPING) + 4);
 #else
-    amd64_wrmsr(AMD64_SYSENTER_ESP_MSR, (u64_t)(&tss) + 4);
+    x86_wrmsr(AMD64_SYSENTER_ESP_MSR, (u64_t)(&tss) + 4);
 #endif
 #else /* !defined(CONFIG_CPU_AMD64_EM64T) */
-    amd64_wrmsr(AMD64_CSTAR_MSR, (u64_t)(syscall_entry_32));
+    x86_wrmsr(AMD64_CSTAR_MSR, (u64_t)(syscall_entry_32));
 #endif /* !defined(CONFIG_CPU_AMD64_EM64T) */
 #endif /* defined(CONFIG_AMD64_COMPATIBILITY_MODE) */

     /* long mode syscall RFLAGS MASK */
-    amd64_wrmsr(AMD64_SFMASK_MSR, (u64_t)(AMD64_SYSCALL_FLAGMASK));
+    x86_wrmsr(AMD64_SFMASK_MSR, (u64_t)(AMD64_SYSCALL_FLAGMASK));

     /* enable syscall/sysret in EFER */
-    word_t efer = amd64_rdmsr(AMD64_EFER_MSR);
+    word_t efer = x86_rdmsr(AMD64_EFER_MSR);
     efer |= AMD64_EFER_SCE;
-    amd64_wrmsr(AMD64_EFER_MSR, efer);
+    x86_wrmsr(AMD64_EFER_MSR, efer);

 }

@@ -440,7 +440,7 @@ static cpuid_t SECTION(".init.cpu") init_cpu()

     /* Allow performance counters for users */
     TRACE_INIT("Enabling performance monitoring at user level (CPU %d)\n", cpuid);
-    amd64_cr4_set(X86_CR4_PCE);
+    x86_cr4_set(X86_CR4_PCE);
 #endif /* defined(CONFIG_PERFMON) */

     TRACE_INIT("Enabling global pages (CPU %d)\n", cpuid);
@@ -679,7 +679,7 @@ static void smp_ap_commence()
     while( smp_commence_lock.is_locked() );

     /* TSC should not be written, but we do it anyway ;-) */
-    amd64_settsc(0);
+    x86_settsc(0);
 }

 static void smp_bp_commence()
@@ -690,7 +690,7 @@ static void smp_bp_commence()
     // now release all at once
     smp_commence_lock.unlock();

-    amd64_settsc(0);
+    x86_settsc(0);
 }

 /**
@@ -135,7 +135,7 @@ extern "C" amd64_sysret_t syscall_dispatcher(word_t arg1, /* RDI */
     {
        procdesc_t * pdesc = get_kip()->processor_info.get_procdesc(0);
        ASSERT(pdesc);
-       ret.rax = amd64_rdtsc() / (pdesc->internal_freq / 1000);
+       ret.rax = x86_rdtsc() / (pdesc->internal_freq / 1000);
        ret.rdx = 0;
        return ret;
     }
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002-2005, Karlsruhe University
+ * Copyright (C) 2002-2005, 2007, Karlsruhe University
  *
  * File path: glue/v4-amd64/timer-apic.cc
  * Description: implementation of apic timer
@@ -78,12 +78,12 @@ void timer_t::init_cpu()
     /* calculate processor speed */
     wait_for_second_tick();

-    cpu_cycles = amd64_rdtsc();
+    cpu_cycles = x86_rdtsc();
     bus_cycles = local_apic.timer_get();

     wait_for_second_tick();

-    cpu_cycles = amd64_rdtsc() - cpu_cycles;
+    cpu_cycles = x86_rdtsc() - cpu_cycles;
     bus_cycles -= local_apic.timer_get();

     proc_freq = cpu_cycles / 1000;
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002, 2004-2005, Karlsruhe University
+ * Copyright (C) 2002, 2004-2005, 2007, Karlsruhe University
  *
  * File path: glue/v4-amd64/timer.cc
  * Description: Implements RTC timer
@@ -89,18 +89,18 @@ void timer_t::init_global()

 void timer_t::init_cpu()
 {
     u64_t cpu_cycles;

 #if !defined(CONFIG_CPU_AMD64_SIMICS)
     TRACE_INIT("Calculating processor speed...\n");
     /* calculate processor speed */
     wait_for_second_tick();

-    cpu_cycles = amd64_rdtsc();
+    cpu_cycles = x86_rdtsc();

     wait_for_second_tick();

-    cpu_cycles = amd64_rdtsc() - cpu_cycles;
+    cpu_cycles = x86_rdtsc() - cpu_cycles;

     proc_freq = cpu_cycles / 1000;
     bus_freq = 0;
@@ -316,7 +316,7 @@ static bool handle_faulting_instruction (ia32_exceptionframe_t * frame)
     case 0x30:
        /* wrmsr */
        if ( is_privileged_space(space) ) {
-           ia32_wrmsr (frame->ecx, ((u64_t)(frame->eax)) |
+           x86_wrmsr (frame->ecx, ((u64_t)(frame->eax)) |
                        ((u64_t)(frame->edx)) << 32);
            frame->eip += 2;
            return true;
@@ -325,7 +325,7 @@ static bool handle_faulting_instruction (ia32_exceptionframe_t * frame)
     case 0x32:
        /* rdmsr */
        if ( is_privileged_space(space) ) {
-           u64_t val = ia32_rdmsr (frame->ecx);
+           u64_t val = x86_rdmsr (frame->ecx);
            frame->eax = (u32_t)val;
            frame->edx = (u32_t)(val >> 32);
            frame->eip += 2;
@@ -236,12 +236,12 @@ static void setup_msrs()
 {
 #ifdef CONFIG_IA32_SYSENTER
     /* here we also setup the model specific registers for the syscalls */
-    ia32_wrmsr(IA32_SYSENTER_CS_MSR, (u32_t)(IA32_KCS));
-    ia32_wrmsr(IA32_SYSENTER_EIP_MSR, (u32_t)(exc_user_sysipc));
+    x86_wrmsr(IA32_SYSENTER_CS_MSR, (u32_t)(IA32_KCS));
+    x86_wrmsr(IA32_SYSENTER_EIP_MSR, (u32_t)(exc_user_sysipc));
 #if defined(CONFIG_IO_FLEXPAGES)
-    ia32_wrmsr(IA32_SYSENTER_ESP_MSR, (u32_t)(TSS_MAPPING) + 4);
+    x86_wrmsr(IA32_SYSENTER_ESP_MSR, (u32_t)(TSS_MAPPING) + 4);
 #else
-    ia32_wrmsr(IA32_SYSENTER_ESP_MSR, (u32_t)(&tss) + 4);
+    x86_wrmsr(IA32_SYSENTER_ESP_MSR, (u32_t)(&tss) + 4);
 #endif
 #endif

@@ -275,7 +275,7 @@ static void setup_msrs()
        ((u64_t) IA32_PAT_WC << (8*4)) | ((u64_t) IA32_PAT_WT << (8*5)) |
        ((u64_t) IA32_PAT_UM << (8*6)) | ((u64_t) IA32_PAT_WP << (8*7));

-    ia32_wrmsr (IA32_CR_PAT_MSR, pats);
+    x86_wrmsr (IA32_CR_PAT_MSR, pats);
 #endif
 }

@@ -348,7 +348,7 @@ static cpuid_t SECTION(".init.cpu") init_cpu()
 #endif

     /* Allow performance counters for users */
-    ia32_cr4_set(X86_CR4_PCE);
+    x86_cr4_set(X86_CR4_PCE);
 #endif /* defined(CONFIG_PERFMON) */

     /* initialize the CPU specific mappings */
@@ -694,7 +694,7 @@ static void smp_ap_commence()
     /* finally we sync the time-stamp counters */
     while( smp_commence_lock.is_locked() );

-    ia32_settsc(0);
+    x86_settsc(0);
 }

 static void smp_bp_commence()
@@ -704,8 +704,8 @@ static void smp_bp_commence()

     // now release all at once
     smp_commence_lock.unlock();

-    ia32_settsc(0);
+    x86_settsc(0);
 }

 /**
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002-2003, Karlsruhe University
+ * Copyright (C) 2002-2003, 2007, Karlsruhe University
  *
  * File path: glue/v4-ia32/timer-apic.cc
  * Description: implementation of apic timer
@@ -74,12 +74,12 @@ void timer_t::init_cpu()
     /* calculate processor speed */
     wait_for_second_tick();

-    u64_t cpu_cycles = ia32_rdtsc();
+    u64_t cpu_cycles = x86_rdtsc();
     u32_t bus_cycles = local_apic.timer_get();

     wait_for_second_tick();

-    cpu_cycles = ia32_rdtsc() - cpu_cycles;
+    cpu_cycles = x86_rdtsc() - cpu_cycles;
     bus_cycles -= local_apic.timer_get();

     proc_freq = cpu_cycles / 1000;
@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2002-2004, 2006, Karlsruhe University
+ * Copyright (C) 2002-2004, 2006-2007, Karlsruhe University
  *
  * File path: glue/v4-ia32/timer.cc
  * Description: Implements RTC timer
@@ -102,11 +102,11 @@ void SECTION (".init") timer_t::init_cpu()
     /* calculate processor speed */
     wait_for_second_tick();

-    u64_t cpu_cycles = ia32_rdtsc();
+    u64_t cpu_cycles = x86_rdtsc();

     wait_for_second_tick();

-    cpu_cycles = ia32_rdtsc() - cpu_cycles;
+    cpu_cycles = x86_rdtsc() - cpu_cycles;

     proc_freq = cpu_cycles / 1000;
     bus_freq = 0;
@@ -221,7 +221,7 @@ void init_io_space(void)
 #if defined(CONFIG_IA32_PVI)
     /* Enable PVI Bit */
 #warning Setting PVI bit in CR4 will not work with vmware
-    ia32_cr4_set(X86_CR4_PVI);
+    x86_cr4_set(X86_CR4_PVI);
 #endif

@@ -1,6 +1,6 @@
 /*********************************************************************
  *
- * Copyright (C) 2006, Karlsruhe University
+ * Copyright (C) 2006-2007, Karlsruhe University
  *
  * File path: platform/pc99/perfmon.h
  * Description: Performance monitoring counter macros for IA32/AMD64 CPUS.
@@ -139,11 +139,7 @@

 #endif /* defined(CONFIG_CPU_IA32_I686) */

-#if defined(CONFIG_ARCH_IA32)
-#define arch_wrmsr ia32_wrmsr
-#elif defined(CONFIG_ARCH_AMD64)
-#define arch_wrmsr amd64_wrmsr
-#endif
+#define arch_wrmsr x86_wrmsr

 INLINE void setup_perfmon_cpu(word_t cpuid)
 {
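With both architectures now exposing the same x86_wrmsr, the #if/#elif dispatch above collapses to a single alias, and callers such as the event-select writes in the next hunk stay unchanged. A sketch of the resulting usage, constants as in the hunk below:

    #define arch_wrmsr x86_wrmsr                     /* resolves identically on ia32 and amd64 */
    arch_wrmsr(PMC_MSR_EVTSEL0, 0x4100C0);           /* ENABLE + USER + INST_RETIRED */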
@@ -162,7 +158,7 @@ INLINE void setup_perfmon_cpu(word_t cpuid)
     arch_wrmsr(PMC_MSR_EVTSEL0, 0x4100C0); // ENABLE + USER + INST_RETIRED
     arch_wrmsr(PMC_MSR_EVTSEL1, 0x4200C0); // ENABLE + KRNL + INST_RETIRED

-    //ia32_cr4_set(IA32_CR4_PCE); // allow rdpmc in user mode
+    //x86_cr4_set(IA32_CR4_PCE); // allow rdpmc in user mode

 #elif defined(CONFIG_CPU_IA32_P4) || defined(CONFIG_CPU_AMD64_P4)
