x86/paravirt/xen: properly fill out the ldt ops
LTP testing showed that Xen does not properly implement sys_modify_ldt(). This patch does the final little bits needed to make the ldt work properly.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 38ffbe66d5 (parent 338b9bb3ad)
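For context (not part of the commit): a minimal userspace sketch of the kind of sys_modify_ldt() call that LTP exercises. It installs one LDT descriptor and reads the table back; the wrapper name and test values are illustrative only. Under a Xen paravirt guest this path only behaves correctly once the alloc_ldt/free_ldt ops below are wired up.

/*
 * Illustrative only, not part of the patch: install one LDT entry via
 * sys_modify_ldt() and read the LDT back. func 1 = write entry,
 * func 0 = read table.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>		/* struct user_desc, LDT_ENTRY_SIZE */

static long my_modify_ldt(int func, void *ptr, unsigned long bytecount)
{
	return syscall(SYS_modify_ldt, func, ptr, bytecount);
}

int main(void)
{
	static char data[4096];
	struct user_desc ldt;
	char buf[LDT_ENTRY_SIZE * 8];

	memset(&ldt, 0, sizeof(ldt));
	ldt.entry_number = 0;
	/* LDT bases are 32-bit; assumes data sits in the low 4GB */
	ldt.base_addr = (unsigned int)(unsigned long)data;
	ldt.limit = sizeof(data) - 1;
	ldt.seg_32bit = 1;

	if (my_modify_ldt(1, &ldt, sizeof(ldt)) != 0) {
		perror("modify_ldt(write)");
		return 1;
	}
	if (my_modify_ldt(0, buf, sizeof(buf)) < 0) {
		perror("modify_ldt(read)");
		return 1;
	}
	printf("LDT entry installed and read back\n");
	return 0;
}
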
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -51,6 +51,8 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 	memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
 	       (mincount - oldsize) * LDT_ENTRY_SIZE);
 
+	paravirt_alloc_ldt(newldt, mincount);
+
 #ifdef CONFIG_X86_64
 	/* CHECKME: Do we really need this ? */
 	wmb();
@@ -75,6 +77,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 #endif
 	}
 	if (oldsize) {
+		paravirt_free_ldt(oldldt, oldsize);
 		if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
 			vfree(oldldt);
 		else
@@ -86,10 +89,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
 static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
 {
 	int err = alloc_ldt(new, old->size, 0);
+	int i;
 
 	if (err < 0)
 		return err;
-	memcpy(new->ldt, old->ldt, old->size * LDT_ENTRY_SIZE);
+
+	for(i = 0; i < old->size; i++)
+		write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
 	return 0;
 }
 
@@ -126,6 +132,7 @@ void destroy_context(struct mm_struct *mm)
 		if (mm == current->active_mm)
 			clear_LDT();
 #endif
+		paravirt_free_ldt(mm->context.ldt, mm->context.size);
 		if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
 			vfree(mm->context.ldt);
 		else
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -348,6 +348,10 @@ struct pv_cpu_ops pv_cpu_ops = {
 	.write_ldt_entry = native_write_ldt_entry,
 	.write_gdt_entry = native_write_gdt_entry,
 	.write_idt_entry = native_write_idt_entry,
+
+	.alloc_ldt = paravirt_nop,
+	.free_ldt = paravirt_nop,
+
 	.load_sp0 = native_load_sp0,
 
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -325,6 +325,26 @@ static unsigned long xen_store_tr(void)
 	return 0;
 }
 
+static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+	void *v = ldt;
+	int i;
+
+	for(i = 0; i < pages; i += PAGE_SIZE)
+		make_lowmem_page_readonly(v + i);
+}
+
+static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	unsigned pages = roundup(entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+	void *v = ldt;
+	int i;
+
+	for(i = 0; i < pages; i += PAGE_SIZE)
+		make_lowmem_page_readwrite(v + i);
+}
+
 static void xen_set_ldt(const void *addr, unsigned entries)
 {
 	struct mmuext_op *op;
@@ -1220,6 +1240,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initdata = {
 	.load_gs_index = xen_load_gs_index,
 #endif
 
+	.alloc_ldt = xen_alloc_ldt,
+	.free_ldt = xen_free_ldt,
+
 	.store_gdt = native_store_gdt,
 	.store_idt = native_store_idt,
 	.store_tr = xen_store_tr,
--- a/include/asm-x86/desc.h
+++ b/include/asm-x86/desc.h
@@ -97,7 +97,15 @@ static inline int desc_empty(const void *ptr)
 	native_write_gdt_entry(dt, entry, desc, type)
 #define write_idt_entry(dt, entry, g)		\
 	native_write_idt_entry(dt, entry, g)
+
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+}
 #endif	/* CONFIG_PARAVIRT */
 
 static inline void native_write_idt_entry(gate_desc *idt, int entry,
 					  const gate_desc *gate)
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -124,6 +124,9 @@ struct pv_cpu_ops {
 				int entrynum, const void *desc, int size);
 	void (*write_idt_entry)(gate_desc *,
 				int entrynum, const gate_desc *gate);
+	void (*alloc_ldt)(struct desc_struct *ldt, unsigned entries);
+	void (*free_ldt)(struct desc_struct *ldt, unsigned entries);
+
 	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
@@ -824,6 +827,16 @@ do { \
 	(aux) = __aux;					\
 } while (0)
 
+static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
+}
+
+static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
+{
+	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
+}
+
 static inline void load_TR_desc(void)
 {
 	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);