cxl: Unmap MMIO regions when detaching a context

If we need to force detach a context (e.g. due to EEH or simply force
unbinding the driver), we should prevent userspace contexts from any
further access to the Problem State Area MMIO region, which they may
have mapped with mmap().

This patch unmaps any mapped MMIO regions when detaching a userspace
context.

Cc: stable@vger.kernel.org
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

commit b123429e6a (parent a98e6e9f4e)
Author:    Ian Munsie <imunsie@au1.ibm.com>
Date:      2014-12-08 19:18:01 +11:00
Committer: Michael Ellerman <mpe@ellerman.id.au>

3 changed files, 21 insertions(+), 3 deletions(-)
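
To make the consequence concrete: after this change, a process that still
has the Problem State Area mapped when its context is force detached takes
SIGBUS on its next access, rather than continuing to poke at MMIO space
behind the kernel's back. A minimal userspace sketch of that behaviour
(hypothetical: the device path /dev/cxl/afu0.0s is only an example, and the
CXL start-work ioctl setup that mmap() requires on a real AFU is elided):

    #include <fcntl.h>
    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static sigjmp_buf env;

    static void on_sigbus(int sig)
    {
            (void)sig;
            siglongjmp(env, 1);
    }

    int main(void)
    {
            int fd = open("/dev/cxl/afu0.0s", O_RDWR);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Map the AFU's Problem State Area (context start elided). */
            volatile unsigned long *psa = mmap(NULL, 4096,
                            PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
            if (psa == MAP_FAILED) {
                    perror("mmap");
                    return 1;
            }

            signal(SIGBUS, on_sigbus);

            if (sigsetjmp(env, 1) == 0) {
                    /* While attached this is a live MMIO access. Once the
                     * driver force detaches the context (EEH, unbind), the
                     * fixed kernel has zapped the PTEs, so the load faults
                     * and we land in the SIGBUS handler instead. */
                    printf("read %#lx\n", *psa);
            } else {
                    printf("mapping revoked by the driver (SIGBUS)\n");
            }

            munmap((void *)psa, 4096);
            close(fd);
            return 0;
    }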

drivers/misc/cxl/context.c

@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
 /*
  * Initialises a CXL context.
  */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+                     struct address_space *mapping)
 {
         int i;
 
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
         ctx->afu = afu;
         ctx->master = master;
         ctx->pid = NULL; /* Set in start work ioctl */
+        mutex_init(&ctx->mapping_lock);
+        ctx->mapping = mapping;
 
         /*
          * Allocate the segment table before we put it in the IDR so that we
@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
         afu_release_irqs(ctx);
         flush_work(&ctx->fault_work); /* Only needed for dedicated process */
         wake_up_all(&ctx->wq);
+
+        /* Release Problem State Area mapping */
+        mutex_lock(&ctx->mapping_lock);
+        if (ctx->mapping)
+                unmap_mapping_range(ctx->mapping, 0, 0, 1);
+        mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
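
For reference, unmap_mapping_range() is the generic mm helper used above: it
zaps every userspace PTE that maps a given range of an address_space. Its
prototype, from include/linux/mm.h:

    void unmap_mapping_range(struct address_space *mapping,
                             loff_t const holebegin, loff_t const holelen,
                             int even_cows);

With holebegin = 0 and holelen = 0 the entire file range is unmapped, and
even_cows = 1 zaps even private copy-on-write copies of the pages, so no
stale view of the Problem State Area survives the detach.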

drivers/misc/cxl/cxl.h

@@ -398,6 +398,10 @@ struct cxl_context {
         phys_addr_t psn_phys;
         u64 psn_size;
 
+        /* Used to unmap any mmaps when force detaching */
+        struct address_space *mapping;
+        struct mutex mapping_lock;
+
         spinlock_t sste_lock; /* Protects segment table entries */
         struct cxl_sste *sstp;
         u64 sstp0, sstp1;
@@ -599,7 +603,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
 void init_cxl_native(void);
 
 struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+                     struct address_space *mapping);
 void cxl_context_free(struct cxl_context *ctx);
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);

drivers/misc/cxl/file.c

@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
                 goto err_put_afu;
         }
 
-        if ((rc = cxl_context_init(ctx, afu, master)))
+        if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
                 goto err_put_afu;
 
         pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
                  __func__, ctx->pe);
         cxl_context_detach(ctx);
 
+        mutex_lock(&ctx->mapping_lock);
+        ctx->mapping = NULL;
+        mutex_unlock(&ctx->mapping_lock);
+
         put_device(&ctx->afu->dev);
 
         /*
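
Taken together, the release and detach paths form a small revocation
protocol around ctx->mapping: the context can outlive the file descriptor,
and the address_space belongs to the file's inode, so afu_release() must
sever the pointer before the file goes away, while __detach_context() may
only dereference it while it is still valid. Condensed from the hunks above
(a restatement of the patch, not new driver code):

    /* afu_release(): the inode's address_space is about to go away,
     * so drop the context's reference to it under the lock. */
    mutex_lock(&ctx->mapping_lock);
    ctx->mapping = NULL;
    mutex_unlock(&ctx->mapping_lock);

    /* __detach_context(): revoke userspace access, but only while the
     * file still backs the mapping; the mutex makes this test-and-use
     * atomic with respect to the clearing above. */
    mutex_lock(&ctx->mapping_lock);
    if (ctx->mapping)
            unmap_mapping_range(ctx->mapping, 0, 0, 1);
    mutex_unlock(&ctx->mapping_lock);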