[POWERPC] spufs: state_mutex cleanup
Various cleanups in code surrounding the state semaphore:

 - inline spu_acquire/spu_release
 - cleanup spu_acquire_* and add kerneldoc comments to these functions
 - remove spu_release_exclusive and replace it with spu_release

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
commit 6a0641e510
parent 650f8b0291
@@ -96,16 +96,6 @@ void spu_forget(struct spu_context *ctx)
 	spu_release(ctx);
 }
 
-void spu_acquire(struct spu_context *ctx)
-{
-	mutex_lock(&ctx->state_mutex);
-}
-
-void spu_release(struct spu_context *ctx)
-{
-	mutex_unlock(&ctx->state_mutex);
-}
-
 void spu_unmap_mappings(struct spu_context *ctx)
 {
 	if (ctx->local_store)
@@ -124,66 +114,85 @@ void spu_unmap_mappings(struct spu_context *ctx)
 		unmap_mapping_range(ctx->psmap, 0, 0x20000, 1);
 }
 
+/**
+ * spu_acquire_exclusive - lock spu contex and protect against userspace access
+ * @ctx:	spu contex to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_exclusive(struct spu_context *ctx)
 {
-	int ret = 0;
+	int ret = -EINVAL;
 
-	mutex_lock(&ctx->state_mutex);
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	spu_acquire(ctx);
+	/*
+	 * Context is about to be freed, so we can't acquire it anymore.
+	 */
+	if (!ctx->owner)
+		goto out_unlock;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
+			goto out_unlock;
 	} else {
-		/* We need to exclude userspace access to the context. */
+		/*
+		 * We need to exclude userspace access to the context.
+		 *
+		 * To protect against memory access we invalidate all ptes
+		 * and make sure the pagefault handlers block on the mutex.
+		 */
 		spu_unmap_mappings(ctx);
 	}
 
-out:
-	if (ret)
-		mutex_unlock(&ctx->state_mutex);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
+/**
+ * spu_acquire_runnable - lock spu contex and make sure it is in runnable state
+ * @ctx:	spu contex to lock
+ *
+ * Note:
+ *	Returns 0 and with the context locked on success
+ *	Returns negative error and with the context _unlocked_ on failure.
+ */
 int spu_acquire_runnable(struct spu_context *ctx)
 {
-	int ret = 0;
-
-	mutex_lock(&ctx->state_mutex);
-	if (ctx->state == SPU_STATE_RUNNABLE) {
-		ctx->spu->prio = current->prio;
-		return 0;
-	}
-
-	/* ctx is about to be freed, can't acquire any more */
-	if (!ctx->owner) {
-		ret = -EINVAL;
-		goto out;
-	}
+	int ret = -EINVAL;
 
+	spu_acquire(ctx);
 	if (ctx->state == SPU_STATE_SAVED) {
+		/*
+		 * Context is about to be freed, so we can't acquire it anymore.
+		 */
+		if (!ctx->owner)
+			goto out_unlock;
 		ret = spu_activate(ctx, 0);
 		if (ret)
-			goto out;
-	}
+			goto out_unlock;
+	} else
+		ctx->spu->prio = current->prio;
 
 	/* On success, we return holding the lock */
-	return ret;
-out:
-	/* Release here, to simplify calling code. */
-	mutex_unlock(&ctx->state_mutex);
+	return 0;
+
+out_unlock:
+	spu_release(ctx);
 	return ret;
 }
 
+/**
+ * spu_acquire_saved - lock spu contex and make sure it is in saved state
+ * @ctx:	spu contex to lock
+ */
 void spu_acquire_saved(struct spu_context *ctx)
 {
-	mutex_lock(&ctx->state_mutex);
-	if (ctx->state == SPU_STATE_RUNNABLE)
+	spu_acquire(ctx);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
 }
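The kerneldoc comments above pin down the new locking contract: a successful spu_acquire_runnable() or spu_acquire_exclusive() returns with state_mutex held, while a failure returns with it already dropped. As an editorial illustration only (spufs_example_op is a hypothetical caller, not part of this patch), a user of the reworked interface would look roughly like this:

/* Hypothetical caller sketch; assumes the spufs.h declarations shown below. */
static int spufs_example_op(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_runnable(ctx);
	if (ret)
		return ret;	/* failure path: state_mutex already released */

	/* ... operate on the runnable context while holding state_mutex ... */

	spu_release(ctx);	/* success path: caller drops the lock itself */
	return 0;
}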
@@ -133,7 +133,7 @@ out_drop_priv:
 	spu_mfc_sr1_set(ctx->spu, sr1);
 
 out_unlock:
-	spu_release_exclusive(ctx);
+	spu_release(ctx);
 out:
 	return ret;
 }
@@ -158,6 +158,16 @@ void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
 
 /* context management */
+static inline void spu_acquire(struct spu_context *ctx)
+{
+	mutex_lock(&ctx->state_mutex);
+}
+
+static inline void spu_release(struct spu_context *ctx)
+{
+	mutex_unlock(&ctx->state_mutex);
+}
+
 struct spu_context * alloc_spu_context(struct spu_gang *gang);
 void destroy_spu_context(struct kref *kref);
 struct spu_context * get_spu_context(struct spu_context *ctx);
@@ -165,17 +175,9 @@ int put_spu_context(struct spu_context *ctx);
 void spu_unmap_mappings(struct spu_context *ctx);
 
 void spu_forget(struct spu_context *ctx);
-void spu_acquire(struct spu_context *ctx);
-void spu_release(struct spu_context *ctx);
 int spu_acquire_runnable(struct spu_context *ctx);
 void spu_acquire_saved(struct spu_context *ctx);
 int spu_acquire_exclusive(struct spu_context *ctx);
-
-static inline void spu_release_exclusive(struct spu_context *ctx)
-{
-	mutex_unlock(&ctx->state_mutex);
-}
-
 int spu_activate(struct spu_context *ctx, u64 flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
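Since the removed spu_release_exclusive() was nothing more than mutex_unlock(&ctx->state_mutex), identical to the new inline spu_release(), callers such as the one in the run.c hunk above simply pair spu_acquire_exclusive() with plain spu_release(). A rough, hypothetical sketch of that pairing (not taken from the patch):

/* Hypothetical sketch; assumes the spufs.h inlines shown above are in scope. */
static int spufs_example_exclusive(struct spu_context *ctx)
{
	int ret;

	ret = spu_acquire_exclusive(ctx);	/* locks ctx, shuts out userspace mappings */
	if (ret)
		return ret;			/* context comes back unlocked on error */

	/* ... touch state that userspace must not observe ... */

	spu_release(ctx);			/* formerly spu_release_exclusive(ctx) */
	return 0;
}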