/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * fence-array: aggregates fences to be waited on together
 *
 * Copyright (C) 2016 Collabora Ltd
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 * Authors:
 *	Gustavo Padovan <gustavo@padovan.org>
 *	Christian König <christian.koenig@amd.com>
 */
#ifndef __LINUX_DMA_FENCE_ARRAY_H
#define __LINUX_DMA_FENCE_ARRAY_H
#include <linux/dma-fence.h>
#include <linux/irq_work.h>
/**
 * struct dma_fence_array_cb - callback helper for fence array
 * @cb: fence callback structure for signaling
 * @array: reference to the parent fence array object
 */
struct dma_fence_array_cb {
	struct dma_fence_cb cb;
	struct dma_fence_array *array;
};
/**
 * struct dma_fence_array - fence to represent an array of fences
 * @base: fence base class
 * @lock: spinlock for fence handling
 * @num_fences: number of fences in the array
 * @num_pending: fences in the array still pending
 * @fences: array of the fences
 * @work: internal irq_work function
 */
struct dma_fence_array {
	struct dma_fence base;

	spinlock_t lock;
	unsigned num_fences;
	atomic_t num_pending;
	struct dma_fence **fences;

	struct irq_work work;
};
/**
 * to_dma_fence_array - cast a fence to a dma_fence_array
 * @fence: fence to cast to a dma_fence_array
 *
 * Returns NULL if the fence is not a dma_fence_array,
 * or the dma_fence_array otherwise.
 */
static inline struct dma_fence_array *
to_dma_fence_array(struct dma_fence *fence)
{
	if (!fence || !dma_fence_is_array(fence))
		return NULL;

	return container_of(fence, struct dma_fence_array, base);
}
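/*
 * Illustrative sketch (not kernel-doc; assumes @fence is a valid, referenced
 * fence): check whether it is an array fence and, if so, peek at the
 * aggregate state.
 *
 *	struct dma_fence_array *array = to_dma_fence_array(fence);
 *
 *	if (array)
 *		pr_debug("%u fences, %d still pending\n",
 *			 array->num_fences, atomic_read(&array->num_pending));
 */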
/**
 * dma_fence_array_for_each - iterate over all fences in array
 * @fence: current fence
 * @index: index into the array
 * @head: potential dma_fence_array object
 *
 * Test if @head is a dma_fence_array object and if yes iterate over all fences
 * in the array. If not, just iterate over @head itself.
 *
 * For a deep dive iterator see dma_fence_unwrap_for_each().
 */
#define dma_fence_array_for_each(fence, index, head)			\
	for (index = 0, fence = dma_fence_array_first(head); fence;	\
	     ++(index), fence = dma_fence_array_next(head, index))
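/*
 * Illustrative sketch (not kernel-doc; assumes @fence is a valid fence that
 * may or may not be a dma_fence_array): visit each component fence, or just
 * @fence itself when it is not an array.
 *
 *	struct dma_fence *f;
 *	unsigned int i;
 *
 *	dma_fence_array_for_each(f, i, fence)
 *		pr_info("fence %u: context %llu\n", i, f->context);
 */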
struct dma_fence_array *dma_fence_array_create(int num_fences,
					       struct dma_fence **fences,
					       u64 context, unsigned seqno,
					       bool signal_on_any);
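/*
 * A minimal usage sketch (illustrative, not kernel-doc; assumes @a and @b are
 * fences the caller holds references on): merge two fences into one array
 * fence. dma_fence_array_create() is documented to take ownership of the
 * kmalloc'ed pointer array and of the references it contains, so they are
 * only cleaned up manually on failure here.
 *
 *	struct dma_fence **fences;
 *	struct dma_fence_array *array;
 *
 *	fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
 *	if (!fences)
 *		return NULL;
 *
 *	fences[0] = dma_fence_get(a);
 *	fences[1] = dma_fence_get(b);
 *	array = dma_fence_array_create(2, fences,
 *				       dma_fence_context_alloc(1), 1, false);
 *	if (!array) {
 *		dma_fence_put(fences[0]);
 *		dma_fence_put(fences[1]);
 *		kfree(fences);
 *	}
 *	return array ? &array->base : NULL;
 */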
bool dma_fence_match_context(struct dma_fence *fence, u64 context);
struct dma_fence *dma_fence_array_first(struct dma_fence *head);
struct dma_fence *dma_fence_array_next(struct dma_fence *head,
				       unsigned int index);
#endif /* __LINUX_DMA_FENCE_ARRAY_H */