imx-drm: cleanups and YUV 4:2:0 memory read/write reduction support

- Remove counter load enable from PRG, which has no effect.
- Add support for setting the double read/write reduction flag in channel
  parameter memory. This can be used to save some memory bandwidth when
  capturing in YUV 4:2:0 chroma subsampled formats (a usage sketch follows
  the patch list below).
- Allocate DMA channel structures as needed; most of the 64 channels are
  unused or even reserved.
- Remove unused interrupt busy waiting routine.
- Set VDIC field order for both AUTO and MAN inputs simultaneously, as both
  can't be active at the same time.

-----BEGIN PGP SIGNATURE-----

iQJLBAABCAA1FiEEBsBxhV1FaKwXuCOBUMKIHHCeYOsFAlk49zAXHHAuemFiZWxA
cGVuZ3V0cm9uaXguZGUACgkQUMKIHHCeYOtX+g/+M5DPkGhcP8fN9m47MOAEa+nP
oDciYC76VrQev/Qys/zLM3/6sWF9h82USJ52H+zw41RuKKkYlcOzVWnSPQd2yN6Y
hIl0fCsFzGxOMnIAhmi6BHFnvJKP1jsfeBdXSHyxI0y5kGoufG7BEHiJ7TTSgy/I
JhccDKTRV9NzAfwpD37EI3a/Nc53DRpw3jrnHPnAaBJ6hYVPZ9YCSrBYbQQbIrDr
x6NB8E1Ga3KRGZMTw45bTBiOs4AbZKSunzrqWQFnTRjbE+aTDs9W5n5wcdr7AoWi
gqnx+b6TkiarNK3taHffjYioYvvn2nbGhuoAtg7hpS0CeUup0gitQquKM0kqsWnk
yBykkP+Z7udASRgXdK6Gtzo6hzdhPeFPmmMbKmSBdIvT26t0ikf9RN1UlEhE6nY3
A68jKC4+gNTu8kF6imzWCfwM9KB4pWn0N0qTY5U9Y7/gWFky6IEDn3V5OM3XXqUa
c/iglYyzO+B7vVu6ZajlH+shemO1mVaxGjVFrfX29syVooZrmo0NVJPdoKwiYx0r
E08FCOdUIEhzFS6h1/FII+mZ6YzAmNkXVz+l+MWaoOW2tIWWX4xSsFTKBka2hDfq
daU3CDcKcg8LuSybHg6lzj8Hw+/CWkqgtv6ESVnvHEUYCHoZsvArJTNoQrx/zJbE
EwgZIM4daufEuv6I5zo=
=m3BY
-----END PGP SIGNATURE-----

Merge tag 'imx-drm-next-2017-06-08' of git://git.pengutronix.de/git/pza/linux into drm-next

* tag 'imx-drm-next-2017-06-08' of git://git.pengutronix.de/git/pza/linux:
  gpu: ipu-v3: vdic: include AUTO field order bit in ipu_vdi_set_field_order
  gpu: ipu-v3: remove interrupt busy waiting routine
  gpu: ipu-v3: allocate ipuv3_channels as needed
  gpu: ipu-v3: Add support for double read/write reduction
  gpu: ipu-v3: prg: remove counter load enable
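The double read/write reduction flag is exposed through the new ipu_cpmem_skip_odd_chroma_rows() helper added by this series. Below is a minimal sketch (not part of the series) of how a capture driver might use it when programming a channel for a 4:2:0 buffer; the example_* function name is made up, while ipu_cpmem_zero() and ipu_cpmem_set_image() are the pre-existing CPMEM helpers declared in include/video/imx-ipu-v3.h.

/* Sketch only: enable double read/write reduction for 4:2:0 capture. */
static int example_setup_yuv420_capture(struct ipuv3_channel *ch,
                                        struct ipu_image *image)
{
        int ret;

        ipu_cpmem_zero(ch);
        ret = ipu_cpmem_set_image(ch, image);
        if (ret)
                return ret;

        /*
         * Chroma is vertically subsampled, so redundant chroma row
         * reads/writes can be skipped to save memory bandwidth.
         */
        if (image->pix.pixelformat == V4L2_PIX_FMT_YUV420 ||
            image->pix.pixelformat == V4L2_PIX_FMT_NV12)
                ipu_cpmem_skip_odd_chroma_rows(ch);

        return 0;
}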
@@ -274,15 +274,22 @@ struct ipuv3_channel *ipu_idmac_get(struct ipu_soc *ipu, unsigned num)
 
         mutex_lock(&ipu->channel_lock);
 
-        channel = &ipu->channel[num];
+        list_for_each_entry(channel, &ipu->channels, list) {
+                if (channel->num == num) {
+                        channel = ERR_PTR(-EBUSY);
+                        goto out;
+                }
+        }
 
-        if (channel->busy) {
-                channel = ERR_PTR(-EBUSY);
+        channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+        if (!channel) {
+                channel = ERR_PTR(-ENOMEM);
                 goto out;
         }
 
-        channel->busy = true;
         channel->num = num;
+        channel->ipu = ipu;
+        list_add(&channel->list, &ipu->channels);
 
 out:
         mutex_unlock(&ipu->channel_lock);
@@ -299,7 +306,8 @@ void ipu_idmac_put(struct ipuv3_channel *channel)
 
         mutex_lock(&ipu->channel_lock);
 
-        channel->busy = false;
+        list_del(&channel->list);
+        kfree(channel);
 
         mutex_unlock(&ipu->channel_lock);
 }
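With channels now allocated on demand, ipu_idmac_get() returns a freshly allocated ipuv3_channel (or ERR_PTR(-EBUSY) if the number is already claimed, ERR_PTR(-ENOMEM) if allocation fails), and ipu_idmac_put() unlinks and frees it again. A minimal caller sketch based only on the two functions above, assuming an already-probed struct ipu_soc *ipu; the channel number is illustrative:

        struct ipuv3_channel *ch;

        ch = ipu_idmac_get(ipu, IPUV3_CHANNEL_CSI0);
        if (IS_ERR(ch))
                return PTR_ERR(ch);     /* -EBUSY or -ENOMEM, see above */

        /* ... program CPMEM, enable and use the channel ... */

        ipu_idmac_put(ch);              /* deletes it from ipu->channels and kfree()s it */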
@@ -589,22 +597,6 @@ int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms)
 }
 EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy);
 
-int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms)
-{
-        unsigned long timeout;
-
-        timeout = jiffies + msecs_to_jiffies(ms);
-        ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32));
-        while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32)) & BIT(irq % 32))) {
-                if (time_after(jiffies, timeout))
-                        return -ETIMEDOUT;
-                cpu_relax();
-        }
-
-        return 0;
-}
-EXPORT_SYMBOL_GPL(ipu_wait_interrupt);
-
 int ipu_idmac_disable_channel(struct ipuv3_channel *channel)
 {
         struct ipu_soc *ipu = channel->ipu;
@@ -1376,7 +1368,7 @@ static int ipu_probe(struct platform_device *pdev)
         struct ipu_soc *ipu;
         struct resource *res;
         unsigned long ipu_base;
-        int i, ret, irq_sync, irq_err;
+        int ret, irq_sync, irq_err;
         const struct ipu_devtype *devtype;
 
         devtype = of_device_get_match_data(&pdev->dev);
@@ -1409,13 +1401,12 @@ static int ipu_probe(struct platform_device *pdev)
                         return -EPROBE_DEFER;
         }
 
-        for (i = 0; i < 64; i++)
-                ipu->channel[i].ipu = ipu;
         ipu->devtype = devtype;
         ipu->ipu_type = devtype->type;
 
         spin_lock_init(&ipu->lock);
         mutex_init(&ipu->channel_lock);
+        INIT_LIST_HEAD(&ipu->channels);
 
         dev_dbg(&pdev->dev, "cm_reg: 0x%08lx\n",
                 ipu_base + devtype->cm_ofs);
@@ -224,6 +224,12 @@ void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres)
 }
 EXPORT_SYMBOL_GPL(ipu_cpmem_set_resolution);
 
+void ipu_cpmem_skip_odd_chroma_rows(struct ipuv3_channel *ch)
+{
+        ipu_ch_param_write_field(ch, IPU_FIELD_RDRW, 1);
+}
+EXPORT_SYMBOL_GPL(ipu_cpmem_skip_odd_chroma_rows);
+
 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride)
 {
         ipu_ch_param_write_field(ch, IPU_FIELD_SLY, stride - 1);
@@ -318,8 +318,6 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
         writel(val, prg->regs + IPU_PRG_BADDR(prg_chan));
 
         val = readl(prg->regs + IPU_PRG_CTL);
-        /* counter load enable */
-        val |= IPU_PRG_CTL_CNT_LOAD_EN(prg_chan);
         /* config AXI ID */
         val &= ~(IPU_PRG_CTL_SOFT_ARID_MASK <<
                  IPU_PRG_CTL_SOFT_ARID_SHIFT(prg_chan));
@@ -157,11 +157,8 @@ enum ipu_modules {
 
 struct ipuv3_channel {
         unsigned int num;
-
-        bool enabled;
-        bool busy;
-
         struct ipu_soc *ipu;
+        struct list_head list;
 };
 
 struct ipu_cpmem;
@@ -184,6 +181,7 @@ struct ipu_soc {
         enum ipuv3_type ipu_type;
         spinlock_t lock;
         struct mutex channel_lock;
+        struct list_head channels;
 
         void __iomem *cm_reg;
         void __iomem *idmac_reg;
@@ -193,8 +191,6 @@ struct ipu_soc {
 
         struct clk *clk;
 
-        struct ipuv3_channel channel[64];
-
         int irq_sync;
         int irq_err;
         struct irq_domain *domain;
@@ -229,7 +225,6 @@ int ipu_module_enable(struct ipu_soc *ipu, u32 mask);
 int ipu_module_disable(struct ipu_soc *ipu, u32 mask);
 
 bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno);
-int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms);
 
 int ipu_csi_init(struct ipu_soc *ipu, struct device *dev, int id,
                  unsigned long base, u32 module, struct clk *clk_ipu);
@@ -88,9 +88,9 @@ void ipu_vdi_set_field_order(struct ipu_vdi *vdi, v4l2_std_id std, u32 field)
 
         reg = ipu_vdi_read(vdi, VDI_C);
         if (top_field_0)
-                reg &= ~VDI_C_TOP_FIELD_MAN_1;
+                reg &= ~(VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1);
         else
-                reg |= VDI_C_TOP_FIELD_MAN_1;
+                reg |= VDI_C_TOP_FIELD_MAN_1 | VDI_C_TOP_FIELD_AUTO_1;
         ipu_vdi_write(vdi, reg, VDI_C);
 
         spin_unlock_irqrestore(&vdi->lock, flags);
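Since the MAN and AUTO field order bits select inputs that cannot be active at the same time, ipu_vdi_set_field_order() now updates both in one call, so callers need no extra handling for the AUTO path. A rough caller sketch; the std, field and motion values are purely illustrative, and ipu_vdi_set_motion()/ipu_vdi_enable() are pre-existing VDIC helpers:

        /* Sketch only: bottom-field-first interlaced source. */
        ipu_vdi_set_field_order(vdi, V4L2_STD_UNKNOWN, V4L2_FIELD_INTERLACED_BT);
        ipu_vdi_set_motion(vdi, MED_MOTION);
        ipu_vdi_enable(vdi);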
@@ -250,6 +250,7 @@ struct ipu_image {
 
 void ipu_cpmem_zero(struct ipuv3_channel *ch);
 void ipu_cpmem_set_resolution(struct ipuv3_channel *ch, int xres, int yres);
+void ipu_cpmem_skip_odd_chroma_rows(struct ipuv3_channel *ch);
 void ipu_cpmem_set_stride(struct ipuv3_channel *ch, int stride);
 void ipu_cpmem_set_high_priority(struct ipuv3_channel *ch);
 void ipu_cpmem_set_buffer(struct ipuv3_channel *ch, int bufnum, dma_addr_t buf);