/*
 *  Driver for the Conexant CX23885 PCIe bridge
 *
 *  Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *
 *  GNU General Public License for more details.
 */

#include "cx23885.h"
|
|
|
|
#include <linux/init.h>
|
|
#include <linux/list.h>
|
|
#include <linux/module.h>
|
|
#include <linux/moduleparam.h>
|
|
#include <linux/kmod.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/interrupt.h>
|
|
#include <linux/delay.h>
|
|
#include <asm/div64.h>
|
|
#include <linux/firmware.h>
|
|
|
|
#include "cimax2.h"
|
|
#include "altera-ci.h"
|
|
#include "cx23888-ir.h"
|
|
#include "cx23885-ir.h"
|
|
#include "cx23885-av.h"
|
|
#include "cx23885-input.h"
|
|
|
|
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX23885_VERSION);

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");

static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");

#define dprintk(level, fmt, arg...)\
	do { if (debug >= level)\
		printk(KERN_DEBUG pr_fmt("%s: " fmt), \
		       __func__, ##arg); \
	} while (0)

static unsigned int cx23885_devcount;

#define NO_SYNC_LINE (-1U)

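/*
 * Note: the dprintk() levels used in this file are roughly: 1-2 for setup,
 * teardown and error paths, 7 for per-interrupt chatter.  In addition,
 * cx23885_start_dma() dumps the TS port registers when debug > 4 and the
 * SRAM/RISC state when debug > 5.
 */
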
/* FIXME, these allocations will change when
 * analog arrives. To be reviewed.
 * CX23887 Assumptions
 * 1 line = 16 bytes of CDT
 * cmds size = 80
 * cdt size = 16 * linesize
 * iqsize = 64
 * maxlines = 6
 *
 * Address Space:
 * 0x00000000 0x00008fff FIFO clusters
 * 0x00010000 0x000104af Channel Management Data Structures
 * 0x000104b0 0x000104ff Free
 * 0x00010500 0x000108bf 15 channels * iqsize
 * 0x000108c0 0x000108ff Free
 * 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
 *                       15 channels * (iqsize + (maxlines * linesize))
 * 0x00010ea0 0x00010xxx Free
 */

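/*
 * Each CDT entry written by cx23885_sram_channel_setup() below is 16 bytes
 * (one per FIFO line), and at most 6 lines are described per channel; this
 * is where the "1 line = 16 bytes of CDT" and "maxlines = 6" assumptions
 * above come from.
 */
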
static struct sram_channel cx23885_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x10380,
		.cdt		= 0x104c0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "ch2",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10400,
		.cdt		= 0x10580,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10440,
		.cdt		= 0x105e0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x10480,
		.cdt		= 0x10a00,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

static struct sram_channel cx23887_sram_channels[] = {
	[SRAM_CH01] = {
		.name		= "VID A",
		.cmds_start	= 0x10000,
		.ctrl_start	= 0x105b0,
		.cdt		= 0x107b0,
		.fifo_start	= 0x40,
		.fifo_size	= 0x2800,
		.ptr1_reg	= DMA1_PTR1,
		.ptr2_reg	= DMA1_PTR2,
		.cnt1_reg	= DMA1_CNT1,
		.cnt2_reg	= DMA1_CNT2,
	},
	[SRAM_CH02] = {
		.name		= "VID A (VBI)",
		.cmds_start	= 0x10050,
		.ctrl_start	= 0x105F0,
		.cdt		= 0x10810,
		.fifo_start	= 0x3000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA2_PTR1,
		.ptr2_reg	= DMA2_PTR2,
		.cnt1_reg	= DMA2_CNT1,
		.cnt2_reg	= DMA2_CNT2,
	},
	[SRAM_CH03] = {
		.name		= "TS1 B",
		.cmds_start	= 0x100A0,
		.ctrl_start	= 0x10630,
		.cdt		= 0x10870,
		.fifo_start	= 0x5000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA3_PTR1,
		.ptr2_reg	= DMA3_PTR2,
		.cnt1_reg	= DMA3_CNT1,
		.cnt2_reg	= DMA3_CNT2,
	},
	[SRAM_CH04] = {
		.name		= "ch4",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA4_PTR1,
		.ptr2_reg	= DMA4_PTR2,
		.cnt1_reg	= DMA4_CNT1,
		.cnt2_reg	= DMA4_CNT2,
	},
	[SRAM_CH05] = {
		.name		= "ch5",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH06] = {
		.name		= "TS2 C",
		.cmds_start	= 0x10140,
		.ctrl_start	= 0x10670,
		.cdt		= 0x108d0,
		.fifo_start	= 0x6000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA5_PTR1,
		.ptr2_reg	= DMA5_PTR2,
		.cnt1_reg	= DMA5_CNT1,
		.cnt2_reg	= DMA5_CNT2,
	},
	[SRAM_CH07] = {
		.name		= "TV Audio",
		.cmds_start	= 0x10190,
		.ctrl_start	= 0x106B0,
		.cdt		= 0x10930,
		.fifo_start	= 0x7000,
		.fifo_size	= 0x1000,
		.ptr1_reg	= DMA6_PTR1,
		.ptr2_reg	= DMA6_PTR2,
		.cnt1_reg	= DMA6_CNT1,
		.cnt2_reg	= DMA6_CNT2,
	},
	[SRAM_CH08] = {
		.name		= "ch8",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA7_PTR1,
		.ptr2_reg	= DMA7_PTR2,
		.cnt1_reg	= DMA7_CNT1,
		.cnt2_reg	= DMA7_CNT2,
	},
	[SRAM_CH09] = {
		.name		= "ch9",
		.cmds_start	= 0x0,
		.ctrl_start	= 0x0,
		.cdt		= 0x0,
		.fifo_start	= 0x0,
		.fifo_size	= 0x0,
		.ptr1_reg	= DMA8_PTR1,
		.ptr2_reg	= DMA8_PTR2,
		.cnt1_reg	= DMA8_CNT1,
		.cnt2_reg	= DMA8_CNT2,
	},
};

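/*
 * Note: the "chN" entries above with cmds_start == 0 are unused placeholders;
 * cx23885_sram_channel_setup() treats cmds_start == 0 as "erase this channel"
 * and only clears the associated DMA pointer/count registers.
 */
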
static void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = mask & dev->pci_irqmask;
	if (v)
		cx_set(PCI_INT_MSK, v);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}

void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}

void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}

static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	u32 v;
	unsigned long flags;
	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);

	v = cx_read(PCI_INT_MSK);

	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
	return v;
}

static int cx23885_risc_decode(u32 risc)
{
	static char *instr[16] = {
		[RISC_SYNC    >> 28] = "sync",
		[RISC_WRITE   >> 28] = "write",
		[RISC_WRITEC  >> 28] = "writec",
		[RISC_READ    >> 28] = "read",
		[RISC_READC   >> 28] = "readc",
		[RISC_JUMP    >> 28] = "jump",
		[RISC_SKIP    >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	static int incr[16] = {
		[RISC_WRITE   >> 28] = 3,
		[RISC_JUMP    >> 28] = 3,
		[RISC_SKIP    >> 28] = 1,
		[RISC_SYNC    >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	static char *bits[] = {
		"12",   "13",   "14",   "resync",
		"cnt0", "cnt1", "18",   "19",
		"20",   "21",   "22",   "23",
		"irq1", "irq2", "eol",  "sol",
	};
	int i;

	printk(KERN_DEBUG "0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			pr_cont(" %s", bits[i]);
	pr_cont(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}

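/*
 * Example: a RISC_WRITE|RISC_SOL|RISC_EOL|bpl opcode as generated by
 * cx23885_risc_field() decodes above as "write sol eol count=<bpl>" and,
 * per the incr[] table, occupies three dwords (opcode, PCI address,
 * upper address bits).
 */
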
static void cx23885_wakeup(struct cx23885_tsport *port,
			   struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;
	int count_delta;
	int max_buf_done = 5; /* service maximum five buffers */

	do {
		if (list_empty(&q->active))
			return;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, queue);

		buf->vb.vb2_buf.timestamp = ktime_get_ns();
		buf->vb.sequence = q->count++;
		if (count != (q->count % 65536)) {
			dprintk(1, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		} else {
			dprintk(7, "[%p/%d] wakeup reg=%d buf=%d\n", buf,
				buf->vb.vb2_buf.index, count, q->count);
		}
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		max_buf_done--;
		/* count register is 16 bits so apply modulo appropriately */
		count_delta = ((int)count - (int)(q->count % 65536));
	} while ((count_delta > 0) && (max_buf_done > 0));
}

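/*
 * cx23885_wakeup() is invoked from the TS interrupt paths
 * (cx23885_irq_ts() and cx23885_irq_417()) under port->slock, with the
 * current general purpose counter value read from port->reg_gpcnt.
 */
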
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl   = (bpl + 7) & ~7; /* alignment */
	cdt   = ch->cdt;
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	cx_write(8 + 0, RISC_JUMP | RISC_CNT_RESET);
	cx_write(8 + 4, 12);
	cx_write(8 + 8, 0);

	/* write CDT */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i +  4, 0);
		cx_write(cdt + 16*i +  8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start +  4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start +  8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}

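/*
 * Callers: cx23885_reset() uses this with risc == 0 to (re)initialise or
 * erase every SRAM channel, while cx23885_start_dma() programs the active
 * TS channel with the physical address of the buffer's RISC program.
 */
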
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	pr_warn("%s: %s - dma channel status dump\n",
		dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		pr_warn("%s: cmds: %-15s: 0x%08x\n",
			dev->name, name[i],
			cx_read(ch->cmds_start + 4*i));

	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		pr_warn("%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */

		pr_warn("%s: (0x%08x) iq %x: ", dev->name,
			ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			pr_warn("%s: iq %x: 0x%08x [ arg #%d ]\n",
				dev->name, i+j, risc, j);
		}
	}

	pr_warn("%s: fifo: 0x%08x -> 0x%x\n",
		dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	pr_warn("%s: ctrl: 0x%08x -> 0x%x\n",
		dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	pr_warn("%s: ptr1_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr1_reg));
	pr_warn("%s: ptr2_reg: 0x%08x\n",
		dev->name, cx_read(ch->ptr2_reg));
	pr_warn("%s: cnt1_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt1_reg));
	pr_warn("%s: cnt2_reg: 0x%08x\n",
		dev->name, cx_read(ch->cnt2_reg));
}

static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct cx23885_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	pr_info("%s: risc disasm: %p [dma=0x%08lx]\n",
		dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		pr_info("%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			pr_info("%s: %04d: 0x%08x [ arg #%d ]\n",
				dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}

static void cx23885_clear_bridge_error(struct cx23885_dev *dev)
{
	uint32_t reg1_val = cx_read(TC_REQ); /* read-only */
	uint32_t reg2_val = cx_read(TC_REQ_SET);

	if (reg1_val && reg2_val) {
		cx_write(TC_REQ, reg1_val);
		cx_write(TC_REQ_SET, reg2_val);
		cx_read(VID_B_DMA);
		cx_read(VBI_B_DMA);
		cx_read(VID_C_DMA);
		cx_read(VBI_C_DMA);

		dev_info(&dev->pci->dev,
			 "dma in progress detected 0x%08x 0x%08x, clearing\n",
			 reg1_val, reg2_val);
	}
}

static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}

static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	msleep(100);

	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
}

static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);
	return 0;
}

static int get_resources(struct cx23885_dev *dev)
{
	if (request_mem_region(pci_resource_start(dev->pci, 0),
			       pci_resource_len(dev->pci, 0),
			       dev->name))
		return 0;

	pr_err("%s: can't get MMIO memory @ 0x%llx\n",
	       dev->name, (unsigned long long)pci_resource_start(dev->pci, 0));

	return -EBUSY;
}

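/*
 * The BAR 0 region claimed here is released again in
 * cx23885_dev_unregister(), which calls release_mem_region() on the
 * same range.
 */
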
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val        = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val     = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val       = 0x0;
	port->hw_sop_ctrl_val    = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded to allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt          = VID_B_GPCNT;
		port->reg_gpcnt_ctl      = VID_B_GPCNT_CTL;
		port->reg_dma_ctl        = VID_B_DMA_CTL;
		port->reg_lngth          = VID_B_LNGTH;
		port->reg_hw_sop_ctrl    = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_B_GEN_CTL;
		port->reg_bd_pkt_status  = VID_B_BD_PKT_STATUS;
		port->reg_sop_status     = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_B_VLD_MISC;
		port->reg_ts_clk_en      = VID_B_TS_CLK_EN;
		port->reg_src_sel        = VID_B_SRC_SEL;
		port->reg_ts_int_msk     = VID_B_INT_MSK;
		port->reg_ts_int_stat    = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt          = VID_C_GPCNT;
		port->reg_gpcnt_ctl      = VID_C_GPCNT_CTL;
		port->reg_dma_ctl        = VID_C_DMA_CTL;
		port->reg_lngth          = VID_C_LNGTH;
		port->reg_hw_sop_ctrl    = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl       = VID_C_GEN_CTL;
		port->reg_bd_pkt_status  = VID_C_BD_PKT_STATUS;
		port->reg_sop_status     = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc       = VID_C_VLD_MISC;
		port->reg_ts_clk_en      = VID_C_TS_CLK_EN;
		port->reg_src_sel        = 0;
		port->reg_ts_int_msk     = VID_C_INT_MSK;
		port->reg_ts_int_stat    = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	return 0;
}

static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		pr_err("%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		pr_info("%s() Hardware revision = 0x%02x\n",
			__func__, dev->hwrevision);
	else
		pr_err("%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}

/* Find the first v4l2_subdev member of the group id in hw */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *result = NULL;
	struct v4l2_subdev *sd;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id == hw) {
			result = sd;
			break;
		}
	}
	spin_unlock(&dev->v4l2_dev.lock);
	return result;
}

static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	spin_lock_init(&dev->slock);

	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume an 888 default */
		dev->bridge = CX23885_BRIDGE_888;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 50000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	if (dev->pci->device == 0x8852) {
		/* no DIF on cx23885, so no analog tuner support possible */
		if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_ATSC_885;
		else if (dev->board == CX23885_BOARD_HAUPPAUGE_QUADHD_DVB)
			dev->board = CX23885_BOARD_HAUPPAUGE_QUADHD_DVB_885;
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	if (dev->board == CX23885_BOARD_HAUPPAUGE_IMPACTVCBE &&
		dev->pci->subsystem_device == 0x7137) {
		/* Hauppauge ImpactVCBe device ID 0x7137 is populated
		 * with an 888, and a 25MHz crystal, instead of the
		 * usual third overtone 50MHz. The default clock rate must
		 * be overridden so the cx25840 is properly configured
		 */
		dev->clk_freq = 25000000;
	}

	dev->pci_bus  = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat  = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl  = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr  = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat  = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl  = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr  = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat  = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl  = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr  = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		pr_err("CORE %s No more PCIe resources for subsystem: %04x:%04x\n",
		       dev->name, dev->pci->subsystem_vendor,
		       dev->pci->subsystem_device);

		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	pr_info("CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
		dev->name, dev->pci->subsystem_vendor,
		dev->pci->subsystem_device, cx23885_boards[dev->board].name,
		dev->board, card[dev->nr] == dev->board ?
		"insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, tuner, standby);
	cx23885_ir_init(dev);

	if (dev->board == CX23885_BOARD_VIEWCAST_460E) {
		/*
		 * GPIOs 9/8 are input detection bits for the breakout video
		 * (gpio 8) and audio (gpio 9) cables. When they're attached,
		 * these GPIOs are pulled high. Make sure these GPIOs are
		 * marked as inputs.
		 */
		cx23885_gpio_enable(dev, 0x300, 0);
	}

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			pr_err("%s() Failed to register analog video adapters on VID_A\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			pr_err("%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_B\n",
			       __func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			pr_err("%s() Failed to register dvb on VID_C\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			pr_err("%s() Failed to register 417 on VID_C\n",
			       __func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	switch (dev->board) {
	case CX23885_BOARD_TEVII_S470:
	case CX23885_BOARD_TEVII_S471:
		cx_clear(RDR_RDRCTL1, 1 << 8);
		break;
	}

	return 0;
}

static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}

static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines, unsigned int lpi, bool jump)
{
	struct scatterlist *sg;
	unsigned int line, todo, sol;

	if (jump) {
		*(rp++) = cpu_to_le32(RISC_JUMP);
		*(rp++) = cpu_to_le32(0);
		*(rp++) = cpu_to_le32(0); /* bits 63-32 */
	}

	/* sync instruction */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg = sg_next(sg);
		}

		if (lpi && line > 0 && !(line % lpi))
			sol = RISC_SOL | RISC_IRQ1 | RISC_CNT_INC;
		else
			sol = RISC_SOL;

		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|sol|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg = sg_next(sg);
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg = sg_next(sg);
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		offset += padding;
	}

	return rp;
}

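/*
 * The write and jump instructions emitted above are three dwords each
 * (opcode, PCI dword address, upper 32 address bits written as 0); the
 * callers below therefore conservatively size the RISC buffer as
 * "instructions * 12" bytes.
 */
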
int cx23885_risc_buffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);
	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_databuffer(struct pci_dev *pci,
			    struct cx23885_riscmem *risc,
			    struct scatterlist *sglist,
			    unsigned int bpl,
			    unsigned int lines, unsigned int lpi)
{
	u32 instructions;
	__le32 *rp;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Here
	   there is no padding and no sync.  First DMA region may be smaller
	   than PAGE_SIZE */
	/* Jump and write need an extra dword */
	instructions  = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 4;

	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;

	/* write risc instructions */
	rp = risc->cpu;
	rp = cx23885_risc_field(rp, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines, lpi, lpi == 0);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

int cx23885_risc_vbibuffer(struct pci_dev *pci, struct cx23885_riscmem *risc,
			   struct scatterlist *sglist, unsigned int top_offset,
			   unsigned int bottom_offset, unsigned int bpl,
			   unsigned int padding, unsigned int lines)
{
	u32 instructions, fields;
	__le32 *rp;

	fields = 0;
	if (UNSET != top_offset)
		fields++;
	if (UNSET != bottom_offset)
		fields++;

	/* estimate risc mem: worst case is one write per page border +
	   one write per scan line + syncs + jump (all 2 dwords).  Padding
	   can cause next bpl to start close to a page border.  First DMA
	   region may be smaller than PAGE_SIZE */
	/* write and jump need an extra dword */
	instructions  = fields * (1 + ((bpl + padding) * lines)
		/ PAGE_SIZE + lines);
	instructions += 5;
	risc->size = instructions * 12;
	risc->cpu = pci_alloc_consistent(pci, risc->size, &risc->dma);
	if (risc->cpu == NULL)
		return -ENOMEM;
	/* write risc instructions */
	rp = risc->cpu;

	/* Sync to line 6, so US CC line 21 will appear in line '12'
	 * in the userland vbi payload */
	if (UNSET != top_offset)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines, 0, true);

	if (UNSET != bottom_offset)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines, 0, UNSET == top_offset);

	/* save pointer to jmp instruction address */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}

void cx23885_free_buffer(struct cx23885_dev *dev, struct cx23885_buffer *buf)
{
	struct cx23885_riscmem *risc = &buf->risc;

	BUG_ON(in_interrupt());
	pci_free_consistent(dev->pci, risc->size, risc->cpu, risc->dma);
}

static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
	dprintk(1, "%s() ts_int_status(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_stat, cx_read(port->reg_ts_int_stat));
	dprintk(1, "%s() PCI_INT_STAT 0x%08X\n", __func__,
		cx_read(PCI_INT_STAT));
	dprintk(1, "%s() VID_B_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_MSTAT));
	dprintk(1, "%s() VID_B_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_B_INT_SSTAT));
	dprintk(1, "%s() VID_C_INT_MSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_MSTAT));
	dprintk(1, "%s() VID_C_INT_SSTAT 0x%08X\n", __func__,
		cx_read(VID_C_INT_SSTAT));
}

int cx23885_start_dma(struct cx23885_tsport *port,
		      struct cx23885_dmaqueue *q,
		      struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		dev->width, dev->height, dev->field);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, port->ts_packet_size);

	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		pr_err("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 0;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {

		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1;    /* Clear TS1_OE */

		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);

		/* Sets MOE_CLK_DIS to disable MoE clock */
		/* sets MCLK_DLY_SEL/BCLK_DLY_SEL to 1 buffer delay each */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);

		/* ALT_GPIO_ALT_SET: GPIO[0]
		 * IR_ALT_TX_SEL: GPIO[1]
		 * GPIO1_ALT_SEL: VIP_656_DATA[0]
		 * GPIO0_ALT_SEL: VIP_656_CLK
		 */
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);

		/* clear dma in progress */
		cx23885_clear_bridge_error(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */
	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	cx23885_irq_get_mask(dev);

	/* clear dma in progress */
	cx23885_clear_bridge_error(dev);

	return 0;
}

static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;
	int delay = 0;
	uint32_t reg1_val;
	uint32_t reg2_val;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
	/* just in case wait for any dma to complete before allowing dealloc */
	mdelay(20);
	for (delay = 0; delay < 100; delay++) {
		reg1_val = cx_read(TC_REQ);
		reg2_val = cx_read(TC_REQ_SET);
		if (reg1_val == 0 || reg2_val == 0)
			break;
		mdelay(1);
	}
	dev_dbg(&dev->pci->dev, "delay=%d reg1=0x%08x reg2=0x%08x\n",
		delay, reg1_val, reg2_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);

		/* Set TS1_OE */
		reg = reg | 0x1;

		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	return 0;
}

/* ------------------------------------------------------------------ */

int cx23885_buf_prepare(struct cx23885_buffer *buf, struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	struct sg_table *sgt = vb2_dma_sg_plane_desc(&buf->vb.vb2_buf, 0);

	dprintk(1, "%s: %p\n", __func__, buf);
	if (vb2_plane_size(&buf->vb.vb2_buf, 0) < size)
		return -EINVAL;
	vb2_set_plane_payload(&buf->vb.vb2_buf, 0, size);

	cx23885_risc_databuffer(dev->pci, &buf->risc,
				sgt->sgl,
				port->ts_packet_size, port->ts_packet_count, 0);
	return 0;
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end-result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_buffer *prev;
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *cx88q = &port->mpegq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	spin_lock_irqsave(&dev->slock, flags);
	if (list_empty(&cx88q->active)) {
		list_add_tail(&buf->queue, &cx88q->active);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(cx88q->active.prev, struct cx23885_buffer,
				  queue);
		list_add_tail(&buf->queue, &cx88q->active);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.vb2_buf.index, __func__);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

/* ----------------------------------------------------------- */

static void do_cancel_buffers(struct cx23885_tsport *port, char *reason)
{
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx23885_buffer,
				 queue);
		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.vb2_buf.index, reason,
			(unsigned long)buf->risc.dma);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}

void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	dprintk(1, "%s()\n", __func__);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel");
}

int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		pr_err("%s: V4L mpeg risc op code error, status = 0x%x\n",
		       dev->name, status);
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {

		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);

		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);

		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);

		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		pr_err("%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {

		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);

		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);

	}
	if (status) {
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}

static irqreturn_t cx23885_irq(int irq, void *dev_id)
|
|
{
|
|
struct cx23885_dev *dev = dev_id;
|
|
struct cx23885_tsport *ts1 = &dev->ts1;
|
|
struct cx23885_tsport *ts2 = &dev->ts2;
|
|
u32 pci_status, pci_mask;
|
|
u32 vida_status, vida_mask;
|
|
u32 audint_status, audint_mask;
|
|
u32 ts1_status, ts1_mask;
|
|
u32 ts2_status, ts2_mask;
|
|
int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
|
|
int audint_count = 0;
|
|
bool subdev_handled;
|
|
|
|
pci_status = cx_read(PCI_INT_STAT);
|
|
pci_mask = cx23885_irq_get_mask(dev);
|
|
if ((pci_status & pci_mask) == 0) {
|
|
dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
|
|
pci_status, pci_mask);
|
|
goto out;
|
|
}
|
|
|
|
vida_status = cx_read(VID_A_INT_STAT);
|
|
vida_mask = cx_read(VID_A_INT_MSK);
|
|
audint_status = cx_read(AUDIO_INT_INT_STAT);
|
|
audint_mask = cx_read(AUDIO_INT_INT_MSK);
|
|
ts1_status = cx_read(VID_B_INT_STAT);
|
|
ts1_mask = cx_read(VID_B_INT_MSK);
|
|
ts2_status = cx_read(VID_C_INT_STAT);
|
|
ts2_mask = cx_read(VID_C_INT_MSK);
|
|
|
|
if (((pci_status & pci_mask) == 0) &&
|
|
((ts2_status & ts2_mask) == 0) &&
|
|
((ts1_status & ts1_mask) == 0))
|
|
goto out;
|
|
|
|
vida_count = cx_read(VID_A_GPCNT);
|
|
audint_count = cx_read(AUD_INT_A_GPCNT);
|
|
ts1_count = cx_read(ts1->reg_gpcnt);
|
|
ts2_count = cx_read(ts2->reg_gpcnt);
|
|
dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
|
|
pci_status, pci_mask);
|
|
dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
|
|
vida_status, vida_mask, vida_count);
|
|
dprintk(7, "audint_status: 0x%08x audint_mask: 0x%08x count: 0x%x\n",
|
|
audint_status, audint_mask, audint_count);
|
|
dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
|
|
ts1_status, ts1_mask, ts1_count);
|
|
dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
|
|
ts2_status, ts2_mask, ts2_count);
|
|
|
|
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

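	/*
	 * On the NetUP CI boards the CAM slot status lines are routed to
	 * bridge GPIOs, so GPIO interrupts are forwarded to the relevant
	 * CI driver (ci_type 1: CIMaX based, ci_type 2: Altera based).
	 */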
	if (cx23885_boards[dev->board].ci_type == 1 &&
	    (pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
	    (pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (audint_status)
		handled += cx23885_audio_irq(dev, audint_status, audint_mask);

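	/*
	 * Let the IR sub-device service its own interrupt; it reports via
	 * subdev_handled whether the interrupt actually belonged to it.
	 */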
	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

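	/*
	 * Servicing the A/V core involves sub-device calls that can sleep,
	 * so mask its interrupt here and defer the work to cx25840_work,
	 * which runs in process context.
	 */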
	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		schedule_work(&dev->cx25840_work);
		handled++;
	}

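	/* Write the serviced bits back to PCI_INT_STAT to acknowledge them */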
	if (handled)
		cx_write(PCI_INT_STAT, pci_status & pci_mask);
out:
	return IRQ_RETVAL(handled);
}

static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY: /* Possibly called in an IRQ context */
		if (sd == dev->sd_ir)
			cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}

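/*
 * Hook up the deferred-work handlers and the v4l2_device notify callback
 * so A/V core and IR sub-device events can be handled outside IRQ context.
 */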
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}

static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}

static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}

/* Mask represents 32 different GPIOs. GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO 2 thru 0 - On the cx23885 bridge
 * GPIO 18 thru 3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 *
 * (An illustrative call sequence follows cx23885_gpio_enable() below.)
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x7)
		cx_set(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Setting GPIO on encoder ports\n",
			       dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x7);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Clearing GPIO on encoder ports\n",
			       dev->name);
		cx_clear(MC417_RWD, (mask & 0x7fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);
}

u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Reading GPIO on encoder ports\n",
			       dev->name);
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		pr_info("%s: Unsupported\n", dev->name);

	return 0;
}

void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	if ((mask & 0x00000007) && asoutput)
		cx_set(GP0_IO, (mask & 0x7) << 16);
	else if ((mask & 0x00000007) && !asoutput)
		cx_clear(GP0_IO, (mask & 0x7) << 16);

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			pr_err("%s: Enabling GPIO on encoder ports\n",
			       dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if ((mask & 0x0007fff8) && asoutput)
		cx_clear(MC417_OEN, (mask & 0x7fff8) >> 3);
	else if ((mask & 0x0007fff8) && !asoutput)
		cx_set(MC417_OEN, (mask & 0x7fff8) >> 3);

	/* TODO: 23-19 */
}

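/*
 * Usage sketch (illustrative only; nothing in the driver depends on this
 * exact sequence): to configure bridge GPIO 2 (mask bit 0x04) as an
 * output and toggle it, a caller would do:
 *
 *	cx23885_gpio_enable(dev, 0x04, 1);	- configure GPIO 2 as output
 *	cx23885_gpio_set(dev, 0x04);		- drive the line high
 *	cx23885_gpio_clear(dev, 0x04);		- drive the line low
 */
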
static int cx23885_initdev(struct pci_dev *pci_dev,
			   const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	struct v4l2_ctrl_handler *hdl;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	hdl = &dev->ctrl_handler;
	v4l2_ctrl_handler_init(hdl, 6);
	if (hdl->error) {
		err = hdl->error;
		goto fail_ctrl;
	}
	dev->v4l2_dev.ctrl_handler = hdl;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_ctrl;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_ctrl;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	pr_info("%s/0: found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n",
		dev->name,
		pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
		dev->pci_lat,
		(unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
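	/* The bridge's RISC/DMA engine uses 32-bit addresses, so only a
	 * 32-bit DMA mask is usable here. */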
	err = pci_set_dma_mask(pci_dev, 0xffffffff);
	if (err) {
		pr_err("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		goto fail_ctrl;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED, dev->name, dev);
	if (err < 0) {
		pr_err("%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_ctrl:
	v4l2_ctrl_handler_free(hdl);
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}

static void cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	pci_disable_device(pci_dev);

	cx23885_dev_unregister(dev);
	v4l2_ctrl_handler_free(&dev->ctrl_handler);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}

static const struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor = 0x14f1,
		.device = 0x8852,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor = 0x14f1,
		.device = 0x8880,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);

static struct pci_driver cx23885_pci_driver = {
	.name = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe = cx23885_initdev,
	.remove = cx23885_finidev,
	/* TODO */
	.suspend = NULL,
	.resume = NULL,
};

static int __init cx23885_init(void)
{
	pr_info("cx23885 driver version %s loaded\n",
		CX23885_VERSION);
	return pci_register_driver(&cx23885_pci_driver);
}

static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);