Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/libata-dev: (50 commits)
  [libata] Delete pata_it8172 driver
  [PATCH] libata: improve handling of diagnostic fail (and hardware that misreports it)
  [PATCH] libata: fix non-uniform ports handling
  Fix libata resource conflict for legacy mode
  [libata] ata_piix: build fix
  [PATCH] pata_amd: Check enable bits on Nvidia
  [PATCH] Update SiS PATA
  [libata] Add pata_jmicron driver to Kconfig, Makefile
  [libata #pata-drivers] Trim trailing whitespace.
  [libata] Trim trailing whitespace.
  [libata] Add a bunch of PATA drivers.
  Rename libata-bmdma.c to libata-sff.c.
  libata: Grand renaming.
  Clean up drivers/ata/Kconfig a bit.
  [PATCH] CONFIG_PM=n slim: drivers/scsi/sata_sil*
  [PATCH] sata_via: Add SATA support for vt8237a
  [PATCH] libata: change path to libata in libata.tmpl
  [PATCH] libata: s/CONFIG_SCSI_SATA/CONFIG_[S]ATA/g in pci/quirks.c
  libata: Make sure drivers/ata is a separate Kconfig menu
  [libata] ata_piix: add missing kfree()
  ...
Linus Torvalds 2006-09-24 10:19:56 -07:00
commit a68aa1cc6f
77 changed files with 19870 additions and 1299 deletions

Documentation/DocBook/libata.tmpl

@ -868,18 +868,18 @@ and other resources, etc.
<chapter id="libataExt">
<title>libata Library</title>
!Edrivers/scsi/libata-core.c
!Edrivers/ata/libata-core.c
</chapter>
<chapter id="libataInt">
<title>libata Core Internals</title>
!Idrivers/scsi/libata-core.c
!Idrivers/ata/libata-core.c
</chapter>
<chapter id="libataScsiInt">
<title>libata SCSI translation/emulation</title>
!Edrivers/scsi/libata-scsi.c
!Idrivers/scsi/libata-scsi.c
!Edrivers/ata/libata-scsi.c
!Idrivers/ata/libata-scsi.c
</chapter>
<chapter id="ataExceptions">
@ -1600,12 +1600,12 @@ and other resources, etc.
<chapter id="PiixInt">
<title>ata_piix Internals</title>
!Idrivers/scsi/ata_piix.c
!Idrivers/ata/ata_piix.c
</chapter>
<chapter id="SILInt">
<title>sata_sil Internals</title>
!Idrivers/scsi/sata_sil.c
!Idrivers/ata/sata_sil.c
</chapter>
<chapter id="libataThanks">

drivers/Kconfig

@ -18,6 +18,8 @@ source "drivers/ide/Kconfig"
source "drivers/scsi/Kconfig"
source "drivers/ata/Kconfig"
source "drivers/cdrom/Kconfig"
source "drivers/md/Kconfig"

drivers/Makefile

@ -34,6 +34,7 @@ obj-$(CONFIG_PPC_PMAC) += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_FC4) += fc4/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_FUSION) += message/
obj-$(CONFIG_IEEE1394) += ieee1394/
obj-y += cdrom/

drivers/ata/Kconfig (new file, 484 lines)

@ -0,0 +1,484 @@
#
# SATA/PATA driver configuration
#
menu "Serial ATA (prod) and Parallel ATA (experimental) drivers"
config ATA
tristate "ATA device support"
select SCSI
---help---
If you want to use an ATA hard disk, ATA tape drive, ATA CD-ROM or
any other ATA device under Linux, say Y and make sure that you know
the name of your ATA host adapter (the card inside your computer
that "speaks" the ATA protocol, also called ATA controller),
because you will be asked for it.
if ATA
config SATA_AHCI
tristate "AHCI SATA support"
depends on PCI
help
This option enables support for AHCI Serial ATA.
If unsure, say N.
config SATA_SVW
tristate "ServerWorks Frodo / Apple K2 SATA support"
depends on PCI
help
This option enables support for Broadcom/Serverworks/Apple K2
SATA controllers.
If unsure, say N.
config ATA_PIIX
tristate "Intel PIIX/ICH SATA support"
depends on PCI
help
This option enables support for ICH5/6/7/8 Serial ATA.
If PATA support was enabled previously, this enables
support for select Intel PIIX/ICH PATA host controllers.
If unsure, say N.
config SATA_MV
tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] chips.
If unsure, say N.
config SATA_NV
tristate "NVIDIA SATA support"
depends on PCI
help
This option enables support for NVIDIA Serial ATA.
If unsure, say N.
config PDC_ADMA
tristate "Pacific Digital ADMA support"
depends on PCI
help
This option enables support for Pacific Digital ADMA controllers.
If unsure, say N.
config SATA_QSTOR
tristate "Pacific Digital SATA QStor support"
depends on PCI
help
This option enables support for Pacific Digital Serial ATA QStor.
If unsure, say N.
config SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on PCI
help
This option enables support for Promise Serial ATA TX2/TX4.
If unsure, say N.
config SATA_SX4
tristate "Promise SATA SX4 support"
depends on PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA SX4.
If unsure, say N.
config SATA_SIL
tristate "Silicon Image SATA support"
depends on PCI
help
This option enables support for Silicon Image Serial ATA.
If unsure, say N.
config SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on PCI
help
This option enables support for Silicon Image 3124/3132 Serial ATA.
If unsure, say N.
config SATA_SIS
tristate "SiS 964/180 SATA support"
depends on PCI
help
This option enables support for SiS Serial ATA 964/180.
If unsure, say N.
config SATA_ULI
tristate "ULi Electronics SATA support"
depends on PCI
help
This option enables support for ULi Electronics SATA.
If unsure, say N.
config SATA_VIA
tristate "VIA SATA support"
depends on PCI
help
This option enables support for VIA Serial ATA.
If unsure, say N.
config SATA_VITESSE
tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
depends on PCI
help
This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
If unsure, say N.
config SATA_INTEL_COMBINED
bool
depends on IDE=y && !BLK_DEV_IDE_SATA && (SATA_AHCI || ATA_PIIX)
default y
config PATA_ALI
tristate "ALi PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the ALi ATA interfaces
found on the many ALi chipsets.
If unsure, say N.
config PATA_AMD
tristate "AMD/NVidia PATA support (Experimental)"
depends on PCI
help
This option enables support for the AMD and NVidia PATA
interfaces found on the chipsets for Athlon/Athlon64.
If unsure, say N.
config PATA_ARTOP
tristate "ARTOP 6210/6260 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for ARTOP PATA controllers.
If unsure, say N.
config PATA_ATIIXP
tristate "ATI PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the ATI ATA interfaces
found on the many ATI chipsets.
If unsure, say N.
config PATA_CMD64X
tristate "CMD64x PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the CMD64x series chips
except for the CMD640.
If unsure, say N.
config PATA_CS5520
tristate "CS5510/5520 PATA support"
depends on PCI
help
This option enables support for the Cyrix 5510/5520
companion chip used with the MediaGX/Geode processor family.
If unsure, say N.
config PATA_CS5530
tristate "CS5530 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the Cyrix/NatSemi/AMD CS5530
companion chip used with the MediaGX/Geode processor family.
If unsure, say N.
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
depends on PCI && X86 && !X86_64 && EXPERIMENTAL
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
If unsure, say N.
config PATA_CYPRESS
tristate "Cypress CY82C693 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the Cypress/Contaq CY82C693
chipset found in some Alpha systems.
If unsure, say N.
config PATA_EFAR
tristate "EFAR SLC90E66 support"
depends on PCI
help
This option enables support for the EFAR SLC90E66
IDE controller found on some older machines.
If unsure, say N.
config ATA_GENERIC
tristate "Generic ATA support"
depends on PCI
help
This option enables support for generic BIOS configured
ATA controllers via the new ATA layer.
If unsure, say N.
config PATA_HPT366
tristate "HPT 366/368 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the HPT 366 and 368
PATA controllers via the new ATA layer.
If unsure, say N.
config PATA_HPT37X
tristate "HPT 370/370A/371/372/374/302 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the majority of the later HPT
PATA controllers via the new ATA layer.
If unsure, say N.
config PATA_HPT3X2N
tristate "HPT 372N/302N PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the N variant HPT PATA
controllers via the new ATA layer.
If unsure, say N.
config PATA_HPT3X3
tristate "HPT 343/363 PATA support (Experimental)"
depends on PCI
help
This option enables support for the HPT 343/363
PATA controllers via the new ATA layer.
If unsure, say N.
config PATA_ISAPNP
tristate "ISA Plug and Play PATA support (Very Experimental)"
depends on EXPERIMENTAL && ISAPNP
help
This option enables support for ISA plug & play ATA
controllers such as those found on old soundcards.
If unsure, say N.
config PATA_IT821X
tristate "IT821x PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the ITE 8211 and 8212
PATA controllers via the new ATA layer, including RAID
mode.
If unsure, say N.
config PATA_JMICRON
tristate "JMicron PATA support"
depends on PCI
help
Enable support for the JMicron IDE controller, via the new
ATA layer.
If unsure, say N.
config PATA_LEGACY
tristate "Legacy ISA PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for ISA/VLB bus legacy PATA
ports and allows them to be accessed via the new ATA layer.
If unsure, say N.
config PATA_TRIFLEX
tristate "Compaq Triflex PATA support"
depends on PCI
help
Enable support for the Compaq 'Triflex' IDE controller as found
on many Compaq Pentium-Pro systems, via the new ATA layer.
If unsure, say N.
config PATA_MPIIX
tristate "Intel PATA MPIIX support"
depends on PCI
help
This option enables support for the Intel MPIIX PATA interface.
If unsure, say N.
config PATA_OLDPIIX
tristate "Intel PATA old PIIX support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for older PIIX PATA controllers.
If unsure, say N.
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
depends on PCI
help
This option enables support for the Netcell Revolution RAID
PATA controller.
If unsure, say N.
config PATA_NS87410
tristate "Nat Semi NS87410 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the National Semiconductor
NS87410 PCI-IDE controller.
If unsure, say N.
config PATA_OPTI
tristate "OPTI621/6215 PATA support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables full PIO support for the early Opti ATA
controllers found on some old motherboards.
If unsure, say N.
config PATA_OPTIDMA
tristate "OPTI FireStar PATA support (Veyr Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables DMA/PIO support for the later OPTi
controllers found on some old motherboards and in some
laptops.
If unsure, say N.
config PATA_PCMCIA
tristate "PCMCIA PATA support"
depends on PCMCIA
help
This option enables support for PCMCIA ATA interfaces, including
compact flash card adapters via the new ATA layer.
If unsure, say N.
config PATA_PDC_OLD
tristate "Older Promise PATA controller support (Very Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the Promise 20246, 20262, 20263,
20265 and 20267 adapters.
If unsure, say N.
config PATA_QDI
tristate "QDI VLB PATA support"
help
Support for QDI 6500 and 6580 PATA controllers on VESA local bus.
config PATA_RADISYS
tristate "RADISYS 82600 PATA support (Very experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the RADISYS 82600
PATA controllers via the new ATA layer.
If unsure, say N.
config PATA_RZ1000
tristate "PC Tech RZ1000 PATA support"
depends on PCI
help
This option enables basic support for the PC Tech RZ1000/1
PATA controllers via the new ATA layer.
If unsure, say N.
config PATA_SC1200
tristate "SC1200 PATA support (Raving Lunatic)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the NatSemi/AMD SC1200 SoC
companion chip used with the Geode processor family.
If unsure, say N.
config PATA_SERVERWORKS
tristate "SERVERWORKS OSB4/CSB5/CSB6/HT1000 PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for the Serverworks OSB4/CSB5/CSB6 and
HT1000 PATA controllers, via the new ATA layer.
If unsure, say N.
config PATA_PDC2027X
tristate "Promise PATA 2027x support"
depends on PCI
help
This option enables support for Promise PATA pdc20268 to pdc20277 host adapters.
If unsure, say N.
config PATA_SIL680
tristate "CMD / Silicon Image 680 PATA support"
depends on PCI
help
This option enables support for CMD / Silicon Image 680 PATA.
If unsure, say N.
config PATA_SIS
tristate "SiS PATA support (Experimental)"
depends on PCI && EXPERIMENTAL
help
This option enables support for SiS PATA controllers
If unsure, say N.
config PATA_VIA
tristate "VIA PATA support"
depends on PCI
help
This option enables support for the VIA PATA interfaces
found on the many VIA chipsets.
If unsure, say N.
config PATA_WINBOND
tristate "Winbond SL82C105 PATA support"
depends on PCI
help
This option enables support for SL82C105 PATA devices found in the
Netwinder and some other systems.
If unsure, say N.
endif
endmenu
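# Each tristate entry above builds the matching module listed in
# drivers/ata/Makefile; CONFIG_ATA itself selects SCSI because libata
# presents ATA devices to the block layer through the SCSI midlayer.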

drivers/ata/Makefile (new file, 62 lines)

@ -0,0 +1,62 @@
obj-$(CONFIG_ATA) += libata.o
obj-$(CONFIG_SATA_AHCI) += ahci.o
obj-$(CONFIG_SATA_SVW) += sata_svw.o
obj-$(CONFIG_ATA_PIIX) += ata_piix.o
obj-$(CONFIG_SATA_PROMISE) += sata_promise.o
obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SIL) += sata_sil.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_VIA) += sata_via.o
obj-$(CONFIG_SATA_VITESSE) += sata_vsc.o
obj-$(CONFIG_SATA_SIS) += sata_sis.o
obj-$(CONFIG_SATA_SX4) += sata_sx4.o
obj-$(CONFIG_SATA_NV) += sata_nv.o
obj-$(CONFIG_SATA_ULI) += sata_uli.o
obj-$(CONFIG_SATA_MV) += sata_mv.o
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
obj-$(CONFIG_PATA_ALI) += pata_ali.o
obj-$(CONFIG_PATA_AMD) += pata_amd.o
obj-$(CONFIG_PATA_ARTOP) += pata_artop.o
obj-$(CONFIG_PATA_ATIIXP) += pata_atiixp.o
obj-$(CONFIG_PATA_CMD64X) += pata_cmd64x.o
obj-$(CONFIG_PATA_CS5520) += pata_cs5520.o
obj-$(CONFIG_PATA_CS5530) += pata_cs5530.o
obj-$(CONFIG_PATA_CS5535) += pata_cs5535.o
obj-$(CONFIG_PATA_CYPRESS) += pata_cypress.o
obj-$(CONFIG_PATA_EFAR) += pata_efar.o
obj-$(CONFIG_PATA_HPT366) += pata_hpt366.o
obj-$(CONFIG_PATA_HPT37X) += pata_hpt37x.o
obj-$(CONFIG_PATA_HPT3X2N) += pata_hpt3x2n.o
obj-$(CONFIG_PATA_HPT3X3) += pata_hpt3x3.o
obj-$(CONFIG_PATA_ISAPNP) += pata_isapnp.o
obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
obj-$(CONFIG_PATA_NS87410) += pata_ns87410.o
obj-$(CONFIG_PATA_OPTI) += pata_opti.o
obj-$(CONFIG_PATA_OPTIDMA) += pata_optidma.o
obj-$(CONFIG_PATA_MPIIX) += pata_mpiix.o
obj-$(CONFIG_PATA_OLDPIIX) += pata_oldpiix.o
obj-$(CONFIG_PATA_PCMCIA) += pata_pcmcia.o
obj-$(CONFIG_PATA_PDC2027X) += pata_pdc2027x.o
obj-$(CONFIG_PATA_PDC_OLD) += pata_pdc202xx_old.o
obj-$(CONFIG_PATA_QDI) += pata_qdi.o
obj-$(CONFIG_PATA_RADISYS) += pata_radisys.o
obj-$(CONFIG_PATA_RZ1000) += pata_rz1000.o
obj-$(CONFIG_PATA_SC1200) += pata_sc1200.o
obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
obj-$(CONFIG_PATA_SIL680) += pata_sil680.o
obj-$(CONFIG_PATA_VIA) += pata_via.o
obj-$(CONFIG_PATA_WINBOND) += pata_sl82c105.o
obj-$(CONFIG_PATA_SIS) += pata_sis.o
obj-$(CONFIG_PATA_TRIFLEX) += pata_triflex.o
# Should be last but one libata driver
obj-$(CONFIG_ATA_GENERIC) += ata_generic.o
# Should be last libata driver
obj-$(CONFIG_PATA_LEGACY) += pata_legacy.o
libata-objs := libata-core.o libata-scsi.o libata-sff.o libata-eh.o
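# libata.o is the shared core every controller module above depends on;
# it is linked from the core, SCSI-translation, SFF/BMDMA helper
# (renamed from libata-bmdma.c in this merge) and error-handling objects.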

drivers/ata/ahci.c

@ -92,7 +92,9 @@ enum {
HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
/* HOST_CAP bits */
HOST_CAP_SSC = (1 << 14), /* Slumber capable */
HOST_CAP_CLO = (1 << 24), /* Command List Override support */
HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
@ -155,6 +157,7 @@ enum {
PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
@ -212,6 +215,10 @@ static void ahci_freeze(struct ata_port *ap);
static void ahci_thaw(struct ata_port *ap);
static void ahci_error_handler(struct ata_port *ap);
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int ahci_port_resume(struct ata_port *ap);
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
static int ahci_pci_device_resume(struct pci_dev *pdev);
static void ahci_remove_one (struct pci_dev *pdev);
static struct scsi_host_template ahci_sht = {
@ -231,6 +238,8 @@ static struct scsi_host_template ahci_sht = {
.slave_configure = ata_scsi_slave_config,
.slave_destroy = ata_scsi_slave_destroy,
.bios_param = ata_std_bios_param,
.suspend = ata_scsi_device_suspend,
.resume = ata_scsi_device_resume,
};
static const struct ata_port_operations ahci_ops = {
@ -257,6 +266,9 @@ static const struct ata_port_operations ahci_ops = {
.error_handler = ahci_error_handler,
.post_internal_cmd = ahci_post_internal_cmd,
.port_suspend = ahci_port_suspend,
.port_resume = ahci_port_resume,
.port_start = ahci_port_start,
.port_stop = ahci_port_stop,
};
@ -265,7 +277,7 @@ static const struct ata_port_info ahci_port_info[] = {
/* board_ahci */
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY,
.pio_mask = 0x1f, /* pio0-4 */
@ -275,7 +287,7 @@ static const struct ata_port_info ahci_port_info[] = {
/* board_ahci_vt8251 */
{
.sht = &ahci_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
ATA_FLAG_SKIP_D2H_BSY |
AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
@ -350,6 +362,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* MCP65 */
/* SiS */
{ PCI_VENDOR_ID_SI, 0x1184, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* SiS 966 */
{ PCI_VENDOR_ID_SI, 0x1185, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* SiS 966 */
{ PCI_VENDOR_ID_SI, 0x0186, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
board_ahci }, /* SiS 968 */
{ } /* terminate list */
};
@ -358,6 +378,8 @@ static struct pci_driver ahci_pci_driver = {
.name = DRV_NAME,
.id_table = ahci_pci_tbl,
.probe = ahci_init_one,
.suspend = ahci_pci_device_suspend,
.resume = ahci_pci_device_resume,
.remove = ahci_remove_one,
};
@ -372,108 +394,6 @@ static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int por
return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
}
static int ahci_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_port_priv *pp;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
void *mem;
dma_addr_t mem_dma;
int rc;
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
memset(pp, 0, sizeof(*pp));
rc = ata_pad_alloc(ap, dev);
if (rc) {
kfree(pp);
return rc;
}
mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
if (!mem) {
ata_pad_free(ap, dev);
kfree(pp);
return -ENOMEM;
}
memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
/*
* First item in chunk of DMA memory: 32-slot command table,
* 32 bytes each in size
*/
pp->cmd_slot = mem;
pp->cmd_slot_dma = mem_dma;
mem += AHCI_CMD_SLOT_SZ;
mem_dma += AHCI_CMD_SLOT_SZ;
/*
* Second item: Received-FIS area
*/
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
mem += AHCI_RX_FIS_SZ;
mem_dma += AHCI_RX_FIS_SZ;
/*
* Third item: data area for storing a single command
* and its scatter-gather table
*/
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
ap->private_data = pp;
if (hpriv->cap & HOST_CAP_64)
writel((pp->cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
readl(port_mmio + PORT_LST_ADDR); /* flush */
if (hpriv->cap & HOST_CAP_64)
writel((pp->rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
readl(port_mmio + PORT_FIS_ADDR); /* flush */
writel(PORT_CMD_ICC_ACTIVE | PORT_CMD_FIS_RX |
PORT_CMD_POWER_ON | PORT_CMD_SPIN_UP |
PORT_CMD_START, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
return 0;
}
static void ahci_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~(PORT_CMD_START | PORT_CMD_FIS_RX);
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
/* spec says 500 msecs for each PORT_CMD_{START,FIS_RX} bit, so
* this is slightly incorrect.
*/
msleep(500);
ap->private_data = NULL;
dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
pp->cmd_slot, pp->cmd_slot_dma);
ata_pad_free(ap, dev);
kfree(pp);
}
static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
{
unsigned int sc_reg;
@ -508,43 +428,256 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
}
static int ahci_stop_engine(struct ata_port *ap)
static void ahci_start_engine(void __iomem *port_mmio)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
int work;
u32 tmp;
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/* wait for engine to stop. TODO: this could be
* as long as 500 msec
*/
work = 1000;
while (work-- > 0) {
tmp = readl(port_mmio + PORT_CMD);
if ((tmp & PORT_CMD_LIST_ON) == 0)
return 0;
udelay(10);
}
return -EIO;
}
static void ahci_start_engine(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
/* start DMA */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
}
static int ahci_stop_engine(void __iomem *port_mmio)
{
u32 tmp;
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
return 0;
/* setting HBA to idle */
tmp &= ~PORT_CMD_START;
writel(tmp, port_mmio + PORT_CMD);
/* wait for engine to stop. This could be as long as 500 msec */
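/* ata_wait_register() re-reads the register until the bits selected by
 * the mask no longer equal the given value or the timeout expires; here
 * it polls PORT_CMD every 1 ms, for up to 500 ms, until PORT_CMD_LIST_ON
 * clears, and returns the last value read.
 */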
tmp = ata_wait_register(port_mmio + PORT_CMD,
PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
if (tmp & PORT_CMD_LIST_ON)
return -EIO;
return 0;
}
static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
u32 tmp;
/* set FIS registers */
if (cap & HOST_CAP_64)
writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
if (cap & HOST_CAP_64)
writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
/* enable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp |= PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* flush */
readl(port_mmio + PORT_CMD);
}
static int ahci_stop_fis_rx(void __iomem *port_mmio)
{
u32 tmp;
/* disable FIS reception */
tmp = readl(port_mmio + PORT_CMD);
tmp &= ~PORT_CMD_FIS_RX;
writel(tmp, port_mmio + PORT_CMD);
/* wait for completion, spec says 500ms, give it 1000 */
tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
PORT_CMD_FIS_ON, 10, 1000);
if (tmp & PORT_CMD_FIS_ON)
return -EBUSY;
return 0;
}
static void ahci_power_up(void __iomem *port_mmio, u32 cap)
{
u32 cmd;
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
/* spin up device */
if (cap & HOST_CAP_SSS) {
cmd |= PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
/* wake up link */
writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
}
static void ahci_power_down(void __iomem *port_mmio, u32 cap)
{
u32 cmd, scontrol;
cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
if (cap & HOST_CAP_SSC) {
/* enable transitions to slumber mode */
scontrol = readl(port_mmio + PORT_SCR_CTL);
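/* SControl bits 11:8 (IPM) restrict which interface power management
 * states may be entered; values of 2 or 3 forbid Slumber, so clear the
 * field ("no restrictions") before requesting Slumber below.
 */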
if ((scontrol & 0x0f00) > 0x100) {
scontrol &= ~0xf00;
writel(scontrol, port_mmio + PORT_SCR_CTL);
}
/* put device into slumber mode */
writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
/* wait for the transition to complete */
ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
PORT_CMD_ICC_SLUMBER, 1, 50);
}
/* put device into listen mode */
if (cap & HOST_CAP_SSS) {
/* first set PxSCTL.DET to 0 */
scontrol = readl(port_mmio + PORT_SCR_CTL);
scontrol &= ~0xf;
writel(scontrol, port_mmio + PORT_SCR_CTL);
/* then set PxCMD.SUD to 0 */
cmd &= ~PORT_CMD_SPIN_UP;
writel(cmd, port_mmio + PORT_CMD);
}
}
static void ahci_init_port(void __iomem *port_mmio, u32 cap,
dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
{
/* power up */
ahci_power_up(port_mmio, cap);
/* enable FIS reception */
ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
/* enable DMA */
ahci_start_engine(port_mmio);
}
static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
{
int rc;
/* disable DMA */
rc = ahci_stop_engine(port_mmio);
if (rc) {
*emsg = "failed to stop engine";
return rc;
}
/* disable FIS reception */
rc = ahci_stop_fis_rx(port_mmio);
if (rc) {
*emsg = "failed stop FIS RX";
return rc;
}
/* put device into slumber mode */
ahci_power_down(port_mmio, cap);
return 0;
}
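/* ahci_deinit_port() undoes ahci_init_port() in reverse order: the DMA
 * engine is stopped first, then FIS reception, and finally the link is
 * dropped into Slumber/listen mode via ahci_power_down().
 */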
static int ahci_reset_controller(void __iomem *mmio, struct pci_dev *pdev)
{
u32 cap_save, tmp;
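/* Preserve bits 28 and 17 of HOST_CAP across the global reset and force
 * staggered spin-up support (bit 27, HOST_CAP_SSS) on; the saved value
 * is written back once the reset completes.
 */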
cap_save = readl(mmio + HOST_CAP);
cap_save &= ( (1<<28) | (1<<17) );
cap_save |= (1 << 27);
/* global controller reset */
tmp = readl(mmio + HOST_CTL);
if ((tmp & HOST_RESET) == 0) {
writel(tmp | HOST_RESET, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
/* reset must complete within 1 second, or
* the hardware should be considered fried.
*/
ssleep(1);
tmp = readl(mmio + HOST_CTL);
if (tmp & HOST_RESET) {
dev_printk(KERN_ERR, &pdev->dev,
"controller reset failed (0x%x)\n", tmp);
return -EIO;
}
writel(HOST_AHCI_EN, mmio + HOST_CTL);
(void) readl(mmio + HOST_CTL); /* flush */
writel(cap_save, mmio + HOST_CAP);
writel(0xf, mmio + HOST_PORTS_IMPL);
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
u16 tmp16;
/* configure PCS */
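/* Intel AHCI parts expose per-port enable bits in the PCS register at
 * PCI config offset 0x92; setting the low four bits enables ports 0-3.
 */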
pci_read_config_word(pdev, 0x92, &tmp16);
tmp16 |= 0xf;
pci_write_config_word(pdev, 0x92, tmp16);
}
return 0;
}
static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
int n_ports, u32 cap)
{
int i, rc;
u32 tmp;
for (i = 0; i < n_ports; i++) {
void __iomem *port_mmio = ahci_port_base(mmio, i);
const char *emsg = NULL;
#if 0 /* BIOSen initialize this incorrectly */
if (!(hpriv->port_map & (1 << i)))
continue;
#endif
/* make sure port is not active */
rc = ahci_deinit_port(port_mmio, cap, &emsg);
if (rc)
dev_printk(KERN_WARNING, &pdev->dev,
"%s (%d)\n", emsg, rc);
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << i, mmio + HOST_IRQ_STAT);
}
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
}
static unsigned int ahci_dev_classify(struct ata_port *ap)
{
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
@ -576,7 +709,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
static int ahci_clo(struct ata_port *ap)
{
void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
struct ahci_host_priv *hpriv = ap->host_set->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
u32 tmp;
if (!(hpriv->cap & HOST_CAP_CLO))
@ -608,7 +741,7 @@ static int ahci_prereset(struct ata_port *ap)
static int ahci_softreset(struct ata_port *ap, unsigned int *class)
{
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const u32 cmd_fis_len = 5; /* five dwords */
const char *reason = NULL;
@ -626,7 +759,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
}
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_stop_engine(ap);
rc = ahci_stop_engine(port_mmio);
if (rc) {
reason = "failed to stop engine";
goto fail_restart;
@ -647,7 +780,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
}
/* restart engine */
ahci_start_engine(ap);
ahci_start_engine(port_mmio);
ata_tf_init(ap->device, &tf);
fis = pp->cmd_tbl;
@ -706,7 +839,7 @@ static int ahci_softreset(struct ata_port *ap, unsigned int *class)
return 0;
fail_restart:
ahci_start_engine(ap);
ahci_start_engine(port_mmio);
fail:
ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
return rc;
@ -717,11 +850,13 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
struct ahci_port_priv *pp = ap->private_data;
u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
struct ata_taskfile tf;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
int rc;
DPRINTK("ENTER\n");
ahci_stop_engine(ap);
ahci_stop_engine(port_mmio);
/* clear D2H reception area to properly wait for D2H FIS */
ata_tf_init(ap->device, &tf);
@ -730,7 +865,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
rc = sata_std_hardreset(ap, class);
ahci_start_engine(ap);
ahci_start_engine(port_mmio);
if (rc == 0 && ata_port_online(ap))
*class = ahci_dev_classify(ap);
@ -904,7 +1039,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
static void ahci_host_intr(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
struct ata_eh_info *ehi = &ap->eh_info;
u32 status, qc_active;
@ -940,7 +1075,7 @@ static void ahci_host_intr(struct ata_port *ap)
return;
/* ignore interim PIO setup fis interrupts */
if (ata_tag_valid(ap->active_tag) && (status & PORT_IRQ_PIOS_FIS))
return;
if (ata_ratelimit())
@ -956,7 +1091,7 @@ static void ahci_irq_clear(struct ata_port *ap)
static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ahci_host_priv *hpriv;
unsigned int i, handled = 0;
void __iomem *mmio;
@ -964,8 +1099,8 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
VPRINTK("ENTER\n");
hpriv = host_set->private_data;
mmio = host_set->mmio_base;
hpriv = host->private_data;
mmio = host->mmio_base;
/* sigh. 0xffffffff is a valid return from h/w */
irq_stat = readl(mmio + HOST_IRQ_STAT);
@ -973,22 +1108,22 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
if (!irq_stat)
return IRQ_NONE;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
if (!(irq_stat & (1 << i)))
continue;
ap = host_set->ports[i];
ap = host->ports[i];
if (ap) {
ahci_host_intr(ap);
VPRINTK("port %u\n", i);
} else {
VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_printk(KERN_WARNING, host_set->dev,
dev_printk(KERN_WARNING, host->dev,
"interrupt on disabled port %u\n", i);
}
@ -1000,7 +1135,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *r
handled = 1;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -1022,7 +1157,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
static void ahci_freeze(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
/* turn IRQ off */
@ -1031,7 +1166,7 @@ static void ahci_freeze(struct ata_port *ap)
static void ahci_thaw(struct ata_port *ap)
{
void __iomem *mmio = ap->host_set->mmio_base;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
u32 tmp;
@ -1046,10 +1181,13 @@ static void ahci_thaw(struct ata_port *ap)
static void ahci_error_handler(struct ata_port *ap)
{
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
/* restart engine */
ahci_stop_engine(ap);
ahci_start_engine(ap);
ahci_stop_engine(port_mmio);
ahci_start_engine(port_mmio);
}
/* perform recovery */
@ -1060,17 +1198,178 @@ static void ahci_error_handler(struct ata_port *ap)
static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
if (qc->flags & ATA_QCFLAG_FAILED)
qc->err_mask |= AC_ERR_OTHER;
if (qc->err_mask) {
/* make DMA engine forget about the failed command */
ahci_stop_engine(ap);
ahci_start_engine(ap);
ahci_stop_engine(port_mmio);
ahci_start_engine(port_mmio);
}
}
static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const char *emsg = NULL;
int rc;
rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
if (rc) {
ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
ahci_init_port(port_mmio, hpriv->cap,
pp->cmd_slot_dma, pp->rx_fis_dma);
}
return rc;
}
static int ahci_port_resume(struct ata_port *ap)
{
struct ahci_port_priv *pp = ap->private_data;
struct ahci_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
return 0;
}
static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
void __iomem *mmio = host->mmio_base;
u32 ctl;
if (mesg.event == PM_EVENT_SUSPEND) {
/* AHCI spec rev1.1 section 8.3.3:
* Software must disable interrupts prior to requesting a
* transition of the HBA to D3 state.
*/
ctl = readl(mmio + HOST_CTL);
ctl &= ~HOST_IRQ_EN;
writel(ctl, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
return ata_pci_device_suspend(pdev, mesg);
}
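/* Resume path: PCI config/power state is restored first; the HBA is
 * only reset and re-initialized when coming back from a full suspend,
 * after which ata_host_resume() hands the ports back to libata.
 */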
static int ahci_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct ahci_host_priv *hpriv = host->private_data;
void __iomem *mmio = host->mmio_base;
int rc;
ata_pci_device_do_resume(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
rc = ahci_reset_controller(mmio, pdev);
if (rc)
return rc;
ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap);
}
ata_host_resume(host);
return 0;
}
static int ahci_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
void *mem;
dma_addr_t mem_dma;
int rc;
pp = kmalloc(sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
memset(pp, 0, sizeof(*pp));
rc = ata_pad_alloc(ap, dev);
if (rc) {
kfree(pp);
return rc;
}
mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
if (!mem) {
ata_pad_free(ap, dev);
kfree(pp);
return -ENOMEM;
}
memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
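/* The coherent buffer is carved into three regions, set up below: the
 * 32-entry command list (AHCI_CMD_SLOT_SZ), the received-FIS area
 * (AHCI_RX_FIS_SZ), and the command table / scatter-gather space.
 */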
/*
* First item in chunk of DMA memory: 32-slot command table,
* 32 bytes each in size
*/
pp->cmd_slot = mem;
pp->cmd_slot_dma = mem_dma;
mem += AHCI_CMD_SLOT_SZ;
mem_dma += AHCI_CMD_SLOT_SZ;
/*
* Second item: Received-FIS area
*/
pp->rx_fis = mem;
pp->rx_fis_dma = mem_dma;
mem += AHCI_RX_FIS_SZ;
mem_dma += AHCI_RX_FIS_SZ;
/*
* Third item: data area for storing a single command
* and its scatter-gather table
*/
pp->cmd_tbl = mem;
pp->cmd_tbl_dma = mem_dma;
ap->private_data = pp;
/* initialize port */
ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
return 0;
}
static void ahci_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct ahci_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
const char *emsg = NULL;
int rc;
/* de-initialize port */
rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
if (rc)
ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
ap->private_data = NULL;
dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
pp->cmd_slot, pp->cmd_slot_dma);
ata_pad_free(ap, dev);
kfree(pp);
}
static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
unsigned int port_idx)
{
@ -1089,47 +1388,12 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
struct ahci_host_priv *hpriv = probe_ent->private_data;
struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
void __iomem *mmio = probe_ent->mmio_base;
u32 tmp, cap_save;
unsigned int i, j, using_dac;
unsigned int i, using_dac;
int rc;
void __iomem *port_mmio;
cap_save = readl(mmio + HOST_CAP);
cap_save &= ( (1<<28) | (1<<17) );
cap_save |= (1 << 27);
/* global controller reset */
tmp = readl(mmio + HOST_CTL);
if ((tmp & HOST_RESET) == 0) {
writel(tmp | HOST_RESET, mmio + HOST_CTL);
readl(mmio + HOST_CTL); /* flush */
}
/* reset must complete within 1 second, or
* the hardware should be considered fried.
*/
ssleep(1);
tmp = readl(mmio + HOST_CTL);
if (tmp & HOST_RESET) {
dev_printk(KERN_ERR, &pdev->dev,
"controller reset failed (0x%x)\n", tmp);
return -EIO;
}
writel(HOST_AHCI_EN, mmio + HOST_CTL);
(void) readl(mmio + HOST_CTL); /* flush */
writel(cap_save, mmio + HOST_CAP);
writel(0xf, mmio + HOST_PORTS_IMPL);
(void) readl(mmio + HOST_PORTS_IMPL); /* flush */
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
u16 tmp16;
pci_read_config_word(pdev, 0x92, &tmp16);
tmp16 |= 0xf;
pci_write_config_word(pdev, 0x92, tmp16);
}
rc = ahci_reset_controller(mmio, pdev);
if (rc)
return rc;
hpriv->cap = readl(mmio + HOST_CAP);
hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
@ -1165,63 +1429,10 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
}
}
for (i = 0; i < probe_ent->n_ports; i++) {
#if 0 /* BIOSen initialize this incorrectly */
if (!(hpriv->port_map & (1 << i)))
continue;
#endif
for (i = 0; i < probe_ent->n_ports; i++)
ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
port_mmio = ahci_port_base(mmio, i);
VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
ahci_setup_port(&probe_ent->port[i],
(unsigned long) mmio, i);
/* make sure port is not active */
tmp = readl(port_mmio + PORT_CMD);
VPRINTK("PORT_CMD 0x%x\n", tmp);
if (tmp & (PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
PORT_CMD_FIS_RX | PORT_CMD_START)) {
tmp &= ~(PORT_CMD_LIST_ON | PORT_CMD_FIS_ON |
PORT_CMD_FIS_RX | PORT_CMD_START);
writel(tmp, port_mmio + PORT_CMD);
readl(port_mmio + PORT_CMD); /* flush */
/* spec says 500 msecs for each bit, so
* this is slightly incorrect.
*/
msleep(500);
}
writel(PORT_CMD_SPIN_UP, port_mmio + PORT_CMD);
j = 0;
while (j < 100) {
msleep(10);
tmp = readl(port_mmio + PORT_SCR_STAT);
if ((tmp & 0xf) == 0x3)
break;
j++;
}
tmp = readl(port_mmio + PORT_SCR_ERR);
VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* ack any pending irq events for this port */
tmp = readl(port_mmio + PORT_IRQ_STAT);
VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
writel(1 << i, mmio + HOST_IRQ_STAT);
}
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
VPRINTK("HOST_CTL 0x%x\n", tmp);
ahci_init_controller(mmio, pdev, probe_ent->n_ports, hpriv->cap);
pci_set_master(pdev);
@ -1370,7 +1581,7 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
memset(hpriv, 0, sizeof(*hpriv));
probe_ent->sht = ahci_port_info[board_idx].sht;
probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
probe_ent->port_flags = ahci_port_info[board_idx].flags;
probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
@ -1388,9 +1599,9 @@ static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_hpriv;
if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) &&
(hpriv->cap & HOST_CAP_NCQ))
probe_ent->host_flags |= ATA_FLAG_NCQ;
probe_ent->port_flags |= ATA_FLAG_NCQ;
ahci_print_info(probe_ent);
@ -1421,27 +1632,27 @@ err_out:
static void ahci_remove_one (struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host_set *host_set = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
unsigned int i;
int have_msi;
for (i = 0; i < host_set->n_ports; i++)
ata_port_detach(host_set->ports[i]);
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
have_msi = hpriv->flags & AHCI_FLAG_MSI;
free_irq(host_set->irq, host_set);
free_irq(host->irq, host);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
ata_scsi_release(ap->host);
scsi_host_put(ap->host);
ata_scsi_release(ap->scsi_host);
scsi_host_put(ap->scsi_host);
}
kfree(hpriv);
pci_iounmap(pdev, host_set->mmio_base);
kfree(host_set);
pci_iounmap(pdev, host->mmio_base);
kfree(host);
if (have_msi)
pci_disable_msi(pdev);
@ -1454,7 +1665,7 @@ static void ahci_remove_one (struct pci_dev *pdev)
static int __init ahci_init(void)
{
return pci_module_init(&ahci_pci_driver);
return pci_register_driver(&ahci_pci_driver);
}
static void __exit ahci_exit(void)

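The last hunk above swaps the deprecated pci_module_init() wrapper for a direct pci_register_driver() call. A minimal sketch of that registration pattern follows; every example_* identifier is hypothetical and only illustrates the init/exit boilerplate shared by the drivers in this merge, assuming the 2.6-era PCI probe/remove signatures.

#include <linux/module.h>
#include <linux/pci.h>

/* Hypothetical device ID table; a real driver lists its hardware here. */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0xffff, 0xffff) },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* A real probe would map BARs and hand the device to libata. */
	return pci_enable_device(pdev);
}

static void example_remove_one(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name		= "example",
	.id_table	= example_pci_tbl,
	.probe		= example_init_one,
	.remove		= example_remove_one,
};

static int __init example_init(void)
{
	/* pci_module_init() was a thin legacy wrapper around this call. */
	return pci_register_driver(&example_pci_driver);
}

static void __exit example_exit(void)
{
	pci_unregister_driver(&example_pci_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");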
drivers/ata/ata_generic.c (new file, 252 lines)

@ -0,0 +1,252 @@
/*
* ata_generic.c - Generic PATA/SATA controller driver.
* Copyright 2005 Red Hat Inc <alan@redhat.com>, all rights reserved.
*
* Elements from ide/pci/generic.c
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Portions (C) Copyright 2002 Red Hat Inc <alan@redhat.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Driver for PCI IDE interfaces implementing the standard bus mastering
* interface functionality. This assumes the BIOS did the drive set up and
* tuning for us. By default we do not grab all IDE class devices as they
* may have other drivers or need fixups to avoid problems. Instead we keep
* a default list of stuff without documentation/driver that appears to
* work.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "ata_generic"
#define DRV_VERSION "0.2.6"
/*
* A generic parallel ATA driver using libata
*/
/**
* generic_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int generic_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* generic_error_handler - error handling for the generic driver
* @ap: port on which to run error handling
*
* LOCKING:
* None (inherited from caller).
*/
static void generic_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, generic_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* generic_set_mode - mode setting
* @ap: interface to set up
*
* Use a non-standard set_mode function. We don't want to be tuned.
* The BIOS configured everything. Our job is not to fiddle. We
* read the dma enabled bits from the PCI configuration of the device
* and respect them.
*/
static void generic_set_mode(struct ata_port *ap)
{
int dma_enabled = 0;
int i;
/* Bits 5 and 6 indicate if DMA is active on master/slave */
if (ap->ioaddr.bmdma_addr)
dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->dma_mode = XFER_MW_DMA_0;
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (dma_enabled & (1 << (5 + i))) {
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
}
}
static struct scsi_host_template generic_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations generic_port_ops = {
.set_mode = generic_set_mode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.data_xfer = ata_pio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = generic_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int all_generic_ide; /* Set to claim all devices */
/**
* ata_generic_init_one - attach generic IDE
* @dev: PCI device found
* @id: match entry
*
* Called each time a matching IDE interface is found. We check if the
* interface is one we wish to claim and if so we perform any chip
* specific hacks then let the ATA layer do the heavy lifting.
*/
static int ata_generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
u16 command;
static struct ata_port_info info = {
.sht = &generic_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &generic_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
/* Don't use the generic entry unless instructed to do so */
if (id->driver_data == 1 && all_generic_ide == 0)
return -ENODEV;
/* Devices that need care */
if (dev->vendor == PCI_VENDOR_ID_UMC &&
dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
(!(PCI_FUNC(dev->devfn) & 1)))
return -ENODEV;
if (dev->vendor == PCI_VENDOR_ID_OPTI &&
dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
(!(PCI_FUNC(dev->devfn) & 1)))
return -ENODEV;
/* Don't re-enable devices in generic mode or we will break some
motherboards with disabled and unused IDE controllers */
pci_read_config_word(dev, PCI_COMMAND, &command);
if (!(command & PCI_COMMAND_IO))
return -ENODEV;
if (dev->vendor == PCI_VENDOR_ID_AL)
ata_pci_clear_simplex(dev);
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id ata_generic[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8673F), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886A), },
{ PCI_DEVICE(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF), },
{ PCI_DEVICE(PCI_VENDOR_ID_HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C561), },
{ PCI_DEVICE(PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C558), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA,PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), },
/* Must come last. If you add entries adjust this table appropriately */
{ PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 1},
{ 0, },
};
static struct pci_driver ata_generic_pci_driver = {
.name = DRV_NAME,
.id_table = ata_generic,
.probe = ata_generic_init_one,
.remove = ata_pci_remove_one
};
static int __init ata_generic_init(void)
{
return pci_module_init(&ata_generic_pci_driver);
}
static void __exit ata_generic_exit(void)
{
pci_unregister_driver(&ata_generic_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for generic ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ata_generic);
MODULE_VERSION(DRV_VERSION);
module_init(ata_generic_init);
module_exit(ata_generic_exit);
module_param(all_generic_ide, int, 0);
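/* Loading the module with all_generic_ide=1 lets the catch-all
 * PCI_CLASS_STORAGE_IDE entry above claim otherwise unhandled IDE-class
 * devices; by default only the explicitly listed IDs are bound.
 */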

drivers/ata/ata_piix.c

@ -93,7 +93,7 @@
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.00"
#define DRV_VERSION "2.00ac6"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
@ -116,15 +116,18 @@ enum {
PIIX_80C_SEC = (1 << 7) | (1 << 6),
/* controller IDs */
piix4_pata = 0,
ich5_pata = 1,
ich5_sata = 2,
esb_sata = 3,
ich6_sata = 4,
ich6_sata_ahci = 5,
ich6m_sata_ahci = 6,
ich7m_sata_ahci = 7,
ich8_sata_ahci = 8,
piix_pata_33 = 0, /* PIIX3 or 4 at 33 MHz */
ich_pata_33 = 1, /* ICH up to UDMA 33 only */
ich_pata_66 = 2, /* ICH up to 66 MHz */
ich_pata_100 = 3, /* ICH up to UDMA 100 */
ich_pata_133 = 4, /* ICH up to UDMA 133 */
ich5_sata = 5,
esb_sata = 6,
ich6_sata = 7,
ich6_sata_ahci = 8,
ich6m_sata_ahci = 9,
ich7m_sata_ahci = 10,
ich8_sata_ahci = 11,
/* constants for mapping table */
P0 = 0, /* port 0 */
@ -152,20 +155,55 @@ struct piix_host_priv {
static int piix_init_one (struct pci_dev *pdev,
const struct pci_device_id *ent);
static void piix_host_stop(struct ata_host_set *host_set);
static void piix_host_stop(struct ata_host *host);
static void piix_pata_error_handler(struct ata_port *ap);
static void ich_pata_error_handler(struct ata_port *ap);
static void piix_sata_error_handler(struct ata_port *ap);
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev);
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static void piix_pata_error_handler(struct ata_port *ap);
static void piix_sata_error_handler(struct ata_port *ap);
static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev);
static unsigned int in_module_init = 1;
static const struct pci_device_id piix_pci_tbl[] = {
#ifdef ATA_ENABLE_PATA
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata },
{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
{ 0x8086, 0x27df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata },
/* Intel PIIX4 for the 430TX/440BX/MX chipset: UDMA 33 */
/* Also PIIX4E (fn3 rev 2) and PIIX4M (fn3 rev 3) */
{ 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
{ 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
{ 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel PIIX4 */
{ 0x8086, 0x7199, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX4 */
{ 0x8086, 0x7601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel PIIX */
{ 0x8086, 0x84CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix_pata_33 },
/* Intel ICH (i810, i815, i840) UDMA 66*/
{ 0x8086, 0x2411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_66 },
/* Intel ICH0 : UDMA 33*/
{ 0x8086, 0x2421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_33 },
/* Intel ICH2M */
{ 0x8086, 0x244A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH2 (i810E2, i845, 850, 860) UDMA 100 */
{ 0x8086, 0x244B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH3M */
{ 0x8086, 0x248A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH3 (E7500/1) UDMA 100 */
{ 0x8086, 0x248B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH4 (i845GV, i845E, i852, i855) UDMA 100 */
{ 0x8086, 0x24CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
{ 0x8086, 0x24CB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* Intel ICH5 */
{ 0x8086, 0x24DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
/* C-ICH (i810E2) */
{ 0x8086, 0x245B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ESB (855GME/875P + 6300ESB) UDMA 100 */
{ 0x8086, 0x25A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ICH6 (and 6) (i915) UDMA 100 */
{ 0x8086, 0x266F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
/* ICH7/7-R (i945, i975) UDMA 100*/
{ 0x8086, 0x27DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_133 },
{ 0x8086, 0x269E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich_pata_100 },
#endif
/* NOTE: The following PCI ids must be kept in sync with the
@ -264,6 +302,39 @@ static const struct ata_port_operations piix_pata_ops = {
.host_stop = piix_host_stop,
};
static const struct ata_port_operations ich_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = piix_set_piomode,
.set_dmamode = ich_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ich_pata_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
static const struct ata_port_operations piix_sata_ops = {
.port_disable = ata_port_disable,
@ -379,38 +450,59 @@ static const struct piix_map_db *piix_map_db_table[] = {
};
static struct ata_port_info piix_port_info[] = {
/* piix4_pata */
/* piix_pata_33: 0: PIIX3 or 4 at 33MHz */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
#if 0
.mwdma_mask = 0x06, /* mwdma1-2 */
#else
.mwdma_mask = 0x00, /* mwdma broken */
#endif
.mwdma_mask = 0x06, /* mwdma1-2 ?? CHECK 0 should be ok but slow */
.udma_mask = ATA_UDMA_MASK_40C,
.port_ops = &piix_pata_ops,
},
/* ich5_pata */
/* ich_pata_33: 1 ICH0 - ICH at 33 MHz */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
.pio_mask = 0x1f, /* pio0-4 */
#if 0
.mwdma_mask = 0x06, /* mwdma1-2 */
#else
.mwdma_mask = 0x00, /* mwdma broken */
#endif
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &piix_pata_ops,
.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f, /* pio 0-4 */
.mwdma_mask = 0x06, /* Check: maybe 0x07 */
.udma_mask = ATA_UDMA2, /* UDMA33 */
.port_ops = &ich_pata_ops,
},
/* ich_pata_66: 2 ICH controllers up to 66MHz */
{
.sht = &piix_sht,
.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f, /* pio 0-4 */
.mwdma_mask = 0x06, /* MWDMA0 is broken on chip */
.udma_mask = ATA_UDMA4,
.port_ops = &ich_pata_ops,
},
/* ich5_sata */
/* ich_pata_100: 3 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x06, /* mwdma1-2 */
.udma_mask = ATA_UDMA5, /* udma0-5 */
.port_ops = &ich_pata_ops,
},
/* ich_pata_133: 4 ICH with full UDMA6 */
{
.sht = &piix_sht,
.flags = ATA_FLAG_SRST | ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR,
.pio_mask = 0x1f, /* pio 0-4 */
.mwdma_mask = 0x06, /* Check: maybe 0x07 */
.udma_mask = ATA_UDMA6, /* UDMA133 */
.port_ops = &ich_pata_ops,
},
/* ich5_sata: 5 */
{
.sht = &piix_sht,
.flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR |
PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -418,10 +510,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* i6300esb_sata */
/* i6300esb_sata: 6 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -429,10 +521,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* ich6_sata */
/* ich6_sata: 7 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -440,10 +532,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* ich6_sata_ahci */
/* ich6_sata_ahci: 8 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -452,10 +544,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* ich6m_sata_ahci */
/* ich6m_sata_ahci: 9 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -464,10 +556,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* ich7m_sata_ahci */
/* ich7m_sata_ahci: 10 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -476,10 +568,10 @@ static struct ata_port_info piix_port_info[] = {
.port_ops = &piix_sata_ops,
},
/* ich8_sata_ahci */
/* ich8_sata_ahci: 11 */
{
.sht = &piix_sht,
.host_flags = ATA_FLAG_SATA |
.flags = ATA_FLAG_SATA |
PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR |
PIIX_FLAG_AHCI,
.pio_mask = 0x1f, /* pio0-4 */
@ -487,6 +579,7 @@ static struct ata_port_info piix_port_info[] = {
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &piix_sata_ops,
},
};
static struct pci_bits piix_enable_bits[] = {
@ -515,9 +608,10 @@ MODULE_PARM_DESC(force_pcs, "force honoring or ignoring PCS to work around "
* LOCKING:
* None (inherited from caller).
*/
static void piix_pata_cbl_detect(struct ata_port *ap)
static void ich_pata_cbl_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp, mask;
/* no 80c support in host controller? */
@ -525,7 +619,7 @@ static void piix_pata_cbl_detect(struct ata_port *ap)
goto cbl40;
/* check BIOS cable detect results */
mask = ap->hard_port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC;
pci_read_config_byte(pdev, PIIX_IOCFG, &tmp);
if ((tmp & mask) == 0)
goto cbl40;
@ -535,30 +629,26 @@ static void piix_pata_cbl_detect(struct ata_port *ap)
cbl40:
ap->cbl = ATA_CBL_PATA40;
ap->udma_mask &= ATA_UDMA_MASK_40C;
}
/**
* piix_pata_prereset - prereset for PATA host controller
* @ap: Target port
*
* Prereset including cable detection.
*
* LOCKING:
* None (inherited from caller).
*/
static int piix_pata_prereset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->hard_port_no])) {
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
return 0;
}
piix_pata_cbl_detect(ap);
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
@ -568,6 +658,36 @@ static void piix_pata_error_handler(struct ata_port *ap)
ata_std_postreset);
}
/**
* ich_pata_prereset - prereset for PATA host controller
* @ap: Target port
*
*
* LOCKING:
* None (inherited from caller).
*/
static int ich_pata_prereset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) {
ata_port_printk(ap, KERN_INFO, "port disabled. ignoring.\n");
ap->eh_context.i.action &= ~ATA_EH_RESET_MASK;
return 0;
}
ich_pata_cbl_detect(ap);
return ata_std_prereset(ap);
}
static void ich_pata_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, ich_pata_prereset, ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* piix_sata_present_mask - determine present mask for SATA host controller
* @ap: Target port
@ -583,10 +703,10 @@ static void piix_pata_error_handler(struct ata_port *ap)
*/
static unsigned int piix_sata_present_mask(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct piix_host_priv *hpriv = ap->host_set->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct piix_host_priv *hpriv = ap->host->private_data;
const unsigned int *map = hpriv->map;
int base = 2 * ap->hard_port_no;
int base = 2 * ap->port_no;
unsigned int present_mask = 0;
int port, i;
u16 pcs;
@ -663,12 +783,90 @@ static void piix_sata_error_handler(struct ata_port *ap)
static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int is_slave = (adev->devno != 0);
unsigned int master_port= ap->hard_port_no ? 0x42 : 0x40;
unsigned int master_port= ap->port_no ? 0x42 : 0x40;
unsigned int slave_port = 0x44;
u16 master_data;
u8 slave_data;
u8 udma_enable;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for ICH controllers.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio >= 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE enable */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
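/*
 * Worked example (illustrative, derived from the checks above and the
 * ISP/RTC table): an ATA disk negotiated to PIO4 with IORDY gets
 * control = PPE|IE|TIME1 = 0x7 and timings[4] = { ISP 2, RTC 3 }.
 */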
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
/* Enable SITRE (separate slave timing register) */
master_data |= 0x4000;
/* enable PPE1, IE1 and TIME1 as needed */
master_data |= (control << 4);
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->port_no ? 0x0f : 0xf0);
/* Load the timing nibble for this slave */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
} else {
/* Master keeps the bits in a different format */
master_data &= 0xccf8;
/* Enable PPE, IE and TIME as appropriate */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
/* Ensure the UDMA bit is off - it will be turned back on if
UDMA is selected */
if (ap->udma_mask) {
pci_read_config_byte(dev, 0x48, &udma_enable);
udma_enable &= ~(1 << (2 * ap->port_no + adev->devno));
pci_write_config_byte(dev, 0x48, udma_enable);
}
}
/**
* do_pata_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
* @isich: set if the chip is an ICH device
*
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
@ -678,35 +876,99 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{ 2, 3 }, };
pci_read_config_word(dev, master_port, &master_data);
if (is_slave) {
master_data |= 0x4000;
/* enable PPE, IE and TIME */
master_data |= 0x0070;
pci_read_config_byte(dev, slave_port, &slave_data);
slave_data &= (ap->hard_port_no ? 0x0f : 0xf0);
slave_data |=
(timings[pio][0] << 2) |
(timings[pio][1] << (ap->hard_port_no ? 4 : 0));
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
u16 ideconf;
int u_clock, u_speed;
/*
* UDMA is handled by a combination of clock switching and
* selection of dividers
*
* Handy rule: Odd modes are UDMATIMx 01, even are 02
* except UDMA0 which is 00
*/
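/*
 * Worked example of the rule above (illustrative only): UDMA5 is odd,
 * so the UDMATIMx field becomes 01 and the 100MHz clock is selected;
 * UDMA4 is even, so UDMATIMx is 02 on the 66MHz clock; UDMA0 stays
 * at 00 on the base 33MHz clock.
 */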
u_speed = min(2 - (udma & 1), udma);
if (udma == 5)
u_clock = 0x1000; /* 100MHz */
else if (udma > 2)
u_clock = 1; /* 66MHz */
else
u_clock = 0; /* 33MHz */
udma_enable |= (1 << devid);
/* Load the CT/RP selection */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(3 << (4 * devid));
udma_timing |= u_speed << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
if (isich) {
/* Select a 33/66/100MHz clock */
pci_read_config_word(dev, 0x54, &ideconf);
ideconf &= ~(0x1001 << devid);
ideconf |= u_clock << devid;
/* For ICH or later we should set bit 10 for better
performance (WR_PingPong_En) */
pci_write_config_word(dev, 0x54, ideconf);
}
} else {
master_data &= 0xccf8;
/* enable PPE, IE and TIME */
master_data |= 0x0007;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive's MWDMA mode needs faster timings than its PIO mode
can provide, we must force the PIO cycles into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
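/*
 * Worked example (illustrative, derived from needed_pio[] above): a
 * drive selecting MWDMA2 needs PIO4 command timings; if it only
 * reports PIO2, the DMAONLY bit (8) is set so PIO cycles stay at PIO0
 * while the MWDMA2 timings apply to DMA transfers only.
 */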
if (adev->devno) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= (0x0F + 0xE1 * ap->port_no);
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_word(dev, master_port, master_data);
if (is_slave)
pci_write_config_byte(dev, slave_port, slave_data);
/* Don't scribble on 0x48 if the controller does not support UDMA */
if (ap->udma_mask)
pci_write_config_byte(dev, 0x48, udma_enable);
}
/**
* piix_set_dmamode - Initialize host controller PATA PIO timings
* piix_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set UDMA mode for device, in host controller PCI config space.
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
@ -714,68 +976,23 @@ static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev)
static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */
struct pci_dev *dev = to_pci_dev(ap->host_set->dev);
u8 maslave = ap->hard_port_no ? 0x42 : 0x40;
u8 speed = udma;
unsigned int drive_dn = (ap->hard_port_no ? 2 : 0) + adev->devno;
int a_speed = 3 << (drive_dn * 4);
int u_flag = 1 << drive_dn;
int v_flag = 0x01 << drive_dn;
int w_flag = 0x10 << drive_dn;
int u_speed = 0;
int sitre;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
do_pata_set_dmamode(ap, adev, 0);
}
pci_read_config_word(dev, maslave, &reg4042);
DPRINTK("reg4042 = 0x%04x\n", reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
pci_read_config_byte(dev, 0x48, &reg48);
pci_read_config_word(dev, 0x4a, &reg4a);
pci_read_config_byte(dev, 0x54, &reg54);
pci_read_config_byte(dev, 0x55, &reg55);
/**
* ich_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Drive in question
*
* Set MW/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
switch(speed) {
case XFER_UDMA_4:
case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break;
case XFER_UDMA_6:
case XFER_UDMA_5:
case XFER_UDMA_3:
case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break;
case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break;
case XFER_MW_DMA_2:
case XFER_MW_DMA_1: break;
default:
BUG();
return;
}
if (speed >= XFER_UDMA_0) {
if (!(reg48 & u_flag))
pci_write_config_byte(dev, 0x48, reg48 | u_flag);
if (speed == XFER_UDMA_5) {
pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
} else {
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
}
if ((reg4a & a_speed) != u_speed)
pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
if (speed > XFER_UDMA_2) {
if (!(reg54 & v_flag))
pci_write_config_byte(dev, 0x54, reg54 | v_flag);
} else
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
if (reg4a & a_speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (reg54 & v_flag)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
if (reg55 & w_flag)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
}
static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
do_pata_set_dmamode(ap, adev, 1);
}
#define AHCI_PCI_BAR 5
@ -867,13 +1084,13 @@ static void __devinit piix_init_pcs(struct pci_dev *pdev,
if (force_pcs == 1) {
dev_printk(KERN_INFO, &pdev->dev,
"force ignoring PCS (0x%x)\n", new_pcs);
pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS;
pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS;
} else if (force_pcs == 2) {
dev_printk(KERN_INFO, &pdev->dev,
"force honoring PCS (0x%x)\n", new_pcs);
pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS;
pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS;
}
}
@ -904,7 +1121,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
case IDE:
WARN_ON((i & 1) || map[i + 1] != IDE);
pinfo[i / 2] = piix_port_info[ich5_pata];
pinfo[i / 2] = piix_port_info[ich_pata_100];
pinfo[i / 2].private_data = hpriv;
i++;
printk(" IDE IDE");
@ -913,7 +1130,7 @@ static void __devinit piix_init_sata_map(struct pci_dev *pdev,
default:
printk(" P%d", map[i]);
if (i & 1)
pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS;
pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS;
break;
}
}
@ -948,7 +1165,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_port_info port_info[2];
struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] };
struct piix_host_priv *hpriv;
unsigned long host_flags;
unsigned long port_flags;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
@ -967,9 +1184,9 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
port_info[0].private_data = hpriv;
port_info[1].private_data = hpriv;
host_flags = port_info[0].host_flags;
port_flags = port_info[0].flags;
if (host_flags & PIIX_FLAG_AHCI) {
if (port_flags & PIIX_FLAG_AHCI) {
u8 tmp;
pci_read_config_byte(pdev, PIIX_SCC, &tmp);
if (tmp == PIIX_AHCI_DEVICE) {
@ -980,7 +1197,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Initialize SATA map */
if (host_flags & ATA_FLAG_SATA) {
if (port_flags & ATA_FLAG_SATA) {
piix_init_sata_map(pdev, port_info,
piix_map_db_table[ent->driver_data]);
piix_init_pcs(pdev, port_info,
@ -993,7 +1210,7 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
* MSI is disabled (and it is disabled, as we don't use
* message-signalled interrupts currently).
*/
if (host_flags & PIIX_FLAG_CHECKINTR)
if (port_flags & PIIX_FLAG_CHECKINTR)
pci_intx(pdev, 1);
if (piix_check_450nx_errata(pdev)) {
@ -1008,19 +1225,21 @@ static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return ata_pci_init_one(pdev, ppinfo, 2);
}
static void piix_host_stop(struct ata_host_set *host_set)
static void piix_host_stop(struct ata_host *host)
{
if (host_set->next == NULL)
kfree(host_set->private_data);
ata_host_stop(host_set);
struct piix_host_priv *hpriv = host->private_data;
ata_host_stop(host);
kfree(hpriv);
}
static int __init piix_init(void)
{
int rc;
DPRINTK("pci_module_init\n");
rc = pci_module_init(&piix_pci_driver);
DPRINTK("pci_register_driver\n");
rc = pci_register_driver(&piix_pci_driver);
if (rc)
return rc;
@ -1037,4 +1256,3 @@ static void __exit piix_exit(void)
module_init(piix_init);
module_exit(piix_exit);

File diff suppressed because it is too large

View File

@ -38,7 +38,7 @@
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include "scsi_transport_api.h"
#include "../scsi/scsi_transport_api.h"
#include <linux/libata.h>
@ -199,7 +199,7 @@ void ata_scsi_error(struct Scsi_Host *host)
/* synchronize with port task */
ata_port_flush_task(ap);
/* synchronize with host_set lock and sort out timeouts */
/* synchronize with host lock and sort out timeouts */
/* For new EH, all qcs are finished in one of three ways -
* normal completion, error completion, and SCSI timeout.
@ -376,7 +376,7 @@ void ata_port_wait_eh(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
/* make sure SCSI EH is complete */
if (scsi_host_in_recovery(ap->host)) {
if (scsi_host_in_recovery(ap->scsi_host)) {
msleep(10);
goto retry;
}
@ -485,7 +485,7 @@ void ata_eng_timeout(struct ata_port *ap)
* other commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
@ -512,14 +512,14 @@ void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
* all commands are drained.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_port_schedule_eh(struct ata_port *ap)
{
WARN_ON(!ap->ops->error_handler);
ap->pflags |= ATA_PFLAG_EH_PENDING;
scsi_schedule_eh(ap->host);
scsi_schedule_eh(ap->scsi_host);
DPRINTK("port EH scheduled\n");
}
@ -531,7 +531,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
* Abort all active qc's of @ap and schedule EH.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted qc's.
@ -574,7 +574,7 @@ int ata_port_abort(struct ata_port *ap)
* is frozen.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void __ata_port_freeze(struct ata_port *ap)
{
@ -595,7 +595,7 @@ static void __ata_port_freeze(struct ata_port *ap)
* Abort and freeze @ap.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Number of aborted commands.

View File

@ -321,7 +321,7 @@ int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
* current command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Command allocated, or %NULL if none available.
@ -400,7 +400,7 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
/**
* ata_scsi_device_suspend - suspend ATA device associated with sdev
* @sdev: the SCSI device to suspend
* @state: target power management state
* @mesg: target power management message
*
* Request suspend EH action on the ATA device associated with
* @sdev and wait for the operation to complete.
@ -411,7 +411,7 @@ void ata_dump_status(unsigned id, struct ata_taskfile *tf)
* RETURNS:
* 0 on success, -errno otherwise.
*/
int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t mesg)
{
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
@ -438,7 +438,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
/* request suspend */
action = ATA_EH_SUSPEND;
if (state.event != PM_EVENT_SUSPEND)
if (mesg.event != PM_EVENT_SUSPEND)
action |= ATA_EH_PM_FREEZE;
ap->eh_info.dev_action[dev->devno] |= action;
ap->eh_info.flags |= ATA_EHI_QUIET;
@ -463,7 +463,7 @@ int ata_scsi_device_suspend(struct scsi_device *sdev, pm_message_t state)
spin_unlock_irqrestore(ap->lock, flags);
out:
if (rc == 0)
sdev->sdev_gendev.power.power_state = state;
sdev->sdev_gendev.power.power_state = mesg;
return rc;
}
@ -537,7 +537,7 @@ int ata_scsi_device_resume(struct scsi_device *sdev)
* format sense blocks.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
u8 *ascq, int verbose)
@ -649,7 +649,7 @@ void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc,
* block. Clear sense key, ASC & ASCQ if there is no error.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
{
@ -918,7 +918,7 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
* [See SAT revision 5 at www.t10.org]
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -986,7 +986,7 @@ invalid_fld:
* FLUSH CACHE EXT.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1109,7 +1109,7 @@ static void scsi_16_lba_len(const u8 *scsicmd, u64 *plba, u32 *plen)
* Converts SCSI VERIFY command to an ATA READ VERIFY command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1233,7 +1233,7 @@ nothing_to_do:
* %WRITE_16 are currently supported.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on error.
@ -1467,7 +1467,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
* issued to @dev.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if deferring is needed, 0 otherwise.
@ -1510,7 +1510,7 @@ static int ata_scmd_need_defer(struct ata_device *dev, int is_io)
* termination.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
@ -1589,7 +1589,7 @@ defer:
* Maps buffer contained within SCSI command @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Length of response buffer.
@ -1623,7 +1623,7 @@ static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
* Unmaps response buffer contained within @cmd.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
@ -1649,7 +1649,7 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
* and sense buffer are assumed to be set).
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
@ -1680,7 +1680,7 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
* with non-VPD INQUIRY command output.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
@ -1736,7 +1736,7 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
* Returns list of inquiry VPD pages available.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
@ -1764,7 +1764,7 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
* Returns ATA device serial number.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
@ -1797,7 +1797,7 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
* name ("ATA "), model and serial numbers.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
@ -1849,7 +1849,7 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
* that the caller should successfully complete this SCSI command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
@ -1990,7 +1990,7 @@ static int ata_dev_supports_fua(u16 *id)
* descriptor for other device types.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
@ -2129,7 +2129,7 @@ saving_not_supp:
* Simulate READ CAPACITY commands.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
@ -2204,7 +2204,7 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
* Simulate REPORT LUNS command.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
@ -2256,7 +2256,7 @@ void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
* and the specified additional sense codes.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
@ -2421,7 +2421,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
* @scsicmd: SCSI CDB associated with this PACKET command
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Zero on success, non-zero on failure.
@ -2500,7 +2500,7 @@ static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
* Determine if commands should be sent to the specified device.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 0 if commands are not allowed / 1 if commands are allowed
@ -2534,7 +2534,7 @@ static int ata_scsi_dev_enabled(struct ata_device *dev)
* SCSI command to be sent.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* Associated ATA device, or %NULL if not found.
@ -2808,7 +2808,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *cmd,
* ATA and ATAPI devices appearing as SCSI devices.
*
* LOCKING:
* Releases scsi-layer-held lock, and obtains host_set lock.
* Releases scsi-layer-held lock, and obtains host lock.
*
* RETURNS:
* Return value from __ata_scsi_queuecmd() if @cmd can be queued,
@ -2852,7 +2852,7 @@ int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
* that can be handled internally.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
@ -2944,7 +2944,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
if (!ata_dev_enabled(dev) || dev->sdev)
continue;
sdev = __scsi_add_device(ap->host, 0, i, 0, NULL);
sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL);
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
scsi_device_put(sdev);
@ -2958,11 +2958,11 @@ void ata_scsi_scan_host(struct ata_port *ap)
*
* This function is called from ata_eh_hotplug() and responsible
* for taking the SCSI device attached to @dev offline. This
* function is called with host_set lock which protects dev->sdev
* function is called with host lock which protects dev->sdev
* against clearing.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*
* RETURNS:
* 1 if attached SCSI device exists, 0 otherwise.
@ -2998,16 +2998,16 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
* be removed if there is __scsi_device_get() interface which
* increments reference counts regardless of device state.
*/
mutex_lock(&ap->host->scan_mutex);
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
/* clearing dev->sdev is protected by host_set lock */
/* clearing dev->sdev is protected by host lock */
sdev = dev->sdev;
dev->sdev = NULL;
if (sdev) {
/* If user initiated unplug races with us, sdev can go
* away underneath us after the host_set lock and
* away underneath us after the host lock and
* scan_mutex are released. Hold onto it.
*/
if (scsi_device_get(sdev) == 0) {
@ -3024,7 +3024,7 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
}
spin_unlock_irqrestore(ap->lock, flags);
mutex_unlock(&ap->host->scan_mutex);
mutex_unlock(&ap->scsi_host->scan_mutex);
if (sdev) {
ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n",
@ -3171,3 +3171,152 @@ void ata_scsi_dev_rescan(void *data)
scsi_rescan_device(&(dev->sdev->sdev_gendev));
}
}
/**
* ata_sas_port_alloc - Allocate port for a SAS attached SATA device
* @host: ATA host that the allocated port will be attached to
* @port_info: Information from low-level host driver
* @shost: SCSI host that the scsi device is attached to
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* ata_port pointer on success / NULL on failure.
*/
struct ata_port *ata_sas_port_alloc(struct ata_host *host,
struct ata_port_info *port_info,
struct Scsi_Host *shost)
{
struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL);
struct ata_probe_ent *ent;
if (!ap)
return NULL;
ent = ata_probe_ent_alloc(host->dev, port_info);
if (!ent) {
kfree(ap);
return NULL;
}
ata_port_init(ap, host, ent, 0);
ap->lock = shost->host_lock;
kfree(ent);
return ap;
}
EXPORT_SYMBOL_GPL(ata_sas_port_alloc);
/**
* ata_sas_port_start - Set port up for dma.
* @ap: Port to initialize
*
* Called just after data structures for each port are
* initialized. Allocates DMA pad.
*
* May be used as the port_start() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
int ata_sas_port_start(struct ata_port *ap)
{
return ata_pad_alloc(ap, ap->dev);
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);
/**
* ata_sas_port_stop - Undo ata_sas_port_start()
* @ap: Port to shut down
*
* Frees the DMA pad.
*
* May be used as the port_stop() entry in ata_port_operations.
*
* LOCKING:
* Inherited from caller.
*/
void ata_sas_port_stop(struct ata_port *ap)
{
ata_pad_free(ap, ap->dev);
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);
/**
* ata_sas_port_init - Initialize a SATA device
* @ap: SATA port to initialize
*
* LOCKING:
* PCI/etc. bus probe sem.
*
* RETURNS:
* Zero on success, non-zero on error.
*/
int ata_sas_port_init(struct ata_port *ap)
{
int rc = ap->ops->port_start(ap);
if (!rc)
rc = ata_bus_probe(ap);
return rc;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);
/**
* ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
* @ap: SATA port to destroy
*
*/
void ata_sas_port_destroy(struct ata_port *ap)
{
ap->ops->port_stop(ap);
kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);
/**
* ata_sas_slave_configure - Default slave_config routine for libata devices
* @sdev: SCSI device to configure
* @ap: ATA port to which SCSI device is attached
*
* RETURNS:
* Zero.
*/
int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
ata_scsi_sdev_config(sdev);
ata_scsi_dev_config(sdev, ap->device);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);
/**
* ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
* @cmd: SCSI command to be sent
* @done: Completion function, called when command is complete
* @ap: ATA port to which the command is being sent
*
* RETURNS:
* Zero.
*/
int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap)
{
ata_scsi_dump_cdb(ap, cmd);
if (likely(ata_scsi_dev_enabled(ap->device)))
__ata_scsi_queuecmd(cmd, done, ap->device);
else {
cmd->result = (DID_BAD_TARGET << 16);
done(cmd);
}
return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_queuecmd);

View File

@ -193,7 +193,7 @@ void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
@ -216,7 +216,7 @@ static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile
* FIXME: missing write posting for 400nS delay enforcement
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
@ -237,7 +237,7 @@ static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
* synchronization with interrupt handler / other threads.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
@ -422,7 +422,7 @@ u8 ata_altstatus(struct ata_port *ap)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@ -452,7 +452,7 @@ static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
@ -483,7 +483,7 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
@ -511,7 +511,7 @@ static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
@ -535,7 +535,7 @@ static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
* May be used as the bmdma_start() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
@ -557,7 +557,7 @@ void ata_bmdma_start(struct ata_queued_cmd *qc)
* May be used as the bmdma_setup() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
@ -577,7 +577,7 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
* May be used as the irq_clear() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_irq_clear(struct ata_port *ap)
@ -605,7 +605,7 @@ void ata_bmdma_irq_clear(struct ata_port *ap)
* May be used as the bmdma_status() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
u8 ata_bmdma_status(struct ata_port *ap)
@ -629,7 +629,7 @@ u8 ata_bmdma_status(struct ata_port *ap)
* May be used as the bmdma_stop() entry in ata_port_operations.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
void ata_bmdma_stop(struct ata_queued_cmd *qc)
@ -797,32 +797,6 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
}
#ifdef CONFIG_PCI
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
struct ata_probe_ent *probe_ent;
probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
if (!probe_ent) {
printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
kobject_name(&(dev->kobj)));
return NULL;
}
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->dev = dev;
probe_ent->sht = port->sht;
probe_ent->host_flags = port->host_flags;
probe_ent->pio_mask = port->pio_mask;
probe_ent->mwdma_mask = port->mwdma_mask;
probe_ent->udma_mask = port->udma_mask;
probe_ent->port_ops = port->port_ops;
return probe_ent;
}
/**
* ata_pci_init_native_mode - Initialize native-mode driver
* @pdev: pci device to be initialized
@ -864,7 +838,7 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
bmdma = pci_resource_start(pdev, 4);
if (bmdma) {
if (inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
}
ata_std_ports(&probe_ent->port[p]);
@ -880,10 +854,11 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
if (bmdma) {
bmdma += 8;
if(inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
}
ata_std_ports(&probe_ent->port[p]);
probe_ent->pinfo2 = port[1];
p++;
}
@ -893,44 +868,49 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
struct ata_port_info *port, int port_num)
struct ata_port_info **port, int port_mask)
{
struct ata_probe_ent *probe_ent;
unsigned long bmdma;
unsigned long bmdma = pci_resource_start(pdev, 4);
probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
if (!probe_ent)
return NULL;
probe_ent->legacy_mode = 1;
probe_ent->n_ports = 1;
probe_ent->hard_port_no = port_num;
probe_ent->private_data = port->private_data;
probe_ent->n_ports = 2;
probe_ent->private_data = port[0]->private_data;
switch(port_num)
{
case 0:
probe_ent->irq = 14;
probe_ent->port[0].cmd_addr = 0x1f0;
probe_ent->port[0].altstatus_addr =
probe_ent->port[0].ctl_addr = 0x3f6;
break;
case 1:
if (port_mask & ATA_PORT_PRIMARY) {
probe_ent->irq = 14;
probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
probe_ent->port[0].altstatus_addr =
probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
if (bmdma) {
probe_ent->port[0].bmdma_addr = bmdma;
if (inb(bmdma + 2) & 0x80)
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[0]);
} else
probe_ent->dummy_port_mask |= ATA_PORT_PRIMARY;
if (port_mask & ATA_PORT_SECONDARY) {
if (probe_ent->irq)
probe_ent->irq2 = 15;
else
probe_ent->irq = 15;
probe_ent->port[0].cmd_addr = 0x170;
probe_ent->port[0].altstatus_addr =
probe_ent->port[0].ctl_addr = 0x376;
break;
}
bmdma = pci_resource_start(pdev, 4);
if (bmdma != 0) {
bmdma += 8 * port_num;
probe_ent->port[0].bmdma_addr = bmdma;
if (inb(bmdma + 2) & 0x80)
probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[0]);
probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
probe_ent->port[1].altstatus_addr =
probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
if (bmdma) {
probe_ent->port[1].bmdma_addr = bmdma + 8;
if (inb(bmdma + 10) & 0x80)
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[1]);
probe_ent->pinfo2 = port[1];
} else
probe_ent->dummy_port_mask |= ATA_PORT_SECONDARY;
return probe_ent;
}
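/*
 * Worked example (illustrative): with only the primary channel enabled
 * (port_mask = ATA_PORT_PRIMARY) the probe_ent gets irq 14, a command
 * block at ATA_PRIMARY_CMD (0x1F0), control at ATA_PRIMARY_CTL (0x3F6)
 * and, if present, a BMDMA base taken from PCI BAR 4, while the
 * secondary port is recorded in dummy_port_mask.
 */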
@ -950,6 +930,10 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
* regions, sets the dma mask, enables bus master mode, and calls
* ata_device_add()
*
* ASSUMPTION:
* Nobody makes a single channel controller that appears solely as
* the secondary legacy port on PCI.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
@ -960,7 +944,7 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports)
{
struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
struct ata_probe_ent *probe_ent = NULL;
struct ata_port_info *port[2];
u8 tmp8, mask;
unsigned int legacy_mode = 0;
@ -975,7 +959,7 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
else
port[1] = port[0];
if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0
&& (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
/* TODO: What if one channel is in native mode ... */
pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
@ -1009,35 +993,44 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
goto err_out;
}
/* FIXME: Should use platform specific mappers for legacy port ranges */
if (legacy_mode) {
if (!request_region(0x1f0, 8, "libata")) {
if (!request_region(ATA_PRIMARY_CMD, 8, "libata")) {
struct resource *conflict, res;
res.start = 0x1f0;
res.end = 0x1f0 + 8 - 1;
res.start = ATA_PRIMARY_CMD;
res.end = ATA_PRIMARY_CMD + 8 - 1;
conflict = ____request_resource(&ioport_resource, &res);
while (conflict->child)
conflict = ____request_resource(conflict, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 0);
legacy_mode |= ATA_PORT_PRIMARY;
else {
disable_dev_on_err = 0;
printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
printk(KERN_WARNING "ata: 0x%0X IDE port busy\n" \
"ata: conflict with %s\n",
ATA_PRIMARY_CMD,
conflict->name);
}
} else
legacy_mode |= (1 << 0);
legacy_mode |= ATA_PORT_PRIMARY;
if (!request_region(0x170, 8, "libata")) {
if (!request_region(ATA_SECONDARY_CMD, 8, "libata")) {
struct resource *conflict, res;
res.start = 0x170;
res.end = 0x170 + 8 - 1;
res.start = ATA_SECONDARY_CMD;
res.end = ATA_SECONDARY_CMD + 8 - 1;
conflict = ____request_resource(&ioport_resource, &res);
while (conflict->child)
conflict = ____request_resource(conflict, &res);
if (!strcmp(conflict->name, "libata"))
legacy_mode |= (1 << 1);
legacy_mode |= ATA_PORT_SECONDARY;
else {
disable_dev_on_err = 0;
printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
printk(KERN_WARNING "ata: 0x%X IDE port busy\n" \
"ata: conflict with %s\n",
ATA_SECONDARY_CMD,
conflict->name);
}
} else
legacy_mode |= (1 << 1);
legacy_mode |= ATA_PORT_SECONDARY;
}
/* we have legacy mode, but all ports are unavailable */
@ -1055,17 +1048,14 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
goto err_out_regions;
if (legacy_mode) {
if (legacy_mode & (1 << 0))
probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
if (legacy_mode & (1 << 1))
probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
probe_ent = ata_pci_init_legacy_port(pdev, port, legacy_mode);
} else {
if (n_ports == 2)
probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
else
probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
}
if (!probe_ent && !probe_ent2) {
if (!probe_ent) {
rc = -ENOMEM;
goto err_out_regions;
}
@ -1073,35 +1063,17 @@ int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
pci_set_master(pdev);
/* FIXME: check ata_device_add return */
if (legacy_mode) {
struct device *dev = &pdev->dev;
struct ata_host_set *host_set = NULL;
if (legacy_mode & (1 << 0)) {
ata_device_add(probe_ent);
host_set = dev_get_drvdata(dev);
}
if (legacy_mode & (1 << 1)) {
ata_device_add(probe_ent2);
if (host_set) {
host_set->next = dev_get_drvdata(dev);
dev_set_drvdata(dev, host_set);
}
}
} else
ata_device_add(probe_ent);
ata_device_add(probe_ent);
kfree(probe_ent);
kfree(probe_ent2);
return 0;
err_out_regions:
if (legacy_mode & (1 << 0))
release_region(0x1f0, 8);
if (legacy_mode & (1 << 1))
release_region(0x170, 8);
if (legacy_mode & ATA_PORT_PRIMARY)
release_region(ATA_PRIMARY_CMD, 8);
if (legacy_mode & ATA_PORT_SECONDARY)
release_region(ATA_SECONDARY_CMD, 8);
pci_release_regions(pdev);
err_out:
if (disable_dev_on_err)

View File

@ -69,6 +69,10 @@ extern int ata_flush_cache(struct ata_device *dev);
extern void ata_dev_init(struct ata_device *dev);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
extern void ata_port_init(struct ata_port *ap, struct ata_host *host,
const struct ata_probe_ent *ent, unsigned int port_no);
extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev,
const struct ata_port_info *port);
/* libata-scsi.c */
@ -107,6 +111,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
u8 *rbuf, unsigned int buflen));
extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
extern void ata_scsi_dev_rescan(void *data);
extern int ata_bus_probe(struct ata_port *ap);
/* libata-eh.c */
extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);

drivers/ata/pata_ali.c (new file, 679 lines)
View File

@ -0,0 +1,679 @@
/*
* pata_ali.c - ALI 15x3 PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based in part upon
* linux/drivers/ide/pci/alim15x3.c Version 0.17 2003/01/02
*
* Copyright (C) 1998-2000 Michel Aubry, Maintainer
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
* Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer
*
* Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org)
* May be copied or modified under the terms of the GNU General Public License
* Copyright (C) 2002 Alan Cox <alan@redhat.com>
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
*
* Documentation
* Chipset documentation available under NDA only
*
* TODO/CHECK
* Cannot have ATAPI on both master & slave for rev < c2 (???) but
* otherwise should do atapi DMA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.6.5"
/*
* Cable special cases
*/
static struct dmi_system_id cable_dmi_table[] = {
{
.ident = "HP Pavilion N5430",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
},
},
{ }
};
static int ali_cable_override(struct pci_dev *pdev)
{
/* Fujitsu P2000 */
if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF)
return 1;
/* Systems by DMI */
if (dmi_check_system(cable_dmi_table))
return 1;
return 0;
}
/**
* ali_c2_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection for C2 and later revisions
*/
static int ali_c2_cable_detect(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
/* Certain laptops use short but suitable cables and don't
implement the detect logic */
if (ali_cable_override(pdev))
return ATA_CBL_PATA80;
/* Host view cable detect 0x4A bit 0 primary bit 1 secondary
Bit set for 40 pin */
pci_read_config_byte(pdev, 0x4A, &ata66);
if (ata66 & (1 << ap->port_no))
return ATA_CBL_PATA40;
else
return ATA_CBL_PATA80;
}
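/*
 * Illustrative reading of the logic above: if config register 0x4A
 * reads 0x02, bit 0 (primary) is clear so the primary channel reports
 * an 80-wire cable, while bit 1 (secondary) is set and the secondary
 * channel is treated as 40-wire.
 */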
/**
* ali_c2_error_handler - reset for C2 and later chips
* @ap: ATA port
*
* Handle the reset callback for the later chips with cable detect
*/
static int ali_c2_pre_reset(struct ata_port *ap)
{
ap->cbl = ali_c2_cable_detect(ap);
return ata_std_prereset(ap);
}
static void ali_c2_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, ali_c2_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* ali_early_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection for older chipsets. This turns out to be
* rather easy to implement
*/
static int ali_early_cable_detect(struct ata_port *ap)
{
return ATA_CBL_PATA40;
}
/**
* ali_early_error_handler - reset for early chip
* @ap: ATA port
*
* Handle the reset callback for the early (pre cable detect) chips.
*/
static int ali_early_pre_reset(struct ata_port *ap)
{
ap->cbl = ali_early_cable_detect(ap);
return ata_std_prereset(ap);
}
static void ali_early_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, ali_early_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* ali_20_filter - filter for earlier ALI DMA
* @ap: ALi ATA port
* @adev: attached device
*
* Ensure that we do not do DMA on CD devices. We may be able to
* fix that later on. Also ensure we do not do UDMA on WDC drives
*/
static unsigned long ali_20_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
char model_num[40];
/* No DMA on anything but a disk for now */
if (adev->class != ATA_DEV_ATA)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
if (strstr(model_num, "WDC"))
return mask &= ~ATA_MASK_UDMA;
return ata_pci_default_filter(ap, adev, mask);
}
/**
* ali_fifo_control - FIFO manager
* @ap: ALi channel to control
* @adev: device for FIFO control
* @on: 0 for off 1 for on
*
* Enable or disable the FIFO on a given device. Because of the way the
* ALi FIFO works it provides a boost on ATA disk but can be confused by
* ATAPI and we must therefore manage it.
*/
static void ali_fifo_control(struct ata_port *ap, struct ata_device *adev, int on)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio_fifo = 0x54 + ap->port_no;
u8 fifo;
int shift = 4 * adev->devno;
/* ATA - FIFO on set nibble to 0x05, ATAPI - FIFO off, set nibble to
0x00. Not all the docs agree but the behaviour we now use is the
one stated in the BIOS Programming Guide */
pci_read_config_byte(pdev, pio_fifo, &fifo);
fifo &= ~(0x0F << shift);
if (on)
fifo |= (on << shift);
pci_write_config_byte(pdev, pio_fifo, fifo);
}
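/*
 * Worked example (illustrative): enabling the FIFO (on = 0x05) for the
 * slave device on the primary channel clears bits 7:4 of config
 * register 0x54 and writes 0x5 there; the secondary channel uses
 * register 0x55 instead.
 */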
/**
* ali_program_modes - load mode registers
* @ap: ALi channel to load
* @adev: Device the timing is for
* @t: ATA timing data for the command/data cycles, or NULL to leave them untouched
* @ultra: UDMA timing or zero for off
*
* Loads the command/data timing registers from @t (when non-NULL) and
* disables UDMA if @ultra is zero. If @ultra is set then the UDMA
* timing is loaded and enabled, but the command/data timing is not touched.
*/
static void ali_program_modes(struct ata_port *ap, struct ata_device *adev, struct ata_timing *t, u8 ultra)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int cas = 0x58 + 4 * ap->port_no; /* Command timing */
int cbt = 0x59 + 4 * ap->port_no; /* Command timing */
int drwt = 0x5A + 4 * ap->port_no + adev->devno; /* R/W timing */
int udmat = 0x56 + ap->port_no; /* UDMA timing */
int shift = 4 * adev->devno;
u8 udma;
if (t != NULL) {
t->setup = FIT(t->setup, 1, 8) & 7;
t->act8b = FIT(t->act8b, 1, 8) & 7;
t->rec8b = FIT(t->rec8b, 1, 16) & 15;
t->active = FIT(t->active, 1, 8) & 7;
t->recover = FIT(t->recover, 1, 16) & 15;
pci_write_config_byte(pdev, cas, t->setup);
pci_write_config_byte(pdev, cbt, (t->act8b << 4) | t->rec8b);
pci_write_config_byte(pdev, drwt, (t->active << 4) | t->recover);
}
/* Set up the UDMA enable */
pci_read_config_byte(pdev, udmat, &udma);
udma &= ~(0x0F << shift);
udma |= ultra << shift;
pci_write_config_byte(pdev, udmat, udma);
}
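/*
 * Worked example of the register layout above (illustrative): for the
 * slave device on the secondary channel (port_no = 1, devno = 1) the
 * offsets work out to cas = 0x5C, cbt = 0x5D, drwt = 0x5F, udmat = 0x57
 * and the UDMA nibble is shifted by 4.
 */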
/**
* ali_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the ALi registers for PIO mode. FIXME: add timings for
* PIO5.
*/
static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = ata_dev_pair(adev);
struct ata_timing t;
unsigned long T = 1000000000 / 33333; /* PCI clock based */
ata_timing_compute(adev, adev->pio_mode, &t, T, 1);
if (pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (pair->dma_mode) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
}
/* PIO FIFO is only permitted on ATA disk */
if (adev->class != ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x00);
ali_program_modes(ap, adev, &t, 0);
if (adev->class == ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x05);
}
/**
* ali_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* FIXME: MWDMA timings
*/
static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
struct ata_device *pair = ata_dev_pair(adev);
struct ata_timing t;
unsigned long T = 1000000000 / 33333; /* PCI clock based */
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (adev->class == ATA_DEV_ATA)
ali_fifo_control(ap, adev, 0x08);
if (adev->dma_mode >= XFER_UDMA_0) {
ali_program_modes(ap, adev, NULL, udma_timing[adev->dma_mode - XFER_UDMA_0]);
if (adev->dma_mode >= XFER_UDMA_3) {
u8 reg4b;
pci_read_config_byte(pdev, 0x4B, &reg4b);
reg4b |= 1;
pci_write_config_byte(pdev, 0x4B, reg4b);
}
} else {
ata_timing_compute(adev, adev->dma_mode, &t, T, 1);
if (pair) {
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
if (pair->dma_mode) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
}
ali_program_modes(ap, adev, &t, 0);
}
}
/**
* ali_lock_sectors - Keep older devices to 255 sector mode
* @ap: ATA port
* @adev: Device
*
* Called during the bus probe for each device that is found. We use
* this call to lock the sector count of the device to 255 or less on
* older ALi controllers. If we didn't do this then large I/O's would
* require LBA48 commands, which the older ALi controllers require to be
* issued via slower PIO methods
*/
static void ali_lock_sectors(struct ata_port *ap, struct ata_device *adev)
{
adev->max_sectors = 255;
}
static struct scsi_host_template ali_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
/* Keep LBA28 counts so large I/Os don't turn into LBA48 (and hence
PIO) with older controllers. Not locked, so it will grow on C5 or later */
.max_sectors = 255,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
/*
* Port operations for PIO only ALi
*/
static struct ata_port_operations ali_early_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = ali_set_piomode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ali_early_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Port operations for DMA capable ALi without cable
* detect
*/
static struct ata_port_operations ali_20_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = ali_set_piomode,
.set_dmamode = ali_set_dmamode,
.mode_filter = ali_20_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.dev_config = ali_lock_sectors,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ali_early_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Port operations for DMA capable ALi with cable detect
*/
static struct ata_port_operations ali_c2_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = ali_set_piomode,
.set_dmamode = ali_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.dev_config = ali_lock_sectors,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ali_c2_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Port operations for DMA capable ALi with cable detect and LBA48
*/
static struct ata_port_operations ali_c5_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = ali_set_piomode,
.set_dmamode = ali_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ali_c2_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* ali_init_one - discovery callback
* @pdev: PCI device
* @id: PCI table info
*
* An ALi IDE interface has been discovered. Figure out which revision it
* is and perform the configuration work needed before handing it to the ATA layer.
*/
static int ali_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info_early = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.port_ops = &ali_early_port_ops
};
/* Revision 0x20 added DMA */
static struct ata_port_info info_20 = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &ali_20_port_ops
};
/* Revision 0x20 paired with UDMA-capable support logic */
static struct ata_port_info info_20_udma = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07, /* UDMA33 */
.port_ops = &ali_20_port_ops
};
/* Revision 0xC2 adds UDMA66 */
static struct ata_port_info info_c2 = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC3 is UDMA100 */
static struct ata_port_info info_c3 = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC4 is UDMA133 */
static struct ata_port_info info_c4 = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_PIO_LBA48,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
.port_ops = &ali_c2_port_ops
};
/* Revision 0xC5 is UDMA133 with LBA48 DMA */
static struct ata_port_info info_c5 = {
.sht = &ali_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
.port_ops = &ali_c5_port_ops
};
static struct ata_port_info *port_info[2];
u8 rev, tmp;
struct pci_dev *north, *isa_bridge;
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
/*
* The chipset revision selects the driver operations and
* mode data.
*/
if (rev < 0x20) {
port_info[0] = port_info[1] = &info_early;
} else if (rev < 0xC2) {
/* 1543-E/F, 1543C-C, 1543C-D, 1543C-E */
pci_read_config_byte(pdev, 0x4B, &tmp);
/* Clear CD-ROM DMA write bit */
tmp &= 0x7F;
pci_write_config_byte(pdev, 0x4B, tmp);
port_info[0] = port_info[1] = &info_20;
} else if (rev == 0xC2) {
port_info[0] = port_info[1] = &info_c2;
} else if (rev == 0xC3) {
port_info[0] = port_info[1] = &info_c3;
} else if (rev == 0xC4) {
port_info[0] = port_info[1] = &info_c4;
} else
port_info[0] = port_info[1] = &info_c5;
if (rev >= 0xC2) {
/* Enable cable detection logic */
pci_read_config_byte(pdev, 0x4B, &tmp);
pci_write_config_byte(pdev, 0x4B, tmp | 0x08);
}
north = pci_get_slot(pdev->bus, PCI_DEVFN(0,0));
isa_bridge = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
if (north && north->vendor == PCI_VENDOR_ID_AL) {
/* Configure the ALi bridge logic. For non-ALi bridges rely on the BIOS.
Set the south bridge enable bit */
pci_read_config_byte(isa_bridge, 0x79, &tmp);
if (rev == 0xC2)
pci_write_config_byte(isa_bridge, 0x79, tmp | 0x04);
else if (rev > 0xC2)
pci_write_config_byte(isa_bridge, 0x79, tmp | 0x02);
}
if (rev >= 0x20) {
if (rev < 0xC2) {
/* Are we paired with a UDMA-capable chip? */
pci_read_config_byte(isa_bridge, 0x5E, &tmp);
if ((tmp & 0x1E) == 0x12)
port_info[0] = port_info[1] = &info_20_udma;
}
/*
* CD_ROM DMA on (0x53 bit 0). Enable this even if we want
* to use PIO. 0x53 bit 1 (rev 20 only) - enable FIFO control
* via 0x54/55.
*/
pci_read_config_byte(pdev, 0x53, &tmp);
if (rev <= 0x20)
tmp &= ~0x02;
if (rev == 0xc7)
tmp |= 0x03;
else
tmp |= 0x01; /* CD_ROM enable for DMA */
pci_write_config_byte(pdev, 0x53, tmp);
}
pci_dev_put(isa_bridge);
pci_dev_put(north);
ata_pci_clear_simplex(pdev);
return ata_pci_init_one(pdev, port_info, 2);
}
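For reference, the revision checks above reduce to the following mapping (summarised from the code itself; illustrative only, not an addition to the driver logic):
/*
 *   rev < 0x20          -> info_early  (PIO only)
 *   0x20 <= rev < 0xC2  -> info_20     (MWDMA; info_20_udma when paired
 *                                       with UDMA-capable support logic)
 *   rev == 0xC2         -> info_c2     (UDMA66)
 *   rev == 0xC3         -> info_c3     (UDMA100)
 *   rev == 0xC4         -> info_c4     (UDMA133)
 *   anything later      -> info_c5     (UDMA133 + LBA48 DMA)
 */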
static struct pci_device_id ali[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5228), },
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M5229), },
{ 0, },
};
static struct pci_driver ali_pci_driver = {
.name = DRV_NAME,
.id_table = ali,
.probe = ali_init_one,
.remove = ata_pci_remove_one
};
static int __init ali_init(void)
{
return pci_register_driver(&ali_pci_driver);
}
static void __exit ali_exit(void)
{
pci_unregister_driver(&ali_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ALi PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ali);
MODULE_VERSION(DRV_VERSION);
module_init(ali_init);
module_exit(ali_exit);

718
drivers/ata/pata_amd.c Normal file

@ -0,0 +1,718 @@
/*
* pata_amd.c - AMD PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based on pata_sil680. Errata information is taken from data sheets
* and the amd74xx.c driver by Vojtech Pavlik. Nvidia SATA devices are
* claimed by sata_nv.c.
*
* TODO:
* Variable system clock when/if it makes sense
* Power management on ports
*
*
* Documentation publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_amd"
#define DRV_VERSION "0.2.3"
/**
* timing_setup - shared timing computation and load
* @ap: ATA port being set up
* @adev: drive being configured
* @offset: port offset
* @speed: target speed
* @clock: clock multiplier (number of times 33MHz for this part)
*
* Perform the actual timing set up for Nvidia or AMD PATA devices.
* The actual devices vary so they all call into this helper function
* providing the clock multiplier and offset (because AMD and Nvidia put
* the ports at different locations).
*/
static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offset, int speed, int clock)
{
static const unsigned char amd_cyc2udma[] = {
6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *peer = ata_dev_pair(adev);
int dn = ap->port_no * 2 + adev->devno;
struct ata_timing at, apeer;
int T, UT;
const int amd_clock = 33333; /* KHz. */
u8 t;
T = 1000000000 / amd_clock;
UT = T / min_t(int, max_t(int, clock, 1), 2);
if (ata_timing_compute(adev, speed, &at, T, UT) < 0) {
dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", speed);
return;
}
if (peer) {
/* This may be over conservative */
if (peer->dma_mode) {
ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
if (speed == XFER_UDMA_5 && amd_clock <= 33333) at.udma = 1;
if (speed == XFER_UDMA_6 && amd_clock <= 33333) at.udma = 15;
/*
* Now do the setup work
*/
/* Configure the address set up timing */
pci_read_config_byte(pdev, offset + 0x0C, &t);
t = (t & ~(3 << ((3 - dn) << 1))) | ((FIT(at.setup, 1, 4) - 1) << ((3 - dn) << 1));
pci_write_config_byte(pdev, offset + 0x0C , t);
/* Configure the 8bit I/O timing */
pci_write_config_byte(pdev, offset + 0x0E + (1 - (dn >> 1)),
((FIT(at.act8b, 1, 16) - 1) << 4) | (FIT(at.rec8b, 1, 16) - 1));
/* Drive timing */
pci_write_config_byte(pdev, offset + 0x08 + (3 - dn),
((FIT(at.active, 1, 16) - 1) << 4) | (FIT(at.recover, 1, 16) - 1));
switch (clock) {
case 1:
t = at.udma ? (0xc0 | (FIT(at.udma, 2, 5) - 2)) : 0x03;
break;
case 2:
t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 2, 10)]) : 0x03;
break;
case 3:
t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 10)]) : 0x03;
break;
case 4:
t = at.udma ? (0xc0 | amd_cyc2udma[FIT(at.udma, 1, 15)]) : 0x03;
break;
default:
return;
}
/* UDMA timing */
pci_write_config_byte(pdev, offset + 0x10 + (3 - dn), t);
}
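As an illustrative sketch (assumed port/device numbers, not part of the driver), the offsets computed above work out as follows for an AMD part (offset 0x40), secondary channel, master drive:
/*
 *   dn            = 1 * 2 + 0 = 2
 *   address setup : 0x40 + 0x0C                   = 0x4C (field at bit (3 - dn) << 1 = 2)
 *   8-bit I/O     : 0x40 + 0x0E + (1 - (dn >> 1)) = 0x4E
 *   drive timing  : 0x40 + 0x08 + (3 - dn)        = 0x49
 *   UDMA timing   : 0x40 + 0x10 + (3 - dn)        = 0x51
 * Nvidia parts use the same layout at offset 0x50.
 */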
/**
* amd_pre_reset - cable detection
* @ap: ATA port
*
* Perform cable detection. The BIOS stores this in PCI config
* space for us.
*/
static int amd_pre_reset(struct ata_port *ap)
{
static const u32 bitmask[2] = {0x03, 0xC0};
static const struct pci_bits amd_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
pci_read_config_byte(pdev, 0x42, &ata66);
if (ata66 & bitmask[ap->port_no])
ap->cbl = ATA_CBL_PATA80;
else
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void amd_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, amd_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
static int amd_early_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static struct pci_bits amd_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
};
if (!pci_test_config_bits(pdev, &amd_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
/* No host side cable detection */
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
static void amd_early_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, amd_early_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* amd33_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the AMD registers for PIO mode.
*/
static void amd33_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->pio_mode, 1);
}
static void amd66_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->pio_mode, 2);
}
static void amd100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->pio_mode, 3);
}
static void amd133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->pio_mode, 4);
}
/**
* amd33_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the AMD and Nvidia
* chipset.
*/
static void amd33_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 1);
}
static void amd66_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 2);
}
static void amd100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 3);
}
static void amd133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x40, adev->dma_mode, 4);
}
/**
* nv_pre_reset - cable detection
* @ap: ATA port
*
* Perform cable detection. The BIOS stores this in PCI config
* space for us.
*/
static int nv_pre_reset(struct ata_port *ap)
{
static const u8 bitmask[2] = {0x03, 0xC0};
static const struct pci_bits nv_enable_bits[] = {
{ 0x50, 1, 0x02, 0x02 },
{ 0x50, 1, 0x01, 0x01 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ata66;
u16 udma;
if (!pci_test_config_bits(pdev, &nv_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
pci_read_config_byte(pdev, 0x52, &ata66);
if (ata66 & bitmask[ap->port_no])
ap->cbl = ATA_CBL_PATA80;
else
ap->cbl = ATA_CBL_PATA40;
/* We now have to double check because the BIOS on Nvidia boxes
doesn't always set the cable bits, but does set the mode bits */
pci_read_config_word(pdev, 0x62 - 2 * ap->port_no, &udma);
if ((udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400)
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
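A minimal sketch of the BIOS-mode double check above, factored out for clarity (hypothetical helper name, not part of the driver; the word it tests is read from 0x62 for port 0 and 0x60 for port 1):
static int nv_bios_enabled_udma(u16 udma)
{
	/* 0xC4 in either byte means the BIOS already programmed a UDMA
	   mode on that channel, so an 80-wire cable must be fitted */
	return (udma & 0xC4) == 0xC4 || (udma & 0xC400) == 0xC400;
}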
static void nv_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, nv_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* nv100_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the Nvidia timing registers for PIO mode.
*/
static void nv100_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->pio_mode, 3);
}
static void nv133_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->pio_mode, 4);
}
/**
* nv100_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the AMD and Nvidia
* chipset.
*/
static void nv100_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->dma_mode, 3);
}
static void nv133_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
timing_setup(ap, adev, 0x50, adev->dma_mode, 4);
}
static struct scsi_host_template amd_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations amd33_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = amd33_set_piomode,
.set_dmamode = amd33_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = amd_early_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations amd66_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = amd66_set_piomode,
.set_dmamode = amd66_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = amd_early_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations amd100_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = amd100_set_piomode,
.set_dmamode = amd100_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = amd_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations amd133_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = amd133_set_piomode,
.set_dmamode = amd133_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = amd_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations nv100_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = nv100_set_piomode,
.set_dmamode = nv100_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations nv133_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = nv133_set_piomode,
.set_dmamode = nv133_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int amd_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info[10] = {
{ /* 0: AMD 7401 */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07, /* No SWDMA */
.udma_mask = 0x07, /* UDMA 33 */
.port_ops = &amd33_port_ops
},
{ /* 1: Early AMD7409 - no swdma */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f, /* UDMA 66 */
.port_ops = &amd66_port_ops
},
{ /* 2: AMD 7409, no swdma errata */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f, /* UDMA 66 */
.port_ops = &amd66_port_ops
},
{ /* 3: AMD 7411 */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f, /* UDMA 100 */
.port_ops = &amd100_port_ops
},
{ /* 4: AMD 7441 */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f, /* UDMA 100 */
.port_ops = &amd100_port_ops
},
{ /* 5: AMD 8111*/
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f, /* UDMA 133, no swdma */
.port_ops = &amd133_port_ops
},
{ /* 6: AMD 8111 UDMA 100 (Serenade) */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f, /* UDMA 100, no swdma */
.port_ops = &amd133_port_ops
},
{ /* 7: Nvidia Nforce */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f, /* UDMA 100 */
.port_ops = &nv100_port_ops
},
{ /* 8: Nvidia Nforce2 and later */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f, /* UDMA 133, no swdma */
.port_ops = &nv133_port_ops
},
{ /* 9: AMD CS5536 (Geode companion) */
.sht = &amd_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f, /* UDMA 100 */
.port_ops = &amd100_port_ops
}
};
static struct ata_port_info *port_info[2];
static int printed_version;
int type = id->driver_data;
u8 rev;
u8 fifo;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
pci_read_config_byte(pdev, 0x41, &fifo);
/* Check for AMD7409 without swdma errata and if found adjust type */
if (type == 1 && rev > 0x7)
type = 2;
/* Check for AMD7411 */
if (type == 3)
/* FIFO is broken */
pci_write_config_byte(pdev, 0x41, fifo & 0x0F);
else
pci_write_config_byte(pdev, 0x41, fifo | 0xF0);
/* Serenade ? */
if (type == 5 && pdev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
pdev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
type = 6; /* UDMA 100 only */
if (type < 3)
ata_pci_clear_simplex(pdev);
/* And fire it up */
port_info[0] = port_info[1] = &info[type];
return ata_pci_init_one(pdev, port_info, 2);
}
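To summarise the quirk handling above (descriptive only, derived from the checks in the code):
/*
 *   - an early 7409 (type 1) is promoted to type 2 once rev > 0x07,
 *     since later revisions lack the swdma errata;
 *   - the 7411 (type 3) has a broken FIFO, so it is disabled rather
 *     than enabled;
 *   - an 8111 carrying the AMD Serenade subsystem ID is limited to
 *     UDMA100 (type 6);
 *   - pre-7411 parts additionally get the simplex flag cleared.
 */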
static const struct pci_device_id amd[] = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_COBRA_7401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7411, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_OPUS_7441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
{ 0, },
};
static struct pci_driver amd_pci_driver = {
.name = DRV_NAME,
.id_table = amd,
.probe = amd_init_one,
.remove = ata_pci_remove_one
};
static int __init amd_init(void)
{
return pci_register_driver(&amd_pci_driver);
}
static void __exit amd_exit(void)
{
pci_unregister_driver(&amd_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for AMD PATA IDE");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, amd);
MODULE_VERSION(DRV_VERSION);
module_init(amd_init);
module_exit(amd_exit);

518
drivers/ata/pata_artop.c Normal file

@ -0,0 +1,518 @@
/*
* pata_artop.c - ARTOP ATA controller driver
*
* (C) 2006 Red Hat <alan@redhat.com>
*
* Based in part on drivers/ide/pci/aec62xx.c
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
* 865/865R fixes for Macintosh card version from a patch to the old
* driver by Thibaut VARENE <varenet@parisc-linux.org>
* When setting the PCI latency we must set 0x80 or higher for burst
* performance, per Alessandro Zummo <alessandro.zummo@towertech.it>
*
* TODO
* 850 serialization once the core supports it
* Investigate no_dsc on 850R
* Clock detect
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_artop"
#define DRV_VERSION "0.4.1"
/*
* The ARTOP has 33 MHz and "over-clocked" timing tables. Until we
* get PCI bus speed functionality we leave this as 0. It's a variable
* for when we get that functionality, and also for folks wanting to
* test stuff.
*/
static int clock = 0;
static int artop6210_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
};
if (!pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* artop6210_error_handler - Probe specified port on PATA host controller
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, artop6210_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* artop6260_pre_reset - check for 40/80 pin
* @ap: Port
*
* The ARTOP hardware reports the cable detect bits in register 0x49.
* Nothing complicated needed here.
*/
static int artop6260_pre_reset(struct ata_port *ap)
{
static const struct pci_bits artop_enable_bits[] = {
{ 0x4AU, 1U, 0x02UL, 0x02UL }, /* port 0 */
{ 0x4AU, 1U, 0x04UL, 0x04UL }, /* port 1 */
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
/* Odd numbered device ids are the units with enable bits (the -R cards) */
if ((pdev->device & 1) && !pci_test_config_bits(pdev, &artop_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
pci_read_config_byte(pdev, 0x49, &tmp);
if (tmp & (1 << ap->port_no))
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* artop6260_error_handler - Probe specified port on PATA host controller
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, artop6260_pre_reset,
ata_std_softreset, NULL,
ata_std_postreset);
}
/**
* artop6210_load_piomode - Load a set of PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. This
* is used both to set PIO timings in PIO mode and also to set the
* matching PIO clocking for UDMA, as well as the MWDMA timings.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_load_piomode(struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
const u16 timing[2][5] = {
{ 0x0000, 0x000A, 0x0008, 0x0303, 0x0301 },
{ 0x0700, 0x070A, 0x0708, 0x0403, 0x0401 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_word(pdev, 0x40 + 2 * dn, timing[clock][pio]);
}
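A worked example of the write above (illustrative, with assumed port/device numbers):
/*
 *   secondary master: dn = 0 + 2 * 1 = 2, so the word goes to
 *   0x40 + 2 * dn = 0x44; with clock == 0 and PIO4 the value written
 *   is timing[0][4] = 0x0301.
 */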
/**
* artop6210_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
artop6210_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
pci_write_config_byte(pdev, 0x54, ultra);
}
/**
* artop6260_load_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
* @pio: PIO mode
*
* Set PIO mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_load_piomode (struct ata_port *ap, struct ata_device *adev, unsigned int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
const u8 timing[2][5] = {
{ 0x00, 0x0A, 0x08, 0x33, 0x31 },
{ 0x70, 0x7A, 0x78, 0x43, 0x41 }
};
/* Load the PIO timing active/recovery bits */
pci_write_config_byte(pdev, 0x40 + dn, timing[clock][pio]);
}
/**
* artop6260_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set PIO mode for device, in host controller PCI config space. For
* ARTOP we must also clear the UDMA bits if we are not doing UDMA. In
* the event UDMA is used the later call to set_dmamode will set the
* bits as required.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
artop6260_load_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
/* Clear the UDMA mode bits (set_dmamode will redo this if needed) */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
/**
* artop6210_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set DMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6210_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = adev->devno + 2 * ap->port_no;
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6210_load_piomode(ap, adev, pio);
pci_read_config_byte(pdev, 0x54, &ultra);
ultra &= ~(3 << (2 * dn));
/* Add ultra DMA bits if in UDMA mode */
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = (adev->dma_mode - XFER_UDMA_0) + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (2 * dn));
}
pci_write_config_byte(pdev, 0x54, ultra);
}
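Illustrative arithmetic for the UDMA field above (assumed UDMA2 and clock == 0; not part of the driver):
/*
 *   mode = (XFER_UDMA_2 - XFER_UDMA_0) + 1 - clock = 3, placed in the
 *   two-bit field at bit 2 * dn of register 0x54; MWDMA leaves the field
 *   cleared and relies on the PIO timings loaded above.
 */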
/**
* artop6260_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device we are configuring
*
* Set DMA mode for device, in host controller PCI config space. The
* ARTOP6260 and relatives store the timing data differently.
*
* LOCKING:
* None (inherited from caller).
*/
static void artop6260_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 ultra;
if (adev->dma_mode == XFER_MW_DMA_0)
pio = 1;
else
pio = 4;
/* Load the PIO timing active/recovery bits */
artop6260_load_piomode(ap, adev, pio);
/* Add ultra DMA bits if in UDMA mode */
pci_read_config_byte(pdev, 0x44 + ap->port_no, &ultra);
ultra &= ~(7 << (4 * adev->devno)); /* One nibble per drive */
if (adev->dma_mode >= XFER_UDMA_0) {
u8 mode = adev->dma_mode - XFER_UDMA_0 + 1 - clock;
if (mode == 0)
mode = 1;
ultra |= (mode << (4 * adev->devno));
}
pci_write_config_byte(pdev, 0x44 + ap->port_no, ultra);
}
static struct scsi_host_template artop_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations artop6210_ops = {
.port_disable = ata_port_disable,
.set_piomode = artop6210_set_piomode,
.set_dmamode = artop6210_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = artop6210_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
static const struct ata_port_operations artop6260_ops = {
.port_disable = ata_port_disable,
.set_piomode = artop6260_set_piomode,
.set_dmamode = artop6260_set_dmamode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = artop6260_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* artop_init_one - Register ARTOP ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in artop_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int artop_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static int printed_version;
static struct ata_port_info info_6210 = {
.sht = &artop_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = ATA_UDMA2,
.port_ops = &artop6210_ops,
};
static struct ata_port_info info_626x = {
.sht = &artop_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = ATA_UDMA4,
.port_ops = &artop6260_ops,
};
static struct ata_port_info info_626x_fast = {
.sht = &artop_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = ATA_UDMA5,
.port_ops = &artop6260_ops,
};
struct ata_port_info *port_info[2];
struct ata_port_info *info;
int ports = 2;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
if (id->driver_data == 0) { /* 6210 variant */
info = &info_6210;
/* BIOS may have left us in UDMA, clear it before libata probe */
pci_write_config_byte(pdev, 0x54, 0);
/* For the moment (also lacks dsc) */
printk(KERN_WARNING "ARTOP 6210 requires serialize functionality not yet supported by libata.\n");
printk(KERN_WARNING "Secondary ATA ports will not be activated.\n");
ports = 1;
}
else if (id->driver_data == 1) /* 6260 */
info = &info_626x;
else if (id->driver_data == 2) { /* 6260 or 6260 + fast */
unsigned long io = pci_resource_start(pdev, 4);
u8 reg;
info = &info_626x;
if (inb(io) & 0x10)
info = &info_626x_fast;
/* Mac systems come up with some registers not set the way we
need them */
/* Clear reset & test bits */
pci_read_config_byte(pdev, 0x49, &reg);
pci_write_config_byte(pdev, 0x49, reg & ~ 0x30);
/* PCI latency must be > 0x80 for burst mode, tweak it
* if required.
*/
pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &reg);
if (reg <= 0x80)
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x90);
/* Enable IRQ output and burst mode */
pci_read_config_byte(pdev, 0x4a, &reg);
pci_write_config_byte(pdev, 0x4a, (reg & ~0x01) | 0x80);
}
port_info[0] = port_info[1] = info;
return ata_pci_init_one(pdev, port_info, ports);
}
static const struct pci_device_id artop_pci_tbl[] = {
{ 0x1191, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0x1191, 0x0006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ 0x1191, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ 0x1191, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ 0x1191, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
{ } /* terminate list */
};
static struct pci_driver artop_pci_driver = {
.name = DRV_NAME,
.id_table = artop_pci_tbl,
.probe = artop_init_one,
.remove = ata_pci_remove_one,
};
static int __init artop_init(void)
{
return pci_register_driver(&artop_pci_driver);
}
static void __exit artop_exit(void)
{
pci_unregister_driver(&artop_pci_driver);
}
module_init(artop_init);
module_exit(artop_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for ARTOP PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, artop_pci_tbl);
MODULE_VERSION(DRV_VERSION);

306
drivers/ata/pata_atiixp.c Normal file

@ -0,0 +1,306 @@
/*
* pata_atiixp.c - ATI PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based on
*
* linux/drivers/ide/pci/atiixp.c Version 0.01-bart2 Feb. 26, 2004
*
* Copyright (C) 2003 ATI Inc. <hyu@ati.com>
* Copyright (C) 2004 Bartlomiej Zolnierkiewicz
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_atiixp"
#define DRV_VERSION "0.4.2"
enum {
ATIIXP_IDE_PIO_TIMING = 0x40,
ATIIXP_IDE_MWDMA_TIMING = 0x44,
ATIIXP_IDE_PIO_CONTROL = 0x48,
ATIIXP_IDE_PIO_MODE = 0x4a,
ATIIXP_IDE_UDMA_CONTROL = 0x54,
ATIIXP_IDE_UDMA_MODE = 0x56
};
static int atiixp_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static struct pci_bits atiixp_enable_bits[] = {
{ 0x48, 1, 0x01, 0x00 },
{ 0x48, 1, 0x08, 0x00 }
};
if (!pci_test_config_bits(pdev, &atiixp_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
static void atiixp_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, atiixp_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* atiixp_set_pio_timing - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
* @pio: PIO mode to load
*
* Called by both the pio and dma setup functions to set the controller
* timings for PIO transfers. We must load both the mode number and
* timing values into the controller.
*/
static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev, int pio)
{
static u8 pio_timings[5] = { 0x5D, 0x47, 0x34, 0x22, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = 2 * ap->port_no + adev->devno;
/* Check this is correct - the order is odd in both drivers */
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
u16 pio_mode_data, pio_timing_data;
pci_read_config_word(pdev, ATIIXP_IDE_PIO_MODE, &pio_mode_data);
pio_mode_data &= ~(0x7 << (4 * dn));
pio_mode_data |= pio << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
pio_timing_data &= ~(0xFF << timing_shift);
pio_timing_data |= (pio_timings[pio] << timing_shift);
pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
}
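For reference, the addressing used above maps out as follows (derived from the expressions in the code, not verified against hardware):
/*
 *   dn = 2 * port + devno,  timing_shift = 16 * port + 8 * (devno ^ 1)
 *   port 0 master: dn = 0, shift =  8    port 0 slave: dn = 1, shift =  0
 *   port 1 master: dn = 2, shift = 24    port 1 slave: dn = 3, shift = 16
 *   The PIO mode number itself occupies the nibble at 4 * dn of the
 *   mode register.
 */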
/**
* atiixp_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. We use a shared helper for this
* as the DMA setup must also adjust the PIO timing information.
*/
static void atiixp_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
atiixp_set_pio_timing(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* atiixp_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup. We use timing tables for most
* modes but must tune an appropriate PIO mode to match.
*/
static void atiixp_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static u8 mwdma_timings[3] = { 0x77, 0x21, 0x20 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dma = adev->dma_mode;
int dn = 2 * ap->port_no + adev->devno;
int wanted_pio;
if (adev->dma_mode >= XFER_UDMA_0) {
u16 udma_mode_data;
dma -= XFER_UDMA_0;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_MODE, &udma_mode_data);
udma_mode_data &= ~(0x7 << (4 * dn));
udma_mode_data |= dma << (4 * dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_MODE, udma_mode_data);
} else {
u16 mwdma_timing_data;
/* Check this is correct - the order is odd in both drivers */
int timing_shift = (16 * ap->port_no) + 8 * (adev->devno ^ 1);
dma -= XFER_MW_DMA_0;
pci_read_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, &mwdma_timing_data);
mwdma_timing_data &= ~(0xFF << timing_shift);
mwdma_timing_data |= (mwdma_timings[dma] << timing_shift);
pci_write_config_word(pdev, ATIIXP_IDE_MWDMA_TIMING, mwdma_timing_data);
}
/*
* We must now look at the PIO mode situation. We may need to
* adjust the PIO mode to keep the timings acceptable
*/
if (adev->dma_mode >= XFER_MW_DMA_2)
wanted_pio = 4;
else if (adev->dma_mode == XFER_MW_DMA_1)
wanted_pio = 3;
else if (adev->dma_mode == XFER_MW_DMA_0)
wanted_pio = 0;
else BUG();
if (adev->pio_mode != wanted_pio)
atiixp_set_pio_timing(ap, adev, wanted_pio);
}
/**
* atiixp_bmdma_start - DMA start callback
* @qc: Command in progress
*
* When DMA begins we need to ensure that the UDMA control
* register for the channel is correctly set.
*/
static void atiixp_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + adev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
if (adev->dma_mode >= XFER_UDMA_0)
tmp16 |= (1 << dn);
else
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_start(qc);
}
/**
* atiixp_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* DMA has completed. Clear the UDMA flag as the next operations will
* be PIO ones not UDMA data transfer.
*/
static void atiixp_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int dn = (2 * ap->port_no) + qc->dev->devno;
u16 tmp16;
pci_read_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, &tmp16);
tmp16 &= ~(1 << dn);
pci_write_config_word(pdev, ATIIXP_IDE_UDMA_CONTROL, tmp16);
ata_bmdma_stop(qc);
}
static struct scsi_host_template atiixp_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations atiixp_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = atiixp_set_piomode,
.set_dmamode = atiixp_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = atiixp_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = atiixp_bmdma_start,
.bmdma_stop = atiixp_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int atiixp_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &atiixp_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x06, /* No MWDMA0 support */
.udma_mask = 0x3F,
.port_ops = &atiixp_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id atiixp[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_IDE), },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP600_IDE), },
{ 0, },
};
static struct pci_driver atiixp_pci_driver = {
.name = DRV_NAME,
.id_table = atiixp,
.probe = atiixp_init_one,
.remove = ata_pci_remove_one
};
static int __init atiixp_init(void)
{
return pci_register_driver(&atiixp_pci_driver);
}
static void __exit atiixp_exit(void)
{
pci_unregister_driver(&atiixp_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ATI IXP200/300/400");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, atiixp);
MODULE_VERSION(DRV_VERSION);
module_init(atiixp_init);
module_exit(atiixp_exit);

505
drivers/ata/pata_cmd64x.c Normal file

@ -0,0 +1,505 @@
/*
* pata_cmd64x.c - CMD64x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based upon
* linux/drivers/ide/pci/cmd64x.c Version 1.30 Sept 10, 2002
*
* cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
* Note, this driver is not used at all on other systems because
* there the "BIOS" has done all of the following already.
* Due to massive hardware bugs, UltraDMA is only supported
* on the 646U2 and not on the 646U.
*
* Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
*
* TODO
* Testing work
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cmd64x"
#define DRV_VERSION "0.2.1"
/*
* CMD64x specific registers definition.
*/
enum {
CFR = 0x50,
CFR_INTR_CH0 = 0x02,
CNTRL = 0x51,
CNTRL_DIS_RA0 = 0x40,
CNTRL_DIS_RA1 = 0x80,
CNTRL_ENA_2ND = 0x08,
CMDTIM = 0x52,
ARTTIM0 = 0x53,
DRWTIM0 = 0x54,
ARTTIM1 = 0x55,
DRWTIM1 = 0x56,
ARTTIM23 = 0x57,
ARTTIM23_DIS_RA2 = 0x04,
ARTTIM23_DIS_RA3 = 0x08,
ARTTIM23_INTR_CH1 = 0x10,
ARTTIM2 = 0x57,
ARTTIM3 = 0x57,
DRWTIM23 = 0x58,
DRWTIM2 = 0x58,
BRST = 0x59,
DRWTIM3 = 0x5b,
BMIDECR0 = 0x70,
MRDMODE = 0x71,
MRDMODE_INTR_CH0 = 0x04,
MRDMODE_INTR_CH1 = 0x08,
MRDMODE_BLK_CH0 = 0x10,
MRDMODE_BLK_CH1 = 0x20,
BMIDESR0 = 0x72,
UDIDETCR0 = 0x73,
DTPR0 = 0x74,
BMIDECR1 = 0x78,
BMIDECSR = 0x79,
BMIDESR1 = 0x7A,
UDIDETCR1 = 0x7B,
DTPR1 = 0x7C
};
static int cmd64x_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static int cmd648_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 r;
/* Check cable detect bits */
pci_read_config_byte(pdev, BMIDECSR, &r);
if (r & (1 << ap->port_no))
ap->cbl = ATA_CBL_PATA80;
else
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void cmd64x_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, cmd64x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
static void cmd648_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, cmd648_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* cmd64x_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup.
*/
static void cmd64x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
const u8 setup_data[] = { 0x40, 0x40, 0x40, 0x80, 0x00 };
u8 reg;
/* Port layout is not logical so use a table */
const u8 arttim_port[2][2] = {
{ ARTTIM0, ARTTIM1 },
{ ARTTIM23, ARTTIM23 }
};
const u8 drwtim_port[2][2] = {
{ DRWTIM0, DRWTIM1 },
{ DRWTIM2, DRWTIM3 }
};
int arttim = arttim_port[ap->port_no][adev->devno];
int drwtim = drwtim_port[ap->port_no][adev->devno];
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
return;
}
if (ap->port_no) {
/* Slave has shared address setup */
struct ata_device *pair = ata_dev_pair(adev);
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, T, 0);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
}
printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
t.active, t.recover, t.setup);
if (t.recover > 16) {
t.active += t.recover - 16;
t.recover = 16;
}
if (t.active > 16)
t.active = 16;
/* Now convert the clocks into values we can actually stuff into
the chip */
if (t.recover > 1)
t.recover--;
else
t.recover = 15;
if (t.setup > 4)
t.setup = 0xC0;
else
t.setup = setup_data[t.setup];
t.active &= 0x0F; /* 0 = 16 */
/* Load setup timing */
pci_read_config_byte(pdev, arttim, &reg);
reg &= 0x3F;
reg |= t.setup;
pci_write_config_byte(pdev, arttim, reg);
/* Load active/recovery */
pci_write_config_byte(pdev, drwtim, (t.active << 4) | t.recover);
}
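A worked example of the clock packing above (hypothetical timing values, for illustration only):
/*
 *   If ata_timing_compute() gave active = 6, recover = 9, setup = 2 clocks:
 *   recover becomes 9 - 1 = 8, setup becomes setup_data[2] = 0x40 in the
 *   top bits of ARTTIM, and DRWTIM is written as (6 << 4) | 8 = 0x68.
 */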
/**
* cmd64x_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup.
*/
static void cmd64x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 udma_data[] = {
0x31, 0x21, 0x11, 0x25, 0x15, 0x05
};
static const u8 mwdma_data[] = {
0x30, 0x20, 0x10
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 regU, regD;
int pciU = UDIDETCR0 + 8 * ap->port_no;
int pciD = BMIDESR0 + 8 * ap->port_no;
int shift = 2 * adev->devno;
pci_read_config_byte(pdev, pciD, &regD);
pci_read_config_byte(pdev, pciU, &regU);
regD &= ~(0x20 << shift);
regU &= ~(0x35 << shift);
if (adev->dma_mode >= XFER_UDMA_0)
regU |= udma_data[adev->dma_mode - XFER_UDMA_0] << shift;
else
regD |= mwdma_data[adev->dma_mode - XFER_MW_DMA_0] << shift;
regD |= 0x20 << adev->devno;
pci_write_config_byte(pdev, pciU, regU);
pci_write_config_byte(pdev, pciD, regD);
}
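Illustrative register arithmetic for the secondary slave (port 1, devno 1), assuming UDMA2 (values follow directly from the tables and offsets above):
/*
 *   pciU = UDIDETCR0 + 8 = 0x7B (UDIDETCR1), pciD = BMIDESR0 + 8 = 0x7A,
 *   shift = 2 * devno = 2, so udma_data[2] = 0x11 is written as
 *   0x11 << 2 = 0x44 into the UDMA register and the drive's
 *   DMA-capable bit 0x20 << devno = 0x40 is set in BMIDESR1.
 */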
/**
* cmd648_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* DMA has completed.
*/
static void cmd648_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 dma_intr;
int dma_intr_bit = ap->port_no ? ARTTIM23_INTR_CH1 : CFR_INTR_CH0;
int dma_reg = ap->port_no ? ARTTIM23 : CFR;
ata_bmdma_stop(qc);
/* Clear the pending interrupt bit for this channel */
pci_read_config_byte(pdev, dma_reg, &dma_intr);
pci_write_config_byte(pdev, dma_reg, dma_intr | dma_intr_bit);
}
/**
* cmd646r1_bmdma_stop - DMA stop callback
* @qc: Command in progress
*
* Stub for now while investigating the r1 quirk in the old driver.
*/
static void cmd646r1_bmdma_stop(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
}
static struct scsi_host_template cmd64x_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations cmd64x_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cmd64x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations cmd646r1_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cmd64x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = cmd646r1_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations cmd648_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cmd64x_set_piomode,
.set_dmamode = cmd64x_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cmd648_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = cmd648_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int cmd64x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
u32 class_rev;
static struct ata_port_info cmd_info[6] = {
{ /* CMD 643 - no UDMA */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &cmd64x_port_ops
},
{ /* CMD 646 with broken UDMA */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &cmd64x_port_ops
},
{ /* CMD 646 with working UDMA */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA1,
.port_ops = &cmd64x_port_ops
},
{ /* CMD 646 rev 1 */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &cmd646r1_port_ops
},
{ /* CMD 648 */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA2,
.port_ops = &cmd648_port_ops
},
{ /* CMD 649 */
.sht = &cmd64x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA3,
.port_ops = &cmd648_port_ops
}
};
static struct ata_port_info *port_info[2], *info;
u8 mrdmode;
info = &cmd_info[id->driver_data];
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xFF;
if (id->driver_data == 0) /* 643 */
ata_pci_clear_simplex(pdev);
if (pdev->device == PCI_DEVICE_ID_CMD_646) {
/* Does UDMA work ? */
if (class_rev > 4)
info = &cmd_info[2];
/* Early rev with other problems ? */
else if (class_rev == 1)
info = &cmd_info[3];
}
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_read_config_byte(pdev, MRDMODE, &mrdmode);
mrdmode &= ~ 0x30; /* IRQ set up */
mrdmode |= 0x02; /* Memory read line enable */
pci_write_config_byte(pdev, MRDMODE, mrdmode);
/* Force PIO 0 here.. */
/* PPC specific fixup copied from old driver */
#ifdef CONFIG_PPC
pci_write_config_byte(pdev, UDIDETCR0, 0xF0);
#endif
port_info[0] = port_info[1] = info;
return ata_pci_init_one(pdev, port_info, 2);
}
static struct pci_device_id cmd64x[] = {
{ PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
{ PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_CMD_649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
{ 0, },
};
static struct pci_driver cmd64x_pci_driver = {
.name = DRV_NAME,
.id_table = cmd64x,
.probe = cmd64x_init_one,
.remove = ata_pci_remove_one
};
static int __init cmd64x_init(void)
{
return pci_register_driver(&cmd64x_pci_driver);
}
static void __exit cmd64x_exit(void)
{
pci_unregister_driver(&cmd64x_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for CMD64x series PATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cmd64x);
MODULE_VERSION(DRV_VERSION);
module_init(cmd64x_init);
module_exit(cmd64x_exit);

336
drivers/ata/pata_cs5520.c Normal file

@ -0,0 +1,336 @@
/*
* IDE tuning and bus mastering support for the CS5510/CS5520
* chipsets
*
* The CS5510/CS5520 are slightly unusual devices. Unlike the
* typical IDE controllers they do bus mastering with the drive in
* PIO mode and smarter silicon.
*
* The practical upshot of this is that we must always tune the
* drive for the right PIO mode. We must also ignore all the blacklists
* and the drive bus mastering DMA information. Also to confuse matters
* further we can do DMA on PIO only drives.
*
* DMA on the 5510 also requires we disable_hlt() during DMA on early
* revisions.
*
* *** This driver is strictly experimental ***
*
* (c) Copyright Red Hat Inc 2002
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* Documentation:
* Not publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cs5520"
#define DRV_VERSION "0.6.2"
struct pio_clocks
{
int address;
int assert;
int recovery;
};
static const struct pio_clocks cs5520_pio_clocks[]={
{3, 6, 11},
{2, 5, 6},
{1, 4, 3},
{1, 3, 2},
{1, 2, 1}
};
/**
* cs5520_set_timings - program PIO timings
* @ap: ATA port
* @adev: ATA device
*
* Program the PIO mode timings for the controller according to the pio
* clocking table.
*/
static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int slave = adev->devno;
pio -= XFER_PIO_0;
/* Channel command timing */
pci_write_config_byte(pdev, 0x62 + ap->port_no,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
/* FIXME: should these use address ? */
/* Read command timing */
pci_write_config_byte(pdev, 0x64 + 4*ap->port_no + slave,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
/* Write command timing */
pci_write_config_byte(pdev, 0x66 + 4*ap->port_no + slave,
(cs5520_pio_clocks[pio].recovery << 4) |
(cs5520_pio_clocks[pio].assert));
}
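/*
 * Illustrative sketch, not part of the original driver: all three timing
 * registers above are loaded with the same byte, built from the
 * cs5520_pio_clocks table as (recovery << 4) | assert. The helper name
 * cs5520_timing_byte() is hypothetical and only spells out that encoding.
 */
static inline u8 cs5520_timing_byte(int pio)
{
	/* High nibble: recovery clocks, low nibble: assert clocks */
	return (cs5520_pio_clocks[pio].recovery << 4) |
	       cs5520_pio_clocks[pio].assert;
}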
/**
* cs5520_enable_dma - turn on DMA bits
*
* Turn on the DMA bits for this disk. Needed because the BIOS probably
* has not done the work for us. Belongs in the core SATA code.
*/
static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
{
/* Set the DMA enable/disable flag */
u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02);
reg |= 1<<(adev->devno + 5);
outb(reg, ap->ioaddr.bmdma_addr + 0x02);
}
/**
* cs5520_set_dmamode - program DMA timings
* @ap: ATA port
* @adev: ATA device
*
* Program the DMA mode timings for the controller according to the pio
* clocking table. Note that this device sets the DMA timings to PIO
* mode values. This may seem bizarre but the 5520 architecture talks
* PIO mode to the disk and DMA mode to the controller so the underlying
* transfers are PIO timed.
*/
static void cs5520_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const int dma_xlate[3] = { XFER_PIO_0, XFER_PIO_3, XFER_PIO_4 };
cs5520_set_timings(ap, adev, dma_xlate[adev->dma_mode]);
cs5520_enable_dma(ap, adev);
}
/**
* cs5520_set_piomode - program PIO timings
* @ap: ATA port
* @adev: ATA device
*
* Program the PIO mode timings for the controller according to the pio
* clocking table. We know pio_mode will equal dma_mode because of the
* CS5520 architecture. At least once we turned DMA on and wrote a
* mode setter.
*/
static void cs5520_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
cs5520_set_timings(ap, adev, adev->pio_mode);
}
static int cs5520_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void cs5520_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, cs5520_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
static struct scsi_host_template cs5520_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations cs5520_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cs5520_set_piomode,
.set_dmamode = cs5520_set_dmamode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cs5520_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
u8 pcicfg;
static struct ata_probe_ent probe[2];
int ports = 0;
/* IDE port enable bits */
pci_read_config_byte(dev, 0x60, &pcicfg);
/* Check if the ATA ports are enabled */
if ((pcicfg & 3) == 0)
return -ENODEV;
if ((pcicfg & 0x40) == 0) {
printk(KERN_WARNING DRV_NAME ": DMA mode disabled. Enabling.\n");
pci_write_config_byte(dev, 0x60, pcicfg | 0x40);
}
/* Perform set up for DMA */
if (pci_enable_device_bars(dev, 1<<2)) {
printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
return -ENODEV;
}
pci_set_master(dev);
if (pci_set_dma_mask(dev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
return -ENODEV;
}
if (pci_set_consistent_dma_mask(dev, DMA_32BIT_MASK)) {
printk(KERN_ERR DRV_NAME ": unable to configure consistent DMA mask.\n");
return -ENODEV;
}
/* We have to do our own plumbing as the PCI setup for this
chipset is non-standard so we can't punt to the libata code */
INIT_LIST_HEAD(&probe[0].node);
probe[0].dev = pci_dev_to_dev(dev);
probe[0].port_ops = &cs5520_port_ops;
probe[0].sht = &cs5520_sht;
probe[0].pio_mask = 0x1F;
probe[0].mwdma_mask = id->driver_data;
probe[0].irq = 14;
probe[0].irq_flags = 0;
probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
probe[0].n_ports = 1;
probe[0].port[0].cmd_addr = 0x1F0;
probe[0].port[0].ctl_addr = 0x3F6;
probe[0].port[0].altstatus_addr = 0x3F6;
probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2);
/* The secondary lurks at different addresses but is otherwise
the same beastie */
probe[1] = probe[0];
INIT_LIST_HEAD(&probe[1].node);
probe[1].irq = 15;
probe[1].port[0].cmd_addr = 0x170;
probe[1].port[0].ctl_addr = 0x376;
probe[1].port[0].altstatus_addr = 0x376;
probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8;
/* Let libata fill in the port details */
ata_std_ports(&probe[0].port[0]);
ata_std_ports(&probe[1].port[0]);
/* Now add the ports that are active */
if (pcicfg & 1)
ports += ata_device_add(&probe[0]);
if (pcicfg & 2)
ports += ata_device_add(&probe[1]);
if (ports)
return 0;
return -ENODEV;
}
/**
* cs5520_remove_one - device unload
* @pdev: PCI device being removed
*
* Handle an unplug/unload event for a PCI device. Unload the
* PCI driver but do not use the default handler as we manage
* resources ourself and *MUST NOT* disable the device as it has
* other functions.
*/
static void __devexit cs5520_remove_one(struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host *host = dev_get_drvdata(dev);
ata_host_remove(host);
dev_set_drvdata(dev, NULL);
}
/* For now keep DMA off. We can set it for all but A rev CS5510 once the
core ATA code can handle it */
static struct pci_device_id pata_cs5520[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510), },
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520), },
{ 0, },
};
static struct pci_driver cs5520_pci_driver = {
.name = DRV_NAME,
.id_table = pata_cs5520,
.probe = cs5520_init_one,
.remove = cs5520_remove_one
};
static int __init cs5520_init(void)
{
return pci_register_driver(&cs5520_pci_driver);
}
static void __exit cs5520_exit(void)
{
pci_unregister_driver(&cs5520_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Cyrix CS5510/5520");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pata_cs5520);
MODULE_VERSION(DRV_VERSION);
module_init(cs5520_init);
module_exit(cs5520_exit);

387
drivers/ata/pata_cs5530.c Normal file

@ -0,0 +1,387 @@
/*
* pata-cs5530.c - CS5530 PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon cs5530.c by Mark Lord.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Available from AMD web site.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
#define DRV_NAME "pata_cs5530"
#define DRV_VERSION "0.6"
/**
* cs5530_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the CS5530
* chips.
*/
static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const unsigned int cs5530_pio_timings[2][5] = {
{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
};
unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
u32 tuning;
int format;
/* Find out which table to use */
tuning = inl(base + 0x04);
format = (tuning & 0x80000000UL) ? 1 : 0;
/* Now load the right timing register */
if (adev->devno)
base += 0x08;
outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
}
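/*
 * Illustrative sketch, not part of the original driver: the PIO and DMA
 * setup routines address the same per-channel timing block, which sits
 * 0x20 past the 16-byte aligned BMDMA base with 0x10 per channel; within
 * it the master's PIO and DMA words are at +0x00/+0x04 and the slave's at
 * +0x08/+0x0C. The helper name cs5530_timing_base() is hypothetical and
 * just names that address calculation.
 */
static inline unsigned long cs5530_timing_base(struct ata_port *ap,
					       struct ata_device *adev)
{
	unsigned long base = (ap->ioaddr.bmdma_addr & ~0x0F)
				+ 0x20 + 0x10 * ap->port_no;

	/* Each drive owns an 8-byte pair: PIO word then DMA/tuning word */
	return base + (adev->devno ? 0x08 : 0);
}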
/**
* cs5530_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* We cannot mix MWDMA and UDMA without reloading timings each switch
* master to slave. We track the last DMA setup in order to minimise
* reloads.
*/
static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
u32 tuning, timing = 0;
u8 reg;
/* Find out which table to use */
tuning = inl(base + 0x04);
switch(adev->dma_mode) {
case XFER_UDMA_0:
timing = 0x00921250;break;
case XFER_UDMA_1:
timing = 0x00911140;break;
case XFER_UDMA_2:
timing = 0x00911030;break;
case XFER_MW_DMA_0:
timing = 0x00077771;break;
case XFER_MW_DMA_1:
timing = 0x00012121;break;
case XFER_MW_DMA_2:
timing = 0x00002020;break;
default:
BUG();
}
/* Merge in the PIO format bit */
timing |= (tuning & 0x80000000UL);
if (adev->devno == 0) /* Master */
outl(timing, base + 0x04);
else {
if (timing & 0x00100000)
tuning |= 0x00100000; /* UDMA for both */
else
tuning &= ~0x00100000; /* MWDMA for both */
outl(tuning, base + 0x04);
outl(timing, base + 0x0C);
}
/* Set the DMA capable bit in the BMDMA area */
reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
reg |= (1 << (5 + adev->devno));
outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
/* Remember the last DMA setup we did */
ap->private_data = adev;
}
/**
* cs5530_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Specifically we have a problem that there is only
* one MWDMA/UDMA bit.
*/
static unsigned int cs5530_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
(adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
/* Switch the mode bits */
cs5530_set_dmamode(ap, adev);
}
return ata_qc_issue_prot(qc);
}
static int cs5530_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void cs5530_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, cs5530_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
static struct scsi_host_template cs5530_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations cs5530_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cs5530_set_piomode,
.set_dmamode = cs5530_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cs5530_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = cs5530_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct dmi_system_id palmax_dmi_table[] = {
{
.ident = "Palmax PD1100",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
},
},
{ }
};
static int cs5530_is_palmax(void)
{
if (dmi_check_system(palmax_dmi_table)) {
printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
return 1;
}
return 0;
}
/**
* cs5530_init_one - Initialise a CS5530
* @dev: PCI device
* @id: Entry in match table
*
* Install a driver for the newly found CS5530 companion chip. Most of
* this is just housekeeping. We have to set the chip up correctly and
* turn off various bits of emulation magic.
*/
static int cs5530_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
int compiler_warning_pointless_fix;
struct pci_dev *master_0 = NULL, *cs5530_0 = NULL;
static struct ata_port_info info = {
.sht = &cs5530_sht,
.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07,
.port_ops = &cs5530_port_ops
};
/* The docking connector doesn't do UDMA, and it seems not MWDMA either */
static struct ata_port_info info_palmax_secondary = {
.sht = &cs5530_sht,
.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
.pio_mask = 0x1f,
.port_ops = &cs5530_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
dev = NULL;
while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
switch (dev->device) {
case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
master_0 = pci_dev_get(dev);
break;
case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
cs5530_0 = pci_dev_get(dev);
break;
}
}
if (!master_0) {
printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
goto fail_put;
}
if (!cs5530_0) {
printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
goto fail_put;
}
pci_set_master(cs5530_0);
compiler_warning_pointless_fix = pci_set_mwi(cs5530_0);
/*
* Set PCI CacheLineSize to 16-bytes:
* --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
*
* Note: This value is constant because the 5530 is only a Geode companion
*/
pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);
/*
* Disable trapping of UDMA register accesses (Win98 hack):
* --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
*/
pci_write_config_word(cs5530_0, 0xd0, 0x5006);
/*
* Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
* The other settings are what is necessary to get the register
* into a sane state for IDE DMA operation.
*/
pci_write_config_byte(master_0, 0x40, 0x1e);
/*
* Set max PCI burst size (16-bytes seems to work best):
* 16bytes: set bit-1 at 0x41 (reg value of 0x16)
* all others: clear bit-1 at 0x41, and do:
* 128bytes: OR 0x00 at 0x41
* 256bytes: OR 0x04 at 0x41
* 512bytes: OR 0x08 at 0x41
* 1024bytes: OR 0x0c at 0x41
*/
pci_write_config_byte(master_0, 0x41, 0x14);
/*
* These settings are necessary to get the chip
* into a sane state for IDE DMA operation.
*/
pci_write_config_byte(master_0, 0x42, 0x00);
pci_write_config_byte(master_0, 0x43, 0xc1);
pci_dev_put(master_0);
pci_dev_put(cs5530_0);
if (cs5530_is_palmax())
port_info[1] = &info_palmax_secondary;
/* Now kick off ATA set up */
return ata_pci_init_one(dev, port_info, 2);
fail_put:
if (master_0)
pci_dev_put(master_0);
if (cs5530_0)
pci_dev_put(cs5530_0);
return -ENODEV;
}
static struct pci_device_id cs5530[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },
{ 0, },
};
static struct pci_driver cs5530_pci_driver = {
.name = DRV_NAME,
.id_table = cs5530,
.probe = cs5530_init_one,
.remove = ata_pci_remove_one
};
static int __init cs5530_init(void)
{
return pci_register_driver(&cs5530_pci_driver);
}
static void __exit cs5530_exit(void)
{
pci_unregister_driver(&cs5530_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5530);
MODULE_VERSION(DRV_VERSION);
module_init(cs5530_init);
module_exit(cs5530_exit);

291
drivers/ata/pata_cs5535.c Normal file

@ -0,0 +1,291 @@
/*
* pata-cs5535.c - CS5535 PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon cs5535.c from AMD <Jens.Altmann@amd.com> as cleaned up and
* made readable and Linux style by Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
* and Alexander Kiausch <alex.kiausch@t-online.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Available from AMD web site.
* TODO
* Review errata to see if serializing is necessary
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/msr.h>
#define DRV_NAME "cs5535"
#define DRV_VERSION "0.2.10"
/*
* The Geode (Aka Athlon GX now) uses an internal MSR based
* bus system for control. Demented but there you go.
*/
#define MSR_ATAC_BASE 0x51300000
#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
#define ATAC_RESET (MSR_ATAC_BASE+0x10)
#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
#define ATAC_BM0_CMD_PRIM 0x00
#define ATAC_BM0_STS_PRIM 0x02
#define ATAC_BM0_PRD 0x04
#define CS5535_CABLE_DETECT 0x48
#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL)==0x00009172 )
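/*
 * Illustrative sketch, not part of the original driver: the controller
 * registers above are MSRs, so they are reached with rdmsr()/wrmsr()
 * rather than PCI config or port I/O. Each drive's timing registers sit
 * two MSRs on from the channel 0 drive 0 ones, hence the
 * ATAC_CH0D0_PIO + 2 * devno addressing used below. The helper name
 * cs5535_read_pio_msr() is hypothetical.
 */
static inline u32 cs5535_read_pio_msr(int devno)
{
	u32 lo, hi;

	rdmsr(ATAC_CH0D0_PIO + 2 * devno, lo, hi);
	return lo;	/* the timing value lives in the low 32 bits */
}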
/**
* cs5535_pre_reset - detect cable type
* @ap: Port to detect on
*
* Perform cable detection for ATA66 capable cable. Return a libata
* cable type.
*/
static int cs5535_pre_reset(struct ata_port *ap)
{
u8 cable;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, CS5535_CABLE_DETECT, &cable);
if (cable & 1)
ap->cbl = ATA_CBL_PATA80;
else
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* cs5535_error_handler - reset/probe
* @ap: Port to reset
*
* Reset and configure a port
*/
static void cs5535_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, cs5535_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* cs5535_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. The CS5535 is pretty clean about all this
*/
static void cs5535_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 pio_timings[5] = {
0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
};
static const u16 pio_cmd_timings[5] = {
0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131
};
u32 reg, dummy;
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
/* Command timing has to be for the lowest of the pair of devices */
if (pair) {
int pairmode = pair->pio_mode - XFER_PIO_0;
cmdmode = min(mode, pairmode);
/* Write the other drive timing register if it changed */
if (cmdmode < pairmode)
wrmsr(ATAC_CH0D0_PIO + 2 * pair->devno,
pio_cmd_timings[cmdmode] << 16 | pio_timings[pairmode], 0);
}
/* Write the drive timing register */
wrmsr(ATAC_CH0D0_PIO + 2 * adev->devno,
pio_cmd_timings[cmdmode] << 16 | pio_timings[mode], 0);
/* Set the PIO "format 1" bit in the DMA timing register */
rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg | 0x80000000UL, 0);
}
/**
* cs5535_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
*/
static void cs5535_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 udma_timings[5] = {
0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061
};
static const u32 mwdma_timings[3] = {
0x7F0FFFF3, 0x7F035352, 0x7F024241
};
u32 reg, dummy;
int mode = adev->dma_mode;
rdmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, dummy);
reg &= 0x80000000UL;
if (mode >= XFER_UDMA_0)
reg |= udma_timings[mode - XFER_UDMA_0];
else
reg |= mwdma_timings[mode - XFER_MW_DMA_0];
wrmsr(ATAC_CH0D0_DMA + 2 * adev->devno, reg, 0);
}
static struct scsi_host_template cs5535_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations cs5535_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cs5535_set_piomode,
.set_dmamode = cs5535_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cs5535_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* cs5535_init_one - Initialise a CS5535
* @dev: PCI device
* @id: Entry in match table
*
* Install a driver for the newly found CS5535 companion chip. Most of
* this is just housekeeping. We have to set the chip up correctly and
* turn off various bits of emulation magic.
*/
static int cs5535_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &cs5535_sht,
.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f,
.port_ops = &cs5535_port_ops
};
struct ata_port_info *ports[1] = { &info };
u32 timings, dummy;
/* Check the BIOS set the initial timing clock. If not set the
timings for PIO0 */
rdmsr(ATAC_CH0D0_PIO, timings, dummy);
if (CS5535_BAD_PIO(timings))
wrmsr(ATAC_CH0D0_PIO, 0xF7F4F7F4UL, 0);
rdmsr(ATAC_CH0D1_PIO, timings, dummy);
if (CS5535_BAD_PIO(timings))
wrmsr(ATAC_CH0D1_PIO, 0xF7F4F7F4UL, 0);
return ata_pci_init_one(dev, ports, 1);
}
static struct pci_device_id cs5535[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, 0x002D), },
{ 0, },
};
static struct pci_driver cs5535_pci_driver = {
.name = DRV_NAME,
.id_table = cs5535,
.probe = cs5535_init_one,
.remove = ata_pci_remove_one
};
static int __init cs5535_init(void)
{
return pci_register_driver(&cs5535_pci_driver);
}
static void __exit cs5535_exit(void)
{
pci_unregister_driver(&cs5535_pci_driver);
}
MODULE_AUTHOR("Alan Cox, Jens Altmann, Wolfgang Zuleger, Alexander Kiausch");
MODULE_DESCRIPTION("low-level driver for the NS/AMD CS5535");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5535);
MODULE_VERSION(DRV_VERSION);
module_init(cs5535_init);
module_exit(cs5535_exit);

227
drivers/ata/pata_cypress.c Normal file

@ -0,0 +1,227 @@
/*
* pata_cypress.c - Cypress PATA for new ATA layer
* (C) 2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based heavily on
* linux/drivers/ide/pci/cy82c693.c Version 0.40 Sep. 10, 2002
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_cypress"
#define DRV_VERSION "0.1.2"
/* here are the offset definitions for the registers */
enum {
CY82_IDE_CMDREG = 0x04,
CY82_IDE_ADDRSETUP = 0x48,
CY82_IDE_MASTER_IOR = 0x4C,
CY82_IDE_MASTER_IOW = 0x4D,
CY82_IDE_SLAVE_IOR = 0x4E,
CY82_IDE_SLAVE_IOW = 0x4F,
CY82_IDE_MASTER_8BIT = 0x50,
CY82_IDE_SLAVE_8BIT = 0x51,
CY82_INDEX_PORT = 0x22,
CY82_DATA_PORT = 0x23,
CY82_INDEX_CTRLREG1 = 0x01,
CY82_INDEX_CHANNEL0 = 0x30,
CY82_INDEX_CHANNEL1 = 0x31,
CY82_INDEX_TIMEOUT = 0x32
};
static int cy82c693_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void cy82c693_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, cy82c693_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* cy82c693_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup.
*/
static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_timing t;
const unsigned long T = 1000000 / 33;
short time_16, time_8;
u32 addr;
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
printk(KERN_ERR DRV_NAME ": mome computation failed.\n");
return;
}
time_16 = FIT(t.recover, 0, 15) | (FIT(t.active, 0, 15) << 4);
time_8 = FIT(t.act8b, 0, 15) | (FIT(t.rec8b, 0, 15) << 4);
if (adev->devno == 0) {
pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
addr &= ~0x0F; /* Mask bits */
addr |= FIT(t.setup, 0, 15);
pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
pci_write_config_byte(pdev, CY82_IDE_MASTER_IOR, time_16);
pci_write_config_byte(pdev, CY82_IDE_MASTER_IOW, time_16);
pci_write_config_byte(pdev, CY82_IDE_MASTER_8BIT, time_8);
} else {
pci_read_config_dword(pdev, CY82_IDE_ADDRSETUP, &addr);
addr &= ~0xF0; /* Mask bits */
addr |= (FIT(t.setup, 0, 15) << 4);
pci_write_config_dword(pdev, CY82_IDE_ADDRSETUP, addr);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOR, time_16);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_IOW, time_16);
pci_write_config_byte(pdev, CY82_IDE_SLAVE_8BIT, time_8);
}
}
/**
* cy82c693_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup.
*/
static void cy82c693_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
int reg = CY82_INDEX_CHANNEL0 + ap->port_no;
/* Be afraid, be very afraid. Magic registers in low I/O space */
outb(reg, 0x22);
outb(adev->dma_mode - XFER_MW_DMA_0, 0x23);
/* 0x50 gives the best behaviour on the Alpha's using this chip */
outb(CY82_INDEX_TIMEOUT, 0x22);
outb(0x50, 0x23);
}
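/*
 * Illustrative sketch, not part of the original driver: the DMA setup above
 * talks to the chip through its index/data register pair in low I/O space
 * (CY82_INDEX_PORT at 0x22, CY82_DATA_PORT at 0x23): write the register
 * index first, then the value. The helper name cy82c693_write_indexed() is
 * hypothetical and only names that two-step access.
 */
static inline void cy82c693_write_indexed(u8 index, u8 value)
{
	outb(index, CY82_INDEX_PORT);	/* select the indexed register */
	outb(value, CY82_DATA_PORT);	/* write its new value */
}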
static struct scsi_host_template cy82c693_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations cy82c693_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = cy82c693_set_piomode,
.set_dmamode = cy82c693_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = cy82c693_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int cy82c693_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &cy82c693_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &cy82c693_port_ops
};
static struct ata_port_info *port_info[1] = { &info };
/* Devfn 1 is the ATA primary. The secondary is magic and on devfn2. For the
moment we don't handle the secondary. FIXME */
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
return ata_pci_init_one(pdev, port_info, 1);
}
static struct pci_device_id cy82c693[] = {
{ PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ 0, },
};
static struct pci_driver cy82c693_pci_driver = {
.name = DRV_NAME,
.id_table = cy82c693,
.probe = cy82c693_init_one,
.remove = ata_pci_remove_one
};
static int __init cy82c693_init(void)
{
return pci_register_driver(&cy82c693_pci_driver);
}
static void __exit cy82c693_exit(void)
{
pci_unregister_driver(&cy82c693_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the CY82C693 PATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cy82c693);
MODULE_VERSION(DRV_VERSION);
module_init(cy82c693_init);
module_exit(cy82c693_exit);

342
drivers/ata/pata_efar.c Normal file

@ -0,0 +1,342 @@
/*
* pata_efar.c - EFAR PIIX clone controller driver
*
* (C) 2005 Red Hat <alan@redhat.com>
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* The EFAR is a PIIX4 clone with UDMA66 support. Unlike the later
* Intel ICH controllers the EFAR widened the UDMA mode register bits
* and doesn't require the funky clock selection.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_efar"
#define DRV_VERSION "0.4.1"
/**
* efar_pre_reset - check for 40/80 pin
* @ap: Port
*
* Perform cable detection for the EFAR ATA interface. This is
* different to the PIIX arrangement
*/
static int efar_pre_reset(struct ata_port *ap)
{
static const struct pci_bits efar_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 tmp;
if (!pci_test_config_bits(pdev, &efar_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
pci_read_config_byte(pdev, 0x47, &tmp);
if (tmp & (2 >> ap->port_no))
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* efar_error_handler - Probe/reset specified port on PATA host controller
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void efar_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, efar_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* efar_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void efar_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
u16 idetm_data;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. The EFAR is a clone so very similar
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio > 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev)) /* PIO 3/4 require IORDY */
control |= 2; /* IE enable */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
pci_read_config_word(dev, idetm_port, &idetm_data);
/* Enable PPE, IE and TIME as appropriate */
if (adev->devno == 0) {
idetm_data &= 0xCCF0;
idetm_data |= control;
idetm_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
} else {
int shift = 4 * ap->port_no;
u8 slave_data;
idetm_data &= 0xCC0F;
idetm_data |= (control << 4);
/* Slave timing in separate register */
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= 0x0F << shift;
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << shift;
pci_write_config_byte(dev, 0x44, slave_data);
}
idetm_data |= 0x4000; /* Ensure SITRE is enabled */
pci_write_config_word(dev, idetm_port, idetm_data);
}
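/*
 * Illustrative sketch, not part of the original driver: for the master
 * drive the IDETIM word assembled above packs the control bits
 * (TIME1/IE/PPE) into the low nibble, ISP into bits 13:12, RTC into bits
 * 9:8, and forces bit 14 (SITRE) so the slave uses its own timing
 * register. The helper name efar_master_idetm() is hypothetical; isp and
 * rtc are the two columns of the timings[] table above.
 */
static inline u16 efar_master_idetm(int control, u8 isp, u8 rtc)
{
	return 0x4000 | (isp << 12) | (rtc << 8) | control;
}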
/**
* efar_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set UDMA/MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void efar_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 master_port = ap->port_no ? 0x42 : 0x40;
u16 master_data;
u8 speed = adev->dma_mode;
int devid = adev->devno + 2 * ap->port_no;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(dev, master_port, &master_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (speed >= XFER_UDMA_0) {
unsigned int udma = adev->dma_mode - XFER_UDMA_0;
u16 udma_timing;
udma_enable |= (1 << devid);
/* Load the UDMA mode number */
pci_read_config_word(dev, 0x4A, &udma_timing);
udma_timing &= ~(7 << (4 * devid));
udma_timing |= udma << (4 * devid);
pci_write_config_word(dev, 0x4A, udma_timing);
} else {
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
u8 slave_data;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
control = 3; /* IORDY|TIME1 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
if (adev->devno) { /* Slave */
master_data &= 0xFF4F; /* Mask out IORDY|TIME1|DMAONLY */
master_data |= control << 4;
pci_read_config_byte(dev, 0x44, &slave_data);
slave_data &= (0x0F + 0xE1 * ap->port_no);
/* Load the matching timing */
slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) << (ap->port_no ? 4 : 0);
pci_write_config_byte(dev, 0x44, slave_data);
} else { /* Master */
master_data &= 0xCCF4; /* Mask out IORDY|TIME1|DMAONLY
and master timing bits */
master_data |= control;
master_data |=
(timings[pio][0] << 12) |
(timings[pio][1] << 8);
}
udma_enable &= ~(1 << devid);
pci_write_config_word(dev, master_port, master_data);
}
pci_write_config_byte(dev, 0x48, udma_enable);
}
static struct scsi_host_template efar_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations efar_ops = {
.port_disable = ata_port_disable,
.set_piomode = efar_set_piomode,
.set_dmamode = efar_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = efar_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* efar_init_one - Register EFAR ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in efar_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int efar_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
static struct ata_port_info info = {
.sht = &efar_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x0f, /* UDMA 66 */
.port_ops = &efar_ops,
};
static struct ata_port_info *port_info[2] = { &info, &info };
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id efar_pci_tbl[] = {
{ 0x1055, 0x9130, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
};
static struct pci_driver efar_pci_driver = {
.name = DRV_NAME,
.id_table = efar_pci_tbl,
.probe = efar_init_one,
.remove = ata_pci_remove_one,
};
static int __init efar_init(void)
{
return pci_register_driver(&efar_pci_driver);
}
static void __exit efar_exit(void)
{
pci_unregister_driver(&efar_pci_driver);
}
module_init(efar_init);
module_exit(efar_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for EFAR PIIX clones");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efar_pci_tbl);
MODULE_VERSION(DRV_VERSION);

478
drivers/ata/pata_hpt366.c Normal file

@ -0,0 +1,478 @@
/*
* Libata driver for the highpoint 366 and 368 UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
* linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
*
*
* TODO
* Maybe PLL mode
* Look into engine reset on timeout errors. Should not be
* required.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt366"
#define DRV_VERSION "0.5"
struct hpt_clock {
u8 xfer_speed;
u32 timing;
};
/* key for bus clock timings
* bit
* 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
* DMA. cycles = value + 1
* 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
* DMA. cycles = value + 1
* 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
* register access.
* 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
* register access.
* 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
* during task file register access.
* 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
* xfer.
* 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
* register access.
* 28 UDMA enable
* 29 DMA enable
* 30 PIO_MST enable. if set, the chip is in bus master mode during
* PIO.
* 31 FIFO enable.
*/
static const struct hpt_clock hpt366_40[] = {
{ XFER_UDMA_4, 0x900fd943 },
{ XFER_UDMA_3, 0x900ad943 },
{ XFER_UDMA_2, 0x900bd943 },
{ XFER_UDMA_1, 0x9008d943 },
{ XFER_UDMA_0, 0x9008d943 },
{ XFER_MW_DMA_2, 0xa008d943 },
{ XFER_MW_DMA_1, 0xa010d955 },
{ XFER_MW_DMA_0, 0xa010d9fc },
{ XFER_PIO_4, 0xc008d963 },
{ XFER_PIO_3, 0xc010d974 },
{ XFER_PIO_2, 0xc010d997 },
{ XFER_PIO_1, 0xc010d9c7 },
{ XFER_PIO_0, 0xc018d9d9 },
{ 0, 0x0120d9d9 }
};
static const struct hpt_clock hpt366_33[] = {
{ XFER_UDMA_4, 0x90c9a731 },
{ XFER_UDMA_3, 0x90cfa731 },
{ XFER_UDMA_2, 0x90caa731 },
{ XFER_UDMA_1, 0x90cba731 },
{ XFER_UDMA_0, 0x90c8a731 },
{ XFER_MW_DMA_2, 0xa0c8a731 },
{ XFER_MW_DMA_1, 0xa0c8a732 }, /* 0xa0c8a733 */
{ XFER_MW_DMA_0, 0xa0c8a797 },
{ XFER_PIO_4, 0xc0c8a731 },
{ XFER_PIO_3, 0xc0c8a742 },
{ XFER_PIO_2, 0xc0d0a753 },
{ XFER_PIO_1, 0xc0d0a7a3 }, /* 0xc0d0a793 */
{ XFER_PIO_0, 0xc0d0a7aa }, /* 0xc0d0a7a7 */
{ 0, 0x0120a7a7 }
};
static const struct hpt_clock hpt366_25[] = {
{ XFER_UDMA_4, 0x90c98521 },
{ XFER_UDMA_3, 0x90cf8521 },
{ XFER_UDMA_2, 0x90cf8521 },
{ XFER_UDMA_1, 0x90cb8521 },
{ XFER_UDMA_0, 0x90cb8521 },
{ XFER_MW_DMA_2, 0xa0ca8521 },
{ XFER_MW_DMA_1, 0xa0ca8532 },
{ XFER_MW_DMA_0, 0xa0ca8575 },
{ XFER_PIO_4, 0xc0ca8521 },
{ XFER_PIO_3, 0xc0ca8532 },
{ XFER_PIO_2, 0xc0ca8542 },
{ XFER_PIO_1, 0xc0d08572 },
{ XFER_PIO_0, 0xc0d08585 },
{ 0, 0x01208585 }
};
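/*
 * Illustrative sketch, not part of the original driver: the bit key above
 * can be expressed as field extractors, which makes the timing tables
 * easier to read. The helper names are hypothetical; the shifts follow
 * the key (bits 18:21 hold the UDMA cycle time, bits 28-31 the
 * UDMA/DMA/PIO_MST/FIFO enables).
 */
static inline u8 hpt366_udma_cycle(u32 timing)
{
	return (timing >> 18) & 0x0f;	/* bits 18:21: UDMA cycle time */
}

static inline u8 hpt366_enable_bits(u32 timing)
{
	return timing >> 28;	/* bits 28-31: UDMA/DMA/PIO_MST/FIFO enables */
}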
static const char *bad_ata33[] = {
"Maxtor 92720U8", "Maxtor 92040U6", "Maxtor 91360U4", "Maxtor 91020U3", "Maxtor 90845U3", "Maxtor 90650U2",
"Maxtor 91360D8", "Maxtor 91190D7", "Maxtor 91020D6", "Maxtor 90845D5", "Maxtor 90680D4", "Maxtor 90510D3", "Maxtor 90340D2",
"Maxtor 91152D8", "Maxtor 91008D7", "Maxtor 90845D6", "Maxtor 90840D6", "Maxtor 90720D5", "Maxtor 90648D5", "Maxtor 90576D4",
"Maxtor 90510D4",
"Maxtor 90432D3", "Maxtor 90288D2", "Maxtor 90256D2",
"Maxtor 91000D8", "Maxtor 90910D8", "Maxtor 90875D7", "Maxtor 90840D7", "Maxtor 90750D6", "Maxtor 90625D5", "Maxtor 90500D4",
"Maxtor 91728D8", "Maxtor 91512D7", "Maxtor 91303D6", "Maxtor 91080D5", "Maxtor 90845D4", "Maxtor 90680D4", "Maxtor 90648D3", "Maxtor 90432D2",
NULL
};
static const char *bad_ata66_4[] = {
"IBM-DTLA-307075",
"IBM-DTLA-307060",
"IBM-DTLA-307045",
"IBM-DTLA-307030",
"IBM-DTLA-307020",
"IBM-DTLA-307015",
"IBM-DTLA-305040",
"IBM-DTLA-305030",
"IBM-DTLA-305020",
"IC35L010AVER07-0",
"IC35L020AVER07-0",
"IC35L030AVER07-0",
"IC35L040AVER07-0",
"IC35L060AVER07-0",
"WDC AC310200R",
NULL
};
static const char *bad_ata66_3[] = {
"WDC AC310200R",
NULL
};
static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr, const char *list[])
{
unsigned char model_num[40];
char *s;
unsigned int len;
int i = 0;
ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
s = &model_num[0];
len = strnlen(s, sizeof(model_num));
/* ATAPI specifies that empty space is blank-filled; remove blanks */
while ((len > 0) && (s[len - 1] == ' ')) {
len--;
s[len] = 0;
}
while(list[i] != NULL) {
if (!strncmp(list[i], s, len)) {
printk(KERN_WARNING DRV_NAME ": %s is not supported for %s.\n",
modestr, list[i]);
return 1;
}
i++;
}
return 0;
}
/**
* hpt366_filter - mode selection filter
* @ap: ATA interface
* @adev: ATA device
*
* Block UDMA on devices that cause trouble with this controller.
*/
static unsigned long hpt366_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
if (adev->class == ATA_DEV_ATA) {
if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33))
mask &= ~ATA_MASK_UDMA;
if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3))
mask &= ~(0x07 << ATA_SHIFT_UDMA);
if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4))
mask &= ~(0x0F << ATA_SHIFT_UDMA);
}
return ata_pci_default_filter(ap, adev, mask);
}
/**
* hpt36x_find_mode - find the hpt36x timing
* @ap: ATA port
* @speed: transfer mode
*
* Return the 32bit register programming information for this channel
* that matches the speed provided.
*/
static u32 hpt36x_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = ap->host->private_data;
while(clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
}
BUG();
return 0xffffffffU; /* silence compiler warning */
}
static int hpt36x_pre_reset(struct ata_port *ap)
{
u8 ata66;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, 0x5A, &ata66);
if (ata66 & (1 << ap->port_no))
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* hpt36x_error_handler - reset the hpt36x bus
* @ap: ATA port to reset
*
* Perform the reset handling for the 366/368
*/
static void hpt36x_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, hpt36x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* hpt366_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
static void hpt366_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
u32 reg;
u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
addr2 = 0x51 + 4 * ap->port_no;
/* Fast interrupt prediction disable, hold off interrupt disable */
pci_read_config_byte(pdev, addr2, &fast);
if (fast & 0x80) {
fast &= ~0x80;
pci_write_config_byte(pdev, addr2, fast);
}
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt36x_find_mode(ap, adev->pio_mode);
mode &= ~0x8000000; /* No FIFO in PIO */
mode &= ~0x30070000; /* Leave config bits alone */
reg &= 0x30070000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
/**
* hpt366_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
*/
static void hpt366_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
u32 reg;
u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
addr2 = 0x51 + 4 * ap->port_no;
/* Fast interrupt prediction disable, hold off interrupt disable */
pci_read_config_byte(pdev, addr2, &fast);
if (fast & 0x80) {
fast &= ~0x80;
pci_write_config_byte(pdev, addr2, fast);
}
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt36x_find_mode(ap, adev->dma_mode);
mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
mode &= ~0xC0000000; /* Leave config bits alone */
reg &= 0xC0000000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
static struct scsi_host_template hpt36x_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
/*
* Configuration for HPT366/68
*/
static struct ata_port_operations hpt366_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = hpt366_set_piomode,
.set_dmamode = hpt366_set_dmamode,
.mode_filter = hpt366_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = hpt36x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* hpt36x_init_one - Initialise an HPT366/368
* @dev: PCI device
* @id: Entry in match table
*
* Initialise an HPT36x device. There are some interesting complications
* here. Firstly the chip may report 366 and be one of several variants.
* Secondly all the timings depend on the clock for the chip which we must
* detect and look up
*
* These are the known chip mappings. It may be missing a couple of later
* releases.
*
* Chip version PCI Rev Notes
* HPT366 4 (HPT366) 0 UDMA66
* HPT366 4 (HPT366) 1 UDMA66
* HPT368 4 (HPT366) 2 UDMA66
* HPT37x/30x 4 (HPT366) 3+ Other driver
*
*/
static int hpt36x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info_hpt366 = {
.sht = &hpt36x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f,
.port_ops = &hpt366_port_ops
};
struct ata_port_info *port_info[2] = {&info_hpt366, &info_hpt366};
u32 class_rev;
u32 reg1;
u8 drive_fast;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xFF;
/* May be a later chip in disguise. Check */
/* Newer chips are not in the HPT36x driver. Ignore them */
if (class_rev > 2)
return -ENODEV;
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
pci_read_config_byte(dev, 0x51, &drive_fast);
if (drive_fast & 0x80)
pci_write_config_byte(dev, 0x51, drive_fast & ~0x80);
pci_read_config_dword(dev, 0x40, &reg1);
/* PCI clocking determines the ATA timing values to use */
/* info_hpt366 is safe against re-entry so we can scribble on it */
switch(reg1 & 0x700) {
case 5:
info_hpt366.private_data = &hpt366_40;
break;
case 9:
info_hpt366.private_data = &hpt366_25;
break;
default:
info_hpt366.private_data = &hpt366_33;
break;
}
/* Now kick off ATA set up */
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id hpt36x[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
{ 0, },
};
static struct pci_driver hpt36x_pci_driver = {
.name = DRV_NAME,
.id_table = hpt36x,
.probe = hpt36x_init_one,
.remove = ata_pci_remove_one
};
static int __init hpt36x_init(void)
{
return pci_register_driver(&hpt36x_pci_driver);
}
static void __exit hpt36x_exit(void)
{
pci_unregister_driver(&hpt36x_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT366/368");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt36x);
MODULE_VERSION(DRV_VERSION);
module_init(hpt36x_init);
module_exit(hpt36x_exit);

1257
drivers/ata/pata_hpt37x.c Normal file

File diff suppressed because it is too large.

597
drivers/ata/pata_hpt3x2n.c Normal file

@ -0,0 +1,597 @@
/*
* Libata driver for the highpoint 372N and 302N UDMA66 ATA controllers.
*
* This driver is heavily based upon:
*
* linux/drivers/ide/pci/hpt366.c Version 0.36 April 25, 2003
*
* Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 2001 Sun Microsystems, Inc.
* Portions Copyright (C) 2003 Red Hat Inc
*
*
* TODO
* 371N
* Work out best PLL policy
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x2n"
#define DRV_VERSION "0.3"
enum {
HPT_PCI_FAST = (1 << 31),
PCI66 = (1 << 1),
USE_DPLL = (1 << 0)
};
struct hpt_clock {
u8 xfer_speed;
u32 timing;
};
struct hpt_chip {
const char *name;
struct hpt_clock *clocks[3];
};
/* key for bus clock timings
* bit
* 0:3 data_high_time. inactive time of DIOW_/DIOR_ for PIO and MW
* DMA. cycles = value + 1
* 4:8 data_low_time. active time of DIOW_/DIOR_ for PIO and MW
* DMA. cycles = value + 1
* 9:12 cmd_high_time. inactive time of DIOW_/DIOR_ during task file
* register access.
* 13:17 cmd_low_time. active time of DIOW_/DIOR_ during task file
* register access.
* 18:21 udma_cycle_time. clock freq and clock cycles for UDMA xfer.
* during task file register access.
* 22:24 pre_high_time. time to initialize 1st cycle for PIO and MW DMA
* xfer.
* 25:27 cmd_pre_high_time. time to initialize 1st PIO cycle for task
* register access.
* 28 UDMA enable
* 29 DMA enable
* 30 PIO_MST enable. if set, the chip is in bus master mode during
* PIO.
* 31 FIFO enable.
*/
/* 66MHz DPLL clocks */
static struct hpt_clock hpt3x2n_clocks[] = {
{ XFER_UDMA_7, 0x1c869c62 },
{ XFER_UDMA_6, 0x1c869c62 },
{ XFER_UDMA_5, 0x1c8a9c62 },
{ XFER_UDMA_4, 0x1c8a9c62 },
{ XFER_UDMA_3, 0x1c8e9c62 },
{ XFER_UDMA_2, 0x1c929c62 },
{ XFER_UDMA_1, 0x1c9a9c62 },
{ XFER_UDMA_0, 0x1c829c62 },
{ XFER_MW_DMA_2, 0x2c829c62 },
{ XFER_MW_DMA_1, 0x2c829c66 },
{ XFER_MW_DMA_0, 0x2c829d2c },
{ XFER_PIO_4, 0x0c829c62 },
{ XFER_PIO_3, 0x0c829c84 },
{ XFER_PIO_2, 0x0c829ca6 },
{ XFER_PIO_1, 0x0d029d26 },
{ XFER_PIO_0, 0x0d029d5e },
{ 0, 0x0d029d5e }
};
/**
* hpt3x2n_find_mode - reset the hpt3x2n bus
* @ap: ATA port
* @speed: transfer mode
*
* Return the 32bit register programming information for this channel
* that matches the speed provided. For the moment the clocks table
* is hard coded but easy to change. This will be needed if we use
* different DPLLs
*/
static u32 hpt3x2n_find_mode(struct ata_port *ap, int speed)
{
struct hpt_clock *clocks = hpt3x2n_clocks;
while(clocks->xfer_speed) {
if (clocks->xfer_speed == speed)
return clocks->timing;
clocks++;
}
BUG();
return 0xffffffffU; /* silence compiler warning */
}
/**
* hpt3xn_pre_reset - reset the hpt3x2n bus
* @ap: ATA port to reset
*
* Perform the initial reset handling for the 3x2n series controllers.
* Reset the hardware and state machine, obtain the cable type.
*/
static int hpt3xn_pre_reset(struct ata_port *ap)
{
u8 scr2, ata66;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, 0x5B, &scr2);
pci_write_config_byte(pdev, 0x5B, scr2 & ~0x01);
/* Cable register now active */
pci_read_config_byte(pdev, 0x5A, &ata66);
/* Restore state */
pci_write_config_byte(pdev, 0x5B, scr2);
if (ata66 & (1 << ap->port_no))
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
/* Reset the state machine */
pci_write_config_byte(pdev, 0x50, 0x37);
pci_write_config_byte(pdev, 0x54, 0x37);
udelay(100);
return ata_std_prereset(ap);
}
/**
* hpt3x2n_error_handler - probe the hpt3x2n bus
* @ap: ATA port to reset
*
* Perform the probe reset handling for the 3x2N
*/
static void hpt3x2n_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, hpt3xn_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* hpt3x2n_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Perform PIO mode setup.
*/
static void hpt3x2n_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
u32 reg;
u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
addr2 = 0x51 + 4 * ap->port_no;
/* Fast interrupt prediction disable, hold off interrupt disable */
pci_read_config_byte(pdev, addr2, &fast);
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt3x2n_find_mode(ap, adev->pio_mode);
mode &= ~0x8000000; /* No FIFO in PIO */
mode &= ~0x30070000; /* Leave config bits alone */
reg &= 0x30070000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
/**
* hpt3x2n_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
*/
static void hpt3x2n_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 addr1, addr2;
u32 reg;
u32 mode;
u8 fast;
addr1 = 0x40 + 4 * (adev->devno + 2 * ap->port_no);
addr2 = 0x51 + 4 * ap->port_no;
/* Fast interrupt prediction disable, hold off interrupt disable */
pci_read_config_byte(pdev, addr2, &fast);
fast &= ~0x07;
pci_write_config_byte(pdev, addr2, fast);
pci_read_config_dword(pdev, addr1, &reg);
mode = hpt3x2n_find_mode(ap, adev->dma_mode);
mode |= 0x8000000; /* FIFO in MWDMA or UDMA */
mode &= ~0xC0000000; /* Leave config bits alone */
reg &= 0xC0000000; /* Strip timing bits */
pci_write_config_dword(pdev, addr1, reg | mode);
}
/**
* hpt3x2n_bmdma_end - DMA engine stop
* @qc: ATA command
*
* Clean up after the HPT3x2n and later DMA engine
*/
static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int mscreg = 0x50 + 2 * ap->port_no;
u8 bwsr_stat, msc_stat;
pci_read_config_byte(pdev, 0x6A, &bwsr_stat);
pci_read_config_byte(pdev, mscreg, &msc_stat);
if (bwsr_stat & (1 << ap->port_no))
pci_write_config_byte(pdev, mscreg, msc_stat | 0x30);
ata_bmdma_stop(qc);
}
/**
* hpt3x2n_set_clock - clock control
* @ap: ATA port
* @source: 0x21 or 0x23 for PLL or PCI sourced clock
*
* Switch the ATA bus clock between the PLL and PCI clock sources
* while correctly isolating the bus and resetting internal logic
*
* We must use the DPLL for
* - writing
* - second channel UDMA7 (SATA ports) or higher
* - 66MHz PCI
*
* or we will underclock the device and get reduced performance.
*/
static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
unsigned long bmdma = ap->ioaddr.bmdma_addr;
/* Tristate the bus */
outb(0x80, bmdma+0x73);
outb(0x80, bmdma+0x77);
/* Switch clock and reset channels */
outb(source, bmdma+0x7B);
outb(0xC0, bmdma+0x79);
/* Reset state machines */
outb(0x37, bmdma+0x70);
outb(0x37, bmdma+0x74);
/* Complete reset */
outb(0x00, bmdma+0x79);
/* Reconnect channels to bus */
outb(0x00, bmdma+0x73);
outb(0x00, bmdma+0x77);
}
/* Check if our partner interface is busy */
static int hpt3x2n_pair_idle(struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct ata_port *pair = host->ports[ap->port_no ^ 1];
if (pair->hsm_task_state == HSM_ST_IDLE)
return 1;
return 0;
}
static int hpt3x2n_use_dpll(struct ata_port *ap, int reading)
{
long flags = (long)ap->host->private_data;
/* See if we should use the DPLL */
if (reading == 0)
return USE_DPLL; /* Needed for write */
if (flags & PCI66)
return USE_DPLL; /* Needed at 66Mhz */
return 0;
}
static unsigned int hpt3x2n_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_taskfile *tf = &qc->tf;
struct ata_port *ap = qc->ap;
int flags = (long)ap->host->private_data;
if (hpt3x2n_pair_idle(ap)) {
int dpll = hpt3x2n_use_dpll(ap, (tf->flags & ATA_TFLAG_WRITE));
if ((flags & USE_DPLL) != dpll) {
if (dpll == 1)
hpt3x2n_set_clock(ap, 0x21);
else
hpt3x2n_set_clock(ap, 0x23);
}
}
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template hpt3x2n_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
/*
* Configuration for HPT3x2n.
*/
static struct ata_port_operations hpt3x2n_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = hpt3x2n_set_piomode,
.set_dmamode = hpt3x2n_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = hpt3x2n_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = hpt3x2n_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = hpt3x2n_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* hpt3xn_calibrate_dpll - Calibrate the DPLL loop
* @dev: PCI device
*
* Perform a calibration cycle on the HPT3xN DPLL. Returns 1 if this
* succeeds
*/
static int hpt3xn_calibrate_dpll(struct pci_dev *dev)
{
u8 reg5b;
u32 reg5c;
int tries;
for(tries = 0; tries < 0x5000; tries++) {
udelay(50);
pci_read_config_byte(dev, 0x5b, &reg5b);
if (reg5b & 0x80) {
/* See if it stays set */
for(tries = 0; tries < 0x1000; tries ++) {
pci_read_config_byte(dev, 0x5b, &reg5b);
/* Failed ? */
if ((reg5b & 0x80) == 0)
return 0;
}
/* Turn off tuning, we have the DPLL set */
pci_read_config_dword(dev, 0x5c, &reg5c);
pci_write_config_dword(dev, 0x5c, reg5c & ~ 0x100);
return 1;
}
}
/* Never went stable */
return 0;
}
static int hpt3x2n_pci_clock(struct pci_dev *pdev)
{
unsigned long freq;
u32 fcnt;
pci_read_config_dword(pdev, 0x70/*CHECKME*/, &fcnt);
if ((fcnt >> 12) != 0xABCDE) {
printk(KERN_WARNING "hpt3xn: BIOS clock data not set.\n");
return 33; /* Not BIOS set */
}
fcnt &= 0x1FF;
freq = (fcnt * 77) / 192;
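/* Worked example (illustrative): a BIOS count of 82 gives
   (82 * 77) / 192 = 32, which the banding below reports as 33MHz. */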
/* Clamp to bands */
if (freq < 40)
return 33;
if (freq < 45)
return 40;
if (freq < 55)
return 50;
return 66;
}
/**
* hpt3x2n_init_one - Initialise an HPT37X/302
* @dev: PCI device
* @id: Entry in match table
*
* Initialise an HPT3x2n device. There are some interesting complications
* here. Firstly the chip may report 366 and be one of several variants.
* Secondly all the timings depend on the clock for the chip which we must
* detect and look up
*
* These are the known chip mappings. The list may be missing a couple of later
* releases.
*
* Chip version PCI Rev Notes
* HPT372 4 (HPT366) 5 Other driver
* HPT372N 4 (HPT366) 6 UDMA133
* HPT372 5 (HPT372) 1 Other driver
* HPT372N 5 (HPT372) 2 UDMA133
* HPT302 6 (HPT302) * Other driver
* HPT302N 6 (HPT302) > 1 UDMA133
* HPT371 7 (HPT371) * Other driver
* HPT371N 7 (HPT371) > 1 UDMA133
* HPT374 8 (HPT374) * Other driver
* HPT372N 9 (HPT372N) * UDMA133
*
* (1) UDMA133 support depends on the bus clock
*
* To pin down HPT371N
*/
static int hpt3x2n_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* HPT372N and friends - UDMA133 */
static struct ata_port_info info = {
.sht = &hpt3x2n_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
.port_ops = &hpt3x2n_port_ops
};
struct ata_port_info *port_info[2];
struct ata_port_info *port = &info;
u8 irqmask;
u32 class_rev;
unsigned int pci_mhz;
unsigned int f_low, f_high;
int adjust;
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xFF;
switch(dev->device) {
case PCI_DEVICE_ID_TTI_HPT366:
if (class_rev < 6)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT372:
/* 372N if rev >= 1*/
if (class_rev == 0)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT302:
if (class_rev < 2)
return -ENODEV;
break;
case PCI_DEVICE_ID_TTI_HPT372N:
break;
default:
printk(KERN_ERR "pata_hpt3x2n: PCI table is bogus please report (%d).\n", dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, (L1_CACHE_BYTES / 4));
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x78);
pci_write_config_byte(dev, PCI_MIN_GNT, 0x08);
pci_write_config_byte(dev, PCI_MAX_LAT, 0x08);
pci_read_config_byte(dev, 0x5A, &irqmask);
irqmask &= ~0x10;
pci_write_config_byte(dev, 0x5a, irqmask);
/* Tune the PLL. HPT recommend using 75 for SATA, 66 for UDMA133 or
50 for UDMA100. Right now we always use 66 */
pci_mhz = hpt3x2n_pci_clock(dev);
f_low = (pci_mhz * 48) / 66; /* PCI Mhz for 66Mhz DPLL */
f_high = f_low + 2; /* Tolerance */
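/* Worked example (illustrative): on a 33MHz PCI bus f_low = (33 * 48) / 66
   = 24 and f_high = 26, so register 0x5C is loaded with that 24..26 window
   plus the 0x100 tuning enable bit. */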
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low | 0x100);
/* PLL clock */
pci_write_config_byte(dev, 0x5B, 0x21);
/* Unlike the 37x we don't try jiggling the frequency */
for(adjust = 0; adjust < 8; adjust++) {
if (hpt3xn_calibrate_dpll(dev))
break;
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8)
printk(KERN_WARNING "hpt3xn: DPLL did not stabilize.\n");
/* Set our private data up. We only need a few flags so we use
it directly */
port->private_data = NULL;
if (pci_mhz > 60)
port->private_data = (void *)PCI66;
/* Now kick off ATA set up */
port_info[0] = port_info[1] = port;
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id hpt3x2n[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT366), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT302), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT372N), },
{ 0, },
};
static struct pci_driver hpt3x2n_pci_driver = {
.name = DRV_NAME,
.id_table = hpt3x2n,
.probe = hpt3x2n_init_one,
.remove = ata_pci_remove_one
};
static int __init hpt3x2n_init(void)
{
return pci_register_driver(&hpt3x2n_pci_driver);
}
static void __exit hpt3x2n_exit(void)
{
pci_unregister_driver(&hpt3x2n_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT3x2n/30x");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x2n);
MODULE_VERSION(DRV_VERSION);
module_init(hpt3x2n_init);
module_exit(hpt3x2n_exit);

226
drivers/ata/pata_hpt3x3.c Normal file

@@ -0,0 +1,226 @@
/*
* pata_hpt3x3 - HPT3x3 driver
* (c) Copyright 2005-2006 Red Hat
*
* Was pata_hpt34x but the naming was confusing as it supported the
* 343 and 363 so it has been renamed.
*
* Based on:
* linux/drivers/ide/pci/hpt34x.c Version 0.40 Sept 10, 2002
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
*
* May be copied or modified under the terms of the GNU General Public
* License
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_hpt3x3"
#define DRV_VERSION "0.4.1"
static int hpt3x3_probe_init(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* hpt3x3_error_handler - reset the hpt3x3 bus
* @ap: ATA port to reset
*
* Perform the housekeeping when doing an ATA bus reset. We just
* need to force the cable type.
*/
static void hpt3x3_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, hpt3x3_probe_init, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* hpt3x3_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the HPT3x3 as
* all we have to do is clear the MWDMA and UDMA bits then load the
* mode number.
*/
static void hpt3x3_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 r1, r2;
int dn = 2 * ap->port_no + adev->devno;
pci_read_config_dword(pdev, 0x44, &r1);
pci_read_config_dword(pdev, 0x48, &r2);
/* Load the PIO timing number */
r1 &= ~(7 << (3 * dn));
r1 |= (adev->pio_mode - XFER_PIO_0) << (3 * dn);
r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
pci_write_config_dword(pdev, 0x44, r1);
pci_write_config_dword(pdev, 0x48, r2);
}
/**
* hpt3x3_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* Set up the channel for MWDMA or UDMA modes. Much the same as with
* PIO, load the mode number and then set MWDMA or UDMA flag.
*/
static void hpt3x3_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 r1, r2;
int dn = 2 * ap->port_no + adev->devno;
int mode_num = adev->dma_mode & 0x0F;
pci_read_config_dword(pdev, 0x44, &r1);
pci_read_config_dword(pdev, 0x48, &r2);
/* Load the timing number */
r1 &= ~(7 << (3 * dn));
r1 |= (mode_num << (3 * dn));
r2 &= ~(0x11 << dn); /* Clear MWDMA and UDMA bits */
if (adev->dma_mode >= XFER_UDMA_0)
r2 |= 0x01 << dn; /* Ultra mode */
else
r2 |= 0x10 << dn; /* MWDMA */
pci_write_config_dword(pdev, 0x44, r1);
pci_write_config_dword(pdev, 0x48, r2);
}
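/*
 * Worked example (illustrative): for the slave device on the secondary
 * channel dn = 2 * 1 + 1 = 3, so the mode number above occupies bits 9..11
 * of register 0x44, while in register 0x48 bit 3 selects UDMA and bit 7
 * selects MWDMA for that device.
 */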
static struct scsi_host_template hpt3x3_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations hpt3x3_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = hpt3x3_set_piomode,
.set_dmamode = hpt3x3_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = hpt3x3_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* hpt3x3_init_one - Initialise an HPT343/363
* @dev: PCI device
* @id: Entry in match table
*
* Perform basic initialisation. The chip has a quirk that it won't
* function unless it is at XX00. The old ATA driver touched this up
* but we leave it for pci quirks to do properly.
*/
static int hpt3x3_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &hpt3x3_sht,
.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07,
.port_ops = &hpt3x3_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
u16 cmd;
/* Initialize the board */
pci_write_config_word(dev, 0x80, 0x00);
/* Check if it is a 343 or a 363. 363 has COMMAND_MEMORY set */
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (cmd & PCI_COMMAND_MEMORY)
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF0);
else
pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20);
/* Now kick off ATA set up */
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id hpt3x3[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TTI, PCI_DEVICE_ID_TTI_HPT343), },
{ 0, },
};
static struct pci_driver hpt3x3_pci_driver = {
.name = DRV_NAME,
.id_table = hpt3x3,
.probe = hpt3x3_init_one,
.remove = ata_pci_remove_one
};
static int __init hpt3x3_init(void)
{
return pci_register_driver(&hpt3x3_pci_driver);
}
static void __exit hpt3x3_exit(void)
{
pci_unregister_driver(&hpt3x3_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Highpoint HPT343/363");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, hpt3x3);
MODULE_VERSION(DRV_VERSION);
module_init(hpt3x3_init);
module_exit(hpt3x3_exit);

156
drivers/ata/pata_isapnp.c Normal file

@@ -0,0 +1,156 @@
/*
* pata-isapnp.c - ISA PnP PATA controller driver.
* Copyright 2005/2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
*
* Based in part on ide-pnp.c by Andrey Panin <pazke@donpac.ru>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/isapnp.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#define DRV_NAME "pata_isapnp"
#define DRV_VERSION "0.1.5"
static struct scsi_host_template isapnp_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations isapnp_port_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* isapnp_init_one - attach an isapnp interface
* @idev: PnP device
* @dev_id: matching detect line
*
* Register an ISA bus IDE interface. Such interfaces are PIO 0 and
* use a non-shared IRQ.
*/
static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
{
struct ata_probe_ent ae;
if (pnp_port_valid(idev, 0) == 0)
return -ENODEV;
/* FIXME: Should select polled PIO here rather than fail */
if (pnp_irq_valid(idev, 0) == 0)
return -ENODEV;
memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &idev->dev;
ae.port_ops = &isapnp_port_ops;
ae.sht = &isapnp_sht;
ae.n_ports = 1;
ae.pio_mask = 1; /* ISA so PIO 0 cycles */
ae.irq = pnp_irq(idev, 0);
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS;
ae.port[0].cmd_addr = pnp_port_start(idev, 0);
if (pnp_port_valid(idev, 1)) {
ae.port[0].altstatus_addr = pnp_port_start(idev, 1);
ae.port[0].ctl_addr = pnp_port_start(idev, 1);
ae.port_flags |= ATA_FLAG_SRST;
}
ata_std_ports(&ae.port[0]);
if (ata_device_add(&ae) == 0)
return -ENODEV;
return 0;
}
/**
* isapnp_remove_one - unplug an isapnp interface
* @idev: PnP device
*
* Remove a previously configured PnP ATA port. Called only on module
* unload events as the core does not currently deal with ISAPnP docking.
*/
static void isapnp_remove_one(struct pnp_dev *idev)
{
struct device *dev = &idev->dev;
struct ata_host *host = dev_get_drvdata(dev);
ata_host_remove(host);
dev_set_drvdata(dev, NULL);
}
static struct pnp_device_id isapnp_devices[] = {
/* Generic ESDI/IDE/ATA compatible hard disk controller */
{.id = "PNP0600", .driver_data = 0},
{.id = ""}
};
static struct pnp_driver isapnp_driver = {
.name = DRV_NAME,
.id_table = isapnp_devices,
.probe = isapnp_init_one,
.remove = isapnp_remove_one,
};
static int __init isapnp_init(void)
{
return pnp_register_driver(&isapnp_driver);
}
static void __exit isapnp_exit(void)
{
pnp_unregister_driver(&isapnp_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for ISA PnP ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(isapnp_init);
module_exit(isapnp_exit);

847
drivers/ata/pata_it821x.c Normal file

@@ -0,0 +1,847 @@
/*
* ata-it821x.c - IT821x PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon
*
* it821x.c
*
* linux/drivers/ide/pci/it821x.c Version 0.09 December 2004
*
* Copyright (C) 2004 Red Hat <alan@redhat.com>
*
* May be copied or modified under the terms of the GNU General Public License
* Based in part on the ITE vendor provided SCSI driver.
*
* Documentation available from
* http://www.ite.com.tw/pc/IT8212F_V04.pdf
* Some other documents are NDA.
*
* The ITE8212 isn't exactly a standard IDE controller. It has two
* modes. In pass through mode it is an IDE controller. In its smart
* mode it is actually quite a capable hardware RAID controller disguised
* as an IDE controller. Smart mode only understands DMA read/write and
* identify, none of the fancier commands apply. The IT8211 is identical
* in other respects but lacks the raid mode.
*
* Errata:
* o Rev 0x10 also requires master/slave hold the same DMA timings and
* cannot do ATAPI MWDMA.
* o The identify data for raid volumes lacks CHS info (technically ok)
* but also fails to set the LBA28 and other bits. We fix these in
* the IDE probe quirk code.
* o If you write LBA48 sized I/Os (i.e. > 256 sectors) in smart mode
* RAID then the controller firmware dies
* o Smart mode without RAID doesn't clear all the necessary identify
* bits to reduce the command set to the one used
*
* This has a few impacts on the driver
* - In pass through mode we do all the work you would expect
* - In smart mode the clocking set up is done by the controller generally
* but we must watch the other limits and filter.
* - There are a few extra vendor commands that actually talk to the
* controller but only work PIO with no IRQ.
*
* Vendor areas of the identify block in smart mode are used for the
* timing and policy set up. Each HDD in raid mode also has a serial
* block on the disk. The hardware extra commands are get/set chip status,
* rebuild, get rebuild status.
*
* In Linux the driver supports pass through mode as if the device was
* just another IDE controller. If the smart mode is running then
* volumes are managed by the controller firmware and each IDE "disk"
* is a raid volume. Even more cute - the controller can do automated
* hotplug and rebuild.
*
* The pass through controller itself is a little demented. It has a
* flaw that it has a single set of PIO/MWDMA timings per channel so
* non UDMA devices restrict each others performance. It also has a
* single clock source per channel so mixed UDMA100/133 performance
* isn't perfect and we have to pick a clock. Thankfully none of this
* matters in smart mode. ATAPI DMA is not currently supported.
*
* It seems the smart mode is a win for RAID1/RAID10 but otherwise not.
*
* TODO
* - ATAPI and other speed filtering
* - Command filter in smart mode
* - RAID configuration ioctls
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_it821x"
#define DRV_VERSION "0.3.2"
struct it821x_dev
{
unsigned int smart:1, /* Are we in smart raid mode */
timing10:1; /* Rev 0x10 */
u8 clock_mode; /* 0, ATA_50 or ATA_66 */
u8 want[2][2]; /* Mode/Pri log for master slave */
/* We need these for switching the clock when DMA goes on/off
The high byte is the 66Mhz timing */
u16 pio[2]; /* Cached PIO values */
u16 mwdma[2]; /* Cached MWDMA values */
u16 udma[2]; /* Cached UDMA values (per drive) */
u16 last_device; /* Master or slave loaded ? */
};
#define ATA_66 0
#define ATA_50 1
#define ATA_ANY 2
#define UDMA_OFF 0
#define MWDMA_OFF 0
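/*
 * Note (inferred from the code below, not part of the original comments):
 * want[unit][1] holds the clock that device would prefer (ATA_66, ATA_50
 * or ATA_ANY) and want[unit][0] holds a priority (PIO loads 1, MWDMA 2,
 * UDMA 3), so it821x_clock_strategy() lets whichever device runs the
 * faster mode decide the shared channel clock.
 */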
/*
* We allow users to force the card into non raid mode without
* flashing the alternative BIOS. This is also necessary right now
* for embedded platforms that cannot run a PC BIOS but are using this
* device.
*/
static int it8212_noraid;
/**
* it821x_pre_reset - probe
* @ap: ATA port
*
* Set the cable type
*/
static int it821x_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* it821x_error_handler - probe/reset
* @ap: ATA port
*
* Set the cable type and trigger a probe
*/
static void it821x_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, it821x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* it821x_program - program the PIO/MWDMA registers
* @ap: ATA port
* @adev: Device to program
* @timing: Timing value (66Mhz in top 8bits, 50 in the low 8)
*
* Program the PIO/MWDMA timing for this channel according to the
* current clock. These share the same register so are managed by
* the DMA start/stop sequence as with the old driver.
*/
static void it821x_program(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
int channel = ap->port_no;
u8 conf;
/* Program PIO/MWDMA timing bits */
if (itdev->clock_mode == ATA_66)
conf = timing >> 8;
else
conf = timing & 0xFF;
pci_write_config_byte(pdev, 0x54 + 4 * channel, conf);
}
/**
* it821x_program_udma - program the UDMA registers
* @ap: ATA port
* @adev: ATA device to update
* @timing: Timing bits. Top 8 are for 66Mhz bottom for 50Mhz
*
* Program the UDMA timing for this drive according to the
* current clock. Handles the dual clocks and also knows about
* the errata on the 0x10 revision. The UDMA errata is partly handled
* here and partly in start_dma.
*/
static void it821x_program_udma(struct ata_port *ap, struct ata_device *adev, u16 timing)
{
struct it821x_dev *itdev = ap->private_data;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int channel = ap->port_no;
int unit = adev->devno;
u8 conf;
/* Program UDMA timing bits */
if (itdev->clock_mode == ATA_66)
conf = timing >> 8;
else
conf = timing & 0xFF;
if (itdev->timing10 == 0)
pci_write_config_byte(pdev, 0x56 + 4 * channel + unit, conf);
else {
/* Early revision must be programmed for both together */
pci_write_config_byte(pdev, 0x56 + 4 * channel, conf);
pci_write_config_byte(pdev, 0x56 + 4 * channel + 1, conf);
}
}
/**
* it821x_clock_strategy
* @ap: ATA interface
* @adev: ATA device being updated
*
* Select between the 50 and 66Mhz base clocks to get the best
* results for this interface.
*/
static void it821x_clock_strategy(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
u8 unit = adev->devno;
struct ata_device *pair = ata_dev_pair(adev);
int clock, altclock;
u8 v;
int sel = 0;
/* Look for the most wanted clocking */
if (itdev->want[0][0] > itdev->want[1][0]) {
clock = itdev->want[0][1];
altclock = itdev->want[1][1];
} else {
clock = itdev->want[1][1];
altclock = itdev->want[0][1];
}
/* Master doesn't care; does the slave? */
if (clock == ATA_ANY)
clock = altclock;
/* Nobody cares - keep the same clock */
if (clock == ATA_ANY)
return;
/* No change */
if (clock == itdev->clock_mode)
return;
/* Load this into the controller */
if (clock == ATA_66)
itdev->clock_mode = ATA_66;
else {
itdev->clock_mode = ATA_50;
sel = 1;
}
pci_read_config_byte(pdev, 0x50, &v);
v &= ~(1 << (1 + ap->port_no));
v |= sel << (1 + ap->port_no);
pci_write_config_byte(pdev, 0x50, v);
/*
* Reprogram the UDMA/PIO of the pair drive for the switch
* MWDMA will be dealt with by the dma switcher
*/
if (pair && itdev->udma[1-unit] != UDMA_OFF) {
it821x_program_udma(ap, pair, itdev->udma[1-unit]);
it821x_program(ap, pair, itdev->pio[1-unit]);
}
/*
* Reprogram the UDMA/PIO of our drive for the switch.
* MWDMA will be dealt with by the dma switcher
*/
if (itdev->udma[unit] != UDMA_OFF) {
it821x_program_udma(ap, adev, itdev->udma[unit]);
it821x_program(ap, adev, itdev->pio[unit]);
}
}
/**
* it821x_passthru_set_piomode - set PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Configure for PIO mode. This is complicated as the register is
* shared by PIO and MWDMA and for both channels.
*/
static void it821x_passthru_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
/* Spec says 89 ref driver uses 88 */
static const u16 pio[] = { 0xAA88, 0xA382, 0xA181, 0x3332, 0x3121 };
static const u8 pio_want[] = { ATA_66, ATA_66, ATA_66, ATA_66, ATA_ANY };
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
int mode_wanted = adev->pio_mode - XFER_PIO_0;
/* We prefer 66Mhz clock for PIO 0-3, don't care for PIO4 */
itdev->want[unit][1] = pio_want[mode_wanted];
itdev->want[unit][0] = 1; /* PIO is lowest priority */
itdev->pio[unit] = pio[mode_wanted];
it821x_clock_strategy(ap, adev);
it821x_program(ap, adev, itdev->pio[unit]);
}
/**
* it821x_passthru_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Set up the DMA modes. The actions taken depend heavily on the mode
* to use. If UDMA is used as is hopefully the usual case then the
* timing register is private and we need only consider the clock. If
* we are using MWDMA then we have to manage the setting ourself as
* we switch devices and mode.
*/
static void it821x_passthru_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u16 dma[] = { 0x8866, 0x3222, 0x3121 };
static const u8 mwdma_want[] = { ATA_ANY, ATA_66, ATA_ANY };
static const u16 udma[] = { 0x4433, 0x4231, 0x3121, 0x2121, 0x1111, 0x2211, 0x1111 };
static const u8 udma_want[] = { ATA_ANY, ATA_50, ATA_ANY, ATA_66, ATA_66, ATA_50, ATA_66 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev = ap->private_data;
int channel = ap->port_no;
int unit = adev->devno;
u8 conf;
if (adev->dma_mode >= XFER_UDMA_0) {
int mode_wanted = adev->dma_mode - XFER_UDMA_0;
itdev->want[unit][1] = udma_want[mode_wanted];
itdev->want[unit][0] = 3; /* UDMA is high priority */
itdev->mwdma[unit] = MWDMA_OFF;
itdev->udma[unit] = udma[mode_wanted];
if (mode_wanted >= 5)
itdev->udma[unit] |= 0x8080; /* UDMA 5/6 select on */
/* UDMA on. Again revision 0x10 must do the pair */
pci_read_config_byte(pdev, 0x50, &conf);
if (itdev->timing10)
conf &= channel ? 0x9F: 0xE7;
else
conf &= ~ (1 << (3 + 2 * channel + unit));
pci_write_config_byte(pdev, 0x50, conf);
it821x_clock_strategy(ap, adev);
it821x_program_udma(ap, adev, itdev->udma[unit]);
} else {
int mode_wanted = adev->dma_mode - XFER_MW_DMA_0;
itdev->want[unit][1] = mwdma_want[mode_wanted];
itdev->want[unit][0] = 2; /* MWDMA is low priority */
itdev->mwdma[unit] = dma[mode_wanted];
itdev->udma[unit] = UDMA_OFF;
/* UDMA bits off - Revision 0x10 do them in pairs */
pci_read_config_byte(pdev, 0x50, &conf);
if (itdev->timing10)
conf |= channel ? 0x60: 0x18;
else
conf |= 1 << (3 + 2 * channel + unit);
pci_write_config_byte(pdev, 0x50, conf);
it821x_clock_strategy(ap, adev);
}
}
/**
* it821x_passthru_dma_start - DMA start callback
* @qc: Command in progress
*
* Usually drivers set the DMA timing at the point the set_dmamode call
* is made. IT821x however requires we load new timings on the
* transitions in some cases.
*/
static void it821x_passthru_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
if (itdev->mwdma[unit] != MWDMA_OFF)
it821x_program(ap, adev, itdev->mwdma[unit]);
else if (itdev->udma[unit] != UDMA_OFF && itdev->timing10)
it821x_program_udma(ap, adev, itdev->udma[unit]);
ata_bmdma_start(qc);
}
/**
* it821x_passthru_dma_stop - DMA stop callback
* @qc: ATA command
*
* We loaded new timings in dma_start, as a result we need to restore
* the PIO timings in dma_stop so that the next command issue gets the
* right clock values.
*/
static void it821x_passthru_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct it821x_dev *itdev = ap->private_data;
int unit = adev->devno;
ata_bmdma_stop(qc);
if (itdev->mwdma[unit] != MWDMA_OFF)
it821x_program(ap, adev, itdev->pio[unit]);
}
/**
* it821x_passthru_dev_select - Select master/slave
* @ap: ATA port
* @device: Device number (not pointer)
*
* Device selection hook. If necessary perform clock switching
*/
static void it821x_passthru_dev_select(struct ata_port *ap,
unsigned int device)
{
struct it821x_dev *itdev = ap->private_data;
if (itdev && device != itdev->last_device) {
struct ata_device *adev = &ap->device[device];
it821x_program(ap, adev, itdev->pio[adev->devno]);
itdev->last_device = device;
}
ata_std_dev_select(ap, device);
}
/**
* it821x_smart_qc_issue_prot - wrap qc issue prot
* @qc: command
*
* Wrap the command issue sequence for the IT821x. We need to
* perform our own device selection timing loads before the
* usual happenings kick off
*/
static unsigned int it821x_smart_qc_issue_prot(struct ata_queued_cmd *qc)
{
switch(qc->tf.command)
{
/* Commands the firmware supports */
case ATA_CMD_READ:
case ATA_CMD_READ_EXT:
case ATA_CMD_WRITE:
case ATA_CMD_WRITE_EXT:
case ATA_CMD_PIO_READ:
case ATA_CMD_PIO_READ_EXT:
case ATA_CMD_PIO_WRITE:
case ATA_CMD_PIO_WRITE_EXT:
case ATA_CMD_READ_MULTI:
case ATA_CMD_READ_MULTI_EXT:
case ATA_CMD_WRITE_MULTI:
case ATA_CMD_WRITE_MULTI_EXT:
case ATA_CMD_ID_ATA:
/* Arguably should just no-op this one */
case ATA_CMD_SET_FEATURES:
return ata_qc_issue_prot(qc);
}
printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
return AC_ERR_INVALID;
}
/**
* it821x_passthru_qc_issue_prot - wrap qc issue prot
* @qc: command
*
* Wrap the command issue sequence for the IT821x. We need to
* perform our own device selection timing loads before the
* usual happenings kick off
*/
static unsigned int it821x_passthru_qc_issue_prot(struct ata_queued_cmd *qc)
{
it821x_passthru_dev_select(qc->ap, qc->dev->devno);
return ata_qc_issue_prot(qc);
}
/**
* it821x_smart_set_mode - mode setting
* @ap: interface to set up
*
* Use a non standard set_mode function. We don't want to be tuned.
* The BIOS configured everything. Our job is not to fiddle. We
* read the dma enabled bits from the PCI configuration of the device
* and respect them.
*/
static void it821x_smart_set_mode(struct ata_port *ap)
{
int dma_enabled = 0;
int i;
/* Bits 5 and 6 indicate if DMA is active on master/slave */
/* It is possible that BMDMA isn't allocated */
if (ap->ioaddr.bmdma_addr)
dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->dma_mode = XFER_MW_DMA_0;
/* We do need the right mode information for DMA or PIO
and this comes from the current configuration flags */
if (dma_enabled & (1 << (5 + i))) {
dev->xfer_mode = XFER_MW_DMA_0;
dev->xfer_shift = ATA_SHIFT_MWDMA;
dev->flags &= ~ATA_DFLAG_PIO;
} else {
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
}
}
/**
* it821x_dev_config - Called each device identify
* @ap: ATA port
* @adev: Device that has just been identified
*
* Perform the initial setup needed for each device that is chip
* special. In our case we need to lock the sector count to avoid
* blowing the brains out of the firmware with large LBA48 requests
*
* FIXME: When FUA appears we need to block FUA too. And SMART and
* basically we need to filter commands for this chip.
*/
static void it821x_dev_config(struct ata_port *ap, struct ata_device *adev)
{
unsigned char model_num[40];
char *s;
unsigned int len;
/* This block ought to be a library routine as it is in several
drivers now */
ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS,
sizeof(model_num));
s = &model_num[0];
len = strnlen(s, sizeof(model_num));
/* ATAPI specifies that empty space is blank-filled; remove blanks */
while ((len > 0) && (s[len - 1] == ' ')) {
len--;
s[len] = 0;
}
if (adev->max_sectors > 255)
adev->max_sectors = 255;
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
printk(KERN_INFO "IT821x %sRAID%d volume",
adev->id[147]?"Bootable ":"",
adev->id[129]);
if (adev->id[129] != 1)
printk("(%dK stripe)", adev->id[146]);
printk(".\n");
}
}
/**
* it821x_check_atapi_dma - ATAPI DMA handler
* @qc: Command we are about to issue
*
* Decide if this ATAPI command can be issued by DMA on this
* controller. Return 0 if it can be.
*/
static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct it821x_dev *itdev = ap->private_data;
/* No ATAPI DMA in smart mode */
if (itdev->smart)
return -EOPNOTSUPP;
/* No ATAPI DMA on rev 10 */
if (itdev->timing10)
return -EOPNOTSUPP;
/* Cool */
return 0;
}
/**
* it821x_port_start - port setup
* @ap: ATA port being set up
*
* The it821x needs to maintain private data structures and also to
* use the standard PCI interface which lacks support for this
* functionality. We instead set up the private data on the port
* start hook, and tear it down on port stop
*/
static int it821x_port_start(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct it821x_dev *itdev;
u8 conf;
int ret = ata_port_start(ap);
if (ret < 0)
return ret;
ap->private_data = kmalloc(sizeof(struct it821x_dev), GFP_KERNEL);
if (ap->private_data == NULL) {
ata_port_stop(ap);
return -ENOMEM;
}
itdev = ap->private_data;
memset(itdev, 0, sizeof(struct it821x_dev));
pci_read_config_byte(pdev, 0x50, &conf);
if (conf & 1) {
itdev->smart = 1;
/* Long I/O's although allowed in LBA48 space cause the
onboard firmware to enter the twilight zone */
/* No ATAPI DMA in this mode either */
}
/* Pull the current clocks from 0x50 */
if (conf & (1 << (1 + ap->port_no)))
itdev->clock_mode = ATA_50;
else
itdev->clock_mode = ATA_66;
itdev->want[0][1] = ATA_ANY;
itdev->want[1][1] = ATA_ANY;
itdev->last_device = -1;
pci_read_config_byte(pdev, PCI_REVISION_ID, &conf);
if (conf == 0x10) {
itdev->timing10 = 1;
/* Need to disable ATAPI DMA for this case */
if (!itdev->smart)
printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
}
return 0;
}
/**
* it821x_port_stop - port shutdown
* @ap: ATA port being removed
*
* Release the private objects we added in it821x_port_start
*/
static void it821x_port_stop(struct ata_port *ap) {
kfree(ap->private_data);
ap->private_data = NULL; /* We want an OOPS if we reuse this
too late! */
ata_port_stop(ap);
}
static struct scsi_host_template it821x_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
/* 255 sectors to begin with. This is locked in smart mode but not
in pass through */
.max_sectors = 255,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations it821x_smart_port_ops = {
.set_mode = it821x_smart_set_mode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.mode_filter = ata_pci_default_filter,
.check_status = ata_check_status,
.check_atapi_dma= it821x_check_atapi_dma,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.dev_config = it821x_dev_config,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = it821x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = it821x_smart_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = it821x_port_start,
.port_stop = it821x_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations it821x_passthru_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = it821x_passthru_set_piomode,
.set_dmamode = it821x_passthru_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.check_atapi_dma= it821x_check_atapi_dma,
.dev_select = it821x_passthru_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = it821x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = it821x_passthru_bmdma_start,
.bmdma_stop = it821x_passthru_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = it821x_passthru_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_clear = ata_bmdma_irq_clear,
.irq_handler = ata_interrupt,
.port_start = it821x_port_start,
.port_stop = it821x_port_stop,
.host_stop = ata_host_stop
};
static void __devinit it821x_disable_raid(struct pci_dev *pdev)
{
/* Reset local CPU, and set BIOS not ready */
pci_write_config_byte(pdev, 0x5E, 0x01);
/* Set to bypass mode, and reset PCI bus */
pci_write_config_byte(pdev, 0x50, 0x00);
pci_write_config_word(pdev, PCI_COMMAND,
PCI_COMMAND_PARITY | PCI_COMMAND_IO |
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
pci_write_config_word(pdev, 0x40, 0xA0F3);
pci_write_config_dword(pdev,0x4C, 0x02040204);
pci_write_config_byte(pdev, 0x42, 0x36);
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x20);
}
static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
u8 conf;
static struct ata_port_info info_smart = {
.sht = &it821x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &it821x_smart_port_ops
};
static struct ata_port_info info_passthru = {
.sht = &it821x_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
.port_ops = &it821x_passthru_port_ops
};
static struct ata_port_info *port_info[2];
static char *mode[2] = { "pass through", "smart" };
/* Force the card into bypass mode if so requested */
if (it8212_noraid) {
printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
it821x_disable_raid(pdev);
}
pci_read_config_byte(pdev, 0x50, &conf);
conf &= 1;
printk(KERN_INFO DRV_NAME ": controller in %s mode.\n", mode[conf]);
if (conf == 0)
port_info[0] = port_info[1] = &info_passthru;
else
port_info[0] = port_info[1] = &info_smart;
return ata_pci_init_one(pdev, port_info, 2);
}
static struct pci_device_id it821x[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8211), },
{ PCI_DEVICE(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8212), },
{ 0, },
};
static struct pci_driver it821x_pci_driver = {
.name = DRV_NAME,
.id_table = it821x,
.probe = it821x_init_one,
.remove = ata_pci_remove_one
};
static int __init it821x_init(void)
{
return pci_register_driver(&it821x_pci_driver);
}
static void __exit it821x_exit(void)
{
pci_unregister_driver(&it821x_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the IT8211/IT8212 IDE RAID controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, it821x);
MODULE_VERSION(DRV_VERSION);
module_param_named(noraid, it8212_noraid, int, S_IRUGO);
MODULE_PARM_DESC(noraid, "Force card into bypass mode");
module_init(it821x_init);
module_exit(it821x_exit);

266
drivers/ata/pata_jmicron.c Normal file

@@ -0,0 +1,266 @@
/*
* pata_jmicron.c - JMicron ATA driver for non AHCI mode. This drives the
* PATA port of the controller. The SATA ports are
* driven by AHCI in the usual configuration although
* this driver can handle other setups if we need it.
*
* (c) 2006 Red Hat <alan@redhat.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_jmicron"
#define DRV_VERSION "0.1.2"
typedef enum {
PORT_PATA0 = 0,
PORT_PATA1 = 1,
PORT_SATA = 2,
} port_type;
/**
* jmicron_pre_reset - check for 40/80 pin
* @ap: Port
*
* Perform the PATA port setup we need.
* On the Jmicron 361/363 there is a single PATA port that can be mapped
* either as primary or secondary (or neither). We don't do any policy
* and setup here. We assume that has been done by init_one and the
* BIOS.
*/
static int jmicron_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 control;
u32 control5;
int port_mask = 1<< (4 * ap->port_no);
int port = ap->port_no;
port_type port_map[2];
/* Check if our port is enabled */
pci_read_config_dword(pdev, 0x40, &control);
if ((control & port_mask) == 0)
return 0;
/* There are two basic mappings. One has the two SATA ports merged
as master/slave and the secondary as PATA, the other has only the
SATA port mapped */
if (control & (1 << 23)) {
port_map[0] = PORT_SATA;
port_map[1] = PORT_PATA0;
} else {
port_map[0] = PORT_SATA;
port_map[1] = PORT_SATA;
}
/* The 365/366 may have this bit set to map the second PATA port
as the internal primary channel */
pci_read_config_dword(pdev, 0x80, &control5);
if (control5 & (1<<24))
port_map[0] = PORT_PATA1;
/* The two ports may then be logically swapped by the firmware */
if (control & (1 << 22))
port = port ^ 1;
/*
* Now we know which physical port we are talking about we can
* actually do our cable checking etc. Thankfully we don't need
* to do the plumbing for other cases.
*/
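/* Example (illustrative): with bit 23 of register 0x40 set and bit 24 of
   register 0x80 clear, port_map is { PORT_SATA, PORT_PATA0 }; if bit 22
   is also set the two logical ports are swapped before the lookup below. */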
switch (port_map[port])
{
case PORT_PATA0:
if (control & (1 << 5))
return 0;
if (control & (1 << 3)) /* 40/80 pin primary */
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
break;
case PORT_PATA1:
/* Bit 21 is set if the port is enabled */
if ((control5 & (1 << 21)) == 0)
return 0;
if (control5 & (1 << 19)) /* 40/80 pin secondary */
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
break;
case PORT_SATA:
ap->cbl = ATA_CBL_SATA;
break;
}
return ata_std_prereset(ap);
}
/**
* jmicron_error_handler - Setup and error handler
* @ap: Port to handle
*
* LOCKING:
* None (inherited from caller).
*/
static void jmicron_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, jmicron_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/* No PIO or DMA methods needed for this device */
static struct scsi_host_template jmicron_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
/* Special handling needed if you have sector or LBA48 limits */
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
/* Use standard CHS mapping rules */
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations jmicron_ops = {
.port_disable = ata_port_disable,
/* Task file is PCI ATA format, use helpers */
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = jmicron_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
/* BMDMA handling is PCI ATA format, use helpers */
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
/* Timeout handling. Special recovery hooks here */
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
/* Generic PATA PCI ATA helpers */
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* jmicron_init_one - Register Jmicron ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in jmicron_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &jmicron_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &jmicron_ops,
};
struct ata_port_info *port_info[2] = { &info, &info };
u32 reg;
if (id->driver_data != 368) {
/* Put the controller into AHCI mode in case the AHCI driver
has not yet been loaded. This can be done with either
function present */
/* FIXME: We may want a way to override this in future */
pci_write_config_byte(pdev, 0x41, 0xa1);
}
/* PATA controller is fn 1, AHCI is fn 0 */
if (PCI_FUNC(pdev->devfn) != 1)
return -ENODEV;
if ( id->driver_data == 365 || id->driver_data == 366) {
/* The 365/66 have two PATA channels, redirect the second */
pci_read_config_dword(pdev, 0x80, &reg);
reg |= (1 << 24); /* IDE1 to PATA IDE secondary */
pci_write_config_dword(pdev, 0x80, reg);
}
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id jmicron_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361), 361},
{ PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363), 363},
{ PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365), 365},
{ PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366), 366},
{ PCI_DEVICE(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368), 368},
{ } /* terminate list */
};
static struct pci_driver jmicron_pci_driver = {
.name = DRV_NAME,
.id_table = jmicron_pci_tbl,
.probe = jmicron_init_one,
.remove = ata_pci_remove_one,
};
static int __init jmicron_init(void)
{
return pci_register_driver(&jmicron_pci_driver);
}
static void __exit jmicron_exit(void)
{
pci_unregister_driver(&jmicron_pci_driver);
}
module_init(jmicron_init);
module_exit(jmicron_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Jmicron PATA ports");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, jmicron_pci_tbl);
MODULE_VERSION(DRV_VERSION);

949
drivers/ata/pata_legacy.c Normal file

@@ -0,0 +1,949 @@
/*
* pata-legacy.c - Legacy port PATA/SATA controller driver.
* Copyright 2005/2006 Red Hat <alan@redhat.com>, all rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* An ATA driver for the legacy ATA ports.
*
* Data Sources:
* Opti 82C465/82C611 support: Data sheets at opti-inc.com
* HT6560 series:
* Promise 20230/20620:
* http://www.ryston.cz/petr/vlb/pdc20230b.html
* http://www.ryston.cz/petr/vlb/pdc20230c.html
* http://www.ryston.cz/petr/vlb/pdc20630.html
*
* Unsupported but docs exist:
* Appian/Adaptec AIC25VL01/Cirrus Logic PD7220
* Winbond W83759A
*
* This driver handles legacy (that is "ISA/VLB side") IDE ports found
* on PC class systems. There are three hybrid devices that are exceptions:
* The Cyrix 5510/5520 where a pre SFF ATA device is on the bridge and
* the MPIIX where the tuning is PCI side but the IDE is "ISA side".
*
* Specific support is included for the ht6560a/ht6560b/opti82c611a/
* opti82c465mv/promise 20230c/20630
*
* Use the autospeed and pio_mask options with:
* Appian ADI/2 aka CLPD7220 or AIC25VL01.
* Use the jumpers, autospeed and set pio_mask to the mode on the jumpers with
* Goldstar GM82C711, PIC-1288A-125, UMC 82C871F, Winbond W83759,
* Winbond W83759A, Promise PDC20230-B
*
* For now use autospeed and pio_mask as above with the W83759A. This may
* change.
*
* TODO
* Merge existing pata_qdi driver
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_legacy"
#define DRV_VERSION "0.5.3"
#define NR_HOST 6
static int legacy_port[NR_HOST] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
static int legacy_irq[NR_HOST] = { 15, 14, 11, 10, 8, 12 };
struct legacy_data {
unsigned long timing;
u8 clock[2];
u8 last;
int fast;
struct platform_device *platform_dev;
};
static struct legacy_data legacy_data[NR_HOST];
static struct ata_host *legacy_host[NR_HOST];
static int nr_legacy_host;
static int probe_all; /* Set to check all ISA port ranges */
static int ht6560a; /* HT 6560A on primary 1, secondary 2, both 3 */
static int ht6560b; /* HT 6560B on primary 1, secondary 2, both 3 */
static int opti82c611a; /* Opti82c611A on primary 1, secondary 2, both 3 */
static int opti82c46x; /* Opti 82c465MV present (pri/sec autodetect) */
static int autospeed; /* Chip present which snoops speed changes */
static int pio_mask = 0x1F; /* PIO range for autospeed devices */
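/* Usage sketch (assuming these are exposed as module parameters, as the
   "options" wording in the header comment suggests):
   modprobe pata_legacy probe_all=1 ht6560b=3 pio_mask=0x07 */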
/**
* legacy_set_mode - mode setting
* @ap: IDE interface
*
* Use a non standard set_mode function. We don't want to be tuned.
*
* The BIOS configured everything. Our job is not to fiddle. Just use
* whatever PIO the hardware is using and leave it at that. When we
* get some kind of nice user driven API for control then we can
* expand on this as per hdparm in the base kernel.
*/
static void legacy_set_mode(struct ata_port *ap)
{
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
if (ata_dev_enabled(dev)) {
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
}
static struct scsi_host_template legacy_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
/*
* These ops are used if the user indicates the hardware
* snoops the commands to decide on the mode and handles the
* mode selection "magically" itself. Several legacy controllers
* do this. If the mode range is not 0x1F it can be restricted by setting
* pio_mask as well.
*/
static struct ata_port_operations simple_port_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer_noirq,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations legacy_port_ops = {
.set_mode = legacy_set_mode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer_noirq,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Promise 20230C and 20620 support
*
* This controller supports PIO0 to PIO2. We set PIO timings conservatively to
* allow for a 50MHz VESA Local Bus. The 20620's DMA support is odd (DMA to the
* controller, PIO'd on to the host) and is not supported.
*/
static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int tries = 5;
int pio = adev->pio_mode - XFER_PIO_0;
u8 rt;
unsigned long flags;
/* Safe as UP only. Force I/Os to occur together */
local_irq_save(flags);
/* Unlock the control interface */
do
{
inb(0x1F5);
outb(inb(0x1F2) | 0x80, 0x1F2);
inb(0x1F2);
inb(0x3F6);
inb(0x3F6);
inb(0x1F2);
inb(0x1F2);
}
while((inb(0x1F2) & 0x80) && --tries);
local_irq_restore(flags);
outb(inb(0x1F4) & 0x07, 0x1F4);
rt = inb(0x1F3);
rt &= ~(0x07 << (3 * adev->devno));
if (pio)
rt |= (1 + 3 * pio) << (3 * adev->devno);
udelay(100);
outb(inb(0x1F2) | 0x01, 0x1F2);
udelay(100);
inb(0x1F5);
}
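/*
 * Illustrative note (reconstructed from the code above, not from a data
 * sheet): the register read back through 0x1F3 carries one 3-bit timing
 * field per device at bit 3 * devno.  PIO0 leaves the field at zero,
 * while PIO n (n > 0) loads the value 1 + 3 * n.
 */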
static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
int slop = buflen & 3;
unsigned long flags;
if (ata_id_has_dword_io(adev->id)) {
local_irq_save(flags);
/* Perform the 32bit I/O synchronization sequence */
inb(ap->ioaddr.nsect_addr);
inb(ap->ioaddr.nsect_addr);
inb(ap->ioaddr.nsect_addr);
/* Now the data */
if (write_data)
outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
else
insl(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
u32 pad;
if (write_data) {
memcpy(&pad, buf + buflen - slop, slop);
outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
} else {
pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
}
}
local_irq_restore(flags);
}
else
ata_pio_data_xfer_noirq(adev, buf, buflen, write_data);
}
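/*
 * Worked example (illustrative): a 510 byte transfer with DWORD I/O
 * enabled moves 127 dwords through insl()/outsl() above, then performs
 * one further 32-bit access for the trailing two bytes, copying only
 * those bytes via the bounce variable 'pad'.
 */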
static struct ata_port_operations pdc20230_port_ops = {
.set_piomode = pdc20230_set_piomode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = pdc_data_xfer_vlb,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Holtek 6560A support
*
* This controller supports PIO0 to PIO2 (no IORDY even though higher timings
* can be loaded).
*/
static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover;
struct ata_timing t;
/* Get the timing data in cycles. For now play safe at 50MHz */
ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
active = FIT(t.active, 2, 15);
recover = FIT(t.recover, 4, 15);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
outb(recover << 4 | active, ap->ioaddr.device_addr);
inb(ap->ioaddr.status_addr);
}
static struct ata_port_operations ht6560a_port_ops = {
.set_piomode = ht6560a_set_piomode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Holtek 6560B support
*
* This controller supports PIO0 to PIO4. We honour the BIOS/jumper FIFO setting
* unless we see an ATAPI device in which case we force it off.
*
* FIXME: need to implement 2nd channel support.
*/
static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover;
struct ata_timing t;
/* Get the timing data in cycles. For now play safe at 50MHz */
ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000);
active = FIT(t.active, 2, 15);
recover = FIT(t.recover, 2, 16);
recover &= 0x15;
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
inb(0x3E6);
outb(recover << 4 | active, ap->ioaddr.device_addr);
if (adev->class != ATA_DEV_ATA) {
u8 rconf = inb(0x3E6);
if (rconf & 0x24) {
rconf &= ~ 0x24;
outb(rconf, 0x3E6);
}
}
inb(ap->ioaddr.status_addr);
}
static struct ata_port_operations ht6560b_port_ops = {
.set_piomode = ht6560b_set_piomode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Opti core chipset helpers
*/
/**
* opti_syscfg - read OPTI chipset configuration
* @reg: Configuration register to read
*
* Returns the value of an OPTI system board configuration register.
*/
static u8 opti_syscfg(u8 reg)
{
unsigned long flags;
u8 r;
/* Uniprocessor chipset; we must force the index/data cycles to be adjacent */
local_irq_save(flags);
outb(reg, 0x22);
r = inb(0x24);
local_irq_restore(flags);
return r;
}
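/*
 * A minimal sketch (illustrative only, hence kept under #if 0) of how a
 * caller might decode the 82C46x clock strapping with opti_syscfg(); it
 * mirrors the use in opti82c46x_set_piomode() below.
 */
#if 0
static int opti_vlb_clock_khz(void)
{
static const int khz[4] = { 50000, 40000, 33000, 25000 };
/* Bits 7:6 of register 0xAC hold the BIOS-set clock selection */
return khz[(opti_syscfg(0xAC) & 0xC0) >> 6];
}
#endif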
/*
* Opti 82C611A
*
* This controller supports PIO0 to PIO3.
*/
static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover, setup;
struct ata_timing t;
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int khz[4] = { 50000, 40000, 33000, 25000 };
u8 rc;
/* Enter configuration mode */
inw(ap->ioaddr.error_addr);
inw(ap->ioaddr.error_addr);
outb(3, ap->ioaddr.nsect_addr);
/* Read VLB clock strapping */
clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03];
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
/* Setup timing is shared */
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
active = FIT(t.active, 2, 17) - 2;
recover = FIT(t.recover, 1, 16) - 1;
setup = FIT(t.setup, 1, 4) - 1;
/* Select the right timing bank for write timing */
rc = inb(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
outb(rc, ap->ioaddr.lbal_addr);
/* Write the timings */
outb(active << 4 | recover, ap->ioaddr.error_addr);
/* Select the right bank for read timings, also
load the shared timings for address */
rc = inb(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
outb(rc, ap->ioaddr.device_addr);
/* Load the read timings */
outb(active << 4 | recover, ap->ioaddr.data_addr);
/* Ensure the timing register mode is right */
rc = inb (ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
outb(rc, ap->ioaddr.lbal_addr);
/* Exit command mode */
outb(0x83, ap->ioaddr.nsect_addr);
}
static struct ata_port_operations opti82c611a_port_ops = {
.set_piomode = opti82c611a_set_piomode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/*
* Opti 82C465MV
*
* This controller supports PIO0 to PIO3. Unlike the 611A the MVB
* version is dual channel but doesn't have a lot of unique registers.
*/
static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
u8 active, recover, setup;
struct ata_timing t;
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int khz[4] = { 50000, 40000, 33000, 25000 };
u8 rc;
u8 sysclk;
/* Get the clock */
sysclk = (opti_syscfg(0xAC) & 0xC0) >> 6; /* BIOS set */
/* Enter configuration mode */
inw(ap->ioaddr.error_addr);
inw(ap->ioaddr.error_addr);
outb(3, ap->ioaddr.nsect_addr);
/* Read VLB clock strapping */
clock = 1000000000 / khz[sysclk];
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
/* Setup timing is shared */
if (pair) {
struct ata_timing tp;
ata_timing_compute(pair, pair->pio_mode, &tp, clock, 1000);
ata_timing_merge(&t, &tp, &t, ATA_TIMING_SETUP);
}
active = FIT(t.active, 2, 17) - 2;
recover = FIT(t.recover, 1, 16) - 1;
setup = FIT(t.setup, 1, 4) - 1;
/* Select the right timing bank for write timing */
rc = inb(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
outb(rc, ap->ioaddr.lbal_addr);
/* Write the timings */
outb(active << 4 | recover, ap->ioaddr.error_addr);
/* Select the right bank for read timings, also
load the shared timings for address */
rc = inb(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
outb(rc, ap->ioaddr.device_addr);
/* Load the read timings */
outb(active << 4 | recover, ap->ioaddr.data_addr);
/* Ensure the timing register mode is right */
rc = inb (ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
outb(rc, ap->ioaddr.lbal_addr);
/* Exit command mode */
outb(0x83, ap->ioaddr.nsect_addr);
/* We need to know this for quad device on the MVB */
ap->host->private_data = ap;
}
/**
* opt82c465mv_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings. The
* MVB has a single set of timing registers and these are shared
* across channels. As there are two registers we really ought to
* track the last two used values as a sort of register window. For
* now we just reload on a channel switch. On the single channel
* setup this condition never fires so we do nothing extra.
*
* FIXME: dual channel needs ->serialize support
*/
static unsigned int opti82c46x_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If timings are set and for the wrong channel (the second test is
due to a libata shortcoming and will, I hope, eventually go) */
if (ap->host->private_data != ap->host
&& ap->host->private_data != NULL)
opti82c46x_set_piomode(ap, adev);
return ata_qc_issue_prot(qc);
}
static struct ata_port_operations opti82c46x_port_ops = {
.set_piomode = opti82c46x_set_piomode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = opti82c46x_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* legacy_init_one - attach a legacy interface
* @port: port number
* @io: I/O port start
* @ctrl: control port
* @irq: interrupt line
*
* Register an ISA bus IDE interface. Such interfaces are PIO only and we
* assume they do not support IRQ sharing.
*/
static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl, int irq)
{
struct legacy_data *ld = &legacy_data[nr_legacy_host];
struct ata_probe_ent ae;
struct platform_device *pdev;
int ret = -EBUSY;
struct ata_port_operations *ops = &legacy_port_ops;
int pio_modes = pio_mask;
u32 mask = (1 << port);
if (request_region(io, 8, "pata_legacy") == NULL)
return -EBUSY;
if (request_region(ctrl, 1, "pata_legacy") == NULL)
goto fail_io;
pdev = platform_device_register_simple(DRV_NAME, nr_legacy_host, NULL, 0);
if (pdev == NULL)
goto fail_dev;
if (ht6560a & mask) {
ops = &ht6560a_port_ops;
pio_modes = 0x07;
}
if (ht6560b & mask) {
ops = &ht6560b_port_ops;
pio_modes = 0x1F;
}
if (opti82c611a & mask) {
ops = &opti82c611a_port_ops;
pio_modes = 0x0F;
}
if (opti82c46x & mask) {
ops = &opti82c46x_port_ops;
pio_modes = 0x0F;
}
/* Probe for automatically detectable controllers */
if (io == 0x1F0 && ops == &legacy_port_ops) {
unsigned long flags;
local_irq_save(flags);
/* Probes */
inb(0x1F5);
outb(inb(0x1F2) | 0x80, 0x1F2);
inb(0x1F2);
inb(0x3F6);
inb(0x3F6);
inb(0x1F2);
inb(0x1F2);
if ((inb(0x1F2) & 0x80) == 0) {
/* PDC20230c or 20630 ? */
printk(KERN_INFO "PDC20230-C/20630 VLB ATA controller detected.\n");
pio_modes = 0x07;
ops = &pdc20230_port_ops;
udelay(100);
inb(0x1F5);
} else {
outb(0x55, 0x1F2);
inb(0x1F2);
inb(0x1F2);
if (inb(0x1F2) == 0x00) {
printk(KERN_INFO "PDC20230-B VLB ATA controller detected.\n");
}
}
local_irq_restore(flags);
}
/* Chip does mode setting by command snooping */
if (ops == &legacy_port_ops && (autospeed & mask))
ops = &simple_port_ops;
memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &pdev->dev;
ae.port_ops = ops;
ae.sht = &legacy_sht;
ae.n_ports = 1;
ae.pio_mask = pio_modes;
ae.irq = irq;
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
ae.port[0].cmd_addr = io;
ae.port[0].altstatus_addr = ctrl;
ae.port[0].ctl_addr = ctrl;
ata_std_ports(&ae.port[0]);
ae.private_data = ld;
ret = ata_device_add(&ae);
if (ret == 0) {
ret = -ENODEV;
goto fail;
}
legacy_host[nr_legacy_host++] = dev_get_drvdata(&pdev->dev);
ld->platform_dev = pdev;
return 0;
fail:
platform_device_unregister(pdev);
fail_dev:
release_region(ctrl, 1);
fail_io:
release_region(io, 8);
return ret;
}
/**
* legacy_check_special_cases - ATA special cases
* @p: PCI device to check
* @primary: set this if we find an ATA primary
* @secondary: set this if we find an ATA secondary
*
* A small number of vendors implemented early PCI ATA interfaces on bridge logic
* without the ATA interface being PCI visible. Where we have a matching PCI driver
* we must skip the relevant device here. If we don't know about it then the legacy
* driver is the right driver anyway.
*/
static void legacy_check_special_cases(struct pci_dev *p, int *primary, int *secondary)
{
/* Cyrix CS5510 pre SFF MWDMA ATA on the bridge */
if (p->vendor == 0x1078 && p->device == 0x0000) {
*primary = *secondary = 1;
return;
}
/* Cyrix CS5520 pre SFF MWDMA ATA on the bridge */
if (p->vendor == 0x1078 && p->device == 0x0002) {
*primary = *secondary = 1;
return;
}
/* Intel MPIIX - PIO ATA on non PCI side of bridge */
if (p->vendor == 0x8086 && p->device == 0x1234) {
u16 r;
pci_read_config_word(p, 0x6C, &r);
if (r & 0x8000) { /* ATA port enabled */
if (r & 0x4000)
*secondary = 1;
else
*primary = 1;
}
return;
}
}
/**
* legacy_init - attach legacy interfaces
*
* Attach legacy IDE interfaces by scanning the usual IRQ/port suspects.
* Right now we do not scan the ide0 and ide1 addresses but should do so
* for non-PCI systems or systems with no PCI IDE legacy mode devices.
* If you fix that note there are special cases to consider like VLB
* drivers and CS5510/20.
*/
static __init int legacy_init(void)
{
int i;
int ct = 0;
int primary = 0;
int secondary = 0;
int last_port = NR_HOST;
struct pci_dev *p = NULL;
for_each_pci_dev(p) {
int r;
/* Check for any overlap of the system ATA mappings. Native mode controllers
stuck on these addresses or some devices in 'raid' mode won't be found by
the storage class test */
for (r = 0; r < 6; r++) {
if (pci_resource_start(p, r) == 0x1f0)
primary = 1;
if (pci_resource_start(p, r) == 0x170)
secondary = 1;
}
/* Check for special cases */
legacy_check_special_cases(p, &primary, &secondary);
/* If PCI bus is present then don't probe for tertiary legacy ports */
if (probe_all == 0)
last_port = 2;
}
/* If an OPTI 82C46X is present find out where the channels are */
if (opti82c46x) {
static const char *optis[4] = {
"3/463MV", "5MV",
"5MVA", "5MVB"
};
u8 chans = 1;
u8 ctrl = (opti_syscfg(0x30) & 0xC0) >> 6;
opti82c46x = 3; /* Assume master and slave first */
printk(KERN_INFO DRV_NAME ": Opti 82C46%s chipset support.\n", optis[ctrl]);
if (ctrl == 3)
chans = (opti_syscfg(0x3F) & 0x20) ? 2 : 1;
ctrl = opti_syscfg(0xAC);
/* Check the port is enabled and that this is the 465MV port. On the
MVB we may have two channels */
if (ctrl & 8) {
if (ctrl & 4)
opti82c46x = 2; /* Slave */
else
opti82c46x = 1; /* Master */
if (chans == 2)
opti82c46x = 3; /* Master and Slave */
} /* Slave only */
else if (chans == 1)
opti82c46x = 1;
}
for (i = 0; i < last_port; i++) {
/* Skip primary if we have seen a PCI one */
if (i == 0 && primary == 1)
continue;
/* Skip secondary if we have seen a PCI one */
if (i == 1 && secondary == 1)
continue;
if (legacy_init_one(i, legacy_port[i],
legacy_port[i] + 0x0206,
legacy_irq[i]) == 0)
ct++;
}
if (ct != 0)
return 0;
return -ENODEV;
}
static __exit void legacy_exit(void)
{
int i;
for (i = 0; i < nr_legacy_host; i++) {
struct legacy_data *ld = &legacy_data[i];
struct ata_port *ap = legacy_host[i]->ports[0];
unsigned long io = ap->ioaddr.cmd_addr;
unsigned long ctrl = ap->ioaddr.ctl_addr;
ata_host_remove(legacy_host[i]);
platform_device_unregister(ld->platform_dev);
if (ld->timing)
release_region(ld->timing, 2);
release_region(io, 8);
release_region(ctrl, 1);
}
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for legacy ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_param(probe_all, int, 0);
module_param(autospeed, int, 0);
module_param(ht6560a, int, 0);
module_param(ht6560b, int, 0);
module_param(opti82c611a, int, 0);
module_param(opti82c46x, int, 0);
module_param(pio_mask, int, 0);
module_init(legacy_init);
module_exit(legacy_exit);

313
drivers/ata/pata_mpiix.c Normal file
View File

@ -0,0 +1,313 @@
/*
* pata_mpiix.c - Intel MPIIX PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* The MPIIX is different enough from the PIIX4 and friends that we give it
* a separate driver. The old ide/pci code handles this by just not tuning
* MPIIX at all.
*
* The MPIIX also differs in another important way from the majority of PIIX
* devices. The chip is a bridge (pardon the pun) between the old world of
* ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual
* IDE controller is not decoded in PCI space and the chip does not claim to
* be IDE class PCI. This requires slightly non-standard probe logic compared
* with PCI IDE and also that we do not disable the device when our driver is
* unloaded (as it has many other functions).
*
* The driver consciously keeps this logic internal to avoid pushing quirky
* PATA history into the clean libata layer.
*
* Thinkpad specific note: If you boot an MPIIX-based Thinkpad with a PCMCIA
* hard disk present this driver will not detect it. This is not a bug. In this
* configuration the secondary port of the MPIIX is disabled and the addresses
* are decoded by the PCMCIA bridge, and are therefore left for a generic IDE
* driver to operate.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_mpiix"
#define DRV_VERSION "0.7.1"
enum {
IDETIM = 0x6C, /* IDE control register */
IORDY = (1 << 1),
PPE = (1 << 2),
FTIM = (1 << 0),
ENABLED = (1 << 15),
SECONDARY = (1 << 14)
};
static int mpiix_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits mpiix_enable_bits[] = {
{ 0x6D, 1, 0x80, 0x80 },
{ 0x6F, 1, 0x80, 0x80 }
};
if (!pci_test_config_bits(pdev, &mpiix_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* mpiix_error_handler - probe reset
* @ap: ATA port
*
* Perform the ATA probe and bus reset sequence plus specific handling
* for this hardware. The MPIIX has the enable bits in a different place
* from PIIX4 and friends. As a pure PIO device it has no cable detect.
*/
static void mpiix_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, mpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* mpiix_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. The MPIIX allows us to program the
* IORDY sample point (2-5 clocks), recovery 1-4 clocks and whether
* prefetching or iordy are used.
*
* This would get very ugly because we can only program timing for one
* device at a time; the other gets PIO0. Fortunately libata calls
* our qc_issue_prot command before a command is issued so we can
* flip the timings back and forth to reduce the pain.
*/
static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
int control = 0;
int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 idetim;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
pci_read_config_word(pdev, IDETIM, &idetim);
/* Mask the IORDY/TIME/PPE0 bank for this device */
if (adev->class == ATA_DEV_ATA)
control |= PPE; /* PPE enable for disk */
if (ata_pio_need_iordy(adev))
control |= IORDY; /* IORDY */
if (pio > 0)
control |= FTIM; /* This drive is on the fast timing bank */
/* Mask out timing and clear both TIME bank selects */
idetim &= 0xCCEE;
idetim &= ~(0x07 << (2 * adev->devno));
idetim |= (control << (2 * adev->devno));
idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
pci_write_config_word(pdev, IDETIM, idetim);
/* We use ap->private_data as a pointer to the device currently
loaded for timing */
ap->private_data = adev;
}
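/*
 * IDETIM layout as used above (a sketch reconstructed from this code and
 * the enum at the top of the file, not quoted from the data sheet):
 * ENABLED is bit 15, SECONDARY bit 14, ISP sits in bits 13:12, RTC in
 * bits 9:8, and the per-drive FTIM/IORDY/PPE control bits are shifted
 * by 2 * devno.
 */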
/**
* mpiix_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int mpiix_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If modes have been configured and the channel data is not loaded
then load it. We have to check if pio_mode is set as the core code
does not set adev->pio_mode to XFER_PIO_0 while probing as would be
logical */
if (adev->pio_mode && adev != ap->private_data)
mpiix_set_piomode(ap, adev);
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template mpiix_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations mpiix_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = mpiix_set_piomode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = mpiix_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = mpiix_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
static struct ata_probe_ent probe[2];
static int printed_version;
u16 idetim;
int enabled;
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
/* MPIIX has many functions which can be turned on or off depending
on which other devices are present. Make sure IDE is enabled before
we try to use it */
pci_read_config_word(dev, IDETIM, &idetim);
if (!(idetim & ENABLED))
return -ENODEV;
/* We do our own plumbing to avoid leaking special cases for whacko
ancient hardware into the core code. There are two issues to
worry about. #1 The chip is a bridge, so when in legacy mode with no
BARs set it fools the standard setup. #2 If you pci_disable_device()
the MPIIX your box goes castors up */
INIT_LIST_HEAD(&probe[0].node);
probe[0].dev = pci_dev_to_dev(dev);
probe[0].port_ops = &mpiix_port_ops;
probe[0].sht = &mpiix_sht;
probe[0].pio_mask = 0x1F;
probe[0].irq = 14;
probe[0].irq_flags = SA_SHIRQ;
probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
probe[0].n_ports = 1;
probe[0].port[0].cmd_addr = 0x1F0;
probe[0].port[0].ctl_addr = 0x3F6;
probe[0].port[0].altstatus_addr = 0x3F6;
/* The secondary lurks at different addresses but is otherwise
the same beastie */
INIT_LIST_HEAD(&probe[1].node);
probe[1] = probe[0];
probe[1].irq = 15;
probe[1].port[0].cmd_addr = 0x170;
probe[1].port[0].ctl_addr = 0x376;
probe[1].port[0].altstatus_addr = 0x376;
/* Let libata fill in the port details */
ata_std_ports(&probe[0].port[0]);
ata_std_ports(&probe[1].port[0]);
/* Now add the port that is active */
enabled = (idetim & SECONDARY) ? 1 : 0;
if (ata_device_add(&probe[enabled]))
return 0;
return -ENODEV;
}
/**
* mpiix_remove_one - device unload
* @pdev: PCI device being removed
*
* Handle an unplug/unload event for a PCI device. Unload the
* PCI driver but do not use the default handler, as we *MUST NOT*
* disable the device because it has other functions.
*/
static void __devexit mpiix_remove_one(struct pci_dev *pdev)
{
struct device *dev = pci_dev_to_dev(pdev);
struct ata_host *host = dev_get_drvdata(dev);
ata_host_remove(host);
dev_set_drvdata(dev, NULL);
}
static const struct pci_device_id mpiix[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371MX), },
{ 0, },
};
static struct pci_driver mpiix_pci_driver = {
.name = DRV_NAME,
.id_table = mpiix,
.probe = mpiix_init_one,
.remove = mpiix_remove_one
};
static int __init mpiix_init(void)
{
return pci_register_driver(&mpiix_pci_driver);
}
static void __exit mpiix_exit(void)
{
pci_unregister_driver(&mpiix_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Intel MPIIX");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mpiix);
MODULE_VERSION(DRV_VERSION);
module_init(mpiix_init);
module_exit(mpiix_exit);

175
drivers/ata/pata_netcell.c Normal file
View File

@ -0,0 +1,175 @@
/*
* pata_netcell.c - Netcell PATA driver
*
* (c) 2006 Red Hat <alan@redhat.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_netcell"
#define DRV_VERSION "0.1.5"
/**
* netcell_pre_reset - check for 40/80 pin
* @ap: Port
*
* Cables are handled by the RAID controller. Report 80 pin.
*/
static int netcell_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* netcell_error_handler - Probe specified port on PATA host controller
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void netcell_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, netcell_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/* No PIO or DMA methods needed for this device */
static struct scsi_host_template netcell_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
/* Special handling needed if you have sector or LBA48 limits */
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
/* Use standard CHS mapping rules */
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations netcell_ops = {
.port_disable = ata_port_disable,
/* Task file is PCI ATA format, use helpers */
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = netcell_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
/* BMDMA handling is PCI ATA format, use helpers */
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
/* Timeout handling. Special recovery hooks here */
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
/* Generic PATA PCI ATA helpers */
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* netcell_init_one - Register Netcell ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in netcell_pci_tbl matching with @pdev
*
* Called from kernel PCI layer.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int netcell_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
static struct ata_port_info info = {
.sht = &netcell_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
/* Actually we don't really care about these as the
firmware deals with it */
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5; the board is UDMA 133 capable */
.port_ops = &netcell_ops,
};
static struct ata_port_info *port_info[2] = { &info, &info };
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
/* Any chip specific setup/optimisation/messages here */
ata_pci_clear_simplex(pdev);
/* And let the library code do the work */
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id netcell_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NETCELL, PCI_DEVICE_ID_REVOLUTION), },
{ } /* terminate list */
};
static struct pci_driver netcell_pci_driver = {
.name = DRV_NAME,
.id_table = netcell_pci_tbl,
.probe = netcell_init_one,
.remove = ata_pci_remove_one,
};
static int __init netcell_init(void)
{
return pci_register_driver(&netcell_pci_driver);
}
static void __exit netcell_exit(void)
{
pci_unregister_driver(&netcell_pci_driver);
}
module_init(netcell_init);
module_exit(netcell_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Netcell PATA RAID");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, netcell_pci_tbl);
MODULE_VERSION(DRV_VERSION);

236
drivers/ata/pata_ns87410.c Normal file
View File

@ -0,0 +1,236 @@
/*
* pata_ns87410.c - National Semiconductor 87410 PATA for new ATA layer
* (C) 2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_ns87410"
#define DRV_VERSION "0.4.2"
/**
* ns87410_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int ns87410_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits ns87410_enable_bits[] = {
{ 0x43, 1, 0x08, 0x08 },
{ 0x47, 1, 0x08, 0x08 }
};
if (!pci_test_config_bits(pdev, &ns87410_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* ns87410_error_handler - probe reset
* @ap: ATA port
*
* Perform the ATA probe and bus reset sequence plus specific handling
* for this hardware. The 87410 has the enable bits in a different place
* from PIIX4 and friends. As a pure PIO device it has no cable detect.
*/
static void ns87410_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, ns87410_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* ns87410_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program timing data. This is kept per channel not per device,
* and only affects the data port.
*/
static void ns87410_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x40 + 4 * ap->port_no;
u8 idetcr, idefr;
struct ata_timing at;
static const u8 activebits[15] = {
0, 1, 2, 3, 4,
5, 5, 6, 6, 6,
6, 7, 7, 7, 7
};
static const u8 recoverbits[12] = {
0, 1, 2, 3, 4, 5, 6, 6, 7, 7, 7, 7
};
pci_read_config_byte(pdev, port + 3, &idefr);
if (ata_pio_need_iordy(adev))
idefr |= 0x04; /* IORDY enable */
else
idefr &= ~0x04;
if (ata_timing_compute(adev, adev->pio_mode, &at, 30303, 1) < 0) {
dev_printk(KERN_ERR, &pdev->dev, "unknown mode %d.\n", adev->pio_mode);
return;
}
at.active = FIT(at.active, 2, 16) - 2;
at.setup = FIT(at.setup, 1, 4) - 1;
at.recover = FIT(at.recover, 1, 12) - 1;
idetcr = (at.setup << 6) | (recoverbits[at.recover] << 3) | activebits[at.active];
pci_write_config_byte(pdev, port, idetcr);
pci_write_config_byte(pdev, port + 3, idefr);
/* We use ap->private_data as a pointer to the device currently
loaded for timing */
ap->private_data = adev;
}
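/*
 * Register layout as programmed above (derived from this code rather
 * than the 87410 documentation): IDETCR takes the address setup in bits
 * 7:6, the encoded recovery time in bits 5:3 and the encoded active time
 * in bits 2:0, while IDEFR bit 2 is the IORDY enable.
 */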
/**
* ns87410_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary.
*/
static unsigned int ns87410_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
/* If modes have been configured and the channel data is not loaded
then load it. We have to check if pio_mode is set as the core code
does not set adev->pio_mode to XFER_PIO_0 while probing as would be
logical */
if (adev->pio_mode && adev != ap->private_data)
ns87410_set_piomode(ap, adev);
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template ns87410_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations ns87410_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = ns87410_set_piomode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ns87410_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ns87410_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int ns87410_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &ns87410_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x0F,
.port_ops = &ns87410_port_ops
};
static struct ata_port_info *port_info[2] = {&info, &info};
return ata_pci_init_one(dev, port_info, 2);
}
static const struct pci_device_id ns87410[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87410), },
{ 0, },
};
static struct pci_driver ns87410_pci_driver = {
.name = DRV_NAME,
.id_table = ns87410,
.probe = ns87410_init_one,
.remove = ata_pci_remove_one
};
static int __init ns87410_init(void)
{
return pci_register_driver(&ns87410_pci_driver);
}
static void __exit ns87410_exit(void)
{
pci_unregister_driver(&ns87410_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Nat Semi 87410");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, ns87410);
MODULE_VERSION(DRV_VERSION);
module_init(ns87410_init);
module_exit(ns87410_exit);

339
drivers/ata/pata_oldpiix.c Normal file
View File

@ -0,0 +1,339 @@
/*
* pata_oldpiix.c - Intel early PIIX PATA controllers
*
* (C) 2005 Red Hat <alan@redhat.com>
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* Early PIIX differs significantly from the later PIIX as it lacks
* SITRE and the slave timing registers. This means that you have to
* set timing per channel, or be clever. Libata tells us whenever it
* does drive selection and we use this to reload the timings.
*
* Because of these behaviour differences the early PIIX gets its own driver module.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_oldpiix"
#define DRV_VERSION "0.5.1"
/**
* oldpiix_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int oldpiix_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits oldpiix_enable_bits[] = {
{ 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */
{ 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */
};
if (!pci_test_config_bits(pdev, &oldpiix_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* oldpiix_pata_error_handler - Probe specified port on PATA host controller
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void oldpiix_pata_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, oldpiix_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* oldpiix_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are setting
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void oldpiix_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
unsigned int idetm_port= ap->port_no ? 0x42 : 0x40;
u16 idetm_data;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
if (pio > 2)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE IORDY */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
pci_read_config_word(dev, idetm_port, &idetm_data);
/* Enable PPE, IE and TIME as appropriate. Clear the other
drive timing bits */
if (adev->devno == 0) {
idetm_data &= 0xCCE0;
idetm_data |= control;
} else {
idetm_data &= 0xCC0E;
idetm_data |= (control << 4);
}
idetm_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
pci_write_config_word(dev, idetm_port, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* oldpiix_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set MWDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void oldpiix_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u8 idetm_port = ap->port_no ? 0x42 : 0x40;
u16 idetm_data;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 0 },
{ 2, 1 },
{ 2, 3 }, };
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally along with TIME1. PPE has already
* been set when the PIO timing was set.
*/
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
unsigned int control;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
int pio = needed_pio[mwdma] - XFER_PIO_0;
pci_read_config_word(dev, idetm_port, &idetm_data);
control = 3; /* IORDY|TIME0 */
/* Intel specifies that the PPE functionality is for disk only */
if (adev->class == ATA_DEV_ATA)
control |= 4; /* PPE enable */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO into PIO0 */
if (adev->pio_mode < needed_pio[mwdma])
/* Enable DMA timing only */
control |= 8; /* PIO cycles in PIO0 */
/* Mask out the relevant control and timing bits we will load. Also
clear the other drive TIME register as a precaution */
if (adev->devno == 0) {
idetm_data &= 0xCCE0;
idetm_data |= control;
} else {
idetm_data &= 0xCC0E;
idetm_data |= (control << 4);
}
idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
pci_write_config_word(dev, idetm_port, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
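/*
 * Worked example (illustrative): for MWDMA2 the needed_pio[] table above
 * selects XFER_PIO_4 class timings.  If the drive itself only reports a
 * slower PIO mode, bit 3 of the control value forces PIO cycles back to
 * PIO0 while the faster timing bank is retained for DMA.
 */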
/**
* oldpiix_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int oldpiix_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
if (adev != ap->private_data) {
if (adev->dma_mode)
oldpiix_set_dmamode(ap, adev);
else if (adev->pio_mode)
oldpiix_set_piomode(ap, adev);
}
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template oldpiix_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations oldpiix_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = oldpiix_set_piomode,
.set_dmamode = oldpiix_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = oldpiix_pata_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = oldpiix_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* oldpiix_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in oldpiix_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We probe for combined mode (sigh),
* and then hand over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int oldpiix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
static struct ata_port_info info = {
.sht = &oldpiix_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.port_ops = &oldpiix_pata_ops,
};
static struct ata_port_info *port_info[2] = { &info, &info };
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id oldpiix_pci_tbl[] = {
{ PCI_DEVICE(0x8086, 0x1230), },
{ } /* terminate list */
};
static struct pci_driver oldpiix_pci_driver = {
.name = DRV_NAME,
.id_table = oldpiix_pci_tbl,
.probe = oldpiix_init_one,
.remove = ata_pci_remove_one,
};
static int __init oldpiix_init(void)
{
return pci_register_driver(&oldpiix_pci_driver);
}
static void __exit oldpiix_exit(void)
{
pci_unregister_driver(&oldpiix_pci_driver);
}
module_init(oldpiix_init);
module_exit(oldpiix_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for early PIIX series controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, oldpiix_pci_tbl);
MODULE_VERSION(DRV_VERSION);

292
drivers/ata/pata_opti.c Normal file
View File

@ -0,0 +1,292 @@
/*
* pata_opti.c - Opti PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based on
* linux/drivers/ide/pci/opti621.c Version 0.7 Sept 10, 2002
*
* Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
*
* Authors:
* Jaromir Koutek <miri@punknet.cz>,
* Jan Harkes <jaharkes@cwi.nl>,
* Mark Lord <mlord@pobox.com>
* Some parts of code are from ali14xx.c and from rz1000.c.
*
* Also consulted the FreeBSD prototype driver by Kevin Day to try
* and resolve some confusions. Further documentation can be found in
* Ralf Brown's interrupt list
*
* If you have other variants of the Opti range (Viper/Vendetta) please
* try this driver with those PCI idents and report back. For the later
* chips see the pata_optidma driver
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_opti"
#define DRV_VERSION "0.2.4"
enum {
READ_REG = 0, /* index of Read cycle timing register */
WRITE_REG = 1, /* index of Write cycle timing register */
CNTRL_REG = 3, /* index of Control register */
STRAP_REG = 5, /* index of Strap register */
MISC_REG = 6 /* index of Miscellaneous register */
};
/**
* opti_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int opti_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits opti_enable_bits[] = {
{ 0x45, 1, 0x80, 0x00 },
{ 0x40, 1, 0x08, 0x00 }
};
if (!pci_test_config_bits(pdev, &opti_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* opti_error_handler - probe reset
* @ap: ATA port
*
* Perform the ATA probe and bus reset sequence plus specific handling
* for this hardware. The Opti needs little handling - we have no UDMA66
* capability that needs cable detection. All we must do is check the port
* is enabled.
*/
static void opti_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, opti_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* opti_write_reg - control register setup
* @ap: ATA port
* @value: value
* @reg: control register number
*
* The Opti uses magic 'trapdoor' register accesses to do configuration
* rather than using PCI space as other controllers do. The double inw
* on the error register activates configuration mode. We can then write
* the control register
*/
static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
{
unsigned long regio = ap->ioaddr.cmd_addr;
/* These 3 unlock the control register access */
inw(regio + 1);
inw(regio + 1);
outb(3, regio + 2);
/* Do the I/O */
outb(val, regio + reg);
/* Relock */
outb(0x83, regio + 2);
}
#if 0
/**
* opti_read_reg - control register read
* @ap: ATA port
* @reg: control register number
*
* The Opti uses magic 'trapdoor' register accesses to do configuration
* rather than using PCI space as other controllers do. The double inw
* on the error register activates configuration mode. We can then read
* the control register
*/
static u8 opti_read_reg(struct ata_port *ap, int reg)
{
unsigned long regio = ap->ioaddr.cmd_addr;
u8 ret;
inw(regio + 1);
inw(regio + 1);
outb(3, regio + 2);
ret = inb(regio + reg);
outb(0x83, regio + 2);
return ret;
}
#endif
/**
* opti_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Timing numbers are taken from
* the FreeBSD driver then pre computed to keep the code clean. There
* are two tables depending on the hardware clock speed.
*/
static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int pio = adev->pio_mode - XFER_PIO_0;
unsigned long regio = ap->ioaddr.cmd_addr;
u8 addr;
/* Address table precomputed with prefetch off and a DCLK of 2 */
static const u8 addr_timing[2][5] = {
{ 0x30, 0x20, 0x20, 0x10, 0x10 },
{ 0x20, 0x20, 0x10, 0x10, 0x10 }
};
static const u8 data_rec_timing[2][5] = {
{ 0x6B, 0x56, 0x42, 0x32, 0x31 },
{ 0x58, 0x44, 0x32, 0x22, 0x21 }
};
outb(0xff, regio + 5);
clock = inw(regio + 5) & 1;
/*
* As with many controllers the address setup time is shared
* and must suit both devices if present.
*/
addr = addr_timing[clock][pio];
if (pair) {
/* Hardware constraint */
u8 pair_addr = addr_timing[clock][pair->pio_mode - XFER_PIO_0];
if (pair_addr > addr)
addr = pair_addr;
}
/* Commence primary programming sequence */
opti_write_reg(ap, adev->devno, MISC_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], READ_REG);
opti_write_reg(ap, data_rec_timing[clock][pio], WRITE_REG);
opti_write_reg(ap, addr, MISC_REG);
/* Programming sequence complete, override strapping */
opti_write_reg(ap, 0x85, CNTRL_REG);
}
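/*
 * Illustrative example: if the clock strap read above returns 0 and a
 * PIO4 master is paired with a PIO0 slave, the data/recovery byte for
 * the master is 0x31 but the shared address setup is widened to 0x30
 * to suit the slower slave.
 */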
static struct scsi_host_template opti_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations opti_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = opti_set_piomode,
/* .set_dmamode = opti_set_dmamode, */
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = opti_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int opti_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &opti_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.port_ops = &opti_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
static int printed_version;
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
return ata_pci_init_one(dev, port_info, 2);
}
static const struct pci_device_id opti[] = {
{ PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_OPTI, PCI_DEVICE_ID_OPTI_82C825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{ 0, },
};
static struct pci_driver opti_pci_driver = {
.name = DRV_NAME,
.id_table = opti,
.probe = opti_init_one,
.remove = ata_pci_remove_one
};
static int __init opti_init(void)
{
return pci_register_driver(&opti_pci_driver);
}
static void __exit opti_exit(void)
{
pci_unregister_driver(&opti_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Opti 621/621X");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, opti);
MODULE_VERSION(DRV_VERSION);
module_init(opti_init);
module_exit(opti_exit);

547
drivers/ata/pata_optidma.c Normal file
View File

@ -0,0 +1,547 @@
/*
* pata_optidma.c - Opti DMA PATA for new ATA layer
* (C) 2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* The Opti DMA controllers are related to the older PIO PCI controllers
* and indeed the VLB ones. The main differences are that the timing
* numbers are now based off PCI clocks not VLB and differ, and that
* MWDMA is supported.
*
* This driver should support Viper-N+, FireStar, FireStar Plus.
*
* These devices support virtual DMA for read (as on the CS5520). Later
* chips support UDMA33, but only if the rest of the board logic does,
* so you have to get this right. We don't support the virtual DMA
* but we do handle UDMA.
*
* Bits that are worth knowing
* Most control registers are shadowed into I/O registers
* 0x1F5 bit 0 tells you if the PCI/VLB clock is 33 or 25MHz
* Virtual DMA registers *move* between rev 0x02 and rev 0x10
* UDMA requires a 66MHz FSB
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_optidma"
#define DRV_VERSION "0.2.1"
enum {
READ_REG = 0, /* index of Read cycle timing register */
WRITE_REG = 1, /* index of Write cycle timing register */
CNTRL_REG = 3, /* index of Control register */
STRAP_REG = 5, /* index of Strap register */
MISC_REG = 6 /* index of Miscellaneous register */
};
static int pci_clock; /* 0 = 33 1 = 25 */
/**
* optidma_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int optidma_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static const struct pci_bits optidma_enable_bits = {
0x40, 1, 0x08, 0x00
};
if (ap->port_no && !pci_test_config_bits(pdev, &optidma_enable_bits)) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* optidma_error_handler - probe reset handler
* @ap: ATA port
*
* Perform the ATA probe and bus reset sequence plus specific handling
* for this hardware. The Opti needs little handling - we have no UDMA66
* capability that needs cable detection. All we must do is check the port
* is enabled.
*/
static void optidma_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, optidma_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* optidma_unlock - unlock control registers
* @ap: ATA port
*
* Unlock the control register block for this adapter. Registers must not
* be unlocked in a situation where libata might look at them.
*/
static void optidma_unlock(struct ata_port *ap)
{
unsigned long regio = ap->ioaddr.cmd_addr;
/* These 3 unlock the control register access */
inw(regio + 1);
inw(regio + 1);
outb(3, regio + 2);
}
/**
* optidma_lock - issue temporary relock
* @ap: ATA port
*
* Re-lock the configuration register settings.
*/
static void optidma_lock(struct ata_port *ap)
{
unsigned long regio = ap->ioaddr.cmd_addr;
/* Relock */
outb(0x83, regio + 2);
}
/**
* optidma_set_mode - set mode data
* @ap: ATA interface
* @adev: ATA device
* @mode: Mode to set
*
* Called to do the DMA or PIO mode setup. Timing numbers are all
* pre computed to keep the code clean. There are two tables depending
* on the hardware clock speed.
*
* WARNING: While we do this the IDE registers vanish. If we take an
* IRQ here we depend on the host set locking to avoid catastrophe.
*/
static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct ata_device *pair = ata_dev_pair(adev);
int pio = adev->pio_mode - XFER_PIO_0;
int dma = adev->dma_mode - XFER_MW_DMA_0;
unsigned long regio = ap->ioaddr.cmd_addr;
u8 addr;
/* Address table precomputed with a DCLK of 2 */
static const u8 addr_timing[2][5] = {
{ 0x30, 0x20, 0x20, 0x10, 0x10 },
{ 0x20, 0x20, 0x10, 0x10, 0x10 }
};
static const u8 data_rec_timing[2][5] = {
{ 0x59, 0x46, 0x30, 0x20, 0x20 },
{ 0x46, 0x32, 0x20, 0x20, 0x10 }
};
static const u8 dma_data_rec_timing[2][3] = {
{ 0x76, 0x20, 0x20 },
{ 0x54, 0x20, 0x10 }
};
/* Switch from IDE to control mode */
optidma_unlock(ap);
/*
* As with many controllers the address setup time is shared
* and must suit both devices if present. FIXME: Check if we
* need to look at slowest of PIO/DMA mode of either device
*/
if (mode >= XFER_MW_DMA_0)
addr = 0;
else
addr = addr_timing[pci_clock][pio];
if (pair) {
u8 pair_addr;
/* Hardware constraint */
if (pair->dma_mode)
pair_addr = 0;
else
pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
if (pair_addr > addr)
addr = pair_addr;
}
/* Commence primary programming sequence */
/* First we load the device number into the timing select */
outb(adev->devno, regio + MISC_REG);
/* Now we load the data timings into read data/write data */
if (mode < XFER_MW_DMA_0) {
outb(data_rec_timing[pci_clock][pio], regio + READ_REG);
outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
} else if (mode < XFER_UDMA_0) {
outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
}
/* Finally we load the address setup into the misc register */
outb(addr | adev->devno, regio + MISC_REG);
/* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
outb(0x85, regio + CNTRL_REG);
/* Switch back to IDE mode */
optidma_lock(ap);
/* Note: at this point our programming is incomplete. We are
not supposed to program PCI 0x43 "things we hacked onto the chip"
until we've done both sets of PIO/DMA timings */
}
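/* Worked example, reading the tables above: with a 33MHz clock (pci_clock == 0),
no slave present and the master in PIO 4, this writes 0x20 to READ_REG and
WRITE_REG, 0x10 to MISC_REG and finally 0x85 to CNTRL_REG. */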
/**
* optiplus_set_mode - DMA setup for Firestar Plus
* @ap: ATA port
* @adev: device
* @mode: desired mode
*
* The Firestar plus has additional UDMA functionality for UDMA0-2 and
* requires we do some additional work. Because the base work we must do
* is mostly shared we wrap the Firestar setup functionality in this
* one function.
*/
static void optiplus_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mode)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 udcfg;
u8 udslave;
int dev2 = 2 * adev->devno;
int unit = 2 * ap->port_no + adev->devno;
int udma = mode - XFER_UDMA_0;
pci_read_config_byte(pdev, 0x44, &udcfg);
if (mode <= XFER_UDMA_0) {
udcfg &= ~(1 << unit);
optidma_set_mode(ap, adev, adev->dma_mode);
} else {
udcfg |= (1 << unit);
if (ap->port_no) {
pci_read_config_byte(pdev, 0x45, &udslave);
udslave &= ~(0x03 << dev2);
udslave |= (udma << dev2);
pci_write_config_byte(pdev, 0x45, udslave);
} else {
udcfg &= ~(0x30 << dev2);
udcfg |= (udma << dev2);
}
}
pci_write_config_byte(pdev, 0x44, udcfg);
}
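/* Example, derived from the code above: UDMA2 on the secondary master (unit 2)
sets bit 2 of config register 0x44 and puts the mode value 2 into bits 1:0 of
register 0x45; modes of UDMA0 or below clear the enable bit and fall back to
optidma_set_mode(). */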
/**
* optidma_set_pio_mode - PIO setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optidma_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
optidma_set_mode(ap, adev, adev->pio_mode);
}
/**
* optidma_set_dma_mode - DMA setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optidma_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
optidma_set_mode(ap, adev, adev->dma_mode);
}
/**
* optiplus_set_pio_mode - PIO setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optiplus_set_pio_mode(struct ata_port *ap, struct ata_device *adev)
{
optiplus_set_mode(ap, adev, adev->pio_mode);
}
/**
* optiplus_set_dma_mode - DMA setup callback
* @ap: ATA port
* @adev: Device
*
* The libata core provides separate functions for handling PIO and
* DMA programming. The architecture of the Firestar makes it easier
* for us to have a common function so we provide wrappers
*/
static void optiplus_set_dma_mode(struct ata_port *ap, struct ata_device *adev)
{
optiplus_set_mode(ap, adev, adev->dma_mode);
}
/**
* optidma_make_bits43 - PCI setup helper
* @adev: ATA device
*
* Turn the ATA device setup into PCI configuration bits
* for register 0x43 and return the two bits needed.
*/
static u8 optidma_make_bits43(struct ata_device *adev)
{
static const u8 bits43[5] = {
0, 0, 0, 1, 2
};
if (!ata_dev_enabled(adev))
return 0;
if (adev->dma_mode)
return adev->dma_mode - XFER_MW_DMA_0;
return bits43[adev->pio_mode - XFER_PIO_0];
}
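/* Example mappings from the table above: PIO 0-2 -> 0, PIO 3 -> 1, PIO 4 -> 2,
MWDMA2 -> 2; a disabled device contributes 0. */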
/**
* optidma_post_set_mode - finalize PCI setup
* @ap: port to set up
*
* Finalise the configuration by writing the nibble of extra bits
* of data into the chip.
*/
static void optidma_post_set_mode(struct ata_port *ap)
{
u8 r;
int nybble = 4 * ap->port_no;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, 0x43, &r);
r &= (0x0F << nybble);
r |= (optidma_make_bits43(&ap->device[0]) +
(optidma_make_bits43(&ap->device[1]) << 2)) << nybble;
pci_write_config_byte(pdev, 0x43, r);
}
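/* Register 0x43 layout as used here: one nibble per channel (primary in
bits 3:0, secondary in bits 7:4); within each nibble device 0 occupies the
low two bits and device 1 the two bits above it. */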
static struct scsi_host_template optidma_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations optidma_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = optidma_set_pio_mode,
.set_dmamode = optidma_set_dma_mode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.error_handler = optidma_error_handler,
.post_set_mode = optidma_post_set_mode,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations optiplus_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = optiplus_set_pio_mode,
.set_dmamode = optiplus_set_dma_mode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.error_handler = optidma_error_handler,
.post_set_mode = optidma_post_set_mode,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* optiplus_with_udma - Look for UDMA capable setup
* @pdev: ATA controller
*/
static int optiplus_with_udma(struct pci_dev *pdev)
{
u8 r;
int ret = 0;
int ioport = 0x22;
struct pci_dev *dev1;
/* Find function 1 */
dev1 = pci_get_device(0x1045, 0xC701, NULL);
if (dev1 == NULL)
return 0;
/* Rev must be >= 0x10 */
pci_read_config_byte(dev1, 0x08, &r);
if (r < 0x10)
goto done_nomsg;
/* Read the chipset system configuration to check our mode */
pci_read_config_byte(dev1, 0x5F, &r);
ioport |= (r << 8);
outb(0x10, ioport);
/* Must be 66MHz sync */
if ((inb(ioport + 2) & 1) == 0)
goto done;
/* Check the ATA arbitration/timing is suitable */
pci_read_config_byte(pdev, 0x42, &r);
if ((r & 0x36) != 0x36)
goto done;
pci_read_config_byte(dev1, 0x52, &r);
if (r & 0x80) /* IDEDIR disabled */
ret = 1;
done:
printk(KERN_WARNING "UDMA not supported in this configuration.\n");
done_nomsg: /* Wrong chip revision */
pci_dev_put(dev1);
return ret;
}
static int optidma_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info_82c700 = {
.sht = &optidma_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &optidma_port_ops
};
static struct ata_port_info info_82c700_udma = {
.sht = &optidma_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07,
.port_ops = &optiplus_port_ops
};
static struct ata_port_info *port_info[2];
struct ata_port_info *info = &info_82c700;
static int printed_version;
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
/* Fixed location chipset magic */
inw(0x1F1);
inw(0x1F1);
pci_clock = inb(0x1F5) & 1; /* 0 = 33MHz, 1 = 25MHz */
if (optiplus_with_udma(dev))
info = &info_82c700_udma;
port_info[0] = port_info[1] = info;
return ata_pci_init_one(dev, port_info, 2);
}
static const struct pci_device_id optidma[] = {
{ PCI_DEVICE(0x1045, 0xD568), }, /* Opti 82C700 */
{ 0, },
};
static struct pci_driver optidma_pci_driver = {
.name = DRV_NAME,
.id_table = optidma,
.probe = optidma_init_one,
.remove = ata_pci_remove_one
};
static int __init optidma_init(void)
{
return pci_register_driver(&optidma_pci_driver);
}
static void __exit optidma_exit(void)
{
pci_unregister_driver(&optidma_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Opti Firestar/Firestar Plus");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, optidma);
MODULE_VERSION(DRV_VERSION);
module_init(optidma_init);
module_exit(optidma_exit);

393
drivers/ata/pata_pcmcia.c Normal file
View File

@ -0,0 +1,393 @@
/*
* pata_pcmcia.c - PCMCIA PATA controller driver.
* Copyright 2005-2006 Red Hat Inc <alan@redhat.com>, all rights reserved.
* PCMCIA ident update Copyright 2006 Marcin Juszkiewicz
* <openembedded@hrw.one.pl>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Heavily based upon ide-cs.c
* The initial developer of the original code is David A. Hinds
* <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
* are Copyright (C) 1999 David A. Hinds. All Rights Reserved.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/ata.h>
#include <linux/libata.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#define DRV_NAME "pata_pcmcia"
#define DRV_VERSION "0.2.9"
/*
* Private data structure to glue stuff together
*/
struct ata_pcmcia_info {
struct pcmcia_device *pdev;
int ndev;
dev_node_t node;
};
static struct scsi_host_template pcmcia_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations pcmcia_port_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer_noirq,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
#define CS_CHECK(fn, ret) \
do { last_fn = (fn); if ((last_ret = (ret)) != 0) goto cs_failed; } while (0)
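/* CS_CHECK records which Card Services call failed (last_fn/last_ret) and
jumps to cs_failed, where cs_error() reports the failure. */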
/**
* pcmcia_init_one - attach a PCMCIA interface
* @pdev: pcmcia device
*
* Register a PCMCIA IDE interface. Such interfaces are PIO 0 and
* shared IRQ.
*/
static int pcmcia_init_one(struct pcmcia_device *pdev)
{
struct ata_probe_ent ae;
struct ata_pcmcia_info *info;
tuple_t tuple;
struct {
unsigned short buf[128];
cisparse_t parse;
config_info_t conf;
cistpl_cftable_entry_t dflt;
} *stk = NULL;
cistpl_cftable_entry_t *cfg;
int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
unsigned long io_base, ctl_base;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
return -ENOMEM;
/* Glue stuff together. FIXME: We may be able to get rid of info with care */
info->pdev = pdev;
pdev->priv = info;
/* Set up attributes in order to probe card and get resources */
pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO;
pdev->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
pdev->io.IOAddrLines = 3;
pdev->irq.Attributes = IRQ_TYPE_DYNAMIC_SHARING;
pdev->irq.IRQInfo1 = IRQ_LEVEL_ID;
pdev->conf.Attributes = CONF_ENABLE_IRQ;
pdev->conf.IntType = INT_MEMORY_AND_IO;
/* Allocate resource probing structures */
stk = kzalloc(sizeof(*stk), GFP_KERNEL);
if (!stk)
goto out1;
cfg = &stk->parse.cftable_entry;
/* Tuples we are walking */
tuple.TupleData = (cisdata_t *)&stk->buf;
tuple.TupleOffset = 0;
tuple.TupleDataMax = 255;
tuple.Attributes = 0;
tuple.DesiredTuple = CISTPL_CONFIG;
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
CS_CHECK(GetTupleData, pcmcia_get_tuple_data(pdev, &tuple));
CS_CHECK(ParseTuple, pcmcia_parse_tuple(pdev, &tuple, &stk->parse));
pdev->conf.ConfigBase = stk->parse.config.base;
pdev->conf.Present = stk->parse.config.rmask[0];
/* See if we have a manufacturer identifier. Use it to set is_kme for
vendor quirks */
tuple.DesiredTuple = CISTPL_MANFID;
if (!pcmcia_get_first_tuple(pdev, &tuple) && !pcmcia_get_tuple_data(pdev, &tuple) && !pcmcia_parse_tuple(pdev, &tuple, &stk->parse))
is_kme = ((stk->parse.manfid.manf == MANFID_KME) && ((stk->parse.manfid.card == PRODID_KME_KXLC005_A) || (stk->parse.manfid.card == PRODID_KME_KXLC005_B)));
/* Not sure if this is right... look up the current Vcc */
CS_CHECK(GetConfigurationInfo, pcmcia_get_configuration_info(pdev, &stk->conf));
/* link->conf.Vcc = stk->conf.Vcc; */
pass = io_base = ctl_base = 0;
tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
tuple.Attributes = 0;
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
/* Now munch the resources looking for a suitable set */
while (1) {
if (pcmcia_get_tuple_data(pdev, &tuple) != 0)
goto next_entry;
if (pcmcia_parse_tuple(pdev, &tuple, &stk->parse) != 0)
goto next_entry;
/* Check for matching Vcc, unless we're desperate */
if (!pass) {
if (cfg->vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (stk->conf.Vcc != cfg->vcc.param[CISTPL_POWER_VNOM] / 10000)
goto next_entry;
} else if (stk->dflt.vcc.present & (1 << CISTPL_POWER_VNOM)) {
if (stk->conf.Vcc != stk->dflt.vcc.param[CISTPL_POWER_VNOM] / 10000)
goto next_entry;
}
}
if (cfg->vpp1.present & (1 << CISTPL_POWER_VNOM))
pdev->conf.Vpp = cfg->vpp1.param[CISTPL_POWER_VNOM] / 10000;
else if (stk->dflt.vpp1.present & (1 << CISTPL_POWER_VNOM))
pdev->conf.Vpp = stk->dflt.vpp1.param[CISTPL_POWER_VNOM] / 10000;
if ((cfg->io.nwin > 0) || (stk->dflt.io.nwin > 0)) {
cistpl_io_t *io = (cfg->io.nwin) ? &cfg->io : &stk->dflt.io;
pdev->conf.ConfigIndex = cfg->index;
pdev->io.BasePort1 = io->win[0].base;
pdev->io.IOAddrLines = io->flags & CISTPL_IO_LINES_MASK;
if (!(io->flags & CISTPL_IO_16BIT))
pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_8;
if (io->nwin == 2) {
pdev->io.NumPorts1 = 8;
pdev->io.BasePort2 = io->win[1].base;
pdev->io.NumPorts2 = (is_kme) ? 2 : 1;
if (pcmcia_request_io(pdev, &pdev->io) != 0)
goto next_entry;
io_base = pdev->io.BasePort1;
ctl_base = pdev->io.BasePort2;
} else if ((io->nwin == 1) && (io->win[0].len >= 16)) {
pdev->io.NumPorts1 = io->win[0].len;
pdev->io.NumPorts2 = 0;
if (pcmcia_request_io(pdev, &pdev->io) != 0)
goto next_entry;
io_base = pdev->io.BasePort1;
ctl_base = pdev->io.BasePort1 + 0x0e;
} else goto next_entry;
/* If we've got this far, we're done */
break;
}
next_entry:
if (cfg->flags & CISTPL_CFTABLE_DEFAULT)
memcpy(&stk->dflt, cfg, sizeof(stk->dflt));
if (pass) {
CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(pdev, &tuple));
} else if (pcmcia_get_next_tuple(pdev, &tuple) != 0) {
CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(pdev, &tuple));
memset(&stk->dflt, 0, sizeof(stk->dflt));
pass++;
}
}
CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));
/* Success. Disable the IRQ nIEN line, do quirks */
outb(0x02, ctl_base);
if (is_kme)
outb(0x81, ctl_base + 0x01);
/* FIXME: Could be more ports at base + 0x10 but we only deal with
one right now */
if (pdev->io.NumPorts1 >= 0x20)
printk(KERN_WARNING DRV_NAME ": second channel not yet supported.\n");
/*
* Having done the PCMCIA plumbing the ATA side is relatively
* sane.
*/
memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &pdev->dev;
ae.port_ops = &pcmcia_port_ops;
ae.sht = &pcmcia_sht;
ae.n_ports = 1;
ae.pio_mask = 1; /* ISA so PIO 0 cycles */
ae.irq = pdev->irq.AssignedIRQ;
ae.irq_flags = SA_SHIRQ;
ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
ae.port[0].cmd_addr = io_base;
ae.port[0].altstatus_addr = ctl_base;
ae.port[0].ctl_addr = ctl_base;
ata_std_ports(&ae.port[0]);
if (ata_device_add(&ae) == 0)
goto failed;
info->ndev = 1;
kfree(stk);
return 0;
cs_failed:
cs_error(pdev, last_fn, last_ret);
failed:
kfree(stk);
info->ndev = 0;
pcmcia_disable_device(pdev);
out1:
kfree(info);
return ret;
}
/**
* pcmcia_remove_one - unplug a PCMCIA interface
* @pdev: pcmcia device
*
* A PCMCIA ATA device has been unplugged. Perform the needed
* cleanup. Also called on module unload for any active devices.
*/
static void pcmcia_remove_one(struct pcmcia_device *pdev)
{
struct ata_pcmcia_info *info = pdev->priv;
struct device *dev = &pdev->dev;
if (info != NULL) {
/* If we have attached the device to the ATA layer, detach it */
if (info->ndev) {
struct ata_host *host = dev_get_drvdata(dev);
ata_host_remove(host);
dev_set_drvdata(dev, NULL);
}
info->ndev = 0;
pdev->priv = NULL;
}
pcmcia_disable_device(pdev);
kfree(info);
}
static struct pcmcia_device_id pcmcia_devices[] = {
PCMCIA_DEVICE_FUNC_ID(4),
PCMCIA_DEVICE_MANF_CARD(0x0007, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x0032, 0x0704),
PCMCIA_DEVICE_MANF_CARD(0x0045, 0x0401),
PCMCIA_DEVICE_MANF_CARD(0x0098, 0x0000), /* Toshiba */
PCMCIA_DEVICE_MANF_CARD(0x00a4, 0x002d),
PCMCIA_DEVICE_MANF_CARD(0x00ce, 0x0000), /* Samsung */
PCMCIA_DEVICE_MANF_CARD(0x0319, 0x0000), /* Hitachi */
PCMCIA_DEVICE_MANF_CARD(0x2080, 0x0001),
PCMCIA_DEVICE_MANF_CARD(0x4e01, 0x0200), /* Lexar */
PCMCIA_DEVICE_PROD_ID123("Caravelle", "PSC-IDE ", "PSC000", 0x8c36137c, 0xd0693ab8, 0x2768a9f0),
PCMCIA_DEVICE_PROD_ID123("CDROM", "IDE", "MCD-601p", 0x1b9179ca, 0xede88951, 0x0d902f74),
PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
PCMCIA_DEVICE_PROD_ID12("EXP", "CD+GAME", 0x6f58c983, 0x63c13aaf),
PCMCIA_DEVICE_PROD_ID12("EXP ", "CD-ROM", 0x0a5c52fd, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("EXP ", "PnPIDE", 0x0a5c52fd, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae),
PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178),
PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "PnPIDE", 0x281f1c5d, 0x0c694728),
PCMCIA_DEVICE_PROD_ID12("SHUTTLE TECHNOLOGY LTD.", "PCCARD-IDE/ATAPI Adapter", 0x4a3f0ba0, 0x322560e1),
PCMCIA_DEVICE_PROD_ID12("SEAGATE", "ST1", 0x87c1b330, 0xe1f30883),
PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
PCMCIA_DEVICE_PROD_ID12("WIT", "IDE16", 0x244e5994, 0x3e232852),
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, pcmcia_devices);
static struct pcmcia_driver pcmcia_driver = {
.owner = THIS_MODULE,
.drv = {
.name = DRV_NAME,
},
.id_table = pcmcia_devices,
.probe = pcmcia_init_one,
.remove = pcmcia_remove_one,
};
static int __init pcmcia_init(void)
{
return pcmcia_register_driver(&pcmcia_driver);
}
static void __exit pcmcia_exit(void)
{
pcmcia_unregister_driver(&pcmcia_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for PCMCIA ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(pcmcia_init);
module_exit(pcmcia_exit);

869
drivers/ata/pata_pdc2027x.c Normal file
View File

@ -0,0 +1,869 @@
/*
* Promise PATA TX2/TX4/TX2000/133 IDE driver for pdc20268 to pdc20277.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Ported to libata by:
* Albert Lee <albertcc@tw.ibm.com> IBM Corporation
*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Portions Copyright (C) 1999 Promise Technology, Inc.
*
* Author: Frank Tiernan (frankt@promise.com)
* Released under terms of General Public License
*
*
* libata documentation is available via 'make {ps|pdf}docs',
* as Documentation/DocBook/libata.*
*
* Hardware information only available under NDA.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>
#include <asm/io.h>
#define DRV_NAME "pata_pdc2027x"
#define DRV_VERSION "0.74-ac3"
#undef PDC_DEBUG
#ifdef PDC_DEBUG
#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define PDPRINTK(fmt, args...)
#endif
enum {
PDC_UDMA_100 = 0,
PDC_UDMA_133 = 1,
PDC_100_MHZ = 100000000,
PDC_133_MHZ = 133333333,
PDC_SYS_CTL = 0x1100,
PDC_ATA_CTL = 0x1104,
PDC_GLOBAL_CTL = 0x1108,
PDC_CTCR0 = 0x110C,
PDC_CTCR1 = 0x1110,
PDC_BYTE_COUNT = 0x1120,
PDC_PLL_CTL = 0x1202,
};
static int pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void pdc2027x_remove_one(struct pci_dev *pdev);
static void pdc2027x_error_handler(struct ata_port *ap);
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev);
static void pdc2027x_post_set_mode(struct ata_port *ap);
static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc);
/*
* ATA Timing Tables based on 133MHz controller clock.
* These tables are only used when the controller is in 133MHz clock.
* If the controller is in 100MHz clock, the ASIC hardware will
* set the timing registers automatically when "set feature" command
* is issued to the device. However, if the controller clock is 133MHz,
* the following tables must be used.
*/
static struct pdc2027x_pio_timing {
u8 value0, value1, value2;
} pdc2027x_pio_timing_tbl [] = {
{ 0xfb, 0x2b, 0xac }, /* PIO mode 0 */
{ 0x46, 0x29, 0xa4 }, /* PIO mode 1 */
{ 0x23, 0x26, 0x64 }, /* PIO mode 2 */
{ 0x27, 0x0d, 0x35 }, /* PIO mode 3, IORDY on, Prefetch off */
{ 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */
};
static struct pdc2027x_mdma_timing {
u8 value0, value1;
} pdc2027x_mdma_timing_tbl [] = {
{ 0xdf, 0x5f }, /* MDMA mode 0 */
{ 0x6b, 0x27 }, /* MDMA mode 1 */
{ 0x69, 0x25 }, /* MDMA mode 2 */
};
static struct pdc2027x_udma_timing {
u8 value0, value1, value2;
} pdc2027x_udma_timing_tbl [] = {
{ 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */
{ 0x3a, 0x0a, 0xd0 }, /* UDMA mode 1 */
{ 0x2a, 0x07, 0xcd }, /* UDMA mode 2 */
{ 0x1a, 0x05, 0xcd }, /* UDMA mode 3 */
{ 0x1a, 0x03, 0xcd }, /* UDMA mode 4 */
{ 0x1a, 0x02, 0xcb }, /* UDMA mode 5 */
{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
};
static const struct pci_device_id pdc2027x_pci_tbl[] = {
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20268, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20269, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_100 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20271, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20275, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20276, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
{ PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20277, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PDC_UDMA_133 },
{ } /* terminate list */
};
static struct pci_driver pdc2027x_pci_driver = {
.name = DRV_NAME,
.id_table = pdc2027x_pci_tbl,
.probe = pdc2027x_init_one,
.remove = __devexit_p(pdc2027x_remove_one),
};
static struct scsi_host_template pdc2027x_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations pdc2027x_pata100_ops = {
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = pdc2027x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_pci_host_stop,
};
static struct ata_port_operations pdc2027x_pata133_ops = {
.port_disable = ata_port_disable,
.set_piomode = pdc2027x_set_piomode,
.set_dmamode = pdc2027x_set_dmamode,
.post_set_mode = pdc2027x_post_set_mode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.check_atapi_dma = pdc2027x_check_atapi_dma,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = pdc2027x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_pci_host_stop,
};
static struct ata_port_info pdc2027x_port_info[] = {
/* PDC_UDMA_100 */
{
.sht = &pdc2027x_sht,
.flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
ATA_FLAG_MMIO,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = ATA_UDMA5, /* udma0-5 */
.port_ops = &pdc2027x_pata100_ops,
},
/* PDC_UDMA_133 */
{
.sht = &pdc2027x_sht,
.flags = ATA_FLAG_NO_LEGACY | ATA_FLAG_SLAVE_POSS |
ATA_FLAG_MMIO,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = ATA_UDMA6, /* udma0-6 */
.port_ops = &pdc2027x_pata133_ops,
},
};
MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Albert Lee");
MODULE_DESCRIPTION("libata driver module for Promise PDC20268 to PDC20277");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
/**
* port_mmio - Get the MMIO address of PDC2027x extended registers
* @ap: Port
* @offset: offset from mmio base
*/
static inline void* port_mmio(struct ata_port *ap, unsigned int offset)
{
return ap->host->mmio_base + ap->port_no * 0x100 + offset;
}
/**
* dev_mmio - Get the MMIO address of PDC2027x extended registers
* @ap: Port
* @adev: device
* @offset: offset from mmio base
*/
static inline void* dev_mmio(struct ata_port *ap, struct ata_device *adev, unsigned int offset)
{
u8 adj = (adev->devno) ? 0x08 : 0x00;
return port_mmio(ap, offset) + adj;
}
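/* Each port owns a 0x100 byte block of extended registers; device 1's copy of
the per-device registers sits 8 bytes above device 0's within that block. */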
/**
* pdc2027x_cbl_detect - Probe host controller cable detect info
* @ap: Port for which cable detect info is desired
*
* Read 80c cable indicator from Promise extended register.
* This register is latched when the system is reset.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_cbl_detect(struct ata_port *ap)
{
u32 cgcr;
/* check cable detect results */
cgcr = readl(port_mmio(ap, PDC_GLOBAL_CTL));
if (cgcr & (1 << 26))
goto cbl40;
PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
ap->cbl = ATA_CBL_PATA80;
return;
cbl40:
printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
ap->cbl = ATA_CBL_PATA40;
ap->udma_mask &= ATA_UDMA_MASK_40C;
}
/**
* pdc2027x_port_enabled - Check PDC ATA control register to see whether the port is enabled.
* @ap: Port to check
*/
static inline int pdc2027x_port_enabled(struct ata_port *ap)
{
return readb(port_mmio(ap, PDC_ATA_CTL)) & 0x02;
}
/**
* pdc2027x_prereset - prereset for PATA host controller
* @ap: Target port
*
* Probeinit including cable detection.
*
* LOCKING:
* None (inherited from caller).
*/
static int pdc2027x_prereset(struct ata_port *ap)
{
/* Check whether port enabled */
if (!pdc2027x_port_enabled(ap)) {
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
pdc2027x_cbl_detect(ap);
return ata_std_prereset(ap);
}
/**
* pdc2027x_error_handler - Perform reset on PATA port and classify
* @ap: Port to reset
*
* Reset PATA phy and classify attached devices.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, pdc2027x_prereset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* pdc2027x_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port to configure
* @adev: Device whose PIO mode (0 - 4) is being programmed
*
* Set PIO mode for device.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
u32 ctcr0, ctcr1;
PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
/* Sanity check */
if (pio > 4) {
printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
return;
}
/* Set the PIO timing registers using value table for 133MHz */
PDPRINTK("Set pio regs... \n");
ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
ctcr0 |= pdc2027x_pio_timing_tbl[pio].value0 |
(pdc2027x_pio_timing_tbl[pio].value1 << 8);
writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0x00ffffff;
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
PDPRINTK("Set pio regs done\n");
PDPRINTK("Set to pio mode[%u] \n", pio);
}
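/* Worked example: for PIO 4 the table entry { 0x23, 0x09, 0x25 } puts 0x0923
into the low 16 bits of CTCR0 and 0x25 into bits 31:24 of CTCR1. */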
/**
* pdc2027x_set_dmamode - Initialize host controller PATA UDMA timings
* @ap: Port to configure
* @adev: Device whose DMA mode (MWDMA0-2 or UDMA0-6) is being programmed
*
* Set UDMA mode for device.
*
* LOCKING:
* None (inherited from caller).
*/
static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
unsigned int dma_mode = adev->dma_mode;
u32 ctcr0, ctcr1;
if ((dma_mode >= XFER_UDMA_0) &&
(dma_mode <= XFER_UDMA_6)) {
/* Set the UDMA timing registers with value table for 133MHz */
unsigned int udma_mode = dma_mode & 0x07;
if (dma_mode == XFER_UDMA_2) {
/*
* Turn off tHOLD.
* If tHOLD is '1', the hardware will add half clock for data hold time.
* This code segment seems to have no effect, since tHOLD is overwritten below.
*/
ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
writel(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
PDPRINTK("Set udma regs... \n");
ctcr1 = readl(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
ctcr1 |= pdc2027x_udma_timing_tbl[udma_mode].value0 |
(pdc2027x_udma_timing_tbl[udma_mode].value1 << 8) |
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
writel(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
PDPRINTK("Set udma regs done\n");
PDPRINTK("Set to udma mode[%u] \n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
PDPRINTK("Set mdma regs... \n");
ctcr0 = readl(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
ctcr0 |= (pdc2027x_mdma_timing_tbl[mdma_mode].value0 << 16) |
(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
writel(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
PDPRINTK("Set mdma regs done\n");
PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
} else {
printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
}
}
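/* Worked example: UDMA5 uses table entry { 0x1a, 0x02, 0xcb }, so the low 24
bits of CTCR1 become 0xcb021a; MWDMA2 instead puts 0x69 into bits 23:16 and
0x25 into bits 31:24 of CTCR0. */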
/**
* pdc2027x_post_set_mode - Set the timing registers back to correct values.
* @ap: Port to configure
*
* The pdc2027x hardware will look at "SET FEATURES" and change the timing registers
* automatically. The values set by the hardware might be incorrect under a 133MHz PLL.
* This function overwrites the possibly incorrect values set by the hardware to be correct.
*/
static void pdc2027x_post_set_mode(struct ata_port *ap)
{
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
if (ata_dev_enabled(dev)) {
pdc2027x_set_piomode(ap, dev);
/*
* Enable prefetch if the device supports PIO only.
*/
if (dev->xfer_shift == ATA_SHIFT_PIO) {
u32 ctcr1 = readl(dev_mmio(ap, dev, PDC_CTCR1));
ctcr1 |= (1 << 25);
writel(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
PDPRINTK("Turn on prefetch\n");
} else {
pdc2027x_set_dmamode(ap, dev);
}
}
}
}
/**
* pdc2027x_check_atapi_dma - Check whether ATAPI DMA can be supported for this command
* @qc: Metadata associated with taskfile to check
*
* LOCKING:
* None (inherited from caller).
*
* RETURNS: 0 when ATAPI DMA can be used
* 1 otherwise
*/
static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *cmd = qc->scsicmd;
u8 *scsicmd = cmd->cmnd;
int rc = 1; /* atapi dma off by default */
/*
* This workaround is from Promise's GPL driver.
* If ATAPI DMA is used for commands not in the
* following white list, say MODE_SENSE and REQUEST_SENSE,
* pdc2027x might hit the irq lost problem.
*/
switch (scsicmd[0]) {
case READ_10:
case WRITE_10:
case READ_12:
case WRITE_12:
case READ_6:
case WRITE_6:
case 0xad: /* READ_DVD_STRUCTURE */
case 0xbe: /* READ_CD */
/* ATAPI DMA is ok */
rc = 0;
break;
default:
;
}
return rc;
}
/**
* pdc_read_counter - Read the ctr counter
* @probe_ent: for the port address
*/
static long pdc_read_counter(struct ata_probe_ent *probe_ent)
{
long counter;
int retry = 1;
u32 bccrl, bccrh, bccrlv, bccrhv;
retry:
bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
rmb();
/* Read the counter values again for verification */
bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
rmb();
counter = (bccrh << 15) | bccrl;
PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
/*
* The 30-bit decreasing counter is read in two pieces.
* An incorrect value may be read when both bccrh and bccrl are changing.
* E.g. when 7900 decreases to 78FF, the wrong value 7800 might be read.
*/
if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
retry--;
PDPRINTK("rereading counter\n");
goto retry;
}
return counter;
}
/**
* pdc_adjust_pll - Adjust the PLL to suit the detected input clock.
*
* @probe_ent: for the port address
* @pll_clock: the PLL input clock in Hz
* @board_idx: board index, selects the 100MHz or 133MHz PLL output target
*/
static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx)
{
u16 pll_ctl;
long pll_clock_khz = pll_clock / 1000;
long pout_required = board_idx ? PDC_133_MHZ : PDC_100_MHZ;
long ratio = pout_required / pll_clock_khz;
int F, R;
/* Sanity check */
if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
return;
}
#ifdef PDC_DEBUG
PDPRINTK("pout_required is %ld\n", pout_required);
/* Show the current clock value of PLL control register
* (maybe already configured by the firmware)
*/
pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
PDPRINTK("pll_ctl[%X]\n", pll_ctl);
#endif
/*
* Calculate the ratio of F, R and OD
* POUT = (F + 2) / (( R + 2) * NO)
*/
if (ratio < 8600L) { /* 8.6x */
/* Using NO = 0x01, R = 0x0D */
R = 0x0d;
} else if (ratio < 12900L) { /* 12.9x */
/* Using NO = 0x01, R = 0x08 */
R = 0x08;
} else if (ratio < 16100L) { /* 16.1x */
/* Using NO = 0x01, R = 0x06 */
R = 0x06;
} else if (ratio < 64000L) { /* 64x */
R = 0x00;
} else {
/* Invalid ratio */
printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
return;
}
F = (ratio * (R+2)) / 1000 - 2;
if (unlikely(F < 0 || F > 127)) {
/* Invalid F */
printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
return;
}
PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
pll_ctl = (R << 8) | F;
PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL);
readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */
/* Wait the PLL circuit to be stable */
mdelay(30);
#ifdef PDC_DEBUG
/*
* Show the current clock value of PLL control register
* (maybe configured by the firmware)
*/
pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
PDPRINTK("pll_ctl[%X]\n", pll_ctl);
#endif
return;
}
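/* Worked example: a 16949kHz input clock (33MHz PCI bus) aimed at the 133MHz
target gives ratio = 7866, so R = 0x0d and F = 7866 * 15 / 1000 - 2 = 115,
i.e. pll_ctl = 0x0d73. */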
/**
* pdc_detect_pll_input_clock - Detect the PLL input clock in Hz.
* @probe_ent: for the port address
*
* The PLL input clock is half of the PCI clock,
* e.g. 16949000 Hz on a 33MHz PCI bus for the pdc20275.
*/
static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
{
u32 scr;
long start_count, end_count;
long pll_clock;
/* Read current counter value */
start_count = pdc_read_counter(probe_ent);
/* Start the test mode */
scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
PDPRINTK("scr[%X]\n", scr);
writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
/* Let the counter run for 100 ms. */
mdelay(100);
/* Read the counter values again */
end_count = pdc_read_counter(probe_ent);
/* Stop the test mode */
scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
PDPRINTK("scr[%X]\n", scr);
writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
/* calculate the input clock in Hz */
pll_clock = (start_count - end_count) * 10;
PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
return pll_clock;
}
/**
* pdc_hardware_init - Initialize the hardware.
* @pdev: instance of pci_dev found
* @board_idx: board index selecting the UDMA100 or UDMA133 variant
* @pe: for the port address
*/
static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, unsigned int board_idx)
{
long pll_clock;
/*
* Detect the PLL input clock rate.
* On some systems the PCI bus runs at a non-standard clock rate,
* e.g. 25MHz or 40MHz, so the cycle time must be adjusted.
* The pdc20275 controller employs a PLL circuit to help correct the timing register settings.
*/
pll_clock = pdc_detect_pll_input_clock(pe);
if (pll_clock < 0) /* counter overflow? Try again. */
pll_clock = pdc_detect_pll_input_clock(pe);
dev_printk(KERN_INFO, &pdev->dev, "PLL input clock %ld kHz\n", pll_clock/1000);
/* Adjust PLL control register */
pdc_adjust_pll(pe, pll_clock, board_idx);
return 0;
}
/**
* pdc_ata_setup_port - setup the mmio address
* @port: ata ioports to setup
* @base: base address
*/
static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
{
port->cmd_addr =
port->data_addr = base;
port->feature_addr =
port->error_addr = base + 0x05;
port->nsect_addr = base + 0x0a;
port->lbal_addr = base + 0x0f;
port->lbam_addr = base + 0x10;
port->lbah_addr = base + 0x15;
port->device_addr = base + 0x1a;
port->command_addr =
port->status_addr = base + 0x1f;
port->altstatus_addr =
port->ctl_addr = base + 0x81a;
}
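/* Note the non-standard spacing: the shadowed taskfile registers sit 5 bytes
apart in the MMIO window (data at +0x00, error at +0x05, ... status at +0x1f),
with the control register up at +0x81a. */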
/**
* pdc2027x_init_one - PCI probe function
* Called when an instance of PCI adapter is inserted.
* This function checks whether the hardware is supported,
* initializes the hardware and registers an instance of ata_host with
* libata by providing struct ata_probe_ent and ata_device_add().
* (implements struct pci_driver.probe() )
*
* @pdev: instance of pci_dev found
* @ent: matching entry in the id_tbl[]
*/
static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
unsigned int board_idx = (unsigned int) ent->driver_data;
struct ata_probe_ent *probe_ent = NULL;
unsigned long base;
void *mmio_base;
int rc;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
rc = pci_enable_device(pdev);
if (rc)
return rc;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
goto err_out;
rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
goto err_out_regions;
/* Prepare the probe entry */
probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
if (probe_ent == NULL) {
rc = -ENOMEM;
goto err_out_regions;
}
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);
mmio_base = pci_iomap(pdev, 5, 0);
if (!mmio_base) {
rc = -ENOMEM;
goto err_out_free_ent;
}
base = (unsigned long) mmio_base;
probe_ent->sht = pdc2027x_port_info[board_idx].sht;
probe_ent->port_flags = pdc2027x_port_info[board_idx].flags;
probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = pdc2027x_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = pdc2027x_port_info[board_idx].udma_mask;
probe_ent->port_ops = pdc2027x_port_info[board_idx].port_ops;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
probe_ent->mmio_base = mmio_base;
pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0);
probe_ent->port[0].bmdma_addr = base + 0x1000;
pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0);
probe_ent->port[1].bmdma_addr = base + 0x1008;
probe_ent->n_ports = 2;
pci_set_master(pdev);
//pci_enable_intx(pdev);
/* initialize adapter */
if (pdc_hardware_init(pdev, probe_ent, board_idx) != 0)
goto err_out_free_ent;
ata_device_add(probe_ent);
kfree(probe_ent);
return 0;
err_out_free_ent:
kfree(probe_ent);
err_out_regions:
pci_release_regions(pdev);
err_out:
pci_disable_device(pdev);
return rc;
}
/**
* pdc2027x_remove_one - Called to remove a single instance of the
* adapter.
*
* @pdev: The PCI device to remove.
* FIXME: module load/unload not working yet
*/
static void __devexit pdc2027x_remove_one(struct pci_dev *pdev)
{
ata_pci_remove_one(pdev);
}
/**
* pdc2027x_init - Called after this module is loaded into the kernel.
*/
static int __init pdc2027x_init(void)
{
return pci_module_init(&pdc2027x_pci_driver);
}
/**
* pdc2027x_exit - Called before this module is unloaded from the kernel
*/
static void __exit pdc2027x_exit(void)
{
pci_unregister_driver(&pdc2027x_pci_driver);
}
module_init(pdc2027x_init);
module_exit(pdc2027x_exit);

423
drivers/ata/pata_pdc202xx_old.c Normal file
View File

@ -0,0 +1,423 @@
/*
* pata_pdc202xx_old.c - Promise PDC202xx PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based in part on linux/drivers/ide/pci/pdc202xx_old.c
*
* First cut with LBA48/ATAPI
*
* TODO:
* Channel interlock/reset on both required ?
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_pdc202xx_old"
#define DRV_VERSION "0.2.1"
/**
* pdc2024x_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int pdc2024x_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void pdc2024x_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, pdc2024x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
static int pdc2026x_pre_reset(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 cis;
pci_read_config_word(pdev, 0x50, &cis);
if (cis & (1 << (10 + ap->port_no)))
ap->cbl = ATA_CBL_PATA80;
else
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void pdc2026x_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, pdc2026x_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* pdc_configure_piomode - set chip PIO timing
* @ap: ATA interface
* @adev: ATA device
* @pio: PIO mode
*
* Called to do the PIO mode setup. Our timing registers are shared
* so a configure_dmamode call will undo any work we do here and vice
* versa
*/
static void pdc_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
static u16 pio_timing[5] = {
0x0913, 0x050C , 0x0308, 0x0206, 0x0104
};
u8 r_ap, r_bp;
pci_read_config_byte(pdev, port, &r_ap);
pci_read_config_byte(pdev, port + 1, &r_bp);
r_ap &= ~0x3F; /* Preserve ERRDY_EN, SYNC_IN */
r_bp &= ~0x07;
r_ap |= (pio_timing[pio] >> 8);
r_bp |= (pio_timing[pio] & 0xFF);
if (ata_pio_need_iordy(adev))
r_ap |= 0x20; /* IORDY enable */
if (adev->class == ATA_DEV_ATA)
r_ap |= 0x10; /* FIFO enable */
pci_write_config_byte(pdev, port, r_ap);
pci_write_config_byte(pdev, port + 1, r_bp);
}
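/* Worked example: PIO 4 uses the table value 0x0104, so 0x01 is merged into
the low bits of register A and 0x04 into the low bits of register B, plus the
IORDY (0x20) and FIFO (0x10) bits where applicable. */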
/**
* pdc_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void pdc_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
pdc_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* pdc_set_dmamode - set DMA mode in chip
* @ap: ATA interface
* @adev: ATA device
*
* Load DMA cycle times into the chip ready for a DMA transfer
* to occur.
*/
static void pdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int port = 0x60 + 4 * ap->port_no + 2 * adev->devno;
static u8 udma_timing[6][2] = {
{ 0x60, 0x03 }, /* 33 MHz Clock */
{ 0x40, 0x02 },
{ 0x20, 0x01 },
{ 0x40, 0x02 }, /* 66 MHz Clock */
{ 0x20, 0x01 },
{ 0x20, 0x01 }
};
u8 r_bp, r_cp;
pci_read_config_byte(pdev, port + 1, &r_bp);
pci_read_config_byte(pdev, port + 2, &r_cp);
r_bp &= ~0xF0;
r_cp &= ~0x0F;
if (adev->dma_mode >= XFER_UDMA_0) {
int speed = adev->dma_mode - XFER_UDMA_0;
r_bp |= udma_timing[speed][0];
r_cp |= udma_timing[speed][1];
} else {
int speed = adev->dma_mode - XFER_MW_DMA_0;
r_bp |= 0x60;
r_cp |= (5 - speed);
}
pci_write_config_byte(pdev, port + 1, r_bp);
pci_write_config_byte(pdev, port + 2, r_cp);
}
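/* Worked example: UDMA2 merges 0x20 into register B and 0x01 into register C,
while MWDMA2 uses the fixed 0x60 value and a count of 5 - 2 = 3. */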
/**
* pdc2026x_bmdma_start - DMA engine begin
* @qc: ATA command
*
* In UDMA3 or higher we have to clock switch for the duration of the
* DMA transfer sequence.
*/
static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
int sel66 = ap->port_no ? 0x08: 0x02;
unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
unsigned long clock = master + 0x11;
unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
u32 len;
/* Check we keep host level locking here */
if (adev->dma_mode >= XFER_UDMA_2)
outb(inb(clock) | sel66, clock);
else
outb(inb(clock) & ~sel66, clock);
/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
and move to qc_issue ? */
pdc_set_dmamode(ap, qc->dev);
/* Cases the state machine will not complete correctly without help */
if ((tf->flags & ATA_TFLAG_LBA48) || tf->protocol == ATA_PROT_ATAPI_DMA)
{
if (tf->flags & ATA_TFLAG_LBA48)
len = qc->nsect * 512;
else
len = qc->nbytes;
if (tf->flags & ATA_TFLAG_WRITE)
len |= 0x06000000;
else
len |= 0x05000000;
outl(len, atapi_reg);
}
/* Activate DMA */
ata_bmdma_start(qc);
}
/**
* pdc2026x_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* After a DMA completes we need to put the clock back to 33MHz for
* PIO timings.
*/
static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_taskfile *tf = &qc->tf;
int sel66 = ap->port_no ? 0x08: 0x02;
/* The clock bits are in the same register for both channels */
unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
unsigned long clock = master + 0x11;
unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
/* Cases the state machine will not complete correctly */
if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
outl(0, atapi_reg);
outb(inb(clock) & ~sel66, clock);
}
/* Check we keep host level locking here */
/* Flip back to 33MHz for PIO */
if (adev->dma_mode >= XFER_UDMA_2)
outb(inb(clock) & ~sel66, clock);
ata_bmdma_stop(qc);
}
/**
* pdc2026x_dev_config - device setup hook
* @ap: ATA port
* @adev: newly found device
*
* Perform chip specific early setup. We need to limit transfers to an
* 8-bit sector count (256 sectors) to avoid making the state engine on
* the 2026x cards barf.
*/
static void pdc2026x_dev_config(struct ata_port *ap, struct ata_device *adev)
{
adev->max_sectors = 256;
}
static struct scsi_host_template pdc_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations pdc2024x_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = pdc_set_piomode,
.set_dmamode = pdc_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = pdc2024x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations pdc2026x_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = pdc_set_piomode,
.set_dmamode = pdc_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.dev_config = pdc2026x_dev_config,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = pdc2026x_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = pdc2026x_bmdma_start,
.bmdma_stop = pdc2026x_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int pdc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info[3] = {
{
.sht = &pdc_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA2,
.port_ops = &pdc2024x_port_ops
},
{
.sht = &pdc_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA4,
.port_ops = &pdc2026x_port_ops
},
{
.sht = &pdc_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = ATA_UDMA5,
.port_ops = &pdc2026x_port_ops
}
};
static struct ata_port_info *port_info[2];
port_info[0] = port_info[1] = &info[id->driver_data];
if (dev->device == PCI_DEVICE_ID_PROMISE_20265) {
struct pci_dev *bridge = dev->bus->self;
/* Don't grab anything behind a Promise I2O RAID */
if (bridge && bridge->vendor == PCI_VENDOR_ID_INTEL) {
if (bridge->device == PCI_DEVICE_ID_INTEL_I960)
return -ENODEV;
if (bridge->device == PCI_DEVICE_ID_INTEL_I960RM)
return -ENODEV;
}
}
return ata_pci_init_one(dev, port_info, 2);
}
static struct pci_device_id pdc[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20246), 0},
{ PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20262), 1},
{ PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20263), 1},
{ PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20265), 2},
{ PCI_DEVICE(PCI_VENDOR_ID_PROMISE, PCI_DEVICE_ID_PROMISE_20267), 2},
{ 0, },
};
static struct pci_driver pdc_pci_driver = {
.name = DRV_NAME,
.id_table = pdc,
.probe = pdc_init_one,
.remove = ata_pci_remove_one
};
static int __init pdc_init(void)
{
return pci_register_driver(&pdc_pci_driver);
}
static void __exit pdc_exit(void)
{
pci_unregister_driver(&pdc_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Promise 2024x and 20262-20267");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pdc);
MODULE_VERSION(DRV_VERSION);
module_init(pdc_init);
module_exit(pdc_exit);

403
drivers/ata/pata_qdi.c Normal file

@ -0,0 +1,403 @@
/*
* pata_qdi.c - QDI VLB ATA controllers
* (C) 2006 Red Hat <alan@redhat.com>
*
* This driver mostly exists as a proof of concept for non PCI devices under
* libata. While the QDI6580 was 'neat' in 1993 it is no longer terribly
* useful.
*
* Tuning code written from the documentation at
* http://www.ryston.cz/petr/vlb/qd6500.html
* http://www.ryston.cz/petr/vlb/qd6580.html
*
* Probe code based on drivers/ide/legacy/qd65xx.c
* Rewritten from the work of Colten Edwards <pje120@cs.usask.ca> by
* Samuel Thibault <samuel.thibault@fnac.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/platform_device.h>
#define DRV_NAME "pata_qdi"
#define DRV_VERSION "0.2.4"
#define NR_HOST 4 /* Two 6580s */
struct qdi_data {
unsigned long timing;
u8 clock[2];
u8 last;
int fast;
struct platform_device *platform_dev;
};
static struct ata_host *qdi_host[NR_HOST];
static struct qdi_data qdi_data[NR_HOST];
static int nr_qdi_host;
#ifdef MODULE
static int probe_qdi = 1;
#else
static int probe_qdi;
#endif
static void qdi6500_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct qdi_data *qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
if (qdi->fast) {
active = 8 - FIT(t.active, 1, 8);
recovery = 18 - FIT(t.recover, 3, 18);
} else {
active = 9 - FIT(t.active, 2, 9);
recovery = 15 - FIT(t.recover, 0, 15);
}
timing = (recovery << 4) | active | 0x08;
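/* The timing byte appears to pack the recovery count in bits 7:4 and the active count in bits 2:0, with bit 3 always set */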
qdi->clock[adev->devno] = timing;
outb(timing, qdi->timing);
}
static void qdi6580_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ata_timing t;
struct qdi_data *qdi = ap->host->private_data;
int active, recovery;
u8 timing;
/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000);
if (qdi->fast) {
active = 8 - FIT(t.active, 1, 8);
recovery = 18 - FIT(t.recover, 3, 18);
} else {
active = 9 - FIT(t.active, 2, 9);
recovery = 15 - FIT(t.recover, 0, 15);
}
timing = (recovery << 4) | active | 0x08;
qdi->clock[adev->devno] = timing;
outb(timing, qdi->timing);
/* Clear the FIFO */
if (adev->class != ATA_DEV_ATA)
outb(0x5F, (qdi->timing & 0xFFF0) + 3);
}
/**
* qdi_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings.
*/
static unsigned int qdi_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct qdi_data *qdi = ap->host->private_data;
if (qdi->clock[adev->devno] != qdi->last) {
if (adev->pio_mode) {
qdi->last = qdi->clock[adev->devno];
outb(qdi->clock[adev->devno], qdi->timing);
}
}
return ata_qc_issue_prot(qc);
}
static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
int slop = buflen & 3;
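/* slop is the tail of the buffer that does not fill a whole 32-bit word */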
if (ata_id_has_dword_io(adev->id)) {
if (write_data)
outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
else
insl(ap->ioaddr.data_addr, buf, buflen >> 2);
if (unlikely(slop)) {
u32 pad;
if (write_data) {
memcpy(&pad, buf + buflen - slop, slop);
outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
} else {
pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
}
}
} else
ata_pio_data_xfer(adev, buf, buflen, write_data);
}
static struct scsi_host_template qdi_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations qdi6500_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = qdi6500_set_piomode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = qdi_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = qdi_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations qdi6580_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = qdi6580_set_piomode,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.qc_prep = ata_qc_prep,
.qc_issue = qdi_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = qdi_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* qdi_init_one - attach a qdi interface
* @port: Timing register I/O port
* @type: Type to display
* @io: I/O port start
* @irq: interrupt line
* @fast: True if on a > 33MHz VLB
*
* Register an ISA bus IDE interface. Such interfaces are PIO only and
* are assumed not to support IRQ sharing.
*/
static __init int qdi_init_one(unsigned long port, int type, unsigned long io, int irq, int fast)
{
struct ata_probe_ent ae;
struct platform_device *pdev;
int ret;
unsigned long ctrl = io + 0x206;
/*
* Fill in a probe structure first of all
*/
pdev = platform_device_register_simple(DRV_NAME, nr_qdi_host, NULL, 0);
if (pdev == NULL)
return -ENOMEM;
memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &pdev->dev;
if (type == 6580) {
ae.port_ops = &qdi6580_port_ops;
ae.pio_mask = 0x1F;
} else {
ae.port_ops = &qdi6500_port_ops;
ae.pio_mask = 0x07; /* Actually PIO3 !IORDY is possible */
}
ae.sht = &qdi_sht;
ae.n_ports = 1;
ae.irq = irq;
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
ae.port[0].cmd_addr = io;
ae.port[0].altstatus_addr = ctrl;
ae.port[0].ctl_addr = ctrl;
ata_std_ports(&ae.port[0]);
/*
* Hook in a private data structure per channel
*/
ae.private_data = &qdi_data[nr_qdi_host];
qdi_data[nr_qdi_host].timing = port;
qdi_data[nr_qdi_host].fast = fast;
qdi_data[nr_qdi_host].platform_dev = pdev;
printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
ret = ata_device_add(&ae);
if (ret == 0) {
platform_device_unregister(pdev);
return -ENODEV;
}
qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
return 0;
}
/**
* qdi_init - attach qdi interfaces
*
* Attach qdi IDE interfaces by scanning the ports it may occupy.
*/
static __init int qdi_init(void)
{
unsigned long flags;
static const unsigned long qd_port[2] = { 0x30, 0xB0 };
static const unsigned long ide_port[2] = { 0x170, 0x1F0 };
static const int ide_irq[2] = { 14, 15 };
int ct = 0;
int i;
if (probe_qdi == 0)
return -ENODEV;
/*
* Check each possible QD65xx base address
*/
for (i = 0; i < 2; i++) {
unsigned long port = qd_port[i];
u8 r, res;
if (request_region(port, 2, "pata_qdi")) {
/* Check for a card */
local_irq_save(flags);
r = inb_p(port);
outb_p(0x19, port);
res = inb_p(port);
outb_p(r, port);
local_irq_restore(flags);
/* Fail */
if (res == 0x19)
{
release_region(port, 2);
continue;
}
/* Passes the presence test */
r = inb_p(port + 1); /* Check port agrees with port set */
if ((r & 2) >> 1 != i) {
release_region(port, 2);
continue;
}
/* Check card type */
if ((r & 0xF0) == 0xC0) {
/* QD6500: single channel */
if (r & 8) {
/* Disabled ? */
release_region(port, 2);
continue;
}
ct += qdi_init_one(port, 6500, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
}
if (((r & 0xF0) == 0xA0) || (r & 0xF0) == 0x50) {
/* QD6580: dual channel */
if (!request_region(port + 2 , 2, "pata_qdi"))
{
release_region(port, 2);
continue;
}
res = inb(port + 3);
if (res & 1) {
/* Single channel mode */
ct += qdi_init_one(port, 6580, ide_port[r & 0x01], ide_irq[r & 0x01], r & 0x04);
} else {
/* Dual channel mode */
ct += qdi_init_one(port, 6580, 0x1F0, 14, r & 0x04);
ct += qdi_init_one(port + 2, 6580, 0x170, 15, r & 0x04);
}
}
}
}
if (ct != 0)
return 0;
return -ENODEV;
}
static __exit void qdi_exit(void)
{
int i;
for (i = 0; i < nr_qdi_host; i++) {
ata_host_remove(qdi_host[i]);
/* Free the control resource. The 6580 dual channel has the resources
* claimed as a pair of 2 byte resources so we need no special cases...
*/
release_region(qdi_data[i].timing, 2);
platform_device_unregister(qdi_data[i].platform_dev);
}
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for qdi ATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
module_init(qdi_init);
module_exit(qdi_exit);
module_param(probe_qdi, int, 0);

335
drivers/ata/pata_radisys.c Normal file

@ -0,0 +1,335 @@
/*
* pata_radisys.c - Intel PATA/SATA controllers
*
* (C) 2006 Red Hat <alan@redhat.com>
*
* Some parts based on ata_piix.c by Jeff Garzik and others.
*
* A PIIX relative, this device has a single ATA channel and no
* slave timings, SITRE or PPE. In that sense it is a close relative
* of the original PIIX. It does however support UDMA 33/66 per channel
* although no other modes/timings. Also lacking is 32bit I/O on the ATA
* port.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/ata.h>
#define DRV_NAME "pata_radisys"
#define DRV_VERSION "0.4.1"
/**
* radisys_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int radisys_pre_reset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA80;
return ata_std_prereset(ap);
}
/**
* radisys_pata_error_handler - Probe and reset handler for the PATA port
* @ap: Port to probe
*
* LOCKING:
* None (inherited from caller).
*/
static void radisys_pata_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, radisys_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* radisys_set_piomode - Initialize host controller PATA PIO timings
* @ap: Port whose timings we are configuring
* @adev: Device whose timings we are programming
*
* Set PIO mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void radisys_set_piomode (struct ata_port *ap, struct ata_device *adev)
{
unsigned int pio = adev->pio_mode - XFER_PIO_0;
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u16 idetm_data;
int control = 0;
/*
* See Intel Document 298600-004 for the timing programming rules
* for PIIX/ICH. Note that the early PIIX does not have the slave
* timing port at 0x44. The Radisys is a relative of the PIIX
* but not the same so be careful.
*/
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 }, /* Check me */
{ 0, 0 },
{ 1, 1 },
{ 2, 2 },
{ 3, 3 }, };
if (pio > 0)
control |= 1; /* TIME1 enable */
if (ata_pio_need_iordy(adev))
control |= 2; /* IE IORDY */
pci_read_config_word(dev, 0x40, &idetm_data);
/* Enable IE and TIME as appropriate. Clear the other
drive timing bits */
idetm_data &= 0xCCCC;
idetm_data |= (control << (4 * adev->devno));
idetm_data |= (timings[pio][0] << 12) |
(timings[pio][1] << 8);
pci_write_config_word(dev, 0x40, idetm_data);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* radisys_set_dmamode - Initialize host controller PATA DMA timings
* @ap: Port whose timings we are configuring
* @adev: Device to program
*
* Set MWDMA/UDMA mode for device, in host controller PCI config space.
*
* LOCKING:
* None (inherited from caller).
*/
static void radisys_set_dmamode (struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *dev = to_pci_dev(ap->host->dev);
u16 idetm_data;
u8 udma_enable;
static const /* ISP RTC */
u8 timings[][2] = { { 0, 0 },
{ 0, 0 },
{ 1, 1 },
{ 2, 2 },
{ 3, 3 }, };
/*
* MWDMA is driven by the PIO timings. We must also enable
* IORDY unconditionally.
*/
pci_read_config_word(dev, 0x40, &idetm_data);
pci_read_config_byte(dev, 0x48, &udma_enable);
if (adev->dma_mode < XFER_UDMA_0) {
unsigned int mwdma = adev->dma_mode - XFER_MW_DMA_0;
const unsigned int needed_pio[3] = {
XFER_PIO_0, XFER_PIO_3, XFER_PIO_4
};
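/* MWDMA0/1/2 borrow the PIO0/3/4 timing entries, as the table above encodes */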
int pio = needed_pio[mwdma] - XFER_PIO_0;
int control = 3; /* IORDY|TIME0 */
/* If the drive MWDMA is faster than it can do PIO then
we must force PIO0 for PIO cycles. */
if (adev->pio_mode < needed_pio[mwdma])
control = 1;
/* Mask out the relevant control and timing bits we will load. Also
clear the other drive TIME register as a precaution */
idetm_data &= 0xCCCC;
idetm_data |= control << (4 * adev->devno);
idetm_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
udma_enable &= ~(1 << adev->devno);
} else {
u8 udma_mode;
/* UDMA66 on: UDMA 33 and 66 are switchable via register 0x4A */
pci_read_config_byte(dev, 0x4A, &udma_mode);
if (adev->xfer_mode == XFER_UDMA_2)
udma_mode &= ~ (1 << adev->devno);
else /* UDMA 4 */
udma_mode |= (1 << adev->devno);
pci_write_config_byte(dev, 0x4A, udma_mode);
udma_enable |= (1 << adev->devno);
}
pci_write_config_word(dev, 0x40, idetm_data);
pci_write_config_byte(dev, 0x48, udma_enable);
/* Track which port is configured */
ap->private_data = adev;
}
/**
* radisys_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Our logic also clears TIME0/TIME1 for the other device so
* that, even if we get this wrong, cycles to the other device will
* be made PIO0.
*/
static unsigned int radisys_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
if (adev != ap->private_data) {
/* UDMA timing is not shared */
if (adev->dma_mode < XFER_UDMA_0) {
if (adev->dma_mode)
radisys_set_dmamode(ap, adev);
else if (adev->pio_mode)
radisys_set_piomode(ap, adev);
}
}
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template radisys_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static const struct ata_port_operations radisys_pata_ops = {
.port_disable = ata_port_disable,
.set_piomode = radisys_set_piomode,
.set_dmamode = radisys_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = radisys_pata_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = radisys_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.eng_timeout = ata_eng_timeout,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop,
};
/**
* radisys_init_one - Register PIIX ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in radisys_pci_tbl matching with @pdev
*
* Called from kernel PCI layer. We register the controller and hand
* over control to libata, for it to do the rest.
*
* LOCKING:
* Inherited from PCI layer (may sleep).
*
* RETURNS:
* Zero on success, or -ERRNO value.
*/
static int radisys_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
static struct ata_port_info info = {
.sht = &radisys_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x14, /* UDMA33/66 only */
.port_ops = &radisys_pata_ops,
};
static struct ata_port_info *port_info[2] = { &info, &info };
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev,
"version " DRV_VERSION "\n");
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id radisys_pci_tbl[] = {
{ 0x1331, 0x8201, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
};
static struct pci_driver radisys_pci_driver = {
.name = DRV_NAME,
.id_table = radisys_pci_tbl,
.probe = radisys_init_one,
.remove = ata_pci_remove_one,
};
static int __init radisys_init(void)
{
return pci_register_driver(&radisys_pci_driver);
}
static void __exit radisys_exit(void)
{
pci_unregister_driver(&radisys_pci_driver);
}
module_init(radisys_init);
module_exit(radisys_exit);
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("SCSI low-level driver for Radisys R82600 controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, radisys_pci_tbl);
MODULE_VERSION(DRV_VERSION);

205
drivers/ata/pata_rz1000.c Normal file

@ -0,0 +1,205 @@
/*
* RZ1000/1001 driver based upon
*
* linux/drivers/ide/pci/rz1000.c Version 0.06 January 12, 2003
* Copyright (C) 1995-1998 Linus Torvalds & author (see below)
* Principal Author: mlord@pobox.com (Mark Lord)
*
* See linux/MAINTAINERS for address of current maintainer.
*
* This file provides support for disabling the buggy read-ahead
* mode of the RZ1000 IDE chipset, commonly used on Intel motherboards.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_rz1000"
#define DRV_VERSION "0.2.2"
/**
* rz1000_prereset - probe begin
* @ap: ATA port
*
* Set up cable type and use generics
*/
static int rz1000_prereset(struct ata_port *ap)
{
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* rz1000_error_handler - probe reset
* @ap: ATA port
*
* Perform the ATA standard reset sequence
*/
static void rz1000_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, rz1000_prereset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* rz1000_set_mode - mode setting function
* @ap: ATA interface
*
* Use a non-standard set_mode function. We don't want to be tuned. We
* would prefer to be BIOS generic but for the fact our hardware is
* whacked out.
*/
static void rz1000_set_mode(struct ata_port *ap)
{
int i;
for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
if (ata_dev_enabled(dev)) {
/* We don't really care */
dev->pio_mode = XFER_PIO_0;
dev->xfer_mode = XFER_PIO_0;
dev->xfer_shift = ATA_SHIFT_PIO;
dev->flags |= ATA_DFLAG_PIO;
}
}
}
static struct scsi_host_template rz1000_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations rz1000_port_ops = {
.set_mode = rz1000_set_mode,
.port_disable = ata_port_disable,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = rz1000_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* rz1000_init_one - Register RZ1000 ATA PCI device with kernel services
* @pdev: PCI device to register
* @ent: Entry in rz1000_pci_tbl matching with @pdev
*
* Configure an RZ1000 interface. This doesn't require much special
* handling except that we *MUST* kill the chipset readahead or the
* user may experience data corruption.
*/
static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_port_info *port_info[2];
u16 reg;
static struct ata_port_info info = {
.sht = &rz1000_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.port_ops = &rz1000_port_ops
};
if (!printed_version++)
printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
/* Be exceptionally paranoid as we must be sure to apply the fix */
if (pci_read_config_word(pdev, 0x40, &reg) != 0)
goto fail;
reg &= 0xDFFF;
if (pci_write_config_word(pdev, 0x40, reg) != 0)
goto fail;
printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
port_info[0] = &info;
port_info[1] = &info;
return ata_pci_init_one(pdev, port_info, 2);
fail:
printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset.\n");
/* Not safe to use so skip */
return -ENODEV;
}
static struct pci_device_id pata_rz1000[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1000), },
{ PCI_DEVICE(PCI_VENDOR_ID_PCTECH, PCI_DEVICE_ID_PCTECH_RZ1001), },
{ 0, },
};
static struct pci_driver rz1000_pci_driver = {
.name = DRV_NAME,
.id_table = pata_rz1000,
.probe = rz1000_init_one,
.remove = ata_pci_remove_one
};
static int __init rz1000_init(void)
{
return pci_register_driver(&rz1000_pci_driver);
}
static void __exit rz1000_exit(void)
{
pci_unregister_driver(&rz1000_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for RZ1000 PCI ATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, pata_rz1000);
MODULE_VERSION(DRV_VERSION);
module_init(rz1000_init);
module_exit(rz1000_exit);

287
drivers/ata/pata_sc1200.c Normal file

@ -0,0 +1,287 @@
/*
* New ATA layer SC1200 driver Alan Cox <alan@redhat.com>
*
* TODO: Mode selection filtering
* TODO: Can't enable second channel until ATA core has serialize
* TODO: Needs custom DMA cleanup code
*
* Based very heavily on
*
* linux/drivers/ide/pci/sc1200.c Version 0.91 28-Jan-2003
*
* Copyright (C) 2000-2002 Mark Lord <mlord@pobox.com>
* May be copied or modified under the terms of the GNU General Public License
*
* Development of this chipset driver was funded
* by the nice folks at National Semiconductor.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "sc1200"
#define DRV_VERSION "0.2.3"
#define SC1200_REV_A 0x00
#define SC1200_REV_B1 0x01
#define SC1200_REV_B3 0x02
#define SC1200_REV_C1 0x03
#define SC1200_REV_D1 0x04
/**
* sc1200_clock - PCI clock
*
* Return the PCI bus clocking for the SC1200 chipset configuration
* in use. We return 0 for 33MHz, 1 for 48MHz and 2 for 66MHz.
*/
static int sc1200_clock(void)
{
/* Magic registers that give us the chipset data */
u8 chip_id = inb(0x903C);
u8 silicon_rev = inb(0x903D);
u16 pci_clock;
if (chip_id == 0x04 && silicon_rev < SC1200_REV_B1)
return 0; /* 33 MHz mode */
/* Clock generator configuration at 0x901E: bits 8/9 give the PCI clocking;
0/3 is 33MHz, 1 is 48MHz, 2 is 66MHz */
pci_clock = inw(0x901E);
pci_clock >>= 8;
pci_clock &= 0x03;
if (pci_clock == 3)
pci_clock = 0;
return pci_clock;
}
/**
* sc1200_set_piomode - PIO setup
* @ap: ATA interface
* @adev: device on the interface
*
* Set our PIO requirements. This is fairly simple on the SC1200
*/
static void sc1200_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 pio_timings[4][5] = {
{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010}, // format0 33Mhz
{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}, // format1, 33Mhz
{0xfaa3f4f3, 0xc23232b2, 0x513101c1, 0x31213121, 0x10211021}, // format1, 48Mhz
{0xfff4fff4, 0xf35353d3, 0x814102f1, 0x42314231, 0x11311131} // format1, 66Mhz
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 format;
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->pio_mode - XFER_PIO_0;
pci_read_config_dword(pdev, reg + 4, &format);
format >>= 31;
format += sc1200_clock();
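/* Bit 31 of the second timing dword selects the timing format; adding the clock index picks the matching row of pio_timings above */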
pci_write_config_dword(pdev, reg + 8 * adev->devno,
pio_timings[format][mode]);
}
/**
* sc1200_set_dmamode - DMA timing setup
* @ap: ATA interface
* @adev: Device being configured
*
* We cannot mix MWDMA and UDMA without reloading the timings on each
* switch between master and slave.
*/
static void sc1200_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u32 udma_timing[3][3] = {
{ 0x00921250, 0x00911140, 0x00911030 },
{ 0x00932470, 0x00922260, 0x00922140 },
{ 0x009436A1, 0x00933481, 0x00923261 }
};
static const u32 mwdma_timing[3][3] = {
{ 0x00077771, 0x00012121, 0x00002020 },
{ 0x000BBBB2, 0x00024241, 0x00013131 },
{ 0x000FFFF3, 0x00035352, 0x00015151 }
};
int clock = sc1200_clock();
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int reg = 0x40 + 0x10 * ap->port_no;
int mode = adev->dma_mode;
u32 format;
if (mode >= XFER_UDMA_0)
format = udma_timing[clock][mode - XFER_UDMA_0];
else
format = mwdma_timing[clock][mode - XFER_MW_DMA_0];
if (adev->devno == 0) {
u32 timings;
pci_read_config_dword(pdev, reg + 4, &timings);
timings &= 0x80000000UL;
timings |= format;
pci_write_config_dword(pdev, reg + 4, timings);
} else
pci_write_config_dword(pdev, reg + 12, format);
}
/**
* sc1200_qc_issue_prot - command issue
* @qc: command pending
*
* Called when the libata layer is about to issue a command. We wrap
* this interface so that we can load the correct ATA timings if
* necessary. Specifically we have a problem that there is only
* one MWDMA/UDMA bit.
*/
static unsigned int sc1200_qc_issue_prot(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_device *adev = qc->dev;
struct ata_device *prev = ap->private_data;
/* See if the DMA settings could be wrong */
if (adev->dma_mode != 0 && adev != prev && prev != NULL) {
/* Maybe, but do the channels match MWDMA/UDMA ? */
if ((adev->dma_mode >= XFER_UDMA_0 && prev->dma_mode < XFER_UDMA_0) ||
(adev->dma_mode < XFER_UDMA_0 && prev->dma_mode >= XFER_UDMA_0))
/* Switch the mode bits */
sc1200_set_dmamode(ap, adev);
}
return ata_qc_issue_prot(qc);
}
static struct scsi_host_template sc1200_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations sc1200_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = sc1200_set_piomode,
.set_dmamode = sc1200_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = ata_bmdma_error_handler,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = sc1200_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* sc1200_init_one - Initialise an SC1200
* @dev: PCI device
* @id: Entry in match table
*
* Just throw the needed data at the libata helper and it does all
* our work.
*/
static int sc1200_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &sc1200_sht,
.flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07,
.port_ops = &sc1200_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
/* Can't enable port 2 yet, see top comments */
return ata_pci_init_one(dev, port_info, 1);
}
static struct pci_device_id sc1200[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_IDE), },
{ 0, },
};
static struct pci_driver sc1200_pci_driver = {
.name = DRV_NAME,
.id_table = sc1200,
.probe = sc1200_init_one,
.remove = ata_pci_remove_one
};
static int __init sc1200_init(void)
{
return pci_register_driver(&sc1200_pci_driver);
}
static void __exit sc1200_exit(void)
{
pci_unregister_driver(&sc1200_pci_driver);
}
MODULE_AUTHOR("Alan Cox, Mark Lord");
MODULE_DESCRIPTION("low-level driver for the NS/AMD SC1200");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sc1200);
MODULE_VERSION(DRV_VERSION);
module_init(sc1200_init);
module_exit(sc1200_exit);

587
drivers/ata/pata_serverworks.c Normal file

@ -0,0 +1,587 @@
/*
* pata_serverworks.c - Serverworks PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon
*
* serverworks.c
*
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
* Portions copyright (c) 2001 Sun Microsystems
*
*
* RCC/ServerWorks IDE driver for Linux
*
* OSB4: `Open South Bridge' IDE Interface (fn 1)
* supports UDMA mode 2 (33 MB/s)
*
* CSB5: `Champion South Bridge' IDE Interface (fn 1)
* all revisions support UDMA mode 4 (66 MB/s)
* revision A2.0 and up support UDMA mode 5 (100 MB/s)
*
* *** The CSB5 does not provide ANY register ***
* *** to detect 80-conductor cable presence. ***
*
* CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
*
* Documentation:
* Available under NDA only. Errata info very hard to get.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_serverworks"
#define DRV_VERSION "0.3.6"
#define SVWKS_CSB5_REVISION_NEW 0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION 0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */
/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
* can overrun their FIFOs when used with the CSB5 */
static const char *csb_bad_ata100[] = {
"ST320011A",
"ST340016A",
"ST360021A",
"ST380021A",
NULL
};
/**
* dell_cable - Dell serverworks cable detection
* @ap: ATA port to do cable detect
*
* Dell hide the 40/80 pin select for their interfaces in the top two
* bits of the subsystem ID.
*/
static int dell_cable(struct ata_port *ap) {
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
/**
* sun_cable - Sun Cobalt 'Alpine' cable detection
* @ap: ATA port to do cable select
*
* Cobalt CSB5 IDE hides the 40/80pin in the top two bits of the
* subsystem ID the same as Dell. We could use one function but we may
* need to extend the Dell one in the future.
*/
static int sun_cable(struct ata_port *ap) {
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (pdev->subsystem_device & (1 << (ap->port_no + 14)))
return ATA_CBL_PATA80;
return ATA_CBL_PATA40;
}
/**
* osb4_cable - OSB4 cable detect
* @ap: ATA port to check
*
* The OSB4 isn't UDMA66 capable so this is easy
*/
static int osb4_cable(struct ata_port *ap) {
return ATA_CBL_PATA40;
}
/**
* csb_cable - CSB5/6 cable detect
* @ap: ATA port to check
*
* Serverworks default arrangement is to use the drive side detection
* only.
*/
static int csb_cable(struct ata_port *ap) {
return ATA_CBL_PATA80;
}
struct sv_cable_table {
int device;
int subvendor;
int (*cable_detect)(struct ata_port *ap);
};
/*
* Note that we don't copy the old serverworks code because the old
* code contains obvious mistakes
*/
static struct sv_cable_table cable_detect[] = {
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_DELL, dell_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_VENDOR_ID_DELL, dell_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_VENDOR_ID_SUN, sun_cable },
{ PCI_DEVICE_ID_SERVERWORKS_OSB4, PCI_ANY_ID, osb4_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB5IDE, PCI_ANY_ID, csb_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE, PCI_ANY_ID, csb_cable },
{ PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2, PCI_ANY_ID, csb_cable },
{ PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, csb_cable },
{ }
};
/**
* serverworks_pre_reset - cable detection
* @ap: ATA port
*
* Perform cable detection according to the device and subvendor
* identifications
*/
static int serverworks_pre_reset(struct ata_port *ap) {
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct sv_cable_table *cb = cable_detect;
while(cb->device) {
if (cb->device == pdev->device &&
(cb->subvendor == pdev->subsystem_vendor ||
cb->subvendor == PCI_ANY_ID)) {
ap->cbl = cb->cable_detect(ap);
return ata_std_prereset(ap);
}
cb++;
}
BUG();
return -1; /* kill compiler warning */
}
static void serverworks_error_handler(struct ata_port *ap)
{
return ata_bmdma_drive_eh(ap, serverworks_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* serverworks_is_csb - Check for CSB or OSB
* @pdev: PCI device to check
*
* Returns true if the device being checked is known to be a CSB
* series device.
*/
static u8 serverworks_is_csb(struct pci_dev *pdev)
{
switch (pdev->device) {
case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
return 1;
default:
break;
}
return 0;
}
/**
* serverworks_osb4_filter - mode selection filter
* @ap: ATA interface
* @adev: ATA device
*
* Filter the offered modes for the device to apply controller
* specific rules. OSB4 requires no UDMA for disks due to a FIFO
* bug we hit.
*/
static unsigned long serverworks_osb4_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
if (adev->class == ATA_DEV_ATA)
mask &= ~ATA_MASK_UDMA;
return ata_pci_default_filter(ap, adev, mask);
}
/**
* serverworks_csb_filter - mode selection filter
* @ap: ATA interface
* @adev: ATA device
*
* Check the blacklist and disable UDMA5 if matched
*/
static unsigned long serverworks_csb_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long mask)
{
const char *p;
char model_num[40];
int len, i;
/* Disk, UDMA */
if (adev->class != ATA_DEV_ATA)
return ata_pci_default_filter(ap, adev, mask);
/* Actually do need to check */
ata_id_string(adev->id, model_num, ATA_ID_PROD_OFS, sizeof(model_num));
/* Precautionary - why not do this in the libata core? */
len = strlen(model_num);
while ((len > 0) && (model_num[len - 1] == ' ')) {
len--;
model_num[len] = 0;
}
for(i = 0; (p = csb_bad_ata100[i]) != NULL; i++) {
if (!strncmp(p, model_num, len))
mask &= ~(0x1F << ATA_SHIFT_UDMA);
}
return ata_pci_default_filter(ap, adev, mask);
}
/**
* serverworks_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the OSB4/CSB5 timing registers for PIO. The PIO register
* load is done as a simple lookup.
*/
static void serverworks_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 pio_mode[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
int offset = 1 + (2 * ap->port_no) - adev->devno;
int devbits = (2 * ap->port_no + adev->devno) * 4;
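/* offset picks this drive's timing byte at 0x40-0x43; devbits is the drive's nibble in the CSB PIO mode word at 0x4A */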
u16 csb5_pio;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
pci_write_config_byte(pdev, 0x40 + offset, pio_mode[pio]);
/* The OSB4 just requires the timing but the CSB series want the
mode number as well */
if (serverworks_is_csb(pdev)) {
pci_read_config_word(pdev, 0x4A, &csb5_pio);
csb5_pio &= ~(0x0F << devbits);
pci_write_config_word(pdev, 0x4A, csb5_pio | (pio << devbits));
}
}
/**
* serverworks_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the serverworks OSB4/CSB5
* chipset. The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static const u8 dma_mode[] = { 0x77, 0x21, 0x20 };
int offset = 1 + 2 * ap->port_no - adev->devno;
int devbits = (2 * ap->port_no + adev->devno);
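/* offset indexes the per-drive MWDMA timing byte at 0x44; devbits is this drive's bit in the UDMA enable mask at 0x54 */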
u8 ultra;
u8 ultra_cfg;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
pci_read_config_byte(pdev, 0x54, &ultra_cfg);
if (adev->dma_mode >= XFER_UDMA_0) {
pci_write_config_byte(pdev, 0x44 + offset, 0x20);
pci_read_config_byte(pdev, 0x56 + ap->port_no, &ultra);
ultra &= ~(0x0F << (ap->port_no * 4));
ultra |= (adev->dma_mode - XFER_UDMA_0)
<< (ap->port_no * 4);
pci_write_config_byte(pdev, 0x56 + ap->port_no, ultra);
ultra_cfg |= (1 << devbits);
} else {
pci_write_config_byte(pdev, 0x44 + offset,
dma_mode[adev->dma_mode - XFER_MW_DMA_0]);
ultra_cfg &= ~(1 << devbits);
}
pci_write_config_byte(pdev, 0x54, ultra_cfg);
}
static struct scsi_host_template serverworks_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations serverworks_osb4_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = serverworks_set_piomode,
.set_dmamode = serverworks_set_dmamode,
.mode_filter = serverworks_osb4_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = serverworks_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations serverworks_csb_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = serverworks_set_piomode,
.set_dmamode = serverworks_set_dmamode,
.mode_filter = serverworks_csb_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = serverworks_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int serverworks_fixup_osb4(struct pci_dev *pdev)
{
u32 reg;
struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
if (isa_dev) {
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
if (!(reg & 0x00004000))
printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
pci_dev_put(isa_dev);
return 0;
}
printk(KERN_WARNING "ata_serverworks: Unable to find bridge.\n");
return -ENODEV;
}
static int serverworks_fixup_csb(struct pci_dev *pdev)
{
u8 rev;
u8 btr;
pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
/* Third Channel Test */
if (!(PCI_FUNC(pdev->devfn) & 1)) {
struct pci_dev * findev = NULL;
u32 reg4c = 0;
findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
if (findev) {
pci_read_config_dword(findev, 0x4C, &reg4c);
reg4c &= ~0x000007FF;
reg4c |= 0x00000040;
reg4c |= 0x00000020;
pci_write_config_dword(findev, 0x4C, reg4c);
pci_dev_put(findev);
}
} else {
struct pci_dev * findev = NULL;
u8 reg41 = 0;
findev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
if (findev) {
pci_read_config_byte(findev, 0x41, &reg41);
reg41 &= ~0x40;
pci_write_config_byte(findev, 0x41, reg41);
pci_dev_put(findev);
}
}
/* setup the UDMA Control register
*
* 1. clear bit 6 to enable DMA
* 2. enable DMA modes with bits 0-1
* 00 : legacy
* 01 : udma2
* 10 : udma2/udma4
* 11 : udma2/udma4/udma5
*/
pci_read_config_byte(pdev, 0x5A, &btr);
btr &= ~0x40;
if (!(PCI_FUNC(pdev->devfn) & 1))
btr |= 0x2;
else
btr |= (rev >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
pci_write_config_byte(pdev, 0x5A, btr);
return btr;
}
static void serverworks_fixup_ht1000(struct pci_dev *pdev)
{
u8 btr;
/* Setup HT1000 SouthBridge Controller - Single Channel Only */
pci_read_config_byte(pdev, 0x5A, &btr);
btr &= ~0x40;
btr |= 0x3;
pci_write_config_byte(pdev, 0x5A, btr);
}
static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ports = 2;
static struct ata_port_info info[4] = {
{ /* OSB4 */
.sht = &serverworks_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x07,
.port_ops = &serverworks_osb4_port_ops
}, { /* OSB4 no UDMA */
.sht = &serverworks_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x00,
.port_ops = &serverworks_osb4_port_ops
}, { /* CSB5 */
.sht = &serverworks_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f,
.port_ops = &serverworks_csb_port_ops
}, { /* CSB5 - later revisions*/
.sht = &serverworks_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &serverworks_csb_port_ops
}
};
static struct ata_port_info *port_info[2];
struct ata_port_info *devinfo = &info[id->driver_data];
/* Force master latency timer to 64 PCI clocks */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
/* OSB4 : South Bridge and IDE */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
/* Select non UDMA capable OSB4 if we can't do fixups */
if ( serverworks_fixup_osb4(pdev) < 0)
devinfo = &info[1];
}
/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
(pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
(pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
/* If the returned btr is the newer revision then
select the right info block */
if (serverworks_fixup_csb(pdev) == 3)
devinfo = &info[3];
/* Is this the 3rd channel CSB6 IDE ? */
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)
ports = 1;
}
/* setup HT1000E */
else if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
serverworks_fixup_ht1000(pdev);
if (pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
ata_pci_clear_simplex(pdev);
port_info[0] = port_info[1] = devinfo;
return ata_pci_init_one(pdev, port_info, ports);
}
static struct pci_device_id serverworks[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE), 0},
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE), 2},
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE), 2},
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2), 2},
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE), 2},
{ 0, },
};
static struct pci_driver serverworks_pci_driver = {
.name = DRV_NAME,
.id_table = serverworks,
.probe = serverworks_init_one,
.remove = ata_pci_remove_one
};
static int __init serverworks_init(void)
{
return pci_register_driver(&serverworks_pci_driver);
}
static void __exit serverworks_exit(void)
{
pci_unregister_driver(&serverworks_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Serverworks OSB4/CSB5/CSB6");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, serverworks);
MODULE_VERSION(DRV_VERSION);
module_init(serverworks_init);
module_exit(serverworks_exit);

381
drivers/ata/pata_sil680.c Normal file

@ -0,0 +1,381 @@
/*
* pata_sil680.c - SIL680 PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon
*
* linux/drivers/ide/pci/siimage.c Version 1.07 Nov 30, 2003
*
* Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat <alan@redhat.com>
*
* May be copied or modified under the terms of the GNU General Public License
*
* Documentation publicly available.
*
* If you have strange problems with nVidia chipset systems please
* see the SI support documentation and update your system BIOS
* if necessary
*
* TODO
* If we know all our devices are LBA28 (or LBA28 sized) we could use
* the command fifo mode.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sil680"
#define DRV_VERSION "0.3.2"
/**
* sil680_selreg - return register base
* @ap: ATA interface
* @r: config offset
*
* Turn a config register offset into the right address in either
* PCI space or MMIO space to access the control register in question
* Thankfully this is a configuration operation so it isn't performance
* critical.
*/
static unsigned long sil680_selreg(struct ata_port *ap, int r)
{
unsigned long base = 0xA0 + r;
base += (ap->port_no << 4);
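/* Channel 0 registers start at 0xA0, channel 1 at 0xB0 */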
return base;
}
/**
* sil680_seldev - return register base
* @ap: ATA interface
* @adev: ATA device
* @r: config offset
*
* Turn a config register offset into the right address in either
* PCI space or MMIO space to access the control register in question
* including accounting for the unit shift.
*/
static unsigned long sil680_seldev(struct ata_port *ap, struct ata_device *adev, int r)
{
unsigned long base = 0xA0 + r;
base += (ap->port_no << 4);
base |= adev->devno ? 2 : 0;
return base;
}
/**
* sil680_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection. The SIL680 stores this in PCI config
* space for us.
*/
static int sil680_cable_detect(struct ata_port *ap) {
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 0);
u8 ata66;
pci_read_config_byte(pdev, addr, &ata66);
if (ata66 & 1)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
static int sil680_pre_reset(struct ata_port *ap)
{
ap->cbl = sil680_cable_detect(ap);
return ata_std_prereset(ap);
}
/**
* sil680_bus_reset - reset the SIL680 bus
* @ap: ATA port to reset
*
* Perform the SIL680 housekeeping when doing an ATA bus reset
*/
static int sil680_bus_reset(struct ata_port *ap,unsigned int *classes)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long addr = sil680_selreg(ap, 0);
u8 reset;
pci_read_config_byte(pdev, addr, &reset);
pci_write_config_byte(pdev, addr, reset | 0x03);
udelay(25);
pci_write_config_byte(pdev, addr, reset);
return ata_std_softreset(ap, classes);
}
static void sil680_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, sil680_pre_reset, sil680_bus_reset, NULL, ata_std_postreset);
}
/**
* sil680_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the SIL680 registers for PIO mode. Note that the task speed
* registers are shared between the devices so we must pick the lowest
* mode for command work.
*/
static void sil680_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
static u16 speed_p[5] = { 0x328A, 0x2283, 0x1104, 0x10C3, 0x10C1 };
static u16 speed_t[5] = { 0x328A, 0x1281, 0x1281, 0x10C3, 0x10C1 };
unsigned long tfaddr = sil680_selreg(ap, 0x02);
unsigned long addr = sil680_seldev(ap, adev, 0x04);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
int pio = adev->pio_mode - XFER_PIO_0;
int lowest_pio = pio;
u16 reg;
struct ata_device *pair = ata_dev_pair(adev);
if (pair != NULL && adev->pio_mode > pair->pio_mode)
lowest_pio = pair->pio_mode - XFER_PIO_0;
pci_write_config_word(pdev, addr, speed_p[pio]);
pci_write_config_word(pdev, tfaddr, speed_t[lowest_pio]);
pci_read_config_word(pdev, tfaddr-2, &reg);
reg &= ~0x0200; /* Clear IORDY */
if (ata_pio_need_iordy(adev))
reg |= 0x0200; /* Enable IORDY */
pci_write_config_word(pdev, tfaddr-2, reg);
}
/**
* sil680_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Program the MWDMA/UDMA modes for the sil680
* chipset. The MWDMA mode values are pulled from a lookup table
* while the chipset uses mode number for UDMA.
*/
static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
static u8 ultra_table[2][7] = {
{ 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01, 0xFF }, /* 100MHz */
{ 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 }, /* 133Mhz */
};
static u16 dma_table[3] = { 0x2208, 0x10C2, 0x10C1 };
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long ma = sil680_seldev(ap, adev, 0x08);
unsigned long ua = sil680_seldev(ap, adev, 0x0C);
unsigned long addr_mask = 0x80 + 4 * ap->port_no;
int port_shift = adev->devno * 4;
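/* addr_mask is the per-channel mode register (0x80 or 0x84); port_shift selects this device's 2-bit mode field within it */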
u8 scsc, mode;
u16 multi, ultra;
pci_read_config_byte(pdev, 0x8A, &scsc);
pci_read_config_byte(pdev, addr_mask, &mode);
pci_read_config_word(pdev, ma, &multi);
pci_read_config_word(pdev, ua, &ultra);
/* Mask timing bits */
ultra &= ~0x3F;
mode &= ~(0x03 << port_shift);
/* Extract scsc */
scsc = (scsc & 0x30) ? 1: 0;
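/* A non-zero clock setting in register 0x8A selects the 133MHz row of ultra_table above */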
if (adev->dma_mode >= XFER_UDMA_0) {
multi = 0x10C1;
ultra |= ultra_table[scsc][adev->dma_mode - XFER_UDMA_0];
mode |= (0x03 << port_shift);
} else {
multi = dma_table[adev->dma_mode - XFER_MW_DMA_0];
mode |= (0x02 << port_shift);
}
pci_write_config_byte(pdev, addr_mask, mode);
pci_write_config_word(pdev, ma, multi);
pci_write_config_word(pdev, ua, ultra);
}
static struct scsi_host_template sil680_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations sil680_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = sil680_set_piomode,
.set_dmamode = sil680_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = sil680_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int sil680_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &sil680_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
.port_ops = &sil680_port_ops
};
static struct ata_port_info info_slow = {
.sht = &sil680_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &sil680_port_ops
};
static struct ata_port_info *port_info[2] = {&info, &info};
static int printed_version;
u32 class_rev = 0;
u8 tmpbyte = 0;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
pci_read_config_dword(pdev, PCI_CLASS_REVISION, &class_rev);
class_rev &= 0xff;
/* FIXME: double check */
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, (class_rev) ? 1 : 255);
pci_write_config_byte(pdev, 0x80, 0x00);
pci_write_config_byte(pdev, 0x84, 0x00);
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
switch(tmpbyte & 0x30) {
case 0x00:
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte|0x10);
break;
case 0x30:
/* if clocking is disabled */
/* 133 clock attempt to force it on */
pci_write_config_byte(pdev, 0x8A, tmpbyte & ~0x20);
break;
case 0x10:
/* 133 already */
break;
case 0x20:
/* BIOS set PCI x2 clocking */
break;
}
pci_read_config_byte(pdev, 0x8A, &tmpbyte);
printk(KERN_INFO "sil680: BA5_EN = %d clock = %02X\n",
tmpbyte & 1, tmpbyte & 0x30);
if ((tmpbyte & 0x30) == 0)
port_info[0] = port_info[1] = &info_slow;
pci_write_config_byte(pdev, 0xA1, 0x72);
pci_write_config_word(pdev, 0xA2, 0x328A);
pci_write_config_dword(pdev, 0xA4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xA8, 0x43924392);
pci_write_config_dword(pdev, 0xAC, 0x40094009);
pci_write_config_byte(pdev, 0xB1, 0x72);
pci_write_config_word(pdev, 0xB2, 0x328A);
pci_write_config_dword(pdev, 0xB4, 0x62DD62DD);
pci_write_config_dword(pdev, 0xB8, 0x43924392);
pci_write_config_dword(pdev, 0xBC, 0x40094009);
switch(tmpbyte & 0x30) {
case 0x00: printk(KERN_INFO "sil680: 100MHz clock.\n");break;
case 0x10: printk(KERN_INFO "sil680: 133MHz clock.\n");break;
case 0x20: printk(KERN_INFO "sil680: Using PCI clock.\n");break;
/* This last case is _NOT_ ok */
case 0x30: printk(KERN_ERR "sil680: Clock disabled ?\n");
return -EIO;
}
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id sil680[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_CMD, PCI_DEVICE_ID_SII_680), },
{ 0, },
};
static struct pci_driver sil680_pci_driver = {
.name = DRV_NAME,
.id_table = sil680,
.probe = sil680_init_one,
.remove = ata_pci_remove_one
};
static int __init sil680_init(void)
{
return pci_register_driver(&sil680_pci_driver);
}
static void __exit sil680_exit(void)
{
pci_unregister_driver(&sil680_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for SI680 PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sil680);
MODULE_VERSION(DRV_VERSION);
module_init(sil680_init);
module_exit(sil680_exit);

1034
drivers/ata/pata_sis.c Normal file

File diff suppressed because it is too large

388
drivers/ata/pata_sl82c105.c Normal file
View File

@ -0,0 +1,388 @@
/*
* pata_sl82c105.c - SL82C105 PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Based in part on linux/drivers/ide/pci/sl82c105.c
* SL82C105/Winbond 553 IDE driver
*
* and in part on the documentation and errata sheet
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_sl82c105"
#define DRV_VERSION "0.2.2"
enum {
/*
* SL82C105 PCI config register 0x40 bits.
*/
CTRL_IDE_IRQB = (1 << 30),
CTRL_IDE_IRQA = (1 << 28),
CTRL_LEGIRQ = (1 << 11),
CTRL_P1F16 = (1 << 5),
CTRL_P1EN = (1 << 4),
CTRL_P0F16 = (1 << 1),
CTRL_P0EN = (1 << 0)
};
/**
* sl82c105_pre_reset - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int sl82c105_pre_reset(struct ata_port *ap)
{
static const struct pci_bits sl82c105_enable_bits[] = {
{ 0x40, 1, 0x01, 0x01 },
{ 0x40, 1, 0x10, 0x10 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (ap->port_no && !pci_test_config_bits(pdev, &sl82c105_enable_bits[ap->port_no])) {
ata_port_disable(ap);
dev_printk(KERN_INFO, &pdev->dev, "port disabled. ignoring.\n");
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
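/*
 * The enable bits tested above live in config register 0x40 and match
 * CTRL_P0EN (bit 0) and CTRL_P1EN (bit 4) from the enum at the top of
 * this file; only the secondary port is actually checked because
 * sl82c105_init_one below forces CTRL_P0EN on unconditionally.
 */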
static void sl82c105_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, sl82c105_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* sl82c105_configure_piomode - set chip PIO timing
* @ap: ATA interface
* @adev: ATA device
* @pio: PIO mode
*
* Called to do the PIO mode setup. Our timing registers are shared
* so a configure_dmamode call will undo any work we do here and vice
* versa
*/
static void sl82c105_configure_piomode(struct ata_port *ap, struct ata_device *adev, int pio)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static u16 pio_timing[5] = {
0x50D, 0x407, 0x304, 0x242, 0x240
};
u16 dummy;
int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
pci_write_config_word(pdev, timing, pio_timing[pio]);
/* Can we lose this oddity of the old driver */
pci_read_config_word(pdev, timing, &dummy);
}
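/*
 * Register map implied by "0x44 + (8 * port_no) + (4 * devno)" above
 * (an inference, not taken from documentation): one 16-bit timing word
 * per drive at 0x44 (primary/master), 0x48 (primary/slave), 0x4C
 * (secondary/master) and 0x50 (secondary/slave). PIO and MWDMA modes
 * share the same word, which is why set_piomode and set_dmamode below
 * overwrite each other.
 */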
/**
* sl82c105_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the PIO mode setup. Our timing registers are shared
* but we want to set the PIO timing by default.
*/
static void sl82c105_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
sl82c105_configure_piomode(ap, adev, adev->pio_mode - XFER_PIO_0);
}
/**
* sl82c105_configure_dmamode - set DMA mode in chip
* @ap: ATA interface
* @adev: ATA device
*
* Load DMA cycle times into the chip ready for a DMA transfer
* to occur.
*/
static void sl82c105_configure_dmamode(struct ata_port *ap, struct ata_device *adev)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
static u16 dma_timing[3] = {
0x707, 0x201, 0x200
};
u16 dummy;
int timing = 0x44 + (8 * ap->port_no) + (4 * adev->devno);
int dma = adev->dma_mode - XFER_MW_DMA_0;
pci_write_config_word(pdev, timing, dma_timing[dma]);
/* Can we lose this oddity of the old driver */
pci_read_config_word(pdev, timing, &dummy);
}
/**
* sl82c105_set_dmamode - set initial DMA mode data
* @ap: ATA interface
* @adev: ATA device
*
* Called to do the DMA mode setup. This replaces the PIO timings
* for the device in question. We program the matching PIO timings,
* not DMA timings, at this point.
*/
static void sl82c105_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
switch(adev->dma_mode) {
case XFER_MW_DMA_0:
sl82c105_configure_piomode(ap, adev, 1);
break;
case XFER_MW_DMA_1:
sl82c105_configure_piomode(ap, adev, 3);
break;
case XFER_MW_DMA_2:
sl82c105_configure_piomode(ap, adev, 3);
break;
default:
BUG();
}
}
/**
* sl82c105_reset_engine - Reset the DMA engine
* @ap: ATA interface
*
* The sl82c105 has some serious problems with the DMA engine
* when transfers don't run as expected or ATAPI is used. The
* recommended fix is to reset the engine on each use, using a chip
* test register.
*/
static void sl82c105_reset_engine(struct ata_port *ap)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u16 val;
pci_read_config_word(pdev, 0x7E, &val);
pci_write_config_word(pdev, 0x7E, val | 4);
pci_write_config_word(pdev, 0x7E, val & ~4);
}
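/*
 * The register at 0x7E is not named anywhere in this driver; from the
 * sequence above it is assumed that bit 2 acts as the DMA engine reset
 * strobe, which must be pulsed high and then cleared again.
 */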
/**
* sl82c105_bmdma_start - DMA engine begin
* @qc: ATA command
*
* Reset the DMA engine on each use, as recommended by the errata
* document.
*
* FIXME: if we switch clock at BMDMA start/end we might get better
* PIO performance on DMA capable devices.
*/
static void sl82c105_bmdma_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
sl82c105_reset_engine(ap);
/* Set the clocks for DMA */
sl82c105_configure_dmamode(ap, qc->dev);
/* Activate DMA */
ata_bmdma_start(qc);
}
/**
* sl82c105_bmdma_stop - DMA engine stop
* @qc: ATA command
*
* Reset the DMA engine on each use, as recommended by the errata
* document.
*
* This function is also called to turn off DMA when a timeout occurs
* during DMA operation. In both cases we need to reset the engine,
* so no actual eng_timeout handler is required.
*
* We assume bmdma_stop is always called if bmdma_start was called. If
* not then we may need to wrap qc_issue.
*/
static void sl82c105_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
ata_bmdma_stop(qc);
sl82c105_reset_engine(ap);
/* This will redo the initial setup of the DMA device to matching
PIO timings */
sl82c105_set_dmamode(ap, qc->dev);
}
static struct scsi_host_template sl82c105_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations sl82c105_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = sl82c105_set_piomode,
.set_dmamode = sl82c105_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.error_handler = sl82c105_error_handler,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = sl82c105_bmdma_start,
.bmdma_stop = sl82c105_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* sl82c105_bridge_revision - find bridge version
* @pdev: PCI device for the ATA function
*
* Locates the PCI bridge associated with the ATA function and
* provided it is a Winbond 553, reports the revision. If it cannot
* find a revision or the right device it returns -1
*/
static int sl82c105_bridge_revision(struct pci_dev *pdev)
{
struct pci_dev *bridge;
u8 rev;
/*
* The bridge should be part of the same device, but function 0.
*/
bridge = pci_get_slot(pdev->bus,
PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
if (!bridge)
return -1;
/*
* Make sure it is a Winbond 553 and is an ISA bridge.
*/
if (bridge->vendor != PCI_VENDOR_ID_WINBOND ||
bridge->device != PCI_DEVICE_ID_WINBOND_83C553 ||
bridge->class >> 8 != PCI_CLASS_BRIDGE_ISA) {
pci_dev_put(bridge);
return -1;
}
/*
* We need to find function 0's revision, not function 1
*/
pci_read_config_byte(bridge, PCI_REVISION_ID, &rev);
pci_dev_put(bridge);
return rev;
}
static int sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info_dma = {
.sht = &sl82c105_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &sl82c105_port_ops
};
static struct ata_port_info info_early = {
.sht = &sl82c105_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.port_ops = &sl82c105_port_ops
};
static struct ata_port_info *port_info[2] = { &info_early, &info_early };
u32 val;
int rev;
rev = sl82c105_bridge_revision(dev);
if (rev == -1)
dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Unable to find bridge, disabling DMA.\n");
else if (rev <= 5)
dev_printk(KERN_WARNING, &dev->dev, "pata_sl82c105: Early bridge revision, no DMA available.\n");
else {
port_info[0] = &info_dma;
port_info[1] = &info_dma;
}
pci_read_config_dword(dev, 0x40, &val);
val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16;
pci_write_config_dword(dev, 0x40, val);
return ata_pci_init_one(dev, port_info, 1); /* For now */
}
static struct pci_device_id sl82c105[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_82C105), },
{ 0, },
};
static struct pci_driver sl82c105_pci_driver = {
.name = DRV_NAME,
.id_table = sl82c105,
.probe = sl82c105_init_one,
.remove = ata_pci_remove_one
};
static int __init sl82c105_init(void)
{
return pci_register_driver(&sl82c105_pci_driver);
}
static void __exit sl82c105_exit(void)
{
pci_unregister_driver(&sl82c105_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Sl82c105");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, sl82c105);
MODULE_VERSION(DRV_VERSION);
module_init(sl82c105_init);
module_exit(sl82c105_exit);

285
drivers/ata/pata_triflex.c Normal file
View File

@ -0,0 +1,285 @@
/*
* pata_triflex.c - Compaq PATA for new ATA layer
* (C) 2005 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* based upon
*
* triflex.c
*
* IDE Chipset driver for the Compaq TriFlex IDE controller.
*
* Known to work with the Compaq Workstation 5x00 series.
*
* Copyright (C) 2002 Hewlett-Packard Development Group, L.P.
* Author: Torben Mathiasen <torben.mathiasen@hp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Loosely based on the piix & svwks drivers.
*
* Documentation:
* Not publicly available.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_triflex"
#define DRV_VERSION "0.2.5"
/**
* triflex_probe_init - probe begin
* @ap: ATA port
*
* Set up cable type and use generic probe init
*/
static int triflex_probe_init(struct ata_port *ap)
{
static const struct pci_bits triflex_enable_bits[] = {
{ 0x80, 1, 0x01, 0x01 },
{ 0x80, 1, 0x02, 0x02 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &triflex_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
static void triflex_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, triflex_probe_init, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* triflex_load_timing - timing configuration
* @ap: ATA interface
* @adev: Device on the bus
* @speed: speed to configure
*
* The Triflex has one set of timings per device per channel. This
* means we must do some switching. As the PIO and DMA timings don't
* match we have to do some reloading, unlike PIIX devices where tuning
* tricks can avoid it.
*/
static void triflex_load_timing(struct ata_port *ap, struct ata_device *adev, int speed)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 timing = 0;
u32 triflex_timing, old_triflex_timing;
int channel_offset = ap->port_no ? 0x74: 0x70;
unsigned int is_slave = (adev->devno != 0);
pci_read_config_dword(pdev, channel_offset, &old_triflex_timing);
triflex_timing = old_triflex_timing;
switch (speed) {
case XFER_MW_DMA_2:
timing = 0x0103; break;
case XFER_MW_DMA_1:
timing = 0x0203; break;
case XFER_MW_DMA_0:
timing = 0x0808; break;
case XFER_SW_DMA_2:
case XFER_SW_DMA_1:
case XFER_SW_DMA_0:
timing = 0x0F0F; break;
case XFER_PIO_4:
timing = 0x0202; break;
case XFER_PIO_3:
timing = 0x0204; break;
case XFER_PIO_2:
timing = 0x0404; break;
case XFER_PIO_1:
timing = 0x0508; break;
case XFER_PIO_0:
timing = 0x0808; break;
default:
BUG();
}
triflex_timing &= ~ (0xFFFF << (16 * is_slave));
triflex_timing |= (timing << (16 * is_slave));
if (triflex_timing != old_triflex_timing)
pci_write_config_dword(pdev, channel_offset, triflex_timing);
}
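/*
 * Layout assumed from the shifts above rather than from documentation:
 * each channel has a single 32-bit timing register (0x70 primary, 0x74
 * secondary) holding the master timings in the low 16 bits and the
 * slave timings in the high 16 bits, so only the half-word belonging
 * to the drive being configured is modified before the dword is
 * written back.
 */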
/**
* triflex_set_piomode - set initial PIO mode data
* @ap: ATA interface
* @adev: ATA device
*
* Use the timing loader to set up the PIO mode. We have to do this
* because DMA start/stop will only be called once DMA occurs. If there
* has been no DMA then the PIO timings are still needed.
*/
static void triflex_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
triflex_load_timing(ap, adev, adev->pio_mode);
}
/**
* triflex_bmdma_start - DMA start callback
* @qc: Command in progress
*
* Usually drivers set the DMA timing at the point the set_dmamode call
* is made. The Triflex, however, requires that we load new timings on the
* transition or keep matching PIO/DMA pairs (i.e. MWDMA2/PIO4 etc.).
* We load the DMA timings just before starting DMA and then restore
* the PIO timing when the DMA is finished.
*/
static void triflex_bmdma_start(struct ata_queued_cmd *qc)
{
triflex_load_timing(qc->ap, qc->dev, qc->dev->dma_mode);
ata_bmdma_start(qc);
}
/**
* triflex_bmdma_stop - DMA stop callback
* @qc: ATA command
*
* We loaded new timings in dma_start, as a result we need to restore
* the PIO timings in dma_stop so that the next command issue gets the
* right clock values.
*/
static void triflex_bmdma_stop(struct ata_queued_cmd *qc)
{
ata_bmdma_stop(qc);
triflex_load_timing(qc->ap, qc->dev, qc->dev->pio_mode);
}
static struct scsi_host_template triflex_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations triflex_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = triflex_set_piomode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = triflex_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = triflex_bmdma_start,
.bmdma_stop = triflex_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
static struct ata_port_info info = {
.sht = &triflex_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &triflex_port_ops
};
static struct ata_port_info *port_info[2] = { &info, &info };
static int printed_version;
if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
return ata_pci_init_one(dev, port_info, 2);
}
static const struct pci_device_id triflex[] = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TRIFLEX_IDE,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
};
static struct pci_driver triflex_pci_driver = {
.name = DRV_NAME,
.id_table = triflex,
.probe = triflex_init_one,
.remove = ata_pci_remove_one
};
static int __init triflex_init(void)
{
return pci_register_driver(&triflex_pci_driver);
}
static void __exit triflex_exit(void)
{
pci_unregister_driver(&triflex_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for Compaq Triflex");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, triflex);
MODULE_VERSION(DRV_VERSION);
module_init(triflex_init);
module_exit(triflex_exit);

568
drivers/ata/pata_via.c Normal file
View File

@ -0,0 +1,568 @@
/*
* pata_via.c - VIA PATA for new ATA layer
* (C) 2005-2006 Red Hat Inc
* Alan Cox <alan@redhat.com>
*
* Documentation
* Most chipset documentation available under NDA only
*
* VIA version guide
* VIA VT82C561 - early design, uses ata_generic currently
* VIA VT82C576 - MWDMA, 33MHz
* VIA VT82C586 - MWDMA, 33MHz
* VIA VT82C586a - Added UDMA to 33MHz
* VIA VT82C586b - UDMA33
* VIA VT82C596a - Nonfunctional UDMA66
* VIA VT82C596b - Working UDMA66
* VIA VT82C686 - Nonfunctional UDMA66
* VIA VT82C686a - Working UDMA66
* VIA VT82C686b - Updated to UDMA100
* VIA VT8231 - UDMA100
* VIA VT8233 - UDMA100
* VIA VT8233a - UDMA133
* VIA VT8233c - UDMA100
* VIA VT8235 - UDMA133
* VIA VT8237 - UDMA133
*
* Most registers remain compatible across chips. Others start reserved
* and acquire sensible semantics if set to 1 (e.g. cable detect). A few
* exceptions exist, notably around the FIFO settings.
*
* One additional quirk of the VIA design is that, like ALi, they use a
* few PCI IDs for a lot of chips.
*
* Based heavily on:
*
* Version 3.38
*
* VIA IDE driver for Linux. Supported southbridges:
*
* vt82c576, vt82c586, vt82c586a, vt82c586b, vt82c596a, vt82c596b,
* vt82c686, vt82c686a, vt82c686b, vt8231, vt8233, vt8233c, vt8233a,
* vt8235, vt8237
*
* Copyright (c) 2000-2002 Vojtech Pavlik
*
* Based on the work of:
* Michel Aubry
* Jeff Garzik
* Andre Hedrick
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#define DRV_NAME "pata_via"
#define DRV_VERSION "0.1.13"
/*
* The following comes directly from Vojtech Pavlik's ide/pci/via82cxxx
* driver.
*/
enum {
VIA_UDMA = 0x007,
VIA_UDMA_NONE = 0x000,
VIA_UDMA_33 = 0x001,
VIA_UDMA_66 = 0x002,
VIA_UDMA_100 = 0x003,
VIA_UDMA_133 = 0x004,
VIA_BAD_PREQ = 0x010, /* Crashes if PREQ# till DDACK# set */
VIA_BAD_CLK66 = 0x020, /* 66 MHz clock doesn't work correctly */
VIA_SET_FIFO = 0x040, /* Needs to have FIFO split set */
VIA_NO_UNMASK = 0x080, /* Doesn't work with IRQ unmasking on */
VIA_BAD_ID = 0x100, /* Has wrong vendor ID (0x1107) */
VIA_BAD_AST = 0x200, /* Don't touch Address Setup Timing */
VIA_NO_ENABLES = 0x400, /* Has no enablebits */
};
/*
* VIA SouthBridge chips.
*/
static const struct via_isa_bridge {
const char *name;
u16 id;
u8 rev_min;
u8 rev_max;
u16 flags;
} via_isa_bridges[] = {
{ "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 },
{ "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 },
{ "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 },
{ "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 },
{ "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 },
{ "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
{ "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 },
{ "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ },
{ "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO },
{ "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO },
{ "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
{ NULL }
};
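/*
 * The probe code scans this table in order and stops at the first
 * entry whose device ID and revision range match the ISA bridge it
 * finds, so newer bridges that share a PCI ID (e.g. the vt82c686
 * variants) must stay listed above the older ones.
 */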
/**
* via_cable_detect - cable detection
* @ap: ATA port
*
* Perform cable detection. In the VIA case the BIOS has
* already done this for us. We read the values provided by the
* BIOS. If you are using an 8235 in a non-PC configuration you
* may need to update this code.
*
* Hotplug also affects this.
*/
static int via_cable_detect(struct ata_port *ap) {
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 ata66;
pci_read_config_dword(pdev, 0x50, &ata66);
/* Check both drive cable reporting bits; we might not have
two drives */
if (ata66 & (0x10100000 >> (16 * ap->port_no)))
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
}
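/*
 * Interpretation of register 0x50 assumed from the mask above: bits 28
 * and 20 carry the 80-wire indication for the two drives on the
 * primary channel, and the same pair shifted right by 16 (bits 12 and
 * 4) covers the secondary channel. If either drive on a channel
 * reports an 80-wire cable the whole channel is treated as such.
 */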
static int via_pre_reset(struct ata_port *ap)
{
const struct via_isa_bridge *config = ap->host->private_data;
if (!(config->flags & VIA_NO_ENABLES)) {
static const struct pci_bits via_enable_bits[] = {
{ 0x40, 1, 0x02, 0x02 },
{ 0x40, 1, 0x01, 0x01 }
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
if (!pci_test_config_bits(pdev, &via_enable_bits[ap->port_no])) {
ata_port_disable(ap);
printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id);
return 0;
}
}
if ((config->flags & VIA_UDMA) >= VIA_UDMA_66)
ap->cbl = via_cable_detect(ap);
else
ap->cbl = ATA_CBL_PATA40;
return ata_std_prereset(ap);
}
/**
* via_error_handler - reset for VIA chips
* @ap: ATA port
*
* Handle the reset callback for the later chips with cable detect
*/
static void via_error_handler(struct ata_port *ap)
{
ata_bmdma_drive_eh(ap, via_pre_reset, ata_std_softreset, NULL, ata_std_postreset);
}
/**
* via_do_set_mode - program PIO/DMA timings for a device
* @ap: ATA interface
* @adev: ATA device
* @mode: ATA mode being programmed
* @tdiv: Clocks per PCI clock
* @set_ast: Set to program address setup
* @udma_type: UDMA mode/format of registers
*
* Program the VIA registers for DMA and PIO modes. Uses the ata timing
* support in order to compute modes.
*
* FIXME: Hotplug will require we serialize multiple mode changes
* on the two channels.
*/
static void via_do_set_mode(struct ata_port *ap, struct ata_device *adev, int mode, int tdiv, int set_ast, int udma_type)
{
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct ata_device *peer = ata_dev_pair(adev);
struct ata_timing t, p;
static int via_clock = 33333; /* Bus clock in kHz - ought to be tunable one day */
unsigned long T = 1000000000 / via_clock;
unsigned long UT = T/tdiv;
int ut;
int offset = 3 - (2*ap->port_no) - adev->devno;
/* Calculate the timing values we require */
ata_timing_compute(adev, mode, &t, T, UT);
/* We share 8bit timing so we must merge the constraints */
if (peer) {
if (peer->pio_mode) {
ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
ata_timing_merge(&p, &t, &t, ATA_TIMING_8BIT);
}
}
/* Address setup is programmable but breaks on UDMA133 setups */
if (set_ast) {
u8 setup; /* 2 bits per drive */
int shift = 2 * offset;
pci_read_config_byte(pdev, 0x4C, &setup);
setup &= ~(3 << shift);
setup |= FIT(t.setup, 1, 4) << shift; /* 1,4 or 1,4 - 1 FIXME */
pci_write_config_byte(pdev, 0x4C, setup);
}
/* Load the PIO mode bits */
pci_write_config_byte(pdev, 0x4F - ap->port_no,
((FIT(t.act8b, 1, 16) - 1) << 4) | (FIT(t.rec8b, 1, 16) - 1));
pci_write_config_byte(pdev, 0x48 + offset,
((FIT(t.active, 1, 16) - 1) << 4) | (FIT(t.recover, 1, 16) - 1));
/* Load the UDMA bits according to type */
switch(udma_type) {
default:
/* BUG() ? */
/* fall through */
case 33:
ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 5) - 2)) : 0x03;
break;
case 66:
ut = t.udma ? (0xe8 | (FIT(t.udma, 2, 9) - 2)) : 0x0f;
break;
case 100:
ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
break;
case 133:
ut = t.udma ? (0xe0 | (FIT(t.udma, 2, 9) - 2)) : 0x07;
break;
}
/* Set UDMA unless device is not UDMA capable */
if (udma_type)
pci_write_config_byte(pdev, 0x50 + offset, ut);
}
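/*
 * Register layout implied by "offset = 3 - (2 * port_no) - devno"
 * above (assumed, since the datasheets are NDA-only): per-drive data
 * timing at 0x48 + offset (0x4B primary/master down to 0x48
 * secondary/slave), per-drive UDMA setup at 0x50 + offset, per-channel
 * 8-bit command timing at 0x4F/0x4E, and two address-setup bits per
 * drive packed into register 0x4C.
 */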
static void via_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
const struct via_isa_bridge *config = ap->host->private_data;
int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
int mode = config->flags & VIA_UDMA;
static u8 tclock[5] = { 1, 1, 2, 3, 4 };
static u8 udma[5] = { 0, 33, 66, 100, 133 };
via_do_set_mode(ap, adev, adev->pio_mode, tclock[mode], set_ast, udma[mode]);
}
static void via_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
const struct via_isa_bridge *config = ap->host->private_data;
int set_ast = (config->flags & VIA_BAD_AST) ? 0 : 1;
int mode = config->flags & VIA_UDMA;
static u8 tclock[5] = { 1, 1, 2, 3, 4 };
static u8 udma[5] = { 0, 33, 66, 100, 133 };
via_do_set_mode(ap, adev, adev->dma_mode, tclock[mode], set_ast, udma[mode]);
}
static struct scsi_host_template via_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.ioctl = ata_scsi_ioctl,
.queuecommand = ata_scsi_queuecmd,
.can_queue = ATA_DEF_QUEUE,
.this_id = ATA_SHT_THIS_ID,
.sg_tablesize = LIBATA_MAX_PRD,
.max_sectors = ATA_MAX_SECTORS,
.cmd_per_lun = ATA_SHT_CMD_PER_LUN,
.emulated = ATA_SHT_EMULATED,
.use_clustering = ATA_SHT_USE_CLUSTERING,
.proc_name = DRV_NAME,
.dma_boundary = ATA_DMA_BOUNDARY,
.slave_configure = ata_scsi_slave_config,
.bios_param = ata_std_bios_param,
};
static struct ata_port_operations via_port_ops = {
.port_disable = ata_port_disable,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = via_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
static struct ata_port_operations via_port_ops_noirq = {
.port_disable = ata_port_disable,
.set_piomode = via_set_piomode,
.set_dmamode = via_set_dmamode,
.mode_filter = ata_pci_default_filter,
.tf_load = ata_tf_load,
.tf_read = ata_tf_read,
.check_status = ata_check_status,
.exec_command = ata_exec_command,
.dev_select = ata_std_dev_select,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = via_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.bmdma_setup = ata_bmdma_setup,
.bmdma_start = ata_bmdma_start,
.bmdma_stop = ata_bmdma_stop,
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.eng_timeout = ata_eng_timeout,
.data_xfer = ata_pio_data_xfer_noirq,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
.port_stop = ata_port_stop,
.host_stop = ata_host_stop
};
/**
* via_init_one - discovery callback
* @pdev: PCI device ID
* @id: PCI table info
*
* A VIA IDE interface has been discovered. Figure out what revision
* and perform configuration work before handing it to the ATA layer
*/
static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
/* Early VIA without UDMA support */
static struct ata_port_info via_mwdma_info = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &via_port_ops
};
/* Ditto with IRQ masking required */
static struct ata_port_info via_mwdma_info_borked = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.port_ops = &via_port_ops_noirq,
};
/* VIA UDMA 33 devices (and borked 66) */
static struct ata_port_info via_udma33_info = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7,
.port_ops = &via_port_ops
};
/* VIA UDMA 66 devices */
static struct ata_port_info via_udma66_info = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x1f,
.port_ops = &via_port_ops
};
/* VIA UDMA 100 devices */
static struct ata_port_info via_udma100_info = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x3f,
.port_ops = &via_port_ops
};
/* UDMA133 with bad AST (All current 133) */
static struct ata_port_info via_udma133_info = {
.sht = &via_sht,
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f, /* FIXME: should check north bridge */
.port_ops = &via_port_ops
};
struct ata_port_info *port_info[2], *type;
struct pci_dev *isa = NULL;
const struct via_isa_bridge *config;
static int printed_version;
u8 t;
u8 enable;
u32 timing;
if (!printed_version++)
dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
/* To find out how the IDE will behave and what features we actually
have, we must look at the bridge, not the IDE controller */
for (config = via_isa_bridges; config->id; config++)
if ((isa = pci_get_device(PCI_VENDOR_ID_VIA +
!!(config->flags & VIA_BAD_ID),
config->id, NULL))) {
pci_read_config_byte(isa, PCI_REVISION_ID, &t);
if (t >= config->rev_min &&
t <= config->rev_max)
break;
pci_dev_put(isa);
}
if (!config->id) {
printk(KERN_WARNING "via: Unknown VIA SouthBridge, disabling.\n");
return -ENODEV;
}
pci_dev_put(isa);
/* 0x40 low bits indicate enabled channels */
pci_read_config_byte(pdev, 0x40 , &enable);
enable &= 3;
if (enable == 0) {
return -ENODEV;
}
/* Initialise the FIFO for the enabled channels. */
if (config->flags & VIA_SET_FIFO) {
u8 fifo_setting[4] = {0x00, 0x60, 0x00, 0x20};
u8 fifo;
pci_read_config_byte(pdev, 0x43, &fifo);
/* Clear PREQ# until DDACK# for errata */
if (config->flags & VIA_BAD_PREQ)
fifo &= 0x7F;
else
fifo &= 0x9f;
/* Turn on FIFO for enabled channels */
fifo |= fifo_setting[enable];
pci_write_config_byte(pdev, 0x43, fifo);
}
/* Clock set up */
switch(config->flags & VIA_UDMA) {
case VIA_UDMA_NONE:
if (config->flags & VIA_NO_UNMASK)
type = &via_mwdma_info_borked;
else
type = &via_mwdma_info;
break;
case VIA_UDMA_33:
type = &via_udma33_info;
break;
case VIA_UDMA_66:
type = &via_udma66_info;
/* The 66 MHz devices require we enable the clock */
pci_read_config_dword(pdev, 0x50, &timing);
timing |= 0x80008;
pci_write_config_dword(pdev, 0x50, timing);
break;
case VIA_UDMA_100:
type = &via_udma100_info;
break;
case VIA_UDMA_133:
type = &via_udma133_info;
break;
default:
WARN_ON(1);
return -ENODEV;
}
if (config->flags & VIA_BAD_CLK66) {
/* Disable the 66MHz clock on problem devices */
pci_read_config_dword(pdev, 0x50, &timing);
timing &= ~0x80008;
pci_write_config_dword(pdev, 0x50, timing);
}
/* We have established the device type, now fire it up */
type->private_data = (void *)config;
port_info[0] = port_info[1] = type;
return ata_pci_init_one(pdev, port_info, 2);
}
static const struct pci_device_id via[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C576_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_6410), },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), },
{ 0, },
};
static struct pci_driver via_pci_driver = {
.name = DRV_NAME,
.id_table = via,
.probe = via_init_one,
.remove = ata_pci_remove_one
};
static int __init via_init(void)
{
return pci_register_driver(&via_pci_driver);
}
static void __exit via_exit(void)
{
pci_unregister_driver(&via_pci_driver);
}
MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for VIA PATA");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, via);
MODULE_VERSION(DRV_VERSION);
module_init(via_init);
module_exit(via_exit);

View File

@ -127,7 +127,7 @@ static int adma_ata_init_one (struct pci_dev *pdev,
static irqreturn_t adma_intr (int irq, void *dev_instance,
struct pt_regs *regs);
static int adma_port_start(struct ata_port *ap);
static void adma_host_stop(struct ata_host_set *host_set);
static void adma_host_stop(struct ata_host *host);
static void adma_port_stop(struct ata_port *ap);
static void adma_phy_reset(struct ata_port *ap);
static void adma_qc_prep(struct ata_queued_cmd *qc);
@ -182,7 +182,7 @@ static struct ata_port_info adma_port_info[] = {
/* board_1841_idx */
{
.sht = &adma_ata_sht,
.host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
.flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST |
ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO |
ATA_FLAG_PIO_POLLING,
.pio_mask = 0x10, /* pio4 */
@ -237,7 +237,7 @@ static void adma_reset_engine(void __iomem *chan)
static void adma_reinit_engine(struct ata_port *ap)
{
struct adma_port_priv *pp = ap->private_data;
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
/* mask/clear ATA interrupts */
@ -265,7 +265,7 @@ static void adma_reinit_engine(struct ata_port *ap)
static inline void adma_enter_reg_mode(struct ata_port *ap)
{
void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
writew(aPIOMD4, chan + ADMA_CONTROL);
readb(chan + ADMA_STATUS); /* flush */
@ -412,7 +412,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no);
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
VPRINTK("ENTER, ap %p\n", ap);
@ -442,13 +442,13 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0, port_no;
u8 __iomem *mmio_base = host_set->mmio_base;
u8 __iomem *mmio_base = host->mmio_base;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
struct ata_port *ap = host_set->ports[port_no];
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct adma_port_priv *pp;
struct ata_queued_cmd *qc;
void __iomem *chan = ADMA_REGS(mmio_base, port_no);
@ -476,13 +476,13 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
return handled;
}
static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
static inline unsigned int adma_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
ap = host->ports[port_no];
if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
struct ata_queued_cmd *qc;
struct adma_port_priv *pp = ap->private_data;
@ -497,7 +497,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
continue;
DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
ap->id, qc->tf.protocol, status);
/* complete taskfile transaction */
pp->state = adma_state_idle;
qc->err_mask |= ac_err_mask(status);
@ -511,14 +511,14 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int handled = 0;
VPRINTK("ENTER\n");
spin_lock(&host_set->lock);
handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
handled = adma_intr_pkt(host) | adma_intr_mmio(host);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -544,7 +544,7 @@ static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
static int adma_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct adma_port_priv *pp;
int rc;
@ -582,10 +582,10 @@ err_out:
static void adma_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct adma_port_priv *pp = ap->private_data;
adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no));
adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
if (pp != NULL) {
ap->private_data = NULL;
if (pp->pkt != NULL)
@ -596,14 +596,14 @@ static void adma_port_stop(struct ata_port *ap)
ata_port_stop(ap);
}
static void adma_host_stop(struct ata_host_set *host_set)
static void adma_host_stop(struct ata_host *host)
{
unsigned int port_no;
for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no));
adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
}
static void adma_host_init(unsigned int chip_id,
@ -684,7 +684,7 @@ static int adma_ata_init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = adma_port_info[board_idx].sht;
probe_ent->host_flags = adma_port_info[board_idx].host_flags;
probe_ent->port_flags = adma_port_info[board_idx].flags;
probe_ent->pio_mask = adma_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
@ -722,7 +722,7 @@ err_out:
static int __init adma_ata_init(void)
{
return pci_module_init(&adma_ata_pci_driver);
return pci_register_driver(&adma_ata_pci_driver);
}
static void __exit adma_ata_exit(void)

View File

@ -342,7 +342,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static void mv_host_stop(struct ata_host_set *host_set);
static void mv_host_stop(struct ata_host *host);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
@ -480,35 +480,35 @@ static const struct ata_port_operations mv_iie_ops = {
static const struct ata_port_info mv_port_info[] = {
{ /* chip_504x */
.sht = &mv_sht,
.host_flags = MV_COMMON_FLAGS,
.flags = MV_COMMON_FLAGS,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_508x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_5080 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv5_ops,
},
{ /* chip_604x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
@ -516,14 +516,14 @@ static const struct ata_port_info mv_port_info[] = {
},
{ /* chip_6042 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
.sht = &mv_sht,
.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
MV_FLAG_DUAL_HC),
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
@ -618,12 +618,12 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(ap->host_set->mmio_base, ap->port_no);
return mv_port_base(ap->host->mmio_base, ap->port_no);
}
static inline int mv_get_hc_count(unsigned long host_flags)
static inline int mv_get_hc_count(unsigned long port_flags)
{
return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_irq_clear(struct ata_port *ap)
@ -809,7 +809,7 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
/**
* mv_host_stop - Host specific cleanup/stop routine.
* @host_set: host data structure
* @host: host data structure
*
* Disable ints, cleanup host memory, call general purpose
* host_stop.
@ -817,10 +817,10 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
* LOCKING:
* Inherited from caller.
*/
static void mv_host_stop(struct ata_host_set *host_set)
static void mv_host_stop(struct ata_host *host)
{
struct mv_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct mv_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
pci_disable_msi(pdev);
@ -828,7 +828,7 @@ static void mv_host_stop(struct ata_host_set *host_set)
pci_intx(pdev, 0);
}
kfree(hpriv);
ata_host_stop(host_set);
ata_host_stop(host);
}
static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
@ -875,8 +875,8 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
*/
static int mv_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct mv_host_priv *hpriv = ap->host_set->private_data;
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
void __iomem *port_mmio = mv_ap_base(ap);
void *mem;
@ -965,17 +965,17 @@ err_out:
* Stop DMA, cleanup port memory.
*
* LOCKING:
* This routine uses the host_set lock to protect the DMA stop.
* This routine uses the host lock to protect the DMA stop.
*/
static void mv_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct mv_port_priv *pp = ap->private_data;
unsigned long flags;
spin_lock_irqsave(&ap->host_set->lock, flags);
spin_lock_irqsave(&ap->host->lock, flags);
mv_stop_dma(ap);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
spin_unlock_irqrestore(&ap->host->lock, flags);
ap->private_data = NULL;
ata_pad_free(ap, dev);
@ -1330,7 +1330,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
/**
* mv_host_intr - Handle all interrupts on the given host controller
* @host_set: host specific structure
* @host: host specific structure
* @relevant: port error bits relevant to this host controller
* @hc: which host controller we're to look at
*
@ -1344,10 +1344,9 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
* LOCKING:
* Inherited from caller.
*/
static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
unsigned int hc)
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
struct ata_queued_cmd *qc;
u32 hc_irq_cause;
@ -1371,7 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
u8 ata_status = 0;
struct ata_port *ap = host_set->ports[port];
struct ata_port *ap = host->ports[port];
struct mv_port_priv *pp = ap->private_data;
hard_port = mv_hardport_from_port(port); /* range 0..3 */
@ -1444,15 +1443,15 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
* reported here.
*
* LOCKING:
* This routine holds the host_set lock while processing pending
* This routine holds the host lock while processing pending
* interrupts.
*/
static irqreturn_t mv_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
struct mv_host_priv *hpriv;
u32 irq_stat;
@ -1465,18 +1464,18 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
return IRQ_NONE;
}
n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
spin_lock(&host_set->lock);
n_hcs = mv_get_hc_count(host->ports[0]->flags);
spin_lock(&host->lock);
for (hc = 0; hc < n_hcs; hc++) {
u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
if (relevant) {
mv_host_intr(host_set, relevant, hc);
mv_host_intr(host, relevant, hc);
handled++;
}
}
hpriv = host_set->private_data;
hpriv = host->private_data;
if (IS_60XX(hpriv)) {
/* deal with the interrupt coalescing bits */
if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
@ -1491,12 +1490,12 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
readl(mmio + PCI_IRQ_CAUSE_OFS));
DPRINTK("All regs @ PCI error\n");
mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
handled++;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
@ -1528,7 +1527,7 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU)
@ -1539,7 +1538,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
if (ofs != 0xffffffffU)
@ -1904,8 +1903,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv_stop_and_reset(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->mmio_base;
mv_stop_dma(ap);
@ -1936,7 +1935,7 @@ static inline void __msleep(unsigned int msec, int can_sleep)
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
struct mv_port_priv *pp = ap->private_data;
struct mv_host_priv *hpriv = ap->host_set->private_data;
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *port_mmio = mv_ap_base(ap);
struct ata_taskfile tf;
struct ata_device *dev = &ap->device[0];
@ -2034,7 +2033,7 @@ static void mv_phy_reset(struct ata_port *ap)
* chip/bus, fail the command, and move on.
*
* LOCKING:
* This routine holds the host_set lock while failing the command.
* This routine holds the host lock while failing the command.
*/
static void mv_eng_timeout(struct ata_port *ap)
{
@ -2043,18 +2042,17 @@ static void mv_eng_timeout(struct ata_port *ap)
ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
DPRINTK("All regs @ start of eng_timeout\n");
mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
to_pci_dev(ap->host_set->dev));
mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
to_pci_dev(ap->host->dev));
qc = ata_qc_from_tag(ap, ap->active_tag);
printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
spin_lock_irqsave(&ap->host_set->lock, flags);
spin_lock_irqsave(&ap->host->lock, flags);
mv_err_intr(ap, 0);
mv_stop_and_reset(ap);
spin_unlock_irqrestore(&ap->host_set->lock, flags);
spin_unlock_irqrestore(&ap->host->lock, flags);
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (qc->flags & ATA_QCFLAG_ACTIVE) {
@ -2235,7 +2233,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
if (rc)
goto done;
n_hc = mv_get_hc_count(probe_ent->host_flags);
n_hc = mv_get_hc_count(probe_ent->port_flags);
probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
for (port = 0; port < probe_ent->n_ports; port++)
@ -2388,7 +2386,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
memset(hpriv, 0, sizeof(*hpriv));
probe_ent->sht = mv_port_info[board_idx].sht;
probe_ent->host_flags = mv_port_info[board_idx].host_flags;
probe_ent->port_flags = mv_port_info[board_idx].flags;
probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
probe_ent->port_ops = mv_port_info[board_idx].port_ops;
@ -2446,7 +2444,7 @@ err_out:
static int __init mv_init(void)
{
return pci_module_init(&mv_pci_driver);
return pci_register_driver(&mv_pci_driver);
}
static void __exit mv_exit(void)

View File

@ -81,7 +81,7 @@ enum {
};
static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static void nv_ck804_host_stop(struct ata_host_set *host_set);
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
@ -257,7 +257,7 @@ static struct ata_port_info nv_port_info[] = {
/* generic */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -266,7 +266,7 @@ static struct ata_port_info nv_port_info[] = {
/* nforce2/3 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -275,7 +275,7 @@ static struct ata_port_info nv_port_info[] = {
/* ck804 */
{
.sht = &nv_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = NV_PIO_MASK,
.mwdma_mask = NV_MWDMA_MASK,
.udma_mask = NV_UDMA_MASK,
@ -292,17 +292,17 @@ MODULE_VERSION(DRV_VERSION);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
unsigned long flags;
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap;
ap = host_set->ports[i];
ap = host->ports[i];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@ -318,7 +318,7 @@ static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance,
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
return IRQ_RETVAL(handled);
}
@ -354,12 +354,12 @@ static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
return 1;
}
static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
{
int i, handled = 0;
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED))
handled += nv_host_intr(ap, irq_stat);
@ -373,14 +373,14 @@ static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat)
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
@ -388,14 +388,14 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance,
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
u8 irq_stat;
irqreturn_t ret;
spin_lock(&host_set->lock);
irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host_set, irq_stat);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);
return ret;
}
@ -418,7 +418,7 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
static void nv_nf2_freeze(struct ata_port *ap)
{
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -429,7 +429,7 @@ static void nv_nf2_freeze(struct ata_port *ap)
static void nv_nf2_thaw(struct ata_port *ap)
{
unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr;
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -442,7 +442,7 @@ static void nv_nf2_thaw(struct ata_port *ap)
static void nv_ck804_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -453,7 +453,7 @@ static void nv_ck804_freeze(struct ata_port *ap)
static void nv_ck804_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;
@ -568,9 +568,9 @@ err_out:
return rc;
}
static void nv_ck804_host_stop(struct ata_host_set *host_set)
static void nv_ck804_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct pci_dev *pdev = to_pci_dev(host->dev);
u8 regval;
/* disable SATA space for CK804 */
@ -578,12 +578,12 @@ static void nv_ck804_host_stop(struct ata_host_set *host_set)
regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
}
static int __init nv_init(void)
{
return pci_module_init(&nv_pci_driver);
return pci_register_driver(&nv_pci_driver);
}
static void __exit nv_exit(void)

View File

@ -104,7 +104,7 @@ static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_irq_clear(struct ata_port *ap);
static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
static void pdc_host_stop(struct ata_host_set *host_set);
static void pdc_host_stop(struct ata_host *host);
static struct scsi_host_template pdc_ata_sht = {
@ -175,7 +175,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_2037x */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -185,7 +185,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20319 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -195,7 +195,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20619 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -205,7 +205,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20771 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -215,7 +215,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_2057x */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -225,7 +225,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_40518 */
{
.sht = &pdc_ata_sht,
.host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x7f, /* udma0-6 ; FIXME */
@ -292,7 +292,7 @@ static struct pci_driver pdc_ata_pci_driver = {
static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
@ -326,7 +326,7 @@ err_out:
static void pdc_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp = ap->private_data;
ap->private_data = NULL;
@ -336,11 +336,11 @@ static void pdc_port_stop(struct ata_port *ap)
}
static void pdc_host_stop(struct ata_host_set *host_set)
static void pdc_host_stop(struct ata_host *host)
{
struct pdc_host_priv *hp = host_set->private_data;
struct pdc_host_priv *hp = host->private_data;
ata_pci_host_stop(host_set);
ata_pci_host_stop(host);
kfree(hp);
}
@ -443,14 +443,14 @@ static void pdc_qc_prep(struct ata_queued_cmd *qc)
static void pdc_eng_timeout(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
u8 drv_stat;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
@ -473,7 +473,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
ata_eh_qc_complete(qc);
DPRINTK("EXIT\n");
}
@ -509,15 +509,15 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
static void pdc_irq_clear(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
readl(mmio + PDC_INT_SEQMASK);
}
static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp;
@ -526,12 +526,12 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("ENTER\n");
if (!host_set || !host_set->mmio_base) {
if (!host || !host->mmio_base) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}
mmio_base = host_set->mmio_base;
mmio_base = host->mmio_base;
/* reading should also clear interrupts */
mask = readl(mmio_base + PDC_INT_SEQMASK);
@ -541,7 +541,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
return IRQ_NONE;
}
spin_lock(&host_set->lock);
spin_lock(&host->lock);
mask &= 0xffff; /* only 16 tags possible */
if (!mask) {
@ -551,9 +551,9 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
writel(mask, mmio_base + PDC_INT_SEQMASK);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
VPRINTK("port %u\n", i);
ap = host_set->ports[i];
ap = host->ports[i];
tmp = mask & (1 << (i + 1));
if (tmp && ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
@ -568,7 +568,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
VPRINTK("EXIT\n");
done_irq:
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
@ -581,8 +581,8 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
VPRINTK("ENTER, ap %p\n", ap);
writel(0x00000001, ap->host_set->mmio_base + (seq * 4));
readl(ap->host_set->mmio_base + (seq * 4)); /* flush */
writel(0x00000001, ap->host->mmio_base + (seq * 4));
readl(ap->host->mmio_base + (seq * 4)); /* flush */
pp->pkt[2] = seq;
wmb(); /* flush PRD, pkt writes */
@ -743,7 +743,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
probe_ent->private_data = hp;
probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
probe_ent->port_flags = pdc_port_info[board_idx].flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
@ -824,7 +824,7 @@ err_out:
static int __init pdc_ata_init(void)
{
return pci_module_init(&pdc_ata_pci_driver);
return pci_register_driver(&pdc_ata_pci_driver);
}

View File

@ -116,7 +116,7 @@ static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs);
static int qs_port_start(struct ata_port *ap);
static void qs_host_stop(struct ata_host_set *host_set);
static void qs_host_stop(struct ata_host *host);
static void qs_port_stop(struct ata_port *ap);
static void qs_phy_reset(struct ata_port *ap);
static void qs_qc_prep(struct ata_queued_cmd *qc);
@ -174,7 +174,7 @@ static const struct ata_port_info qs_port_info[] = {
/* board_2068_idx */
{
.sht = &qs_ata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SATA_RESET |
//FIXME ATA_FLAG_SRST |
ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,
@ -220,7 +220,7 @@ static void qs_irq_clear(struct ata_port *ap)
static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
@ -228,7 +228,7 @@ static inline void qs_enter_reg_mode(struct ata_port *ap)
static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
@ -342,7 +342,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
VPRINTK("ENTER, ap %p\n", ap);
@ -375,11 +375,11 @@ static unsigned int qs_qc_issue(struct ata_queued_cmd *qc)
return ata_qc_issue_prot(qc);
}
static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0;
u8 sFFE;
u8 __iomem *mmio_base = host_set->mmio_base;
u8 __iomem *mmio_base = host->mmio_base;
do {
u32 sff0 = readl(mmio_base + QS_HST_SFF);
@ -391,7 +391,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
u8 sDST = sff0 >> 16; /* dev status */
u8 sHST = sff1 & 0x3f; /* host status */
unsigned int port_no = (sff1 >> 8) & 0x03;
struct ata_port *ap = host_set->ports[port_no];
struct ata_port *ap = host->ports[port_no];
DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
sff1, sff0, port_no, sHST, sDST);
@ -421,13 +421,13 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
return handled;
}
static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
static inline unsigned int qs_intr_mmio(struct ata_host *host)
{
unsigned int handled = 0, port_no;
for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap;
ap = host_set->ports[port_no];
ap = host->ports[port_no];
if (ap &&
!(ap->flags & ATA_FLAG_DISABLED)) {
struct ata_queued_cmd *qc;
@ -457,14 +457,14 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int handled = 0;
VPRINTK("ENTER\n");
spin_lock(&host_set->lock);
handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set);
spin_unlock(&host_set->lock);
spin_lock(&host->lock);
handled = qs_intr_pkt(host) | qs_intr_mmio(host);
spin_unlock(&host->lock);
VPRINTK("EXIT\n");
@ -491,9 +491,9 @@ static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
static int qs_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct qs_port_priv *pp;
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
int rc;
@ -530,7 +530,7 @@ err_out:
static void qs_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct qs_port_priv *pp = ap->private_data;
if (pp != NULL) {
@ -543,10 +543,10 @@ static void qs_port_stop(struct ata_port *ap)
ata_port_stop(ap);
}
static void qs_host_stop(struct ata_host_set *host_set)
static void qs_host_stop(struct ata_host *host)
{
void __iomem *mmio_base = host_set->mmio_base;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
void __iomem *mmio_base = host->mmio_base;
struct pci_dev *pdev = to_pci_dev(host->dev);
writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
@ -673,7 +673,7 @@ static int qs_ata_init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = qs_port_info[board_idx].sht;
probe_ent->host_flags = qs_port_info[board_idx].host_flags;
probe_ent->port_flags = qs_port_info[board_idx].flags;
probe_ent->pio_mask = qs_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
@ -712,7 +712,7 @@ err_out:
static int __init qs_ata_init(void)
{
return pci_module_init(&qs_ata_pci_driver);
return pci_register_driver(&qs_ata_pci_driver);
}
static void __exit qs_ata_exit(void)

View File

@ -56,7 +56,7 @@ enum {
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME,
/*
@ -109,7 +109,9 @@ enum {
};
static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev);
#endif
static void sil_dev_config(struct ata_port *ap, struct ata_device *dev);
static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg);
static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
@ -141,12 +143,8 @@ static const struct sil_drivelist {
{ "ST330013AS", SIL_QUIRK_MOD15WRITE },
{ "ST340017AS", SIL_QUIRK_MOD15WRITE },
{ "ST360015AS", SIL_QUIRK_MOD15WRITE },
{ "ST380013AS", SIL_QUIRK_MOD15WRITE },
{ "ST380023AS", SIL_QUIRK_MOD15WRITE },
{ "ST3120023AS", SIL_QUIRK_MOD15WRITE },
{ "ST3160023AS", SIL_QUIRK_MOD15WRITE },
{ "ST3120026AS", SIL_QUIRK_MOD15WRITE },
{ "ST3200822AS", SIL_QUIRK_MOD15WRITE },
{ "ST340014ASL", SIL_QUIRK_MOD15WRITE },
{ "ST360014ASL", SIL_QUIRK_MOD15WRITE },
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
@ -161,8 +159,10 @@ static struct pci_driver sil_pci_driver = {
.id_table = sil_pci_tbl,
.probe = sil_init_one,
.remove = ata_pci_remove_one,
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = sil_pci_device_resume,
#endif
};
static struct scsi_host_template sil_sht = {
@ -218,7 +218,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3112 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -227,7 +227,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3112_no_sata_irq */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
SIL_FLAG_NO_SATA_IRQ,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -237,7 +237,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3512 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -246,7 +246,7 @@ static const struct ata_port_info sil_port_info[] = {
/* sil_3114 */
{
.sht = &sil_sht,
.host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -295,10 +295,9 @@ static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
static void sil_post_set_mode (struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
struct ata_device *dev;
void __iomem *addr =
host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
u32 tmp, dev_mode[2];
unsigned int i;
@ -440,15 +439,15 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
static irqreturn_t sil_interrupt(int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
void __iomem *mmio_base = host_set->mmio_base;
struct ata_host *host = dev_instance;
void __iomem *mmio_base = host->mmio_base;
int handled = 0;
int i;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++) {
struct ata_port *ap = host_set->ports[i];
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
@ -466,14 +465,14 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
handled = 1;
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
static void sil_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
u32 tmp;
/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@ -488,7 +487,7 @@ static void sil_freeze(struct ata_port *ap)
static void sil_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host_set->mmio_base;
void __iomem *mmio_base = ap->host->mmio_base;
u32 tmp;
/* clear IRQ */
@ -567,7 +566,7 @@ static void sil_dev_config(struct ata_port *ap, struct ata_device *dev)
}
static void sil_init_controller(struct pci_dev *pdev,
int n_ports, unsigned long host_flags,
int n_ports, unsigned long port_flags,
void __iomem *mmio_base)
{
u8 cls;
@ -587,7 +586,7 @@ static void sil_init_controller(struct pci_dev *pdev,
"cache line size not set. Driver may not function\n");
/* Apply R_ERR on DMA activate FIS errata workaround */
if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) {
int cnt;
for (i = 0, cnt = 0; i < n_ports; i++) {
@ -658,7 +657,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
probe_ent->port_flags = sil_port_info[ent->driver_data].flags;
mmio_base = pci_iomap(pdev, 5, 0);
if (mmio_base == NULL) {
@ -679,7 +678,7 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
ata_std_ports(&probe_ent->port[i]);
}
sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
mmio_base);
pci_set_master(pdev);
@ -700,21 +699,23 @@ err_out:
return rc;
}
#ifdef CONFIG_PM
static int sil_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct ata_host *host = dev_get_drvdata(&pdev->dev);
ata_pci_device_do_resume(pdev);
sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags,
host_set->mmio_base);
ata_host_set_resume(host_set);
sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
host->mmio_base);
ata_host_resume(host);
return 0;
}
#endif
static int __init sil_init(void)
{
return pci_module_init(&sil_pci_driver);
return pci_register_driver(&sil_pci_driver);
}
static void __exit sil_exit(void)

View File

@ -316,7 +316,7 @@ struct sil24_port_priv {
struct ata_taskfile tf; /* Cached taskfile registers */
};
/* ap->host_set->private_data */
/* ap->host->private_data */
struct sil24_host_priv {
void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
@ -337,9 +337,11 @@ static void sil24_error_handler(struct ata_port *ap);
static void sil24_post_internal_cmd(struct ata_queued_cmd *qc);
static int sil24_port_start(struct ata_port *ap);
static void sil24_port_stop(struct ata_port *ap);
static void sil24_host_stop(struct ata_host_set *host_set);
static void sil24_host_stop(struct ata_host *host);
static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int sil24_pci_device_resume(struct pci_dev *pdev);
#endif
static const struct pci_device_id sil24_pci_tbl[] = {
{ 0x1095, 0x3124, PCI_ANY_ID, PCI_ANY_ID, 0, 0, BID_SIL3124 },
@ -355,8 +357,10 @@ static struct pci_driver sil24_pci_driver = {
.id_table = sil24_pci_tbl,
.probe = sil24_init_one,
.remove = ata_pci_remove_one, /* safe? */
#ifdef CONFIG_PM
.suspend = ata_pci_device_suspend,
.resume = sil24_pci_device_resume,
#endif
};
static struct scsi_host_template sil24_sht = {
@ -411,7 +415,7 @@ static const struct ata_port_operations sil24_ops = {
};
/*
* Use bits 30-31 of host_flags to encode available port numbers.
* Use bits 30-31 of port_flags to encode available port numbers.
* Current maximum is 4.
*/
#define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30)
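A quick sketch of how this encoding is expected to round-trip; the matching SIL24_FLAG2NPORTS() decoder is used later in this diff but its definition is not shown here, so the form below is an assumption:

    /* assumed inverse of SIL24_NPORTS2FLAG(): recover the port count */
    #define SIL24_FLAG2NPORTS(flags)	((((flags) >> 30) & 0x3) + 1)
    /* e.g. SIL24_NPORTS2FLAG(4) puts 0x3 into bits 30-31 (0xC0000000),
     * and SIL24_FLAG2NPORTS() maps that back to 4 */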
@ -421,7 +425,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3124 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) |
SIL24_FLAG_PCIX_IRQ_WOC,
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
@ -431,7 +435,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3132 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -440,7 +444,7 @@ static struct ata_port_info sil24_port_info[] = {
/* sil_3131/sil_3531 */
{
.sht = &sil24_sht,
.host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1),
.pio_mask = 0x1f, /* pio0-4 */
.mwdma_mask = 0x07, /* mwdma0-2 */
.udma_mask = 0x3f, /* udma0-5 */
@ -867,8 +871,8 @@ static inline void sil24_host_intr(struct ata_port *ap)
static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct sil24_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_instance;
struct sil24_host_priv *hpriv = host->private_data;
unsigned handled = 0;
u32 status;
int i;
@ -884,20 +888,20 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
if (!(status & IRQ_STAT_4PORTS))
goto out;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 0; i < host_set->n_ports; i++)
for (i = 0; i < host->n_ports; i++)
if (status & (1 << i)) {
struct ata_port *ap = host_set->ports[i];
struct ata_port *ap = host->ports[i];
if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
sil24_host_intr(host_set->ports[i]);
sil24_host_intr(host->ports[i]);
handled++;
} else
printk(KERN_ERR DRV_NAME
": interrupt from disabled port %d\n", i);
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
out:
return IRQ_RETVAL(handled);
}
@ -937,7 +941,7 @@ static inline void sil24_cblk_free(struct sil24_port_priv *pp, struct device *de
static int sil24_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct sil24_port_priv *pp;
union sil24_cmd_block *cb;
size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS;
@ -976,7 +980,7 @@ err_out:
static void sil24_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct sil24_port_priv *pp = ap->private_data;
sil24_cblk_free(pp, dev);
@ -984,10 +988,10 @@ static void sil24_port_stop(struct ata_port *ap)
kfree(pp);
}
static void sil24_host_stop(struct ata_host_set *host_set)
static void sil24_host_stop(struct ata_host *host)
{
struct sil24_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct sil24_host_priv *hpriv = host->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
pci_iounmap(pdev, hpriv->host_base);
pci_iounmap(pdev, hpriv->port_base);
@ -995,7 +999,7 @@ static void sil24_host_stop(struct ata_host_set *host_set)
}
static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
unsigned long host_flags,
unsigned long port_flags,
void __iomem *host_base,
void __iomem *port_base)
{
@ -1028,7 +1032,7 @@ static void sil24_init_controller(struct pci_dev *pdev, int n_ports,
}
/* Configure IRQ WoC */
if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC)
if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC)
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT);
else
writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR);
@ -1097,12 +1101,12 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = pinfo->sht;
probe_ent->host_flags = pinfo->host_flags;
probe_ent->port_flags = pinfo->flags;
probe_ent->pio_mask = pinfo->pio_mask;
probe_ent->mwdma_mask = pinfo->mwdma_mask;
probe_ent->udma_mask = pinfo->udma_mask;
probe_ent->port_ops = pinfo->port_ops;
probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags);
probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags);
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
@ -1140,14 +1144,14 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Apply workaround for completion IRQ loss on PCI-X errata */
if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) {
tmp = readl(host_base + HOST_CTRL);
if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL))
dev_printk(KERN_INFO, &pdev->dev,
"Applying completion IRQ loss on PCI-X "
"errata fix\n");
else
probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC;
}
for (i = 0; i < probe_ent->n_ports; i++) {
@ -1160,7 +1164,7 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ata_std_ports(&probe_ent->port[i]);
}
sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags,
sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags,
host_base, port_base);
pci_set_master(pdev);
@ -1184,28 +1188,29 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
#ifdef CONFIG_PM
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev);
struct sil24_host_priv *hpriv = host_set->private_data;
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct sil24_host_priv *hpriv = host->private_data;
ata_pci_device_do_resume(pdev);
if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
sil24_init_controller(pdev, host_set->n_ports,
host_set->ports[0]->flags,
sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
hpriv->host_base, hpriv->port_base);
ata_host_set_resume(host_set);
ata_host_resume(host);
return 0;
}
#endif
static int __init sil24_init(void)
{
return pci_module_init(&sil24_pci_driver);
return pci_register_driver(&sil24_pci_driver);
}
static void __exit sil24_exit(void)

View File

@ -128,7 +128,7 @@ static const struct ata_port_operations sis_ops = {
static struct ata_port_info sis_port_info = {
.sht = &sis_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x7,
.udma_mask = 0x7f,
@ -158,7 +158,7 @@ static unsigned int get_scr_cfg_addr(unsigned int port_no, unsigned int sc_reg,
static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device);
u32 val, val2 = 0;
u8 pmr;
@ -178,7 +178,7 @@ static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device);
u8 pmr;
@ -195,7 +195,7 @@ static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u32 val, val2 = 0;
u8 pmr;
@ -217,7 +217,7 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)
static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 pmr;
if (sc_reg > SCR_CONTROL)
@ -275,17 +275,17 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* check and see if the SCRs are in IO space or PCI cfg space */
pci_read_config_dword(pdev, SIS_GENCTL, &genctl);
if ((genctl & GENCTL_IOMAPPED_SCR) == 0)
probe_ent->host_flags |= SIS_FLAG_CFGSCR;
probe_ent->port_flags |= SIS_FLAG_CFGSCR;
/* if hardware thinks SCRs are in IO space, but there are
* no IO resources assigned, change to PCI cfg space.
*/
if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) &&
if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) &&
((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) ||
(pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) {
genctl &= ~GENCTL_IOMAPPED_SCR;
pci_write_config_dword(pdev, SIS_GENCTL, genctl);
probe_ent->host_flags |= SIS_FLAG_CFGSCR;
probe_ent->port_flags |= SIS_FLAG_CFGSCR;
}
pci_read_config_byte(pdev, SIS_PMR, &pmr);
@ -306,7 +306,7 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
port2_start = 0x20;
}
if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) {
if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
probe_ent->port[0].scr_addr =
pci_resource_start(pdev, SIS_SCR_PCI_BAR);
probe_ent->port[1].scr_addr =
@ -334,7 +334,7 @@ err_out:
static int __init sis_init(void)
{
return pci_module_init(&sis_pci_driver);
return pci_register_driver(&sis_pci_driver);
}
static void __exit sis_exit(void)

View File

@ -169,7 +169,7 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
@ -199,7 +199,7 @@ static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc)
* @qc: Info associated with this ATA transaction.
*
* LOCKING:
* spin_lock_irqsave(host_set lock)
* spin_lock_irqsave(host lock)
*/
static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc)
@ -261,12 +261,12 @@ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start,
return 0;
/* Find the OF node for the PCI device proper */
np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev));
np = pci_device_to_OF_node(to_pci_dev(ap->host->dev));
if (np == NULL)
return 0;
/* Match it to a port node */
index = (ap == ap->host_set->ports[0]) ? 0 : 1;
index = (ap == ap->host->ports[0]) ? 0 : 1;
for (np = np->child; np != NULL; np = np->sibling) {
const u32 *reg = get_property(np, "reg", NULL);
if (!reg)
@ -423,7 +423,7 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);
probe_ent->sht = &k2_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 4;
@ -488,7 +488,7 @@ static struct pci_driver k2_sata_pci_driver = {
static int __init k2_sata_init(void)
{
return pci_module_init(&k2_sata_pci_driver);
return pci_register_driver(&k2_sata_pci_driver);
}

View File

@ -160,7 +160,7 @@ static void pdc_port_stop(struct ata_port *ap);
static void pdc20621_qc_prep(struct ata_queued_cmd *qc);
static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf);
static void pdc20621_host_stop(struct ata_host_set *host_set);
static void pdc20621_host_stop(struct ata_host *host);
static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe);
static int pdc20621_detect_dimm(struct ata_probe_ent *pe);
static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe,
@ -218,7 +218,7 @@ static const struct ata_port_info pdc_port_info[] = {
/* board_20621 */
{
.sht = &pdc_sata_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_SRST | ATA_FLAG_MMIO |
ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
.pio_mask = 0x1f, /* pio0-4 */
@ -244,21 +244,21 @@ static struct pci_driver pdc_sata_pci_driver = {
};
static void pdc20621_host_stop(struct ata_host_set *host_set)
static void pdc20621_host_stop(struct ata_host *host)
{
struct pci_dev *pdev = to_pci_dev(host_set->dev);
struct pdc_host_priv *hpriv = host_set->private_data;
struct pci_dev *pdev = to_pci_dev(host->dev);
struct pdc_host_priv *hpriv = host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
pci_iounmap(pdev, dimm_mmio);
kfree(hpriv);
pci_iounmap(pdev, host_set->mmio_base);
pci_iounmap(pdev, host->mmio_base);
}
static int pdc_port_start(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp;
int rc;
@ -293,7 +293,7 @@ err_out:
static void pdc_port_stop(struct ata_port *ap)
{
struct device *dev = ap->host_set->dev;
struct device *dev = ap->host->dev;
struct pdc_port_priv *pp = ap->private_data;
ap->private_data = NULL;
@ -453,8 +453,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no;
unsigned int i, idx, total_len = 0, sgt_len;
@ -514,8 +514,8 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host_set->mmio_base;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
unsigned int portno = ap->port_no;
unsigned int i;
@ -565,8 +565,8 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -583,7 +583,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
u32 pkt_ofs)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host_set->private_data;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK;
if (!pp->doing_hdma) {
@ -601,7 +601,7 @@ static void pdc20621_push_hdma(struct ata_queued_cmd *qc,
static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_host_priv *pp = ap->host_set->private_data;
struct pdc_host_priv *pp = ap->host->private_data;
unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK;
/* if nothing on queue, we're done */
@ -620,7 +620,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int port_no = ap->port_no;
struct pdc_host_priv *hpriv = ap->host_set->private_data;
struct pdc_host_priv *hpriv = ap->host->private_data;
void *dimm_mmio = hpriv->dimm_mmio;
dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
@ -638,9 +638,9 @@ static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
unsigned int port_no = ap->port_no;
void __iomem *mmio = host_set->mmio_base;
void __iomem *mmio = host->mmio_base;
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 seq = (u8) (port_no + 1);
unsigned int port_ofs;
@ -781,8 +781,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
static void pdc20621_irq_clear(struct ata_port *ap)
{
struct ata_host_set *host_set = ap->host_set;
void __iomem *mmio = host_set->mmio_base;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
mmio += PDC_CHIP0_OFS;
@ -791,7 +791,7 @@ static void pdc20621_irq_clear(struct ata_port *ap)
static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
struct ata_port *ap;
u32 mask = 0;
unsigned int i, tmp, port_no;
@ -800,12 +800,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
VPRINTK("ENTER\n");
if (!host_set || !host_set->mmio_base) {
if (!host || !host->mmio_base) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}
mmio_base = host_set->mmio_base;
mmio_base = host->mmio_base;
/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
@ -822,16 +822,16 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
return IRQ_NONE;
}
spin_lock(&host_set->lock);
spin_lock(&host->lock);
for (i = 1; i < 9; i++) {
port_no = i - 1;
if (port_no > 3)
port_no -= 4;
if (port_no >= host_set->n_ports)
if (port_no >= host->n_ports)
ap = NULL;
else
ap = host_set->ports[port_no];
ap = host->ports[port_no];
tmp = mask & (1 << i);
VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
if (tmp && ap &&
@ -845,7 +845,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
}
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
VPRINTK("mask == 0x%x\n", mask);
@ -857,13 +857,13 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
static void pdc_eng_timeout(struct ata_port *ap)
{
u8 drv_stat;
struct ata_host_set *host_set = ap->host_set;
struct ata_host *host = ap->host;
struct ata_queued_cmd *qc;
unsigned long flags;
DPRINTK("ENTER\n");
spin_lock_irqsave(&host_set->lock, flags);
spin_lock_irqsave(&host->lock, flags);
qc = ata_qc_from_tag(ap, ap->active_tag);
@ -885,7 +885,7 @@ static void pdc_eng_timeout(struct ata_port *ap)
break;
}
spin_unlock_irqrestore(&host_set->lock, flags);
spin_unlock_irqrestore(&host->lock, flags);
ata_eh_qc_complete(qc);
DPRINTK("EXIT\n");
}
@ -1429,7 +1429,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
hpriv->dimm_mmio = dimm_mmio;
probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->host_flags = pdc_port_info[board_idx].host_flags;
probe_ent->port_flags = pdc_port_info[board_idx].flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask;
probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
@ -1482,7 +1482,7 @@ err_out:
static int __init pdc_sata_init(void)
{
return pci_module_init(&pdc_sata_pci_driver);
return pci_register_driver(&pdc_sata_pci_driver);
}

View File

@ -128,7 +128,7 @@ static const struct ata_port_operations uli_ops = {
static struct ata_port_info uli_port_info = {
.sht = &uli_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = 0x7f, /* udma0-6 */
.port_ops = &uli_ops,
@ -143,13 +143,13 @@ MODULE_VERSION(DRV_VERSION);
static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg)
{
struct uli_priv *hpriv = ap->host_set->private_data;
struct uli_priv *hpriv = ap->host->private_data;
return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg);
}
static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg);
u32 val;
@ -159,7 +159,7 @@ static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg)
static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val)
{
struct pci_dev *pdev = to_pci_dev(ap->host_set->dev);
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned int cfg_addr = get_scr_cfg_addr(ap, scr);
pci_write_config_dword(pdev, cfg_addr, val);
@ -287,7 +287,7 @@ err_out:
static int __init uli_init(void)
{
return pci_module_init(&uli_pci_driver);
return pci_register_driver(&uli_pci_driver);
}
static void __exit uli_exit(void)

View File

@ -176,7 +176,7 @@ static const struct ata_port_operations vt6421_sata_ops = {
static struct ata_port_info vt6420_port_info = {
.sht = &svia_sht,
.host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
.pio_mask = 0x1f,
.mwdma_mask = 0x07,
.udma_mask = 0x7f,
@ -346,7 +346,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
INIT_LIST_HEAD(&probe_ent->node);
probe_ent->sht = &svia_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY;
probe_ent->port_ops = &vt6421_sata_ops;
probe_ent->n_ports = N_PORTS;
probe_ent->irq = pdev->irq;
@ -489,7 +489,7 @@ err_out:
static int __init svia_init(void)
{
return pci_module_init(&svia_pci_driver);
return pci_register_driver(&svia_pci_driver);
}
static void __exit svia_exit(void)

View File

@ -123,7 +123,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
void __iomem *mask_addr;
u8 mask;
mask_addr = ap->host_set->mmio_base +
mask_addr = ap->host->mmio_base +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
mask = readb(mask_addr);
if (ctl & ATA_NIEN)
@ -206,20 +206,20 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
struct pt_regs *regs)
{
struct ata_host_set *host_set = dev_instance;
struct ata_host *host = dev_instance;
unsigned int i;
unsigned int handled = 0;
u32 int_status;
spin_lock(&host_set->lock);
spin_lock(&host->lock);
int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET);
int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
for (i = 0; i < host_set->n_ports; i++) {
for (i = 0; i < host->n_ports; i++) {
if (int_status & ((u32) 0xFF << (8 * i))) {
struct ata_port *ap;
ap = host_set->ports[i];
ap = host->ports[i];
if (is_vsc_sata_int_err(i, int_status)) {
u32 err_status;
@ -259,7 +259,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
}
}
spin_unlock(&host_set->lock);
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
}
@ -395,7 +395,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80);
probe_ent->sht = &vsc_sata_sht;
probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO;
probe_ent->port_ops = &vsc_sata_ops;
probe_ent->n_ports = 4;
@ -462,7 +462,7 @@ static struct pci_driver vsc_sata_pci_driver = {
static int __init vsc_sata_init(void)
{
return pci_module_init(&vsc_sata_pci_driver);
return pci_register_driver(&vsc_sata_pci_driver);
}

View File

@ -1210,7 +1210,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_962, quirk_sis_96x_
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_963, quirk_sis_96x_smbus );
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC, quirk_sis_96x_smbus );
#if defined(CONFIG_SCSI_SATA) || defined(CONFIG_SCSI_SATA_MODULE)
#if defined(CONFIG_ATA) || defined(CONFIG_ATA_MODULE)
/*
* If we are using libata we can drive this chip properly but must
@ -1300,7 +1300,7 @@ static int __init combined_setup(char *str)
}
__setup("combined_mode=", combined_setup);
#ifdef CONFIG_SCSI_SATA_INTEL_COMBINED
#ifdef CONFIG_SATA_INTEL_COMBINED
static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
{
u8 prog, comb, tmp;
@ -1393,7 +1393,7 @@ static void __devinit quirk_intel_ide_combined(struct pci_dev *pdev)
request_region(0x170, 8, "libata"); /* port 1 */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_intel_ide_combined );
#endif /* CONFIG_SCSI_SATA_INTEL_COMBINED */
#endif /* CONFIG_SATA_INTEL_COMBINED */
int pcie_mch_quirk;

View File

@ -494,67 +494,6 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
config SCSI_SATA
tristate "Serial ATA (SATA) support"
depends on SCSI
help
This driver family supports Serial ATA host controllers
and devices.
If unsure, say N.
config SCSI_SATA_AHCI
tristate "AHCI SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for AHCI Serial ATA.
If unsure, say N.
config SCSI_SATA_SVW
tristate "ServerWorks Frodo / Apple K2 SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for Broadcom/Serverworks/Apple K2
SATA support.
If unsure, say N.
config SCSI_ATA_PIIX
tristate "Intel PIIX/ICH SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for ICH5/6/7/8 Serial ATA.
If PATA support was enabled previously, this enables
support for select Intel PIIX/ICH PATA host controllers.
If unsure, say N.
config SCSI_SATA_MV
tristate "Marvell SATA support (HIGHLY EXPERIMENTAL)"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] chips.
If unsure, say N.
config SCSI_SATA_NV
tristate "NVIDIA SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for NVIDIA Serial ATA.
If unsure, say N.
config SCSI_PDC_ADMA
tristate "Pacific Digital ADMA support"
depends on SCSI_SATA && PCI
help
This option enables support for Pacific Digital ADMA controllers
If unsure, say N.
config SCSI_HPTIOP
tristate "HighPoint RocketRAID 3xxx Controller support"
depends on SCSI && PCI
@ -565,83 +504,6 @@ config SCSI_HPTIOP
To compile this driver as a module, choose M here; the module
will be called hptiop. If unsure, say N.
config SCSI_SATA_QSTOR
tristate "Pacific Digital SATA QStor support"
depends on SCSI_SATA && PCI
help
This option enables support for Pacific Digital Serial ATA QStor.
If unsure, say N.
config SCSI_SATA_PROMISE
tristate "Promise SATA TX2/TX4 support"
depends on SCSI_SATA && PCI
help
This option enables support for Promise Serial ATA TX2/TX4.
If unsure, say N.
config SCSI_SATA_SX4
tristate "Promise SATA SX4 support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for Promise Serial ATA SX4.
If unsure, say N.
config SCSI_SATA_SIL
tristate "Silicon Image SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for Silicon Image Serial ATA.
If unsure, say N.
config SCSI_SATA_SIL24
tristate "Silicon Image 3124/3132 SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for Silicon Image 3124/3132 Serial ATA.
If unsure, say N.
config SCSI_SATA_SIS
tristate "SiS 964/180 SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for SiS Serial ATA 964/180.
If unsure, say N.
config SCSI_SATA_ULI
tristate "ULi Electronics SATA support"
depends on SCSI_SATA && PCI && EXPERIMENTAL
help
This option enables support for ULi Electronics SATA.
If unsure, say N.
config SCSI_SATA_VIA
tristate "VIA SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for VIA Serial ATA.
If unsure, say N.
config SCSI_SATA_VITESSE
tristate "VITESSE VSC-7174 / INTEL 31244 SATA support"
depends on SCSI_SATA && PCI
help
This option enables support for Vitesse VSC7174 and Intel 31244 Serial ATA.
If unsure, say N.
config SCSI_SATA_INTEL_COMBINED
bool
depends on IDE=y && !BLK_DEV_IDE_SATA && (SCSI_SATA_AHCI || SCSI_ATA_PIIX)
default y
config SCSI_BUSLOGIC
tristate "BusLogic SCSI support"
depends on (PCI || ISA || MCA) && SCSI && ISA_DMA_API

View File

@ -125,21 +125,6 @@ obj-$(CONFIG_SCSI_LASI700) += 53c700.o lasi700.o
obj-$(CONFIG_SCSI_NSP32) += nsp32.o
obj-$(CONFIG_SCSI_IPR) += ipr.o
obj-$(CONFIG_SCSI_IBMVSCSI) += ibmvscsi/
obj-$(CONFIG_SCSI_SATA_AHCI) += libata.o ahci.o
obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o
obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o
obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o
obj-$(CONFIG_SCSI_SATA_QSTOR) += libata.o sata_qstor.o
obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o
obj-$(CONFIG_SCSI_SATA_SIL24) += libata.o sata_sil24.o
obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o
obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o
obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o
obj-$(CONFIG_SCSI_SATA_SX4) += libata.o sata_sx4.o
obj-$(CONFIG_SCSI_SATA_NV) += libata.o sata_nv.o
obj-$(CONFIG_SCSI_SATA_ULI) += libata.o sata_uli.o
obj-$(CONFIG_SCSI_SATA_MV) += libata.o sata_mv.o
obj-$(CONFIG_SCSI_PDC_ADMA) += libata.o pdc_adma.o
obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o
obj-$(CONFIG_SCSI_STEX) += stex.o
@ -171,7 +156,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
libata-objs := libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1,12 @@
#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
#define __ASM_GENERIC_LIBATA_PORTMAP_H
#define ATA_PRIMARY_CMD 0x1F0
#define ATA_PRIMARY_CTL 0x3F6
#define ATA_PRIMARY_IRQ 14
#define ATA_SECONDARY_CMD 0x170
#define ATA_SECONDARY_CTL 0x376
#define ATA_SECONDARY_IRQ 15
#endif
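For illustration only, a legacy-mode SFF driver would be expected to consume these constants roughly as below when filling its probe entry; the probe_ent and ata_ioports field names follow the declarations visible later in this diff, and the snippet itself is a sketch, not part of the patch:

    /* sketch: describe the primary legacy channel using the portmap */
    probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;	/* 0x1F0 taskfile */
    probe_ent->port[0].altstatus_addr =
    probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;	/* 0x3F6 device control */
    ata_std_ports(&probe_ent->port[0]);			/* derive the rest */
    probe_ent->irq = ATA_PRIMARY_IRQ;			/* legacy IRQ 14 */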

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -0,0 +1 @@
#include <asm-generic/libata-portmap.h>

View File

@ -40,6 +40,8 @@ enum {
ATA_MAX_DEVICES = 2, /* per bus/port */
ATA_MAX_PRD = 256, /* we could make these 256/256 */
ATA_SECT_SIZE = 512,
ATA_MAX_SECTORS = 256,
ATA_MAX_SECTORS_LBA48 = 65535, /* TODO: 65536? */
ATA_ID_WORDS = 256,
ATA_ID_SERNO_OFS = 10,
@ -168,12 +170,16 @@ enum {
XFER_UDMA_2 = 0x42,
XFER_UDMA_1 = 0x41,
XFER_UDMA_0 = 0x40,
XFER_MW_DMA_4 = 0x24, /* CFA only */
XFER_MW_DMA_3 = 0x23, /* CFA only */
XFER_MW_DMA_2 = 0x22,
XFER_MW_DMA_1 = 0x21,
XFER_MW_DMA_0 = 0x20,
XFER_SW_DMA_2 = 0x12,
XFER_SW_DMA_1 = 0x11,
XFER_SW_DMA_0 = 0x10,
XFER_PIO_6 = 0x0E, /* CFA only */
XFER_PIO_5 = 0x0D, /* CFA only */
XFER_PIO_4 = 0x0C,
XFER_PIO_3 = 0x0B,
XFER_PIO_2 = 0x0A,
@ -272,7 +278,6 @@ struct ata_taskfile {
};
#define ata_id_is_ata(id) (((id)[0] & (1 << 15)) == 0)
#define ata_id_is_cfa(id) ((id)[0] == 0x848A)
#define ata_id_is_sata(id) ((id)[93] == 0)
#define ata_id_rahead_enabled(id) ((id)[85] & (1 << 6))
#define ata_id_wcache_enabled(id) ((id)[85] & (1 << 5))
@ -304,6 +309,9 @@ static inline unsigned int ata_id_major_version(const u16 *id)
{
unsigned int mver;
if (id[ATA_ID_MAJOR_VER] == 0xFFFF)
return 0;
for (mver = 14; mver >= 1; mver--)
if (id[ATA_ID_MAJOR_VER] & (1 << mver))
break;
@ -312,8 +320,8 @@ static inline unsigned int ata_id_major_version(const u16 *id)
static inline int ata_id_current_chs_valid(const u16 *id)
{
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
has not been issued to the device then the values of
/* For ATA-1 devices, if the INITIALIZE DEVICE PARAMETERS command
has not been issued to the device then the values of
id[54] to id[56] are vendor specific. */
return (id[53] & 0x01) && /* Current translation valid */
id[54] && /* cylinders in current translation */
@ -322,6 +330,18 @@ static inline int ata_id_current_chs_valid(const u16 *id)
id[56]; /* sectors in current translation */
}
static inline int ata_id_is_cfa(const u16 *id)
{
u16 v = id[0];
if (v == 0x848A) /* Standard CF */
return 1;
/* Could be CF hiding as standard ATA */
if (ata_id_major_version(id) >= 3 && id[82] != 0xFFFF &&
(id[82] & (1 << 2)))
return 1;
return 0;
}
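The CFA-only transfer modes added earlier in this diff (XFER_PIO_5/6, XFER_MW_DMA_3/4) only apply when this helper reports a CompactFlash device. A purely illustrative helper (not part of the patch) showing the intended gate:

    /* illustrative: CFA cards may be offered the PIO5/PIO6 modes defined
     * above, plain ATA devices stay capped at PIO4 */
    static inline u8 example_max_pio_xfer(const u16 *id)
    {
    	return ata_id_is_cfa(id) ? XFER_PIO_6 : XFER_PIO_4;
    }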
static inline int atapi_cdb_len(const u16 *dev_id)
{
u16 tmp = dev_id[0] & 0x3;

View File

@ -36,6 +36,8 @@
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
#include <asm/libata-portmap.h>
/*
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
@ -44,7 +46,7 @@
#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
#undef ATA_NDEBUG /* define to disable quick runtime checks */
#undef ATA_ENABLE_PATA /* define to enable PATA support in some
#define ATA_ENABLE_PATA /* define to enable PATA support in some
* low-level drivers */
@ -112,8 +114,6 @@ enum {
/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
ATA_MAX_QUEUE = 32,
ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1,
ATA_MAX_SECTORS = 200, /* FIXME */
ATA_MAX_SECTORS_LBA48 = 65535,
ATA_MAX_BUS = 2,
ATA_DEF_BUSY_WAIT = 10000,
ATA_SHORT_PAUSE = (HZ >> 6) + 1,
@ -197,8 +197,8 @@ enum {
ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */
/* host set flags */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */
/* various lengths of time */
ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */
ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */
@ -225,8 +225,8 @@ enum {
/* encoding various smaller bitmaps into a single
* unsigned int bitmap
*/
ATA_BITS_PIO = 5,
ATA_BITS_MWDMA = 3,
ATA_BITS_PIO = 7,
ATA_BITS_MWDMA = 5,
ATA_BITS_UDMA = 8,
ATA_SHIFT_PIO = 0,
@ -289,6 +289,11 @@ enum {
* most devices.
*/
ATA_SPINUP_WAIT = 8000,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent) */
ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */
};
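A rough sketch of how the new horkage bit is meant to be consumed once set on a device; the err_mask variable below is hypothetical, only struct ata_device's horkage field and the flag itself come from this patch:

    /* sketch: a probe/EH path tolerating a drive that misreports its
     * power-on diagnostic result */
    if (err_mask && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
    	err_mask = 0;	/* known-bogus diagnostic failure, ignore it */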
enum hsm_task_states {
@ -350,23 +355,32 @@ struct ata_probe_ent {
struct scsi_host_template *sht;
struct ata_ioports port[ATA_MAX_PORTS];
unsigned int n_ports;
unsigned int hard_port_no;
unsigned int dummy_port_mask;
unsigned int pio_mask;
unsigned int mwdma_mask;
unsigned int udma_mask;
unsigned int legacy_mode;
unsigned long irq;
unsigned long irq2;
unsigned int irq_flags;
unsigned long host_flags;
unsigned long host_set_flags;
unsigned long port_flags;
unsigned long _host_flags;
void __iomem *mmio_base;
void *private_data;
/* port_info for the secondary port. Together with irq2, it's
* used to implement non-uniform secondary port. Currently,
* the only user is ata_piix combined mode. This workaround
* will be removed together with ata_probe_ent when init model
* is updated.
*/
const struct ata_port_info *pinfo2;
};
struct ata_host_set {
struct ata_host {
spinlock_t lock;
struct device *dev;
unsigned long irq;
unsigned long irq2;
void __iomem *mmio_base;
unsigned int n_ports;
void *private_data;
@ -374,7 +388,6 @@ struct ata_host_set {
unsigned long flags;
int simplex_claimed; /* Keep separate in case we
ever need to do this locked */
struct ata_host_set *next; /* for legacy mode */
struct ata_port *ports[0];
};
@ -420,7 +433,7 @@ struct ata_queued_cmd {
void *private_data;
};
struct ata_host_stats {
struct ata_port_stats {
unsigned long unhandled_irq;
unsigned long idle_irq;
unsigned long rw_reqbuf;
@ -468,6 +481,7 @@ struct ata_device {
/* error history */
struct ata_ering ering;
unsigned int horkage; /* List of broken features */
};
/* Offset into struct ata_device. Fields above it are maintained
@ -498,14 +512,13 @@ struct ata_eh_context {
};
struct ata_port {
struct Scsi_Host *host; /* our co-allocated scsi host */
struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
const struct ata_port_operations *ops;
spinlock_t *lock;
unsigned long flags; /* ATA_FLAG_xxx */
unsigned int pflags; /* ATA_PFLAG_xxx */
unsigned int id; /* unique id req'd by scsi midlyr */
unsigned int port_no; /* unique port #; from zero */
unsigned int hard_port_no; /* hardware port #; from zero */
struct ata_prd *prd; /* our SG list */
dma_addr_t prd_dma; /* and its DMA mapping */
@ -524,7 +537,7 @@ struct ata_port {
unsigned int hw_sata_spd_limit;
unsigned int sata_spd_limit; /* SATA PHY speed limit */
/* record runtime error info, protected by host_set lock */
/* record runtime error info, protected by host lock */
struct ata_eh_info eh_info;
/* EH context owned by EH */
struct ata_eh_context eh_context;
@ -538,8 +551,8 @@ struct ata_port {
unsigned int active_tag;
u32 sactive;
struct ata_host_stats stats;
struct ata_host_set *host_set;
struct ata_port_stats stats;
struct ata_host *host;
struct device *dev;
struct work_struct port_task;
@ -615,7 +628,7 @@ struct ata_port_operations {
int (*port_start) (struct ata_port *ap);
void (*port_stop) (struct ata_port *ap);
void (*host_stop) (struct ata_host_set *host_set);
void (*host_stop) (struct ata_host *host);
void (*bmdma_stop) (struct ata_queued_cmd *qc);
u8 (*bmdma_status) (struct ata_port *ap);
@ -623,7 +636,7 @@ struct ata_port_operations {
struct ata_port_info {
struct scsi_host_template *sht;
unsigned long host_flags;
unsigned long flags;
unsigned long pio_mask;
unsigned long mwdma_mask;
unsigned long udma_mask;
@ -649,6 +662,8 @@ extern const unsigned long sata_deb_timing_normal[];
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];
extern const struct ata_port_operations ata_dummy_port_ops;
static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
@ -658,6 +673,11 @@ sata_ehc_deb_timing(struct ata_eh_context *ehc)
return sata_deb_timing_normal;
}
static inline int ata_port_is_dummy(struct ata_port *ap)
{
return ap->ops == &ata_dummy_port_ops;
}
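A minimal usage sketch (not from the patch); init_port_hw() is a placeholder for whatever per-port setup a driver performs:

    /* sketch: skip hardware setup on ports registered as dummies */
    for (i = 0; i < host->n_ports; i++)
    	if (!ata_port_is_dummy(host->ports[i]))
    		init_port_hw(host->ports[i]);	/* hypothetical helper */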
extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
@ -676,19 +696,30 @@ extern void ata_std_ports(struct ata_ioports *ioaddr);
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern void ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_port_detach(struct ata_port *ap);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern void ata_host_init(struct ata_host *, struct device *,
unsigned long, const struct ata_port_operations *);
extern void ata_host_remove(struct ata_host *host);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_release(struct Scsi_Host *host);
extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
extern int ata_sas_port_init(struct ata_port *);
extern int ata_sas_port_start(struct ata_port *ap);
extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *),
struct ata_port *ap);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
@ -697,10 +728,9 @@ extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
extern int ata_host_set_suspend(struct ata_host_set *host_set,
pm_message_t mesg);
extern void ata_host_set_resume(struct ata_host_set *host_set);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg);
extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg);
extern void ata_host_resume(struct ata_host *host);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
unsigned long timeout_pat,
@ -725,7 +755,7 @@ extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern void ata_host_stop (struct ata_host *host);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
@ -811,7 +841,7 @@ struct pci_bits {
unsigned long val;
};
extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern void ata_pci_host_stop (struct ata_host *host);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);