From 4dbedf43d26276f6d7c8c3146d0a5b2f0309d968 Mon Sep 17 00:00:00 2001
From: Nathan Williams
Date: Wed, 25 Mar 2009 20:27:37 +1100
Subject: [PATCH 0001/3409] solos: support new FPGA RAM layout

Buffer sizes have been changed to 2048 bytes. Flash upgrades use a
dedicated RAM block. Add support for daughterboard.

Signed-off-by: Nathan Williams
Signed-off-by: David Woodhouse
---
 drivers/atm/solos-pci.c | 71 +++++++++++++++++++++++++++++++++++------
 1 file changed, 62 insertions(+), 9 deletions(-)

diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index bc3079d17876..bfef8d255811 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -59,21 +59,29 @@
 #define RX_DMA_ADDR(port)	(0x30 + (4 * (port)))

 #define DATA_RAM_SIZE	32768
-#define BUF_SIZE	4096
+#define BUF_SIZE	2048
+#define OLD_BUF_SIZE	4096 /* For FPGA versions <= 2*/
 #define FPGA_PAGE	528 /* FPGA flash page size*/
 #define SOLOS_PAGE	512 /* Solos flash page size*/
 #define FPGA_BLOCK	(FPGA_PAGE * 8) /* FPGA flash block size*/
 #define SOLOS_BLOCK	(SOLOS_PAGE * 8) /* Solos flash block size*/

-#define RX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2)
-#define TX_BUF(card, nr) ((card->buffers) + (nr)*BUF_SIZE*2 + BUF_SIZE)
+#define RX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2)
+#define TX_BUF(card, nr) ((card->buffers) + (nr)*(card->buffer_size)*2 + (card->buffer_size))
+#define FLASH_BUF ((card->buffers) + 4*(card->buffer_size)*2)

 #define RX_DMA_SIZE	2048

+#define FPGA_VERSION(a,b) (((a) << 8) + (b))
+#define LEGACY_BUFFERS	2
+#define DMA_SUPPORTED	4
+
 static int reset = 0;
 static int atmdebug = 0;
 static int firmware_upgrade = 0;
 static int fpga_upgrade = 0;
+static int db_firmware_upgrade = 0;
+static int db_fpga_upgrade = 0;

 struct pkt_hdr {
 	__le16 size;
@@ -116,6 +124,8 @@ struct solos_card {
 	wait_queue_head_t param_wq;
 	wait_queue_head_t fw_wq;
 	int using_dma;
+	int fpga_version;
+	int buffer_size;
 };
@@ -136,10 +146,14 @@ MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
 MODULE_PARM_DESC(atmdebug, "Print ATM data");
 MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
 MODULE_PARM_DESC(fpga_upgrade, "Initiate FPGA upgrade");
+MODULE_PARM_DESC(db_firmware_upgrade, "Initiate daughter board Solos firmware upgrade");
+MODULE_PARM_DESC(db_fpga_upgrade, "Initiate daughter board FPGA upgrade");
 module_param(reset, int, 0444);
 module_param(atmdebug, int, 0644);
 module_param(firmware_upgrade, int, 0444);
 module_param(fpga_upgrade, int, 0444);
+module_param(db_firmware_upgrade, int, 0444);
+module_param(db_fpga_upgrade, int, 0444);

 static void fpga_queue(struct solos_card *card, int port, struct sk_buff *skb,
 		       struct atm_vcc *vcc);
@@ -517,10 +531,32 @@ static int flash_upgrade(struct solos_card *card, int chip)
 	if (chip == 0) {
 		fw_name = "solos-FPGA.bin";
 		blocksize = FPGA_BLOCK;
-	} else {
+	}
+
+	if (chip == 1) {
 		fw_name = "solos-Firmware.bin";
 		blocksize = SOLOS_BLOCK;
 	}
+
+	if (chip == 2){
+		if (card->fpga_version > LEGACY_BUFFERS){
+			fw_name = "solos-db-FPGA.bin";
+			blocksize = FPGA_BLOCK;
+		} else {
+			dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+			return -EPERM;
+		}
+	}
+
+	if (chip == 3){
+		if (card->fpga_version > LEGACY_BUFFERS){
+			fw_name = "solos-Firmware.bin";
+			blocksize = SOLOS_BLOCK;
+		} else {
+			dev_info(&card->dev->dev, "FPGA version doesn't support daughter board upgrades\n");
+			return -EPERM;
+		}
+	}

 	if (request_firmware(&fw, fw_name, &card->dev->dev))
 		return -ENOENT;
@@ -536,8 +572,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
 	data32 = ioread32(card->config_regs + FPGA_MODE);

 	/* Set mode to Chip Erase */
-	dev_info(&card->dev->dev, "Set FPGA Flash mode to %s Chip Erase\n",
-		 chip?"Solos":"FPGA");
+	if(chip == 0 || chip == 2)
+		dev_info(&card->dev->dev, "Set FPGA Flash mode to FPGA Chip Erase\n");
+	if(chip == 1 || chip == 3)
+		dev_info(&card->dev->dev, "Set FPGA Flash mode to Solos Chip Erase\n");
 	iowrite32((chip * 2), card->config_regs + FLASH_MODE);
@@ -557,7 +595,10 @@ static int flash_upgrade(struct solos_card *card, int chip)
 		/* Copy block to buffer, swapping each 16 bits */
 		for(i = 0; i < blocksize; i += 4) {
 			uint32_t word = swahb32p((uint32_t *)(fw->data + offset + i));
-			iowrite32(word, RX_BUF(card, 3) + i);
+			if(card->fpga_version > LEGACY_BUFFERS)
+				iowrite32(word, FLASH_BUF + i);
+			else
+				iowrite32(word, RX_BUF(card, 3) + i);
 		}

 		/* Specify block number and then trigger flash write */
@@ -1094,12 +1135,18 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	fpga_ver = (data32 & 0x0000FFFF);
 	major_ver = ((data32 & 0xFF000000) >> 24);
 	minor_ver = ((data32 & 0x00FF0000) >> 16);
+	card->fpga_version = FPGA_VERSION(major_ver,minor_ver);
+	if (card->fpga_version > LEGACY_BUFFERS)
+		card->buffer_size = BUF_SIZE;
+	else
+		card->buffer_size = OLD_BUF_SIZE;
 	dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
 		 major_ver, minor_ver, fpga_ver);

-	if (0 && fpga_ver > 27)
+	if (card->fpga_version >= DMA_SUPPORTED){
 		card->using_dma = 1;
-	else {
+	} else {
+		card->using_dma = 0;
 		/* Set RX empty flag for all ports */
 		iowrite32(0xF0, card->config_regs + FLAGS_ADDR);
 	}
@@ -1131,6 +1178,12 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
 	if (firmware_upgrade)
 		flash_upgrade(card, 1);

+	if (db_fpga_upgrade)
+		flash_upgrade(card, 2);
+
+	if (db_firmware_upgrade)
+		flash_upgrade(card, 3);
+
 	err = atm_init(card);
 	if (err)
 		goto out_free_irq;
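
A note on the arithmetic behind the new layout: with 2048-byte buffers, the
four ports' RX/TX pairs fill only half of the 32768-byte data RAM, which is
what frees a dedicated staging block for flash writes at offset 16384
(previously flash data had to borrow RX_BUF(card, 3)). A minimal standalone
sketch of that arithmetic; the helper names and the printf harness are
illustrative only, not driver code, while the constants are the ones defined
in the patch:

#include <stdio.h>

#define DATA_RAM_SIZE	32768
#define BUF_SIZE	2048	/* FPGA version > 2 */
#define OLD_BUF_SIZE	4096	/* FPGA version <= 2 */
#define PORTS		4

/* Each port owns an RX buffer immediately followed by a TX buffer. */
static unsigned int rx_off(unsigned int bufsz, unsigned int port)
{
	return port * bufsz * 2;
}

static unsigned int tx_off(unsigned int bufsz, unsigned int port)
{
	return port * bufsz * 2 + bufsz;
}

/* The flash staging block begins after all eight port buffers. */
static unsigned int flash_off(unsigned int bufsz)
{
	return PORTS * bufsz * 2;
}

int main(void)
{
	/* Old layout: 4 * 2 * 4096 = 32768 bytes, i.e. the whole RAM. */
	printf("old: rx3 at %u, buffers use %u of %u bytes\n",
	       rx_off(OLD_BUF_SIZE, 3), flash_off(OLD_BUF_SIZE),
	       DATA_RAM_SIZE);
	/* New layout: 4 * 2 * 2048 = 16384 bytes, so the upper half of
	 * the RAM is free for a dedicated flash block. */
	printf("new: tx3 ends at %u, flash block at %u\n",
	       tx_off(BUF_SIZE, 3) + BUF_SIZE, flash_off(BUF_SIZE));
	return 0;
}
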
From 78f857f265241dfa6f343d75b45e8b30935f71df Mon Sep 17 00:00:00 2001
From: Nathan Williams
Date: Wed, 25 Mar 2009 20:33:42 +1100
Subject: [PATCH 0002/3409] solos: Check for rogue received packets

Sometimes there can be received packets with the size field set to
0xFFFF. This seems to only occur after an FPGA or firmware upgrade.
This patch discards packets with an invalid size.

Signed-off-by: Nathan Williams
Signed-off-by: David Woodhouse
---
 drivers/atm/solos-pci.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index bfef8d255811..6c828347c9cc 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -671,6 +671,10 @@ void solos_bh(unsigned long card_arg)
 			memcpy_fromio(header, RX_BUF(card, port), sizeof(*header));

 			size = le16_to_cpu(header->size);
+			if (size > (card->buffer_size - sizeof(*header))){
+				dev_warn(&card->dev->dev, "Invalid buffer size\n");
+				continue;
+			}

 			skb = alloc_skb(size + 1, GFP_ATOMIC);
 			if (!skb) {
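
The fix follows a pattern worth spelling out: a device-supplied length field
is bounded against the buffer that backs it before it is used for allocation,
so a corrupt value such as 0xFFFF can never reach alloc_skb(). A
self-contained sketch of that check (the struct layout and names here are
illustrative; the driver compares against card->buffer_size):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct pkt_hdr {
	uint16_t size;	/* payload length as advertised by the device */
	uint16_t vpi;
	uint16_t vci;
	uint16_t type;
};

/* True if the advertised payload fits in a buf_size-byte buffer that
 * also holds the header itself; size == 0xFFFF always fails. */
bool rx_size_valid(const struct pkt_hdr *h, size_t buf_size)
{
	return h->size <= buf_size - sizeof(*h);
}
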
Reviewed-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 Documentation/crypto/async-tx-api.txt | 14 +++++-----
 arch/arm/mach-iop13xx/setup.c         |  8 +++---
 arch/arm/plat-iop/adma.c              |  2 +-
 crypto/async_tx/async_xor.c           | 16 +++++------
 drivers/dma/dmaengine.c               |  4 +--
 drivers/dma/iop-adma.c                | 38 +++++++++++++--------------
 drivers/md/raid5.c                    |  2 +-
 include/linux/async_tx.h              |  2 +-
 include/linux/dmaengine.h             |  8 +++---
 9 files changed, 47 insertions(+), 47 deletions(-)

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 9f59fcbf5d82..4af12180d191 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -61,13 +61,13 @@ async_<operation>(<op specific parameters>,
 	       void *callback_parameter);

 3.2 Supported operations:
-memcpy  - memory copy between a source and a destination buffer
-memset  - fill a destination buffer with a byte value
-xor     - xor a series of source buffers and write the result to a
-          destination buffer
-xor_zero_sum - xor a series of source buffers and set a flag if the
-               result is zero.  The implementation attempts to prevent
-               writes to memory
+memcpy       - memory copy between a source and a destination buffer
+memset       - fill a destination buffer with a byte value
+xor          - xor a series of source buffers and write the result to a
+               destination buffer
+xor_val      - xor a series of source buffers and set a flag if the
+               result is zero.  The implementation attempts to prevent
+               writes to memory

 3.3 Descriptor management:
 The return value is non-NULL and points to a 'descriptor' when the operation
diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c
index cfd4d2e6dacd..9800228b71d3 100644
--- a/arch/arm/mach-iop13xx/setup.c
+++ b/arch/arm/mach-iop13xx/setup.c
@@ -478,7 +478,7 @@ void __init iop13xx_platform_init(void)
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
 			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -490,7 +490,7 @@ void __init iop13xx_platform_init(void)
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
 			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
@@ -502,13 +502,13 @@ void __init iop13xx_platform_init(void)
 			dma_cap_set(DMA_MEMCPY, plat_data->cap_mask);
 			dma_cap_set(DMA_XOR, plat_data->cap_mask);
 			dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask);
-			dma_cap_set(DMA_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMSET, plat_data->cap_mask);
 			dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask);
 			dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask);
 			dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask);
 			dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask);
-			dma_cap_set(DMA_PQ_ZERO_SUM, plat_data->cap_mask);
+			dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask);
 			break;
 		}
 	}
diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c
index f72420821619..c0400440e21c 100644
--- a/arch/arm/plat-iop/adma.c
+++ b/arch/arm/plat-iop/adma.c
@@ -198,7 +198,7 @@ static int __init iop3xx_adma_cap_init(void)
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #else
 	dma_cap_set(DMA_XOR, iop3xx_aau_data.cap_mask);
-	dma_cap_set(DMA_ZERO_SUM, iop3xx_aau_data.cap_mask);
+	dma_cap_set(DMA_XOR_VAL, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_MEMSET, iop3xx_aau_data.cap_mask);
 	dma_cap_set(DMA_INTERRUPT, iop3xx_aau_data.cap_mask);
 #endif
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 95fe2c8d6c51..e0580b0ea533 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -222,7 +222,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 }

 /**
- * async_xor_zero_sum - attempt a xor parity check with a dma engine.
+ * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
  * @src_list: array of source pages.  The dest page must be listed as a source
  *	at index zero.  The contents of this array may be overwritten.
@@ -236,13 +236,13 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
  * @cb_param: parameter to pass to the callback routine
  */
 struct dma_async_tx_descriptor *
-async_xor_zero_sum(struct page *dest, struct page **src_list,
+async_xor_val(struct page *dest, struct page **src_list,
 	unsigned int offset, int src_cnt, size_t len,
 	u32 *result, enum async_tx_flags flags,
 	struct dma_async_tx_descriptor *depend_tx,
 	dma_async_tx_callback cb_fn, void *cb_param)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
+	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -261,15 +261,15 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
 						  offset, len, DMA_TO_DEVICE);

-		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
-						      len, result,
-						      dma_prep_flags);
+		tx = device->device_prep_dma_xor_val(chan, dma_src, src_cnt,
+						     len, result,
+						     dma_prep_flags);
 		if (unlikely(!tx)) {
 			async_tx_quiesce(&depend_tx);

 			while (!tx) {
 				dma_async_issue_pending(chan);
-				tx = device->device_prep_dma_zero_sum(chan,
+				tx = device->device_prep_dma_xor_val(chan,
 					dma_src, src_cnt, len, result,
 					dma_prep_flags);
 			}
@@ -296,7 +296,7 @@ async_xor_zero_sum(struct page *dest, struct page **src_list,

 	return tx;
 }
-EXPORT_SYMBOL_GPL(async_xor_zero_sum);
+EXPORT_SYMBOL_GPL(async_xor_val);

 static int __init async_xor_init(void)
 {
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 92438e9dacc3..6781e8f3c064 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -644,8 +644,8 @@ int dma_async_device_register(struct dma_device *device)
 		!device->device_prep_dma_memcpy);
 	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 		!device->device_prep_dma_xor);
-	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
-		!device->device_prep_dma_zero_sum);
+	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
+		!device->device_prep_dma_xor_val);
 	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 		!device->device_prep_dma_memset);
 	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index 2f052265122f..6ff79a672699 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -660,9 +660,9 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
 }

 static struct dma_async_tx_descriptor *
-iop_adma_prep_dma_zero_sum(struct dma_chan *chan, dma_addr_t *dma_src,
-			   unsigned int src_cnt, size_t len, u32 *result,
-			   unsigned long flags)
+iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
+			  unsigned int src_cnt, size_t len, u32 *result,
+			  unsigned long flags)
 {
 	struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan);
 	struct iop_adma_desc_slot *sw_desc, *grp_start;
@@ -906,7 +906,7 @@ out:

 #define IOP_ADMA_NUM_SRC_TEST 4 /* must be <= 15 */
 static int __devinit
-iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
+iop_adma_xor_val_self_test(struct iop_adma_device *device)
 {
 	int i, src_idx;
 	struct page *dest;
@@ -1002,7 +1002,7 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 					PAGE_SIZE, DMA_TO_DEVICE);

 	/* skip zero sum if the capability is not present */
-	if (!dma_has_cap(DMA_ZERO_SUM, dma_chan->device->cap_mask))
+	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;

 	/* zero sum the sources with the destintation page */
@@ -1016,10 +1016,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
 					   zero_sum_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
-					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+				       &zero_sum_result,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1072,10 +1072,10 @@ iop_adma_xor_zero_sum_self_test(struct iop_adma_device *device)
 		dma_srcs[i] = dma_map_page(dma_chan->device->dev,
 					   zero_sum_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
-	tx = iop_adma_prep_dma_zero_sum(dma_chan, dma_srcs,
-					IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
-					&zero_sum_result,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs,
+				       IOP_ADMA_NUM_SRC_TEST + 1, PAGE_SIZE,
+				       &zero_sum_result,
+				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);

 	cookie = iop_adma_tx_submit(tx);
 	iop_adma_issue_pending(dma_chan);
@@ -1192,9 +1192,9 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 		dma_dev->max_xor = iop_adma_get_max_xor();
 		dma_dev->device_prep_dma_xor = iop_adma_prep_dma_xor;
 	}
-	if (dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask))
-		dma_dev->device_prep_dma_zero_sum =
-			iop_adma_prep_dma_zero_sum;
+	if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask))
+		dma_dev->device_prep_dma_xor_val =
+			iop_adma_prep_dma_xor_val;
 	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
 		dma_dev->device_prep_dma_interrupt =
 			iop_adma_prep_dma_interrupt;
@@ -1249,7 +1249,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)

 	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) ||
 	    dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) {
-		ret = iop_adma_xor_zero_sum_self_test(adev);
+		ret = iop_adma_xor_val_self_test(adev);
 		dev_dbg(&pdev->dev, "xor self test returned %d\n", ret);
 		if (ret)
 			goto err_free_iop_chan;
@@ -1259,10 +1259,10 @@ static int __devinit iop_adma_probe(struct platform_device *pdev)
 	  "( %s%s%s%s%s%s%s%s%s%s)\n",
 	  dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "",
 	  dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "",
-	  dma_has_cap(DMA_PQ_ZERO_SUM, dma_dev->cap_mask) ? "pq_zero_sum " : "",
+	  dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "",
 	  dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
 	  dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "",
-	  dma_has_cap(DMA_ZERO_SUM, dma_dev->cap_mask) ? "xor_zero_sum " : "",
+	  dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "",
 	  dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "",
 	  dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "",
 	  dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "",
"xor_val " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3bbc6d647044..f8d2d35ed298 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -854,7 +854,7 @@ static void ops_run_check(struct stripe_head *sh) xor_srcs[count++] = dev->page; } - tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, + tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); atomic_inc(&sh->count); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 5fc2ef8d97fa..513150d8c25b 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -117,7 +117,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, dma_async_tx_callback cb_fn, void *cb_fn_param); struct dma_async_tx_descriptor * -async_xor_zero_sum(struct page *dest, struct page **src_list, +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, u32 *result, enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx, diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 2e2aa3df170c..6768727d00d7 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -55,8 +55,8 @@ enum dma_transaction_type { DMA_PQ_XOR, DMA_DUAL_XOR, DMA_PQ_UPDATE, - DMA_ZERO_SUM, - DMA_PQ_ZERO_SUM, + DMA_XOR_VAL, + DMA_PQ_VAL, DMA_MEMSET, DMA_MEMCPY_CRC32C, DMA_INTERRUPT, @@ -214,7 +214,7 @@ struct dma_async_tx_descriptor { * @device_free_chan_resources: release DMA channel's resources * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation - * @device_prep_dma_zero_sum: prepares a zero_sum operation + * @device_prep_dma_xor_val: prepares a xor validation operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation @@ -243,7 +243,7 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_xor)( struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); - struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)( + struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, u32 *result, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( From a3434660e5b47ab90387b017e311437bb3a88083 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 30 Apr 2009 15:38:01 +0100 Subject: [PATCH 0004/3409] solos: Add some margin-related parameters Signed-off-by: David Woodhouse --- drivers/atm/solos-attrlist.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c index efa2808dd94d..946726399398 100644 --- a/drivers/atm/solos-attrlist.c +++ b/drivers/atm/solos-attrlist.c @@ -62,6 +62,13 @@ SOLOS_ATTR_RW(Defaults) SOLOS_ATTR_RW(LineMode) SOLOS_ATTR_RW(Profile) SOLOS_ATTR_RW(DetectNoise) +SOLOS_ATTR_RW(BisAForceSNRMarginDn) +SOLOS_ATTR_RW(BisMForceSNRMarginDn) +SOLOS_ATTR_RW(BisAMaxMargin) +SOLOS_ATTR_RW(BisMMaxMargin) +SOLOS_ATTR_RW(AnnexAForceSNRMarginDn) +SOLOS_ATTR_RW(AnnexAMaxMargin) +SOLOS_ATTR_RW(AnnexMMaxMargin) SOLOS_ATTR_RO(SupportedAnnexes) SOLOS_ATTR_RO(Status) 
From a3434660e5b47ab90387b017e311437bb3a88083 Mon Sep 17 00:00:00 2001
From: David Woodhouse
Date: Thu, 30 Apr 2009 15:38:01 +0100
Subject: [PATCH 0004/3409] solos: Add some margin-related parameters

Signed-off-by: David Woodhouse
---
 drivers/atm/solos-attrlist.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index efa2808dd94d..946726399398 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -62,6 +62,13 @@ SOLOS_ATTR_RW(Defaults)
 SOLOS_ATTR_RW(LineMode)
 SOLOS_ATTR_RW(Profile)
 SOLOS_ATTR_RW(DetectNoise)
+SOLOS_ATTR_RW(BisAForceSNRMarginDn)
+SOLOS_ATTR_RW(BisMForceSNRMarginDn)
+SOLOS_ATTR_RW(BisAMaxMargin)
+SOLOS_ATTR_RW(BisMMaxMargin)
+SOLOS_ATTR_RW(AnnexAForceSNRMarginDn)
+SOLOS_ATTR_RW(AnnexAMaxMargin)
+SOLOS_ATTR_RW(AnnexMMaxMargin)
 SOLOS_ATTR_RO(SupportedAnnexes)
 SOLOS_ATTR_RO(Status)
 SOLOS_ATTR_RO(TotalStart)

From 69a9ceab83928e743fafbba5b98bd09aed7c0e0d Mon Sep 17 00:00:00 2001
From: Simon Farnsworth
Date: Thu, 7 May 2009 19:43:42 +0100
Subject: [PATCH 0005/3409] solos: Show Interleaving details for ADSL2 and 2+

InterleaveRDn and InterleaveRUp only apply to G.dmt. The equivalents for
ADSL2 and 2+ are BisRDn and BisRUp. In addition, the INPdown and INPup
statuses are useful when trying to track down instability on a line.

Signed-off-by: Simon Farnsworth
Signed-off-by: David Woodhouse
---
 drivers/atm/solos-attrlist.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 946726399398..1a9332e4efe0 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -25,6 +25,10 @@ SOLOS_ATTR_RO(RSCorrectedErrorsUp)
 SOLOS_ATTR_RO(RSUnCorrectedErrorsUp)
 SOLOS_ATTR_RO(InterleaveRDn)
 SOLOS_ATTR_RO(InterleaveRUp)
+SOLOS_ATTR_RO(BisRDn)
+SOLOS_ATTR_RO(BisRUp)
+SOLOS_ATTR_RO(INPdown)
+SOLOS_ATTR_RO(INPup)
 SOLOS_ATTR_RO(ShowtimeStart)
 SOLOS_ATTR_RO(ATURVendor)
 SOLOS_ATTR_RO(ATUCCountry)

From 88ba2aa586c874681c072101287e15d40de7e6e2 Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Thu, 9 Apr 2009 16:16:18 -0700
Subject: [PATCH 0006/3409] async_tx: kill ASYNC_TX_DEP_ACK flag

In support of inter-channel chaining async_tx utilizes an ack flag to
gate whether a dependent operation can be chained to another. While the
flag is not set the chain can be considered open for appending. Setting
the ack flag closes the chain and flags the descriptor for garbage
collection. The ASYNC_TX_DEP_ACK flag essentially means "close the
chain after adding this dependency". Since each operation can only have
one child the api now implicitly sets the ack flag at dependency
submission time. This removes an unnecessary management burden from
clients of the api.

[ Impact: clean up and enforce one dependency per operation ]

Reviewed-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 Documentation/crypto/async-tx-api.txt |  9 ++++-----
 crypto/async_tx/async_memcpy.c        |  2 +-
 crypto/async_tx/async_memset.c        |  2 +-
 crypto/async_tx/async_tx.c            |  4 ++--
 crypto/async_tx/async_xor.c           |  6 ++----
 drivers/md/raid5.c                    | 25 +++++++++++--------------
 include/linux/async_tx.h              |  4 +---
 7 files changed, 22 insertions(+), 30 deletions(-)

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 4af12180d191..76feda8541dc 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -80,8 +80,8 @@ acknowledged by the application before the offload engine driver is allowed
 to recycle (or free) the descriptor.  A descriptor can be acked by one of the
 following methods:
 1/ setting the ASYNC_TX_ACK flag if no child operations are to be submitted
-2/ setting the ASYNC_TX_DEP_ACK flag to acknowledge the parent
-   descriptor of a new operation.
+2/ submitting an unacknowledged descriptor as a dependency to another
+   async_tx call will implicitly set the acknowledged state.
 3/ calling async_tx_ack() on the descriptor.

 3.4 When does the operation execute?
@@ -136,10 +136,9 @@ int run_xor_copy_xor(struct page **xor_srcs,

 	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
 		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len,
-			  ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, tx, NULL, NULL);
 	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_DEP_ACK | ASYNC_TX_ACK,
+		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
 		       tx, complete_xor_copy_xor, NULL);

 	async_tx_issue_pending_all();
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index ddccfb01c416..7117ec6f1b74 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -35,7 +35,7 @@
  * @src: src page
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK,
+ * @flags: ASYNC_TX_ACK
  * @depend_tx: memcpy depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index 5b5eb99bb244..b2f133885b7f 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,7 +35,7 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK
  * @depend_tx: memset depends on the result of this transaction
  * @cb_fn: function to call when the memcpy completes
  * @cb_param: parameter to pass to the callback routine
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 06eb6cc09fef..3766bc3d7d89 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -223,7 +223,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 	if (flags & ASYNC_TX_ACK)
 		async_tx_ack(tx);

-	if (depend_tx && (flags & ASYNC_TX_DEP_ACK))
+	if (depend_tx)
 		async_tx_ack(depend_tx);
 }
 EXPORT_SYMBOL_GPL(async_tx_submit);
@@ -231,7 +231,7 @@ EXPORT_SYMBOL_GPL(async_tx_submit);
 /**
  * async_trigger_callback - schedules the callback function to be run after
  * any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK
  * @depend_tx: 'callback' requires the completion of this transaction
  * @cb_fn: function to call after depend_tx completes
  * @cb_param: parameter to pass to the callback routine
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index e0580b0ea533..3cc5dc763b54 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -105,7 +105,6 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 				_cb_param);

 		depend_tx = tx;
-		flags |= ASYNC_TX_DEP_ACK;

 		if (src_cnt > xor_src_cnt) {
 			/* drop completed sources */
@@ -168,8 +167,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
  * @offset: offset in pages to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST,
- *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
@@ -230,7 +228,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
+ * @flags: ASYNC_TX_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f8d2d35ed298..0ef5362c8d02 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -525,14 +525,12 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			bio_page = bio_iovec_idx(bio, i)->bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
-					b_offset, clen,
-					ASYNC_TX_DEP_ACK,
-					tx, NULL, NULL);
+					b_offset, clen, 0,
+					tx, NULL, NULL);
 			else
 				tx = async_memcpy(bio_page, page, b_offset,
-					page_offset, clen,
-					ASYNC_TX_DEP_ACK,
-					tx, NULL, NULL);
+					page_offset, clen, 0,
+					tx, NULL, NULL);
 		}
 		if (clen < len) /* hit end of page */
 			break;
@@ -615,8 +613,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 	}

 	atomic_inc(&sh->count);
-	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
-		ops_complete_biofill, sh);
+	async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh);
 }

 static void ops_complete_compute5(void *stripe_head_ref)
@@ -701,8 +698,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	}

 	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-		ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
-		ops_complete_prexor, sh);
+		ASYNC_TX_XOR_DROP_DST, tx,
+		ops_complete_prexor, sh);

 	return tx;
 }
@@ -809,7 +806,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
 	 * for the synchronous xor case
 	 */
-	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
+	flags = ASYNC_TX_ACK |
 		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

 	atomic_inc(&sh->count);
@@ -858,7 +855,7 @@ static void ops_run_check(struct stripe_head *sh)
 		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

 	atomic_inc(&sh->count);
-	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
+	tx = async_trigger_callback(ASYNC_TX_ACK, tx,
 		ops_complete_check, sh);
 }
@@ -2687,8 +2684,8 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,

 			/* place all the copies on one channel */
 			tx = async_memcpy(sh2->dev[dd_idx].page,
-				sh->dev[i].page, 0, 0, STRIPE_SIZE,
-				ASYNC_TX_DEP_ACK, tx, NULL, NULL);
+				sh->dev[i].page, 0, 0, STRIPE_SIZE,
+				0, tx, NULL, NULL);

 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 513150d8c25b..9f14cd540cd2 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -58,13 +58,11 @@ struct dma_chan_ref {
 *	array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 *	dependency chain
- * @ASYNC_TX_DEP_ACK: ack the dependency descriptor.  Useful for chaining.
 */
 enum async_tx_flags {
 	ASYNC_TX_XOR_ZERO_DST	 = (1 << 0),
 	ASYNC_TX_XOR_DROP_DST	 = (1 << 1),
-	ASYNC_TX_ACK		 = (1 << 3),
-	ASYNC_TX_DEP_ACK	 = (1 << 4),
+	ASYNC_TX_ACK		 = (1 << 2),
 };

 #ifdef CONFIG_DMA_ENGINE
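
What client code looks like once the flag is gone, as a hedged sketch
(page_a, page_b and len are placeholders; the signatures are the ones current
as of this patch, before the struct conversion that follows):

/* The one-child rule this patch enforces: passing 'parent' as the
 * dependency of the memcpy acks it implicitly. */
struct dma_async_tx_descriptor *parent, *child;

parent = async_memset(page_a, 0, 0, len, 0, NULL, NULL, NULL);
child = async_memcpy(page_b, page_a, 0, 0, len, 0, parent, NULL, NULL);
/* 'parent' is now acked and may be recycled by the driver; further
 * operations must chain off 'child' instead. */
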
From a08abd8ca890a377521d65d493d174bebcaf694b Mon Sep 17 00:00:00 2001
From: Dan Williams
Date: Wed, 3 Jun 2009 11:43:59 -0700
Subject: [PATCH 0007/3409] async_tx: structify submission arguments, add
 scribble

Prepare the api for the arrival of a new parameter, 'scribble'. This
will allow callers to identify scratchpad memory for dma address or
page address conversions. As this adds yet another parameter, take this
opportunity to convert the common submission parameters (flags,
dependency, callback, and callback argument) into an object that is
passed by reference. Also, take this opportunity to fix up the
kerneldoc and add notes about the relevant ASYNC_TX_* flags for each
routine.

[ Impact: moves api pass-by-value parameters to a pass-by-reference struct ]

Signed-off-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 Documentation/crypto/async-tx-api.txt |   6 +-
 crypto/async_tx/async_memcpy.c        |  26 +++---
 crypto/async_tx/async_memset.c        |  25 +++---
 crypto/async_tx/async_tx.c            |  51 ++++++-----
 crypto/async_tx/async_xor.c           | 123 +++++++++++++-------------
 drivers/md/raid5.c                    |  59 ++++++------
 include/linux/async_tx.h              |  84 +++++++++++------
 7 files changed, 200 insertions(+), 174 deletions(-)

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 76feda8541dc..dfe0475f7919 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -54,11 +54,7 @@ features surfaced as a result:

 3.1 General format of the API:
 struct dma_async_tx_descriptor *
-async_<operation>(<op specific parameters>,
-	      enum async_tx_flags flags,
-	      struct dma_async_tx_descriptor *dependency,
-	      dma_async_tx_callback callback_routine,
-	      void *callback_parameter);
+async_<operation>(<op specific parameters>, struct async_submit_ctl *submit)

 3.2 Supported operations:
 memcpy       - memory copy between a source and a destination buffer
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 7117ec6f1b74..89e05556f3df 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -33,28 +33,28 @@
  * async_memcpy - attempt to copy memory with a dma engine.
  * @dest: destination page
  * @src: src page
- * @offset: offset in pages to start transaction
+ * @dest_offset: offset into 'dest' to start transaction
+ * @src_offset: offset into 'src' to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memcpy depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
 */
 struct dma_async_tx_descriptor *
 async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
-	     unsigned int src_offset, size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     unsigned int src_offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMCPY,
 						      &dest, 1, &src, 1, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;

 	if (device) {
 		dma_addr_t dma_dest, dma_src;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;

+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
 					DMA_FROM_DEVICE);
@@ -67,13 +67,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,

 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		void *dest_buf, *src_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);

 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);

 		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
 		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
@@ -83,7 +83,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		kunmap_atomic(dest_buf, KM_USER0);
 		kunmap_atomic(src_buf, KM_USER1);

-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}

 	return tx;
diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c
index b2f133885b7f..c14437238f4c 100644
--- a/crypto/async_tx/async_memset.c
+++ b/crypto/async_tx/async_memset.c
@@ -35,26 +35,23 @@
  * @val: fill value
  * @offset: offset in pages to start transaction
  * @len: length in bytes
- * @flags: ASYNC_TX_ACK
- * @depend_tx: memset depends on the result of this transaction
- * @cb_fn: function to call when the memcpy completes
- * @cb_param: parameter to pass to the callback routine
+ *
+ * honored flags: ASYNC_TX_ACK
 */
 struct dma_async_tx_descriptor *
-async_memset(struct page *dest, int val, unsigned int offset,
-	     size_t len, enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+async_memset(struct page *dest, int val, unsigned int offset, size_t len,
+	     struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_MEMSET,
 						      &dest, 1, NULL, 0, len);
 	struct dma_device *device = chan ? chan->device : NULL;
 	struct dma_async_tx_descriptor *tx = NULL;

 	if (device) {
 		dma_addr_t dma_dest;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;

+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		dma_dest = dma_map_page(device->dev, dest, offset, len,
 					DMA_FROM_DEVICE);
@@ -64,19 +61,19 @@ async_memset(struct page *dest, int val, unsigned int offset,

 	if (tx) {
 		pr_debug("%s: (async) len: %zu\n", __func__, len);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else { /* run the memset synchronously */
 		void *dest_buf;
 		pr_debug("%s: (sync) len: %zu\n", __func__, len);

-		dest_buf = (void *) (((char *) page_address(dest)) + offset);
+		dest_buf = page_address(dest) + offset;

 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);

 		memset(dest_buf, val, len);

-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}

 	return tx;
diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c
index 3766bc3d7d89..802a5ce437d9 100644
--- a/crypto/async_tx/async_tx.c
+++ b/crypto/async_tx/async_tx.c
@@ -45,13 +45,15 @@ static void __exit async_tx_exit(void)
 /**
  * __async_tx_find_channel - find a channel to carry out the operation or let
  *	the transaction execute synchronously
- * @depend_tx: transaction dependency
+ * @submit: transaction dependency and submission modifiers
  * @tx_type: transaction type
  */
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type)
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type)
 {
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
 	/* see if we can keep the chain on one channel */
 	if (depend_tx &&
 	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
@@ -144,13 +146,14 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,

 /**
- * submit_disposition - while holding depend_tx->lock we must avoid submitting
- *	new operations to prevent a circular locking dependency with
- *	drivers that already hold a channel lock when calling
- *	async_tx_run_dependencies.
+ * submit_disposition - flags for routing an incoming operation
  * @ASYNC_TX_SUBMITTED: we were able to append the new operation under the lock
  * @ASYNC_TX_CHANNEL_SWITCH: when the lock is dropped schedule a channel switch
  * @ASYNC_TX_DIRECT_SUBMIT: when the lock is dropped submit directly
+ *
+ * while holding depend_tx->lock we must avoid submitting new operations
+ * to prevent a circular locking dependency with drivers that already
+ * hold a channel lock when calling async_tx_run_dependencies.
 */
 enum submit_disposition {
 	ASYNC_TX_SUBMITTED,
@@ -160,11 +163,12 @@ enum submit_disposition {

 void
 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+		struct async_submit_ctl *submit)
 {
-	tx->callback = cb_fn;
-	tx->callback_param = cb_param;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;
+
+	tx->callback = submit->cb_fn;
+	tx->callback_param = submit->cb_param;

 	if (depend_tx) {
 		enum submit_disposition s;
@@ -220,7 +224,7 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 		tx->tx_submit(tx);
 	}

-	if (flags & ASYNC_TX_ACK)
+	if (submit->flags & ASYNC_TX_ACK)
 		async_tx_ack(tx);

 	if (depend_tx)
@@ -229,21 +233,20 @@ async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
 EXPORT_SYMBOL_GPL(async_tx_submit);

 /**
- * async_trigger_callback - schedules the callback function to be run after
- *	any dependent operations have been completed.
- * @flags: ASYNC_TX_ACK
- * @depend_tx: 'callback' requires the completion of this transaction
- * @cb_fn: function to call after depend_tx completes
- * @cb_param: parameter to pass to the callback routine
+ * async_trigger_callback - schedules the callback function to be run
+ * @submit: submission and completion parameters
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * The callback is run after any dependent operations have completed.
 */
 struct dma_async_tx_descriptor *
-async_trigger_callback(enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+async_trigger_callback(struct async_submit_ctl *submit)
 {
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

 	if (depend_tx) {
 		chan = depend_tx->chan;
@@ -262,14 +265,14 @@ async_trigger_callback(enum async_tx_flags flags,

 	if (tx) {
 		pr_debug("%s: (async)\n", __func__);
-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
 		pr_debug("%s: (sync)\n", __func__);

 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);

-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
 	}

 	return tx;
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 3cc5dc763b54..691fa98a18c4 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -34,18 +34,16 @@
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	     unsigned int offset, int src_cnt, size_t len,
-	     enum async_tx_flags flags,
-	     struct dma_async_tx_descriptor *depend_tx,
-	     dma_async_tx_callback cb_fn, void *cb_param)
+	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
 	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx = NULL;
 	int src_off = 0;
 	int i;
-	dma_async_tx_callback _cb_fn;
-	void *_cb_param;
-	enum async_tx_flags async_flags;
+	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
+	void *cb_param_orig = submit->cb_param;
+	enum async_tx_flags flags_orig = submit->flags;
 	enum dma_ctrl_flags dma_flags;
 	int xor_src_cnt;
 	dma_addr_t dma_dest;
@@ -63,7 +61,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 	}

 	while (src_cnt) {
-		async_flags = flags;
+		submit->flags = flags_orig;
 		dma_flags = 0;
 		xor_src_cnt = min(src_cnt, dma->max_xor);
 		/* if we are submitting additional xors, leave the chain open,
@@ -71,15 +69,15 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 		 * buffer mapped
 		 */
 		if (src_cnt > xor_src_cnt) {
-			async_flags &= ~ASYNC_TX_ACK;
+			submit->flags &= ~ASYNC_TX_ACK;
 			dma_flags = DMA_COMPL_SKIP_DEST_UNMAP;
-			_cb_fn = NULL;
-			_cb_param = NULL;
+			submit->cb_fn = NULL;
+			submit->cb_param = NULL;
 		} else {
-			_cb_fn = cb_fn;
-			_cb_param = cb_param;
+			submit->cb_fn = cb_fn_orig;
+			submit->cb_param = cb_param_orig;
 		}
-		if (_cb_fn)
+		if (submit->cb_fn)
 			dma_flags |= DMA_PREP_INTERRUPT;

 		/* Since we have clobbered the src_list we are committed
@@ -90,7 +88,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 					      xor_src_cnt, len, dma_flags);

 		if (unlikely(!tx))
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);

 		/* spin wait for the preceeding transactions to complete */
 		while (unlikely(!tx)) {
@@ -101,10 +99,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
 						      dma_flags);
 		}

-		async_tx_submit(chan, tx, async_flags, depend_tx, _cb_fn,
-				_cb_param);
-
-		depend_tx = tx;
+		async_tx_submit(chan, tx, submit);
+		submit->depend_tx = tx;

 		if (src_cnt > xor_src_cnt) {
 			/* drop completed sources */
@@ -123,8 +119,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,

 static void
 do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	    int src_cnt, size_t len, enum async_tx_flags flags,
-	    dma_async_tx_callback cb_fn, void *cb_param)
+	    int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
 	int i;
 	int xor_src_cnt;
@@ -139,7 +134,7 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,

 	/* set destination address */
 	dest_buf = page_address(dest) + offset;

-	if (flags & ASYNC_TX_XOR_ZERO_DST)
+	if (submit->flags & ASYNC_TX_XOR_ZERO_DST)
 		memset(dest_buf, 0, len);

 	while (src_cnt > 0) {
@@ -152,33 +147,35 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		src_off += xor_src_cnt;
 	}

-	async_tx_sync_epilog(cb_fn, cb_param);
+	async_tx_sync_epilog(submit);
 }

 /**
  * async_xor - attempt to xor a set of blocks with a dma engine.
- *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
- *	flag must be set to not include dest data in the calculation.  The
- *	assumption with dma eninges is that they only use the destination
- *	buffer as a source when it is explicity specified in the source list.
  * @dest: destination page
- * @src_list: array of source pages (if the dest is also a source it must be
- *	at index zero).  The contents of this array may be overwritten.
- * @offset: offset in pages to start transaction
+ * @src_list: array of source pages
+ * @offset: common src/dst offset to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
- * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DEST, ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
+ *
+ * xor_blocks always uses the dest as a source so the
+ * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
+ * the calculation.  The assumption with dma eninges is that they only
+ * use the destination buffer as a source when it is explicity specified
+ * in the source list.
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
 */
 struct dma_async_tx_descriptor *
 async_xor(struct page *dest, struct page **src_list, unsigned int offset,
-	  int src_cnt, size_t len, enum async_tx_flags flags,
-	  struct dma_async_tx_descriptor *depend_tx,
-	  dma_async_tx_callback cb_fn, void *cb_param)
+	  int src_cnt, size_t len, struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	BUG_ON(src_cnt <= 1);
@@ -188,7 +185,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		pr_debug("%s (async): len: %zu\n", __func__, len);

 		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    flags, depend_tx, cb_fn, cb_param);
+				    submit);
 	} else {
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n", __func__, len);
@@ -196,16 +193,15 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 		/* in the sync case the dest is an implied source
 		 * (assumes the dest is the first source)
 		 */
-		if (flags & ASYNC_TX_XOR_DROP_DST) {
+		if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
 			src_cnt--;
 			src_list++;
 		}

 		/* wait for any prerequisite operations */
-		async_tx_quiesce(&depend_tx);
+		async_tx_quiesce(&submit->depend_tx);

-		do_sync_xor(dest, src_list, offset, src_cnt, len,
-			    flags, cb_fn, cb_param);
+		do_sync_xor(dest, src_list, offset, src_cnt, len, submit);

 		return NULL;
 	}
@@ -222,25 +218,25 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
- * @src_list: array of source pages.  The dest page must be listed as a source
- *	at index zero.  The contents of this array may be overwritten.
+ * @src_list: array of source pages
  * @offset: offset in pages to start transaction
  * @src_cnt: number of source pages
  * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
- * @flags: ASYNC_TX_ACK
- * @depend_tx: xor depends on the result of this transaction.
- * @cb_fn: function to call when the xor completes
- * @cb_param: parameter to pass to the callback routine
+ * @submit: submission / completion modifiers
+ *
+ * honored flags: ASYNC_TX_ACK
+ *
+ * src_list note: if the dest is also a source it must be at index zero.
+ * The contents of this array will be overwritten if a scribble region
+ * is not specified.
 */
 struct dma_async_tx_descriptor *
-async_xor_val(struct page *dest, struct page **src_list,
-	unsigned int offset, int src_cnt, size_t len,
-	u32 *result, enum async_tx_flags flags,
-	struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_param)
+async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
+	      int src_cnt, size_t len, u32 *result,
+	      struct async_submit_ctl *submit)
 {
-	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR_VAL,
+	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
 						      &dest, 1, src_list,
 						      src_cnt, len);
 	struct dma_device *device = chan ? chan->device : NULL;
@@ -250,11 +246,12 @@ async_xor_val(struct page *dest, struct page **src_list,

 	if (device && src_cnt <= device->max_xor) {
 		dma_addr_t *dma_src = (dma_addr_t *) src_list;
-		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
+		unsigned long dma_prep_flags;
 		int i;

 		pr_debug("%s: (async) len: %zu\n", __func__, len);

+		dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
 		for (i = 0; i < src_cnt; i++)
 			dma_src[i] = dma_map_page(device->dev, src_list[i],
 						  offset, len, DMA_TO_DEVICE);
@@ -263,7 +260,7 @@ async_xor_val(struct page *dest, struct page **src_list,
 						     len, result,
 						     dma_prep_flags);
 		if (unlikely(!tx)) {
-			async_tx_quiesce(&depend_tx);
+			async_tx_quiesce(&submit->depend_tx);

 			while (!tx) {
 				dma_async_issue_pending(chan);
@@ -273,23 +270,23 @@ async_xor_val(struct page *dest, struct page **src_list,
 			}
 		}

-		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
+		async_tx_submit(chan, tx, submit);
 	} else {
-		unsigned long xor_flags = flags;
+		enum async_tx_flags flags_orig = submit->flags;

 		pr_debug("%s: (sync) len: %zu\n", __func__, len);

-		xor_flags |= ASYNC_TX_XOR_DROP_DST;
-		xor_flags &= ~ASYNC_TX_ACK;
+		submit->flags |= ASYNC_TX_XOR_DROP_DST;
+		submit->flags &= ~ASYNC_TX_ACK;

-		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
-			depend_tx, NULL, NULL);
+		tx = async_xor(dest, src_list, offset, src_cnt, len, submit);

 		async_tx_quiesce(&tx);

 		*result = page_is_zero(dest, offset, len) ? 0 : 1;

-		async_tx_sync_epilog(cb_fn, cb_param);
+		async_tx_sync_epilog(submit);
+		submit->flags = flags_orig;
 	}

 	return tx;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 0ef5362c8d02..e1920f23579f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -499,11 +499,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	struct page *bio_page;
 	int i;
 	int page_offset;
+	struct async_submit_ctl submit;

 	if (bio->bi_sector >= sector)
 		page_offset = (signed)(bio->bi_sector - sector) * 512;
 	else
 		page_offset = (signed)(sector - bio->bi_sector) * -512;
+
+	init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
 	bio_for_each_segment(bvl, bio, i) {
 		int len = bio_iovec_idx(bio, i)->bv_len;
 		int clen;
@@ -525,13 +528,14 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 			bio_page = bio_iovec_idx(bio, i)->bv_page;
 			if (frombio)
 				tx = async_memcpy(page, bio_page, page_offset,
-						  b_offset, clen, 0,
-						  tx, NULL, NULL);
+						  b_offset, clen, &submit);
 			else
 				tx = async_memcpy(bio_page, page, b_offset,
-						  page_offset, clen, 0,
-						  tx, NULL, NULL);
+						  page_offset, clen, &submit);
 		}
+		/* chain the operations */
+		submit.depend_tx = tx;
+
 		if (clen < len) /* hit end of page */
 			break;
 		page_offset += len;
@@ -590,6 +594,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 {
 	struct dma_async_tx_descriptor *tx = NULL;
 	raid5_conf_t *conf = sh->raid_conf;
+	struct async_submit_ctl submit;
 	int i;

 	pr_debug("%s: stripe %llu\n", __func__,
@@ -613,7 +618,8 @@ static void ops_run_biofill(struct stripe_head *sh)
 	}

 	atomic_inc(&sh->count);
-	async_trigger_callback(ASYNC_TX_ACK, tx, ops_complete_biofill, sh);
+	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
+	async_trigger_callback(&submit);
 }

 static void ops_complete_compute5(void *stripe_head_ref)
@@ -645,6 +651,7 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
 	struct page *xor_dest = tgt->page;
 	int count = 0;
 	struct dma_async_tx_descriptor *tx;
+	struct async_submit_ctl submit;
 	int i;

 	pr_debug("%s: stripe %llu block: %d\n",
@@ -657,13 +664,12 @@
 static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)

 	atomic_inc(&sh->count);

+	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
+			  ops_complete_compute5, sh, NULL);
 	if (unlikely(count == 1))
-		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-				  0, NULL, ops_complete_compute5, sh);
+		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
 	else
-		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			       ASYNC_TX_XOR_ZERO_DST, NULL,
-			       ops_complete_compute5, sh);
+		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

 	return tx;
 }
@@ -683,6 +689,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
 	int count = 0, pd_idx = sh->pd_idx, i;
+	struct async_submit_ctl submit;

 	/* existing parity data subtracted */
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -697,9 +704,9 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			xor_srcs[count++] = dev->page;
 	}

-	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-		       ASYNC_TX_XOR_DROP_DST, tx,
-		       ops_complete_prexor, sh);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx,
+			  ops_complete_prexor, sh, NULL);
+	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

 	return tx;
 }
@@ -772,7 +779,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 	/* kernel stack size limits the total number of disks */
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
-
+	struct async_submit_ctl submit;
 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest;
 	int prexor = 0;
@@ -811,13 +818,11 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)

 	atomic_inc(&sh->count);

-	if (unlikely(count == 1)) {
-		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
-		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
-				  flags, tx, ops_complete_postxor, sh);
-	} else
-		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			       flags, tx, ops_complete_postxor, sh);
+	init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, NULL);
+	if (unlikely(count == 1))
+		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
+	else
+		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);
 }

 static void ops_complete_check(void *stripe_head_ref)
@@ -838,6 +843,7 @@ static void ops_run_check(struct stripe_head *sh)
 	int disks = sh->disks;
 	struct page *xor_srcs[disks];
 	struct dma_async_tx_descriptor *tx;
+	struct async_submit_ctl submit;

 	int count = 0, pd_idx = sh->pd_idx, i;
 	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
@@ -851,12 +857,13 @@ static void ops_run_check(struct stripe_head *sh)
 			xor_srcs[count++] = dev->page;
 	}

+	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
 	tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
-			   &sh->ops.zero_sum_result, 0, NULL, NULL, NULL);
+			   &sh->ops.zero_sum_result, &submit);

 	atomic_inc(&sh->count);
-	tx = async_trigger_callback(ASYNC_TX_ACK, tx,
-				    ops_complete_check, sh);
+	init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL);
+	tx = async_trigger_callback(&submit);
 }

 static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
@@ -2664,6 +2671,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 		if (i != sh->pd_idx && i != sh->qd_idx) {
 			int dd_idx, j;
 			struct stripe_head *sh2;
+			struct async_submit_ctl submit;

 			sector_t bn =
 compute_blocknr(sh, i, 1);
 			sector_t s = raid5_compute_sector(conf, bn, 0,
@@ -2683,9 +2691,10 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
 			}

 			/* place all the copies on one channel */
+			init_async_submit(&submit, 0, tx, NULL, NULL, NULL);
 			tx = async_memcpy(sh2->dev[dd_idx].page,
 					  sh->dev[i].page, 0, 0, STRIPE_SIZE,
-					  0, tx, NULL, NULL);
+					  &submit);

 			set_bit(R5_Expanded, &sh2->dev[dd_idx].flags);
 			set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags);
diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h
index 9f14cd540cd2..00cfb637ddf2 100644
--- a/include/linux/async_tx.h
+++ b/include/linux/async_tx.h
@@ -65,6 +65,22 @@ enum async_tx_flags {
 	ASYNC_TX_ACK		 = (1 << 2),
 };

+/**
+ * struct async_submit_ctl - async_tx submission/completion modifiers
+ * @flags: submission modifiers
+ * @depend_tx: parent dependency of the current operation being submitted
+ * @cb_fn: callback routine to run at operation completion
+ * @cb_param: parameter for the callback routine
+ * @scribble: caller provided space for dma/page address conversions
+ */
+struct async_submit_ctl {
+	enum async_tx_flags flags;
+	struct dma_async_tx_descriptor *depend_tx;
+	dma_async_tx_callback cb_fn;
+	void *cb_param;
+	void *scribble;
+};
+
 #ifdef CONFIG_DMA_ENGINE
 #define async_tx_issue_pending_all dma_issue_pending_all
 #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
@@ -73,8 +89,8 @@ enum async_tx_flags {
 #define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
 	__async_tx_find_channel(dep, type)
 struct dma_chan *
-__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type);
+__async_tx_find_channel(struct async_submit_ctl *submit,
+			enum dma_transaction_type tx_type);
 #endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
 #else
 static inline void async_tx_issue_pending_all(void)
@@ -83,9 +99,10 @@ static inline void async_tx_issue_pending_all(void)
 }

 static inline struct dma_chan *
-async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
-	enum dma_transaction_type tx_type, struct page **dst, int dst_count,
-	struct page **src, int src_count, size_t len)
+async_tx_find_channel(struct async_submit_ctl *submit,
+		      enum dma_transaction_type tx_type, struct page **dst,
+		      int dst_count, struct page **src, int src_count,
+		      size_t len)
 {
 	return NULL;
 }
@@ -97,46 +114,53 @@ async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 * @cb_fn_param: parameter to pass to the callback routine
 */
 static inline void
-async_tx_sync_epilog(dma_async_tx_callback cb_fn, void *cb_fn_param)
+async_tx_sync_epilog(struct async_submit_ctl *submit)
 {
-	if (cb_fn)
-		cb_fn(cb_fn_param);
+	if (submit->cb_fn)
+		submit->cb_fn(submit->cb_param);
 }

-void
-async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
-	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
-	dma_async_tx_callback cb_fn, void *cb_fn_param);
+typedef union {
+	unsigned long addr;
+	struct page *page;
+	dma_addr_t dma;
+} addr_conv_t;
+
+static inline void
+init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
+		  struct dma_async_tx_descriptor *tx,
+		  dma_async_tx_callback cb_fn, void *cb_param,
+		  addr_conv_t *scribble)
+{
+	args->flags = flags;
+	args->depend_tx = tx;
+	args->cb_fn = cb_fn;
+	args->cb_param = cb_param;
+	args->scribble = scribble;
+}
+
+void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
+		     struct async_submit_ctl *submit);

 struct dma_async_tx_descriptor *
 async_xor(struct
page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + int src_cnt, size_t len, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * -async_xor_val(struct page *dest, struct page **src_list, - unsigned int offset, int src_cnt, size_t len, - u32 *result, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, + int src_cnt, size_t len, u32 *result, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, - unsigned int src_offset, size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + unsigned int src_offset, size_t len, + struct async_submit_ctl *submit); struct dma_async_tx_descriptor * async_memset(struct page *dest, int val, unsigned int offset, - size_t len, enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); + size_t len, struct async_submit_ctl *submit); -struct dma_async_tx_descriptor * -async_trigger_callback(enum async_tx_flags flags, - struct dma_async_tx_descriptor *depend_tx, - dma_async_tx_callback cb_fn, void *cb_fn_param); +struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ From 04ce9ab385dc97eb55299d533cd3af79b8fc7529 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 3 Jun 2009 14:22:28 -0700 Subject: [PATCH 0008/3409] async_xor: permit callers to pass in a 'dma/page scribble' region async_xor() needs space to perform dma and page address conversions. In most cases the code can simply reuse the struct page * array because the size of the native pointer matches the size of a dma/page address. In order to support archs where sizeof(dma_addr_t) is larger than sizeof(struct page *), or to preserve the input parameters, we utilize a memory region passed in by the caller. Since the code is now prepared to handle the case where it cannot perform address conversions on the stack, we no longer need the !HIGHMEM64G dependency in drivers/dma/Kconfig. [ Impact: don't clobber input buffers for address conversions ] Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 43 ++++++++++++------- crypto/async_tx/async_xor.c | 60 +++++++++++++-------------- drivers/dma/Kconfig | 2 +- 3 files changed, 58 insertions(+), 47 deletions(-) diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index dfe0475f7919..6b15e488c0e7 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -115,29 +115,42 @@ of an operation. 
Perform a xor->copy->xor operation where each operation depends on the
result from the previous operation:

-void complete_xor_copy_xor(void *param)
+void callback(void *param)
 {
-	printk("complete\n");
+	struct completion *cmp = param;
+
+	complete(cmp);
 }

-int run_xor_copy_xor(struct page **xor_srcs,
-		     int xor_src_cnt,
-		     struct page *xor_dest,
-		     size_t xor_len,
-		     struct page *copy_src,
-		     struct page *copy_dest,
-		     size_t copy_len)
+void run_xor_copy_xor(struct page **xor_srcs,
+		      int xor_src_cnt,
+		      struct page *xor_dest,
+		      size_t xor_len,
+		      struct page *copy_src,
+		      struct page *copy_dest,
+		      size_t copy_len)
 {
 	struct dma_async_tx_descriptor *tx;
+	addr_conv_t addr_conv[xor_src_cnt];
+	struct async_submit_ctl submit;
+	struct completion cmp;

-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL);
-	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, tx, NULL, NULL);
-	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len,
-		       ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK,
-		       tx, complete_xor_copy_xor, NULL);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, NULL, NULL, NULL,
+			  addr_conv);
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);
+
+	submit.depend_tx = tx;
+	tx = async_memcpy(copy_dest, copy_src, 0, 0, copy_len, &submit);
+
+	init_completion(&cmp);
+	init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST | ASYNC_TX_ACK, tx,
+			  callback, &cmp, addr_conv);
+	tx = async_xor(xor_dest, xor_srcs, 0, xor_src_cnt, xor_len, &submit);

 	async_tx_issue_pending_all();
+
+	wait_for_completion(&cmp);
 }

See include/linux/async_tx.h for more information on the flags.  See the

diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
index 691fa98a18c4..1e96c4df7061 100644
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -33,11 +33,10 @@
 /* do_async_xor - dma map the pages and perform the xor with an engine */
 static __async_inline struct dma_async_tx_descriptor *
 do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list,
-	     unsigned int offset, int src_cnt, size_t len,
+	     unsigned int offset, int src_cnt, size_t len, dma_addr_t *dma_src,
 	     struct async_submit_ctl *submit)
 {
 	struct dma_device *dma = chan->device;
-	dma_addr_t *dma_src = (dma_addr_t *) src_list;
 	struct dma_async_tx_descriptor *tx = NULL;
 	int src_off = 0;
 	int i;
@@ -125,9 +124,14 @@ do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	int xor_src_cnt;
 	int src_off = 0;
 	void *dest_buf;
-	void **srcs = (void **) src_list;
+	void **srcs;
 
-	/* reuse the 'src_list' array to convert to buffer pointers */
+	if (submit->scribble)
+		srcs = submit->scribble;
+	else
+		srcs = (void **) src_list;
+
+	/* convert to buffer pointers */
 	for (i = 0; i < src_cnt; i++)
 		srcs[i] = page_address(src_list[i]) + offset;
 
@@ -178,17 +182,26 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
 						      &dest, 1, src_list,
 						      src_cnt, len);
+	dma_addr_t *dma_src = NULL;
+
 	BUG_ON(src_cnt <= 1);
 
-	if (chan) {
+	if (submit->scribble)
+		dma_src = submit->scribble;
+	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
+		dma_src = (dma_addr_t *) src_list;
+
+	if (dma_src && chan) {
 		/* run the xor asynchronously */
 		pr_debug("%s (async): len: %zu\n", __func__, len);
 
 		return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
-				    submit);
+				    dma_src, submit);
 	} else {
 		/* run the xor synchronously */
 		pr_debug("%s (sync): len: %zu\n",
__func__, len); + WARN_ONCE(chan, "%s: no space for dma address conversion\n", + __func__); /* in the sync case the dest is an implied source * (assumes the dest is the first source) @@ -241,11 +254,16 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; + dma_addr_t *dma_src = NULL; BUG_ON(src_cnt <= 1); - if (device && src_cnt <= device->max_xor) { - dma_addr_t *dma_src = (dma_addr_t *) src_list; + if (submit->scribble) + dma_src = submit->scribble; + else if (sizeof(dma_addr_t) <= sizeof(struct page *)) + dma_src = (dma_addr_t *) src_list; + + if (dma_src && device && src_cnt <= device->max_xor) { unsigned long dma_prep_flags; int i; @@ -275,6 +293,9 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, enum async_tx_flags flags_orig = submit->flags; pr_debug("%s: (sync) len: %zu\n", __func__, len); + WARN_ONCE(device && src_cnt <= device->max_xor, + "%s: no space for dma address conversion\n", + __func__); submit->flags |= ASYNC_TX_XOR_DROP_DST; submit->flags &= ~ASYNC_TX_ACK; @@ -293,29 +314,6 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, } EXPORT_SYMBOL_GPL(async_xor_val); -static int __init async_xor_init(void) -{ - #ifdef CONFIG_DMA_ENGINE - /* To conserve stack space the input src_list (array of page pointers) - * is reused to hold the array of dma addresses passed to the driver. - * This conversion is only possible when dma_addr_t is less than the - * the size of a pointer. HIGHMEM64G is known to violate this - * assumption. - */ - BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *)); - #endif - - return 0; -} - -static void __exit async_xor_exit(void) -{ - do { } while (0); -} - -module_init(async_xor_init); -module_exit(async_xor_exit); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api"); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 3b3c01b6f1ee..912a51b5cbd3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -4,7 +4,7 @@ menuconfig DMADEVICES bool "DMA Engine support" - depends on !HIGHMEM64G && HAS_DMA + depends on HAS_DMA help DMA engines can do asynchronous data transfers without involving the host CPU. Currently, this framework can be From b294a290d24d1196d68399cc3a9b8c50bfb55abd Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Tue, 30 Jun 2009 02:13:01 -0400 Subject: [PATCH 0009/3409] Revert "power: remove POWER_SUPPLY_PROP_CAPACITY_LEVEL" This reverts commit 8efe444038a205e79b38b7ad03878824901849a8 and 4cbc76eadf56399cd11fb736b33c53aec9caab8c. Richard@laptop.org was apparently using CAPACITY_LEVEL for debugging battery/EC problems, and was upset that it was removed. This readds it. Conflicts: Documentation/power_supply_class.txt Signed-off-by: Andres Salomon Signed-off-by: Anton Vorontsov --- Documentation/power/power_supply_class.txt | 2 ++ drivers/power/olpc_battery.c | 9 +++++++++ drivers/power/power_supply_sysfs.c | 6 ++++++ include/linux/power_supply.h | 10 ++++++++++ 4 files changed, 27 insertions(+) diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt index c6cd4956047c..709d95571d7b 100644 --- a/Documentation/power/power_supply_class.txt +++ b/Documentation/power/power_supply_class.txt @@ -108,6 +108,8 @@ relative, time-based measurements. ENERGY_FULL, ENERGY_EMPTY - same as above but for energy. 
CAPACITY - capacity in percents. +CAPACITY_LEVEL - capacity level. This corresponds to +POWER_SUPPLY_CAPACITY_LEVEL_*. TEMP - temperature of the power supply. TEMP_AMBIENT - ambient temperature. diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 58e419299cd6..3a589df09376 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -276,6 +276,14 @@ static int olpc_bat_get_property(struct power_supply *psy, return ret; val->intval = ec_byte; break; + case POWER_SUPPLY_PROP_CAPACITY_LEVEL: + if (ec_byte & BAT_STAT_FULL) + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + else if (ec_byte & BAT_STAT_LOW) + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_LOW; + else + val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + break; case POWER_SUPPLY_PROP_TEMP: ret = olpc_ec_cmd(EC_BAT_TEMP, NULL, 0, (void *)&ec_word, 2); if (ret) @@ -321,6 +329,7 @@ static enum power_supply_property olpc_bat_props[] = { POWER_SUPPLY_PROP_VOLTAGE_AVG, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index da73591017f9..9deabbde6fd6 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -51,6 +51,9 @@ static ssize_t power_supply_show_property(struct device *dev, "Unknown", "NiMH", "Li-ion", "Li-poly", "LiFe", "NiCd", "LiMn" }; + static char *capacity_level_text[] = { + "Unknown", "Critical", "Low", "Normal", "High", "Full" + }; ssize_t ret; struct power_supply *psy = dev_get_drvdata(dev); const ptrdiff_t off = attr - power_supply_attrs; @@ -71,6 +74,8 @@ static ssize_t power_supply_show_property(struct device *dev, return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) return sprintf(buf, "%s\n", technology_text[value.intval]); + else if (off == POWER_SUPPLY_PROP_CAPACITY_LEVEL) + return sprintf(buf, "%s\n", capacity_level_text[value.intval]); else if (off >= POWER_SUPPLY_PROP_MODEL_NAME) return sprintf(buf, "%s\n", value.strval); @@ -109,6 +114,7 @@ static struct device_attribute power_supply_attrs[] = { POWER_SUPPLY_ATTR(energy_now), POWER_SUPPLY_ATTR(energy_avg), POWER_SUPPLY_ATTR(capacity), + POWER_SUPPLY_ATTR(capacity_level), POWER_SUPPLY_ATTR(temp), POWER_SUPPLY_ATTR(temp_ambient), POWER_SUPPLY_ATTR(time_to_empty_now), diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 594c494ac3f0..0ab6aa171241 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -58,6 +58,15 @@ enum { POWER_SUPPLY_TECHNOLOGY_LiMn, }; +enum { + POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN = 0, + POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL, + POWER_SUPPLY_CAPACITY_LEVEL_LOW, + POWER_SUPPLY_CAPACITY_LEVEL_NORMAL, + POWER_SUPPLY_CAPACITY_LEVEL_HIGH, + POWER_SUPPLY_CAPACITY_LEVEL_FULL, +}; + enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, @@ -89,6 +98,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_ENERGY_NOW, POWER_SUPPLY_PROP_ENERGY_AVG, POWER_SUPPLY_PROP_CAPACITY, /* in percents! 
*/ + POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_TEMP_AMBIENT, POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, From 144bbeaedc53290eab21da82ce1cb5faefd14374 Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Tue, 30 Jun 2009 02:15:26 -0400 Subject: [PATCH 0010/3409] olpc_battery: Add an 'error' sysfs device that displays raw errors Grab the error code from EC_BAT_ERRCODE and let the user see it (rather than attempting to decode it as we do with PROP_HEALTH) with a separate error sysfs file. Signed-off-by: Andres Salomon Signed-off-by: Anton Vorontsov --- drivers/power/olpc_battery.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 3a589df09376..602bbd008f78 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -10,7 +10,9 @@ #include #include +#include #include +#include #include #include #include @@ -379,6 +381,29 @@ static struct bin_attribute olpc_bat_eeprom = { .read = olpc_bat_eeprom_read, }; +/* Allow userspace to see the specific error value pulled from the EC */ + +static ssize_t olpc_bat_error_read(struct device *dev, + struct device_attribute *attr, char *buf) +{ + uint8_t ec_byte; + ssize_t ret; + + ret = olpc_ec_cmd(EC_BAT_ERRCODE, NULL, 0, &ec_byte, 1); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", ec_byte); +} + +static struct device_attribute olpc_bat_error = { + .attr = { + .name = "error", + .mode = S_IRUGO, + }, + .show = olpc_bat_error_read, +}; + /********************************************************************* * Initialisation *********************************************************************/ @@ -442,8 +467,14 @@ static int __init olpc_bat_init(void) if (ret) goto eeprom_failed; + ret = device_create_file(olpc_bat.dev, &olpc_bat_error); + if (ret) + goto error_failed; + goto success; +error_failed: + device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom); eeprom_failed: power_supply_unregister(&olpc_bat); battery_failed: @@ -456,6 +487,7 @@ success: static void __exit olpc_bat_exit(void) { + device_remove_file(olpc_bat.dev, &olpc_bat_error); device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom); power_supply_unregister(&olpc_bat); power_supply_unregister(&olpc_ac); From ee8076ed3e1cdd0cd1e61318386932669c90b92f Mon Sep 17 00:00:00 2001 From: Andres Salomon Date: Thu, 2 Jul 2009 09:45:18 -0400 Subject: [PATCH 0011/3409] power_supply: Add a charge_type property, and use it for olpc driver This adds a new sysfs file called 'charge_type' which displays the type of charging (unknown, n/a, trickle charge, or fast charging). This allows things like battery diagnostics to determine what the battery/EC is doing without resorting to changing the 'status' sysfs output. Signed-off-by: Andres Salomon Acked-by: Mark Brown Signed-off-by: Anton Vorontsov --- Documentation/power/power_supply_class.txt | 5 +++++ drivers/power/olpc_battery.c | 9 +++++++++ drivers/power/power_supply_sysfs.c | 6 ++++++ include/linux/power_supply.h | 8 ++++++++ 4 files changed, 28 insertions(+) diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt index 709d95571d7b..9f16c5178b66 100644 --- a/Documentation/power/power_supply_class.txt +++ b/Documentation/power/power_supply_class.txt @@ -76,6 +76,11 @@ STATUS - this attribute represents operating status (charging, full, discharging (i.e. powering a load), etc.). This corresponds to BATTERY_STATUS_* values, as defined in battery.h. 
+CHARGE_TYPE - batteries can typically charge at different rates. +This defines trickle and fast charges. For batteries that +are already charged or discharging, 'n/a' can be displayed (or +'unknown', if the status is not known). + HEALTH - represents health of the battery, values corresponds to POWER_SUPPLY_HEALTH_*, defined in battery.h. diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index 602bbd008f78..8fefe5a73558 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c @@ -233,6 +233,14 @@ static int olpc_bat_get_property(struct power_supply *psy, if (ret) return ret; break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + if (ec_byte & BAT_STAT_TRICKLE) + val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + else if (ec_byte & BAT_STAT_CHARGING) + val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; + else + val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE; + break; case POWER_SUPPLY_PROP_PRESENT: val->intval = !!(ec_byte & (BAT_STAT_PRESENT | BAT_STAT_TRICKLE)); @@ -325,6 +333,7 @@ static int olpc_bat_get_property(struct power_supply *psy, static enum power_supply_property olpc_bat_props[] = { POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_TECHNOLOGY, diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c index 9deabbde6fd6..08144393d64b 100644 --- a/drivers/power/power_supply_sysfs.c +++ b/drivers/power/power_supply_sysfs.c @@ -43,6 +43,9 @@ static ssize_t power_supply_show_property(struct device *dev, static char *status_text[] = { "Unknown", "Charging", "Discharging", "Not charging", "Full" }; + static char *charge_type[] = { + "Unknown", "N/A", "Trickle", "Fast" + }; static char *health_text[] = { "Unknown", "Good", "Overheat", "Dead", "Over voltage", "Unspecified failure", "Cold", @@ -70,6 +73,8 @@ static ssize_t power_supply_show_property(struct device *dev, if (off == POWER_SUPPLY_PROP_STATUS) return sprintf(buf, "%s\n", status_text[value.intval]); + else if (off == POWER_SUPPLY_PROP_CHARGE_TYPE) + return sprintf(buf, "%s\n", charge_type[value.intval]); else if (off == POWER_SUPPLY_PROP_HEALTH) return sprintf(buf, "%s\n", health_text[value.intval]); else if (off == POWER_SUPPLY_PROP_TECHNOLOGY) @@ -86,6 +91,7 @@ static ssize_t power_supply_show_property(struct device *dev, static struct device_attribute power_supply_attrs[] = { /* Properties of type `int' */ POWER_SUPPLY_ATTR(status), + POWER_SUPPLY_ATTR(charge_type), POWER_SUPPLY_ATTR(health), POWER_SUPPLY_ATTR(present), POWER_SUPPLY_ATTR(online), diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 0ab6aa171241..4c7c6fc35487 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -38,6 +38,13 @@ enum { POWER_SUPPLY_STATUS_FULL, }; +enum { + POWER_SUPPLY_CHARGE_TYPE_UNKNOWN = 0, + POWER_SUPPLY_CHARGE_TYPE_NONE, + POWER_SUPPLY_CHARGE_TYPE_TRICKLE, + POWER_SUPPLY_CHARGE_TYPE_FAST, +}; + enum { POWER_SUPPLY_HEALTH_UNKNOWN = 0, POWER_SUPPLY_HEALTH_GOOD, @@ -70,6 +77,7 @@ enum { enum power_supply_property { /* Properties of type `int' */ POWER_SUPPLY_PROP_STATUS = 0, + POWER_SUPPLY_PROP_CHARGE_TYPE, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_ONLINE, From 4aed03ae58946c716c8e3f7060f8b500b8a8e30f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 10 Jul 2009 16:28:33 +0100 Subject: [PATCH 0012/3409] wm8350_power: Implement charge type property Signed-off-by: Mark Brown Signed-off-by: Anton Vorontsov --- 
drivers/power/wm8350_power.c | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c index 1b16bf343f2f..28b0299c0043 100644 --- a/drivers/power/wm8350_power.c +++ b/drivers/power/wm8350_power.c @@ -321,6 +321,24 @@ static int wm8350_bat_check_health(struct wm8350 *wm8350) return POWER_SUPPLY_HEALTH_GOOD; } +static int wm8350_bat_get_charge_type(struct wm8350 *wm8350) +{ + int state; + + state = wm8350_reg_read(wm8350, WM8350_BATTERY_CHARGER_CONTROL_2) & + WM8350_CHG_STS_MASK; + switch (state) { + case WM8350_CHG_STS_OFF: + return POWER_SUPPLY_CHARGE_TYPE_NONE; + case WM8350_CHG_STS_TRICKLE: + return POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + case WM8350_CHG_STS_FAST: + return POWER_SUPPLY_CHARGE_TYPE_FAST; + default: + return POWER_SUPPLY_CHARGE_TYPE_UNKNOWN; + } +} + static int wm8350_bat_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -342,6 +360,9 @@ static int wm8350_bat_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_HEALTH: val->intval = wm8350_bat_check_health(wm8350); break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = wm8350_bat_get_charge_type(wm8350); + break; default: ret = -EINVAL; break; @@ -355,6 +376,7 @@ static enum power_supply_property wm8350_bat_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CHARGE_TYPE, }; /********************************************************************* From a11034b4282515fd7d9f6fdc0a1380781da461c3 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 11:48:16 -0700 Subject: [PATCH 0013/3409] md/raid6: release spare page at ->stop() Add missing call to safe_put_page from stop() by unifying open coded raid5_conf_t de-allocation under free_conf(). 
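For context, safe_put_page() is essentially a NULL-tolerant wrapper
around put_page() in drivers/md/raid5.c, which is what lets free_conf()
below run unconditionally even on arrays that never allocated a spare
page. A sketch of the helper (illustrative; see raid5.c for the real
definition):

static void safe_put_page(struct page *p)
{
	/* tolerate conf->spare_page == NULL on non-raid6 arrays */
	if (p)
		put_page(p);
}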
Signed-off-by: Dan Williams --- drivers/md/raid5.c | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e1920f23579f..9411466f71de 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -4309,6 +4309,15 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) return sectors * (raid_disks - conf->max_degraded); } +static void free_conf(raid5_conf_t *conf) +{ + shrink_stripes(conf); + safe_put_page(conf->spare_page); + kfree(conf->disks); + kfree(conf->stripe_hashtbl); + kfree(conf); +} + static raid5_conf_t *setup_conf(mddev_t *mddev) { raid5_conf_t *conf; @@ -4438,11 +4447,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) abort: if (conf) { - shrink_stripes(conf); - safe_put_page(conf->spare_page); - kfree(conf->disks); - kfree(conf->stripe_hashtbl); - kfree(conf); + free_conf(conf); return ERR_PTR(-EIO); } else return ERR_PTR(-ENOMEM); @@ -4608,12 +4613,8 @@ abort: md_unregister_thread(mddev->thread); mddev->thread = NULL; if (conf) { - shrink_stripes(conf); print_raid5_conf(conf); - safe_put_page(conf->spare_page); - kfree(conf->disks); - kfree(conf->stripe_hashtbl); - kfree(conf); + free_conf(conf); } mddev->private = NULL; printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); @@ -4628,13 +4629,10 @@ static int stop(mddev_t *mddev) md_unregister_thread(mddev->thread); mddev->thread = NULL; - shrink_stripes(conf); - kfree(conf->stripe_hashtbl); mddev->queue->backing_dev_info.congested_fn = NULL; blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); - kfree(conf->disks); - kfree(conf); + free_conf(conf); mddev->private = NULL; return 0; } From 584ec22759c06cdfc189c03a727f20038526245b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:32:12 -0700 Subject: [PATCH 0014/3409] ioat: move to drivers/dma/ioat/ When first created the ioat driver was the only inhabitant of drivers/dma/. Now, it is the only multi-file (more than a .c and a .h) driver in the directory. Moving it to an ioat/ subdirectory allows the naming convention to be cleaned up, and allows for future splitting of the source files by hardware version (v1, v2, and v3). 
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/Makefile | 3 +-- drivers/dma/ioat/Makefile | 2 ++ drivers/dma/{ioat_dca.c => ioat/dca.c} | 4 ++-- drivers/dma/{ioat_dma.c => ioat/dma.c} | 6 +++--- drivers/dma/{ioatdma.h => ioat/dma.h} | 2 +- drivers/dma/{ioatdma_hw.h => ioat/hw.h} | 0 drivers/dma/{ioat.c => ioat/pci.c} | 6 +++--- drivers/dma/{ioatdma_registers.h => ioat/registers.h} | 0 drivers/idle/i7300_idle.c | 4 ++-- 9 files changed, 14 insertions(+), 13 deletions(-) create mode 100644 drivers/dma/ioat/Makefile rename drivers/dma/{ioat_dca.c => ioat/dca.c} (99%) rename drivers/dma/{ioat_dma.c => ioat/dma.c} (99%) rename drivers/dma/{ioatdma.h => ioat/dma.h} (99%) rename drivers/dma/{ioatdma_hw.h => ioat/hw.h} (100%) rename drivers/dma/{ioat.c => ioat/pci.c} (98%) rename drivers/dma/{ioatdma_registers.h => ioat/registers.h} (100%) diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 2e5dc96700d2..a1cb2857bba6 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,8 +1,7 @@ obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_NET_DMA) += iovlock.o obj-$(CONFIG_DMATEST) += dmatest.o -obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-objs := ioat.o ioat_dma.o ioat_dca.o +obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_MV_XOR) += mv_xor.o diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile new file mode 100644 index 000000000000..2ce3d3a4270b --- /dev/null +++ b/drivers/dma/ioat/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o +ioatdma-objs := pci.o dma.o dca.o diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat/dca.c similarity index 99% rename from drivers/dma/ioat_dca.c rename to drivers/dma/ioat/dca.c index c012a1e15043..af1c762dd9d0 100644 --- a/drivers/dma/ioat_dca.c +++ b/drivers/dma/ioat/dca.c @@ -33,8 +33,8 @@ #define cpu_physical_id(cpu) (cpuid_ebx(1) >> 24) #endif -#include "ioatdma.h" -#include "ioatdma_registers.h" +#include "dma.h" +#include "registers.h" /* * Bit 7 of a tag map entry is the "valid" bit, if it is set then bits 0:6 diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat/dma.c similarity index 99% rename from drivers/dma/ioat_dma.c rename to drivers/dma/ioat/dma.c index a600fc0f7962..648797e83295 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat/dma.c @@ -34,9 +34,9 @@ #include #include #include -#include "ioatdma.h" -#include "ioatdma_registers.h" -#include "ioatdma_hw.h" +#include "dma.h" +#include "registers.h" +#include "hw.h" #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioat/dma.h similarity index 99% rename from drivers/dma/ioatdma.h rename to drivers/dma/ioat/dma.h index a52ff4bd4601..e80e787fe64f 100644 --- a/drivers/dma/ioatdma.h +++ b/drivers/dma/ioat/dma.h @@ -22,7 +22,7 @@ #define IOATDMA_H #include -#include "ioatdma_hw.h" +#include "hw.h" #include #include #include diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioat/hw.h similarity index 100% rename from drivers/dma/ioatdma_hw.h rename to drivers/dma/ioat/hw.h diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat/pci.c similarity index 98% rename from drivers/dma/ioat.c rename to drivers/dma/ioat/pci.c index 2225bb6ba3d1..d7948bfd8fba 100644 --- a/drivers/dma/ioat.c +++ b/drivers/dma/ioat/pci.c @@ -30,9 +30,9 @@ #include #include #include -#include "ioatdma.h" -#include 
"ioatdma_registers.h" -#include "ioatdma_hw.h" +#include "dma.h" +#include "registers.h" +#include "hw.h" MODULE_VERSION(IOAT_DMA_VERSION); MODULE_LICENSE("GPL"); diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioat/registers.h similarity index 100% rename from drivers/dma/ioatdma_registers.h rename to drivers/dma/ioat/registers.h diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index 949c97ff57e3..f2ec7243549e 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c @@ -29,8 +29,8 @@ #include -#include "../dma/ioatdma_hw.h" -#include "../dma/ioatdma_registers.h" +#include "../dma/ioat/hw.h" +#include "../dma/ioat/registers.h" #define I7300_IDLE_DRIVER_VERSION "1.55" #define I7300_PRINT "i7300_idle:" From 2e83a5c5d2317c386de2880eb43ef0bef8eb1fa9 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 15 Jul 2009 18:20:38 +0200 Subject: [PATCH 0015/3409] ds2760_battery: delay power supply registration This fixes a race condition I recently introduced with the PMOD feature addition (cef437e3: "w1: ds2760_battery: add support for sleep mode feature"). Postpone the call to power_supply_register() to fix it. Signed-off-by: Daniel Mack Cc: Szabolcs Gyurko Acked-by: Matt Reimer Signed-off-by: Anton Vorontsov --- drivers/power/ds2760_battery.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 520b5c49ff30..cf07c43e90cd 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -381,12 +381,6 @@ static int ds2760_battery_probe(struct platform_device *pdev) di->charge_status = POWER_SUPPLY_STATUS_UNKNOWN; - retval = power_supply_register(&pdev->dev, &di->bat); - if (retval) { - dev_err(di->dev, "failed to register battery\n"); - goto batt_failed; - } - /* enable sleep mode feature */ ds2760_battery_read_status(di); status = di->raw[DS2760_STATUS_REG]; @@ -397,6 +391,12 @@ static int ds2760_battery_probe(struct platform_device *pdev) ds2760_battery_write_status(di, status); + retval = power_supply_register(&pdev->dev, &di->bat); + if (retval) { + dev_err(di->dev, "failed to register battery\n"); + goto batt_failed; + } + INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work); di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev)); if (!di->monitor_wqueue) { From 5c6e9bf2c96e746237516bc8897add67682ee452 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Wed, 15 Jul 2009 18:20:39 +0200 Subject: [PATCH 0016/3409] ds2760_battery: export more features Export POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW and POWER_SUPPLY_PROP_CAPACITY features to the power supply core. 
Signed-off-by: Daniel Mack
Cc: Szabolcs Gyurko
Acked-by: Matt Reimer
Signed-off-by: Anton Vorontsov
---
 drivers/power/ds2760_battery.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index cf07c43e90cd..f439071c2aa2 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -337,6 +337,12 @@ static int ds2760_battery_get_property(struct power_supply *psy,
 	case POWER_SUPPLY_PROP_TEMP:
 		val->intval = di->temp_C;
 		break;
+	case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+		val->intval = di->life_sec;
+		break;
+	case POWER_SUPPLY_PROP_CAPACITY:
+		val->intval = di->rem_capacity;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -353,6 +359,8 @@ static enum power_supply_property ds2760_battery_props[] = {
 	POWER_SUPPLY_PROP_CHARGE_EMPTY,
 	POWER_SUPPLY_PROP_CHARGE_NOW,
 	POWER_SUPPLY_PROP_TEMP,
+	POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+	POWER_SUPPLY_PROP_CAPACITY,
 };
 
 static int ds2760_battery_probe(struct platform_device *pdev)

From c1e72193ea3fa02e96bf3aa66006e18d107d0266 Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Wed, 15 Jul 2009 18:20:40 +0200
Subject: [PATCH 0017/3409] ds2760_battery: add rated_capacity module parameter

For systems where the ds2760 is soldered directly on the PCB, the
'rated capacity' register is not set to anything useful. In order to
allow users to bootstrap this value, introduce a new module parameter
'rated_capacity' and use it to write the internal EEPROM in case the
value differs from what's been given.

Signed-off-by: Daniel Mack
Cc: Szabolcs Gyurko
Acked-by: Matt Reimer
Signed-off-by: Anton Vorontsov
---
 drivers/power/ds2760_battery.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index f439071c2aa2..ed0ea5e275c5 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -66,6 +66,10 @@ static unsigned int pmod_enabled;
 module_param(pmod_enabled, bool, 0644);
 MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
 
+static unsigned int rated_capacity;
+module_param(rated_capacity, uint, 0644);
+MODULE_PARM_DESC(rated_capacity, "rated battery capacity, 10*mAh or index");
+
 /* Some batteries have their rated capacity stored as N * 10 mAh, while
  * others use an index into this table.
 */
static int rated_capacities[] = {
@@ -274,6 +278,17 @@ static void ds2760_battery_write_status(struct ds2760_device_info *di,
 	w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
 }
 
+static void ds2760_battery_write_rated_capacity(struct ds2760_device_info *di,
+						unsigned char rated_capacity)
+{
+	if (rated_capacity == di->raw[DS2760_RATED_CAPACITY])
+		return;
+
+	w1_ds2760_write(di->w1_dev, &rated_capacity, DS2760_RATED_CAPACITY, 1);
+	w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+	w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+}
+
 static void ds2760_battery_work(struct work_struct *work)
 {
 	struct ds2760_device_info *di = container_of(work,
@@ -399,6 +414,10 @@ static int ds2760_battery_probe(struct platform_device *pdev)
 
 	ds2760_battery_write_status(di, status);
 
+	/* set rated capacity from module param */
+	if (rated_capacity)
+		ds2760_battery_write_rated_capacity(di, rated_capacity);
+
 	retval = power_supply_register(&pdev->dev, &di->bat);
 	if (retval) {
 		dev_err(di->dev, "failed to register battery\n");

From 25f2bfa62ae77820a8185734c4a2ab7f3971a2fc Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Wed, 15 Jul 2009 18:20:41 +0200
Subject: [PATCH 0018/3409] ds2760_battery: handle full_active_uAh == 0 case correctly

In systems where the battery monitor is not part of the battery pack
and is hence not bootstrapped with sane values, the full_active_uAh
is likely to be zero. Handle that case by defaulting to the
rated_capacity information, which can be passed to the driver using
the new module parameter.

Signed-off-by: Daniel Mack
Cc: Szabolcs Gyurko
Acked-by: Matt Reimer
Signed-off-by: Anton Vorontsov
---
 drivers/power/ds2760_battery.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index ed0ea5e275c5..2d0e5edb4742 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -172,8 +172,13 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
 	di->full_active_uAh = di->raw[DS2760_ACTIVE_FULL] << 8 |
 			      di->raw[DS2760_ACTIVE_FULL + 1];
 
-	scale[0] = di->raw[DS2760_ACTIVE_FULL] << 8 |
-		   di->raw[DS2760_ACTIVE_FULL + 1];
+	/* If the full_active_uAh value is not given, fall back to the rated
+	 * capacity. This is likely to happen when chips are not part of the
+	 * battery pack and are therefore not bootstrapped. */
+	if (di->full_active_uAh == 0)
+		di->full_active_uAh = di->rated_capacity / 1000L;
+
+	scale[0] = di->full_active_uAh;
 	for (i = 1; i < 5; i++)
 		scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i];

From 02d0d2758821c38b2601d34dac544140af09e651 Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Wed, 15 Jul 2009 22:57:16 +0200
Subject: [PATCH 0019/3409] ds2760_battery: add current_accum module parameter

When connecting a ds2760 to a partly charged battery for the first
time, there must be a way to bootstrap the current_accum value.
Without that, the current capacity value is bogus until the battery
is fully charged for the first time.
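As a worked example of what the new parameter does (the helper appears
in the diff below; it scales its argument the same way it scales
full_active_uAh, i.e. treats it as a value in uAh, while the DS2760
ACR register counts in units of 0.25 mAh; the 500 mAh figure is
made up):

	/* Bootstrapping a pack currently holding about 500 mAh:
	 *   modprobe ds2760_battery current_accum=500000
	 * ds2760_battery_set_current_accum() then computes:
	 */
	unsigned int acr_val = 500000;	/* module parameter, in uAh */

	acr_val *= 4L;		/* 2000000 */
	acr_val /= 1000;	/* 2000 ACR counts x 0.25 mAh = 500 mAh */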
Signed-off-by: Daniel Mack
Cc: Szabolcs Gyurko
Cc: Matt Reimer
Signed-off-by: Anton Vorontsov
---
 drivers/power/ds2760_battery.c | 41 ++++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 14 deletions(-)

diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index 2d0e5edb4742..f4a9258aa9d0 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -70,6 +70,10 @@ static unsigned int rated_capacity;
 module_param(rated_capacity, uint, 0644);
 MODULE_PARM_DESC(rated_capacity, "rated battery capacity, 10*mAh or index");
 
+static unsigned int current_accum;
+module_param(current_accum, uint, 0644);
+MODULE_PARM_DESC(current_accum, "current accumulator value");
+
 /* Some batteries have their rated capacity stored as N * 10 mAh, while
  * others use an index into this table. */
 static int rated_capacities[] = {
@@ -215,6 +219,22 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di)
 	return 0;
 }
 
+static void ds2760_battery_set_current_accum(struct ds2760_device_info *di,
+					     unsigned int acr_val)
+{
+	unsigned char acr[2];
+
+	/* acr is in units of 0.25 mAh */
+	acr_val *= 4L;
+	acr_val /= 1000;
+
+	acr[0] = acr_val >> 8;
+	acr[1] = acr_val & 0xff;
+
+	if (w1_ds2760_write(di->w1_dev, acr, DS2760_CURRENT_ACCUM_MSB, 2) < 2)
+		dev_warn(di->dev, "ACR write failed\n");
+}
+
 static void ds2760_battery_update_status(struct ds2760_device_info *di)
 {
 	int old_charge_status = di->charge_status;
@@ -246,21 +266,9 @@ static void ds2760_battery_update_status(struct ds2760_device_info *di)
 		if (di->full_counter < 2) {
 			di->charge_status = POWER_SUPPLY_STATUS_CHARGING;
 		} else {
-			unsigned char acr[2];
-			int acr_val;
-
-			/* acr is in units of 0.25 mAh */
-			acr_val = di->full_active_uAh * 4L / 1000;
-
-			acr[0] = acr_val >> 8;
-			acr[1] = acr_val & 0xff;
-
-			if (w1_ds2760_write(di->w1_dev, acr,
-					    DS2760_CURRENT_ACCUM_MSB, 2) < 2)
-				dev_warn(di->dev,
-					 "ACR reset failed\n");
-
 			di->charge_status = POWER_SUPPLY_STATUS_FULL;
+			ds2760_battery_set_current_accum(di,
+					di->full_active_uAh);
 		}
 	}
 	} else {
@@ -423,6 +431,11 @@ static int ds2760_battery_probe(struct platform_device *pdev)
 	if (rated_capacity)
 		ds2760_battery_write_rated_capacity(di, rated_capacity);
 
+	/* set current accumulator if given as parameter. This should
+	 * only be done for bootstrapping the value */
+	if (current_accum)
+		ds2760_battery_set_current_accum(di, current_accum);
+
 	retval = power_supply_register(&pdev->dev, &di->bat);
 	if (retval) {
 		dev_err(di->dev, "failed to register battery\n");

From ff3417e7effe57cc002a8882a48bcb8e1a7e7267 Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Thu, 30 Jul 2009 17:42:31 +0400
Subject: [PATCH 0020/3409] power_supply: EXPORT_SYMBOL cleanups

While I'm at it, clean up the power supply code so that
EXPORT_SYMBOL_GPL appears directly after the symbol declaration.
checkpatch.pl wants it that way.
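For illustration, the layout checkpatch.pl asks for puts the export
macro immediately after the closing brace of the definition it exports
(the function name here is made up):

#include <linux/module.h>

int example_is_supplied(void)
{
	return 1;
}
EXPORT_SYMBOL_GPL(example_is_supplied);	/* directly after the definition */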
Signed-off-by: Daniel Mack Cc: Ian Molton Cc: Matt Reimer Signed-off-by: Anton Vorontsov --- drivers/power/power_supply_core.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 5520040449c4..12cd6e36ff1d 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c @@ -18,7 +18,9 @@ #include #include "power_supply.h" +/* exported for the APM Power driver, APM emulation */ struct class *power_supply_class; +EXPORT_SYMBOL_GPL(power_supply_class); static int __power_supply_changed_work(struct device *dev, void *data) { @@ -55,6 +57,7 @@ void power_supply_changed(struct power_supply *psy) schedule_work(&psy->changed_work); } +EXPORT_SYMBOL_GPL(power_supply_changed); static int __power_supply_am_i_supplied(struct device *dev, void *data) { @@ -86,6 +89,7 @@ int power_supply_am_i_supplied(struct power_supply *psy) return error; } +EXPORT_SYMBOL_GPL(power_supply_am_i_supplied); static int __power_supply_is_system_supplied(struct device *dev, void *data) { @@ -110,6 +114,7 @@ int power_supply_is_system_supplied(void) return error; } +EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); int power_supply_register(struct device *parent, struct power_supply *psy) { @@ -144,6 +149,7 @@ dev_create_failed: success: return rc; } +EXPORT_SYMBOL_GPL(power_supply_register); void power_supply_unregister(struct power_supply *psy) { @@ -152,6 +158,7 @@ void power_supply_unregister(struct power_supply *psy) power_supply_remove_attrs(psy); device_unregister(psy->dev); } +EXPORT_SYMBOL_GPL(power_supply_unregister); static int __init power_supply_class_init(void) { @@ -170,15 +177,6 @@ static void __exit power_supply_class_exit(void) class_destroy(power_supply_class); } -EXPORT_SYMBOL_GPL(power_supply_changed); -EXPORT_SYMBOL_GPL(power_supply_am_i_supplied); -EXPORT_SYMBOL_GPL(power_supply_is_system_supplied); -EXPORT_SYMBOL_GPL(power_supply_register); -EXPORT_SYMBOL_GPL(power_supply_unregister); - -/* exported for the APM Power driver, APM emulation */ -EXPORT_SYMBOL_GPL(power_supply_class); - subsys_initcall(power_supply_class_init); module_exit(power_supply_class_exit); From e5f5ccb646bc6009572b5c23201b5e81638ff150 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 23 Jul 2009 20:35:53 +0200 Subject: [PATCH 0021/3409] power_supply: get_by_name and set_charged functionality This adds a function that indicates that a battery is fully charged. It also includes functions to get a power_supply device from the class of registered devices by name reference. These can be used to find a specific battery to call power_supply_set_battery_charged() on. Some battery drivers might need this information to calibrate themselves. 
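A minimal sketch of a consumer, e.g. a charger driver that detects
end-of-charge on a GPIO and forwards the event to the battery monitor.
The supply name "main-battery" and the handler are hypothetical; the
two power_supply calls are the ones added in the diff below:

#include <linux/power_supply.h>

static void example_end_of_charge(void)
{
	struct power_supply *psy = power_supply_get_by_name("main-battery");

	/* returns -EINVAL unless psy is a battery with ->set_charged() */
	if (psy)
		power_supply_set_battery_charged(psy);
}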
Signed-off-by: Daniel Mack
Cc: Ian Molton
Cc: Anton Vorontsov
Cc: Matt Reimer
Signed-off-by: Anton Vorontsov
---
 drivers/power/power_supply_core.c | 28 ++++++++++++++++++++++++++++
 include/linux/power_supply.h | 3 +++
 2 files changed, 31 insertions(+)

diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 12cd6e36ff1d..cce75b40b435 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -116,6 +116,34 @@ int power_supply_is_system_supplied(void)
 }
 EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
 
+int power_supply_set_battery_charged(struct power_supply *psy)
+{
+	if (psy->type == POWER_SUPPLY_TYPE_BATTERY && psy->set_charged) {
+		psy->set_charged(psy);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(power_supply_set_battery_charged);
+
+static int power_supply_match_device_by_name(struct device *dev, void *data)
+{
+	const char *name = data;
+	struct power_supply *psy = dev_get_drvdata(dev);
+
+	return strcmp(psy->name, name) == 0;
+}
+
+struct power_supply *power_supply_get_by_name(char *name)
+{
+	struct device *dev = class_find_device(power_supply_class, NULL, name,
+					power_supply_match_device_by_name);
+
+	return dev ? dev_get_drvdata(dev) : NULL;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_name);
+
 int power_supply_register(struct device *parent, struct power_supply *psy)
 {
 	int rc = 0;

diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 4c7c6fc35487..b5d096d3a9be 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -144,6 +144,7 @@
 			enum power_supply_property psp,
 			union power_supply_propval *val);
 	void (*external_power_changed)(struct power_supply *psy);
+	void (*set_charged)(struct power_supply *psy);
 
 	/* For APM emulation, think legacy userspace. */
 	int use_for_apm;
@@ -183,8 +184,10 @@ struct power_supply_info {
 	int use_for_apm;
 };
 
+extern struct power_supply *power_supply_get_by_name(char *name);
 extern void power_supply_changed(struct power_supply *psy);
 extern int power_supply_am_i_supplied(struct power_supply *psy);
+extern int power_supply_set_battery_charged(struct power_supply *psy);
 
 #if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE)
 extern int power_supply_is_system_supplied(void);

From 8d631ccff8d90fce77b42f01b3872595c599cbf9 Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Thu, 23 Jul 2009 20:35:54 +0200
Subject: [PATCH 0022/3409] ds2760_battery: implement set_charged() feature

The ds2760's internal current meter is not reliable enough, as it has
an inaccuracy of around 15%. Without any correction for that error,
the current accumulator keeps counting up all the time, even though
the battery is already fully charged, and hence corrupts the stored
capacity information. The longer the battery is connected, the worse
the aberration becomes.

Fortunately, this can be corrected via the DS2760_CURRENT_OFFSET_BIAS
register. Using the external power_supply_set_battery_charged()
function, this register is now used to calibrate the measurement.

A delayed work is used to debounce flaky GPIO signals and to let the
current value settle. Also see Maxim's application note AN4188.
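The deferral uses the standard cancel-then-requeue idiom on a delayed
work item, so a burst of bouncing GPIO edges collapses into a single
handler run once things settle. A generic sketch of the pattern (names
illustrative):

#include <linux/workqueue.h>

static void example_debounced_kick(struct workqueue_struct *wq,
				   struct delayed_work *work)
{
	/* re-arm on every event; only the last event in a burst fires */
	cancel_delayed_work(work);
	queue_delayed_work(wq, work, 20 * HZ);
}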
Signed-off-by: Daniel Mack
Cc: Szabolcs Gyurko
Cc: Matt Reimer
Cc: Anton Vorontsov
Signed-off-by: Anton Vorontsov
---
 drivers/power/ds2760_battery.c | 52 ++++++++++++++++++++++++++++++++++
 1 file changed, 52 insertions(+)

diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index f4a9258aa9d0..1bb8498f14be 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -56,6 +56,7 @@ struct ds2760_device_info {
 	struct device *w1_dev;
 	struct workqueue_struct *monitor_wqueue;
 	struct delayed_work monitor_work;
+	struct delayed_work set_charged_work;
 };
 
 static unsigned int cache_time = 1000;
@@ -327,6 +328,52 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 	queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
 }
 
+static void ds2760_battery_set_charged_work(struct work_struct *work)
+{
+	char bias;
+	struct ds2760_device_info *di = container_of(work,
+		struct ds2760_device_info, set_charged_work.work);
+
+	dev_dbg(di->dev, "%s\n", __func__);
+
+	ds2760_battery_read_status(di);
+
+	/* When we get notified by external circuitry that the battery is
+	 * considered fully charged now, we know that there is no current
+	 * flow any more. However, the ds2760's internal current meter is
+	 * too inaccurate to rely on - the spec says something like 15%
+	 * error. Hence, we use the current offset bias register to
+	 * compensate for that error.
+	 */
+
+	if (!power_supply_am_i_supplied(&di->bat))
+		return;
+
+	bias = (signed char) di->current_raw +
+		(signed char) di->raw[DS2760_CURRENT_OFFSET_BIAS];
+
+	dev_dbg(di->dev, "%s: bias = %d\n", __func__, bias);
+
+	w1_ds2760_write(di->w1_dev, &bias, DS2760_CURRENT_OFFSET_BIAS, 1);
+	w1_ds2760_store_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+	w1_ds2760_recall_eeprom(di->w1_dev, DS2760_EEPROM_BLOCK1);
+
+	/* Write to the di->raw[] buffer directly - the CURRENT_OFFSET_BIAS
+	 * value won't be read back by ds2760_battery_read_status() */
+	di->raw[DS2760_CURRENT_OFFSET_BIAS] = bias;
+}
+
+static void ds2760_battery_set_charged(struct power_supply *psy)
+{
+	struct ds2760_device_info *di = to_ds2760_device_info(psy);
+
+	/* postpone the actual work by 20 secs. This is for debouncing GPIO
+	 * signals and to let the current value settle. See AN4188.
	 */
+	cancel_delayed_work(&di->set_charged_work);
+	queue_delayed_work(di->monitor_wqueue, &di->set_charged_work, HZ * 20);
+}
+
 static int ds2760_battery_get_property(struct power_supply *psy,
 				       enum power_supply_property psp,
 				       union power_supply_propval *val)
@@ -412,6 +459,7 @@ static int ds2760_battery_probe(struct platform_device *pdev)
 	di->bat.properties = ds2760_battery_props;
 	di->bat.num_properties = ARRAY_SIZE(ds2760_battery_props);
 	di->bat.get_property = ds2760_battery_get_property;
+	di->bat.set_charged = ds2760_battery_set_charged;
 	di->bat.external_power_changed =
 		ds2760_battery_external_power_changed;
@@ -443,6 +491,8 @@
 	INIT_DELAYED_WORK(&di->monitor_work, ds2760_battery_work);
+	INIT_DELAYED_WORK(&di->set_charged_work,
+			  ds2760_battery_set_charged_work);
 	di->monitor_wqueue = create_singlethread_workqueue(dev_name(&pdev->dev));
 	if (!di->monitor_wqueue) {
 		retval = -ESRCH;
@@ -467,6 +517,8 @@ static int ds2760_battery_remove(struct platform_device *pdev)
 
 	cancel_rearming_delayed_workqueue(di->monitor_wqueue,
 					  &di->monitor_work);
+	cancel_rearming_delayed_workqueue(di->monitor_wqueue,
+					  &di->set_charged_work);
 	destroy_workqueue(di->monitor_wqueue);
 	power_supply_unregister(&di->bat);

From 8e0af5141ab950b78b3ebbfaded5439dcf8b3a8d Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Mon, 27 Jul 2009 18:11:02 -0400
Subject: [PATCH 0023/3409] ACPI: create Processor Aggregator Device driver

ACPI 4.0 created the logical "processor aggregator device" as a
mechanism for platforms to ask the OS to force otherwise busy
processors to enter (power saving) idle. The intent is to lower power
consumption to ride out transient electrical and thermal emergencies,
rather than powering off the server.

On platforms that can save more power/performance via P-states, the
platform will first exhaust P-states before forcing idle. However,
the relative benefit of P-states vs. idle states is platform
dependent, and thus this driver need not know or care about it.

This driver does not use the kernel's CPU hot-plug mechanism because
after the transient emergency is over, the system must be returned to
its normal state, and hotplug would permanently break both cpusets
and binding.

So to force idle, the driver creates a power saving thread. The
scheduler will migrate the thread to the preferred CPU. The thread
has maximum priority and SCHED_RR policy, so it can occupy one CPU.
To save power, the thread will invoke the deep C-state entry
instructions. To avoid starvation, the thread will sleep for 5% of
every second (the current RT scheduler has a threshold to avoid
starvation, but if other CPUs are idle, an RT task can borrow CPU
time from them, which defeats that mechanism here).

Vaidyanathan Srinivasan has proposed scheduler enhancements to allow
injecting idle time into the system. This driver doesn't depend on
those enhancements, but could cut over to them when they are
available.

Peter Z. does not favor upstreaming this driver until those scheduler
enhancements are in place. However, we favor upstreaming this driver
now because it is useful now, and it can be enhanced over time.
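For context, the request reaches the driver through the platform's
_PUR object, which acpi_pad_pur() below expects to return a
two-element package: a revision ID that must be 1, and the number of
logical processors to force idle. An illustrative ACPI ASL fragment
(values made up):

Name (_PUR, Package ()
{
    1,	// RevisionID; acpi_pad_pur() rejects anything else
    2	// NumProcessors: logical CPUs the platform wants idled
})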
Signed-off-by: Shaohua Li
NACKed-by: Peter Zijlstra
Cc: Vaidyanathan Srinivasan
Signed-off-by: Len Brown
---
 MAINTAINERS | 8 +
 drivers/acpi/Kconfig | 11 +
 drivers/acpi/Makefile | 2 +
 drivers/acpi/acpi_pad.c | 514 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 535 insertions(+)
 create mode 100644 drivers/acpi/acpi_pad.c

diff --git a/MAINTAINERS b/MAINTAINERS
index ebc269152faf..082df56d5b9a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -288,6 +288,14 @@ L: linux-pci@vger.kernel.org
 S: Supported
 F: drivers/pci/hotplug/acpi*
 
+ACPI PROCESSOR AGGREGATOR DRIVER
+P: Shaohua Li
+M: shaohua.li@intel.com
+L: linux-acpi@vger.kernel.org
+W: http://www.lesswatts.org/projects/acpi/
+S: Supported
+F: drivers/acpi/acpi_pad.c
+
 ACPI THERMAL DRIVER
 P: Zhang Rui
 M: rui.zhang@intel.com

diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7ec7d88c5999..13531ed3cbd3 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -196,6 +196,17 @@ config ACPI_HOTPLUG_CPU
 	select ACPI_CONTAINER
 	default y
 
+config ACPI_PROCESSOR_AGGREGATOR
+	tristate "Processor Aggregator"
+	depends on ACPI_PROCESSOR
+	depends on EXPERIMENTAL
+	help
+	  ACPI 4.0 defines the Processor Aggregator, which enables the OS
+	  to perform specific processor configuration and control that
+	  applies to all processors in the platform. Currently only logical
+	  processor idling is defined, which is to reduce power consumption.
+	  This driver supports the new device.
+
 config ACPI_THERMAL
 	tristate "Thermal Zone"
 	depends on ACPI_PROCESSOR

diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 03a985be3fe3..8dae61c8a176 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -61,3 +61,5 @@ obj-$(CONFIG_ACPI_SBS) += sbs.o
 processor-y := processor_core.o processor_throttling.o
 processor-y += processor_idle.o processor_thermal.o
 processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
+
+obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o

diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644
index 000000000000..0d2cdb86158b
--- /dev/null
+++ b/drivers/acpi/acpi_pad.c
@@ -0,0 +1,514 @@
+/*
+ * acpi_pad.c ACPI Processor Aggregator Driver
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" +#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" +#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 +static DEFINE_MUTEX(isolated_cpus_lock); + +#define MWAIT_SUBSTATE_MASK (0xf) +#define MWAIT_CSTATE_MASK (0xf) +#define MWAIT_SUBSTATE_SIZE (4) +#define CPUID_MWAIT_LEAF (5) +#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) +#define CPUID5_ECX_INTERRUPT_BREAK (0x2) +static unsigned long power_saving_mwait_eax; +static void power_saving_mwait_init(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned int highest_cstate = 0; + unsigned int highest_subcstate = 0; + int i; + + if (!boot_cpu_has(X86_FEATURE_MWAIT)) + return; + if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) + return; + + cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); + + if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || + !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) + return; + + edx >>= MWAIT_SUBSTATE_SIZE; + for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { + if (edx & MWAIT_SUBSTATE_MASK) { + highest_cstate = i; + highest_subcstate = edx & MWAIT_SUBSTATE_MASK; + } + } + power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | + (highest_subcstate - 1); + + for_each_online_cpu(i) + clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i); + +#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) + switch (boot_cpu_data.x86_vendor) { + case X86_VENDOR_AMD: + case X86_VENDOR_INTEL: + /* + * AMD Fam10h TSC will tick in all + * C/P/S0/S1 states when this bit is set. + */ + if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) + return; + + /*FALL THROUGH*/ + default: + /* TSC could halt in idle, so notify users */ + mark_tsc_unstable("TSC halts in idle"); + } +#endif +} + +static unsigned long cpu_weight[NR_CPUS]; +static int tsk_in_cpu[NR_CPUS] = {[0 ... 
NR_CPUS-1] = -1};
+static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
+static void round_robin_cpu(unsigned int tsk_index)
+{
+	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+	cpumask_var_t tmp;
+	int cpu;
+	unsigned long min_weight = -1, preferred_cpu;
+
+	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
+		return;
+
+	mutex_lock(&isolated_cpus_lock);
+	cpumask_clear(tmp);
+	for_each_cpu(cpu, pad_busy_cpus)
+		cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+	cpumask_andnot(tmp, cpu_online_mask, tmp);
+	/* avoid HT siblings if possible */
+	if (cpumask_empty(tmp))
+		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
+	if (cpumask_empty(tmp)) {
+		mutex_unlock(&isolated_cpus_lock);
+		return;
+	}
+	for_each_cpu(cpu, tmp) {
+		if (cpu_weight[cpu] < min_weight) {
+			min_weight = cpu_weight[cpu];
+			preferred_cpu = cpu;
+		}
+	}
+
+	if (tsk_in_cpu[tsk_index] != -1)
+		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+	tsk_in_cpu[tsk_index] = preferred_cpu;
+	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
+	cpu_weight[preferred_cpu]++;
+	mutex_unlock(&isolated_cpus_lock);
+
+	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
+}
+
+static void exit_round_robin(unsigned int tsk_index)
+{
+	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
+	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
+	tsk_in_cpu[tsk_index] = -1;
+}
+
+static unsigned int idle_pct = 5; /* percentage */
+static unsigned int round_robin_time = 10; /* seconds */
+static int power_saving_thread(void *data)
+{
+	struct sched_param param = {.sched_priority = 1};
+	int do_sleep;
+	unsigned int tsk_index = (unsigned long)data;
+	u64 last_jiffies = 0;
+
+	sched_setscheduler(current, SCHED_RR, &param);
+
+	while (!kthread_should_stop()) {
+		int cpu;
+		u64 expire_time;
+
+		try_to_freeze();
+
+		/* round robin to cpus */
+		if (last_jiffies + round_robin_time * HZ < jiffies) {
+			last_jiffies = jiffies;
+			round_robin_cpu(tsk_index);
+		}
+
+		do_sleep = 0;
+
+		current_thread_info()->status &= ~TS_POLLING;
+		/*
+		 * TS_POLLING-cleared state must be visible before we test
+		 * NEED_RESCHED:
+		 */
+		smp_mb();
+
+		expire_time = jiffies + HZ * (100 - idle_pct) / 100;
+
+		while (!need_resched()) {
+			local_irq_disable();
+			cpu = smp_processor_id();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+				&cpu);
+			stop_critical_timings();
+
+			__monitor((void *)&current_thread_info()->flags, 0, 0);
+			smp_mb();
+			if (!need_resched())
+				__mwait(power_saving_mwait_eax, 1);
+
+			start_critical_timings();
+			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+				&cpu);
+			local_irq_enable();
+
+			if (jiffies > expire_time) {
+				do_sleep = 1;
+				break;
+			}
+		}
+
+		current_thread_info()->status |= TS_POLLING;
+
+		/*
+		 * The current sched_rt implementation has a threshold for RT
+		 * task running time: when an RT task uses 95% of the CPU
+		 * time, it is scheduled out for 5% of the time so that other
+		 * tasks are not starved. But that mechanism only works when
+		 * every CPU runs an RT task; if one CPU has no RT task, RT
+		 * tasks from other CPUs will borrow CPU time from it and end
+		 * up using more than 95% CPU time. To make 'avoid starvation'
+		 * work, the thread takes a nap here.
+ */ + if (do_sleep) + schedule_timeout_killable(HZ * idle_pct / 100); + } + + exit_round_robin(tsk_index); + return 0; +} + +static struct task_struct *ps_tsks[NR_CPUS]; +static unsigned int ps_tsk_num; +static int create_power_saving_task(void) +{ + ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, + (void *)(unsigned long)ps_tsk_num, + "power_saving/%d", ps_tsk_num); + if (ps_tsks[ps_tsk_num]) { + ps_tsk_num++; + return 0; + } + return -EINVAL; +} + +static void destroy_power_saving_task(void) +{ + if (ps_tsk_num > 0) { + ps_tsk_num--; + kthread_stop(ps_tsks[ps_tsk_num]); + } +} + +static void set_power_saving_task_num(unsigned int num) +{ + if (num > ps_tsk_num) { + while (ps_tsk_num < num) { + if (create_power_saving_task()) + return; + } + } else if (num < ps_tsk_num) { + while (ps_tsk_num > num) + destroy_power_saving_task(); + } +} + +static int acpi_pad_idle_cpus(unsigned int num_cpus) +{ + get_online_cpus(); + + num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); + set_power_saving_task_num(num_cpus); + + put_online_cpus(); + return 0; +} + +static uint32_t acpi_pad_idle_cpus_num(void) +{ + return ps_tsk_num; +} + +static ssize_t acpi_pad_rrtime_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + if (num < 1 || num >= 100) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + round_robin_time = num; + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_rrtime_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time); +} +static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, + acpi_pad_rrtime_show, + acpi_pad_rrtime_store); + +static ssize_t acpi_pad_idlepct_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + if (num < 1 || num >= 100) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + idle_pct = num; + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_idlepct_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d", idle_pct); +} +static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, + acpi_pad_idlepct_show, + acpi_pad_idlepct_store); + +static ssize_t acpi_pad_idlecpus_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + unsigned long num; + if (strict_strtoul(buf, 0, &num)) + return -EINVAL; + mutex_lock(&isolated_cpus_lock); + acpi_pad_idle_cpus(num); + mutex_unlock(&isolated_cpus_lock); + return count; +} + +static ssize_t acpi_pad_idlecpus_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return cpumask_scnprintf(buf, PAGE_SIZE, + to_cpumask(pad_busy_cpus_bits)); +} +static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, + acpi_pad_idlecpus_show, + acpi_pad_idlecpus_store); + +static int acpi_pad_add_sysfs(struct acpi_device *device) +{ + int result; + + result = device_create_file(&device->dev, &dev_attr_idlecpus); + if (result) + return -ENODEV; + result = device_create_file(&device->dev, &dev_attr_idlepct); + if (result) { + device_remove_file(&device->dev, &dev_attr_idlecpus); + return -ENODEV; + } + result = device_create_file(&device->dev, &dev_attr_rrtime); + if (result) { + device_remove_file(&device->dev, &dev_attr_idlecpus); + device_remove_file(&device->dev, &dev_attr_idlepct); + return 
-ENODEV; + } + return 0; +} + +static void acpi_pad_remove_sysfs(struct acpi_device *device) +{ + device_remove_file(&device->dev, &dev_attr_idlecpus); + device_remove_file(&device->dev, &dev_attr_idlepct); + device_remove_file(&device->dev, &dev_attr_rrtime); +} + +/* Query firmware how many CPUs should be idle */ +static int acpi_pad_pur(acpi_handle handle, int *num_cpus) +{ + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + acpi_status status; + union acpi_object *package; + int rev, num, ret = -EINVAL; + + status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer); + if (ACPI_FAILURE(status)) + return -EINVAL; + package = buffer.pointer; + if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) + goto out; + rev = package->package.elements[0].integer.value; + num = package->package.elements[1].integer.value; + if (rev != 1) + goto out; + *num_cpus = num; + ret = 0; +out: + kfree(buffer.pointer); + return ret; +} + +/* Notify firmware how many CPUs are idle */ +static void acpi_pad_ost(acpi_handle handle, int stat, + uint32_t idle_cpus) +{ + union acpi_object params[3] = { + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_INTEGER,}, + {.type = ACPI_TYPE_BUFFER,}, + }; + struct acpi_object_list arg_list = {3, params}; + + params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; + params[1].integer.value = stat; + params[2].buffer.length = 4; + params[2].buffer.pointer = (void *)&idle_cpus; + acpi_evaluate_object(handle, "_OST", &arg_list, NULL); +} + +static void acpi_pad_handle_notify(acpi_handle handle) +{ + int num_cpus, ret; + uint32_t idle_cpus; + + mutex_lock(&isolated_cpus_lock); + if (acpi_pad_pur(handle, &num_cpus)) { + mutex_unlock(&isolated_cpus_lock); + return; + } + ret = acpi_pad_idle_cpus(num_cpus); + idle_cpus = acpi_pad_idle_cpus_num(); + if (!ret) + acpi_pad_ost(handle, 0, idle_cpus); + else + acpi_pad_ost(handle, 1, 0); + mutex_unlock(&isolated_cpus_lock); +} + +static void acpi_pad_notify(acpi_handle handle, u32 event, + void *data) +{ + struct acpi_device *device = data; + + switch (event) { + case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: + acpi_pad_handle_notify(handle); + acpi_bus_generate_proc_event(device, event, 0); + acpi_bus_generate_netlink_event(device->pnp.device_class, + dev_name(&device->dev), event, 0); + break; + default: + printk(KERN_WARNING"Unsupported event [0x%x]\n", event); + break; + } +} + +static int acpi_pad_add(struct acpi_device *device) +{ + acpi_status status; + + strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); + strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); + + if (acpi_pad_add_sysfs(device)) + return -ENODEV; + + status = acpi_install_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); + if (ACPI_FAILURE(status)) { + acpi_pad_remove_sysfs(device); + return -ENODEV; + } + + return 0; +} + +static int acpi_pad_remove(struct acpi_device *device, + int type) +{ + mutex_lock(&isolated_cpus_lock); + acpi_pad_idle_cpus(0); + mutex_unlock(&isolated_cpus_lock); + + acpi_remove_notify_handler(device->handle, + ACPI_DEVICE_NOTIFY, acpi_pad_notify); + acpi_pad_remove_sysfs(device); + return 0; +} + +static const struct acpi_device_id pad_device_ids[] = { + {"ACPI000C", 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, pad_device_ids); + +static struct acpi_driver acpi_pad_driver = { + .name = "processor_aggregator", + .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, + .ids = pad_device_ids, + .ops = { + .add = acpi_pad_add, + .remove = acpi_pad_remove, + }, +}; 
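+/* + * Usage sketch (illustrative, not part of the original patch): once the + * driver is bound, the sysfs files created above can be driven from + * userspace, for example: + * + * echo 2 > /sys/devices/.../ACPI000C:00/idlecpus (idle two CPUs) + * echo 20 > /sys/devices/.../ACPI000C:00/idlepct (idle 20% of the time) + * echo 30 > /sys/devices/.../ACPI000C:00/rrtime (migrate every 30 s) + * + * The exact sysfs path depends on the machine's ACPI namespace. + */ 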
+ +static int __init acpi_pad_init(void) +{ + power_saving_mwait_init(); + if (power_saving_mwait_eax == 0) + return -EINVAL; + + return acpi_bus_register_driver(&acpi_pad_driver); +} + +static void __exit acpi_pad_exit(void) +{ + acpi_bus_unregister_driver(&acpi_pad_driver); +} + +module_init(acpi_pad_init); +module_exit(acpi_pad_exit); +MODULE_AUTHOR("Shaohua Li"); +MODULE_DESCRIPTION("ACPI Processor Aggregator Driver"); +MODULE_LICENSE("GPL"); From 67638e4043083cdc6f10386a75fef87ba46eecb3 Mon Sep 17 00:00:00 2001 From: OGAWA Hirofumi Date: Sat, 1 Aug 2009 21:30:31 +0900 Subject: [PATCH 0024/3409] fat/nls: Fix handling of utf8 invalid char With the utf8 option, vfat allowed duplicate filenames. Normal nls returns -EINVAL for an invalid char, but utf8s_to_utf16s() historically skipped the invalid char. So, this changes utf8s_to_utf16s() directly to return -EINVAL for an invalid char, because vfat is the only user of it. mkdir /mnt/fatfs FILENAME=`echo -ne "invalidutf8char_\\0341_endofchar"` echo "Using filename: $FILENAME" dd if=/dev/zero of=fatfs bs=512 count=128 mkdosfs -F 32 fatfs mount -o loop,utf8 fatfs /mnt/fatfs touch "/mnt/fatfs/$FILENAME" umount /mnt/fatfs mount -o loop,utf8 fatfs /mnt/fatfs touch "/mnt/fatfs/$FILENAME" ls -l /mnt/fatfs umount /mnt/fatfs ---- And the output is: Using filename: invalidutf8char_\0341_endofchar 128+0 records in 128+0 records out 65536 bytes (66 kB) copied, 0.000388118 s, 169 MB/s mkdosfs 2.11 (12 Mar 2005) total 0 -rwxr-xr-x 1 root root 0 Jun 28 19:46 invalidutf8char__endofchar -rwxr-xr-x 1 root root 0 Jun 28 19:46 invalidutf8char__endofchar Tested-by: Marton Balint Signed-off-by: OGAWA Hirofumi --- fs/fat/namei_vfat.c | 15 ++++----------- fs/nls/nls_base.c | 8 ++++---- 2 files changed, 8 insertions(+), 15 deletions(-) diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index cb6e83557112..f565f24019b5 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c @@ -499,17 +499,10 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname, int charlen; if (utf8) { - int name_len = strlen(name); - - *outlen = utf8s_to_utf16s(name, PATH_MAX, (wchar_t *) outname); - - /* - * We stripped '.'s before and set len appropriately, - * but utf8s_to_utf16s doesn't care about len - */ - *outlen -= (name_len - len); - - if (*outlen > 255) + *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname); + if (*outlen < 0) + return *outlen; + else if (*outlen > 255) return -ENAMETOOLONG; op = &outname[*outlen * sizeof(wchar_t)]; diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c index 477d37d83b31..b25c218671b3 100644 --- a/fs/nls/nls_base.c +++ b/fs/nls/nls_base.c @@ -124,10 +124,10 @@ int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs) while (*s && len > 0) { if (*s & 0x80) { size = utf8_to_utf32(s, len, &u); - if (size < 0) { - /* Ignore character and move on */ - size = 1; - } else if (u >= PLANE_SIZE) { + if (size < 0) + return -EINVAL; + + if (u >= PLANE_SIZE) { u -= PLANE_SIZE; *op++ = (wchar_t) (SURROGATE_PAIR | ((u >> 10) & SURROGATE_BITS)); From 955234755ce4a2c33cfc558912aa8f2148cc1fc6 Mon Sep 17 00:00:00 2001 From: Paul Wise Date: Sat, 1 Aug 2009 21:30:31 +0900 Subject: [PATCH 0025/3409] vfat: change the default from shortname=lower to shortname=mixed Because, with "shortname=lower", copying one FAT filesystem tree to another FAT filesystem tree using Linux results in semantically different filesystems. (E.g.: Filenames which were once "all uppercase" are now "all lowercase"). 
So, this changes the default of "shortname=lower" to "shortname=mixed". Signed-off-by: Paul Wise [change fat_show_options()] Signed-off-by: OGAWA Hirofumi --- Documentation/filesystems/vfat.txt | 2 +- fs/fat/inode.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/filesystems/vfat.txt b/Documentation/filesystems/vfat.txt index b58b84b50fa2..eed520fd0c8e 100644 --- a/Documentation/filesystems/vfat.txt +++ b/Documentation/filesystems/vfat.txt @@ -102,7 +102,7 @@ shortname=lower|win95|winnt|mixed winnt: emulate the Windows NT rule for display/create. mixed: emulate the Windows NT rule for display, emulate the Windows 95 rule for create. - Default setting is `lower'. + Default setting is `mixed'. tz=UTC -- Interpret timestamps as UTC rather than local time. This option disables the conversion of timestamps diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 8970d8c49bb0..63a5c1a4ee60 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -820,7 +820,7 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt) seq_puts(m, ",shortname=mixed"); break; case VFAT_SFN_DISPLAY_LOWER | VFAT_SFN_CREATE_WIN95: - /* seq_puts(m, ",shortname=lower"); */ + seq_puts(m, ",shortname=lower"); break; default: seq_puts(m, ",shortname=unknown"); @@ -971,7 +971,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug, opts->codepage = fat_default_codepage; opts->iocharset = fat_default_iocharset; if (is_vfat) { - opts->shortname = VFAT_SFN_DISPLAY_LOWER|VFAT_SFN_CREATE_WIN95; + opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95; opts->rodir = 0; } else { opts->shortname = 0; From 5ad73d07173e7b76c16bcb8b6cf64d8386019689 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 10 Aug 2009 12:51:56 +0100 Subject: [PATCH 0026/3409] ARM: 5665/1: U300 syscon register updates This adds a few new registers and defines for improved padmux support, and fixes some figures that were plain wrong on the targeted U300 platforms. 
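To illustrate (snippet only, not part of this patch), a mux setting built from the new PMC2R defines is applied with the usual read-modify-write on the syscon register, for instance routing the APP_MISC_0 pins to MMC:

	u16 val;

	val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC2R);
	val &= ~U300_SYSCON_PMC2R_APP_MISC_0_MASK;
	val |= U300_SYSCON_PMC2R_APP_MISC_0_MMC;
	writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC2R);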
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-u300/include/mach/syscon.h | 120 +++++++++++++++++++++-- 1 file changed, 113 insertions(+), 7 deletions(-) diff --git a/arch/arm/mach-u300/include/mach/syscon.h b/arch/arm/mach-u300/include/mach/syscon.h index 1c90d1b1ccb6..7444f5c7da97 100644 --- a/arch/arm/mach-u300/include/mach/syscon.h +++ b/arch/arm/mach-u300/include/mach/syscon.h @@ -240,8 +240,13 @@ #define U300_SYSCON_PMC1LR_CDI_MASK (0xC000) #define U300_SYSCON_PMC1LR_CDI_CDI (0x0000) #define U300_SYSCON_PMC1LR_CDI_EMIF (0x4000) +#ifdef CONFIG_MACH_U300_BS335 +#define U300_SYSCON_PMC1LR_CDI_CDI2 (0x8000) +#define U300_SYSCON_PMC1LR_CDI_WCDMA_APP_GPIO (0xC000) +#elif CONFIG_MACH_U300_BS365 #define U300_SYSCON_PMC1LR_CDI_GPIO (0x8000) #define U300_SYSCON_PMC1LR_CDI_WCDMA (0xC000) +#endif #define U300_SYSCON_PMC1LR_PDI_MASK (0x3000) #define U300_SYSCON_PMC1LR_PDI_PDI (0x0000) #define U300_SYSCON_PMC1LR_PDI_EGG (0x1000) @@ -345,19 +350,69 @@ #define U300_SYSCON_MMCR_MASK (0x0003) #define U300_SYSCON_MMCR_MMC_FB_CLK_SEL_ENABLE (0x0002) #define U300_SYSCON_MMCR_MSPRO_FREQSEL_ENABLE (0x0001) - +/* Pull up/down control (R/W) */ +#define U300_SYSCON_PUCR (0x104) +#define U300_SYSCON_PUCR_EMIF_1_WAIT_N_PU_ENABLE (0x0200) +#define U300_SYSCON_PUCR_EMIF_1_NFIF_READY_PU_ENABLE (0x0100) +#define U300_SYSCON_PUCR_EMIF_1_16BIT_PU_ENABLE (0x0080) +#define U300_SYSCON_PUCR_EMIF_1_8BIT_PU_ENABLE (0x0040) +#define U300_SYSCON_PUCR_KEY_IN_PU_EN_MASK (0x003F) +/* Padmux 2 control */ +#define U300_SYSCON_PMC2R (0x100) +#define U300_SYSCON_PMC2R_APP_MISC_0_MASK (0x00C0) +#define U300_SYSCON_PMC2R_APP_MISC_0_APP_GPIO (0x0000) +#define U300_SYSCON_PMC2R_APP_MISC_0_EMIF_SDRAM (0x0040) +#define U300_SYSCON_PMC2R_APP_MISC_0_MMC (0x0080) +#define U300_SYSCON_PMC2R_APP_MISC_0_CDI2 (0x00C0) +#define U300_SYSCON_PMC2R_APP_MISC_1_MASK (0x0300) +#define U300_SYSCON_PMC2R_APP_MISC_1_APP_GPIO (0x0000) +#define U300_SYSCON_PMC2R_APP_MISC_1_EMIF_SDRAM (0x0100) +#define U300_SYSCON_PMC2R_APP_MISC_1_MMC (0x0200) +#define U300_SYSCON_PMC2R_APP_MISC_1_CDI2 (0x0300) +#define U300_SYSCON_PMC2R_APP_MISC_2_MASK (0x0C00) +#define U300_SYSCON_PMC2R_APP_MISC_2_APP_GPIO (0x0000) +#define U300_SYSCON_PMC2R_APP_MISC_2_EMIF_SDRAM (0x0400) +#define U300_SYSCON_PMC2R_APP_MISC_2_MMC (0x0800) +#define U300_SYSCON_PMC2R_APP_MISC_2_CDI2 (0x0C00) +#define U300_SYSCON_PMC2R_APP_MISC_3_MASK (0x3000) +#define U300_SYSCON_PMC2R_APP_MISC_3_APP_GPIO (0x0000) +#define U300_SYSCON_PMC2R_APP_MISC_3_EMIF_SDRAM (0x1000) +#define U300_SYSCON_PMC2R_APP_MISC_3_MMC (0x2000) +#define U300_SYSCON_PMC2R_APP_MISC_3_CDI2 (0x3000) +#define U300_SYSCON_PMC2R_APP_MISC_4_MASK (0xC000) +#define U300_SYSCON_PMC2R_APP_MISC_4_APP_GPIO (0x0000) +#define U300_SYSCON_PMC2R_APP_MISC_4_EMIF_SDRAM (0x4000) +#define U300_SYSCON_PMC2R_APP_MISC_4_MMC (0x8000) +#define U300_SYSCON_PMC2R_APP_MISC_4_ACC_GPIO (0xC000) /* TODO: More SYSCON registers missing */ #define U300_SYSCON_PMC3R (0x10c) #define U300_SYSCON_PMC3R_APP_MISC_11_MASK (0xc000) #define U300_SYSCON_PMC3R_APP_MISC_11_SPI (0x4000) #define U300_SYSCON_PMC3R_APP_MISC_10_MASK (0x3000) #define U300_SYSCON_PMC3R_APP_MISC_10_SPI (0x1000) -/* TODO: Missing other configs, I just added the SPI stuff */ - +/* TODO: Missing other configs */ +#define U300_SYSCON_PMC4R (0x168) +#define U300_SYSCON_PMC4R_APP_MISC_12_MASK (0x0003) +#define U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO (0x0000) +#define U300_SYSCON_PMC4R_APP_MISC_13_MASK (0x000C) +#define U300_SYSCON_PMC4R_APP_MISC_13_CDI (0x0000) +#define 
U300_SYSCON_PMC4R_APP_MISC_13_SMIA (0x0004) +#define U300_SYSCON_PMC4R_APP_MISC_13_SMIA2 (0x0008) +#define U300_SYSCON_PMC4R_APP_MISC_13_APP_GPIO (0x000C) +#define U300_SYSCON_PMC4R_APP_MISC_14_MASK (0x0030) +#define U300_SYSCON_PMC4R_APP_MISC_14_CDI (0x0000) +#define U300_SYSCON_PMC4R_APP_MISC_14_SMIA (0x0010) +#define U300_SYSCON_PMC4R_APP_MISC_14_CDI2 (0x0020) +#define U300_SYSCON_PMC4R_APP_MISC_14_APP_GPIO (0x0030) +#define U300_SYSCON_PMC4R_APP_MISC_16_MASK (0x0300) +#define U300_SYSCON_PMC4R_APP_MISC_16_APP_GPIO_13 (0x0000) +#define U300_SYSCON_PMC4R_APP_MISC_16_APP_UART1_CTS (0x0100) +#define U300_SYSCON_PMC4R_APP_MISC_16_EMIF_1_STATIC_CS5_N (0x0200) /* SYS_0_CLK_CONTROL first clock control 16bit (R/W) */ #define U300_SYSCON_S0CCR (0x120) #define U300_SYSCON_S0CCR_FIELD_MASK (0x43FF) #define U300_SYSCON_S0CCR_CLOCK_REQ (0x4000) +#define U300_SYSCON_S0CCR_CLOCK_REQ_MONITOR (0x2000) #define U300_SYSCON_S0CCR_CLOCK_INV (0x0200) #define U300_SYSCON_S0CCR_CLOCK_FREQ_MASK (0x01E0) #define U300_SYSCON_S0CCR_CLOCK_SELECT_MASK (0x001E) @@ -375,6 +430,7 @@ #define U300_SYSCON_S1CCR (0x124) #define U300_SYSCON_S1CCR_FIELD_MASK (0x43FF) #define U300_SYSCON_S1CCR_CLOCK_REQ (0x4000) +#define U300_SYSCON_S1CCR_CLOCK_REQ_MONITOR (0x2000) #define U300_SYSCON_S1CCR_CLOCK_INV (0x0200) #define U300_SYSCON_S1CCR_CLOCK_FREQ_MASK (0x01E0) #define U300_SYSCON_S1CCR_CLOCK_SELECT_MASK (0x001E) @@ -393,6 +449,7 @@ #define U300_SYSCON_S2CCR_FIELD_MASK (0xC3FF) #define U300_SYSCON_S2CCR_CLK_STEAL (0x8000) #define U300_SYSCON_S2CCR_CLOCK_REQ (0x4000) +#define U300_SYSCON_S2CCR_CLOCK_REQ_MONITOR (0x2000) #define U300_SYSCON_S2CCR_CLOCK_INV (0x0200) #define U300_SYSCON_S2CCR_CLOCK_FREQ_MASK (0x01E0) #define U300_SYSCON_S2CCR_CLOCK_SELECT_MASK (0x001E) @@ -425,6 +482,44 @@ #define U300_SYSCON_MCR_PMGEN_CR_0_EMIF_0_SDRAM (0x000C) #define U300_SYSCON_MCR_PM1G_MODE_ENABLE (0x0002) #define U300_SYSCON_MCR_PMTG5_MODE_ENABLE (0x0001) +/* SC_PLL_IRQ_CONTROL 16bit (R/W) */ +#define U300_SYSCON_PICR (0x0130) +#define U300_SYSCON_PICR_MASK (0x00FF) +#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_LOW_ENABLE (0x0080) +#define U300_SYSCON_PICR_FORCE_PLL208_LOCK_HIGH_ENABLE (0x0040) +#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_LOW_ENABLE (0x0020) +#define U300_SYSCON_PICR_FORCE_PLL13_LOCK_HIGH_ENABLE (0x0010) +#define U300_SYSCON_PICR_IRQMASK_PLL13_UNLOCK_ENABLE (0x0008) +#define U300_SYSCON_PICR_IRQMASK_PLL13_LOCK_ENABLE (0x0004) +#define U300_SYSCON_PICR_IRQMASK_PLL208_UNLOCK_ENABLE (0x0002) +#define U300_SYSCON_PICR_IRQMASK_PLL208_LOCK_ENABLE (0x0001) +/* SC_PLL_IRQ_STATUS 16 bit (R/-) */ +#define U300_SYSCON_PISR (0x0134) +#define U300_SYSCON_PISR_MASK (0x000F) +#define U300_SYSCON_PISR_PLL13_UNLOCK_IND (0x0008) +#define U300_SYSCON_PISR_PLL13_LOCK_IND (0x0004) +#define U300_SYSCON_PISR_PLL208_UNLOCK_IND (0x0002) +#define U300_SYSCON_PISR_PLL208_LOCK_IND (0x0001) +/* SC_PLL_IRQ_CLEAR 16 bit (-/W) */ +#define U300_SYSCON_PICLR (0x0138) +#define U300_SYSCON_PICLR_MASK (0x000F) +#define U300_SYSCON_PICLR_RWMASK (0x0000) +#define U300_SYSCON_PICLR_PLL13_UNLOCK_SC (0x0008) +#define U300_SYSCON_PICLR_PLL13_LOCK_SC (0x0004) +#define U300_SYSCON_PICLR_PLL208_UNLOCK_SC (0x0002) +#define U300_SYSCON_PICLR_PLL208_LOCK_SC (0x0001) +/* CAMIF_CONTROL 16 bit (-/W) */ +#define U300_SYSCON_CICR (0x013C) +#define U300_SYSCON_CICR_MASK (0x0FFF) +#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_MASK (0x0F00) +#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_PORT1 (0x0C00) +#define U300_SYSCON_CICR_APP_SUBLVDS_TESTMODE_PORT0 (0x0300) +#define 
U300_SYSCON_CICR_APP_SUBLVDS_RESCON_MASK (0x00F0) +#define U300_SYSCON_CICR_APP_SUBLVDS_RESCON_PORT1 (0x00C0) +#define U300_SYSCON_CICR_APP_SUBLVDS_RESCON_PORT0 (0x0030) +#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_MASK (0x000F) +#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_PORT1 (0x000C) +#define U300_SYSCON_CICR_APP_SUBLVDS_PWR_DWN_N_PORT0 (0x0003) /* Clock activity observability register 0 */ #define U300_SYSCON_C0OAR (0x140) #define U300_SYSCON_C0OAR_MASK (0xFFFF) @@ -513,7 +608,7 @@ /** * CPU medium frequency in MHz */ -#define SYSCON_CPU_CLOCK_MEDIUM 104 +#define SYSCON_CPU_CLOCK_MEDIUM 52 /** * CPU low frequency in MHz */ @@ -527,7 +622,7 @@ /** * EMIF medium frequency in MHz */ -#define SYSCON_EMIF_CLOCK_MEDIUM 104 +#define SYSCON_EMIF_CLOCK_MEDIUM 52 /** * EMIF low frequency in MHz */ @@ -541,7 +636,7 @@ /** * AHB medium frequency in MHz */ -#define SYSCON_AHB_CLOCK_MEDIUM 52 +#define SYSCON_AHB_CLOCK_MEDIUM 26 /** * AHB low frequency in MHz */ @@ -553,6 +648,15 @@ enum syscon_busmaster { SYSCON_BM_VIDEO_ENC }; +/* Select a resistor or a set of resistors */ +enum syscon_pull_up_down { + SYSCON_PU_KEY_IN_EN, + SYSCON_PU_EMIF_1_8_BIT_EN, + SYSCON_PU_EMIF_1_16_BIT_EN, + SYSCON_PU_EMIF_1_NFIF_READY_EN, + SYSCON_PU_EMIF_1_NFIF_WAIT_N_EN, +}; + /* * Note that this array must match the order of the array "clk_reg" * in syscon.c */ @@ -575,6 +679,7 @@ enum syscon_clk { SYSCON_CLKCONTROL_SPI, SYSCON_CLKCONTROL_I2S0_CORE, SYSCON_CLKCONTROL_I2S1_CORE, + SYSCON_CLKCONTROL_UART1, SYSCON_CLKCONTROL_AAIF, SYSCON_CLKCONTROL_AHB, SYSCON_CLKCONTROL_APEX, @@ -604,7 +709,8 @@ enum syscon_sysclk_mode { enum syscon_sysclk_req { SYSCON_SYSCLKREQ_DISABLED, - SYSCON_SYSCLKREQ_ACTIVE_LOW + SYSCON_SYSCLKREQ_ACTIVE_LOW, + SYSCON_SYSCLKREQ_MONITOR }; enum syscon_clk_mode { From df1e0520f9434b5b771c854a13dd928727d8673a Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Mon, 10 Aug 2009 12:52:40 +0100 Subject: [PATCH 0027/3409] ARM: 5666/1: Revamped U300 padmux API This abstracts the hackish padmux API on the U300 platform into something more manageable. It provides a way for drivers to activate/deactivate a certain padmux setting. It will also switch the users of the old API over to using the new style, pushing muxing into the appropriate setup files. 
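A consumer then requests and activates a setting roughly as follows (sketch only; it mirrors what the MMC and SPI setup code in this patch does, with dev being the consumer's struct device):

	struct pmx *pmx;
	int ret;

	pmx = pmx_get(dev, U300_APP_PMX_SPI_SETTING);
	if (IS_ERR(pmx))
		dev_warn(dev, "could not get padmux handle\n");
	else {
		ret = pmx_activate(dev, pmx);
		if (IS_ERR_VALUE(ret))
			dev_warn(dev, "could not activate padmuxing\n");
	}

pmx_deactivate() and pmx_put() undo this when the setting is no longer needed.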
Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-u300/gpio.c | 13 -- arch/arm/mach-u300/mmc.c | 16 ++ arch/arm/mach-u300/padmux.c | 385 ++++++++++++++++++++++++++++++++---- arch/arm/mach-u300/padmux.h | 28 ++- 4 files changed, 387 insertions(+), 55 deletions(-) diff --git a/arch/arm/mach-u300/gpio.c b/arch/arm/mach-u300/gpio.c index 308cdb197a92..63c8f27fb15a 100644 --- a/arch/arm/mach-u300/gpio.c +++ b/arch/arm/mach-u300/gpio.c @@ -25,11 +25,6 @@ #include #include -/* Need access to SYSCON registers for PADmuxing */ -#include - -#include "padmux.h" - /* Reference to GPIO block clock */ static struct clk *clk; @@ -606,14 +601,6 @@ static int __init gpio_probe(struct platform_device *pdev) writel(U300_GPIO_CR_BLOCK_CLKRQ_ENABLE, virtbase + U300_GPIO_CR); #endif - /* Set up some padmuxing here */ -#ifdef CONFIG_MMC - pmx_set_mission_mode_mmc(); -#endif -#ifdef CONFIG_SPI_PL022 - pmx_set_mission_mode_spi(); -#endif - gpio_set_initial_values(); for (num_irqs = 0 ; num_irqs < U300_GPIO_NUM_PORTS; num_irqs++) { diff --git a/arch/arm/mach-u300/mmc.c b/arch/arm/mach-u300/mmc.c index 3138d3955c9e..e66284d73565 100644 --- a/arch/arm/mach-u300/mmc.c +++ b/arch/arm/mach-u300/mmc.c @@ -22,6 +22,7 @@ #include #include "mmc.h" +#include "padmux.h" struct mmci_card_event { struct input_dev *mmc_input; @@ -146,6 +147,7 @@ int __devinit mmc_init(struct amba_device *adev) { struct mmci_card_event *mmci_card; struct device *mmcsd_device = &adev->dev; + struct pmx *pmx; int ret = 0; mmci_card = kzalloc(sizeof(struct mmci_card_event), GFP_KERNEL); @@ -205,6 +207,20 @@ int __devinit mmc_init(struct amba_device *adev) input_set_drvdata(mmci_card->mmc_input, mmci_card); + /* + * Setup padmuxing for MMC. Since this must always be + * compiled into the kernel, pmx is never released. + */ + pmx = pmx_get(mmcsd_device, U300_APP_PMX_MMC_SETTING); + + if (IS_ERR(pmx)) + pr_warning("Could not get padmux handle\n"); + else { + ret = pmx_activate(mmcsd_device, pmx); + if (IS_ERR_VALUE(ret)) + pr_warning("Could not activate padmuxing\n"); + } + ret = gpio_register_callback(U300_GPIO_PIN_MMC_CD, mmci_callback, mmci_card); diff --git a/arch/arm/mach-u300/padmux.c b/arch/arm/mach-u300/padmux.c index f3664564f086..4c93c6cefd37 100644 --- a/arch/arm/mach-u300/padmux.c +++ b/arch/arm/mach-u300/padmux.c @@ -6,53 +6,362 @@ * Copyright (C) 2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * U300 PADMUX functions - * Author: Linus Walleij - * + * Author: Martin Persson */ -#include + +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include #include - #include "padmux.h" -/* Set the PAD MUX to route the MMC reader correctly to GPIO0. 
*/ -void pmx_set_mission_mode_mmc(void) -{ - u16 val; +static DEFINE_MUTEX(pmx_mutex); - val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1LR); - val &= ~U300_SYSCON_PMC1LR_MMCSD_MASK; - writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1LR); - val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1HR); - val &= ~U300_SYSCON_PMC1HR_APP_GPIO_1_MASK; - val |= U300_SYSCON_PMC1HR_APP_GPIO_1_MMC; - writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1HR); +const u32 pmx_registers[] = { + (U300_SYSCON_VBASE + U300_SYSCON_PMC1LR), + (U300_SYSCON_VBASE + U300_SYSCON_PMC1HR), + (U300_SYSCON_VBASE + U300_SYSCON_PMC2R), + (U300_SYSCON_VBASE + U300_SYSCON_PMC3R), + (U300_SYSCON_VBASE + U300_SYSCON_PMC4R) +}; + +/* High level functionality */ + +/* Lazy dog: + * onmask = { + * {"PMC1LR" mask, "PMC1LR" value}, + * {"PMC1HR" mask, "PMC1HR" value}, + * {"PMC2R" mask, "PMC2R" value}, + * {"PMC3R" mask, "PMC3R" value}, + * {"PMC4R" mask, "PMC4R" value} + * } + */ +static struct pmx mmc_setting = { + .setting = U300_APP_PMX_MMC_SETTING, + .default_on = false, + .activated = false, + .name = "MMC", + .onmask = { + {U300_SYSCON_PMC1LR_MMCSD_MASK, + U300_SYSCON_PMC1LR_MMCSD_MMCSD}, + {0, 0}, + {0, 0}, + {0, 0}, + {U300_SYSCON_PMC4R_APP_MISC_12_MASK, + U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO} + }, +}; + +static struct pmx spi_setting = { + .setting = U300_APP_PMX_SPI_SETTING, + .default_on = false, + .activated = false, + .name = "SPI", + .onmask = {{0, 0}, + {U300_SYSCON_PMC1HR_APP_SPI_2_MASK | + U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK | + U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK, + U300_SYSCON_PMC1HR_APP_SPI_2_SPI | + U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI | + U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI}, + {0, 0}, + {0, 0}, + {0, 0} + }, +}; + +/* Available padmux settings */ +static struct pmx *pmx_settings[] = { + &mmc_setting, + &spi_setting, +}; + +static void update_registers(struct pmx *pmx, bool activate) +{ + u16 regval, val, mask; + int i; + + for (i = 0; i < ARRAY_SIZE(pmx_registers); i++) { + if (activate) + val = pmx->onmask[i].val; + else + val = 0; + + mask = pmx->onmask[i].mask; + if (mask != 0) { + regval = readw(pmx_registers[i]); + regval &= ~mask; + regval |= val; + writew(regval, pmx_registers[i]); + } + } } -void pmx_set_mission_mode_spi(void) +struct pmx *pmx_get(struct device *dev, enum pmx_settings setting) { - u16 val; + int i; + struct pmx *pmx = ERR_PTR(-ENOENT); - /* Set up padmuxing so the SPI port and its chipselects are active */ - val = readw(U300_SYSCON_VBASE + U300_SYSCON_PMC1HR); - /* - * Activate the SPI port (disable the use of these pins for generic - * GPIO, DSP, AAIF - */ - val &= ~U300_SYSCON_PMC1HR_APP_SPI_2_MASK; - val |= U300_SYSCON_PMC1HR_APP_SPI_2_SPI; - /* - * Use GPIO pin SPI CS1 for CS1 actually (it can be used for other - * things also) - */ - val &= ~U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK; - val |= U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI; - /* - * Use GPIO pin SPI CS2 for CS2 actually (it can be used for other - * things also) - */ - val &= ~U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK; - val |= U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI; - writew(val, U300_SYSCON_VBASE + U300_SYSCON_PMC1HR); + if (dev == NULL) + return ERR_PTR(-EINVAL); + + mutex_lock(&pmx_mutex); + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + + if (setting == pmx_settings[i]->setting) { + + if (pmx_settings[i]->dev != NULL) { + WARN(1, "padmux: required setting " + "in use by another consumer\n"); + } else { + pmx = pmx_settings[i]; + pmx->dev = dev; + dev_dbg(dev, "padmux: setting nr %d is now " + "bound to %s and ready to use\n", + 
setting, dev_name(dev)); + break; + } + } + } + mutex_unlock(&pmx_mutex); + + return pmx; } +EXPORT_SYMBOL(pmx_get); + +int pmx_put(struct device *dev, struct pmx *pmx) +{ + int i; + int ret = -ENOENT; + + if (pmx == NULL || dev == NULL) + return -EINVAL; + + mutex_lock(&pmx_mutex); + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + + if (pmx->setting == pmx_settings[i]->setting) { + + if (dev != pmx->dev) { + WARN(1, "padmux: cannot release handle as " + "it is bound to another consumer\n"); + ret = -EINVAL; + break; + } else { + pmx_settings[i]->dev = NULL; + ret = 0; + break; + } + } + } + mutex_unlock(&pmx_mutex); + + return ret; +} +EXPORT_SYMBOL(pmx_put); + +int pmx_activate(struct device *dev, struct pmx *pmx) +{ + int i, j, ret; + ret = 0; + + if (pmx == NULL || dev == NULL) + return -EINVAL; + + mutex_lock(&pmx_mutex); + + /* Make sure the required bits are not used */ + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + + if (pmx_settings[i]->dev == NULL || pmx_settings[i] == pmx) + continue; + + for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) { + + if (pmx_settings[i]->onmask[j].mask & pmx-> + onmask[j].mask) { + /* More than one entry on the same bits */ + WARN(1, "padmux: cannot activate " + "setting. Bit conflict with " + "an active setting\n"); + + ret = -EUSERS; + goto exit; + } + } + } + update_registers(pmx, true); + pmx->activated = true; + dev_dbg(dev, "padmux: setting nr %d is activated\n", + pmx->setting); + +exit: + mutex_unlock(&pmx_mutex); + return ret; +} +EXPORT_SYMBOL(pmx_activate); + +int pmx_deactivate(struct device *dev, struct pmx *pmx) +{ + int i; + int ret = -ENOENT; + + if (pmx == NULL || dev == NULL) + return -EINVAL; + + mutex_lock(&pmx_mutex); + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + + if (pmx_settings[i]->dev == NULL) + continue; + + if (pmx->setting == pmx_settings[i]->setting) { + + if (dev != pmx->dev) { + WARN(1, "padmux: cannot deactivate " + "pmx setting as it was activated " + "by another consumer\n"); + + ret = -EBUSY; + continue; + } else { + update_registers(pmx, false); + pmx_settings[i]->dev = NULL; + pmx->activated = false; + ret = 0; + dev_dbg(dev, "padmux: setting nr %d is deactivated", + pmx->setting); + break; + } + } + } + mutex_unlock(&pmx_mutex); + + return ret; +} +EXPORT_SYMBOL(pmx_deactivate); + +/* + * For internal use only. If it is to be exported, + * it should be reentrant. Notice that pmx_activate + * (i.e. runtime settings) always override default settings. 
+ */ +static int pmx_set_default(void) +{ + /* Used to identify several entries on the same bits */ + u16 modbits[ARRAY_SIZE(pmx_registers)]; + + int i, j; + + memset(modbits, 0, ARRAY_SIZE(pmx_registers) * sizeof(u16)); + + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + + if (!pmx_settings[i]->default_on) + continue; + + for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) { + + /* Make sure there is only one entry on the same bits */ + if (modbits[j] & pmx_settings[i]->onmask[j].mask) { + BUG(); + return -EUSERS; + } + modbits[j] |= pmx_settings[i]->onmask[j].mask; + } + update_registers(pmx_settings[i], true); + } + return 0; +} + +#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG)) +static int pmx_show(struct seq_file *s, void *data) +{ + int i; + seq_printf(s, "-------------------------------------------------\n"); + seq_printf(s, "SETTING BOUND TO DEVICE STATE\n"); + seq_printf(s, "-------------------------------------------------\n"); + mutex_lock(&pmx_mutex); + for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) { + /* Format pmx and device name nicely */ + char cdp[33]; + int chars; + + chars = snprintf(&cdp[0], 17, "%s", pmx_settings[i]->name); + while (chars < 16) { + cdp[chars] = ' '; + chars++; + } + chars = snprintf(&cdp[16], 17, "%s", pmx_settings[i]->dev ? + dev_name(pmx_settings[i]->dev) : "N/A"); + while (chars < 16) { + cdp[chars+16] = ' '; + chars++; + } + cdp[32] = '\0'; + + seq_printf(s, + "%s\t%s\n", + &cdp[0], + pmx_settings[i]->activated ? + "ACTIVATED" : "DEACTIVATED" + ); + + } + mutex_unlock(&pmx_mutex); + return 0; +} + +static int pmx_open(struct inode *inode, struct file *file) +{ + return single_open(file, pmx_show, NULL); +} + +static const struct file_operations pmx_operations = { + .owner = THIS_MODULE, + .open = pmx_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init init_pmx_read_debugfs(void) +{ + /* Expose a simple debugfs interface to view pmx settings */ + (void) debugfs_create_file("padmux", S_IFREG | S_IRUGO, + NULL, NULL, + &pmx_operations); + return 0; +} + +/* + * This needs to come in after the core_initcall(), + * because debugfs is not available until + * the subsystems come up. 
+ */ +module_init(init_pmx_read_debugfs); +#endif + +static int __init pmx_init(void) +{ + int ret; + + ret = pmx_set_default(); + + if (IS_ERR_VALUE(ret)) + pr_crit("padmux: default settings could not be set\n"); + + return 0; +} + +/* Should be initialized before consumers */ +core_initcall(pmx_init); diff --git a/arch/arm/mach-u300/padmux.h b/arch/arm/mach-u300/padmux.h index 8c2099ac5046..6e8b86064097 100644 --- a/arch/arm/mach-u300/padmux.h +++ b/arch/arm/mach-u300/padmux.h @@ -6,14 +6,34 @@ * Copyright (C) 2009 ST-Ericsson AB * License terms: GNU General Public License (GPL) version 2 * U300 PADMUX API - * Author: Linus Walleij - * + * Author: Martin Persson */ #ifndef __MACH_U300_PADMUX_H #define __MACH_U300_PADMUX_H -void pmx_set_mission_mode_mmc(void); -void pmx_set_mission_mode_spi(void); +enum pmx_settings { + U300_APP_PMX_MMC_SETTING, + U300_APP_PMX_SPI_SETTING +}; + +struct pmx_onmask { + u16 mask; /* Mask bits */ + u16 val; /* Value when active */ +}; + +struct pmx { + struct device *dev; + enum pmx_settings setting; + char *name; + bool activated; + bool default_on; + struct pmx_onmask onmask[]; +}; + +struct pmx *pmx_get(struct device *dev, enum pmx_settings setting); +int pmx_put(struct device *dev, struct pmx *pmx); +int pmx_activate(struct device *dev, struct pmx *pmx); +int pmx_deactivate(struct device *dev, struct pmx *pmx); #endif From c7c8c78fdf6e9bd65d8ee879115dc2cd5d9fd0dc Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Fri, 14 Aug 2009 10:59:05 +0100 Subject: [PATCH 0028/3409] ARM: 5667/3: U300 SSP/SPI board setup and test This adds a U300 board configuration for the PL022 SSP/SPI PrimeCell driver recently merged to the 2.6.31-rc series. Further it adds a dummy loopback SPI chip that can be used for testing the SPI functionality on a running system using the loopback mode of PL022. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-u300/Kconfig | 12 ++ arch/arm/mach-u300/Makefile | 2 + arch/arm/mach-u300/core.c | 6 + arch/arm/mach-u300/dummyspichip.c | 290 ++++++++++++++++++++++++++++++ arch/arm/mach-u300/spi.c | 124 +++++++++++++ arch/arm/mach-u300/spi.h | 26 +++ 6 files changed, 460 insertions(+) create mode 100644 arch/arm/mach-u300/dummyspichip.c create mode 100644 arch/arm/mach-u300/spi.c create mode 100644 arch/arm/mach-u300/spi.h diff --git a/arch/arm/mach-u300/Kconfig b/arch/arm/mach-u300/Kconfig index 337b9aabce49..801b21e7f677 100644 --- a/arch/arm/mach-u300/Kconfig +++ b/arch/arm/mach-u300/Kconfig @@ -81,6 +81,18 @@ config MACH_U300_SEMI_IS_SHARED Memory Interface) from both from access and application side. +config MACH_U300_SPIDUMMY + bool "SSP/SPI dummy chip" + select SPI + select SPI_MASTER + select SPI_PL022 + help + This creates a small kernel module that creates a dummy + SPI device to be used for loopback tests. Regularly used + to test reference designs. If you're not testing SPI, + you don't need it. Selecting this will activate the + SPI framework and ARM PL022 support. 
+ comment "All the settings below must match the bootloader's settings" config MACH_U300_ACCESS_MEM_SIZE diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile index 24950e0df4b4..4941f89ea798 100644 --- a/arch/arm/mach-u300/Makefile +++ b/arch/arm/mach-u300/Makefile @@ -9,3 +9,5 @@ obj- := obj-$(CONFIG_ARCH_U300) += u300.o obj-$(CONFIG_MMC) += mmc.o +obj-$(CONFIG_SPI_PL022) += spi.o +obj-$(CONFIG_MACH_U300_SPIDUMMY) += dummyspichip.o diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 89b3ccf35e1b..38d08a1c25d0 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -32,6 +32,7 @@ #include "clock.h" #include "mmc.h" +#include "spi.h" /* * Static I/O mappings that are needed for booting the U300 platforms. The @@ -611,6 +612,8 @@ void __init u300_init_devices(void) /* Wait for the PLL208 to lock if not locked in yet */ while (!(readw(U300_SYSCON_VBASE + U300_SYSCON_CSR) & U300_SYSCON_CSR_PLL208_LOCK_IND)); + /* Initialize SPI device with some board specifics */ + u300_spi_init(&pl022_device); /* Register the AMBA devices in the AMBA bus abstraction layer */ u300_clock_primecells(); @@ -622,6 +625,9 @@ void __init u300_init_devices(void) u300_assign_physmem(); + /* Register subdevices on the SPI bus */ + u300_spi_register_board_devices(); + /* Register the platform devices */ platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); diff --git a/arch/arm/mach-u300/dummyspichip.c b/arch/arm/mach-u300/dummyspichip.c new file mode 100644 index 000000000000..962f9de454de --- /dev/null +++ b/arch/arm/mach-u300/dummyspichip.c @@ -0,0 +1,290 @@ +/* + * arch/arm/mach-u300/dummyspichip.c + * + * Copyright (C) 2007-2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * This is a dummy loopback SPI "chip" used for testing SPI. + * Author: Linus Walleij + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +/* + * WARNING! Do not include this pl022-specific controller header + * for any generic driver. It is only done in this dummy chip + * because we alter the chip configuration in order to test some + * different settings on the loopback device. Normal chip configs + * shall be STATIC and not altered by the driver! + */ +#include + +struct dummy { + struct device *dev; + struct mutex lock; +}; + +#define DMA_TEST_SIZE 2048 + +/* When we cat /sys/bus/spi/devices/spi0.0/looptest this will be triggered */ +static ssize_t dummy_looptest(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct spi_device *spi = to_spi_device(dev); + struct dummy *p_dummy = dev_get_drvdata(&spi->dev); + + /* + * WARNING! Do not dereference the chip-specific data in any normal + * driver for a chip. It is usually STATIC and shall not be read + * or written to. Your chip driver should NOT depend on fields in this + * struct, this is just used here to alter the behaviour of the chip + * in order to perform tests. 
+ */ + struct pl022_config_chip *chip_info = spi->controller_data; + int status; + u8 txbuf[14] = {0xDE, 0xAD, 0xBE, 0xEF, 0x2B, 0xAD, + 0xCA, 0xFE, 0xBA, 0xBE, 0xB1, 0x05, + 0xF0, 0x0D}; + u8 rxbuf[14]; + u8 *bigtxbuf_virtual; + u8 *bigrxbuf_virtual; + + if (mutex_lock_interruptible(&p_dummy->lock)) + return -ERESTARTSYS; + + bigtxbuf_virtual = kmalloc(DMA_TEST_SIZE, GFP_KERNEL); + if (bigtxbuf_virtual == NULL) { + status = -ENOMEM; + goto out; + } + bigrxbuf_virtual = kmalloc(DMA_TEST_SIZE, GFP_KERNEL); + + /* Fill TXBUF with some happy pattern */ + memset(bigtxbuf_virtual, 0xAA, DMA_TEST_SIZE); + + /* + * Force chip to 8 bit mode + * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC! + */ + chip_info->data_size = SSP_DATA_BITS_8; + /* You should NOT DO THIS EITHER */ + spi->master->setup(spi); + + /* Now run the tests for 8bit mode */ + pr_info("Simple test 1: write 0xAA byte, read back garbage byte " + "in 8bit mode\n"); + status = spi_w8r8(spi, 0xAA); + if (status < 0) + pr_warning("Simple test 1: FAILURE: spi_write_then_read " + "failed with status %d\n", status); + else + pr_info("Simple test 1: SUCCESS!\n"); + + pr_info("Simple test 2: write 8 bytes, read back 8 bytes garbage " + "in 8bit mode (full FIFO)\n"); + status = spi_write_then_read(spi, &txbuf[0], 8, &rxbuf[0], 8); + if (status < 0) + pr_warning("Simple test 2: FAILURE: spi_write_then_read() " + "failed with status %d\n", status); + else + pr_info("Simple test 2: SUCCESS!\n"); + + pr_info("Simple test 3: write 14 bytes, read back 14 bytes garbage " + "in 8bit mode (see if we overflow FIFO)\n"); + status = spi_write_then_read(spi, &txbuf[0], 14, &rxbuf[0], 14); + if (status < 0) + pr_warning("Simple test 3: FAILURE: failed with status %d " + "(probably FIFO overrun)\n", status); + else + pr_info("Simple test 3: SUCCESS!\n"); + + pr_info("Simple test 4: write 8 bytes with spi_write(), read 8 " + "bytes garbage with spi_read() in 8bit mode\n"); + status = spi_write(spi, &txbuf[0], 8); + if (status < 0) + pr_warning("Simple test 4 step 1: FAILURE: spi_write() " + "failed with status %d\n", status); + else + pr_info("Simple test 4 step 1: SUCCESS!\n"); + status = spi_read(spi, &rxbuf[0], 8); + if (status < 0) + pr_warning("Simple test 4 step 2: FAILURE: spi_read() " + "failed with status %d\n", status); + else + pr_info("Simple test 4 step 2: SUCCESS!\n"); + + pr_info("Simple test 5: write 14 bytes with spi_write(), read " + "14 bytes garbage with spi_read() in 8bit mode\n"); + status = spi_write(spi, &txbuf[0], 14); + if (status < 0) + pr_warning("Simple test 5 step 1: FAILURE: spi_write() " + "failed with status %d (probably FIFO overrun)\n", + status); + else + pr_info("Simple test 5 step 1: SUCCESS!\n"); + status = spi_read(spi, &rxbuf[0], 14); + if (status < 0) + pr_warning("Simple test 5 step 2: FAILURE: spi_read() " + "failed with status %d (probably FIFO overrun)\n", + status); + else + pr_info("Simple test 5: SUCCESS!\n"); + + pr_info("Simple test 6: write %d bytes with spi_write(), " + "read %d bytes garbage with spi_read() in 8bit mode\n", + DMA_TEST_SIZE, DMA_TEST_SIZE); + status = spi_write(spi, &bigtxbuf_virtual[0], DMA_TEST_SIZE); + if (status < 0) + pr_warning("Simple test 6 step 1: FAILURE: spi_write() " + "failed with status %d (probably FIFO overrun)\n", + status); + else + pr_info("Simple test 6 step 1: SUCCESS!\n"); + status = spi_read(spi, &bigrxbuf_virtual[0], DMA_TEST_SIZE); + if (status < 0) + pr_warning("Simple test 6 step 2: FAILURE: spi_read() " + "failed with status %d (probably FIFO 
overrun)\n", + status); + else + pr_info("Simple test 6: SUCCESS!\n"); + + + /* + * Force chip to 16 bit mode + * WARNING: NEVER DO THIS IN REAL DRIVER CODE, THIS SHOULD BE STATIC! + */ + chip_info->data_size = SSP_DATA_BITS_16; + /* You should NOT DO THIS EITHER */ + spi->master->setup(spi); + + pr_info("Simple test 7: write 0xAA byte, read back garbage byte " + "in 16bit bus mode\n"); + status = spi_w8r8(spi, 0xAA); + if (status == -EIO) + pr_info("Simple test 7: SUCCESS! (expected failure with " + "status EIO)\n"); + else if (status < 0) + pr_warning("Siple test 7: FAILURE: spi_write_then_read " + "failed with status %d\n", status); + else + pr_warning("Siple test 7: FAILURE: spi_write_then_read " + "succeeded but it was expected to fail!\n"); + + pr_info("Simple test 8: write 8 bytes, read back 8 bytes garbage " + "in 16bit mode (full FIFO)\n"); + status = spi_write_then_read(spi, &txbuf[0], 8, &rxbuf[0], 8); + if (status < 0) + pr_warning("Simple test 8: FAILURE: spi_write_then_read() " + "failed with status %d\n", status); + else + pr_info("Simple test 8: SUCCESS!\n"); + + pr_info("Simple test 9: write 14 bytes, read back 14 bytes garbage " + "in 16bit mode (see if we overflow FIFO)\n"); + status = spi_write_then_read(spi, &txbuf[0], 14, &rxbuf[0], 14); + if (status < 0) + pr_warning("Simple test 9: FAILURE: failed with status %d " + "(probably FIFO overrun)\n", status); + else + pr_info("Simple test 9: SUCCESS!\n"); + + pr_info("Simple test 10: write %d bytes with spi_write(), " + "read %d bytes garbage with spi_read() in 16bit mode\n", + DMA_TEST_SIZE, DMA_TEST_SIZE); + status = spi_write(spi, &bigtxbuf_virtual[0], DMA_TEST_SIZE); + if (status < 0) + pr_warning("Simple test 10 step 1: FAILURE: spi_write() " + "failed with status %d (probably FIFO overrun)\n", + status); + else + pr_info("Simple test 10 step 1: SUCCESS!\n"); + + status = spi_read(spi, &bigrxbuf_virtual[0], DMA_TEST_SIZE); + if (status < 0) + pr_warning("Simple test 10 step 2: FAILURE: spi_read() " + "failed with status %d (probably FIFO overrun)\n", + status); + else + pr_info("Simple test 10: SUCCESS!\n"); + + status = sprintf(buf, "loop test complete\n"); + kfree(bigrxbuf_virtual); + kfree(bigtxbuf_virtual); + out: + mutex_unlock(&p_dummy->lock); + return status; +} + +static DEVICE_ATTR(looptest, S_IRUGO, dummy_looptest, NULL); + +static int __devinit pl022_dummy_probe(struct spi_device *spi) +{ + struct dummy *p_dummy; + int status; + + dev_info(&spi->dev, "probing dummy SPI device\n"); + + p_dummy = kzalloc(sizeof *p_dummy, GFP_KERNEL); + if (!p_dummy) + return -ENOMEM; + + dev_set_drvdata(&spi->dev, p_dummy); + mutex_init(&p_dummy->lock); + + /* sysfs hook */ + status = device_create_file(&spi->dev, &dev_attr_looptest); + if (status) { + dev_dbg(&spi->dev, "device_create_file looptest failure.\n"); + goto out_dev_create_looptest_failed; + } + + return 0; + +out_dev_create_looptest_failed: + dev_set_drvdata(&spi->dev, NULL); + kfree(p_dummy); + return status; +} + +static int __devexit pl022_dummy_remove(struct spi_device *spi) +{ + struct dummy *p_dummy = dev_get_drvdata(&spi->dev); + + dev_info(&spi->dev, "removing dummy SPI device\n"); + device_remove_file(&spi->dev, &dev_attr_looptest); + dev_set_drvdata(&spi->dev, NULL); + kfree(p_dummy); + + return 0; +} + +static struct spi_driver pl022_dummy_driver = { + .driver = { + .name = "spi-dummy", + .owner = THIS_MODULE, + }, + .probe = pl022_dummy_probe, + .remove = __devexit_p(pl022_dummy_remove), +}; + +static int __init pl022_init_dummy(void) +{ + return 
spi_register_driver(&pl022_dummy_driver); +} + +static void __exit pl022_exit_dummy(void) +{ + spi_unregister_driver(&pl022_dummy_driver); +} + +module_init(pl022_init_dummy); +module_exit(pl022_exit_dummy); + +MODULE_AUTHOR("Linus Walleij "); +MODULE_DESCRIPTION("PL022 SSP/SPI DUMMY Linux driver"); +MODULE_LICENSE("GPL"); diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c new file mode 100644 index 000000000000..307d007ea7f3 --- /dev/null +++ b/arch/arm/mach-u300/spi.c @@ -0,0 +1,124 @@ +/* + * arch/arm/mach-u300/spi.c + * + * Copyright (C) 2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * Author: Linus Walleij + */ +#include +#include +#include +#include +#include +#include "padmux.h" + +/* + * The following is for the actual devices on the SSP/SPI bus + */ +#ifdef CONFIG_MACH_U300_SPIDUMMY +static void select_dummy_chip(u32 chipselect) +{ + pr_debug("CORE: %s called with CS=0x%x (%s)\n", + __func__, + chipselect, + chipselect ? "unselect chip" : "select chip"); + /* + * Here you would write the chip select value to the GPIO pins if + * this was a real chip (but this is a loopback dummy). + */ +} + +struct pl022_config_chip dummy_chip_info = { + /* Nominally this is LOOPBACK_DISABLED, but this is our dummy chip! */ + .lbm = LOOPBACK_ENABLED, + /* + * available POLLING_TRANSFER and INTERRUPT_TRANSFER, + * DMA_TRANSFER does not work + */ + .com_mode = INTERRUPT_TRANSFER, + .iface = SSP_INTERFACE_MOTOROLA_SPI, + /* We can only act as master but SSP_SLAVE is possible in theory */ + .hierarchy = SSP_MASTER, + /* 0 = drive TX even as slave, 1 = do not drive TX as slave */ + .slave_tx_disable = 0, + /* LSB first */ + .endian_tx = SSP_TX_LSB, + .endian_rx = SSP_RX_LSB, + .data_size = SSP_DATA_BITS_8, /* used to be 12 in some default */ + .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, + .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, + .clk_phase = SSP_CLK_FALLING_EDGE, + .clk_pol = SSP_CLK_POL_IDLE_LOW, + .ctrl_len = SSP_BITS_12, + .wait_state = SSP_MWIRE_WAIT_ZERO, + .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, + /* + * This is where you insert a call to a function to enable CS + * (usually GPIO) for a certain chip. + */ + .cs_control = select_dummy_chip, +}; +#endif + +static struct spi_board_info u300_spi_devices[] = { +#ifdef CONFIG_MACH_U300_SPIDUMMY + { + /* A dummy chip used for loopback tests */ + .modalias = "spi-dummy", + /* Really dummy, pass in additional chip config here */ + .platform_data = NULL, + /* This defines how the controller shall handle the device */ + .controller_data = &dummy_chip_info, + /* .irq - no external IRQ routed from this device */ + .max_speed_hz = 1000000, + .bus_num = 0, /* Only one bus on this chip */ + .chip_select = 0, + /* Means SPI_CS_HIGH, change if e.g low CS */ + .mode = 0, + }, +#endif +}; + +static struct pl022_ssp_controller ssp_platform_data = { + /* If you have several SPI buses this varies, we have only bus 0 */ + .bus_id = 0, + /* Set this to 1 when we think we got DMA working */ + .enable_dma = 0, + /* + * On the APP CPU GPIO 4, 5 and 6 are connected as generic + * chip selects for SPI. (Same on U330, U335 and U365.) + * TODO: make sure the GPIO driver can select these properly + * and do padmuxing accordingly too. + */ + .num_chipselect = 3, +}; + + +void __init u300_spi_init(struct amba_device *adev) +{ + struct pmx *pmx; + + adev->dev.platform_data = &ssp_platform_data; + /* + * Setup padmuxing for SPI. Since this must always be + * compiled into the kernel, pmx is never released. 
+ */ + pmx = pmx_get(&adev->dev, U300_APP_PMX_SPI_SETTING); + + if (IS_ERR(pmx)) + dev_warn(&adev->dev, "Could not get padmux handle\n"); + else { + int ret; + + ret = pmx_activate(&adev->dev, pmx); + if (IS_ERR_VALUE(ret)) + dev_warn(&adev->dev, "Could not activate padmuxing\n"); + } + +} +void __init u300_spi_register_board_devices(void) +{ + /* Register any SPI devices */ + spi_register_board_info(u300_spi_devices, ARRAY_SIZE(u300_spi_devices)); +} diff --git a/arch/arm/mach-u300/spi.h b/arch/arm/mach-u300/spi.h new file mode 100644 index 000000000000..bd3d867e240f --- /dev/null +++ b/arch/arm/mach-u300/spi.h @@ -0,0 +1,26 @@ +/* + * arch/arm/mach-u300/spi.h + * + * Copyright (C) 2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * Author: Linus Walleij + */ +#ifndef SPI_H +#define SPI_H +#include + +#ifdef CONFIG_SPI_PL022 +void __init u300_spi_init(struct amba_device *adev); +void __init u300_spi_register_board_devices(void); +#else +/* Compile out SPI support if PL022 is not selected */ +static inline void __init u300_spi_init(struct amba_device *adev) +{ +} +static inline void __init u300_spi_register_board_devices(void) +{ +} +#endif + +#endif From 6be2a0cacc1ed6899a53172e2e9b7a3587be0bea Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Thu, 13 Aug 2009 21:42:01 +0100 Subject: [PATCH 0029/3409] ARM: 5668/2: U300 I2C board setup This sets up the U300 I2C subdevices so that the AB3100 analog baseband ASIC is properly detected and also the camera devices in the U335 reference design get properly registered. Signed-off-by: Linus Walleij Signed-off-by: Russell King --- arch/arm/mach-u300/Makefile | 1 + arch/arm/mach-u300/core.c | 8 +++++-- arch/arm/mach-u300/i2c.c | 43 +++++++++++++++++++++++++++++++++++++ arch/arm/mach-u300/i2c.h | 23 ++++++++++++++++++++ 4 files changed, 73 insertions(+), 2 deletions(-) create mode 100644 arch/arm/mach-u300/i2c.c create mode 100644 arch/arm/mach-u300/i2c.h diff --git a/arch/arm/mach-u300/Makefile b/arch/arm/mach-u300/Makefile index 4941f89ea798..885b5c027c1e 100644 --- a/arch/arm/mach-u300/Makefile +++ b/arch/arm/mach-u300/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_ARCH_U300) += u300.o obj-$(CONFIG_MMC) += mmc.o obj-$(CONFIG_SPI_PL022) += spi.o obj-$(CONFIG_MACH_U300_SPIDUMMY) += dummyspichip.o +obj-$(CONFIG_I2C_STU300) += i2c.o diff --git a/arch/arm/mach-u300/core.c b/arch/arm/mach-u300/core.c index 38d08a1c25d0..bad1ba228cbf 100644 --- a/arch/arm/mach-u300/core.c +++ b/arch/arm/mach-u300/core.c @@ -33,6 +33,7 @@ #include "clock.h" #include "mmc.h" #include "spi.h" +#include "i2c.h" /* * Static I/O mappings that are needed for booting the U300 platforms. 
The @@ -379,14 +380,14 @@ static struct platform_device wdog_device = { }; static struct platform_device i2c0_device = { - .name = "stddci2c", + .name = "stu300", .id = 0, .num_resources = ARRAY_SIZE(i2c0_resources), .resource = i2c0_resources, }; static struct platform_device i2c1_device = { - .name = "stddci2c", + .name = "stu300", .id = 1, .num_resources = ARRAY_SIZE(i2c1_resources), .resource = i2c1_resources, @@ -625,6 +626,9 @@ void __init u300_init_devices(void) u300_assign_physmem(); + /* Register subdevices on the I2C buses */ + u300_i2c_register_board_devices(); + /* Register subdevices on the SPI bus */ u300_spi_register_board_devices(); diff --git a/arch/arm/mach-u300/i2c.c b/arch/arm/mach-u300/i2c.c new file mode 100644 index 000000000000..10be1f888b27 --- /dev/null +++ b/arch/arm/mach-u300/i2c.c @@ -0,0 +1,43 @@ +/* + * arch/arm/mach-u300/i2c.c + * + * Copyright (C) 2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * Register board i2c devices + * Author: Linus Walleij + */ +#include +#include +#include + +static struct i2c_board_info __initdata bus0_i2c_board_info[] = { + { + .type = "ab3100", + .addr = 0x48, + .irq = IRQ_U300_IRQ0_EXT, + }, +}; + +static struct i2c_board_info __initdata bus1_i2c_board_info[] = { +#ifdef CONFIG_MACH_U300_BS335 + { + .type = "fwcam", + .addr = 0x10, + }, + { + .type = "fwcam", + .addr = 0x5d, + }, +#else + { }, +#endif +}; + +void __init u300_i2c_register_board_devices(void) +{ + i2c_register_board_info(0, bus0_i2c_board_info, + ARRAY_SIZE(bus0_i2c_board_info)); + i2c_register_board_info(1, bus1_i2c_board_info, + ARRAY_SIZE(bus1_i2c_board_info)); +} diff --git a/arch/arm/mach-u300/i2c.h b/arch/arm/mach-u300/i2c.h new file mode 100644 index 000000000000..485c02e5c06d --- /dev/null +++ b/arch/arm/mach-u300/i2c.h @@ -0,0 +1,23 @@ +/* + * arch/arm/mach-u300/i2c.h + * + * Copyright (C) 2009 ST-Ericsson AB + * License terms: GNU General Public License (GPL) version 2 + * + * Register board i2c devices + * Author: Linus Walleij + */ + +#ifndef MACH_U300_I2C_H +#define MACH_U300_I2C_H + +#ifdef CONFIG_I2C_STU300 +void __init u300_i2c_register_board_devices(void); +#else +/* Compile out this stuff if no I2C adapter is available */ +static inline void __init u300_i2c_register_board_devices(void) +{ +} +#endif + +#endif From 9e2a7e6f5f1091965c78e01e87af05607bbf937f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 18 Aug 2009 13:47:30 -0700 Subject: [PATCH 0030/3409] iop33x: update defconfig (default atu to on) By default the iop3xx configurations are set to boot the platforms over an nfs root configuration. Since commit c34002c1 "iop: unconditionally initialize the ATU on platforms known to be 'hosts'" this configuration also requires iop3xx_init_atu=y to be specified on the kernel command line. 
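For reference, the resulting default command line in the defconfig below is:

	console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc iop3xx_init_atu=y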
Signed-off-by: Dan Williams --- arch/arm/configs/iop33x_defconfig | 558 +++++++++++++++++++++++------- 1 file changed, 431 insertions(+), 127 deletions(-) diff --git a/arch/arm/configs/iop33x_defconfig b/arch/arm/configs/iop33x_defconfig index eec488298267..ed2d59d01829 100644 --- a/arch/arm/configs/iop33x_defconfig +++ b/arch/arm/configs/iop33x_defconfig @@ -1,29 +1,26 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.24-rc5 -# Wed Dec 12 16:11:27 2007 +# Linux kernel version: 2.6.31-rc6 +# Tue Aug 18 13:41:41 2009 # CONFIG_ARM=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y -# CONFIG_GENERIC_GPIO is not set -# CONFIG_GENERIC_TIME is not set -# CONFIG_GENERIC_CLOCKEVENTS is not set +CONFIG_GENERIC_GPIO=y CONFIG_MMU=y -# CONFIG_NO_IOPORT is not set CONFIG_GENERIC_HARDIRQS=y CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y CONFIG_LOCKDEP_SUPPORT=y CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_HARDIRQS_SW_RESEND=y CONFIG_GENERIC_IRQ_PROBE=y CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y CONFIG_VECTORS_BASE=0xffff0000 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y # # General setup @@ -40,21 +37,39 @@ CONFIG_SYSVIPC_SYSCTL=y CONFIG_BSD_PROCESS_ACCT=y # CONFIG_BSD_PROCESS_ACCT_V3 is not set # CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set # CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +# CONFIG_CLASSIC_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set # CONFIG_IKCONFIG is not set CONFIG_LOG_BUF_SHIFT=14 +# CONFIG_GROUP_SCHED is not set # CONFIG_CGROUPS is not set -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_FAIR_USER_SCHED=y -# CONFIG_FAIR_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y +# CONFIG_SYSFS_DEPRECATED_V2 is not set # CONFIG_RELAY is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set CONFIG_BLK_DEV_INITRD=y CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y CONFIG_CC_OPTIMIZE_FOR_SIZE=y CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y # CONFIG_EMBEDDED is not set CONFIG_UID16=y CONFIG_SYSCTL_SYSCALL=y @@ -67,29 +82,48 @@ CONFIG_BUG=y CONFIG_ELF_CORE=y CONFIG_BASE_FULL=y CONFIG_FUTEX=y -CONFIG_ANON_INODES=y CONFIG_EPOLL=y CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y CONFIG_EVENTFD=y CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Performance Counters +# CONFIG_VM_EVENT_COUNTERS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_COMPAT_BRK=y CONFIG_SLAB=y # CONFIG_SLUB is not set # CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y + +# +# GCOV-based kernel profiling +# +# CONFIG_SLOW_WORK is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set CONFIG_BASE_SMALL=0 CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set # CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set -CONFIG_KMOD=y CONFIG_BLOCK=y -# CONFIG_LBD is not 
set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set +CONFIG_LBDAF=y # CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set # # IO Schedulers @@ -103,6 +137,7 @@ CONFIG_IOSCHED_CFQ=y CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_FREEZER is not set # # System Type @@ -112,15 +147,15 @@ CONFIG_DEFAULT_IOSCHED="cfq" # CONFIG_ARCH_REALVIEW is not set # CONFIG_ARCH_VERSATILE is not set # CONFIG_ARCH_AT91 is not set -# CONFIG_ARCH_CLPS7500 is not set # CONFIG_ARCH_CLPS711X is not set -# CONFIG_ARCH_CO285 is not set +# CONFIG_ARCH_GEMINI is not set # CONFIG_ARCH_EBSA110 is not set # CONFIG_ARCH_EP93XX is not set # CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_STMP3XXX is not set # CONFIG_ARCH_NETX is not set # CONFIG_ARCH_H720X is not set -# CONFIG_ARCH_IMX is not set # CONFIG_ARCH_IOP13XX is not set # CONFIG_ARCH_IOP32X is not set CONFIG_ARCH_IOP33X=y @@ -128,19 +163,26 @@ CONFIG_ARCH_IOP33X=y # CONFIG_ARCH_IXP2000 is not set # CONFIG_ARCH_IXP4XX is not set # CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set # CONFIG_ARCH_KS8695 is not set # CONFIG_ARCH_NS9XXX is not set -# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_W90X900 is not set # CONFIG_ARCH_PNX4008 is not set # CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_MSM is not set # CONFIG_ARCH_RPC is not set # CONFIG_ARCH_SA1100 is not set # CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set # CONFIG_ARCH_SHARK is not set # CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set # CONFIG_ARCH_DAVINCI is not set # CONFIG_ARCH_OMAP is not set -CONFIG_IOP3XX_ATU=y # # IOP33x Implementation Options @@ -151,14 +193,6 @@ CONFIG_IOP3XX_ATU=y # CONFIG_ARCH_IQ80331=y CONFIG_MACH_IQ80332=y - -# -# Boot options -# - -# -# Power management -# CONFIG_PLAT_IOP=y # @@ -168,6 +202,7 @@ CONFIG_CPU_32=y CONFIG_CPU_XSCALE=y CONFIG_CPU_32v5=y CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_PABRT_NOIFAR=y CONFIG_CPU_CACHE_VIVT=y CONFIG_CPU_TLB_V4WBI=y CONFIG_CPU_CP15=y @@ -178,7 +213,6 @@ CONFIG_CPU_CP15_MMU=y # # CONFIG_ARM_THUMB is not set # CONFIG_CPU_DCACHE_DISABLE is not set -# CONFIG_OUTER_CACHE is not set # CONFIG_IWMMXT is not set CONFIG_XSCALE_PMU=y @@ -190,40 +224,54 @@ CONFIG_PCI_SYSCALL=y # CONFIG_ARCH_SUPPORTS_MSI is not set CONFIG_PCI_LEGACY=y # CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_STUB is not set +# CONFIG_PCI_IOV is not set # CONFIG_PCCARD is not set # # Kernel Features # -# CONFIG_TICK_ONESHOT is not set +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 # CONFIG_PREEMPT is not set CONFIG_HZ=100 # CONFIG_AEABI is not set -# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set CONFIG_SELECT_MEMORY_MODEL=y CONFIG_FLATMEM_MANUAL=y # CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_SPARSEMEM_MANUAL is not set CONFIG_FLATMEM=y CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set +CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4096 -# CONFIG_RESOURCES_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y 
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set # # Boot options # CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc" +CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp cachepolicy=writealloc iop3xx_init_atu=y" # CONFIG_XIP_KERNEL is not set # CONFIG_KEXEC is not set +# +# CPU Power Management +# +# CONFIG_CPU_IDLE is not set + # # Floating point emulation # @@ -239,6 +287,8 @@ CONFIG_FPE_NWFPE=y # Userspace binary formats # CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_HAVE_AOUT=y CONFIG_BINFMT_AOUT=y # CONFIG_BINFMT_MISC is not set # CONFIG_ARTHUR is not set @@ -247,11 +297,7 @@ CONFIG_BINFMT_AOUT=y # Power management options # # CONFIG_PM is not set -CONFIG_SUSPEND_UP_POSSIBLE=y - -# -# Networking -# +CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_NET=y # @@ -264,6 +310,7 @@ CONFIG_XFRM=y # CONFIG_XFRM_USER is not set # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set +# CONFIG_XFRM_STATISTICS is not set # CONFIG_NET_KEY is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y @@ -310,6 +357,7 @@ CONFIG_IPV6=y # CONFIG_IPV6_SIT is not set # CONFIG_IPV6_TUNNEL is not set # CONFIG_IPV6_MULTIPLE_TABLES is not set +# CONFIG_IPV6_MROUTE is not set # CONFIG_NETWORK_SECMARK is not set # CONFIG_NETFILTER is not set # CONFIG_IP_DCCP is not set @@ -317,6 +365,7 @@ CONFIG_IPV6=y # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set # CONFIG_VLAN_8021Q is not set # CONFIG_DECNET is not set # CONFIG_LLC2 is not set @@ -326,24 +375,31 @@ CONFIG_IPV6=y # CONFIG_LAPB is not set # CONFIG_ECONET is not set # CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set # CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set # # Network testing # # CONFIG_NET_PKTGEN is not set # CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set # CONFIG_IRDA is not set # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set +CONFIG_WIRELESS=y +# CONFIG_CFG80211 is not set +# CONFIG_WIRELESS_OLD_REGULATORY is not set +# CONFIG_WIRELESS_EXT is not set +# CONFIG_LIB80211 is not set # -# Wireless +# CFG80211 needs to be enabled for MAC80211 # -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set +CONFIG_MAC80211_DEFAULT_PS_VALUE=0 +# CONFIG_WIMAX is not set # CONFIG_RFKILL is not set # CONFIG_NET_9P is not set @@ -357,7 +413,9 @@ CONFIG_IPV6=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_STANDALONE=y CONFIG_PREVENT_FIRMWARE_BUILD=y -# CONFIG_FW_LOADER is not set +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set @@ -366,12 +424,14 @@ CONFIG_MTD=y # CONFIG_MTD_DEBUG is not set # CONFIG_MTD_CONCAT is not set CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set CONFIG_MTD_REDBOOT_PARTS=y CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-1 CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y CONFIG_MTD_REDBOOT_PARTS_READONLY=y # CONFIG_MTD_CMDLINE_PARTS is not set # CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set # # User Modules And Translation Layers @@ -421,9 +481,7 @@ CONFIG_MTD_CFI_UTIL=y # # CONFIG_MTD_COMPLEX_MAPPINGS is not set CONFIG_MTD_PHYSMAP=y -CONFIG_MTD_PHYSMAP_START=0x0 -CONFIG_MTD_PHYSMAP_LEN=0x0 -CONFIG_MTD_PHYSMAP_BANKWIDTH=1 +# CONFIG_MTD_PHYSMAP_COMPAT is not set # 
CONFIG_MTD_ARM_INTEGRATOR is not set # CONFIG_MTD_INTEL_VR_NOR is not set # CONFIG_MTD_PLATRAM is not set @@ -446,6 +504,11 @@ CONFIG_MTD_PHYSMAP_BANKWIDTH=1 # CONFIG_MTD_NAND is not set # CONFIG_MTD_ONENAND is not set +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + # # UBI - Unsorted block images # @@ -463,14 +526,29 @@ CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024 +# CONFIG_BLK_DEV_XIP is not set # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set CONFIG_MISC_DEVICES=y # CONFIG_PHANTOM is not set -# CONFIG_EEPROM_93CX6 is not set # CONFIG_SGI_IOC4 is not set # CONFIG_TIFM_CORE is not set +# CONFIG_ICS932S401 is not set +# CONFIG_ENCLOSURE_SERVICES is not set +# CONFIG_HP_ILO is not set +# CONFIG_ISL29003 is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_LEGACY is not set +# CONFIG_EEPROM_MAX6875 is not set +# CONFIG_EEPROM_93CX6 is not set +# CONFIG_CB710_CORE is not set +CONFIG_HAVE_IDE=y # CONFIG_IDE is not set # @@ -492,10 +570,6 @@ CONFIG_BLK_DEV_SD=y # CONFIG_BLK_DEV_SR is not set CONFIG_CHR_DEV_SG=y # CONFIG_CHR_DEV_SCH is not set - -# -# Some SCSI devices (e.g. CD jukebox) support multiple LUNs -# # CONFIG_SCSI_MULTI_LUN is not set # CONFIG_SCSI_CONSTANTS is not set # CONFIG_SCSI_LOGGING is not set @@ -512,6 +586,8 @@ CONFIG_SCSI_WAIT_SCAN=m # CONFIG_SCSI_SRP_ATTRS is not set CONFIG_SCSI_LOWLEVEL=y # CONFIG_ISCSI_TCP is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set # CONFIG_BLK_DEV_3W_XXXX_RAID is not set # CONFIG_SCSI_3W_9XXX is not set # CONFIG_SCSI_ACARD is not set @@ -520,13 +596,18 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_AIC7XXX_OLD is not set # CONFIG_SCSI_AIC79XX is not set # CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set # CONFIG_SCSI_DPT_I2O is not set # CONFIG_SCSI_ADVANSYS is not set # CONFIG_SCSI_ARCMSR is not set # CONFIG_MEGARAID_NEWGEN is not set # CONFIG_MEGARAID_LEGACY is not set # CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT2SAS is not set # CONFIG_SCSI_HPTIOP is not set +# CONFIG_LIBFC is not set +# CONFIG_LIBFCOE is not set +# CONFIG_FCOE is not set # CONFIG_SCSI_DMX3191D is not set # CONFIG_SCSI_FUTURE_DOMAIN is not set # CONFIG_SCSI_IPS is not set @@ -543,15 +624,18 @@ CONFIG_SCSI_LOWLEVEL=y # CONFIG_SCSI_NSP32 is not set # CONFIG_SCSI_DEBUG is not set # CONFIG_SCSI_SRP is not set +# CONFIG_SCSI_DH is not set +# CONFIG_SCSI_OSD_INITIATOR is not set # CONFIG_ATA is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y CONFIG_MD_LINEAR=y CONFIG_MD_RAID0=y CONFIG_MD_RAID1=y # CONFIG_MD_RAID10 is not set CONFIG_MD_RAID456=y -# CONFIG_MD_RAID5_RESHAPE is not set +CONFIG_MD_RAID6_PQ=y # CONFIG_MD_MULTIPATH is not set # CONFIG_MD_FAULTY is not set CONFIG_BLK_DEV_DM=y @@ -568,27 +652,34 @@ CONFIG_BLK_DEV_DM=y # # IEEE 1394 (FireWire) support # + +# +# You can enable one or both FireWire driver stacks. +# + +# +# See the help texts for more information. 
+# # CONFIG_FIREWIRE is not set # CONFIG_IEEE1394 is not set # CONFIG_I2O is not set CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set # CONFIG_DUMMY is not set # CONFIG_BONDING is not set # CONFIG_MACVLAN is not set # CONFIG_EQUALIZER is not set # CONFIG_TUN is not set # CONFIG_VETH is not set -# CONFIG_IP1000 is not set # CONFIG_ARCNET is not set # CONFIG_NET_ETHERNET is not set CONFIG_NETDEV_1000=y # CONFIG_ACENIC is not set # CONFIG_DL2K is not set CONFIG_E1000=y -CONFIG_E1000_NAPI=y -# CONFIG_E1000_DISABLE_PACKET_SPLIT is not set # CONFIG_E1000E is not set +# CONFIG_IP1000 is not set +# CONFIG_IGB is not set +# CONFIG_IGBVF is not set # CONFIG_NS83820 is not set # CONFIG_HAMACHI is not set # CONFIG_YELLOWFIN is not set @@ -596,23 +687,34 @@ CONFIG_E1000_NAPI=y # CONFIG_SIS190 is not set # CONFIG_SKGE is not set # CONFIG_SKY2 is not set -# CONFIG_SK98LIN is not set # CONFIG_VIA_VELOCITY is not set # CONFIG_TIGON3 is not set # CONFIG_BNX2 is not set +# CONFIG_CNIC is not set # CONFIG_QLA3XXX is not set # CONFIG_ATL1 is not set +# CONFIG_ATL1E is not set +# CONFIG_ATL1C is not set +# CONFIG_JME is not set CONFIG_NETDEV_10000=y # CONFIG_CHELSIO_T1 is not set +CONFIG_CHELSIO_T3_DEPENDS=y # CONFIG_CHELSIO_T3 is not set +# CONFIG_ENIC is not set # CONFIG_IXGBE is not set # CONFIG_IXGB is not set # CONFIG_S2IO is not set +# CONFIG_VXGE is not set # CONFIG_MYRI10GE is not set # CONFIG_NETXEN_NIC is not set # CONFIG_NIU is not set +# CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_TEHUTI is not set +# CONFIG_BNX2X is not set +# CONFIG_QLGE is not set +# CONFIG_SFC is not set +# CONFIG_BE2NET is not set # CONFIG_TR is not set # @@ -620,13 +722,16 @@ CONFIG_NETDEV_10000=y # # CONFIG_WLAN_PRE80211 is not set # CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# # CONFIG_WAN is not set # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_PPP is not set # CONFIG_SLIP is not set # CONFIG_NET_FC is not set -# CONFIG_SHAPER is not set # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set @@ -670,10 +775,13 @@ CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 # Character devices # CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y CONFIG_VT_CONSOLE=y CONFIG_HW_CONSOLE=y # CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y # CONFIG_SERIAL_NONSTANDARD is not set +# CONFIG_NOZOMI is not set # # Serial drivers @@ -692,11 +800,12 @@ CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set CONFIG_LEGACY_PTYS=y CONFIG_LEGACY_PTY_COUNT=256 # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y -# CONFIG_NVRAM is not set +# CONFIG_HW_RANDOM_TIMERIOMEM is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set # CONFIG_RAW_DRIVER is not set @@ -705,67 +814,97 @@ CONFIG_DEVPORT=y CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y CONFIG_I2C_CHARDEV=y - -# -# I2C Algorithms -# -# CONFIG_I2C_ALGOBIT is not set -# CONFIG_I2C_ALGOPCF is not set -# CONFIG_I2C_ALGOPCA is not set +CONFIG_I2C_HELPER_AUTO=y # # I2C Hardware Bus support # + +# +# PC SMBus host controller drivers +# # CONFIG_I2C_ALI1535 is not set # CONFIG_I2C_ALI1563 is not set # CONFIG_I2C_ALI15X3 is not set # CONFIG_I2C_AMD756 is not set # CONFIG_I2C_AMD8111 is not set # CONFIG_I2C_I801 is not set -# CONFIG_I2C_I810 is not set +# CONFIG_I2C_ISCH is not set # CONFIG_I2C_PIIX4 is not set -CONFIG_I2C_IOP3XX=y # CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_OCORES is not 
set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_PROSAVAGE is not set -# CONFIG_I2C_SAVAGE4 is not set -# CONFIG_I2C_SIMTEC is not set # CONFIG_I2C_SIS5595 is not set # CONFIG_I2C_SIS630 is not set # CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_STUB is not set # CONFIG_I2C_VIA is not set # CONFIG_I2C_VIAPRO is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_GPIO is not set +CONFIG_I2C_IOP3XX=y +# CONFIG_I2C_OCORES is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Graphics adapter I2C/DDC channel drivers +# # CONFIG_I2C_VOODOO3 is not set +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + # # Miscellaneous I2C Chip support # -# CONFIG_SENSORS_DS1337 is not set -# CONFIG_SENSORS_DS1374 is not set # CONFIG_DS1682 is not set -# CONFIG_EEPROM_LEGACY is not set # CONFIG_SENSORS_PCF8574 is not set +# CONFIG_PCF8575 is not set # CONFIG_SENSORS_PCA9539 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_SENSORS_MAX6875 is not set # CONFIG_SENSORS_TSL2550 is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set # CONFIG_I2C_DEBUG_CHIP is not set +# CONFIG_SPI is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set # -# SPI support +# Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCF857X is not set + +# +# PCI GPIO expanders: +# +# CONFIG_GPIO_BT8XX is not set + +# +# SPI GPIO expanders: # -# CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set # CONFIG_W1 is not set # CONFIG_POWER_SUPPLY is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set +# CONFIG_SENSORS_AD7414 is not set # CONFIG_SENSORS_AD7418 is not set # CONFIG_SENSORS_ADM1021 is not set # CONFIG_SENSORS_ADM1025 is not set @@ -773,13 +912,17 @@ CONFIG_HWMON=y # CONFIG_SENSORS_ADM1029 is not set # CONFIG_SENSORS_ADM1031 is not set # CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7462 is not set # CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7473 is not set +# CONFIG_SENSORS_ADT7475 is not set # CONFIG_SENSORS_ATXP1 is not set # CONFIG_SENSORS_DS1621 is not set # CONFIG_SENSORS_I5K_AMB is not set # CONFIG_SENSORS_F71805F is not set # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_IT87 is not set @@ -794,16 +937,23 @@ CONFIG_HWMON=y # CONFIG_SENSORS_LM90 is not set # CONFIG_SENSORS_LM92 is not set # CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LM95241 is not set # CONFIG_SENSORS_MAX1619 is not set # CONFIG_SENSORS_MAX6650 is not set # CONFIG_SENSORS_PC87360 is not set # CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set # CONFIG_SENSORS_SMSC47M1 is not set # CONFIG_SENSORS_SMSC47M192 is not set # CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_ADS7828 is not set # CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_VIA686A is not set # 
CONFIG_SENSORS_VT1211 is not set # CONFIG_SENSORS_VT8231 is not set @@ -812,28 +962,38 @@ CONFIG_HWMON=y # CONFIG_SENSORS_W83792D is not set # CONFIG_SENSORS_W83793 is not set # CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set # CONFIG_HWMON_DEBUG_CHIP is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set # CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -CONFIG_SSB_POSSIBLE=y # CONFIG_SSB is not set # # Multifunction device drivers # +# CONFIG_MFD_CORE is not set # CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -CONFIG_DAB=y +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_TC6393XB is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_MEDIA_SUPPORT is not set # # Graphics support @@ -854,15 +1014,16 @@ CONFIG_DAB=y # # CONFIG_VGA_CONSOLE is not set CONFIG_DUMMY_CONSOLE=y - -# -# Sound -# # CONFIG_SOUND is not set CONFIG_HID_SUPPORT=y CONFIG_HID=y # CONFIG_HID_DEBUG is not set # CONFIG_HIDRAW is not set +# CONFIG_HID_PID is not set + +# +# Special HID drivers +# CONFIG_USB_SUPPORT=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB_ARCH_HAS_OHCI=y @@ -870,14 +1031,21 @@ CONFIG_USB_ARCH_HAS_EHCI=y # CONFIG_USB is not set # -# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' +# Enable Host or Gadget support to see Inventra options # # -# USB Gadget Support +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may # # CONFIG_USB_GADGET is not set + +# +# OTG and related infrastructure +# +# CONFIG_UWB is not set # CONFIG_MMC is not set +# CONFIG_MEMSTICK is not set +# CONFIG_ACCESSIBILITY is not set # CONFIG_NEW_LEDS is not set CONFIG_RTC_LIB=y # CONFIG_RTC_CLASS is not set @@ -893,6 +1061,12 @@ CONFIG_DMA_ENGINE=y # DMA Clients # CONFIG_NET_DMA=y +# CONFIG_ASYNC_TX_DMA is not set +# CONFIG_DMATEST is not set +# CONFIG_AUXDISPLAY is not set +# CONFIG_REGULATOR is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set # # File systems @@ -901,10 +1075,11 @@ CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set # CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set CONFIG_EXT3_FS_XATTR=y # CONFIG_EXT3_FS_POSIX_ACL is not set # CONFIG_EXT3_FS_SECURITY is not set -# CONFIG_EXT4DEV_FS is not set +# CONFIG_EXT4_FS is not set CONFIG_JBD=y CONFIG_FS_MBCACHE=y # CONFIG_REISERFS_FS is not set @@ -913,16 +1088,22 @@ CONFIG_FS_MBCACHE=y # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set # CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y CONFIG_INOTIFY=y CONFIG_INOTIFY_USER=y # CONFIG_QUOTA is not set -CONFIG_DNOTIFY=y # CONFIG_AUTOFS_FS is not set # CONFIG_AUTOFS4_FS is not set # CONFIG_FUSE_FS is not set +# +# Caches +# +# CONFIG_FSCACHE is not set + # # CD-ROM/DVD Filesystems # @@ -941,15 +1122,13 @@ CONFIG_DNOTIFY=y # CONFIG_PROC_FS=y CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y CONFIG_SYSFS=y CONFIG_TMPFS=y # CONFIG_TMPFS_POSIX_ACL is not set # CONFIG_HUGETLB_PAGE is not set # CONFIG_CONFIGFS_FS is not set - -# -# 
Miscellaneous filesystems -# +CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set # CONFIG_HFS_FS is not set @@ -959,29 +1138,31 @@ CONFIG_TMPFS=y # CONFIG_EFS_FS is not set # CONFIG_JFFS2_FS is not set CONFIG_CRAMFS=y +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set # CONFIG_HPFS_FS is not set # CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V3=y # CONFIG_NFS_V3_ACL is not set # CONFIG_NFS_V4 is not set -# CONFIG_NFS_DIRECTIO is not set +CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y # CONFIG_NFSD_V3_ACL is not set # CONFIG_NFSD_V4 is not set -# CONFIG_NFSD_TCP is not set -CONFIG_ROOT_NFS=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y CONFIG_EXPORTFS=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y -# CONFIG_SUNRPC_BIND34 is not set # CONFIG_RPCSEC_GSS_KRB5 is not set # CONFIG_RPCSEC_GSS_SPKM3 is not set # CONFIG_SMB_FS is not set @@ -1013,9 +1194,6 @@ CONFIG_MSDOS_PARTITION=y # CONFIG_SYSV68_PARTITION is not set # CONFIG_NLS is not set # CONFIG_DLM is not set -CONFIG_INSTRUMENTATION=y -# CONFIG_PROFILING is not set -# CONFIG_MARKERS is not set # # Kernel hacking @@ -1023,6 +1201,7 @@ CONFIG_INSTRUMENTATION=y # CONFIG_PRINTK_TIME is not set CONFIG_ENABLE_WARN_DEPRECATED=y CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 CONFIG_MAGIC_SYSRQ=y # CONFIG_UNUSED_SYMBOLS is not set # CONFIG_DEBUG_FS is not set @@ -1030,10 +1209,17 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y # CONFIG_DEBUG_SHIRQ is not set CONFIG_DETECT_SOFTLOCKUP=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 CONFIG_SCHED_DEBUG=y # CONFIG_SCHEDSTATS is not set # CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set # CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set # CONFIG_DEBUG_RT_MUTEXES is not set # CONFIG_RT_MUTEX_TESTER is not set # CONFIG_DEBUG_SPINLOCK is not set @@ -1047,16 +1233,41 @@ CONFIG_SCHED_DEBUG=y CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_INFO is not set # CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +CONFIG_DEBUG_MEMORY_INIT=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set CONFIG_FRAME_POINTER=y -# CONFIG_FORCED_INLINING is not set # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_FUNCTION_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_ENABLE_DEFAULT_TRACERS is not set +# CONFIG_BOOT_TRACER is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_KMEMTRACE is not set +# CONFIG_WORKQUEUE_TRACER is not set +# CONFIG_BLK_DEV_IO_TRACE is not set # CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set CONFIG_DEBUG_USER=y # CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set 
CONFIG_DEBUG_LL=y # CONFIG_DEBUG_ICEDCC is not set @@ -1065,24 +1276,117 @@ CONFIG_DEBUG_LL=y # # CONFIG_KEYS is not set # CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set # CONFIG_SECURITY_FILE_CAPABILITIES is not set CONFIG_XOR_BLOCKS=y CONFIG_ASYNC_CORE=y CONFIG_ASYNC_MEMCPY=y CONFIG_ASYNC_XOR=y -# CONFIG_CRYPTO is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +# CONFIG_CRYPTO_MANAGER is not set +# CONFIG_CRYPTO_MANAGER2 is not set +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +# CONFIG_CRYPTO_CBC is not set +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +# CONFIG_CRYPTO_MD5 is not set +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +# CONFIG_CRYPTO_DES is not set +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_HIFN_795X is not set +# CONFIG_BINARY_PRINTF is not set # # Library routines # +CONFIG_GENERIC_FIND_LAST_BIT=y # CONFIG_CRC_CCITT is not set # CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set # CONFIG_CRC_ITU_T is not set # CONFIG_CRC32 is not set # CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set CONFIG_ZLIB_INFLATE=y -CONFIG_PLIST=y +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT=y CONFIG_HAS_DMA=y +CONFIG_NLATTR=y From 5b9eda3313b678f20f2bec08e8173f93e85f6c14 Mon Sep 17 00:00:00 2001 From: Aaro Koskinen Date: Fri, 31 Jul 2009 12:16:21 +0300 Subject: [PATCH 0031/3409] iop3xx: ATU and PCI memory configuration corrected There are two 64 MB outbound memory windows at bus addresses 0x80000000..0x83ffffff and 0x84000000..0x87ffffff for PCI memory. Currently, on iop32x, only the lower window is available for allocations, limiting the available space to 64 MB. On iop33x the full 128 MB can be allocated, but the translation value is wrong for the upper window. The patch enables the full 128 MB space on iop32x and corrects the initialization of OMWTVR1. 
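With the single shared definition IOP3XX_PCI_MEM_WINDOW_SIZE = 0x08000000 (128 MB), the upper window translation works out on iop32x as IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE / 2 = 0x80000000 + 0x04000000 = 0x84000000, i.e. the base of the second 64 MB window.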
Redundant definitions are deleted. Tested using a Thecus N2100 board with a graphics adapter in the expansion slot. Both windows are in use: 00:05.0 VGA compatible controller: XGI Technology Inc. (eXtreme Graphics Innovation) Volari Z7 (prog-if 00 [VGA controller]) [...] Region 0: Memory at 80000000 (32-bit, prefetchable) [size=64M] Region 1: Memory at 84080000 (32-bit, non-prefetchable) [size=256K] Signed-off-by: Aaro Koskinen Cc: Lennert Buytenhek Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx.h | 1 + arch/arm/mach-iop32x/include/mach/iop32x.h | 2 -- arch/arm/mach-iop33x/include/mach/iop33x.h | 2 -- arch/arm/plat-iop/pci.c | 3 ++- 4 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/arm/include/asm/hardware/iop3xx.h b/arch/arm/include/asm/hardware/iop3xx.h index 4b8e7f559929..8d60ad267e3a 100644 --- a/arch/arm/include/asm/hardware/iop3xx.h +++ b/arch/arm/include/asm/hardware/iop3xx.h @@ -215,6 +215,7 @@ extern int iop3xx_get_init_atu(void); * IOP3XX I/O and Mem space regions for PCI autoconfiguration */ #define IOP3XX_PCI_LOWER_MEM_PA 0x80000000 +#define IOP3XX_PCI_MEM_WINDOW_SIZE 0x08000000 #define IOP3XX_PCI_IO_WINDOW_SIZE 0x00010000 #define IOP3XX_PCI_LOWER_IO_PA 0x90000000 diff --git a/arch/arm/mach-iop32x/include/mach/iop32x.h b/arch/arm/mach-iop32x/include/mach/iop32x.h index abd9eb49f103..941f363aca56 100644 --- a/arch/arm/mach-iop32x/include/mach/iop32x.h +++ b/arch/arm/mach-iop32x/include/mach/iop32x.h @@ -31,7 +31,5 @@ #define IOP32X_MAX_RAM_SIZE 0x40000000UL #define IOP3XX_MAX_RAM_SIZE IOP32X_MAX_RAM_SIZE #define IOP3XX_PCI_LOWER_MEM_BA 0x80000000 -#define IOP32X_PCI_MEM_WINDOW_SIZE 0x04000000 -#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP32X_PCI_MEM_WINDOW_SIZE #endif diff --git a/arch/arm/mach-iop33x/include/mach/iop33x.h b/arch/arm/mach-iop33x/include/mach/iop33x.h index 24567316ec88..a89c0a234bff 100644 --- a/arch/arm/mach-iop33x/include/mach/iop33x.h +++ b/arch/arm/mach-iop33x/include/mach/iop33x.h @@ -36,8 +36,6 @@ #define IOP33X_MAX_RAM_SIZE 0x80000000UL #define IOP3XX_MAX_RAM_SIZE IOP33X_MAX_RAM_SIZE #define IOP3XX_PCI_LOWER_MEM_BA (PHYS_OFFSET + IOP33X_MAX_RAM_SIZE) -#define IOP33X_PCI_MEM_WINDOW_SIZE 0x08000000 -#define IOP3XX_PCI_MEM_WINDOW_SIZE IOP33X_PCI_MEM_WINDOW_SIZE #endif diff --git a/arch/arm/plat-iop/pci.c b/arch/arm/plat-iop/pci.c index 77fa7cc7d162..ce31f316ac75 100644 --- a/arch/arm/plat-iop/pci.c +++ b/arch/arm/plat-iop/pci.c @@ -257,7 +257,8 @@ void __init iop3xx_atu_setup(void) *IOP3XX_OUMWTVR0 = 0; /* Outbound window 1 */ - *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + IOP3XX_PCI_MEM_WINDOW_SIZE; + *IOP3XX_OMWTVR1 = IOP3XX_PCI_LOWER_MEM_BA + + IOP3XX_PCI_MEM_WINDOW_SIZE / 2; *IOP3XX_OUMWTVR1 = 0; /* BAR 3 ( Disabled ) */ From a692838dcaacb5f6f05fac73abb99d92dd7b1021 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Sun, 2 Aug 2009 10:46:45 +0200 Subject: [PATCH 0032/3409] arch/arm/plat-iop: Use DIV_ROUND_CLOSEST The kernel.h macro DIV_ROUND_CLOSEST performs the computation (x + d/2)/d but is perhaps more readable. 
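For example (values made up for illustration; the real call site is iop_init_time() below):

	unsigned long tick_rate = 33000000;	/* hypothetical 33 MHz timer clock */
	/* (33000000 + 100/2) / 100 == 330000 with HZ=100, identical to the
	 * open-coded (tick_rate + HZ/2) / HZ being replaced */
	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);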
The semantic patch that makes this change is as follows: (http://www.emn.fr/x-info/coccinelle/) // @haskernel@ @@ #include @depends on haskernel@ expression x,__divisor; @@ - (((x) + ((__divisor) / 2)) / (__divisor)) + DIV_ROUND_CLOSEST(x,__divisor) // Signed-off-by: Julia Lawall Signed-off-by: Dan Williams --- arch/arm/plat-iop/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c index 3695bbe3ee28..8da95d57c21f 100644 --- a/arch/arm/plat-iop/time.c +++ b/arch/arm/plat-iop/time.c @@ -85,7 +85,7 @@ void __init iop_init_time(unsigned long tick_rate) { u32 timer_ctl; - ticks_per_jiffy = (tick_rate + HZ/2) / HZ; + ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ); ticks_per_usec = tick_rate / 1000000; next_jiffy_time = 0xffffffff; iop_tick_rate = tick_rate; From f00f510ab33f3e689e35ccdbb8dc264168aa5250 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 18 Aug 2009 15:21:50 -0700 Subject: [PATCH 0033/3409] iop: downgrade maintenance status iop support is in maintenance-only mode. Cc: Lennert Buytenhek Signed-off-by: Dan Williams --- MAINTAINERS | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 2c4326c0de9a..e2f7199c532f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -631,24 +631,24 @@ ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek M: Dan Williams L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) -S: Supported +S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE M: Dan Williams L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) -S: Supported +S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek M: Dan Williams L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) -S: Supported +S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek M: Dan Williams L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) -S: Supported +S: Maintained ARM/INTEL IXP2000 ARM ARCHITECTURE M: Lennert Buytenhek @@ -669,7 +669,7 @@ ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek M: Dan Williams L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only) -S: Supported +S: Maintained ARM/IP FABRICS DOUBLE ESPRESSO MACHINE SUPPORT M: Lennert Buytenhek @@ -2619,7 +2619,7 @@ F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER M: Dan Williams -S: Supported +S: Maintained F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT From 36d1c6476be51101778882897b315bd928c8c7b5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 11:48:22 -0700 Subject: [PATCH 0034/3409] md/raid6: move the spare page to a percpu allocation In preparation for asynchronous handling of raid6 operations move the spare page to a percpu allocation to allow multiple simultaneous synchronous raid6 recovery operations. Make this allocation cpu hotplug aware to maximize allocation efficiency. 
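In outline, the diff below replaces the single conf->spare_page with this per-cpu pattern (simplified; error handling omitted):

	/* setup: one spare page per present cpu */
	conf->percpu = alloc_percpu(struct raid5_percpu);
	for_each_present_cpu(cpu)
		per_cpu_ptr(conf->percpu, cpu)->spare_page =
			alloc_page(GFP_KERNEL);

	/* use in handle_parity_checks6(): get_cpu() disables preemption,
	 * so the spare page stays private to this check */
	cpu = get_cpu();
	tmp_page = per_cpu_ptr(conf->percpu, cpu)->spare_page;
	/* ... P/Q verification using tmp_page ... */
	put_cpu();

A hotplug notifier (raid456_cpu_notify) allocates the page on CPU_UP_PREPARE and frees it on CPU_DEAD, keeping the allocation in step with CPUs coming and going.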
Signed-off-by: Dan Williams --- drivers/md/raid5.c | 260 ++++++++++++++++++++++++++++++--------------- drivers/md/raid5.h | 9 +- 2 files changed, 179 insertions(+), 90 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9411466f71de..5359236a1ec7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "md.h" #include "raid5.h" #include "bitmap.h" @@ -2565,14 +2566,15 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, - struct stripe_head_state *s, - struct r6_state *r6s, struct page *tmp_page, - int disks) + struct stripe_head_state *s, + struct r6_state *r6s, int disks) { int update_p = 0, update_q = 0; struct r5dev *dev; int pd_idx = sh->pd_idx; int qd_idx = sh->qd_idx; + unsigned long cpu; + struct page *tmp_page; set_bit(STRIPE_HANDLE, &sh->state); @@ -2583,78 +2585,75 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, * case we can only check one of them, possibly using the * other to generate missing data */ - - /* If !tmp_page, we cannot do the calculations, - * but as we have set STRIPE_HANDLE, we will soon be called - * by stripe_handle with a tmp_page - just wait until then. - */ - if (tmp_page) { - if (s->failed == r6s->q_failed) { - /* The only possible failed device holds 'Q', so it - * makes sense to check P (If anything else were failed, - * we would have used P to recreate it). - */ - compute_block_1(sh, pd_idx, 1); - if (!page_is_zero(sh->dev[pd_idx].page)) { - compute_block_1(sh, pd_idx, 0); - update_p = 1; - } - } - if (!r6s->q_failed && s->failed < 2) { - /* q is not failed, and we didn't use it to generate - * anything, so it makes sense to check it - */ - memcpy(page_address(tmp_page), - page_address(sh->dev[qd_idx].page), - STRIPE_SIZE); - compute_parity6(sh, UPDATE_PARITY); - if (memcmp(page_address(tmp_page), - page_address(sh->dev[qd_idx].page), - STRIPE_SIZE) != 0) { - clear_bit(STRIPE_INSYNC, &sh->state); - update_q = 1; - } - } - if (update_p || update_q) { - conf->mddev->resync_mismatches += STRIPE_SECTORS; - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) - /* don't try to repair!! */ - update_p = update_q = 0; - } - - /* now write out any block on a failed drive, - * or P or Q if they need it + cpu = get_cpu(); + tmp_page = per_cpu_ptr(conf->percpu, cpu)->spare_page; + if (s->failed == r6s->q_failed) { + /* The only possible failed device holds 'Q', so it + * makes sense to check P (If anything else were failed, + * we would have used P to recreate it). 
*/ - - if (s->failed == 2) { - dev = &sh->dev[r6s->failed_num[1]]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); + compute_block_1(sh, pd_idx, 1); + if (!page_is_zero(sh->dev[pd_idx].page)) { + compute_block_1(sh, pd_idx, 0); + update_p = 1; } - if (s->failed >= 1) { - dev = &sh->dev[r6s->failed_num[0]]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - - if (update_p) { - dev = &sh->dev[pd_idx]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - if (update_q) { - dev = &sh->dev[qd_idx]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - clear_bit(STRIPE_DEGRADED, &sh->state); - - set_bit(STRIPE_INSYNC, &sh->state); } + if (!r6s->q_failed && s->failed < 2) { + /* q is not failed, and we didn't use it to generate + * anything, so it makes sense to check it + */ + memcpy(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE); + compute_parity6(sh, UPDATE_PARITY); + if (memcmp(page_address(tmp_page), + page_address(sh->dev[qd_idx].page), + STRIPE_SIZE) != 0) { + clear_bit(STRIPE_INSYNC, &sh->state); + update_q = 1; + } + } + put_cpu(); + + if (update_p || update_q) { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! */ + update_p = update_q = 0; + } + + /* now write out any block on a failed drive, + * or P or Q if they need it + */ + + if (s->failed == 2) { + dev = &sh->dev[r6s->failed_num[1]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (s->failed >= 1) { + dev = &sh->dev[r6s->failed_num[0]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + + if (update_p) { + dev = &sh->dev[pd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (update_q) { + dev = &sh->dev[qd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + clear_bit(STRIPE_DEGRADED, &sh->state); + + set_bit(STRIPE_INSYNC, &sh->state); } static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, @@ -3009,7 +3008,7 @@ static bool handle_stripe5(struct stripe_head *sh) return blocked_rdev == NULL; } -static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) +static bool handle_stripe6(struct stripe_head *sh) { raid5_conf_t *conf = sh->raid_conf; int disks = sh->disks; @@ -3164,7 +3163,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) * data is available */ if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) - handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); + handle_parity_checks6(conf, sh, &s, &r6s, disks); if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { md_done_sync(conf->mddev, STRIPE_SECTORS,1); @@ -3247,16 +3246,14 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) } /* returns true if the stripe was handled */ -static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) +static bool handle_stripe(struct stripe_head *sh) { if (sh->raid_conf->level == 6) - return handle_stripe6(sh, tmp_page); + return handle_stripe6(sh); else return handle_stripe5(sh); } - - static void raid5_activate_delayed(raid5_conf_t *conf) { if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { @@ -4047,7 +4044,7 @@ static 
inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *ski spin_unlock(&sh->lock); /* wait for any blocked device to be handled */ - while(unlikely(!handle_stripe(sh, NULL))) + while (unlikely(!handle_stripe(sh))) ; release_stripe(sh); @@ -4104,7 +4101,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) return handled; } - handle_stripe(sh, NULL); + handle_stripe(sh); release_stripe(sh); handled++; } @@ -4168,7 +4165,7 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); handled++; - handle_stripe(sh, conf->spare_page); + handle_stripe(sh); release_stripe(sh); spin_lock_irq(&conf->device_lock); @@ -4309,15 +4306,104 @@ raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) return sectors * (raid_disks - conf->max_degraded); } +static void raid5_free_percpu(raid5_conf_t *conf) +{ + struct raid5_percpu *percpu; + unsigned long cpu; + + if (!conf->percpu) + return; + + get_online_cpus(); + for_each_possible_cpu(cpu) { + percpu = per_cpu_ptr(conf->percpu, cpu); + safe_put_page(percpu->spare_page); + } +#ifdef CONFIG_HOTPLUG_CPU + unregister_cpu_notifier(&conf->cpu_notify); +#endif + put_online_cpus(); + + free_percpu(conf->percpu); +} + static void free_conf(raid5_conf_t *conf) { shrink_stripes(conf); - safe_put_page(conf->spare_page); + raid5_free_percpu(conf); kfree(conf->disks); kfree(conf->stripe_hashtbl); kfree(conf); } +#ifdef CONFIG_HOTPLUG_CPU +static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, + void *hcpu) +{ + raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify); + long cpu = (long)hcpu; + struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); + + switch (action) { + case CPU_UP_PREPARE: + case CPU_UP_PREPARE_FROZEN: + if (!percpu->spare_page) + percpu->spare_page = alloc_page(GFP_KERNEL); + if (!percpu->spare_page) { + pr_err("%s: failed memory allocation for cpu%ld\n", + __func__, cpu); + return NOTIFY_BAD; + } + break; + case CPU_DEAD: + case CPU_DEAD_FROZEN: + safe_put_page(percpu->spare_page); + percpu->spare_page = NULL; + break; + default: + break; + } + return NOTIFY_OK; +} +#endif + +static int raid5_alloc_percpu(raid5_conf_t *conf) +{ + unsigned long cpu; + struct page *spare_page; + struct raid5_percpu *allcpus; + int err; + + /* the only percpu data is the raid6 spare page */ + if (conf->level != 6) + return 0; + + allcpus = alloc_percpu(struct raid5_percpu); + if (!allcpus) + return -ENOMEM; + conf->percpu = allcpus; + + get_online_cpus(); + err = 0; + for_each_present_cpu(cpu) { + spare_page = alloc_page(GFP_KERNEL); + if (!spare_page) { + err = -ENOMEM; + break; + } + per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; + } +#ifdef CONFIG_HOTPLUG_CPU + conf->cpu_notify.notifier_call = raid456_cpu_notify; + conf->cpu_notify.priority = 0; + if (err == 0) + err = register_cpu_notifier(&conf->cpu_notify); +#endif + put_online_cpus(); + + return err; +} + static raid5_conf_t *setup_conf(mddev_t *mddev) { raid5_conf_t *conf; @@ -4372,11 +4458,10 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) goto abort; - if (mddev->new_level == 6) { - conf->spare_page = alloc_page(GFP_KERNEL); - if (!conf->spare_page) - goto abort; - } + conf->level = mddev->new_level; + if (raid5_alloc_percpu(conf) != 0) + goto abort; + spin_lock_init(&conf->device_lock); init_waitqueue_head(&conf->wait_for_stripe); init_waitqueue_head(&conf->wait_for_overlap); @@ -4412,7 +4497,6 @@ static raid5_conf_t 
*setup_conf(mddev_t *mddev) } conf->chunk_size = mddev->new_chunk; - conf->level = mddev->new_level; if (conf->level == 6) conf->max_degraded = 2; else diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 52ba99954dec..07a7a4102f05 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -383,8 +383,13 @@ struct raid5_private_data { * (fresh device added). * Cleared when a sync completes. */ - - struct page *spare_page; /* Used when checking P/Q in raid6 */ + /* per cpu variables */ + struct raid5_percpu { + struct page *spare_page; /* Used when checking P/Q in raid6 */ + } *percpu; +#ifdef CONFIG_HOTPLUG_CPU + struct notifier_block cpu_notify; +#endif /* * Free stripes pool From d6f38f31f3ad4b0dd33fe970988f14e7c65ef702 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 11:50:52 -0700 Subject: [PATCH 0035/3409] md/raid5,6: add percpu scribble region for buffer lists Use percpu memory rather than stack for storing the buffer lists used in parity calculations. Include space for dma address conversions and pass that to async_tx via the async_submit_ctl.scribble pointer. [ Impact: move memory pressure from stack to heap ] Signed-off-by: Dan Williams --- drivers/md/raid5.c | 132 ++++++++++++++++++++++++++++++++++----------- drivers/md/raid5.h | 8 +++ 2 files changed, 110 insertions(+), 30 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 5359236a1ec7..7727954cf726 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -642,11 +642,18 @@ static void ops_complete_compute5(void *stripe_head_ref) release_stripe(sh); } -static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) +/* return a pointer to the address conversion region of the scribble buffer */ +static addr_conv_t *to_addr_conv(struct stripe_head *sh, + struct raid5_percpu *percpu) +{ + return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); +} + +static struct dma_async_tx_descriptor * +ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) { - /* kernel stack size limits the total number of disks */ int disks = sh->disks; - struct page *xor_srcs[disks]; + struct page **xor_srcs = percpu->scribble; int target = sh->ops.target; struct r5dev *tgt = &sh->dev[target]; struct page *xor_dest = tgt->page; @@ -666,7 +673,7 @@ static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh) atomic_inc(&sh->count); init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - ops_complete_compute5, sh, NULL); + ops_complete_compute5, sh, to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); else @@ -684,11 +691,11 @@ static void ops_complete_prexor(void *stripe_head_ref) } static struct dma_async_tx_descriptor * -ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, + struct dma_async_tx_descriptor *tx) { - /* kernel stack size limits the total number of disks */ int disks = sh->disks; - struct page *xor_srcs[disks]; + struct page **xor_srcs = percpu->scribble; int count = 0, pd_idx = sh->pd_idx, i; struct async_submit_ctl submit; @@ -706,7 +713,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) } init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx, - ops_complete_prexor, sh, NULL); + ops_complete_prexor, sh, to_addr_conv(sh, percpu)); tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); return tx; @@ -775,11 +782,11 @@ static void 
ops_complete_postxor(void *stripe_head_ref) } static void -ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu, + struct dma_async_tx_descriptor *tx) { - /* kernel stack size limits the total number of disks */ int disks = sh->disks; - struct page *xor_srcs[disks]; + struct page **xor_srcs = percpu->scribble; struct async_submit_ctl submit; int count = 0, pd_idx = sh->pd_idx, i; struct page *xor_dest; @@ -819,7 +826,8 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) atomic_inc(&sh->count); - init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, NULL); + init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, + to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); else @@ -838,11 +846,10 @@ static void ops_complete_check(void *stripe_head_ref) release_stripe(sh); } -static void ops_run_check(struct stripe_head *sh) +static void ops_run_check(struct stripe_head *sh, struct raid5_percpu *percpu) { - /* kernel stack size limits the total number of disks */ int disks = sh->disks; - struct page *xor_srcs[disks]; + struct page **xor_srcs = percpu->scribble; struct dma_async_tx_descriptor *tx; struct async_submit_ctl submit; @@ -858,7 +865,8 @@ static void ops_run_check(struct stripe_head *sh) xor_srcs[count++] = dev->page; } - init_async_submit(&submit, 0, NULL, NULL, NULL, NULL); + init_async_submit(&submit, 0, NULL, NULL, NULL, + to_addr_conv(sh, percpu)); tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &sh->ops.zero_sum_result, &submit); @@ -871,21 +879,26 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) { int overlap_clear = 0, i, disks = sh->disks; struct dma_async_tx_descriptor *tx = NULL; + raid5_conf_t *conf = sh->raid_conf; + struct raid5_percpu *percpu; + unsigned long cpu; + cpu = get_cpu(); + percpu = per_cpu_ptr(conf->percpu, cpu); if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; } if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { - tx = ops_run_compute5(sh); + tx = ops_run_compute5(sh, percpu); /* terminate the chain if postxor is not set to be run */ if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) async_tx_ack(tx); } if (test_bit(STRIPE_OP_PREXOR, &ops_request)) - tx = ops_run_prexor(sh, tx); + tx = ops_run_prexor(sh, percpu, tx); if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { tx = ops_run_biodrain(sh, tx); @@ -893,10 +906,10 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) } if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) - ops_run_postxor(sh, tx); + ops_run_postxor(sh, percpu, tx); if (test_bit(STRIPE_OP_CHECK, &ops_request)) - ops_run_check(sh); + ops_run_check(sh, percpu); if (overlap_clear) for (i = disks; i--; ) { @@ -904,6 +917,7 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } + put_cpu(); } static int grow_one_stripe(raid5_conf_t *conf) @@ -953,6 +967,28 @@ static int grow_stripes(raid5_conf_t *conf, int num) return 0; } +/** + * scribble_len - return the required size of the scribble region + * @num - total number of disks in the array + * + * The size must be enough to contain: + * 1/ a struct page pointer for each device in the array +2 + * 2/ room to convert each entry in (1) to its corresponding dma + * 
(dma_map_page()) or page (page_address()) address. + * + * Note: the +2 is for the destination buffers of the ddf/raid6 case where we + * calculate over all devices (not just the data blocks), using zeros in place + * of the P and Q blocks. + */ +static size_t scribble_len(int num) +{ + size_t len; + + len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); + + return len; +} + static int resize_stripes(raid5_conf_t *conf, int newsize) { /* Make all the stripes able to hold 'newsize' devices. @@ -981,6 +1017,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) struct stripe_head *osh, *nsh; LIST_HEAD(newstripes); struct disk_info *ndisks; + unsigned long cpu; int err; struct kmem_cache *sc; int i; @@ -1046,7 +1083,7 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) /* Step 3. * At this point, we are holding all the stripes so the array * is completely stalled, so now is a good time to resize - * conf->disks. + * conf->disks and the scribble region */ ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); if (ndisks) { @@ -1057,10 +1094,30 @@ static int resize_stripes(raid5_conf_t *conf, int newsize) } else err = -ENOMEM; + get_online_cpus(); + conf->scribble_len = scribble_len(newsize); + for_each_present_cpu(cpu) { + struct raid5_percpu *percpu; + void *scribble; + + percpu = per_cpu_ptr(conf->percpu, cpu); + scribble = kmalloc(conf->scribble_len, GFP_NOIO); + + if (scribble) { + kfree(percpu->scribble); + percpu->scribble = scribble; + } else { + err = -ENOMEM; + break; + } + } + put_online_cpus(); + /* Step 4, return new stripes to service */ while(!list_empty(&newstripes)) { nsh = list_entry(newstripes.next, struct stripe_head, lru); list_del_init(&nsh->lru); + for (i=conf->raid_disks; i < newsize; i++) if (nsh->dev[i].page == NULL) { struct page *p = alloc_page(GFP_NOIO); @@ -4318,6 +4375,7 @@ static void raid5_free_percpu(raid5_conf_t *conf) for_each_possible_cpu(cpu) { percpu = per_cpu_ptr(conf->percpu, cpu); safe_put_page(percpu->spare_page); + kfree(percpu->scribble); } #ifdef CONFIG_HOTPLUG_CPU unregister_cpu_notifier(&conf->cpu_notify); @@ -4347,9 +4405,15 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (!percpu->spare_page) + if (conf->level == 6 && !percpu->spare_page) percpu->spare_page = alloc_page(GFP_KERNEL); - if (!percpu->spare_page) { + if (!percpu->scribble) + percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); + + if (!percpu->scribble || + (conf->level == 6 && !percpu->spare_page)) { + safe_put_page(percpu->spare_page); + kfree(percpu->scribble); pr_err("%s: failed memory allocation for cpu%ld\n", __func__, cpu); return NOTIFY_BAD; @@ -4358,7 +4422,9 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, case CPU_DEAD: case CPU_DEAD_FROZEN: safe_put_page(percpu->spare_page); + kfree(percpu->scribble); percpu->spare_page = NULL; + percpu->scribble = NULL; break; default: break; @@ -4372,12 +4438,9 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) unsigned long cpu; struct page *spare_page; struct raid5_percpu *allcpus; + void *scribble; int err; - /* the only percpu data is the raid6 spare page */ - if (conf->level != 6) - return 0; - allcpus = alloc_percpu(struct raid5_percpu); if (!allcpus) return -ENOMEM; @@ -4386,12 +4449,20 @@ static int raid5_alloc_percpu(raid5_conf_t *conf) get_online_cpus(); err = 0; for_each_present_cpu(cpu) { - spare_page = alloc_page(GFP_KERNEL); - if 
(!spare_page) { + if (conf->level == 6) { + spare_page = alloc_page(GFP_KERNEL); + if (!spare_page) { + err = -ENOMEM; + break; + } + per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; + } + scribble = kmalloc(scribble_len(conf->raid_disks), GFP_KERNEL); + if (!scribble) { err = -ENOMEM; break; } - per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; + per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; } #ifdef CONFIG_HOTPLUG_CPU conf->cpu_notify.notifier_call = raid456_cpu_notify; @@ -4443,6 +4514,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev) goto abort; conf->raid_disks = mddev->raid_disks; + conf->scribble_len = scribble_len(conf->raid_disks); if (mddev->reshape_position == MaxSector) conf->previous_raid_disks = mddev->raid_disks; else diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 07a7a4102f05..e7baabffee86 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -386,7 +386,15 @@ struct raid5_private_data { /* per cpu variables */ struct raid5_percpu { struct page *spare_page; /* Used when checking P/Q in raid6 */ + void *scribble; /* space for constructing buffer + * lists and performing address + * conversions + */ } *percpu; + size_t scribble_len; /* size of scribble region must be + * associated with conf to handle + * cpu hotplug while reshaping + */ #ifdef CONFIG_HOTPLUG_CPU struct notifier_block cpu_notify; #endif From ad283ea4a3ce82cda2efe33163748a397b31b1eb Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:09:26 -0700 Subject: [PATCH 0036/3409] async_tx: add sum check flags Replace the flat zero_sum_result with a collection of flags to contain the P (xor) zero-sum result, and the soon to be utilized Q (raid6 reed solomon syndrome) zero-sum result. Use the SUM_CHECK_ namespace instead of DMA_ since these flags will be used on non-dma-zero-sum enabled platforms. 
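A minimal sketch of a caller consuming the new flags, using the async_xor_val() signature as changed below:

	enum sum_check_flags result;

	tx = async_xor_val(dest, srcs, 0, src_cnt, len, &result, &submit);
	async_tx_quiesce(&tx);
	if (result & SUM_CHECK_P_RESULT)
		/* xor (P) parity did not verify */;

SUM_CHECK_Q_RESULT stays clear for plain xor validation; it is reserved for the raid6 syndrome check mentioned above.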
Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx-adma.h | 5 +++-- arch/arm/mach-iop13xx/include/mach/adma.h | 12 +++++++----- crypto/async_tx/async_xor.c | 4 ++-- drivers/md/raid5.c | 2 +- drivers/md/raid5.h | 5 +++-- include/linux/async_tx.h | 2 +- include/linux/dmaengine.h | 21 ++++++++++++++++++++- 7 files changed, 37 insertions(+), 14 deletions(-) diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 83e6ba338e2c..26eefea02314 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -756,13 +756,14 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, hw_desc->src[0] = val; } -static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +static inline enum sum_check_flags +iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) { struct iop3xx_desc_aau *hw_desc = desc->hw_desc; struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en)); - return desc_ctrl.zero_result_err; + return desc_ctrl.zero_result_err << SUM_CHECK_P; } static inline void iop_chan_append(struct iop_adma_chan *chan) diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 5722e86f2174..1cd31df8924d 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -428,18 +428,20 @@ static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, hw_desc->block_fill_data = val; } -static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) +static inline enum sum_check_flags +iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) { struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; struct iop13xx_adma_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field; struct iop13xx_adma_byte_count byte_count = hw_desc->byte_count_field; + enum sum_check_flags flags; BUG_ON(!(byte_count.tx_complete && desc_ctrl.zero_result)); - if (desc_ctrl.pq_xfer_en) - return byte_count.zero_result_err_q; - else - return byte_count.zero_result_err; + flags = byte_count.zero_result_err_q << SUM_CHECK_Q; + flags |= byte_count.zero_result_err << SUM_CHECK_P; + + return flags; } static inline void iop_chan_append(struct iop_adma_chan *chan) diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 1e96c4df7061..78fb7780272a 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -246,7 +246,7 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len) */ struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, u32 *result, + int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) { struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL, @@ -304,7 +304,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, async_tx_quiesce(&tx); - *result = page_is_zero(dest, offset, len) ? 
0 : 1; + *result = !page_is_zero(dest, offset, len) << SUM_CHECK_P; async_tx_sync_epilog(submit); submit->flags = flags_orig; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7727954cf726..1f2a266f3cf7 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2590,7 +2590,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, * we are done. Otherwise update the mismatch count and repair * parity if !MD_RECOVERY_CHECK */ - if (sh->ops.zero_sum_result == 0) + if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) /* parity is correct (on disc, * not in buffer any more) */ diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index e7baabffee86..75f2c6c4cf90 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -2,6 +2,7 @@ #define _RAID5_H #include +#include /* * @@ -215,8 +216,8 @@ struct stripe_head { * @target - STRIPE_OP_COMPUTE_BLK target */ struct stripe_operations { - int target; - u32 zero_sum_result; + int target; + enum sum_check_flags zero_sum_result; } ops; struct r5dev { struct bio req; diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 00cfb637ddf2..3d21a2517518 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -148,7 +148,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, struct dma_async_tx_descriptor * async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, - int src_cnt, size_t len, u32 *result, + int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit); struct dma_async_tx_descriptor * diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6768727d00d7..02447afcebad 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -86,6 +86,25 @@ enum dma_ctrl_flags { DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), }; +/** + * enum sum_check_bits - bit position of pq_check_flags + */ +enum sum_check_bits { + SUM_CHECK_P = 0, + SUM_CHECK_Q = 1, +}; + +/** + * enum pq_check_flags - result of async_{xor,pq}_zero_sum operations + * @SUM_CHECK_P_RESULT - 1 if xor zero sum error, 0 otherwise + * @SUM_CHECK_Q_RESULT - 1 if reed-solomon zero sum error, 0 otherwise + */ +enum sum_check_flags { + SUM_CHECK_P_RESULT = (1 << SUM_CHECK_P), + SUM_CHECK_Q_RESULT = (1 << SUM_CHECK_Q), +}; + + /** * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t. * See linux/cpumask.h @@ -245,7 +264,7 @@ struct dma_device { unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, - size_t len, u32 *result, unsigned long flags); + size_t len, enum sum_check_flags *result, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); From af1f951eb6ef27b01cbfb3f6c21b770af4368a6d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:09:26 -0700 Subject: [PATCH 0037/3409] async_tx: kill needless module_{init|exit} If module_init and module_exit are nops then neither need to be defined. 
[ Impact: pure cleanup ] Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 13 ------------- crypto/async_tx/async_memset.c | 13 ------------- crypto/async_tx/async_tx.c | 17 +++-------------- 3 files changed, 3 insertions(+), 40 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 89e05556f3df..98e15bd0dcb5 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -90,19 +90,6 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, } EXPORT_SYMBOL_GPL(async_memcpy); -static int __init async_memcpy_init(void) -{ - return 0; -} - -static void __exit async_memcpy_exit(void) -{ - do { } while (0); -} - -module_init(async_memcpy_init); -module_exit(async_memcpy_exit); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous memcpy api"); MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index c14437238f4c..b896a6e5f673 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -80,19 +80,6 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len, } EXPORT_SYMBOL_GPL(async_memset); -static int __init async_memset_init(void) -{ - return 0; -} - -static void __exit async_memset_exit(void) -{ - do { } while (0); -} - -module_init(async_memset_init); -module_exit(async_memset_exit); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("asynchronous memset api"); MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 802a5ce437d9..6e37ad3f4417 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -42,6 +42,9 @@ static void __exit async_tx_exit(void) async_dmaengine_put(); } +module_init(async_tx_init); +module_exit(async_tx_exit); + /** * __async_tx_find_channel - find a channel to carry out the operation or let * the transaction execute synchronously @@ -61,17 +64,6 @@ __async_tx_find_channel(struct async_submit_ctl *submit, return async_dma_find_channel(tx_type); } EXPORT_SYMBOL_GPL(__async_tx_find_channel); -#else -static int __init async_tx_init(void) -{ - printk(KERN_INFO "async_tx: api initialized (sync-only)\n"); - return 0; -} - -static void __exit async_tx_exit(void) -{ - do { } while (0); -} #endif @@ -298,9 +290,6 @@ void async_tx_quiesce(struct dma_async_tx_descriptor **tx) } EXPORT_SYMBOL_GPL(async_tx_quiesce); -module_init(async_tx_init); -module_exit(async_tx_exit); - MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Asynchronous Bulk Memory Transactions API"); MODULE_LICENSE("GPL"); From 95475e57113c66aac7583925736ed2e2d58c990d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:19:02 -0700 Subject: [PATCH 0038/3409] async_tx: remove walk of tx->parent chain in dma_wait_for_async_tx We currently walk the parent chain when waiting for a given tx to complete however this walk may race with the driver cleanup routine. The routines in async_raid6_recov.c may fall back to the synchronous path at any point so we need to be prepared to call async_tx_quiesce() (which calls dma_wait_for_async_tx). To remove the ->parent walk we guarantee that every time a dependency is attached ->issue_pending() is invoked, then we can simply poll the initial descriptor until completion. 
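In outline, the wait then reduces to spinning on the descriptor's own cookie (a sketch of the idea only; the reworked dma_wait_for_async_tx() in the diff below also adds a timeout and an error path):

	/* sketch: dependencies are guaranteed issued, so no parent walk */
	if (!tx)
		return DMA_SUCCESS;
	while (tx->cookie == -EBUSY)
		cpu_relax();	/* descriptor not yet submitted */
	return dma_sync_wait(tx->chan, tx->cookie);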
This also allows for a lighter weight 'issue pending' implementation as there is no longer a requirement to iterate through all the channels' ->issue_pending() routines as long as operations have been submitted in an ordered chain. async_tx_issue_pending() is added for this case. Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 13 ++++++----- drivers/dma/dmaengine.c | 45 +++++++++----------------------------- include/linux/async_tx.h | 23 +++++++++++++++++++ 3 files changed, 40 insertions(+), 41 deletions(-) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 6e37ad3f4417..60615fedcf5e 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -77,8 +77,8 @@ static void async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_async_tx_descriptor *tx) { - struct dma_chan *chan; - struct dma_device *device; + struct dma_chan *chan = depend_tx->chan; + struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; /* first check to see if we can still append to depend_tx */ @@ -90,11 +90,11 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, } spin_unlock_bh(&depend_tx->lock); - if (!intr_tx) + /* attached dependency, flush the parent channel */ + if (!intr_tx) { + device->device_issue_pending(chan); return; - - chan = depend_tx->chan; - device = chan->device; + } /* see if we can schedule an interrupt * otherwise poll for completion @@ -128,6 +128,7 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, intr_tx->tx_submit(intr_tx); async_tx_ack(intr_tx); } + device->device_issue_pending(chan); } else { if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR) panic("%s: DMA_ERROR waiting for depend_tx\n", diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 6781e8f3c064..e002e0e0d055 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -934,49 +934,24 @@ EXPORT_SYMBOL(dma_async_tx_descriptor_init); /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on - * - * This routine assumes that tx was obtained from a call to async_memcpy, - * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped - * and submitted). Walking the parent chain is only meant to cover for DMA - * drivers that do not implement the DMA_INTERRUPT capability and may race with - * the driver's descriptor cleanup routine. 
*/ enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) { - enum dma_status status; - struct dma_async_tx_descriptor *iter; - struct dma_async_tx_descriptor *parent; + unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); if (!tx) return DMA_SUCCESS; - WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for" - " %s\n", __func__, dma_chan_name(tx->chan)); - - /* poll through the dependency chain, return when tx is complete */ - do { - iter = tx; - - /* find the root of the unsubmitted dependency chain */ - do { - parent = iter->parent; - if (!parent) - break; - else - iter = parent; - } while (parent); - - /* there is a small window for ->parent == NULL and - * ->cookie == -EBUSY - */ - while (iter->cookie == -EBUSY) - cpu_relax(); - - status = dma_sync_wait(iter->chan, iter->cookie); - } while (status == DMA_IN_PROGRESS || (iter != tx)); - - return status; + while (tx->cookie == -EBUSY) { + if (time_after_eq(jiffies, dma_sync_wait_timeout)) { + pr_err("%s timeout waiting for descriptor submission\n", + __func__); + return DMA_ERROR; + } + cpu_relax(); + } + return dma_sync_wait(tx->chan, tx->cookie); } EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 3d21a2517518..12a2efcbd565 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -83,6 +83,24 @@ struct async_submit_ctl { #ifdef CONFIG_DMA_ENGINE #define async_tx_issue_pending_all dma_issue_pending_all + +/** + * async_tx_issue_pending - send pending descriptor to the hardware channel + * @tx: descriptor handle to retrieve hardware context + * + * Note: any dependent operations will have already been issued by + * async_tx_channel_switch, or (in the case of no channel switch) will + * be already pending on this channel. + */ +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + if (likely(tx)) { + struct dma_chan *chan = tx->chan; + struct dma_device *dma = chan->device; + + dma->device_issue_pending(chan); + } +} #ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL #include #else @@ -98,6 +116,11 @@ static inline void async_tx_issue_pending_all(void) do { } while (0); } +static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) +{ + do { } while (0); +} + static inline struct dma_chan * async_tx_find_channel(struct async_submit_ctl *submit, enum dma_transaction_type tx_type, struct page **dst, From b2f46fd8ef3dff2ab30f31126833f78b7480283a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:20:36 -0700 Subject: [PATCH 0039/3409] async_tx: add support for asynchronous GF multiplication [ Based on an original patch by Yuri Tikhonov ] This adds support for doing asynchronous GF multiplication by adding two additional functions to the async_tx API: async_gen_syndrome() does simultaneous XOR and Galois field multiplication of sources. async_syndrome_val() validates the given source buffers against known P and Q values. When a request is made to run async_pq against more than the hardware maximum number of supported sources we need to reuse the previous generated P and Q values as sources into the next operation. Care must be taken to remove Q from P' and P from Q'. 
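A caller-level sketch before the worked example below (hypothetical caller; the block layout follows the async_pq.c comments in this patch: P at blocks[disks-2], Q at blocks[disks-1], with NULL to omit one of them):

	addr_conv_t addr_conv[disks];	/* scribble space */
	struct page *blocks[disks];
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	/* fill blocks[0..disks-3] with the data pages */
	blocks[disks-2] = p_page;	/* NULL here disables P generation */
	blocks[disks-1] = q_page;	/* NULL here disables Q generation */
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, addr_conv);
	tx = async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);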
For example to perform a 5 source pq op with hardware that only
supports 4 sources at a time the following approach is taken:

p, q = PQ(src0, src1, src2, src3, COEF({01}, {02}, {04}, {08}))
p', q' = PQ(p, q, q, src4, COEF({00}, {01}, {00}, {10}))

p' = p + q + q + src4 = p + src4
q' = {00}*p + {01}*q + {00}*q + {10}*src4 = q + {10}*src4

Note: 4 is the minimum acceptable maxpq otherwise we punt to
synchronous-software path.

The DMA_PREP_CONTINUE flag indicates to the driver to reuse p and q as
sources (in the above manner) and fill the remaining slots up to maxpq
with the new sources/coefficients.

Note1: Some devices have native support for P+Q continuation and can
skip this extra work.  Devices with this capability can advertise it
with dma_set_maxpq.  It is up to each driver how to handle the
DMA_PREP_CONTINUE flag.

Note2: The api supports disabling the generation of P when generating Q,
this is ignored by the synchronous path but is implemented by some dma
devices to save unnecessary writes.  In this case the continuation
algorithm is simplified to only reuse Q as a source.

Cc: H. Peter Anvin
Cc: David Woodhouse
Signed-off-by: Yuri Tikhonov
Signed-off-by: Ilya Yanok
Reviewed-by: Andre Noll
Acked-by: Maciej Sosnowski
Signed-off-by: Dan Williams
---
 Documentation/crypto/async-tx-api.txt |   3 +
 arch/arm/mach-iop13xx/setup.c         |   2 +-
 crypto/async_tx/Kconfig               |   4 +
 crypto/async_tx/Makefile              |   1 +
 crypto/async_tx/async_pq.c            | 388 ++++++++++++++++++++++++++
 crypto/async_tx/async_xor.c           |   2 +-
 drivers/dma/dmaengine.c               |   4 +
 drivers/dma/iop-adma.c                |   2 +-
 include/linux/async_tx.h              |   9 +
 include/linux/dmaengine.h             |  87 +++++-
 10 files changed, 493 insertions(+), 9 deletions(-)
 create mode 100644 crypto/async_tx/async_pq.c

diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt
index 6b15e488c0e7..0e48e054d69a 100644
--- a/Documentation/crypto/async-tx-api.txt
+++ b/Documentation/crypto/async-tx-api.txt
@@ -64,6 +64,9 @@ xor - xor a series of source buffers and write the result to a
 xor_val - xor a series of source buffers and set a flag if the
	  result is zero.
The implementation attempts to prevent writes to memory +pq - generate the p+q (raid6 syndrome) from a series of source buffers +pq_val - validate that a p and or q buffer are in sync with a given series of + sources 3.3 Descriptor management: The return value is non-NULL and points to a 'descriptor' when the operation diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index 9800228b71d3..2e7ca0d75f8a 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -506,7 +506,7 @@ void __init iop13xx_platform_init(void) dma_cap_set(DMA_MEMSET, plat_data->cap_mask); dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); - dma_cap_set(DMA_PQ_XOR, plat_data->cap_mask); + dma_cap_set(DMA_PQ, plat_data->cap_mask); dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index d8fb39145986..cb6d7314f198 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig @@ -14,3 +14,7 @@ config ASYNC_MEMSET tristate select ASYNC_CORE +config ASYNC_PQ + tristate + select ASYNC_CORE + diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 27baa7d52fbc..1b9926588259 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_ASYNC_CORE) += async_tx.o obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o +obj-$(CONFIG_ASYNC_PQ) += async_pq.o diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c new file mode 100644 index 000000000000..108b21efb499 --- /dev/null +++ b/crypto/async_tx/async_pq.c @@ -0,0 +1,388 @@ +/* + * Copyright(c) 2007 Yuri Tikhonov + * Copyright(c) 2009 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. 
+ */ +#include +#include +#include +#include +#include + +/** + * scribble - space to hold throwaway P buffer for synchronous gen_syndrome + */ +static struct page *scribble; + +static bool is_raid6_zero_block(struct page *p) +{ + return p == (void *) raid6_empty_zero_page; +} + +/* the struct page *blocks[] parameter passed to async_gen_syndrome() + * and async_syndrome_val() contains the 'P' destination address at + * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] + * + * note: these are macros as they are used as lvalues + */ +#define P(b, d) (b[d-2]) +#define Q(b, d) (b[d-1]) + +/** + * do_async_gen_syndrome - asynchronously calculate P and/or Q + */ +static __async_inline struct dma_async_tx_descriptor * +do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, + const unsigned char *scfs, unsigned int offset, int disks, + size_t len, dma_addr_t *dma_src, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct dma_device *dma = chan->device; + enum dma_ctrl_flags dma_flags = 0; + enum async_tx_flags flags_orig = submit->flags; + dma_async_tx_callback cb_fn_orig = submit->cb_fn; + dma_async_tx_callback cb_param_orig = submit->cb_param; + int src_cnt = disks - 2; + unsigned char coefs[src_cnt]; + unsigned short pq_src_cnt; + dma_addr_t dma_dest[2]; + int src_off = 0; + int idx; + int i; + + /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */ + if (P(blocks, disks)) + dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset, + len, DMA_BIDIRECTIONAL); + else + dma_flags |= DMA_PREP_PQ_DISABLE_P; + if (Q(blocks, disks)) + dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset, + len, DMA_BIDIRECTIONAL); + else + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + + /* convert source addresses being careful to collapse 'empty' + * sources and update the coefficients accordingly + */ + for (i = 0, idx = 0; i < src_cnt; i++) { + if (is_raid6_zero_block(blocks[i])) + continue; + dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len, + DMA_TO_DEVICE); + coefs[idx] = scfs[i]; + idx++; + } + src_cnt = idx; + + while (src_cnt > 0) { + submit->flags = flags_orig; + pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); + /* if we are submitting additional pqs, leave the chain open, + * clear the callback parameters, and leave the destination + * buffers mapped + */ + if (src_cnt > pq_src_cnt) { + submit->flags &= ~ASYNC_TX_ACK; + dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; + submit->cb_fn = NULL; + submit->cb_param = NULL; + } else { + dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP; + submit->cb_fn = cb_fn_orig; + submit->cb_param = cb_param_orig; + if (cb_fn_orig) + dma_flags |= DMA_PREP_INTERRUPT; + } + + /* Since we have clobbered the src_list we are committed + * to doing this asynchronously. 
Drivers force forward + * progress in case they can not provide a descriptor + */ + for (;;) { + tx = dma->device_prep_dma_pq(chan, dma_dest, + &dma_src[src_off], + pq_src_cnt, + &coefs[src_off], len, + dma_flags); + if (likely(tx)) + break; + async_tx_quiesce(&submit->depend_tx); + dma_async_issue_pending(chan); + } + + async_tx_submit(chan, tx, submit); + submit->depend_tx = tx; + + /* drop completed sources */ + src_cnt -= pq_src_cnt; + src_off += pq_src_cnt; + + dma_flags |= DMA_PREP_CONTINUE; + } + + return tx; +} + +/** + * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome + */ +static void +do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks, + size_t len, struct async_submit_ctl *submit) +{ + void **srcs; + int i; + + if (submit->scribble) + srcs = submit->scribble; + else + srcs = (void **) blocks; + + for (i = 0; i < disks; i++) { + if (is_raid6_zero_block(blocks[i])) { + BUG_ON(i > disks - 3); /* P or Q can't be zero */ + srcs[i] = blocks[i]; + } else + srcs[i] = page_address(blocks[i]) + offset; + } + raid6_call.gen_syndrome(disks, len, srcs); + async_tx_sync_epilog(submit); +} + +/** + * async_gen_syndrome - asynchronously calculate a raid6 syndrome + * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 + * @offset: common offset into each block (src and dest) to start transaction + * @disks: number of blocks (including missing P or Q, see below) + * @len: length of operation in bytes + * @submit: submission/completion modifiers + * + * General note: This routine assumes a field of GF(2^8) with a + * primitive polynomial of 0x11d and a generator of {02}. + * + * 'disks' note: callers can optionally omit either P or Q (but not + * both) from the calculation by setting blocks[disks-2] or + * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= + * PAGE_SIZE as a temporary buffer of this size is used in the + * synchronous path. 'disks' always accounts for both destination + * buffers. + * + * 'blocks' note: if submit->scribble is NULL then the contents of + * 'blocks' may be overridden + */ +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, + size_t len, struct async_submit_ctl *submit) +{ + int src_cnt = disks - 2; + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &P(blocks, disks), 2, + blocks, src_cnt, len); + struct dma_device *device = chan ? 
chan->device : NULL; + dma_addr_t *dma_src = NULL; + + BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks))); + + if (submit->scribble) + dma_src = submit->scribble; + else if (sizeof(dma_addr_t) <= sizeof(struct page *)) + dma_src = (dma_addr_t *) blocks; + + if (dma_src && device && + (src_cnt <= dma_maxpq(device, 0) || + dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) { + /* run the p+q asynchronously */ + pr_debug("%s: (async) disks: %d len: %zu\n", + __func__, disks, len); + return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset, + disks, len, dma_src, submit); + } + + /* run the pq synchronously */ + pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); + + /* wait for any prerequisite operations */ + async_tx_quiesce(&submit->depend_tx); + + if (!P(blocks, disks)) { + P(blocks, disks) = scribble; + BUG_ON(len + offset > PAGE_SIZE); + } + if (!Q(blocks, disks)) { + Q(blocks, disks) = scribble; + BUG_ON(len + offset > PAGE_SIZE); + } + do_sync_gen_syndrome(blocks, offset, disks, len, submit); + + return NULL; +} +EXPORT_SYMBOL_GPL(async_gen_syndrome); + +/** + * async_syndrome_val - asynchronously validate a raid6 syndrome + * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 + * @offset: common offset into each block (src and dest) to start transaction + * @disks: number of blocks (including missing P or Q, see below) + * @len: length of operation in bytes + * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set + * @spare: temporary result buffer for the synchronous case + * @submit: submission / completion modifiers + * + * The same notes from async_gen_syndrome apply to the 'blocks', + * and 'disks' parameters of this routine. The synchronous path + * requires a temporary result buffer and submit->scribble to be + * specified. + */ +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int disks, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL, + NULL, 0, blocks, disks, + len); + struct dma_device *device = chan ? chan->device : NULL; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = submit->cb_fn ? 
DMA_PREP_INTERRUPT : 0; + dma_addr_t *dma_src = NULL; + + BUG_ON(disks < 4); + + if (submit->scribble) + dma_src = submit->scribble; + else if (sizeof(dma_addr_t) <= sizeof(struct page *)) + dma_src = (dma_addr_t *) blocks; + + if (dma_src && device && disks <= dma_maxpq(device, 0)) { + struct device *dev = device->dev; + dma_addr_t *pq = &dma_src[disks-2]; + int i; + + pr_debug("%s: (async) disks: %d len: %zu\n", + __func__, disks, len); + if (!P(blocks, disks)) + dma_flags |= DMA_PREP_PQ_DISABLE_P; + if (!Q(blocks, disks)) + dma_flags |= DMA_PREP_PQ_DISABLE_Q; + for (i = 0; i < disks; i++) + if (likely(blocks[i])) { + BUG_ON(is_raid6_zero_block(blocks[i])); + dma_src[i] = dma_map_page(dev, blocks[i], + offset, len, + DMA_TO_DEVICE); + } + + for (;;) { + tx = device->device_prep_dma_pq_val(chan, pq, dma_src, + disks - 2, + raid6_gfexp, + len, pqres, + dma_flags); + if (likely(tx)) + break; + async_tx_quiesce(&submit->depend_tx); + dma_async_issue_pending(chan); + } + async_tx_submit(chan, tx, submit); + + return tx; + } else { + struct page *p_src = P(blocks, disks); + struct page *q_src = Q(blocks, disks); + enum async_tx_flags flags_orig = submit->flags; + dma_async_tx_callback cb_fn_orig = submit->cb_fn; + void *scribble = submit->scribble; + void *cb_param_orig = submit->cb_param; + void *p, *q, *s; + + pr_debug("%s: (sync) disks: %d len: %zu\n", + __func__, disks, len); + + /* caller must provide a temporary result buffer and + * allow the input parameters to be preserved + */ + BUG_ON(!spare || !scribble); + + /* wait for any prerequisite operations */ + async_tx_quiesce(&submit->depend_tx); + + /* recompute p and/or q into the temporary buffer and then + * check to see the result matches the current value + */ + tx = NULL; + *pqres = 0; + if (p_src) { + init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, + NULL, NULL, scribble); + tx = async_xor(spare, blocks, offset, disks-2, len, submit); + async_tx_quiesce(&tx); + p = page_address(p_src) + offset; + s = page_address(spare) + offset; + *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; + } + + if (q_src) { + P(blocks, disks) = NULL; + Q(blocks, disks) = spare; + init_async_submit(submit, 0, NULL, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, offset, disks, len, submit); + async_tx_quiesce(&tx); + q = page_address(q_src) + offset; + s = page_address(spare) + offset; + *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; + } + + /* restore P, Q and submit */ + P(blocks, disks) = p_src; + Q(blocks, disks) = q_src; + + submit->cb_fn = cb_fn_orig; + submit->cb_param = cb_param_orig; + submit->flags = flags_orig; + async_tx_sync_epilog(submit); + + return NULL; + } +} +EXPORT_SYMBOL_GPL(async_syndrome_val); + +static int __init async_pq_init(void) +{ + scribble = alloc_page(GFP_KERNEL); + + if (scribble) + return 0; + + pr_err("%s: failed to allocate required spare page\n", __func__); + + return -ENOMEM; +} + +static void __exit async_pq_exit(void) +{ + put_page(scribble); +} + +module_init(async_pq_init); +module_exit(async_pq_exit); + +MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); +MODULE_LICENSE("GPL"); diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 78fb7780272a..56b5f98da463 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -62,7 +62,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, while (src_cnt) { submit->flags = flags_orig; dma_flags = 0; - xor_src_cnt = min(src_cnt, dma->max_xor); + xor_src_cnt = 
min(src_cnt, (int)dma->max_xor); /* if we are submitting additional xors, leave the chain open, * clear the callback parameters, and leave the destination * buffer mapped diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e002e0e0d055..cd5673d3043b 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -646,6 +646,10 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_xor); BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val); + BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && + !device->device_prep_dma_pq); + BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && + !device->device_prep_dma_pq_val); BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset); BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 6ff79a672699..4496bc606662 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1257,7 +1257,7 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " "( %s%s%s%s%s%s%s%s%s%s)\n", - dma_has_cap(DMA_PQ_XOR, dma_dev->cap_mask) ? "pq_xor " : "", + dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 12a2efcbd565..e6ce5f004f98 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -185,5 +185,14 @@ async_memset(struct page *dest, int val, unsigned int offset, struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit); +struct dma_async_tx_descriptor * +async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, + size_t len, enum sum_check_flags *pqres, struct page *spare, + struct async_submit_ctl *submit); + void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 02447afcebad..ce010cd991d2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -52,7 +52,7 @@ enum dma_status { enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, - DMA_PQ_XOR, + DMA_PQ, DMA_DUAL_XOR, DMA_PQ_UPDATE, DMA_XOR_VAL, @@ -70,20 +70,28 @@ enum dma_transaction_type { /** * enum dma_ctrl_flags - DMA flags to augment operation preparation, - * control completion, and communicate status. + * control completion, and communicate status. * @DMA_PREP_INTERRUPT - trigger an interrupt (callback) upon completion of - * this transaction + * this transaction * @DMA_CTRL_ACK - the descriptor cannot be reused until the client - * acknowledges receipt, i.e. has has a chance to establish any - * dependency chains + * acknowledges receipt, i.e. 
has has a chance to establish any dependency + * chains * @DMA_COMPL_SKIP_SRC_UNMAP - set to disable dma-unmapping the source buffer(s) * @DMA_COMPL_SKIP_DEST_UNMAP - set to disable dma-unmapping the destination(s) + * @DMA_PREP_PQ_DISABLE_P - prevent generation of P while generating Q + * @DMA_PREP_PQ_DISABLE_Q - prevent generation of Q while generating P + * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as + * sources that were the result of a previous operation, in the case of a PQ + * operation it continues the calculation with new sources */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), DMA_CTRL_ACK = (1 << 1), DMA_COMPL_SKIP_SRC_UNMAP = (1 << 2), DMA_COMPL_SKIP_DEST_UNMAP = (1 << 3), + DMA_PREP_PQ_DISABLE_P = (1 << 4), + DMA_PREP_PQ_DISABLE_Q = (1 << 5), + DMA_PREP_CONTINUE = (1 << 6), }; /** @@ -226,6 +234,7 @@ struct dma_async_tx_descriptor { * @global_node: list_head for global dma_device_list * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor sources, 0 if no capability + * @max_pq: maximum number of PQ sources and PQ-continue capability * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -234,6 +243,8 @@ struct dma_async_tx_descriptor { * @device_prep_dma_memcpy: prepares a memcpy operation * @device_prep_dma_xor: prepares a xor operation * @device_prep_dma_xor_val: prepares a xor validation operation + * @device_prep_dma_pq: prepares a pq operation + * @device_prep_dma_pq_val: prepares a pqzero_sum operation * @device_prep_dma_memset: prepares a memset operation * @device_prep_dma_interrupt: prepares an end of chain interrupt operation * @device_prep_slave_sg: prepares a slave dma operation @@ -248,7 +259,9 @@ struct dma_device { struct list_head channels; struct list_head global_node; dma_cap_mask_t cap_mask; - int max_xor; + unsigned short max_xor; + unsigned short max_pq; + #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; struct device *dev; @@ -265,6 +278,14 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq)( + struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags); + struct dma_async_tx_descriptor *(*device_prep_dma_pq_val)( + struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_memset)( struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags); @@ -283,6 +304,60 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); }; +static inline void +dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) +{ + dma->max_pq = maxpq; + if (has_pq_continue) + dma->max_pq |= DMA_HAS_PQ_CONTINUE; +} + +static inline bool dmaf_continue(enum dma_ctrl_flags flags) +{ + return (flags & DMA_PREP_CONTINUE) == DMA_PREP_CONTINUE; +} + +static inline bool dmaf_p_disabled_continue(enum dma_ctrl_flags flags) +{ + enum dma_ctrl_flags mask = DMA_PREP_CONTINUE | DMA_PREP_PQ_DISABLE_P; + + return (flags & mask) == mask; +} + +static inline bool dma_dev_has_pq_continue(struct dma_device *dma) +{ + return (dma->max_pq & 
DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; +} + +static unsigned short dma_dev_to_maxpq(struct dma_device *dma) +{ + return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; +} + +/* dma_maxpq - reduce maxpq in the face of continued operations + * @dma - dma device with PQ capability + * @flags - to check if DMA_PREP_CONTINUE and DMA_PREP_PQ_DISABLE_P are set + * + * When an engine does not support native continuation we need 3 extra + * source slots to reuse P and Q with the following coefficients: + * 1/ {00} * P : remove P from Q', but use it as a source for P' + * 2/ {01} * Q : use Q to continue Q' calculation + * 3/ {00} * Q : subtract Q from P' to cancel (2) + * + * In the case where P is disabled we only need 1 extra source: + * 1/ {01} * Q : use Q to continue Q' calculation + */ +static inline int dma_maxpq(struct dma_device *dma, enum dma_ctrl_flags flags) +{ + if (dma_dev_has_pq_continue(dma) || !dmaf_continue(flags)) + return dma_dev_to_maxpq(dma); + else if (dmaf_p_disabled_continue(flags)) + return dma_dev_to_maxpq(dma) - 1; + else if (dmaf_continue(flags)) + return dma_dev_to_maxpq(dma) - 3; + BUG(); +} + /* --- public DMA engine API --- */ #ifdef CONFIG_DMA_ENGINE From 0a82a6239beecc95db6e05fe43ee62d16b381d38 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:20:37 -0700 Subject: [PATCH 0040/3409] async_tx: add support for asynchronous RAID6 recovery operations async_raid6_2data_recov() recovers two data disk failures async_raid6_datap_recov() recovers a data disk and the P disk These routines are a port of the synchronous versions found in drivers/md/raid6recov.c. The primary difference is breaking out the xor operations into separate calls to async_xor. Two helper routines are introduced to perform scalar multiplication where needed. async_sum_product() multiplies two sources by scalar coefficients and then sums (xor) the result. async_mult() simply multiplies a single source by a scalar. This implemention also includes, in contrast to the original synchronous-only code, special case handling for the 4-disk and 5-disk array cases. In these situations the default N-disk algorithm will present 0-source or 1-source operations to dma devices. To cover for dma devices where the minimum source count is 2 we implement 4-disk and 5-disk handling in the recovery code. [ Impact: asynchronous raid6 recovery routines for 2data and datap cases ] Cc: Yuri Tikhonov Cc: Ilya Yanok Cc: H. 
Peter Anvin Cc: David Woodhouse Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- Documentation/crypto/async-tx-api.txt | 4 + crypto/async_tx/Kconfig | 5 + crypto/async_tx/Makefile | 1 + crypto/async_tx/async_raid6_recov.c | 448 ++++++++++++++++++++++++++ include/linux/async_tx.h | 8 + 5 files changed, 466 insertions(+) create mode 100644 crypto/async_tx/async_raid6_recov.c diff --git a/Documentation/crypto/async-tx-api.txt b/Documentation/crypto/async-tx-api.txt index 0e48e054d69a..ba046b8fa92f 100644 --- a/Documentation/crypto/async-tx-api.txt +++ b/Documentation/crypto/async-tx-api.txt @@ -67,6 +67,10 @@ xor_val - xor a series of source buffers and set a flag if the pq - generate the p+q (raid6 syndrome) from a series of source buffers pq_val - validate that a p and or q buffer are in sync with a given series of sources +datap - (raid6_datap_recov) recover a raid6 data block and the p block + from the given sources +2data - (raid6_2data_recov) recover 2 raid6 data blocks from the given + sources 3.3 Descriptor management: The return value is non-NULL and points to a 'descriptor' when the operation diff --git a/crypto/async_tx/Kconfig b/crypto/async_tx/Kconfig index cb6d7314f198..e5aeb2b79e6f 100644 --- a/crypto/async_tx/Kconfig +++ b/crypto/async_tx/Kconfig @@ -18,3 +18,8 @@ config ASYNC_PQ tristate select ASYNC_CORE +config ASYNC_RAID6_RECOV + tristate + select ASYNC_CORE + select ASYNC_PQ + diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 1b9926588259..9a1a76811b80 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_ASYNC_MEMCPY) += async_memcpy.o obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o obj-$(CONFIG_ASYNC_PQ) += async_pq.o +obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c new file mode 100644 index 000000000000..0c14d48c9896 --- /dev/null +++ b/crypto/async_tx/async_raid6_recov.c @@ -0,0 +1,448 @@ +/* + * Asynchronous RAID-6 recovery calculations ASYNC_TX API. + * Copyright(c) 2009 Intel Corporation + * + * based on raid6recov.c: + * Copyright 2002 H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 51 + * Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#include +#include +#include +#include +#include + +static struct dma_async_tx_descriptor * +async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, + size_t len, struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &dest, 1, srcs, 2, len); + struct dma_device *dma = chan ? 
chan->device : NULL; + const u8 *amul, *bmul; + u8 ax, bx; + u8 *a, *b, *c; + + if (dma) { + dma_addr_t dma_dest[2]; + dma_addr_t dma_src[2]; + struct device *dev = dma->dev; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + + dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); + dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); + tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 2, coef, + len, dma_flags); + if (tx) { + async_tx_submit(chan, tx, submit); + return tx; + } + } + + /* run the operation synchronously */ + async_tx_quiesce(&submit->depend_tx); + amul = raid6_gfmul[coef[0]]; + bmul = raid6_gfmul[coef[1]]; + a = page_address(srcs[0]); + b = page_address(srcs[1]); + c = page_address(dest); + + while (len--) { + ax = amul[*a++]; + bx = bmul[*b++]; + *c++ = ax ^ bx; + } + + return NULL; +} + +static struct dma_async_tx_descriptor * +async_mult(struct page *dest, struct page *src, u8 coef, size_t len, + struct async_submit_ctl *submit) +{ + struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, + &dest, 1, &src, 1, len); + struct dma_device *dma = chan ? chan->device : NULL; + const u8 *qmul; /* Q multiplier table */ + u8 *d, *s; + + if (dma) { + dma_addr_t dma_dest[2]; + dma_addr_t dma_src[1]; + struct device *dev = dma->dev; + struct dma_async_tx_descriptor *tx; + enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + + dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); + dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); + tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, + len, dma_flags); + if (tx) { + async_tx_submit(chan, tx, submit); + return tx; + } + } + + /* no channel available, or failed to allocate a descriptor, so + * perform the operation synchronously + */ + async_tx_quiesce(&submit->depend_tx); + qmul = raid6_gfmul[coef]; + d = page_address(dest); + s = page_address(src); + + while (len--) + *d++ = qmul[*s++]; + + return NULL; +} + +static struct dma_async_tx_descriptor * +__2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *a, *b; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + + p = blocks[4-2]; + q = blocks[4-1]; + + a = blocks[faila]; + b = blocks[failb]; + + /* in the 4 disk case P + Pxy == P and Q + Qxy == Q */ + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = p; + srcs[1] = q; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(b, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = p; + srcs[1] = b; + init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(a, srcs, 0, 2, bytes, submit); + + return tx; + +} + +static struct dma_async_tx_descriptor * +__2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, + struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *g, *dp, *dq; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void 
*cb_param = submit->cb_param; + void *scribble = submit->scribble; + int uninitialized_var(good); + int i; + + for (i = 0; i < 3; i++) { + if (i == faila || i == failb) + continue; + else { + good = i; + break; + } + } + BUG_ON(i >= 3); + + p = blocks[5-2]; + q = blocks[5-1]; + g = blocks[good]; + + /* Compute syndrome with zero for the missing data pages + * Use the dead data pages as temporary storage for delta p and + * delta q + */ + dp = blocks[faila]; + dq = blocks[failb]; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_memcpy(dp, g, 0, 0, bytes, submit); + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + + /* compute P + Pxy */ + srcs[0] = dp; + srcs[1] = p; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + /* compute Q + Qxy */ + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = dp; + srcs[1] = dq; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(dq, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = dp; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + return tx; +} + +static struct dma_async_tx_descriptor * +__2data_recov_n(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *dp, *dq; + struct page *srcs[2]; + unsigned char coef[2]; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + + p = blocks[disks-2]; + q = blocks[disks-1]; + + /* Compute syndrome with zero for the missing data pages + * Use the dead data pages as temporary storage for + * delta p and delta q + */ + dp = blocks[faila]; + blocks[faila] = (void *)raid6_empty_zero_page; + blocks[disks-2] = dp; + dq = blocks[failb]; + blocks[failb] = (void *)raid6_empty_zero_page; + blocks[disks-1] = dq; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + + /* Restore pointer table */ + blocks[faila] = dp; + blocks[failb] = dq; + blocks[disks-2] = p; + blocks[disks-1] = q; + + /* compute P + Pxy */ + srcs[0] = dp; + srcs[1] = p; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + /* compute Q + Qxy */ + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ + srcs[0] = dp; + srcs[1] = dq; + coef[0] = raid6_gfexi[failb-faila]; + coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_sum_product(dq, srcs, coef, bytes, submit); + + /* Dy = P+Pxy+Dx */ + srcs[0] = dp; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(dp, srcs, 0, 2, bytes, submit); + + return tx; +} + +/** + * 
async_raid6_2data_recov - asynchronously calculate two missing data blocks + * @disks: number of disks in the RAID-6 array + * @bytes: block size + * @faila: first failed drive index + * @failb: second failed drive index + * @blocks: array of source pointers where the last two entries are p and q + * @submit: submission/completion modifiers + */ +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, + struct page **blocks, struct async_submit_ctl *submit) +{ + BUG_ON(faila == failb); + if (failb < faila) + swap(faila, failb); + + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); + + /* we need to preserve the contents of 'blocks' for the async + * case, so punt to synchronous if a scribble buffer is not available + */ + if (!submit->scribble) { + void **ptrs = (void **) blocks; + int i; + + async_tx_quiesce(&submit->depend_tx); + for (i = 0; i < disks; i++) + ptrs[i] = page_address(blocks[i]); + + raid6_2data_recov(disks, bytes, faila, failb, ptrs); + + async_tx_sync_epilog(submit); + + return NULL; + } + + switch (disks) { + case 4: + /* dma devices do not uniformly understand a zero source pq + * operation (in contrast to the synchronous case), so + * explicitly handle the 4 disk special case + */ + return __2data_recov_4(bytes, faila, failb, blocks, submit); + case 5: + /* dma devices do not uniformly understand a single + * source pq operation (in contrast to the synchronous + * case), so explicitly handle the 5 disk special case + */ + return __2data_recov_5(bytes, faila, failb, blocks, submit); + default: + return __2data_recov_n(disks, bytes, faila, failb, blocks, submit); + } +} +EXPORT_SYMBOL_GPL(async_raid6_2data_recov); + +/** + * async_raid6_datap_recov - asynchronously calculate a data and the 'p' block + * @disks: number of disks in the RAID-6 array + * @bytes: block size + * @faila: failed drive index + * @blocks: array of source pointers where the last two entries are p and q + * @submit: submission/completion modifiers + */ +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int disks, size_t bytes, int faila, + struct page **blocks, struct async_submit_ctl *submit) +{ + struct dma_async_tx_descriptor *tx = NULL; + struct page *p, *q, *dq; + u8 coef; + enum async_tx_flags flags = submit->flags; + dma_async_tx_callback cb_fn = submit->cb_fn; + void *cb_param = submit->cb_param; + void *scribble = submit->scribble; + struct page *srcs[2]; + + pr_debug("%s: disks: %d len: %zu\n", __func__, disks, bytes); + + /* we need to preserve the contents of 'blocks' for the async + * case, so punt to synchronous if a scribble buffer is not available + */ + if (!scribble) { + void **ptrs = (void **) blocks; + int i; + + async_tx_quiesce(&submit->depend_tx); + for (i = 0; i < disks; i++) + ptrs[i] = page_address(blocks[i]); + + raid6_datap_recov(disks, bytes, faila, ptrs); + + async_tx_sync_epilog(submit); + + return NULL; + } + + p = blocks[disks-2]; + q = blocks[disks-1]; + + /* Compute syndrome with zero for the missing data page + * Use the dead data page as temporary storage for delta q + */ + dq = blocks[faila]; + blocks[faila] = (void *)raid6_empty_zero_page; + blocks[disks-1] = dq; + + /* in the 4 disk case we only need to perform a single source + * multiplication + */ + if (disks == 4) { + int good = faila == 0 ? 
1 : 0; + struct page *g = blocks[good]; + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_memcpy(p, g, 0, 0, bytes, submit); + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); + } else { + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); + } + + /* Restore pointer table */ + blocks[faila] = dq; + blocks[disks-1] = q; + + /* calculate g^{-faila} */ + coef = raid6_gfinv[raid6_gfexp[faila]]; + + srcs[0] = dq; + srcs[1] = q; + init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, + scribble); + tx = async_xor(dq, srcs, 0, 2, bytes, submit); + + init_async_submit(submit, 0, tx, NULL, NULL, scribble); + tx = async_mult(dq, dq, coef, bytes, submit); + + srcs[0] = p; + srcs[1] = dq; + init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, + cb_param, scribble); + tx = async_xor(p, srcs, 0, 2, bytes, submit); + + return tx; +} +EXPORT_SYMBOL_GPL(async_raid6_datap_recov); + +MODULE_AUTHOR("Dan Williams "); +MODULE_DESCRIPTION("asynchronous RAID-6 recovery api"); +MODULE_LICENSE("GPL"); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index e6ce5f004f98..866e61c4e2e0 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -194,5 +194,13 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *pqres, struct page *spare, struct async_submit_ctl *submit); +struct dma_async_tx_descriptor * +async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb, + struct page **ptrs, struct async_submit_ctl *submit); + +struct dma_async_tx_descriptor * +async_raid6_datap_recov(int src_num, size_t bytes, int faila, + struct page **ptrs, struct async_submit_ctl *submit); + void async_tx_quiesce(struct dma_async_tx_descriptor **tx); #endif /* _ASYNC_TX_H_ */ From 58691d64c44ae41ddf098ecb31e9a994026e3cff Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:09:27 -0700 Subject: [PATCH 0041/3409] dmatest: add pq support Test raid6 p+q operations with a simple "always multiply by 1" q calculation to fit into dmatest's current destination verification scheme. Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/dmatest.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a27c0fb1bc11..a5ee54139050 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -43,6 +43,11 @@ module_param(xor_sources, uint, S_IRUGO); MODULE_PARM_DESC(xor_sources, "Number of xor source buffers (default: 3)"); +static unsigned int pq_sources = 3; +module_param(pq_sources, uint, S_IRUGO); +MODULE_PARM_DESC(pq_sources, + "Number of p+q source buffers (default: 3)"); + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
@@ -227,6 +232,7 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; + u8 pq_coefs[pq_sources]; int ret; int src_cnt; int dst_cnt; @@ -243,6 +249,11 @@ static int dmatest_func(void *data) else if (thread->type == DMA_XOR) { src_cnt = xor_sources | 1; /* force odd to ensure dst = src */ dst_cnt = 1; + } else if (thread->type == DMA_PQ) { + src_cnt = pq_sources | 1; /* force odd to ensure dst = src */ + dst_cnt = 2; + for (i = 0; i < pq_sources; i++) + pq_coefs[i] = 1; } else goto err_srcs; @@ -310,6 +321,15 @@ static int dmatest_func(void *data) dma_dsts[0] + dst_off, dma_srcs, xor_sources, len, flags); + else if (thread->type == DMA_PQ) { + dma_addr_t dma_pq[dst_cnt]; + + for (i = 0; i < dst_cnt; i++) + dma_pq[i] = dma_dsts[i] + dst_off; + tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs, + pq_sources, pq_coefs, + len, flags); + } if (!tx) { for (i = 0; i < src_cnt; i++) @@ -446,6 +466,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty op = "copy"; else if (type == DMA_XOR) op = "xor"; + else if (type == DMA_PQ) + op = "pq"; else return -EINVAL; @@ -501,6 +523,10 @@ static int dmatest_add_channel(struct dma_chan *chan) cnt = dmatest_add_threads(dtc, DMA_XOR); thread_count += cnt > 0 ?: 0; } + if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { + cnt = dmatest_add_threads(dtc, DMA_PQ); + thread_count += cnt > 0 ?: 0; + } pr_info("dmatest: Started %u threads using %s\n", thread_count, dma_chan_name(chan)); From cb3c82992f62f838e6476a0bff12909158007fc6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 12:20:37 -0700 Subject: [PATCH 0042/3409] async_tx: raid6 recovery self test Port drivers/md/raid6test/test.c to use the async raid6 recovery routines. This is meant as a unit test for raid6 acceleration drivers. In addition to the 16-drive test case this implements tests for the 4-disk and 5-disk special cases (dma devices can not generically handle less than 2 sources), and adds a test for the D+Q case. Reviewed-by: Andre Noll Acked-by: Maciej Sosnowski Signed-off-by: Dan Williams --- crypto/async_tx/Makefile | 1 + crypto/async_tx/raid6test.c | 241 ++++++++++++++++++++++++++++++++++++ drivers/md/Kconfig | 13 ++ 3 files changed, 255 insertions(+) create mode 100644 crypto/async_tx/raid6test.c diff --git a/crypto/async_tx/Makefile b/crypto/async_tx/Makefile index 9a1a76811b80..d1e0e6f72bc1 100644 --- a/crypto/async_tx/Makefile +++ b/crypto/async_tx/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_ASYNC_MEMSET) += async_memset.o obj-$(CONFIG_ASYNC_XOR) += async_xor.o obj-$(CONFIG_ASYNC_PQ) += async_pq.o obj-$(CONFIG_ASYNC_RAID6_RECOV) += async_raid6_recov.o +obj-$(CONFIG_ASYNC_RAID6_TEST) += raid6test.o diff --git a/crypto/async_tx/raid6test.c b/crypto/async_tx/raid6test.c new file mode 100644 index 000000000000..98c83ca96c83 --- /dev/null +++ b/crypto/async_tx/raid6test.c @@ -0,0 +1,241 @@ +/* + * asynchronous raid6 recovery self test + * Copyright (c) 2009, Intel Corporation. + * + * based on drivers/md/raid6test/test.c: + * Copyright 2002-2007 H. Peter Anvin + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ +#include +#include + +#undef pr +#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args) + +#define NDISKS 16 /* Including P and Q */ + +static struct page *dataptrs[NDISKS]; +static struct page *data[NDISKS+3]; +static struct page *spare; +static struct page *recovi; +static struct page *recovj; + +static void callback(void *param) +{ + struct completion *cmp = param; + + complete(cmp); +} + +static void makedata(int disks) +{ + int i, j; + + for (i = 0; i < disks; i++) { + for (j = 0; j < PAGE_SIZE/sizeof(u32); j += sizeof(u32)) { + u32 *p = page_address(data[i]) + j; + + *p = random32(); + } + + dataptrs[i] = data[i]; + } +} + +static char disk_type(int d, int disks) +{ + if (d == disks - 2) + return 'P'; + else if (d == disks - 1) + return 'Q'; + else + return 'D'; +} + +/* Recover two failed blocks. */ +static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs) +{ + struct async_submit_ctl submit; + addr_conv_t addr_conv[disks]; + struct completion cmp; + struct dma_async_tx_descriptor *tx = NULL; + enum sum_check_flags result = ~0; + + if (faila > failb) + swap(faila, failb); + + if (failb == disks-1) { + if (faila == disks-2) { + /* P+Q failure. Just rebuild the syndrome. */ + init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); + tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); + } else { + struct page *blocks[disks]; + struct page *dest; + int count = 0; + int i; + + /* data+Q failure. Reconstruct data from P, + * then rebuild syndrome + */ + for (i = disks; i-- ; ) { + if (i == faila || i == failb) + continue; + blocks[count++] = ptrs[i]; + } + dest = ptrs[faila]; + init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + NULL, NULL, addr_conv); + tx = async_xor(dest, blocks, 0, count, bytes, &submit); + + init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); + tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); + } + } else { + if (failb == disks-2) { + /* data+P failure. */ + init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); + tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); + } else { + /* data+data failure. */ + init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv); + tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); + } + } + init_completion(&cmp); + init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); + tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); + async_tx_issue_pending(tx); + + if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) + pr("%s: timeout! (faila: %d failb: %d disks: %d)\n", + __func__, faila, failb, disks); + + if (result != 0) + pr("%s: validation failure! 
faila: %d failb: %d sum_check_flags: %x\n", + __func__, faila, failb, result); +} + +static int test_disks(int i, int j, int disks) +{ + int erra, errb; + + memset(page_address(recovi), 0xf0, PAGE_SIZE); + memset(page_address(recovj), 0xba, PAGE_SIZE); + + dataptrs[i] = recovi; + dataptrs[j] = recovj; + + raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs); + + erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE); + errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE); + + pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n", + __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks), + (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB"); + + dataptrs[i] = data[i]; + dataptrs[j] = data[j]; + + return erra || errb; +} + +static int test(int disks, int *tests) +{ + addr_conv_t addr_conv[disks]; + struct dma_async_tx_descriptor *tx; + struct async_submit_ctl submit; + struct completion cmp; + int err = 0; + int i, j; + + recovi = data[disks]; + recovj = data[disks+1]; + spare = data[disks+2]; + + makedata(disks); + + /* Nuke syndromes */ + memset(page_address(data[disks-2]), 0xee, PAGE_SIZE); + memset(page_address(data[disks-1]), 0xee, PAGE_SIZE); + + /* Generate assumed good syndrome */ + init_completion(&cmp); + init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv); + tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); + async_tx_issue_pending(tx); + + if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) { + pr("error: initial gen_syndrome(%d) timed out\n", disks); + return 1; + } + + pr("testing the %d-disk case...\n", disks); + for (i = 0; i < disks-1; i++) + for (j = i+1; j < disks; j++) { + (*tests)++; + err += test_disks(i, j, disks); + } + + return err; +} + + +static int raid6_test(void) +{ + int err = 0; + int tests = 0; + int i; + + for (i = 0; i < NDISKS+3; i++) { + data[i] = alloc_page(GFP_KERNEL); + if (!data[i]) { + while (i--) + put_page(data[i]); + return -ENOMEM; + } + } + + /* the 4-disk and 5-disk cases are special for the recovery code */ + if (NDISKS > 4) + err += test(4, &tests); + if (NDISKS > 5) + err += test(5, &tests); + err += test(NDISKS, &tests); + + pr("\n"); + pr("complete (%d tests, %d failure%s)\n", + tests, err, err == 1 ? "" : "s"); + + for (i = 0; i < NDISKS+3; i++) + put_page(data[i]); + + return 0; +} + +static void raid6_test_exit(void) +{ +} + +/* when compiled-in wait for drivers to load first (assumes dma drivers + * are also compiled-in) + */ +late_initcall(raid6_test); +module_exit(raid6_test_exit); +MODULE_AUTHOR("Dan Williams "); +MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests"); +MODULE_LICENSE("GPL"); diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 36e0675be9f7..41b3ae25b813 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -155,6 +155,19 @@ config MD_RAID456 config MD_RAID6_PQ tristate +config ASYNC_RAID6_TEST + tristate "Self test for hardware accelerated raid6 recovery" + depends on MD_RAID6_PQ + select ASYNC_RAID6_RECOV + ---help--- + This is a one-shot self test that permutes through the + recovery of all the possible two-disk failure scenarios for an + N-disk array. Recovery is performed with the asynchronous + raid6 recovery routines, and will optionally use an offload + engine if one is available. + + If unsure, say N.
+ config MD_MULTIPATH tristate "Multipath I/O support" depends on BLK_DEV_MD From 507fbec4cff442ebce6706db34603bfb9cc3b5a9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:12:39 -0700 Subject: [PATCH 0043/3409] iop-adma: cleanup iop_adma_run_tx_complete_actions Replace 'desc->async_tx.' with 'tx->' [ Impact: pure cleanup ] Signed-off-by: Dan Williams --- drivers/dma/iop-adma.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 4496bc606662..ce45f3fb0343 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -61,17 +61,18 @@ static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) { - BUG_ON(desc->async_tx.cookie < 0); - if (desc->async_tx.cookie > 0) { - cookie = desc->async_tx.cookie; - desc->async_tx.cookie = 0; + struct dma_async_tx_descriptor *tx = &desc->async_tx; + + BUG_ON(tx->cookie < 0); + if (tx->cookie > 0) { + cookie = tx->cookie; + tx->cookie = 0; /* call the callback (must not sleep or submit new * operations to this channel) */ - if (desc->async_tx.callback) - desc->async_tx.callback( - desc->async_tx.callback_param); + if (tx->callback) + tx->callback(tx->callback_param); /* unmap dma addresses * (unmap_single vs unmap_page?) @@ -81,7 +82,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct device *dev = &iop_chan->device->pdev->dev; u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = desc->async_tx.flags; + enum dma_ctrl_flags flags = tx->flags; u32 src_cnt; dma_addr_t addr; dma_addr_t dest; @@ -115,7 +116,7 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, } /* run dependent operations */ - dma_run_dependencies(&desc->async_tx); + dma_run_dependencies(tx); return cookie; } From 72be12f0c39df46832403cbfd82e132a883f5ddc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 13:38:29 -0700 Subject: [PATCH 0044/3409] iop-adma: fix lockdep false positive lockdep correctly identifies a potential recursive locking case for iop_chan->lock, but in the dependency submission case we expect that the same class will be acquired for both the parent dependency and the child channel. Signed-off-by: Dan Williams --- drivers/dma/iop-adma.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index ce45f3fb0343..9c752bd295e1 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -288,7 +288,12 @@ static void iop_adma_tasklet(unsigned long data) { struct iop_adma_chan *iop_chan = (struct iop_adma_chan *) data; - spin_lock(&iop_chan->lock); + /* lockdep will flag dependency submissions as potentially + * recursive locking, this is not the case as a dependency + * submission will never recurse a channel's submit routine. + * There are checks in async_tx.c to prevent this. + */ + spin_lock_nested(&iop_chan->lock, SINGLE_DEPTH_NESTING); __iop_adma_slot_cleanup(iop_chan); spin_unlock(&iop_chan->lock); } From 7bf649aee8ac93ecc280f8745dcf8ec19d7b9fb1 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Fri, 28 Aug 2009 14:32:04 -0700 Subject: [PATCH 0045/3409] iop-adma: P+Q support for iop13xx adma engines iop33x support is not included because that engine is a bit more awkward to handle in that it can either be in xor mode or pq mode. The dmaengine/async_tx layers currently only comprehend static capabilities.
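For illustration only (not part of this patch): a dmaengine driver declares its operation set once, at registration time, so the static form this driver can use looks roughly like the following sketch, built from the same helpers the probe-time hunk of this patch wires up:

	/* sketch: advertise PQ as a fixed capability at probe time */
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);
	/* static source limit; trailing 0 = no native hardware continuation */
	dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0);
	dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq;

An engine like iop33x that switches between xor mode and pq mode at run time has no single truthful cap_mask to publish, hence its exclusion here.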
Note iop13xx does not support hardware PQ continuation so the driver must handle the DMA_PREP_CONTINUE flag for operations across > 16 sources. From the comment for dma_maxpq: /* When an engine does not support native continuation we need 3 extra * source slots to reuse P and Q with the following coefficients: * 1/ {00} * P : remove P from Q', but use it as a source for P' * 2/ {01} * Q : use Q to continue Q' calculation * 3/ {00} * Q : subtract Q from P' to cancel (2) */ Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop3xx-adma.h | 76 +++++++ arch/arm/include/asm/hardware/iop_adma.h | 1 + arch/arm/mach-iop13xx/include/mach/adma.h | 107 +++++++++ drivers/dma/iop-adma.c | 231 +++++++++++++++++--- 4 files changed, 381 insertions(+), 34 deletions(-) diff --git a/arch/arm/include/asm/hardware/iop3xx-adma.h b/arch/arm/include/asm/hardware/iop3xx-adma.h index 26eefea02314..1a8c7279a28b 100644 --- a/arch/arm/include/asm/hardware/iop3xx-adma.h +++ b/arch/arm/include/asm/hardware/iop3xx-adma.h @@ -187,11 +187,74 @@ union iop3xx_desc { void *ptr; }; +/* No support for p+q operations */ +static inline int +iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op) +{ + BUG(); + return 0; +} + +static inline void +iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, + dma_addr_t addr, unsigned char coef) +{ + BUG(); +} + +static inline int +iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op) +{ + BUG(); + return 0; +} + +static inline void +iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + BUG(); +} + +static inline void +iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) +{ + BUG(); +} + +#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr + +static inline void +iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, + dma_addr_t *src) +{ + BUG(); +} + static inline int iop_adma_get_max_xor(void) { return 32; } +static inline int iop_adma_get_max_pq(void) +{ + BUG(); + return 0; +} + static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) { int id = chan->device->id; @@ -332,6 +395,11 @@ static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt, return slot_cnt; } +static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) +{ + return 0; +} + static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -349,6 +417,14 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, return 0; } + +static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + BUG(); + return 0; +} + static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 385c6e8cbbd2..bbe8a0475cad 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -106,6 +106,7 @@ struct iop_adma_desc_slot { union { u32 *xor_check_result; u32 *crc32_result; + u32 *pq_check_result; }; }; diff --git a/arch/arm/mach-iop13xx/include/mach/adma.h b/arch/arm/mach-iop13xx/include/mach/adma.h index 
1cd31df8924d..6d3782d85a9f 100644 --- a/arch/arm/mach-iop13xx/include/mach/adma.h +++ b/arch/arm/mach-iop13xx/include/mach/adma.h @@ -150,6 +150,8 @@ static inline int iop_adma_get_max_xor(void) return 16; } +#define iop_adma_get_max_pq iop_adma_get_max_xor + static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan) { return __raw_readl(ADMA_ADAR(chan)); @@ -211,7 +213,10 @@ iop_chan_xor_slot_count(size_t len, int src_cnt, int *slots_per_op) #define IOP_ADMA_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define IOP_ADMA_XOR_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT +#define IOP_ADMA_PQ_MAX_BYTE_COUNT ADMA_MAX_BYTE_COUNT #define iop_chan_zero_sum_slot_count(l, s, o) iop_chan_xor_slot_count(l, s, o) +#define iop_chan_pq_slot_count iop_chan_xor_slot_count +#define iop_chan_pq_zero_sum_slot_count iop_chan_xor_slot_count static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) @@ -220,6 +225,13 @@ static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc, return hw_desc->dest_addr; } +static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc, + struct iop_adma_chan *chan) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + return hw_desc->q_dest_addr; +} + static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) { @@ -319,6 +331,58 @@ iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, return 1; } +static inline void +iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.src_select = src_cnt - 1; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.pq_xfer_en = 1; + u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P); + u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; + hw_desc->desc_ctrl = u_desc_ctrl.value; +} + +static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = hw_desc->desc_ctrl; + return u_desc_ctrl.field.pq_xfer_en; +} + +static inline void +iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, + unsigned long flags) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + union { + u32 value; + struct iop13xx_adma_desc_ctrl field; + } u_desc_ctrl; + + u_desc_ctrl.value = 0; + u_desc_ctrl.field.src_select = src_cnt - 1; + u_desc_ctrl.field.xfer_dir = 3; /* local to internal bus */ + u_desc_ctrl.field.zero_result = 1; + u_desc_ctrl.field.status_write_back_en = 1; + u_desc_ctrl.field.pq_xfer_en = 1; + u_desc_ctrl.field.p_xfer_dis = !!(flags & DMA_PREP_PQ_DISABLE_P); + u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT; + hw_desc->desc_ctrl = u_desc_ctrl.value; +} + static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan, u32 byte_count) @@ -351,6 +415,7 @@ iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) } } +#define iop_desc_set_pq_zero_sum_byte_count iop_desc_set_zero_sum_byte_count static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan, @@ -361,6 +426,16 @@ static inline void 
iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, hw_desc->upper_dest_addr = 0; } +static inline void +iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) +{ + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; + + hw_desc->dest_addr = addr[0]; + hw_desc->q_dest_addr = addr[1]; + hw_desc->upper_dest_addr = 0; +} + static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, dma_addr_t addr) { @@ -388,6 +463,29 @@ static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, } while (slot_cnt); } +static inline void +iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, + dma_addr_t addr, unsigned char coef) +{ + int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; + struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; + struct iop13xx_adma_src *src; + int i = 0; + + do { + iter = iop_hw_desc_slot_idx(hw_desc, i); + src = &iter->src[src_idx]; + src->src_addr = addr; + src->pq_upper_src_addr = 0; + src->pq_dmlt = coef; + slot_cnt -= slots_per_op; + if (slot_cnt) { + i += slots_per_op; + addr += IOP_ADMA_PQ_MAX_BYTE_COUNT; + } + } while (slot_cnt); +} + static inline void iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, struct iop_adma_chan *chan) @@ -399,6 +497,15 @@ iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, } #define iop_desc_set_zero_sum_src_addr iop_desc_set_xor_src_addr +#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr + +static inline void +iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, + dma_addr_t *src) +{ + iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]); + iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]); +} static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, u32 next_desc_addr) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 9c752bd295e1..5a0f4fe2ee6e 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -57,6 +57,80 @@ static void iop_adma_free_slots(struct iop_adma_desc_slot *slot) } } +static void +iop_desc_unmap(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) +{ + struct dma_async_tx_descriptor *tx = &desc->async_tx; + struct iop_adma_desc_slot *unmap = desc->group_head; + struct device *dev = &iop_chan->device->pdev->dev; + u32 len = unmap->unmap_len; + enum dma_ctrl_flags flags = tx->flags; + u32 src_cnt; + dma_addr_t addr; + dma_addr_t dest; + + src_cnt = unmap->unmap_src_cnt; + dest = iop_desc_get_dest_addr(unmap, iop_chan); + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + enum dma_data_direction dir; + + if (src_cnt > 1) /* is xor? 
*/ + dir = DMA_BIDIRECTIONAL; + else + dir = DMA_FROM_DEVICE; + + dma_unmap_page(dev, dest, len, dir); + } + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + while (src_cnt--) { + addr = iop_desc_get_src_addr(unmap, iop_chan, src_cnt); + if (addr == dest) + continue; + dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); + } + } + desc->group_head = NULL; +} + +static void +iop_desc_unmap_pq(struct iop_adma_chan *iop_chan, struct iop_adma_desc_slot *desc) +{ + struct dma_async_tx_descriptor *tx = &desc->async_tx; + struct iop_adma_desc_slot *unmap = desc->group_head; + struct device *dev = &iop_chan->device->pdev->dev; + u32 len = unmap->unmap_len; + enum dma_ctrl_flags flags = tx->flags; + u32 src_cnt = unmap->unmap_src_cnt; + dma_addr_t pdest = iop_desc_get_dest_addr(unmap, iop_chan); + dma_addr_t qdest = iop_desc_get_qdest_addr(unmap, iop_chan); + int i; + + if (tx->flags & DMA_PREP_CONTINUE) + src_cnt -= 3; + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP) && !desc->pq_check_result) { + dma_unmap_page(dev, pdest, len, DMA_BIDIRECTIONAL); + dma_unmap_page(dev, qdest, len, DMA_BIDIRECTIONAL); + } + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + dma_addr_t addr; + + for (i = 0; i < src_cnt; i++) { + addr = iop_desc_get_src_addr(unmap, iop_chan, i); + dma_unmap_page(dev, addr, len, DMA_TO_DEVICE); + } + if (desc->pq_check_result) { + dma_unmap_page(dev, pdest, len, DMA_TO_DEVICE); + dma_unmap_page(dev, qdest, len, DMA_TO_DEVICE); + } + } + + desc->group_head = NULL; +} + + static dma_cookie_t iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, struct iop_adma_chan *iop_chan, dma_cookie_t cookie) @@ -78,40 +152,10 @@ iop_adma_run_tx_complete_actions(struct iop_adma_desc_slot *desc, * (unmap_single vs unmap_page?) */ if (desc->group_head && desc->unmap_len) { - struct iop_adma_desc_slot *unmap = desc->group_head; - struct device *dev = - &iop_chan->device->pdev->dev; - u32 len = unmap->unmap_len; - enum dma_ctrl_flags flags = tx->flags; - u32 src_cnt; - dma_addr_t addr; - dma_addr_t dest; - - src_cnt = unmap->unmap_src_cnt; - dest = iop_desc_get_dest_addr(unmap, iop_chan); - if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - enum dma_data_direction dir; - - if (src_cnt > 1) /* is xor? */ - dir = DMA_BIDIRECTIONAL; - else - dir = DMA_FROM_DEVICE; - - dma_unmap_page(dev, dest, len, dir); - } - - if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - while (src_cnt--) { - addr = iop_desc_get_src_addr(unmap, - iop_chan, - src_cnt); - if (addr == dest) - continue; - dma_unmap_page(dev, addr, len, - DMA_TO_DEVICE); - } - } - desc->group_head = NULL; + if (iop_desc_is_pq(desc)) + iop_desc_unmap_pq(iop_chan, desc); + else + iop_desc_unmap(iop_chan, desc); } } @@ -702,6 +746,118 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src, return sw_desc ? 
&sw_desc->async_tx : NULL; } +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *g; + int slot_cnt, slots_per_op; + int continue_srcs; + + if (unlikely(!len)) + return NULL; + BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); + + dev_dbg(iop_chan->device->common.dev, + "%s src_cnt: %d len: %u flags: %lx\n", + __func__, src_cnt, len, flags); + + if (dmaf_p_disabled_continue(flags)) + continue_srcs = 1+src_cnt; + else if (dmaf_continue(flags)) + continue_srcs = 3+src_cnt; + else + continue_srcs = 0+src_cnt; + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_pq_slot_count(len, continue_srcs, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + int i; + + g = sw_desc->group_head; + iop_desc_set_byte_count(g, iop_chan, len); + + /* even if P is disabled its destination address (bits + * [3:0]) must match Q. It is ok if P points to an + * invalid address, it won't be written. + */ + if (flags & DMA_PREP_PQ_DISABLE_P) + dst[0] = dst[1] & 0x7; + + iop_desc_set_pq_addr(g, dst); + sw_desc->unmap_src_cnt = src_cnt; + sw_desc->unmap_len = len; + sw_desc->async_tx.flags = flags; + for (i = 0; i < src_cnt; i++) + iop_desc_set_pq_src_addr(g, i, src[i], scf[i]); + + /* if we are continuing a previous operation factor in + * the old p and q values, see the comment for dma_maxpq + * in include/linux/dmaengine.h + */ + if (dmaf_p_disabled_continue(flags)) + iop_desc_set_pq_src_addr(g, i++, dst[1], 1); + else if (dmaf_continue(flags)) { + iop_desc_set_pq_src_addr(g, i++, dst[0], 0); + iop_desc_set_pq_src_addr(g, i++, dst[1], 1); + iop_desc_set_pq_src_addr(g, i++, dst[1], 0); + } + iop_desc_init_pq(g, i, flags); + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? &sw_desc->async_tx : NULL; +} + +static struct dma_async_tx_descriptor * +iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, enum sum_check_flags *pqres, + unsigned long flags) +{ + struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); + struct iop_adma_desc_slot *sw_desc, *g; + int slot_cnt, slots_per_op; + + if (unlikely(!len)) + return NULL; + BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT); + + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n", + __func__, src_cnt, len); + + spin_lock_bh(&iop_chan->lock); + slot_cnt = iop_chan_pq_zero_sum_slot_count(len, src_cnt + 2, &slots_per_op); + sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); + if (sw_desc) { + /* for validate operations p and q are tagged onto the + * end of the source list + */ + int pq_idx = src_cnt; + + g = sw_desc->group_head; + iop_desc_init_pq_zero_sum(g, src_cnt+2, flags); + iop_desc_set_pq_zero_sum_byte_count(g, len); + g->pq_check_result = pqres; + pr_debug("\t%s: g->pq_check_result: %p\n", + __func__, g->pq_check_result); + sw_desc->unmap_src_cnt = src_cnt+2; + sw_desc->unmap_len = len; + sw_desc->async_tx.flags = flags; + while (src_cnt--) + iop_desc_set_pq_zero_sum_src_addr(g, src_cnt, + src[src_cnt], + scf[src_cnt]); + iop_desc_set_pq_zero_sum_addr(g, pq_idx, src); + } + spin_unlock_bh(&iop_chan->lock); + + return sw_desc ? 
&sw_desc->async_tx : NULL; +} + static void iop_adma_free_chan_resources(struct dma_chan *chan) { struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); @@ -1201,6 +1357,13 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask)) dma_dev->device_prep_dma_xor_val = iop_adma_prep_dma_xor_val; + if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { + dma_set_maxpq(dma_dev, iop_adma_get_max_pq(), 0); + dma_dev->device_prep_dma_pq = iop_adma_prep_dma_pq; + } + if (dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) + dma_dev->device_prep_dma_pq_val = + iop_adma_prep_dma_pq_val; if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask)) dma_dev->device_prep_dma_interrupt = iop_adma_prep_dma_interrupt; From f6dbf651615900646fe0ba1ef5ce1027e5b4748d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:12:40 -0700 Subject: [PATCH 0046/3409] iop-adma: P+Q self test Even though the intent is to extend dmatest with P+Q tests there is still value in having an always-on sanity check to prevent an unintentionally broken driver from registering. This depends on raid6_pq.ko for verification, the side effect being that PQ capable channels will fail to register when raid6 is disabled. Signed-off-by: Dan Williams --- drivers/dma/iop-adma.c | 182 ++++++++++++++++++++++++++++++++++++++++- 1 file changed, 181 insertions(+), 1 deletion(-) diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 5a0f4fe2ee6e..f4c59e59f6cb 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -31,6 +31,7 @@ #include <linux/platform_device.h> #include <linux/memory.h> #include <linux/ioport.h> +#include <linux/raid/pq.h> #include <mach/adma.h> @@ -1267,6 +1268,170 @@ out: return err; } +#ifdef CONFIG_MD_RAID6_PQ +static int __devinit +iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device) +{ + /* combined sources, software pq results, and extra hw pq results */ + struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2]; + /* ptr to the extra hw pq buffers defined above */ + struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; + /* address conversion buffers (dma_map / page_address) */ + void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; + dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST]; + dma_addr_t pq_dest[2]; + + int i; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u32 zero_sum_result; + int err = 0; + struct device *dev; + + dev_dbg(device->common.dev, "%s\n", __func__); + + for (i = 0; i < ARRAY_SIZE(pq); i++) { + pq[i] = alloc_page(GFP_KERNEL); + if (!pq[i]) { + while (i--) + __free_page(pq[i]); + return -ENOMEM; + } + } + + /* Fill in src buffers */ + for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) { + pq_sw[i] = page_address(pq[i]); + memset(pq_sw[i], 0x11111111 * (1<<i), PAGE_SIZE); + } + pq_sw[i] = page_address(pq[i]); + pq_sw[i+1] = page_address(pq[i+1]); + + dma_chan = container_of(device->common.channels.next, + struct dma_chan, + device_node); + if (iop_adma_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + dev = dma_chan->device->dev; + + /* initialize the dests */ + memset(page_address(pq_hw[0]), 0 , PAGE_SIZE); + memset(page_address(pq_hw[1]), 0 , PAGE_SIZE); + + /* test pq */ + pq_dest[0] = dma_map_page(dev, pq_hw[0], 0, PAGE_SIZE, DMA_FROM_DEVICE); + pq_dest[1] = dma_map_page(dev, pq_hw[1], 0, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < IOP_ADMA_NUM_SRC_TEST; i++) + pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + + tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src, + IOP_ADMA_NUM_SRC_TEST, (u8 *)raid6_gfexp, + PAGE_SIZE, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + + cookie = iop_adma_tx_submit(tx); + iop_adma_issue_pending(dma_chan); + msleep(8); + + if (iop_adma_is_complete(dma_chan,
cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_err(dev, "Self-test pq timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + raid6_call.gen_syndrome(IOP_ADMA_NUM_SRC_TEST+2, PAGE_SIZE, pq_sw); + + if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST], + page_address(pq_hw[0]), PAGE_SIZE) != 0) { + dev_err(dev, "Self-test p failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + if (memcmp(pq_sw[IOP_ADMA_NUM_SRC_TEST+1], + page_address(pq_hw[1]), PAGE_SIZE) != 0) { + dev_err(dev, "Self-test q failed compare, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + /* test correct zero sum using the software generated pq values */ + for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) + pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + + zero_sum_result = ~0; + tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], + pq_src, IOP_ADMA_NUM_SRC_TEST, + raid6_gfexp, PAGE_SIZE, &zero_sum_result, + DMA_PREP_INTERRUPT|DMA_CTRL_ACK); + + cookie = iop_adma_tx_submit(tx); + iop_adma_issue_pending(dma_chan); + msleep(8); + + if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + if (zero_sum_result != 0) { + dev_err(dev, "Self-test pq-zero-sum failed to validate: %x\n", + zero_sum_result); + err = -ENODEV; + goto free_resources; + } + + /* test incorrect zero sum */ + i = IOP_ADMA_NUM_SRC_TEST; + memset(pq_sw[i] + 100, 0, 100); + memset(pq_sw[i+1] + 200, 0, 200); + for (i = 0; i < IOP_ADMA_NUM_SRC_TEST + 2; i++) + pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + + zero_sum_result = 0; + tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], + pq_src, IOP_ADMA_NUM_SRC_TEST, + raid6_gfexp, PAGE_SIZE, &zero_sum_result, + DMA_PREP_INTERRUPT|DMA_CTRL_ACK); + + cookie = iop_adma_tx_submit(tx); + iop_adma_issue_pending(dma_chan); + msleep(8); + + if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != + DMA_SUCCESS) { + dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); + err = -ENODEV; + goto free_resources; + } + + if (zero_sum_result != (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT)) { + dev_err(dev, "Self-test !pq-zero-sum failed to validate: %x\n", + zero_sum_result); + err = -ENODEV; + goto free_resources; + } + +free_resources: + iop_adma_free_chan_resources(dma_chan); +out: + i = ARRAY_SIZE(pq); + while (i--) + __free_page(pq[i]); + return err; +} +#endif + static int __devexit iop_adma_remove(struct platform_device *dev) { struct iop_adma_device *device = platform_get_drvdata(dev); @@ -1417,13 +1582,28 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask) || - dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { + dma_has_cap(DMA_MEMSET, dma_dev->cap_mask)) { ret = iop_adma_xor_val_self_test(adev); dev_dbg(&pdev->dev, "xor self test returned %d\n", ret); if (ret) goto err_free_iop_chan; } + if (dma_has_cap(DMA_PQ, dma_dev->cap_mask) && + dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask)) { + #ifdef CONFIG_MD_RAID6_PQ + ret = iop_adma_pq_zero_sum_self_test(adev); + dev_dbg(&pdev->dev, "pq self test returned %d\n", ret); + #else + /* can not test raid6, so do not publish capability */ + dma_cap_clear(DMA_PQ, dma_dev->cap_mask); + dma_cap_clear(DMA_PQ_VAL, dma_dev->cap_mask); + ret = 0; + #endif + if (ret) + goto err_free_iop_chan; + } + dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " "( %s%s%s%s%s%s%s%s%s%s)\n", 
dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", From 4e7d2c0aefb77f7b24942e5af042a083be4d60bb Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:13:11 -0700 Subject: [PATCH 0047/3409] md/raid5: factor out mark_uptodate from ops_complete_compute5 ops_complete_compute5 can be reused in the raid6 path if it is updated to generically handle a second target. Signed-off-by: Dan Williams --- drivers/md/raid5.c | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1f2a266f3cf7..e3a2990bdc7c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -623,18 +623,29 @@ static void ops_run_biofill(struct stripe_head *sh) async_trigger_callback(&submit); } +static void mark_target_uptodate(struct stripe_head *sh, int target) +{ + struct r5dev *tgt; + + if (target < 0) + return; + + tgt = &sh->dev[target]; + set_bit(R5_UPTODATE, &tgt->flags); + BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); + clear_bit(R5_Wantcompute, &tgt->flags); +} + static void ops_complete_compute5(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; - int target = sh->ops.target; - struct r5dev *tgt = &sh->dev[target]; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); - set_bit(R5_UPTODATE, &tgt->flags); - BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); - clear_bit(R5_Wantcompute, &tgt->flags); + /* mark the computed target as uptodate */ + mark_target_uptodate(sh, sh->ops.target); + clear_bit(STRIPE_COMPUTE_RUN, &sh->state); if (sh->check_state == check_state_compute_run) sh->check_state = check_state_compute_result; From ac6b53b6e6acab27e4f3e2383f9ac1f0d7c6200b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 13:40:19 -0700 Subject: [PATCH 0048/3409] md/raid6: asynchronous raid6 operations [ Based on an original patch by Yuri Tikhonov ] The raid_run_ops routine uses the asynchronous offload api and the stripe_operations member of a stripe_head to carry out xor+pq+copy operations asynchronously, outside the lock. The operations performed by RAID-6 are the same as in the RAID-5 case except for no support of STRIPE_OP_PREXOR operations. 
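(Prexor implements raid5's read-modify-write path; the raid6 path here always reconstruct-writes, so it has no use for it.) For illustration only (not part of this patch), the per-level dispatch this adds to raid_run_ops is roughly:

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		if (level < 6)
			tx = ops_run_compute5(sh, percpu);
		else if (sh->ops.target2 < 0 || sh->ops.target < 0)
			tx = ops_run_compute6_1(sh, percpu); /* one missing block */
		else
			tx = ops_run_compute6_2(sh, percpu); /* two missing blocks */
	}

The remaining operations either keep a single code path or gain a raid6 variant, as listed next.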
All the others are supported: STRIPE_OP_BIOFILL - copy data into request buffers to satisfy a read request STRIPE_OP_COMPUTE_BLK - generate missing blocks (1 or 2) in the cache from the other blocks STRIPE_OP_BIODRAIN - copy data out of request buffers to satisfy a write request STRIPE_OP_RECONSTRUCT - recalculate parity for new data that has entered the cache STRIPE_OP_CHECK - verify that the parity is correct The flow is the same as in the RAID-5 case, and reuses some routines, namely: 1/ ops_complete_postxor (renamed to ops_complete_reconstruct) 2/ ops_complete_compute (updated to set up to 2 targets uptodate) 3/ ops_run_check (renamed to ops_run_check_p for xor parity checks) [neilb@suse.de: fixes to get it to pass mdadm regression suite] Reviewed-by: Andre Noll Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/Kconfig | 2 + drivers/md/raid5.c | 322 ++++++++++++++++++++++++++++++++++++++++----- drivers/md/raid5.h | 8 +- 3 files changed, 299 insertions(+), 33 deletions(-) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 41b3ae25b813..abb8636bfde2 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -124,6 +124,8 @@ config MD_RAID456 select MD_RAID6_PQ select ASYNC_MEMCPY select ASYNC_XOR + select ASYNC_PQ + select ASYNC_RAID6_RECOV ---help--- A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e3a2990bdc7c..e68616ed3e78 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -636,15 +636,16 @@ static void mark_target_uptodate(struct stripe_head *sh, int target) clear_bit(R5_Wantcompute, &tgt->flags); } -static void ops_complete_compute5(void *stripe_head_ref) +static void ops_complete_compute(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); - /* mark the computed target as uptodate */ + /* mark the computed target(s) as uptodate */ mark_target_uptodate(sh, sh->ops.target); + mark_target_uptodate(sh, sh->ops.target2); clear_bit(STRIPE_COMPUTE_RUN, &sh->state); if (sh->check_state == check_state_compute_run) @@ -684,7 +685,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) atomic_inc(&sh->count); init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - ops_complete_compute5, sh, to_addr_conv(sh, percpu)); + ops_complete_compute, sh, to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); else @@ -693,6 +694,197 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) return tx; } +/* set_syndrome_sources - populate source buffers for gen_syndrome + * @srcs - (struct page *) array of size sh->disks + * @sh - stripe_head to parse + * + * Populates srcs in proper layout order for the stripe and returns the + * 'count' of sources to be used in a call to async_gen_syndrome. The P + * destination buffer is recorded in srcs[count] and the Q destination + * is recorded in srcs[count+1]]. + */ +static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) +{ + int disks = sh->disks; + int syndrome_disks = sh->ddf_layout ? 
disks : (disks - 2); + int d0_idx = raid6_d0(sh); + int count; + int i; + + for (i = 0; i < disks; i++) + srcs[i] = (void *)raid6_empty_zero_page; + + count = 0; + i = d0_idx; + do { + int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); + + srcs[slot] = sh->dev[i].page; + i = raid6_next_disk(i, disks); + } while (i != d0_idx); + BUG_ON(count != syndrome_disks); + + return count; +} + +static struct dma_async_tx_descriptor * +ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) +{ + int disks = sh->disks; + struct page **blocks = percpu->scribble; + int target; + int qd_idx = sh->qd_idx; + struct dma_async_tx_descriptor *tx; + struct async_submit_ctl submit; + struct r5dev *tgt; + struct page *dest; + int i; + int count; + + if (sh->ops.target < 0) + target = sh->ops.target2; + else if (sh->ops.target2 < 0) + target = sh->ops.target; + else + /* we should only have one valid target */ + BUG(); + BUG_ON(target < 0); + pr_debug("%s: stripe %llu block: %d\n", + __func__, (unsigned long long)sh->sector, target); + + tgt = &sh->dev[target]; + BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); + dest = tgt->page; + + atomic_inc(&sh->count); + + if (target == qd_idx) { + count = set_syndrome_sources(blocks, sh); + blocks[count] = NULL; /* regenerating p is not necessary */ + BUG_ON(blocks[count+1] != dest); /* q should already be set */ + init_async_submit(&submit, 0, NULL, ops_complete_compute, sh, + to_addr_conv(sh, percpu)); + tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); + } else { + /* Compute any data- or p-drive using XOR */ + count = 0; + for (i = disks; i-- ; ) { + if (i == target || i == qd_idx) + continue; + blocks[count++] = sh->dev[i].page; + } + + init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + ops_complete_compute, sh, + to_addr_conv(sh, percpu)); + tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); + } + + return tx; +} + +static struct dma_async_tx_descriptor * +ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) +{ + int i, count, disks = sh->disks; + int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; + int d0_idx = raid6_d0(sh); + int faila = -1, failb = -1; + int target = sh->ops.target; + int target2 = sh->ops.target2; + struct r5dev *tgt = &sh->dev[target]; + struct r5dev *tgt2 = &sh->dev[target2]; + struct dma_async_tx_descriptor *tx; + struct page **blocks = percpu->scribble; + struct async_submit_ctl submit; + + pr_debug("%s: stripe %llu block1: %d block2: %d\n", + __func__, (unsigned long long)sh->sector, target, target2); + BUG_ON(target < 0 || target2 < 0); + BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); + BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); + + /* we need to open-code set_syndrome_sources to handle to the + * slot number conversion for 'faila' and 'failb' + */ + for (i = 0; i < disks ; i++) + blocks[i] = (void *)raid6_empty_zero_page; + count = 0; + i = d0_idx; + do { + int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); + + blocks[slot] = sh->dev[i].page; + + if (i == target) + faila = slot; + if (i == target2) + failb = slot; + i = raid6_next_disk(i, disks); + } while (i != d0_idx); + BUG_ON(count != syndrome_disks); + + BUG_ON(faila == failb); + if (failb < faila) + swap(faila, failb); + pr_debug("%s: stripe: %llu faila: %d failb: %d\n", + __func__, (unsigned long long)sh->sector, faila, failb); + + atomic_inc(&sh->count); + + if (failb == syndrome_disks+1) { + /* Q disk is one of the missing disks */ + if (faila == syndrome_disks) { + /* Missing P+Q, just recompute */ + init_async_submit(&submit, 0, NULL, ops_complete_compute, + sh, to_addr_conv(sh, percpu)); + return async_gen_syndrome(blocks, 0, count+2, + STRIPE_SIZE, &submit); + } else { + struct page *dest; + int data_target; + int qd_idx = sh->qd_idx; + + /* Missing D+Q: recompute D from P, then recompute Q */ + if (target == qd_idx) + data_target = target2; + else + data_target = target; + + count = 0; + for (i = disks; i-- ; ) { + if (i == data_target || i == qd_idx) + continue; + blocks[count++] = sh->dev[i].page; + } + dest = sh->dev[data_target].page; + init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + NULL, NULL, to_addr_conv(sh, percpu)); + tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, + &submit); + + count = set_syndrome_sources(blocks, sh); + init_async_submit(&submit, 0, tx, ops_complete_compute, + sh, to_addr_conv(sh, percpu)); + return async_gen_syndrome(blocks, 0, count+2, + STRIPE_SIZE, &submit); + } + } + + init_async_submit(&submit, 0, NULL, ops_complete_compute, sh, + to_addr_conv(sh, percpu)); + if (failb == syndrome_disks) { + /* We're missing D+P. */ + return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, + faila, blocks, &submit); + } else { + /* We're missing D+D. 
*/ + return async_raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, + faila, failb, blocks, &submit); + } +} + + static void ops_complete_prexor(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; @@ -765,17 +957,21 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) return tx; } -static void ops_complete_postxor(void *stripe_head_ref) +static void ops_complete_reconstruct(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; - int disks = sh->disks, i, pd_idx = sh->pd_idx; + int disks = sh->disks; + int pd_idx = sh->pd_idx; + int qd_idx = sh->qd_idx; + int i; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; - if (dev->written || i == pd_idx) + + if (dev->written || i == pd_idx || i == qd_idx) set_bit(R5_UPTODATE, &dev->flags); } @@ -793,8 +989,8 @@ static void ops_complete_postxor(void *stripe_head_ref) } static void -ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu, - struct dma_async_tx_descriptor *tx) +ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, + struct dma_async_tx_descriptor *tx) { int disks = sh->disks; struct page **xor_srcs = percpu->scribble; @@ -837,7 +1033,7 @@ ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu, atomic_inc(&sh->count); - init_async_submit(&submit, flags, tx, ops_complete_postxor, sh, + init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); @@ -845,6 +1041,25 @@ ops_run_postxor(struct stripe_head *sh, struct raid5_percpu *percpu, tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); } +static void +ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, + struct dma_async_tx_descriptor *tx) +{ + struct async_submit_ctl submit; + struct page **blocks = percpu->scribble; + int count; + + pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); + + count = set_syndrome_sources(blocks, sh); + + atomic_inc(&sh->count); + + init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, + sh, to_addr_conv(sh, percpu)); + async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); +} + static void ops_complete_check(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; @@ -857,23 +1072,28 @@ static void ops_complete_check(void *stripe_head_ref) release_stripe(sh); } -static void ops_run_check(struct stripe_head *sh, struct raid5_percpu *percpu) +static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) { int disks = sh->disks; + int pd_idx = sh->pd_idx; + int qd_idx = sh->qd_idx; + struct page *xor_dest; struct page **xor_srcs = percpu->scribble; struct dma_async_tx_descriptor *tx; struct async_submit_ctl submit; - - int count = 0, pd_idx = sh->pd_idx, i; - struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; + int count; + int i; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); + count = 0; + xor_dest = sh->dev[pd_idx].page; + xor_srcs[count++] = xor_dest; for (i = disks; i--; ) { - struct r5dev *dev = &sh->dev[i]; - if (i != pd_idx) - xor_srcs[count++] = dev->page; + if (i == pd_idx || i == qd_idx) + continue; + xor_srcs[count++] = sh->dev[i].page; } init_async_submit(&submit, 0, NULL, NULL, NULL, @@ -886,11 +1106,32 @@ static void ops_run_check(struct stripe_head *sh, struct raid5_percpu *percpu) tx = 
async_trigger_callback(&submit); } -static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) +static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) +{ + struct page **srcs = percpu->scribble; + struct async_submit_ctl submit; + int count; + + pr_debug("%s: stripe %llu checkp: %d\n", __func__, + (unsigned long long)sh->sector, checkp); + + count = set_syndrome_sources(srcs, sh); + if (!checkp) + srcs[count] = NULL; + + atomic_inc(&sh->count); + init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, + sh, to_addr_conv(sh, percpu)); + async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, + &sh->ops.zero_sum_result, percpu->spare_page, &submit); +} + +static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) { int overlap_clear = 0, i, disks = sh->disks; struct dma_async_tx_descriptor *tx = NULL; raid5_conf_t *conf = sh->raid_conf; + int level = conf->level; struct raid5_percpu *percpu; unsigned long cpu; @@ -902,9 +1143,16 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) } if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { - tx = ops_run_compute5(sh, percpu); - /* terminate the chain if postxor is not set to be run */ - if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request)) + if (level < 6) + tx = ops_run_compute5(sh, percpu); + else { + if (sh->ops.target2 < 0 || sh->ops.target < 0) + tx = ops_run_compute6_1(sh, percpu); + else + tx = ops_run_compute6_2(sh, percpu); + } + /* terminate the chain if reconstruct is not set to be run */ + if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) async_tx_ack(tx); } @@ -916,11 +1164,23 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request) overlap_clear++; } - if (test_bit(STRIPE_OP_POSTXOR, &ops_request)) - ops_run_postxor(sh, percpu, tx); + if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { + if (level < 6) + ops_run_reconstruct5(sh, percpu, tx); + else + ops_run_reconstruct6(sh, percpu, tx); + } - if (test_bit(STRIPE_OP_CHECK, &ops_request)) - ops_run_check(sh, percpu); + if (test_bit(STRIPE_OP_CHECK, &ops_request)) { + if (sh->check_state == check_state_run) + ops_run_check_p(sh, percpu); + else if (sh->check_state == check_state_run_q) + ops_run_check_pq(sh, percpu, 0); + else if (sh->check_state == check_state_run_pq) + ops_run_check_pq(sh, percpu, 1); + else + BUG(); + } if (overlap_clear) for (i = disks; i--; ) { @@ -1931,7 +2191,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, } else sh->reconstruct_state = reconstruct_state_run; - set_bit(STRIPE_OP_POSTXOR, &s->ops_request); + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -1954,7 +2214,7 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, sh->reconstruct_state = reconstruct_state_prexor_drain_run; set_bit(STRIPE_OP_PREXOR, &s->ops_request); set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); - set_bit(STRIPE_OP_POSTXOR, &s->ops_request); + set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -2206,9 +2466,10 @@ static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); set_bit(R5_Wantcompute, &dev->flags); sh->ops.target = disk_idx; + sh->ops.target2 = -1; s->req_compute = 1; /* Careful: from this point on 'uptodate' is in the eye - * of raid5_run_ops which services 'compute' operations + * of 
raid_run_ops which services 'compute' operations * before writes. R5_Wantcompute flags a block that will * be R5_UPTODATE by the time it is needed for a * subsequent operation. @@ -2435,8 +2696,8 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, */ /* since handle_stripe can be called at any time we need to handle the * case where a compute block operation has been submitted and then a - * subsequent call wants to start a write request. raid5_run_ops only - * handles the case where compute block and postxor are requested + * subsequent call wants to start a write request. raid_run_ops only + * handles the case where compute block and reconstruct are requested * simultaneously. If this is not the case then new writes need to be * held off until the compute completes. */ @@ -2618,6 +2879,7 @@ static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, set_bit(R5_Wantcompute, &sh->dev[sh->pd_idx].flags); sh->ops.target = sh->pd_idx; + sh->ops.target2 = -1; s->uptodate++; } } @@ -3067,7 +3329,7 @@ static bool handle_stripe5(struct stripe_head *sh) md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); if (s.ops_request) - raid5_run_ops(sh, s.ops_request); + raid_run_ops(sh, s.ops_request); ops_run_io(sh, &s); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 75f2c6c4cf90..116d0b44b2a9 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -176,7 +176,9 @@ */ enum check_states { check_state_idle = 0, - check_state_run, /* parity check */ + check_state_run, /* xor parity check */ + check_state_run_q, /* q-parity check */ + check_state_run_pq, /* pq dual parity check */ check_state_check_result, check_state_compute_run, /* parity repair */ check_state_compute_result, @@ -216,7 +218,7 @@ struct stripe_head { * @target - STRIPE_OP_COMPUTE_BLK target */ struct stripe_operations { - int target; + int target, target2; enum sum_check_flags zero_sum_result; } ops; struct r5dev { @@ -299,7 +301,7 @@ struct r6_state { #define STRIPE_OP_COMPUTE_BLK 1 #define STRIPE_OP_PREXOR 2 #define STRIPE_OP_BIODRAIN 3 -#define STRIPE_OP_POSTXOR 4 +#define STRIPE_OP_RECONSTRUCT 4 #define STRIPE_OP_CHECK 5 /* From c0f7bddbe60f43578dccf4ffb8d4bff88f625ea7 Mon Sep 17 00:00:00 2001 From: Yuri Tikhonov Date: Sat, 29 Aug 2009 19:13:12 -0700 Subject: [PATCH 0049/3409] md/raid5,6: common schedule_reconstruction for raid5/6 Extend schedule_reconstruction5 for reuse by the raid6 path. Add support for generating Q and BUG() if a request is made to perform 'prexor'. 
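In sketch form, the raid6 half of the hunk below reduces to locking the Q block alongside P for the duration of the asynchronous reconstruct:

	if (level == 6) {
		struct r5dev *dev = &sh->dev[sh->qd_idx];

		set_bit(R5_LOCKED, &dev->flags);
		clear_bit(R5_UPTODATE, &dev->flags);
		s->locked++;
	}

and the full-stripe-write test generalizes from "s->locked + 1 == disks" to "s->locked + conf->max_degraded == disks", so that it holds for one or two parity disks.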
Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e68616ed3e78..a1245cf99957 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2175,10 +2175,12 @@ static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) } static void -schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, +schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, int rcw, int expand) { int i, pd_idx = sh->pd_idx, disks = sh->disks; + raid5_conf_t *conf = sh->raid_conf; + int level = conf->level; if (rcw) { /* if we are not expanding this is a proper write request, and @@ -2204,10 +2206,11 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, s->locked++; } } - if (s->locked + 1 == disks) + if (s->locked + conf->max_degraded == disks) if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) - atomic_inc(&sh->raid_conf->pending_full_writes); + atomic_inc(&conf->pending_full_writes); } else { + BUG_ON(level == 6); BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); @@ -2232,13 +2235,22 @@ schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, } } - /* keep the parity disk locked while asynchronous operations + /* keep the parity disk(s) locked while asynchronous operations * are in flight */ set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); s->locked++; + if (level == 6) { + int qd_idx = sh->qd_idx; + struct r5dev *dev = &sh->dev[qd_idx]; + + set_bit(R5_LOCKED, &dev->flags); + clear_bit(R5_UPTODATE, &dev->flags); + s->locked++; + } + pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", __func__, (unsigned long long)sh->sector, s->locked, s->ops_request); @@ -2704,7 +2716,7 @@ static void handle_stripe_dirtying5(raid5_conf_t *conf, if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && (s->locked == 0 && (rcw == 0 || rmw == 0) && !test_bit(STRIPE_BIT_DELAY, &sh->state))) - schedule_reconstruction5(sh, s, rcw == 0, 0); + schedule_reconstruction(sh, s, rcw == 0, 0); } static void handle_stripe_dirtying6(raid5_conf_t *conf, @@ -3309,7 +3321,7 @@ static bool handle_stripe5(struct stripe_head *sh) /* Need to write out all blocks after computing parity */ sh->disks = conf->raid_disks; stripe_set_idx(sh->sector, conf, 0, sh); - schedule_reconstruction5(sh, &s, 1, 1); + schedule_reconstruction(sh, &s, 1, 1); } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { clear_bit(STRIPE_EXPAND_READY, &sh->state); atomic_dec(&conf->reshape_stripes); From 5599becca4bee7badf605e41fd5bcde76d51f2a4 Mon Sep 17 00:00:00 2001 From: Yuri Tikhonov Date: Sat, 29 Aug 2009 19:13:12 -0700 Subject: [PATCH 0050/3409] md/raid6: asynchronous handle_stripe_fill6 Modify handle_stripe_fill6 to work asynchronously by introducing fetch_block6 as the raid6 analog of fetch_block5 (schedule compute operations for missing/out-of-sync disks). 
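In sketch form, where the raid5 path's fetch_block5 schedules work, fetch_block6 likewise only records the compute request for raid_run_ops to service later (names as in the diff below):

	set_bit(STRIPE_COMPUTE_RUN, &sh->state);
	set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request);
	set_bit(R5_Wantcompute, &dev->flags);
	sh->ops.target = disk_idx;
	sh->ops.target2 = -1;	/* or the second failed index in the 2-failure case */
	s->req_compute = 1;
	s->uptodate++;		/* the block becomes valid once the compute runs */

The actual computation is then carried out by ops_run_compute6_1/ops_run_compute6_2, outside the stripe lock.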
[dan.j.williams@intel.com: compute D+Q in one pass] Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 143 +++++++++++++++++++++++++++++---------------- 1 file changed, 93 insertions(+), 50 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a1245cf99957..49da6f74d6d6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2520,61 +2520,104 @@ static void handle_stripe_fill5(struct stripe_head *sh, set_bit(STRIPE_HANDLE, &sh->state); } +/* fetch_block6 - checks the given member device to see if its data needs + * to be read or computed to satisfy a request. + * + * Returns 1 when no more member devices need to be checked, otherwise returns + * 0 to tell the loop in handle_stripe_fill6 to continue + */ +static int fetch_block6(struct stripe_head *sh, struct stripe_head_state *s, + struct r6_state *r6s, int disk_idx, int disks) +{ + struct r5dev *dev = &sh->dev[disk_idx]; + struct r5dev *fdev[2] = { &sh->dev[r6s->failed_num[0]], + &sh->dev[r6s->failed_num[1]] }; + + if (!test_bit(R5_LOCKED, &dev->flags) && + !test_bit(R5_UPTODATE, &dev->flags) && + (dev->toread || + (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || + s->syncing || s->expanding || + (s->failed >= 1 && + (fdev[0]->toread || s->to_write)) || + (s->failed >= 2 && + (fdev[1]->toread || s->to_write)))) { + /* we would like to get this block, possibly by computing it, + * otherwise read it if the backing disk is insync + */ + BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); + BUG_ON(test_bit(R5_Wantread, &dev->flags)); + if ((s->uptodate == disks - 1) && + (s->failed && (disk_idx == r6s->failed_num[0] || + disk_idx == r6s->failed_num[1]))) { + /* have disk failed, and we're requested to fetch it; + * do compute it + */ + pr_debug("Computing stripe %llu block %d\n", + (unsigned long long)sh->sector, disk_idx); + set_bit(STRIPE_COMPUTE_RUN, &sh->state); + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); + set_bit(R5_Wantcompute, &dev->flags); + sh->ops.target = disk_idx; + sh->ops.target2 = -1; /* no 2nd target */ + s->req_compute = 1; + s->uptodate++; + return 1; + } else if (s->uptodate == disks-2 && s->failed >= 2) { + /* Computing 2-failure is *very* expensive; only + * do it if failed >= 2 + */ + int other; + for (other = disks; other--; ) { + if (other == disk_idx) + continue; + if (!test_bit(R5_UPTODATE, + &sh->dev[other].flags)) + break; + } + BUG_ON(other < 0); + pr_debug("Computing stripe %llu blocks %d,%d\n", + (unsigned long long)sh->sector, + disk_idx, other); + set_bit(STRIPE_COMPUTE_RUN, &sh->state); + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); + set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); + set_bit(R5_Wantcompute, &sh->dev[other].flags); + sh->ops.target = disk_idx; + sh->ops.target2 = other; + s->uptodate += 2; + s->req_compute = 1; + return 1; + } else if (test_bit(R5_Insync, &dev->flags)) { + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + pr_debug("Reading block %d (sync=%d)\n", + disk_idx, s->syncing); + } + } + + return 0; +} + +/** + * handle_stripe_fill6 - read or compute data to satisfy pending requests. 
+ */ static void handle_stripe_fill6(struct stripe_head *sh, struct stripe_head_state *s, struct r6_state *r6s, int disks) { int i; - for (i = disks; i--; ) { - struct r5dev *dev = &sh->dev[i]; - if (!test_bit(R5_LOCKED, &dev->flags) && - !test_bit(R5_UPTODATE, &dev->flags) && - (dev->toread || (dev->towrite && - !test_bit(R5_OVERWRITE, &dev->flags)) || - s->syncing || s->expanding || - (s->failed >= 1 && - (sh->dev[r6s->failed_num[0]].toread || - s->to_write)) || - (s->failed >= 2 && - (sh->dev[r6s->failed_num[1]].toread || - s->to_write)))) { - /* we would like to get this block, possibly - * by computing it, but we might not be able to - */ - if ((s->uptodate == disks - 1) && - (s->failed && (i == r6s->failed_num[0] || - i == r6s->failed_num[1]))) { - pr_debug("Computing stripe %llu block %d\n", - (unsigned long long)sh->sector, i); - compute_block_1(sh, i, 0); - s->uptodate++; - } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { - /* Computing 2-failure is *very* expensive; only - * do it if failed >= 2 - */ - int other; - for (other = disks; other--; ) { - if (other == i) - continue; - if (!test_bit(R5_UPTODATE, - &sh->dev[other].flags)) - break; - } - BUG_ON(other < 0); - pr_debug("Computing stripe %llu blocks %d,%d\n", - (unsigned long long)sh->sector, - i, other); - compute_block_2(sh, i, other); - s->uptodate += 2; - } else if (test_bit(R5_Insync, &dev->flags)) { - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantread, &dev->flags); - s->locked++; - pr_debug("Reading block %d (sync=%d)\n", - i, s->syncing); - } - } - } + + /* look for blocks to read/compute, skip this if a compute + * is already in flight, or if the stripe contents are in the + * midst of changing due to a write + */ + if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && + !sh->reconstruct_state) + for (i = disks; i--; ) + if (fetch_block6(sh, s, r6s, i, disks)) + break; set_bit(STRIPE_HANDLE, &sh->state); } From a9b39a741a7e3b262b9f51fefb68e17b32756999 Mon Sep 17 00:00:00 2001 From: Yuri Tikhonov Date: Sat, 29 Aug 2009 19:13:12 -0700 Subject: [PATCH 0051/3409] md/raid6: asynchronous handle_stripe_dirtying6 In the synchronous implementation of stripe dirtying we processed a degraded stripe with one call to handle_stripe_dirtying6(). I.e. compute the missing blocks from the other drives, then copy in the new data and reconstruct the parities. In the asynchronous case we do not perform stripe operations directly. Instead, operations are scheduled with flags to be later serviced by raid_run_ops. So, for the degraded case the final reconstruction step can only be carried out after all blocks have been brought up to date by being read, or computed. Like the raid5 case schedule_reconstruction() sets STRIPE_OP_RECONSTRUCT to request a parity generation pass and through operation chaining can handle compute and reconstruct in a single raid_run_ops pass. 
[dan.j.williams@intel.com: fixup handle_stripe_dirtying6 gating] Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 122 ++++++++++++++------------------------------- 1 file changed, 37 insertions(+), 85 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 49da6f74d6d6..08f806379b07 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2766,99 +2766,46 @@ static void handle_stripe_dirtying6(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head_state *s, struct r6_state *r6s, int disks) { - int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; + int rcw = 0, pd_idx = sh->pd_idx, i; int qd_idx = sh->qd_idx; + + set_bit(STRIPE_HANDLE, &sh->state); for (i = disks; i--; ) { struct r5dev *dev = &sh->dev[i]; - /* Would I have to read this buffer for reconstruct_write */ - if (!test_bit(R5_OVERWRITE, &dev->flags) - && i != pd_idx && i != qd_idx - && (!test_bit(R5_LOCKED, &dev->flags) - ) && - !test_bit(R5_UPTODATE, &dev->flags)) { - if (test_bit(R5_Insync, &dev->flags)) rcw++; - else { - pr_debug("raid6: must_compute: " - "disk %d flags=%#lx\n", i, dev->flags); - must_compute++; + /* check if we haven't enough data */ + if (!test_bit(R5_OVERWRITE, &dev->flags) && + i != pd_idx && i != qd_idx && + !test_bit(R5_LOCKED, &dev->flags) && + !(test_bit(R5_UPTODATE, &dev->flags) || + test_bit(R5_Wantcompute, &dev->flags))) { + rcw++; + if (!test_bit(R5_Insync, &dev->flags)) + continue; /* it's a failed drive */ + + if ( + test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + pr_debug("Read_old stripe %llu " + "block %d for Reconstruct\n", + (unsigned long long)sh->sector, i); + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantread, &dev->flags); + s->locked++; + } else { + pr_debug("Request delayed stripe %llu " + "block %d for Reconstruct\n", + (unsigned long long)sh->sector, i); + set_bit(STRIPE_DELAYED, &sh->state); + set_bit(STRIPE_HANDLE, &sh->state); } } } - pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", - (unsigned long long)sh->sector, rcw, must_compute); - set_bit(STRIPE_HANDLE, &sh->state); - - if (rcw > 0) - /* want reconstruct write, but need to get some data */ - for (i = disks; i--; ) { - struct r5dev *dev = &sh->dev[i]; - if (!test_bit(R5_OVERWRITE, &dev->flags) - && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) - && !test_bit(R5_LOCKED, &dev->flags) && - !test_bit(R5_UPTODATE, &dev->flags) && - test_bit(R5_Insync, &dev->flags)) { - if ( - test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - pr_debug("Read_old stripe %llu " - "block %d for Reconstruct\n", - (unsigned long long)sh->sector, i); - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantread, &dev->flags); - s->locked++; - } else { - pr_debug("Request delayed stripe %llu " - "block %d for Reconstruct\n", - (unsigned long long)sh->sector, i); - set_bit(STRIPE_DELAYED, &sh->state); - set_bit(STRIPE_HANDLE, &sh->state); - } - } - } /* now if nothing is locked, and if we have enough data, we can start a * write request */ - if (s->locked == 0 && rcw == 0 && + if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && + s->locked == 0 && rcw == 0 && !test_bit(STRIPE_BIT_DELAY, &sh->state)) { - if (must_compute > 0) { - /* We have failed blocks and need to compute them */ - switch (s->failed) { - case 0: - BUG(); - case 1: - compute_block_1(sh, r6s->failed_num[0], 0); - break; - case 2: - compute_block_2(sh, r6s->failed_num[0], - r6s->failed_num[1]); - break; - default: /* This request should have been failed? 
*/ - BUG(); - } - } - - pr_debug("Computing parity for stripe %llu\n", - (unsigned long long)sh->sector); - compute_parity6(sh, RECONSTRUCT_WRITE); - /* now every locked buffer is ready to be written */ - for (i = disks; i--; ) - if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { - pr_debug("Writing stripe %llu block %d\n", - (unsigned long long)sh->sector, i); - s->locked++; - set_bit(R5_Wantwrite, &sh->dev[i].flags); - } - if (s->locked == disks) - if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) - atomic_inc(&conf->pending_full_writes); - /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ - set_bit(STRIPE_INSYNC, &sh->state); - - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { - atomic_dec(&conf->preread_active_stripes); - if (atomic_read(&conf->preread_active_stripes) < - IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - } + schedule_reconstruction(sh, s, 1, 0); } } @@ -3539,8 +3486,13 @@ static bool handle_stripe6(struct stripe_head *sh) (s.syncing && (s.uptodate < disks)) || s.expanding) handle_stripe_fill6(sh, &s, &r6s, disks); - /* now to consider writing and what else, if anything should be read */ - if (s.to_write) + /* Now to consider new write requests and what else, if anything + * should be read. We do not handle new writes when: + * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. + * 2/ A 'check' operation is in flight, as it may clobber the parity + * block. + */ + if (s.to_write && !sh->reconstruct_state && !sh->check_state) handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); /* maybe we need to check and possibly fix the parity for this stripe From d82dfee0ad8f240fef1b28e2258891c07da57367 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 14 Jul 2009 13:40:57 -0700 Subject: [PATCH 0052/3409] md/raid6: asynchronous handle_parity_check6 [ Based on an original patch by Yuri Tikhonov ] Implement the state machine for handling the RAID-6 parities check and repair functionality. Note that the raid6 case does not need to check for new failures, like raid5, as it will always writeback the correct disks. The raid5 case can be updated to check zero_sum_result to avoid getting confused by new failures rather than retrying the entire check operation. Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 206 ++++++++++++++++++++++++++++++--------------- 1 file changed, 139 insertions(+), 67 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 08f806379b07..3c31f7f8aa65 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -2901,91 +2901,163 @@ static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, struct stripe_head_state *s, struct r6_state *r6s, int disks) { - int update_p = 0, update_q = 0; - struct r5dev *dev; int pd_idx = sh->pd_idx; int qd_idx = sh->qd_idx; - unsigned long cpu; - struct page *tmp_page; + struct r5dev *dev; set_bit(STRIPE_HANDLE, &sh->state); BUG_ON(s->failed > 2); - BUG_ON(s->uptodate < disks); + /* Want to check and possibly repair P and Q. * However there could be one 'failed' device, in which * case we can only check one of them, possibly using the * other to generate missing data */ - cpu = get_cpu(); - tmp_page = per_cpu_ptr(conf->percpu, cpu)->spare_page; - if (s->failed == r6s->q_failed) { - /* The only possible failed device holds 'Q', so it - * makes sense to check P (If anything else were failed, - * we would have used P to recreate it). 
- */ - compute_block_1(sh, pd_idx, 1); - if (!page_is_zero(sh->dev[pd_idx].page)) { - compute_block_1(sh, pd_idx, 0); - update_p = 1; + + switch (sh->check_state) { + case check_state_idle: + /* start a new check operation if there are < 2 failures */ + if (s->failed == r6s->q_failed) { + /* The only possible failed device holds Q, so it + * makes sense to check P (If anything else were failed, + * we would have used P to recreate it). + */ + sh->check_state = check_state_run; } - } - if (!r6s->q_failed && s->failed < 2) { - /* q is not failed, and we didn't use it to generate - * anything, so it makes sense to check it - */ - memcpy(page_address(tmp_page), - page_address(sh->dev[qd_idx].page), - STRIPE_SIZE); - compute_parity6(sh, UPDATE_PARITY); - if (memcmp(page_address(tmp_page), - page_address(sh->dev[qd_idx].page), - STRIPE_SIZE) != 0) { - clear_bit(STRIPE_INSYNC, &sh->state); - update_q = 1; + if (!r6s->q_failed && s->failed < 2) { + /* Q is not failed, and we didn't use it to generate + * anything, so it makes sense to check it + */ + if (sh->check_state == check_state_run) + sh->check_state = check_state_run_pq; + else + sh->check_state = check_state_run_q; } - } - put_cpu(); - if (update_p || update_q) { - conf->mddev->resync_mismatches += STRIPE_SECTORS; - if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) - /* don't try to repair!! */ - update_p = update_q = 0; - } + /* discard potentially stale zero_sum_result */ + sh->ops.zero_sum_result = 0; - /* now write out any block on a failed drive, - * or P or Q if they need it - */ + if (sh->check_state == check_state_run) { + /* async_xor_zero_sum destroys the contents of P */ + clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); + s->uptodate--; + } + if (sh->check_state >= check_state_run && + sh->check_state <= check_state_run_pq) { + /* async_syndrome_zero_sum preserves P and Q, so + * no need to mark them !uptodate here + */ + set_bit(STRIPE_OP_CHECK, &s->ops_request); + break; + } - if (s->failed == 2) { - dev = &sh->dev[r6s->failed_num[1]]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - if (s->failed >= 1) { - dev = &sh->dev[r6s->failed_num[0]]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } + /* we have 2-disk failure */ + BUG_ON(s->failed != 2); + /* fall through */ + case check_state_compute_result: + sh->check_state = check_state_idle; - if (update_p) { - dev = &sh->dev[pd_idx]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - if (update_q) { - dev = &sh->dev[qd_idx]; - s->locked++; - set_bit(R5_LOCKED, &dev->flags); - set_bit(R5_Wantwrite, &dev->flags); - } - clear_bit(STRIPE_DEGRADED, &sh->state); + /* check that a write has not made the stripe insync */ + if (test_bit(STRIPE_INSYNC, &sh->state)) + break; - set_bit(STRIPE_INSYNC, &sh->state); + /* now write out any block on a failed drive, + * or P or Q if they were recomputed + */ + BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ + if (s->failed == 2) { + dev = &sh->dev[r6s->failed_num[1]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (s->failed >= 1) { + dev = &sh->dev[r6s->failed_num[0]]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { + dev = &sh->dev[pd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + if 
(sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { + dev = &sh->dev[qd_idx]; + s->locked++; + set_bit(R5_LOCKED, &dev->flags); + set_bit(R5_Wantwrite, &dev->flags); + } + clear_bit(STRIPE_DEGRADED, &sh->state); + + set_bit(STRIPE_INSYNC, &sh->state); + break; + case check_state_run: + case check_state_run_q: + case check_state_run_pq: + break; /* we will be called again upon completion */ + case check_state_check_result: + sh->check_state = check_state_idle; + + /* handle a successful check operation, if parity is correct + * we are done. Otherwise update the mismatch count and repair + * parity if !MD_RECOVERY_CHECK + */ + if (sh->ops.zero_sum_result == 0) { + /* both parities are correct */ + if (!s->failed) + set_bit(STRIPE_INSYNC, &sh->state); + else { + /* in contrast to the raid5 case we can validate + * parity, but still have a failure to write + * back + */ + sh->check_state = check_state_compute_result; + /* Returning at this point means that we may go + * off and bring p and/or q uptodate again so + * we make sure to check zero_sum_result again + * to verify if p or q need writeback + */ + } + } else { + conf->mddev->resync_mismatches += STRIPE_SECTORS; + if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) + /* don't try to repair!! */ + set_bit(STRIPE_INSYNC, &sh->state); + else { + int *target = &sh->ops.target; + + sh->ops.target = -1; + sh->ops.target2 = -1; + sh->check_state = check_state_compute_run; + set_bit(STRIPE_COMPUTE_RUN, &sh->state); + set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); + if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { + set_bit(R5_Wantcompute, + &sh->dev[pd_idx].flags); + *target = pd_idx; + target = &sh->ops.target2; + s->uptodate++; + } + if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { + set_bit(R5_Wantcompute, + &sh->dev[qd_idx].flags); + *target = qd_idx; + s->uptodate++; + } + } + } + break; + case check_state_compute_run: + break; + default: + printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", + __func__, sh->check_state, + (unsigned long long) sh->sector); + BUG(); + } } static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, From 6c0069c0ae9659e3a91b68eaed06a5c6c37f45c8 Mon Sep 17 00:00:00 2001 From: Yuri Tikhonov Date: Sat, 29 Aug 2009 19:13:13 -0700 Subject: [PATCH 0053/3409] md/raid6: asynchronous handle_stripe6 1/ Use STRIPE_OP_BIOFILL to offload completion of read requests to raid_run_ops 2/ Implement a handler for sh->reconstruct_state similar to the raid5 case (adds handling of Q parity) 3/ Prevent handle_parity_checks6 from running concurrently with 'compute' operations 4/ Hook up raid_run_ops Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 120 +++++++++++++++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 38 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3c31f7f8aa65..a833de189ca6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -3424,9 +3424,10 @@ static bool handle_stripe6(struct stripe_head *sh) mdk_rdev_t *blocked_rdev = NULL; pr_debug("handling stripe %llu, state=%#lx cnt=%d, " - "pd_idx=%d, qd_idx=%d\n", + "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, - atomic_read(&sh->count), pd_idx, qd_idx); + atomic_read(&sh->count), pd_idx, qd_idx, + sh->check_state, sh->reconstruct_state); memset(&s, 0, sizeof(s)); spin_lock(&sh->lock); @@ -3446,35 +3447,24 @@ static bool handle_stripe6(struct stripe_head *sh) pr_debug("check %d: 
state 0x%lx read %p write %p written %p\n", i, dev->flags, dev->toread, dev->towrite, dev->written); - /* maybe we can reply to a read */ - if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { - struct bio *rbi, *rbi2; - pr_debug("Return read for disc %d\n", i); - spin_lock_irq(&conf->device_lock); - rbi = dev->toread; - dev->toread = NULL; - if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&conf->wait_for_overlap); - spin_unlock_irq(&conf->device_lock); - while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { - copy_data(0, rbi, dev->page, dev->sector); - rbi2 = r5_next_bio(rbi, dev->sector); - spin_lock_irq(&conf->device_lock); - if (!raid5_dec_bi_phys_segments(rbi)) { - rbi->bi_next = return_bi; - return_bi = rbi; - } - spin_unlock_irq(&conf->device_lock); - rbi = rbi2; - } - } + /* maybe we can reply to a read + * + * new wantfill requests are only permitted while + * ops_complete_biofill is guaranteed to be inactive + */ + if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && + !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) + set_bit(R5_Wantfill, &dev->flags); /* now count some things */ if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; + if (test_bit(R5_Wantcompute, &dev->flags)) + BUG_ON(++s.compute > 2); - - if (dev->toread) + if (test_bit(R5_Wantfill, &dev->flags)) { + s.to_fill++; + } else if (dev->toread) s.to_read++; if (dev->towrite) { s.to_write++; @@ -3515,6 +3505,11 @@ static bool handle_stripe6(struct stripe_head *sh) blocked_rdev = NULL; } + if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { + set_bit(STRIPE_OP_BIOFILL, &s.ops_request); + set_bit(STRIPE_BIOFILL_RUN, &sh->state); + } + pr_debug("locked=%d uptodate=%d to_read=%d" " to_write=%d failed=%d failed_num=%d,%d\n", s.locked, s.uptodate, s.to_read, s.to_write, s.failed, @@ -3555,9 +3550,43 @@ static bool handle_stripe6(struct stripe_head *sh) * or to load a block that is being partially written. */ if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || - (s.syncing && (s.uptodate < disks)) || s.expanding) + (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) handle_stripe_fill6(sh, &s, &r6s, disks); + /* Now we check to see if any write operations have recently + * completed + */ + if (sh->reconstruct_state == reconstruct_state_drain_result) { + int qd_idx = sh->qd_idx; + + sh->reconstruct_state = reconstruct_state_idle; + /* All the 'written' buffers and the parity blocks are ready to + * be written back to disk + */ + BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); + BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags)); + for (i = disks; i--; ) { + dev = &sh->dev[i]; + if (test_bit(R5_LOCKED, &dev->flags) && + (i == sh->pd_idx || i == qd_idx || + dev->written)) { + pr_debug("Writing block %d\n", i); + BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); + set_bit(R5_Wantwrite, &dev->flags); + if (!test_bit(R5_Insync, &dev->flags) || + ((i == sh->pd_idx || i == qd_idx) && + s.failed == 0)) + set_bit(STRIPE_INSYNC, &sh->state); + } + } + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { + atomic_dec(&conf->preread_active_stripes); + if (atomic_read(&conf->preread_active_stripes) < + IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + } + } + /* Now to consider new write requests and what else, if anything * should be read. We do not handle new writes when: * 1/ A 'write' operation (copy+gen_syndrome) is already in flight. 
@@ -3569,9 +3598,13 @@ static bool handle_stripe6(struct stripe_head *sh) /* maybe we need to check and possibly fix the parity for this stripe * Any reads will already have been scheduled, so we just see if enough - * data is available + * data is available. The parity check is held off while parity + * dependent operations are in flight. */ - if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) + if (sh->check_state || + (s.syncing && s.locked == 0 && + !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && + !test_bit(STRIPE_INSYNC, &sh->state))) handle_parity_checks6(conf, sh, &s, &r6s, disks); if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { @@ -3593,15 +3626,29 @@ static bool handle_stripe6(struct stripe_head *sh) set_bit(R5_Wantwrite, &dev->flags); set_bit(R5_ReWrite, &dev->flags); set_bit(R5_LOCKED, &dev->flags); + s.locked++; } else { /* let's read it back */ set_bit(R5_Wantread, &dev->flags); set_bit(R5_LOCKED, &dev->flags); + s.locked++; } } } - if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { + /* Finish reconstruct operations initiated by the expansion process */ + if (sh->reconstruct_state == reconstruct_state_result) { + sh->reconstruct_state = reconstruct_state_idle; + clear_bit(STRIPE_EXPANDING, &sh->state); + for (i = conf->raid_disks; i--; ) { + set_bit(R5_Wantwrite, &sh->dev[i].flags); + set_bit(R5_LOCKED, &sh->dev[i].flags); + s.locked++; + } + } + + if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && + !sh->reconstruct_state) { struct stripe_head *sh2 = get_active_stripe(conf, sh->sector, 1, 1); if (sh2 && test_bit(STRIPE_EXPAND_SOURCE, &sh2->state)) { @@ -3622,14 +3669,8 @@ static bool handle_stripe6(struct stripe_head *sh) /* Need to write out all blocks after computing P&Q */ sh->disks = conf->raid_disks; stripe_set_idx(sh->sector, conf, 0, sh); - compute_parity6(sh, RECONSTRUCT_WRITE); - for (i = conf->raid_disks ; i-- ; ) { - set_bit(R5_LOCKED, &sh->dev[i].flags); - s.locked++; - set_bit(R5_Wantwrite, &sh->dev[i].flags); - } - clear_bit(STRIPE_EXPANDING, &sh->state); - } else if (s.expanded) { + schedule_reconstruction(sh, &s, 1, 1); + } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { clear_bit(STRIPE_EXPAND_READY, &sh->state); atomic_dec(&conf->reshape_stripes); wake_up(&conf->wait_for_overlap); @@ -3647,6 +3688,9 @@ static bool handle_stripe6(struct stripe_head *sh) if (unlikely(blocked_rdev)) md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); + if (s.ops_request) + raid_run_ops(sh, s.ops_request); + ops_run_io(sh, &s); return_io(return_bi); From b774ef491b4edf6876077014ecbb87f10c69c10f Mon Sep 17 00:00:00 2001 From: Yuri Tikhonov Date: Sat, 29 Aug 2009 19:13:13 -0700 Subject: [PATCH 0054/3409] md/raid6: remove synchronous infrastructure These routines have been replaced by their asynchronous counterparts.
Signed-off-by: Yuri Tikhonov Signed-off-by: Ilya Yanok Signed-off-by: Dan Williams --- drivers/md/raid5.c | 254 --------------------------------------------- 1 file changed, 254 deletions(-) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index a833de189ca6..7c22e19aca82 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1927,253 +1927,6 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) } - -/* - * Copy data between a page in the stripe cache, and one or more bion - * The page could align with the middle of the bio, or there could be - * several bion, each with several bio_vecs, which cover part of the page - * Multiple bion are linked together on bi_next. There may be extras - * at the end of this list. We ignore them. - */ -static void copy_data(int frombio, struct bio *bio, - struct page *page, - sector_t sector) -{ - char *pa = page_address(page); - struct bio_vec *bvl; - int i; - int page_offset; - - if (bio->bi_sector >= sector) - page_offset = (signed)(bio->bi_sector - sector) * 512; - else - page_offset = (signed)(sector - bio->bi_sector) * -512; - bio_for_each_segment(bvl, bio, i) { - int len = bio_iovec_idx(bio,i)->bv_len; - int clen; - int b_offset = 0; - - if (page_offset < 0) { - b_offset = -page_offset; - page_offset += b_offset; - len -= b_offset; - } - - if (len > 0 && page_offset + len > STRIPE_SIZE) - clen = STRIPE_SIZE - page_offset; - else clen = len; - - if (clen > 0) { - char *ba = __bio_kmap_atomic(bio, i, KM_USER0); - if (frombio) - memcpy(pa+page_offset, ba+b_offset, clen); - else - memcpy(ba+b_offset, pa+page_offset, clen); - __bio_kunmap_atomic(ba, KM_USER0); - } - if (clen < len) /* hit end of page */ - break; - page_offset += len; - } -} - -#define check_xor() do { \ - if (count == MAX_XOR_BLOCKS) { \ - xor_blocks(count, STRIPE_SIZE, dest, ptr);\ - count = 0; \ - } \ - } while(0) - -static void compute_parity6(struct stripe_head *sh, int method) -{ - raid5_conf_t *conf = sh->raid_conf; - int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; - int syndrome_disks = sh->ddf_layout ? 
disks : (disks - 2); - struct bio *chosen; - /**** FIX THIS: This could be very bad if disks is close to 256 ****/ - void *ptrs[syndrome_disks+2]; - - pd_idx = sh->pd_idx; - qd_idx = sh->qd_idx; - d0_idx = raid6_d0(sh); - - pr_debug("compute_parity, stripe %llu, method %d\n", - (unsigned long long)sh->sector, method); - - switch(method) { - case READ_MODIFY_WRITE: - BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ - case RECONSTRUCT_WRITE: - for (i= disks; i-- ;) - if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { - chosen = sh->dev[i].towrite; - sh->dev[i].towrite = NULL; - - if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) - wake_up(&conf->wait_for_overlap); - - BUG_ON(sh->dev[i].written); - sh->dev[i].written = chosen; - } - break; - case CHECK_PARITY: - BUG(); /* Not implemented yet */ - } - - for (i = disks; i--;) - if (sh->dev[i].written) { - sector_t sector = sh->dev[i].sector; - struct bio *wbi = sh->dev[i].written; - while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { - copy_data(1, wbi, sh->dev[i].page, sector); - wbi = r5_next_bio(wbi, sector); - } - - set_bit(R5_LOCKED, &sh->dev[i].flags); - set_bit(R5_UPTODATE, &sh->dev[i].flags); - } - - /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ - - for (i = 0; i < disks; i++) - ptrs[i] = (void *)raid6_empty_zero_page; - - count = 0; - i = d0_idx; - do { - int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); - - ptrs[slot] = page_address(sh->dev[i].page); - if (slot < syndrome_disks && - !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { - printk(KERN_ERR "block %d/%d not uptodate " - "on parity calc\n", i, count); - BUG(); - } - - i = raid6_next_disk(i, disks); - } while (i != d0_idx); - BUG_ON(count != syndrome_disks); - - raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); - - switch(method) { - case RECONSTRUCT_WRITE: - set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); - set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); - set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); - set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); - break; - case UPDATE_PARITY: - set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); - set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); - break; - } -} - - -/* Compute one missing block */ -static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) -{ - int i, count, disks = sh->disks; - void *ptr[MAX_XOR_BLOCKS], *dest, *p; - int qd_idx = sh->qd_idx; - - pr_debug("compute_block_1, stripe %llu, idx %d\n", - (unsigned long long)sh->sector, dd_idx); - - if ( dd_idx == qd_idx ) { - /* We're actually computing the Q drive */ - compute_parity6(sh, UPDATE_PARITY); - } else { - dest = page_address(sh->dev[dd_idx].page); - if (!nozero) memset(dest, 0, STRIPE_SIZE); - count = 0; - for (i = disks ; i--; ) { - if (i == dd_idx || i == qd_idx) - continue; - p = page_address(sh->dev[i].page); - if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) - ptr[count++] = p; - else - printk("compute_block() %d, stripe %llu, %d" - " not present\n", dd_idx, - (unsigned long long)sh->sector, i); - - check_xor(); - } - if (count) - xor_blocks(count, STRIPE_SIZE, dest, ptr); - if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); - else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); - } -} - -/* Compute two missing blocks */ -static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) -{ - int i, count, disks = sh->disks; - int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; - int d0_idx = raid6_d0(sh); - int faila = -1, failb = -1; - /**** FIX THIS: This could be very bad if disks is close to 256 ****/ - void *ptrs[syndrome_disks+2]; - - for (i = 0; i < disks ; i++) - ptrs[i] = (void *)raid6_empty_zero_page; - count = 0; - i = d0_idx; - do { - int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); - - ptrs[slot] = page_address(sh->dev[i].page); - - if (i == dd_idx1) - faila = slot; - if (i == dd_idx2) - failb = slot; - i = raid6_next_disk(i, disks); - } while (i != d0_idx); - BUG_ON(count != syndrome_disks); - - BUG_ON(faila == failb); - if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } - - pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", - (unsigned long long)sh->sector, dd_idx1, dd_idx2, - faila, failb); - - if (failb == syndrome_disks+1) { - /* Q disk is one of the missing disks */ - if (faila == syndrome_disks) { - /* Missing P+Q, just recompute */ - compute_parity6(sh, UPDATE_PARITY); - return; - } else { - /* We're missing D+Q; recompute D from P */ - compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? - dd_idx2 : dd_idx1), - 0); - compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ - return; - } - } - - /* We're missing D+P or D+D; */ - if (failb == syndrome_disks) { - /* We're missing D+P. */ - raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); - } else { - /* We're missing D+D. */ - raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, - ptrs); - } - - /* Both the above update both missing blocks */ - set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); - set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); -} - static void schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, int rcw, int expand) @@ -2331,13 +2084,6 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in static void end_reshape(raid5_conf_t *conf); -static int page_is_zero(struct page *p) -{ - char *a = page_address(p); - return ((*(u32*)a) == 0 && - memcmp(a, a+4, STRIPE_SIZE-4)==0); -} - static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, struct stripe_head *sh) { From 07a3b417dc3d00802bd7b4874c3e811f0b015a7d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Sat, 29 Aug 2009 19:13:13 -0700 Subject: [PATCH 0055/3409] md/raid456: distribute raid processing over multiple cores Now that the resources to handle stripe_head operations are allocated percpu it is possible for raid5d to distribute stripe handling over multiple cores. This conversion also adds a call to cond_resched() in the non-multicore case to prevent one core from getting monopolized for raid operations. Cc: Arjan van de Ven Signed-off-by: Dan Williams --- drivers/md/Kconfig | 11 +++++++++++ drivers/md/raid5.c | 36 ++++++++++++++++++++++++++++++++++-- 2 files changed, 45 insertions(+), 2 deletions(-) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index abb8636bfde2..09c0c6e49ab5 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -154,6 +154,17 @@ config MD_RAID456 If unsure, say Y. +config MULTICORE_RAID456 + bool "RAID-4/RAID-5/RAID-6 Multicore processing (EXPERIMENTAL)" + depends on MD_RAID456 + depends on SMP + depends on EXPERIMENTAL + ---help--- + Enable the raid456 module to dispatch per-stripe raid operations to a + thread pool. + + If unsure, say N. 
+ config MD_RAID6_PQ tristate diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7c22e19aca82..364ea37706fa 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -47,6 +47,7 @@ #include #include #include +#include #include #include #include "md.h" @@ -4314,6 +4315,36 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) return handled; } +#ifdef CONFIG_MULTICORE_RAID456 +static void __process_stripe(void *param, async_cookie_t cookie) +{ + struct stripe_head *sh = param; + + handle_stripe(sh); + release_stripe(sh); +} + +static void process_stripe(struct stripe_head *sh, struct list_head *domain) +{ + async_schedule_domain(__process_stripe, sh, domain); +} + +static void synchronize_stripe_processing(struct list_head *domain) +{ + async_synchronize_full_domain(domain); +} +#else +static void process_stripe(struct stripe_head *sh, struct list_head *domain) +{ + handle_stripe(sh); + release_stripe(sh); + cond_resched(); +} + +static void synchronize_stripe_processing(struct list_head *domain) +{ +} +#endif /* @@ -4328,6 +4359,7 @@ static void raid5d(mddev_t *mddev) struct stripe_head *sh; raid5_conf_t *conf = mddev_to_conf(mddev); int handled; + LIST_HEAD(raid_domain); pr_debug("+++ raid5d active\n"); @@ -4364,8 +4396,7 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); handled++; - handle_stripe(sh); - release_stripe(sh); + process_stripe(sh, &raid_domain); spin_lock_irq(&conf->device_lock); } @@ -4373,6 +4404,7 @@ static void raid5d(mddev_t *mddev) spin_unlock_irq(&conf->device_lock); + synchronize_stripe_processing(&raid_domain); async_tx_issue_pending_all(); unplug_slaves(mddev); From d8fb91e8344ff11381fc47d43e0b234065be4bd0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 26 Jun 2009 08:14:36 +0200 Subject: [PATCH 0056/3409] CRIS: add pgprot_noncached On CRIS, the high address bit controls caching, which means that we can add a pgprot_noncached() macro that sets this bit in the address. 
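A small self-contained C model of that convention: treat the protection value as a plain 32-bit integer and set bit 31 to mark the mapping uncached. The bit value matches the _PAGE_NO_CACHE definition the patch adds; the demo function name and the sample permission bits are simplified for illustration and are not the kernel macro itself.

#include <stdio.h>

#define _PAGE_NO_CACHE (1u << 31)   /* high address bit => bypass cache */

/* Demo counterpart of the pgprot_noncached() macro described above. */
static unsigned int pgprot_noncached_demo(unsigned int prot)
{
    return prot | _PAGE_NO_CACHE;
}

int main(void)
{
    unsigned int prot = 0x0000000fu;   /* pretend permission bits */

    printf("cached:   %#010x\n", prot);
    printf("uncached: %#010x\n", pgprot_noncached_demo(prot));
    return 0;
}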
Signed-off-by: Arnd Bergmann Acked-by: Jesper Nilsson --- arch/cris/include/arch-v10/arch/mmu.h | 9 +++++---- arch/cris/include/arch-v32/arch/mmu.h | 10 ++++++---- arch/cris/include/asm/pgtable.h | 2 ++ 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/arch/cris/include/arch-v10/arch/mmu.h b/arch/cris/include/arch-v10/arch/mmu.h index df84f1716e6b..e829e5a37bbe 100644 --- a/arch/cris/include/arch-v10/arch/mmu.h +++ b/arch/cris/include/arch-v10/arch/mmu.h @@ -33,10 +33,10 @@ typedef struct /* CRIS PTE bits (see R_TLB_LO in the register description) * - * Bit: 31-13 12-------4 3 2 1 0 - * ________________________________________________ - * | pfn | reserved | global | valid | kernel | we | - * |_____|__________|________|_______|________|_____| + * Bit: 31 30-13 12-------4 3 2 1 0 + * _______________________________________________________ + * | cache |pfn | reserved | global | valid | kernel | we | + * |_______|____|__________|________|_______|________|_____| * * (pfn = physical frame number) */ @@ -53,6 +53,7 @@ typedef struct #define _PAGE_VALID (1<<2) /* page is valid */ #define _PAGE_SILENT_READ (1<<2) /* synonym */ #define _PAGE_GLOBAL (1<<3) /* global page - context is ignored */ +#define _PAGE_NO_CACHE (1<<31) /* part of the uncached memory map */ /* Bits the HW doesn't care about but the kernel uses them in SW */ diff --git a/arch/cris/include/arch-v32/arch/mmu.h b/arch/cris/include/arch-v32/arch/mmu.h index 6bcdc3fdf7dc..c1a13e05e963 100644 --- a/arch/cris/include/arch-v32/arch/mmu.h +++ b/arch/cris/include/arch-v32/arch/mmu.h @@ -28,10 +28,10 @@ typedef struct /* * CRISv32 PTE bits: * - * Bit: 31-13 12-5 4 3 2 1 0 - * +-----+------+--------+-------+--------+-------+---------+ - * | pfn | zero | global | valid | kernel | write | execute | - * +-----+------+--------+-------+--------+-------+---------+ + * Bit: 31 30-13 12-5 4 3 2 1 0 + * +-------+-----+------+--------+-------+--------+-------+---------+ + * | cache | pfn | zero | global | valid | kernel | write | execute | + * +-------+-----+------+--------+-------+--------+-------+---------+ */ /* @@ -45,6 +45,8 @@ typedef struct #define _PAGE_VALID (1 << 3) /* Page is valid. */ #define _PAGE_SILENT_READ (1 << 3) /* Same as above. */ #define _PAGE_GLOBAL (1 << 4) /* Global page. */ +#define _PAGE_NO_CACHE (1 << 31) /* part of the uncached memory map */ + /* * The hardware doesn't care about these bits, but the kernel uses them in diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index 50aa974aa834..1fcce00f01f4 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -197,6 +197,8 @@ static inline pte_t __mk_pte(void * page, pgprot_t pgprot) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; } +#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) | _PAGE_NO_CACHE)) + /* pte_val refers to a page in the 0x4xxxxxxx physical DRAM interval * __pte_page(pte_val) refers to the "virtual" DRAM interval From a090ca2c840a3459642971f26bdbad96d2482e32 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 31 Aug 2009 18:20:30 +0200 Subject: [PATCH 0057/3409] CRIS: fix defconfig build failure The CRIS defconfig fails to build with: drivers/serial/crisv10.c: In function 'rs_wait_until_sent': drivers/serial/crisv10.c:3926: error: implicit declaration of function 'lock_kernel' drivers/serial/crisv10.c:3943: error: implicit declaration of function 'unlock_kernel' Include smp_lock.h. 
LKML-Reference: Signed-off-by: Ingo Molnar Signed-off-by: Jesper Nilsson --- drivers/serial/crisv10.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index 7be52fe288eb..31f172397af3 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -18,6 +18,7 @@ static char *serial_version = "$Revision: 1.25 $"; #include #include #include +#include #include #include #include From e6cd19743bfc76ca81804c2a259e3a0d7b03fd73 Mon Sep 17 00:00:00 2001 From: Jesper Nilsson Date: Mon, 31 Aug 2009 18:28:26 +0200 Subject: [PATCH 0058/3409] CRISv10: Don't autonegotiate if autonegotiation is off If autonegotiation was disabled, we still set the BMCR_ANENABLE and BMCR_ANRESTART, which resulted in autonegotiation never being disabled. Signed-off-by: Jesper Nilsson Reported-by: Julia Lawall --- drivers/net/cris/eth_v10.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 7a18dc7e5c7f..d70d2039bc46 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev) e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); - /* Renegotiate with link partner */ + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); if (autoneg_normal) { - data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); - data |= BMCR_ANENABLE | BMCR_ANRESTART; + /* Renegotiate with link partner */ + data |= BMCR_ANENABLE | BMCR_ANRESTART; + } else { + /* Don't negotiate speed or duplex */ + data &= ~(BMCR_ANENABLE | BMCR_ANRESTART); + + /* Set speed and duplex static */ + if (current_speed_selection == 10) + data &= ~BMCR_SPEED100; + else + data |= BMCR_SPEED100; + + if (current_duplex != full) + data &= ~BMCR_FULLDPLX; + else + data |= BMCR_FULLDPLX; } e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); } From 0395ac6447c05e9393d4f8187ea6588b9d1daeb7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 4 Aug 2009 16:49:52 +0200 Subject: [PATCH 0059/3409] CRIS: convert to asm-generic/hardirq.h Convert CRIS to use asm-generic/hardirq.h Also remove unneeded (incorrect) prototypes for setup_irq(). 
Signed-off-by: Christoph Hellwig Signed-off-by: Jesper Nilsson --- arch/cris/arch-v10/kernel/time.c | 1 - arch/cris/arch-v32/kernel/smp.c | 2 -- arch/cris/arch-v32/kernel/time.c | 1 - arch/cris/include/asm/hardirq.h | 12 ++---------- arch/cris/kernel/irq.c | 5 ----- 5 files changed, 2 insertions(+), 19 deletions(-) diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c index 2b73c7a5b649..31ca1418d5a7 100644 --- a/arch/cris/arch-v10/kernel/time.c +++ b/arch/cris/arch-v10/kernel/time.c @@ -28,7 +28,6 @@ extern void update_xtime_from_cmos(void); extern int set_rtc_mmss(unsigned long nowtime); -extern int setup_irq(int, struct irqaction *); extern int have_rtc; unsigned long get_ns_in_jiffie(void) diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c index d2a3ff8c4d37..058adddf4e4b 100644 --- a/arch/cris/arch-v32/kernel/smp.c +++ b/arch/cris/arch-v32/kernel/smp.c @@ -52,8 +52,6 @@ static struct mm_struct* flush_mm; static struct vm_area_struct* flush_vma; static unsigned long flush_addr; -extern int setup_irq(int, struct irqaction *); - /* Mode registers */ static unsigned long irq_regs[NR_CPUS] = { regi_irq, diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c index 65633d0dab86..b1920d8de403 100644 --- a/arch/cris/arch-v32/kernel/time.c +++ b/arch/cris/arch-v32/kernel/time.c @@ -46,7 +46,6 @@ unsigned long timer_regs[NR_CPUS] = extern void update_xtime_from_cmos(void); extern int set_rtc_mmss(unsigned long nowtime); -extern int setup_irq(int, struct irqaction *); extern int have_rtc; #ifdef CONFIG_CPU_FREQ diff --git a/arch/cris/include/asm/hardirq.h b/arch/cris/include/asm/hardirq.h index 74178adeb1cd..17bb12d760b2 100644 --- a/arch/cris/include/asm/hardirq.h +++ b/arch/cris/include/asm/hardirq.h @@ -2,16 +2,6 @@ #define __ASM_HARDIRQ_H #include -#include -#include - -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include /* Standard mappings for irq_cpustat_t above */ - -void ack_bad_irq(unsigned int irq); #define HARDIRQ_BITS 8 @@ -24,4 +14,6 @@ void ack_bad_irq(unsigned int irq); # error HARDIRQ_BITS is too low! 
#endif +#include + #endif /* __ASM_HARDIRQ_H */ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 7f642fcffbfc..0ca7d9892cc6 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -38,11 +38,6 @@ #include -void ack_bad_irq(unsigned int irq) -{ - printk("unexpected IRQ trap at vector %02x\n", irq); -} - int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; From 62f589c5e391dc2081db42e801382304c652e43a Mon Sep 17 00:00:00 2001 From: Stoyan Gaydarov Date: Wed, 22 Jul 2009 05:02:33 +0200 Subject: [PATCH 0060/3409] ARRAY_SIZE changes These changes were a direct result of using a semantic patch More information can be found at http://www.emn.fr/x-info/coccinelle/ Signed-off-by: Stoyan Gaydarov Signed-off-by: Jesper Nilsson --- arch/cris/arch-v32/mach-a3/io.c | 2 +- arch/cris/arch-v32/mach-fs/io.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/cris/arch-v32/mach-a3/io.c b/arch/cris/arch-v32/mach-a3/io.c index c22f67ecd9f3..090ceb99ef0b 100644 --- a/arch/cris/arch-v32/mach-a3/io.c +++ b/arch/cris/arch-v32/mach-a3/io.c @@ -36,7 +36,7 @@ struct crisv32_ioport crisv32_ioports[] = { }, }; -#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport) +#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports) struct crisv32_iopin crisv32_led_net0_green; struct crisv32_iopin crisv32_led_net0_red; diff --git a/arch/cris/arch-v32/mach-fs/io.c b/arch/cris/arch-v32/mach-fs/io.c index cb6327b1f8f8..a6958661fa8e 100644 --- a/arch/cris/arch-v32/mach-fs/io.c +++ b/arch/cris/arch-v32/mach-fs/io.c @@ -52,7 +52,7 @@ struct crisv32_ioport crisv32_ioports[] = { } }; -#define NBR_OF_PORTS sizeof(crisv32_ioports)/sizeof(struct crisv32_ioport) +#define NBR_OF_PORTS ARRAY_SIZE(crisv32_ioports) struct crisv32_iopin crisv32_led_net0_green; struct crisv32_iopin crisv32_led_net0_red; From 492e1501431e0d24c5b46933fdcb60639eacded7 Mon Sep 17 00:00:00 2001 From: Mika Korhonen Date: Tue, 9 Jun 2009 21:52:35 +0300 Subject: [PATCH 0061/3409] mtd: OneNAND: spelling fixes Signed-off-by: Mika Korhonen Acked-by: Kyungmin Park Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/onenand/onenand_base.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c index 6e829095ea9d..ff66e4330aa7 100644 --- a/drivers/mtd/onenand/onenand_base.c +++ b/drivers/mtd/onenand/onenand_base.c @@ -1191,7 +1191,7 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from, /* * Chip boundary handling in DDP * Now we issued chip 1 read and pointed chip 1 - * bufferam so we have to point chip 0 bufferam. + * bufferram so we have to point chip 0 bufferram. */ if (ONENAND_IS_DDP(this) && unlikely(from == (this->chipsize >> 1))) { @@ -1867,8 +1867,8 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, ONENAND_SET_NEXT_BUFFERRAM(this); /* - * 2 PLANE, MLC, and Flex-OneNAND doesn't support - * write-while-programe feature. + * 2 PLANE, MLC, and Flex-OneNAND do not support + * write-while-program feature. 
*/ if (!ONENAND_IS_2PLANE(this) && !first) { ONENAND_SET_PREV_BUFFERRAM(this); @@ -1879,7 +1879,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, onenand_update_bufferram(mtd, prev, !ret && !prev_subpage); if (ret) { written -= prevlen; - printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); + printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); break; } @@ -1905,7 +1905,7 @@ static int onenand_write_ops_nolock(struct mtd_info *mtd, loff_t to, /* In partial page write we don't update bufferram */ onenand_update_bufferram(mtd, to, !ret && !subpage); if (ret) { - printk(KERN_ERR "onenand_write_ops_nolock: write filaed %d\n", ret); + printk(KERN_ERR "onenand_write_ops_nolock: write failed %d\n", ret); break; } @@ -2201,7 +2201,7 @@ static int onenand_erase(struct mtd_info *mtd, struct erase_info *instr) /* Grab the lock and see if the device is available */ onenand_get_device(mtd, FL_ERASING); - /* Loop throught the pages */ + /* Loop through the blocks */ instr->state = MTD_ERASING; while (len) { @@ -2328,7 +2328,7 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) if (bbm->bbt) bbm->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); - /* We write two bytes, so we dont have to mess with 16 bit access */ + /* We write two bytes, so we don't have to mess with 16-bit access */ ofs += mtd->oobsize + (bbm->badblockpos & ~0x01); /* FIXME : What to do when marking SLC block in partition * with MLC erasesize? For now, it is not advisable to @@ -2557,7 +2557,7 @@ static void onenand_unlock_all(struct mtd_info *mtd) #ifdef CONFIG_MTD_ONENAND_OTP -/* Interal OTP operation */ +/* Internal OTP operation */ typedef int (*otp_op_t)(struct mtd_info *mtd, loff_t form, size_t len, size_t *retlen, u_char *buf); @@ -2921,7 +2921,7 @@ static void onenand_check_features(struct mtd_info *mtd) this->options |= ONENAND_HAS_2PLANE; case ONENAND_DEVICE_DENSITY_2Gb: - /* 2Gb DDP don't have 2 plane */ + /* 2Gb DDP does not have 2 plane */ if (!ONENAND_IS_DDP(this)) this->options |= ONENAND_HAS_2PLANE; this->options |= ONENAND_HAS_UNLOCK_ALL; @@ -3364,7 +3364,7 @@ static int onenand_probe(struct mtd_info *mtd) /* It's real page size */ this->writesize = mtd->writesize; - /* REVIST: Multichip handling */ + /* REVISIT: Multichip handling */ if (FLEXONENAND(this)) flexonenand_get_size(mtd); From 652696efce135559b98ee5a3d7899295e8d553fa Mon Sep 17 00:00:00 2001 From: Kyungmin Park Date: Wed, 24 Jun 2009 12:03:51 +0900 Subject: [PATCH 0062/3409] mtd: OneNAND: 4-bit ECC status macros Define ECC status for 4-bit ECC status Signed-off-by: Kyungmin Park Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- include/linux/mtd/onenand_regs.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/mtd/onenand_regs.h b/include/linux/mtd/onenand_regs.h index 86a6bbef6465..acadbf53a69f 100644 --- a/include/linux/mtd/onenand_regs.h +++ b/include/linux/mtd/onenand_regs.h @@ -207,6 +207,9 @@ #define ONENAND_ECC_2BIT (1 << 1) #define ONENAND_ECC_2BIT_ALL (0xAAAA) #define FLEXONENAND_UNCORRECTABLE_ERROR (0x1010) +#define ONENAND_ECC_3BIT (1 << 2) +#define ONENAND_ECC_4BIT (1 << 3) +#define ONENAND_ECC_4BIT_UNCORRECTABLE (0x1010) /* * One-Time Programmable (OTP) From b8b3ee9aabbc3e6fc7ef025d861dd780b84eb6c5 Mon Sep 17 00:00:00 2001 From: vimal singh Date: Thu, 9 Jul 2009 20:41:22 +0530 Subject: [PATCH 0063/3409] mtd: nand: remove repeated comment, fix spelling Signed-off-by: Vimal Singh Signed-off-by: Artem Bityutskiy Signed-off-by: David
Woodhouse --- drivers/mtd/nand/nand_base.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 8c21b89d2d0c..4a5a329711c4 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -688,8 +688,7 @@ nand_get_device(struct nand_chip *chip, struct mtd_info *mtd, int new_state) retry: spin_lock(lock); - /* Hardware controller shared among independend devices */ - /* Hardware controller shared among independend devices */ + /* Hardware controller shared among independent devices */ if (!chip->controller->active) chip->controller->active = chip; From 20d8e2489d619ac4f14c46ca376655fc06b3c1ff Mon Sep 17 00:00:00 2001 From: vimal singh Date: Tue, 7 Jul 2009 15:49:49 +0530 Subject: [PATCH 0064/3409] mtd: nand_base: use __func__ instead of typing names Correcting debug prints by removing function names from print messages and using '__func__' macro instead. Function names were wrong in few places. Signed-off-by: Vimal Singh Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_base.c | 82 ++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 40 deletions(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 4a5a329711c4..268c9a4317bd 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1412,8 +1412,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, int len; uint8_t *buf = ops->oobbuf; - DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n", - (unsigned long long)from, readlen); + DEBUG(MTD_DEBUG_LEVEL3, "%s: from = 0x%08Lx, len = %i\n", + __func__, (unsigned long long)from, readlen); if (ops->mode == MTD_OOB_AUTO) len = chip->ecc.layout->oobavail; @@ -1421,8 +1421,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, len = mtd->oobsize; if (unlikely(ops->ooboffs >= len)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " - "Attempt to start read outside oob\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start read " + "outside oob\n", __func__); return -EINVAL; } @@ -1430,8 +1430,8 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, if (unlikely(from >= mtd->size || ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - (from >> chip->page_shift)) * len)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " - "Attempt read beyond end of device\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read beyond end " + "of device\n", __func__); return -EINVAL; } @@ -1505,8 +1505,8 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, /* Do not allow reads past end of device */ if (ops->datbuf && (from + ops->len) > mtd->size) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " - "Attempt read beyond end of device\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt read " + "beyond end of device\n", __func__); return -EINVAL; } @@ -1815,8 +1815,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, /* reject writes, which are not page aligned */ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { - printk(KERN_NOTICE "nand_write: " - "Attempt to write not page aligned data\n"); + printk(KERN_NOTICE "%s: Attempt to write not " + "page aligned data\n", __func__); return -EINVAL; } @@ -1943,8 +1943,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, int chipnr, page, status, len; struct nand_chip *chip = mtd->priv; - DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", - (unsigned int)to, (int)ops->ooblen); + 
DEBUG(MTD_DEBUG_LEVEL3, "%s: to = 0x%08x, len = %i\n", + __func__, (unsigned int)to, (int)ops->ooblen); if (ops->mode == MTD_OOB_AUTO) len = chip->ecc.layout->oobavail; @@ -1953,14 +1953,14 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, /* Do not allow write past end of page */ if ((ops->ooboffs + ops->ooblen) > len) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " - "Attempt to write past end of page\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to write " + "past end of page\n", __func__); return -EINVAL; } if (unlikely(ops->ooboffs >= len)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: " - "Attempt to start write outside oob\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt to start " + "write outside oob\n", __func__); return -EINVAL; } @@ -1969,8 +1969,8 @@ static int nand_do_write_oob(struct mtd_info *mtd, loff_t to, ops->ooboffs + ops->ooblen > ((mtd->size >> chip->page_shift) - (to >> chip->page_shift)) * len)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_do_write_oob: " - "Attempt write beyond end of device\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " + "end of device\n", __func__); return -EINVAL; } @@ -2025,8 +2025,8 @@ static int nand_write_oob(struct mtd_info *mtd, loff_t to, /* Do not allow writes past end of device */ if (ops->datbuf && (to + ops->len) > mtd->size) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " - "Attempt write beyond end of device\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Attempt write beyond " + "end of device\n", __func__); return -EINVAL; } @@ -2116,26 +2116,27 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, unsigned int bbt_masked_page = 0xffffffff; loff_t len; - DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%012llx, len = %llu\n", - (unsigned long long)instr->addr, (unsigned long long)instr->len); + DEBUG(MTD_DEBUG_LEVEL3, "%s: start = 0x%012llx, len = %llu\n", + __func__, (unsigned long long)instr->addr, + (unsigned long long)instr->len); /* Start address must align on block boundary */ if (instr->addr & ((1 << chip->phys_erase_shift) - 1)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Unaligned address\n", __func__); return -EINVAL; } /* Length must align on block boundary */ if (instr->len & ((1 << chip->phys_erase_shift) - 1)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " - "Length not block aligned\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Length not block aligned\n", + __func__); return -EINVAL; } /* Do not allow erase past end of device */ if ((instr->len + instr->addr) > mtd->size) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " - "Erase past end of device\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Erase past end of device\n", + __func__); return -EINVAL; } @@ -2156,8 +2157,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, /* Check, if it is write protected */ if (nand_check_wp(mtd)) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " - "Device is write protected!!!\n"); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Device is write protected!!!\n", + __func__); instr->state = MTD_ERASE_FAILED; goto erase_exit; } @@ -2182,8 +2183,8 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, */ if (nand_block_checkbad(mtd, ((loff_t) page) << chip->page_shift, 0, allowbbt)) { - printk(KERN_WARNING "nand_erase: attempt to erase a " - "bad block at page 0x%08x\n", page); + printk(KERN_WARNING "%s: attempt to erase a bad block " + "at page 0x%08x\n", __func__, page); instr->state = MTD_ERASE_FAILED; goto erase_exit; } @@ -2210,8 +2211,8 @@ int nand_erase_nand(struct mtd_info *mtd, 
struct erase_info *instr, /* See if block erase succeeded */ if (status & NAND_STATUS_FAIL) { - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " - "Failed erase, page 0x%08x\n", page); + DEBUG(MTD_DEBUG_LEVEL0, "%s: Failed erase, " + "page 0x%08x\n", __func__, page); instr->state = MTD_ERASE_FAILED; instr->fail_addr = ((loff_t)page << chip->page_shift); @@ -2271,9 +2272,9 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, if (!rewrite_bbt[chipnr]) continue; /* update the BBT for chip */ - DEBUG(MTD_DEBUG_LEVEL0, "nand_erase_nand: nand_update_bbt " - "(%d:0x%0llx 0x%0x)\n", chipnr, rewrite_bbt[chipnr], - chip->bbt_td->pages[chipnr]); + DEBUG(MTD_DEBUG_LEVEL0, "%s: nand_update_bbt " + "(%d:0x%0llx 0x%0x)\n", __func__, chipnr, + rewrite_bbt[chipnr], chip->bbt_td->pages[chipnr]); nand_update_bbt(mtd, rewrite_bbt[chipnr]); } @@ -2291,7 +2292,7 @@ static void nand_sync(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; - DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n"); + DEBUG(MTD_DEBUG_LEVEL3, "%s: called\n", __func__); /* Grab the lock and see if the device is available */ nand_get_device(chip, mtd, FL_SYNCING); @@ -2355,8 +2356,8 @@ static void nand_resume(struct mtd_info *mtd) if (chip->state == FL_PM_SUSPENDED) nand_release_device(mtd); else - printk(KERN_ERR "nand_resume() called for a chip which is not " - "in suspended state\n"); + printk(KERN_ERR "%s called for a chip which is not " + "in suspended state\n", __func__); } /* @@ -2857,7 +2858,8 @@ int nand_scan(struct mtd_info *mtd, int maxchips) /* Many callers got this wrong, so check for it for a while... */ if (!mtd->owner && caller_is_module()) { - printk(KERN_CRIT "nand_scan() called with NULL mtd->owner!\n"); + printk(KERN_CRIT "%s called with NULL mtd->owner!\n", + __func__); BUG(); } From dff1550986a4c0e2a4e857c9085ef3cb66b2cec5 Mon Sep 17 00:00:00 2001 From: Matthias Kaehlcke Date: Mon, 6 Jul 2009 12:02:08 +0200 Subject: [PATCH 0065/3409] mtd: fix a typo in comment mtdblock erase_write(): fix typo in comment Signed-off-by: Matthias Kaehlcke Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdblock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 77db5ce24d92..bcfb177c55ec 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -84,7 +84,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos, remove_wait_queue(&wait_q, &wait); /* - * Next, writhe data to flash. + * Next, write the data to flash. */ ret = mtd->write(mtd, pos, len, &retlen, buf); From 44a1f2085e8fe07b3aecdab7c391ca057d75da0f Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Tue, 30 Jun 2009 15:38:00 -0400 Subject: [PATCH 0066/3409] mtd: ep93xx: cleanup includes in ts7250 nand driver 1. should be included not 2. 
add platform specific header Signed-off-by: H Hartley Sweeten Cc: Lennert Buytenhek Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/nand/ts7250.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c index 2c410a011317..0f5562aeedc1 100644 --- a/drivers/mtd/nand/ts7250.c +++ b/drivers/mtd/nand/ts7250.c @@ -24,8 +24,11 @@ #include #include #include -#include +#include + #include +#include + #include #include From 2763c508a3c8f8ec5d6df4e8c63d5e2a5a7d3954 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 17 Jul 2009 17:54:14 +0200 Subject: [PATCH 0067/3409] mtd: physmap_of: use resource_size Signed-off-by: Wolfram Sang Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/maps/physmap_of.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index 39d357b2eb47..f223f3fec3af 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -190,6 +190,7 @@ static int __devinit of_flash_probe(struct of_device *dev, const u32 *p; int reg_tuple_size; struct mtd_info **mtd_list = NULL; + resource_size_t res_size; reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32); @@ -233,8 +234,8 @@ static int __devinit of_flash_probe(struct of_device *dev, (unsigned long long)res.end); err = -EBUSY; - info->list[i].res = request_mem_region(res.start, res.end - - res.start + 1, + res_size = resource_size(&res); + info->list[i].res = request_mem_region(res.start, res_size, dev_name(&dev->dev)); if (!info->list[i].res) goto err_out; @@ -249,7 +250,7 @@ static int __devinit of_flash_probe(struct of_device *dev, info->list[i].map.name = dev_name(&dev->dev); info->list[i].map.phys = res.start; - info->list[i].map.size = res.end - res.start + 1; + info->list[i].map.size = res_size; info->list[i].map.bankwidth = *width; err = -ENOMEM; From 76d6a4791609e4d14b411a513c7648b19b258d8f Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Fri, 17 Jul 2009 17:47:37 +0200 Subject: [PATCH 0068/3409] mtd: plat-ram: use resource_size Signed-off-by: Wolfram Sang Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/maps/plat-ram.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/maps/plat-ram.c b/drivers/mtd/maps/plat-ram.c index 49c9ece76477..dafb91944e70 100644 --- a/drivers/mtd/maps/plat-ram.c +++ b/drivers/mtd/maps/plat-ram.c @@ -175,7 +175,7 @@ static int platram_probe(struct platform_device *pdev) /* setup map parameters */ info->map.phys = res->start; - info->map.size = (res->end - res->start) + 1; + info->map.size = resource_size(res); info->map.name = pdata->mapname != NULL ? 
(char *)pdata->mapname : (char *)pdev->name; info->map.bankwidth = pdata->bankwidth; From 0ffd24fc7f82a0b594250e5f221340be4c322cda Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 18 Jul 2009 14:10:44 +0100 Subject: [PATCH 0069/3409] mtd: afs: fix build warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drivers/mtd/afs.c:244: warning: format ‘%5d’ expects type ‘int’, but argument 4 has type ‘uint64_t’ [dwmw2: fix incorrect 'KB' too] Signed-off-by: Russell King Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/afs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c index d072ca5be689..cec7ab98b2a9 100644 --- a/drivers/mtd/afs.c +++ b/drivers/mtd/afs.c @@ -239,7 +239,7 @@ static int parse_afs_partitions(struct mtd_info *mtd, parts[idx].offset = img_ptr; parts[idx].mask_flags = 0; - printk(" mtd%d: at 0x%08x, %5dKB, %8u, %s\n", + printk(" mtd%d: at 0x%08x, %5lluKiB, %8u, %s\n", idx, img_ptr, parts[idx].size / 1024, iis.imageNumber, str); From 05dd180709fca14fbae617c0dab1bed56be334fc Mon Sep 17 00:00:00 2001 From: "Robert P. J. Day" Date: Sun, 19 Jul 2009 21:19:08 -0400 Subject: [PATCH 0070/3409] mtd: correct typo "MTD_DATAFLASH_VERIFY_WRITE" Fix the misspelling to match the actual config variable defined in drivers/mtd/devices/Kconfig: config MTD_DATAFLASH_WRITE_VERIFY bool "Verify DataFlash page writes" depends on MTD_DATAFLASH Signed-off-by: Robert P. J. Day Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/devices/mtd_dataflash.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index 43976aa4dbb1..70a7369d3211 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -401,7 +401,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, (void) dataflash_waitready(priv->spi); -#ifdef CONFIG_MTD_DATAFLASH_VERIFY_WRITE +#ifdef CONFIG_MTD_DATAFLASH_WRITE_VERIFY /* (3) Compare to Buffer1 */ addr = pageaddr << priv->page_offset; @@ -430,7 +430,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, } else status = 0; -#endif /* CONFIG_MTD_DATAFLASH_VERIFY_WRITE */ +#endif /* CONFIG_MTD_DATAFLASH_WRITE_VERIFY */ remaining = remaining - writelen; pageaddr++; From 9a73290d7735c0671d1d2379ed40025db8b773d0 Mon Sep 17 00:00:00 2001 From: "Singh, Vimal" Date: Fri, 12 Dec 2008 00:10:57 +0000 Subject: [PATCH 0071/3409] mtd: nand_base: allow drivers to choose ECC block size This patch allows drivers to choose the ECC block size in the software ECC case.
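For illustration, a minimal sketch of a board driver taking advantage of this. The function name and the 512-byte value are hypothetical, and whether a size other than 256 bytes is usable depends on the ecc.calculate/ecc.correct implementation paired with it:

	/* Sketch: opt in to a non-default software ECC block size.
	 * Because nand_scan_tail() now only falls back to 256 bytes
	 * when ecc.size is still zero, a value set here is kept. */
	static int board_nand_init(struct mtd_info *mtd)	/* hypothetical */
	{
		struct nand_chip *chip = mtd->priv;

		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.size = 512;	/* hypothetical size */

		return nand_scan_tail(mtd);
	}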
Signed-off-by: Vimal Singh Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/nand/nand_base.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 268c9a4317bd..4c5e8a74e1b2 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2728,7 +2728,8 @@ int nand_scan_tail(struct mtd_info *mtd) chip->ecc.write_page_raw = nand_write_page_raw; chip->ecc.read_oob = nand_read_oob_std; chip->ecc.write_oob = nand_write_oob_std; - chip->ecc.size = 256; + if (!chip->ecc.size) + chip->ecc.size = 256; chip->ecc.bytes = 3; break; From ad4fbc7921bd7cca108ecc1340a014e91ecc8536 Mon Sep 17 00:00:00 2001 From: vimal singh Date: Thu, 30 Jul 2009 20:54:27 +0530 Subject: [PATCH 0072/3409] mtd: physmap_of: fix incorrect check This patch fixes a spelling error that has resulted from copying and pasting. The location of the error was found using a semantic patch, but the semantic patch was not trying to find these errors. After looking things over it seemed logical that this change was needed. The patch also makes sure mtd_list is not freed if it has not been allocated. Signed-off-by: Stoyan Gaydarov Signed-off-by: Vimal Singh Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/maps/physmap_of.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index f223f3fec3af..e828d58910f0 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -205,7 +205,7 @@ static int __devinit of_flash_probe(struct of_device *dev, dev_err(&dev->dev, "Malformed reg property on %s\n", dev->node->full_name); err = -EINVAL; - goto err_out; + goto err_flash_remove; } count /= reg_tuple_size; @@ -213,14 +213,14 @@ static int __devinit of_flash_probe(struct of_device *dev, info = kzalloc(sizeof(struct of_flash) + sizeof(struct of_flash_list) * count, GFP_KERNEL); if (!info) - goto err_out; - - mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); - if (!info) - goto err_out; + goto err_flash_remove; dev_set_drvdata(&dev->dev, info); + mtd_list = kzalloc(sizeof(struct mtd_info) * count, GFP_KERNEL); + if (!mtd_list) + goto err_flash_remove; + for (i = 0; i < count; i++) { err = -ENXIO; if (of_address_to_resource(dp, i, &res)) { @@ -339,6 +339,7 @@ static int __devinit of_flash_probe(struct of_device *dev, err_out: kfree(mtd_list); +err_flash_remove: of_flash_remove(dev); return err; From 269c0ee66367b11de9758ee64ea039843f0c7cad Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 31 Jul 2009 14:47:58 +0200 Subject: [PATCH 0073/3409] slram: Read buffer overflow map[count] was checked before count < SLRAM_MAX_DEVICES_PARAMS, so the loop could read one element past the end of map; test the bound first so that it short-circuits the access. Signed-off-by: Roel Kluin Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/devices/slram.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c index 00248e81ecd5..239500b0b751 100644 --- a/drivers/mtd/devices/slram.c +++ b/drivers/mtd/devices/slram.c @@ -341,7 +341,7 @@ static int init_slram(void) #else int count; - for (count = 0; (map[count]) && (count < SLRAM_MAX_DEVICES_PARAMS); + for (count = 0; count < SLRAM_MAX_DEVICES_PARAMS && map[count]; count++) { } From c6f7e7beb9e6a64816f534a3a0dd0cfa4937f1fe Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Fri, 31 Jul 2009 16:21:01 +0200 Subject: [PATCH 0074/3409] mtd: tests: fix read buffer overflows
Check whether index is within bounds before testing the element. Signed-off-by: Roel Kluin Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/tests/mtd_oobtest.c | 2 +- drivers/mtd/tests/mtd_pagetest.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c index a18e8d2f2557..5553cd4eab20 100644 --- a/drivers/mtd/tests/mtd_oobtest.c +++ b/drivers/mtd/tests/mtd_oobtest.c @@ -512,7 +512,7 @@ static int __init mtd_oobtest_init(void) goto out; addr0 = 0; - for (i = 0; bbt[i] && i < ebcnt; ++i) + for (i = 0; i < ebcnt && bbt[i]; ++i) addr0 += mtd->erasesize; /* Attempt to write off end of OOB */ diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c index 9648818b9e2c..103cac480fee 100644 --- a/drivers/mtd/tests/mtd_pagetest.c +++ b/drivers/mtd/tests/mtd_pagetest.c @@ -116,11 +116,11 @@ static int verify_eraseblock(int ebnum) loff_t addr = ebnum * mtd->erasesize; addr0 = 0; - for (i = 0; bbt[i] && i < ebcnt; ++i) + for (i = 0; i < ebcnt && bbt[i]; ++i) addr0 += mtd->erasesize; addrn = mtd->size; - for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) + for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i) addrn -= mtd->erasesize; set_random_data(writebuf, mtd->erasesize); @@ -219,11 +219,11 @@ static int crosstest(void) memset(pp1, 0, pgsize * 4); addr0 = 0; - for (i = 0; bbt[i] && i < ebcnt; ++i) + for (i = 0; i < ebcnt && bbt[i]; ++i) addr0 += mtd->erasesize; addrn = mtd->size; - for (i = 0; bbt[ebcnt - i - 1] && i < ebcnt; ++i) + for (i = 0; i < ebcnt && bbt[ebcnt - i - 1]; ++i) addrn -= mtd->erasesize; /* Read 2nd-to-last page to pp1 */ @@ -317,7 +317,7 @@ static int erasecrosstest(void) ebnum = 0; addr0 = 0; - for (i = 0; bbt[i] && i < ebcnt; ++i) { + for (i = 0; i < ebcnt && bbt[i]; ++i) { addr0 += mtd->erasesize; ebnum += 1; } @@ -413,7 +413,7 @@ static int erasetest(void) ebnum = 0; addr0 = 0; - for (i = 0; bbt[i] && i < ebcnt; ++i) { + for (i = 0; i < ebcnt && bbt[i]; ++i) { addr0 += mtd->erasesize; ebnum += 1; } From b0469ea785d12a6c025fa213293d608fc41405fe Mon Sep 17 00:00:00 2001 From: Siddarth Gore Date: Tue, 4 Aug 2009 08:42:08 +0530 Subject: [PATCH 0075/3409] mtd: m25p80: add support for 3 Macronix flash chips Signed-off-by: Siddarth Gore Signed-off-by: Nicolas Pitre Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/devices/m25p80.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index 10ed195c0c1c..6d8d265ae91a 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -501,7 +501,10 @@ static struct flash_info __devinitdata m25p_data [] = { { "at26df321", 0x1f4701, 0, 64 * 1024, 64, SECT_4K, }, /* Macronix */ + { "mx25l3205d", 0xc22016, 0, 64 * 1024, 64, }, + { "mx25l6405d", 0xc22017, 0, 64 * 1024, 128, }, { "mx25l12805d", 0xc22018, 0, 64 * 1024, 256, }, + { "mx25l12855e", 0xc22618, 0, 64 * 1024, 256, }, /* Spansion -- single (large) sector size only, at least * for the chips listed here (without boot sectors). From 0acfe530a2be9192861b84566625ba9b95703226 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Fri, 7 Aug 2009 12:34:48 +0900 Subject: [PATCH 0076/3409] mtd: onenand: select MTD_PARTITIONS All of the onenand drivers depend on mtd partition support being compiled in, so just select it. 
Fixes up build breakage: drivers/built-in.o: In function `generic_onenand_remove': generic.c:(.devexit.text+0x80): undefined reference to `del_mtd_partitions' Reported-by: Randy Dunlap Signed-off-by: Paul Mundt Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/onenand/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/onenand/Kconfig b/drivers/mtd/onenand/Kconfig index 79fa79e8f8de..3a094d1bf8cc 100644 --- a/drivers/mtd/onenand/Kconfig +++ b/drivers/mtd/onenand/Kconfig @@ -5,6 +5,7 @@ menuconfig MTD_ONENAND tristate "OneNAND Device Support" depends on MTD + select MTD_PARTITIONS help This enables support for accessing all type of OneNAND flash devices. For further information see @@ -66,7 +67,6 @@ config MTD_ONENAND_2X_PROGRAM config MTD_ONENAND_SIM tristate "OneNAND simulator support" - depends on MTD_PARTITIONS help The simulator may simulate various OneNAND flash chips for the OneNAND MTD layer. From fca910883324d437a24d447b08f524fa19261a94 Mon Sep 17 00:00:00 2001 From: H Hartley Sweeten Date: Thu, 6 Aug 2009 16:05:32 -0700 Subject: [PATCH 0077/3409] mtd: make few symbols static Make mtd_group and mtd_groups static since they are only used in this file. [Amended by Artem Bityutskiy] Signed-off-by: H Hartley Sweeten Signed-off-by: Andrew Morton Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/mtdcore.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 00ebf7af7467..3f559c0f1879 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -213,11 +213,11 @@ static struct attribute *mtd_attrs[] = { NULL, }; -struct attribute_group mtd_group = { +static struct attribute_group mtd_group = { .attrs = mtd_attrs, }; -struct attribute_group *mtd_groups[] = { +static struct attribute_group *mtd_groups[] = { &mtd_group, NULL, }; From 2c78c44362bc9b7c715a3c2119b89a407c1cb739 Mon Sep 17 00:00:00 2001 From: Roel Kluin Date: Mon, 10 Aug 2009 10:16:37 +0200 Subject: [PATCH 0078/3409] mtd: pmcmsp-flash: fix error paths in init_msp_flash Cleaning up after errors in init_msp_flash(). Also cleanup_msp_flash() attempts to determine the size of msp_flash with `sizeof(msp_flash) / sizeof(struct mtd_info **)'. This will not work, since msp_flash is not an array.
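As a standalone illustration of the sizeof pitfall described above (plain user-space C, with a stub type standing in for the kernel's struct mtd_info):

	#include <stdio.h>

	struct mtd_info;	/* stub; the real type lives in the kernel */

	int main(void)
	{
		struct mtd_info **msp_flash = NULL;	/* a pointer, not an array */

		/* Both operands are the size of one pointer, so the quotient
		 * is always 1, no matter how many devices were probed: */
		printf("%zu\n", sizeof(msp_flash) / sizeof(struct mtd_info **));

		/* The element count has to be carried separately, which is
		 * why the fix loops over the driver's fcnt counter instead. */
		return 0;
	}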
Signed-off-by: Roel Kluin Signed-off-by: Artem Bityutskiy Signed-off-by: David Woodhouse --- drivers/mtd/maps/pmcmsp-flash.c | 82 +++++++++++++++++++++++++-------- 1 file changed, 62 insertions(+), 20 deletions(-) diff --git a/drivers/mtd/maps/pmcmsp-flash.c b/drivers/mtd/maps/pmcmsp-flash.c index 4768bd5459d6..c8fd8da4bc87 100644 --- a/drivers/mtd/maps/pmcmsp-flash.c +++ b/drivers/mtd/maps/pmcmsp-flash.c @@ -50,7 +50,7 @@ static int fcnt; static int __init init_msp_flash(void) { - int i, j; + int i, j, ret = -ENOMEM; int offset, coff; char *env; int pcnt; @@ -75,14 +75,16 @@ static int __init init_msp_flash(void) printk(KERN_NOTICE "Found %d PMC flash devices\n", fcnt); msp_flash = kmalloc(fcnt * sizeof(struct map_info *), GFP_KERNEL); - msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); - msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); - if (!msp_flash || !msp_parts || !msp_maps) { - kfree(msp_maps); - kfree(msp_parts); - kfree(msp_flash); + if (!msp_flash) return -ENOMEM; - } + + msp_parts = kmalloc(fcnt * sizeof(struct mtd_partition *), GFP_KERNEL); + if (!msp_parts) + goto free_msp_flash; + + msp_maps = kcalloc(fcnt, sizeof(struct mtd_info), GFP_KERNEL); + if (!msp_maps) + goto free_msp_parts; /* loop over the flash devices, initializing each */ for (i = 0; i < fcnt; i++) { @@ -100,13 +102,18 @@ static int __init init_msp_flash(void) msp_parts[i] = kcalloc(pcnt, sizeof(struct mtd_partition), GFP_KERNEL); + if (!msp_parts[i]) + goto cleanup_loop; /* now initialize the devices proper */ flash_name[5] = '0' + i; env = prom_getenv(flash_name); - if (sscanf(env, "%x:%x", &addr, &size) < 2) - return -ENXIO; + if (sscanf(env, "%x:%x", &addr, &size) < 2) { + ret = -ENXIO; + kfree(msp_parts[i]); + goto cleanup_loop; + } addr = CPHYSADDR(addr); printk(KERN_NOTICE @@ -122,13 +129,23 @@ static int __init init_msp_flash(void) */ if (size > CONFIG_MSP_FLASH_MAP_LIMIT) size = CONFIG_MSP_FLASH_MAP_LIMIT; - msp_maps[i].virt = ioremap(addr, size); - msp_maps[i].bankwidth = 1; - msp_maps[i].name = strncpy(kmalloc(7, GFP_KERNEL), - flash_name, 7); - if (msp_maps[i].virt == NULL) - return -ENXIO; + msp_maps[i].virt = ioremap(addr, size); + if (msp_maps[i].virt == NULL) { + ret = -ENXIO; + kfree(msp_parts[i]); + goto cleanup_loop; + } + + msp_maps[i].bankwidth = 1; + msp_maps[i].name = kmalloc(7, GFP_KERNEL); + if (!msp_maps[i].name) { + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + goto cleanup_loop; + } + + msp_maps[i].name = strncpy(msp_maps[i].name, flash_name, 7); for (j = 0; j < pcnt; j++) { part_name[5] = '0' + i; @@ -136,8 +153,14 @@ static int __init init_msp_flash(void) env = prom_getenv(part_name); - if (sscanf(env, "%x:%x:%n", &offset, &size, &coff) < 2) - return -ENXIO; + if (sscanf(env, "%x:%x:%n", &offset, &size, + &coff) < 2) { + ret = -ENXIO; + kfree(msp_maps[i].name); + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + goto cleanup_loop; + } msp_parts[i][j].size = size; msp_parts[i][j].offset = offset; @@ -152,18 +175,37 @@ static int __init init_msp_flash(void) add_mtd_partitions(msp_flash[i], msp_parts[i], pcnt); } else { printk(KERN_ERR "map probe failed for flash\n"); - return -ENXIO; + ret = -ENXIO; + kfree(msp_maps[i].name); + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + goto cleanup_loop; } } return 0; + +cleanup_loop: + while (i--) { + del_mtd_partitions(msp_flash[i]); + map_destroy(msp_flash[i]); + kfree(msp_maps[i].name); + iounmap(msp_maps[i].virt); + kfree(msp_parts[i]); + } + kfree(msp_maps); +free_msp_parts: + 
kfree(msp_parts); +free_msp_flash: + kfree(msp_flash); + return ret; } static void __exit cleanup_msp_flash(void) { int i; - for (i = 0; i < sizeof(msp_flash) / sizeof(struct mtd_info **); i++) { + for (i = 0; i < fcnt; i++) { del_mtd_partitions(msp_flash[i]); map_destroy(msp_flash[i]); iounmap((void *)msp_maps[i].virt); From 91e0955b57377578f7555b5d0f2a21040691004b Mon Sep 17 00:00:00 2001 From: Gerard Lledo Date: Wed, 17 Jun 2009 13:08:55 -0700 Subject: [PATCH 0079/3409] jffs2: move jffs2_gcd_mtd threads to the new kthread API Move the jffs2 garbage collecting thread to the new kthread API. Signed-off-by: Gerard Lledo Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse --- fs/jffs2/background.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c index e9580104b6ba..3ff50da94789 100644 --- a/fs/jffs2/background.c +++ b/fs/jffs2/background.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "nodelist.h" @@ -31,7 +32,7 @@ void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c) /* This must only ever be called when no GC thread is currently running */ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) { - pid_t pid; + struct task_struct *tsk; int ret = 0; BUG_ON(c->gc_task); @@ -39,15 +40,16 @@ int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c) init_completion(&c->gc_thread_start); init_completion(&c->gc_thread_exit); - pid = kernel_thread(jffs2_garbage_collect_thread, c, CLONE_FS|CLONE_FILES); - if (pid < 0) { - printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %d\n", -pid); + tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index); + if (IS_ERR(tsk)) { + printk(KERN_WARNING "fork failed for JFFS2 garbage collect thread: %ld\n", -PTR_ERR(tsk)); complete(&c->gc_thread_exit); - ret = pid; + ret = PTR_ERR(tsk); } else { /* Wait for it... */ - D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", pid)); + D1(printk(KERN_DEBUG "JFFS2: Garbage collect thread is pid %d\n", tsk->pid)); wait_for_completion(&c->gc_thread_start); + ret = tsk->pid; } return ret; @@ -71,7 +73,6 @@ static int jffs2_garbage_collect_thread(void *_c) { struct jffs2_sb_info *c = _c; - daemonize("jffs2_gcd_mtd%d", c->mtd->index); allow_signal(SIGKILL); allow_signal(SIGSTOP); allow_signal(SIGCONT); @@ -107,6 +108,11 @@ static int jffs2_garbage_collect_thread(void *_c) * the GC thread get there first. */ schedule_timeout_interruptible(msecs_to_jiffies(50)); + if (kthread_should_stop()) { + D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread(): kthread_stop() called.\n")); + goto die; + } + /* Put_super will send a SIGKILL and then wait on the sem. 
*/ while (signal_pending(current) || freezing(current)) { From 32bb0e0c778a4a6cd4534a5b98f08cd45e9ab5b9 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 23 Jul 2009 15:50:43 +0200 Subject: [PATCH 0080/3409] wm97xx-core: Pass platform_data to battery Signed-off-by: Marek Vasut Acked-by: Dmitry Torokhov Signed-off-by: Anton Vorontsov --- drivers/input/touchscreen/wm97xx-core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 2957d48e0045..cf8cbc60d5ce 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -561,6 +561,7 @@ static void wm97xx_ts_input_close(struct input_dev *idev) static int wm97xx_probe(struct device *dev) { struct wm97xx *wm; + struct wm97xx_pdata *pdata = dev->platform_data; int ret = 0, id = 0; wm = kzalloc(sizeof(struct wm97xx), GFP_KERNEL); @@ -656,6 +657,7 @@ static int wm97xx_probe(struct device *dev) } platform_set_drvdata(wm->battery_dev, wm); wm->battery_dev->dev.parent = dev; + wm->battery_dev->dev.platform_data = pdata; ret = platform_device_add(wm->battery_dev); if (ret < 0) goto batt_reg_err; @@ -669,6 +671,7 @@ static int wm97xx_probe(struct device *dev) } platform_set_drvdata(wm->touch_dev, wm); wm->touch_dev->dev.parent = dev; + wm->touch_dev->dev.platform_data = pdata; ret = platform_device_add(wm->touch_dev); if (ret < 0) goto touch_reg_err; From b8bdc1d0cfc488ac0d94724639f9a61b0a5a1d40 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Mon, 31 Aug 2009 06:20:12 +0200 Subject: [PATCH 0081/3409] wm97xx_battery: Use platform_data This patch converts the wm97xx-battery driver to use platform_data supplied by the ac97 bus. Signed-off-by: Marek Vasut Signed-off-by: Anton Vorontsov --- drivers/power/wm97xx_battery.c | 32 +++++++++++++++++++++++++++----- include/linux/wm97xx.h | 18 ++++++++++++++++++ include/linux/wm97xx_batt.h | 18 ++++-------------- 3 files changed, 49 insertions(+), 19 deletions(-) diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 8bde92126d34..14ebd960ebe5 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -22,17 +22,19 @@ #include #include #include -#include static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; struct mutex work_lock; static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN; -static struct wm97xx_batt_info *pdata; +static struct wm97xx_batt_info *gpdata; static enum power_supply_property *prop; static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, pdata->batt_aux) * pdata->batt_mult / pdata->batt_div; @@ -40,6 +42,9 @@ static unsigned long wm97xx_read_bat(struct power_supply *bat_ps) static unsigned long wm97xx_read_temp(struct power_supply *bat_ps) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + return wm97xx_read_aux_adc(bat_ps->dev->parent->driver_data, pdata->temp_aux) * pdata->temp_mult / pdata->temp_div; @@ -49,6 +54,9 @@ static int wm97xx_bat_get_property(struct power_supply *bat_ps, enum power_supply_property psp, union power_supply_propval *val) { + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval =
bat_status; @@ -97,6 +105,8 @@ static void wm97xx_bat_external_power_changed(struct power_supply *bat_ps) static void wm97xx_bat_update(struct power_supply *bat_ps) { int old_status = bat_status; + struct wm97xx_pdata *wmdata = bat_ps->dev->parent->platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; mutex_lock(&work_lock); @@ -149,6 +159,15 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) int ret = 0; int props = 1; /* POWER_SUPPLY_PROP_PRESENT */ int i = 0; + struct wm97xx_pdata *wmdata = dev->dev.platform_data; + struct wm97xx_batt_pdata *pdata; + + if (gpdata) { + dev_err(&dev->dev, "Do not pass platform_data through " + "wm97xx_bat_set_pdata!\n"); + return -EINVAL; + } else + pdata = wmdata->batt_pdata; if (dev->id != -1) return -EINVAL; @@ -156,7 +175,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) mutex_init(&work_lock); if (!pdata) { - dev_err(&dev->dev, "Please use wm97xx_bat_set_pdata\n"); + dev_err(&dev->dev, "No platform_data supplied\n"); return -EINVAL; } @@ -229,6 +248,9 @@ err: static int __devexit wm97xx_bat_remove(struct platform_device *dev) { + struct wm97xx_pdata *wmdata = dev->dev.platform_data; + struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; + if (pdata && pdata->charge_gpio && pdata->charge_gpio >= 0) gpio_free(pdata->charge_gpio); flush_scheduled_work(); @@ -258,9 +280,9 @@ static void __exit wm97xx_bat_exit(void) platform_driver_unregister(&wm97xx_bat_driver); } -void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) { - pdata = data; + gpdata = data; } EXPORT_SYMBOL_GPL(wm97xx_bat_set_pdata); diff --git a/include/linux/wm97xx.h b/include/linux/wm97xx.h index 6f69968eab24..b2c2297844e3 100644 --- a/include/linux/wm97xx.h +++ b/include/linux/wm97xx.h @@ -286,6 +286,24 @@ struct wm97xx { u16 suspend_mode; /* PRP in suspend mode */ }; +struct wm97xx_batt_pdata { + int batt_aux; + int temp_aux; + int charge_gpio; + int min_voltage; + int max_voltage; + int batt_div; + int batt_mult; + int temp_div; + int temp_mult; + int batt_tech; + char *batt_name; +}; + +struct wm97xx_pdata { + struct wm97xx_batt_pdata *batt_pdata; /* battery data */ +}; + /* * Codec GPIO access (not supported on WM9705) * This can be used to set/get codec GPIO and Virtual GPIO status. diff --git a/include/linux/wm97xx_batt.h b/include/linux/wm97xx_batt.h index 9681d1ab0e4f..a1d6419c2ff8 100644 --- a/include/linux/wm97xx_batt.h +++ b/include/linux/wm97xx_batt.h @@ -3,22 +3,12 @@ #include -struct wm97xx_batt_info { - int batt_aux; - int temp_aux; - int charge_gpio; - int min_voltage; - int max_voltage; - int batt_div; - int batt_mult; - int temp_div; - int temp_mult; - int batt_tech; - char *batt_name; -}; +#warning This file will be removed soon, use wm97xx.h instead! 
+ +#define wm97xx_batt_info wm97xx_batt_pdata #ifdef CONFIG_BATTERY_WM97XX -void __init wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); +void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data); #else static inline void wm97xx_bat_set_pdata(struct wm97xx_batt_info *data) {} #endif From 7c87942aef52d2120e95ff1dec739998b9f95a78 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 23 Jul 2009 16:02:08 +0200 Subject: [PATCH 0082/3409] wm97xx_battery: Use irq to detect charger state Signed-off-by: Marek Vasut Signed-off-by: Anton Vorontsov --- drivers/power/wm97xx_battery.c | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index 14ebd960ebe5..cad1ba283bfd 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -22,6 +22,7 @@ #include #include #include +#include static DEFINE_MUTEX(bat_lock); static struct work_struct bat_work; @@ -137,6 +138,12 @@ static void wm97xx_bat_work(struct work_struct *work) wm97xx_bat_update(&bat_ps); } +static irqreturn_t wm97xx_chrg_irq(int irq, void *data) +{ + schedule_work(&bat_work); + return IRQ_HANDLED; +} + #ifdef CONFIG_PM static int wm97xx_bat_suspend(struct platform_device *dev, pm_message_t state) { @@ -179,11 +186,16 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) return -EINVAL; } - if (pdata->charge_gpio >= 0 && gpio_is_valid(pdata->charge_gpio)) { + if (gpio_is_valid(pdata->charge_gpio)) { ret = gpio_request(pdata->charge_gpio, "BATT CHRG"); if (ret) goto err; ret = gpio_direction_input(pdata->charge_gpio); + if (ret) + goto err2; + ret = request_irq(gpio_to_irq(pdata->charge_gpio), + wm97xx_chrg_irq, IRQF_DISABLED, + "AC Detect", 0); if (ret) goto err2; props++; /* POWER_SUPPLY_PROP_STATUS */ @@ -202,7 +214,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) prop = kzalloc(props * sizeof(*prop), GFP_KERNEL); if (!prop) - goto err2; + goto err3; prop[i++] = POWER_SUPPLY_PROP_PRESENT; if (pdata->charge_gpio >= 0) @@ -235,13 +247,17 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev) if (!ret) schedule_work(&bat_work); else - goto err3; + goto err4; return 0; -err3: +err4: kfree(prop); +err3: + if (gpio_is_valid(pdata->charge_gpio)) + free_irq(gpio_to_irq(pdata->charge_gpio), dev); err2: - gpio_free(pdata->charge_gpio); + if (gpio_is_valid(pdata->charge_gpio)) + gpio_free(pdata->charge_gpio); err: return ret; } @@ -251,8 +267,10 @@ static int __devexit wm97xx_bat_remove(struct platform_device *dev) struct wm97xx_pdata *wmdata = dev->dev.platform_data; struct wm97xx_batt_pdata *pdata = wmdata->batt_pdata; - if (pdata && pdata->charge_gpio && pdata->charge_gpio >= 0) + if (pdata && gpio_is_valid(pdata->charge_gpio)) { + free_irq(gpio_to_irq(pdata->charge_gpio), dev); gpio_free(pdata->charge_gpio); + } flush_scheduled_work(); power_supply_unregister(&bat_ps); kfree(prop); From 83a8af0d31cfa6c728a68c00f6b1b518e2dcc03d Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Sat, 22 Aug 2009 00:36:43 +0200 Subject: [PATCH 0083/3409] wm97xx_battery: Convert to dev_pm_ops Signed-off-by: Marek Vasut Signed-off-by: Anton Vorontsov --- drivers/power/wm97xx_battery.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c index cad1ba283bfd..c552082c8128 100644 --- a/drivers/power/wm97xx_battery.c +++ b/drivers/power/wm97xx_battery.c @@ -145,20 +145,22 @@ static irqreturn_t 
wm97xx_chrg_irq(int irq, void *data) } #ifdef CONFIG_PM -static int wm97xx_bat_suspend(struct platform_device *dev, pm_message_t state) +static int wm97xx_bat_suspend(struct device *dev) { flush_scheduled_work(); return 0; } -static int wm97xx_bat_resume(struct platform_device *dev) +static int wm97xx_bat_resume(struct device *dev) { schedule_work(&bat_work); return 0; } -#else -#define wm97xx_bat_suspend NULL -#define wm97xx_bat_resume NULL + +static struct dev_pm_ops wm97xx_bat_pm_ops = { + .suspend = wm97xx_bat_suspend, + .resume = wm97xx_bat_resume, +}; #endif static int __devinit wm97xx_bat_probe(struct platform_device *dev) @@ -281,11 +283,12 @@ static struct platform_driver wm97xx_bat_driver = { .driver = { .name = "wm97xx-battery", .owner = THIS_MODULE, +#ifdef CONFIG_PM + .pm = &wm97xx_bat_pm_ops, +#endif }, .probe = wm97xx_bat_probe, .remove = __devexit_p(wm97xx_bat_remove), - .suspend = wm97xx_bat_suspend, - .resume = wm97xx_bat_resume, }; From b0525b48f06714e8d5cf6a3266261b71de8d6dd4 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Fri, 24 Jul 2009 15:08:11 +0200 Subject: [PATCH 0084/3409] ds2760_battery: Fix integer overflow for time_to_empty_now On the device we're currently developing, battery sizes of ~2.8Ah and current flow of ~600mA are typical. With those values the life_sec computation overflows due to the multiplication by 3600: 2.8Ah is 2,800,000uAh, and 2,800,000 * 3600 is roughly 1.0e10, far beyond the ~2.1e9 range of a 32-bit signed long. Signed-off-by: Daniel Mack Cc: Szabolcs Gyurko Cc: Matt Reimer Signed-off-by: Anton Vorontsov --- drivers/power/ds2760_battery.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index 1bb8498f14be..6f1dba5a519d 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c @@ -211,9 +211,9 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) if (di->rem_capacity > 100) di->rem_capacity = 100; - if (di->current_uA) - di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * - 3600L) / di->current_uA; + if (di->current_uA >= 100L) + di->life_sec = -((di->accum_current_uAh - di->empty_uAh) * 36L) + / (di->current_uA / 100L); else di->life_sec = 0; From 3961f7c3cf247eee5df7fabadc7a40f2deeb98f3 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 10 Aug 2009 17:43:53 +0100 Subject: [PATCH 0085/3409] power_supply: Add driver for the PMU on WM831x PMICs The WM831x PMICs provide power path management from three sources: a wall supply, USB and a battery with integrated charger. They also provide an additional backup supply with an integrated charger for maintaining always-on functionality such as the RTC and monitoring of power switches. After some initial configuration at startup the device operates autonomously; the driver simply reports the current state. Signed-off-by: Mark Brown Signed-off-by: Anton Vorontsov --- drivers/power/Kconfig | 7 + drivers/power/Makefile | 1 + drivers/power/wm831x_power.c | 779 +++++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/pmu.h | 189 ++++++++ 4 files changed, 976 insertions(+) create mode 100644 drivers/power/wm831x_power.c create mode 100644 include/linux/mfd/wm831x/pmu.h diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index bdbc4f73fcdc..cea6cef27e89 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig @@ -29,6 +29,13 @@ config APM_POWER Say Y here to enable support APM status emulation using battery class devices.
+config WM831X_POWER + tristate "WM831X PMU support" + depends on MFD_WM831X + help + Say Y here to enable support for the power management unit + provided by Wolfson Microelectronics WM831x PMICs. + config WM8350_POWER tristate "WM8350 PMU support" depends on MFD_WM8350 diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 380d17c9ae29..b96f29d91c28 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_POWER_SUPPLY) += power_supply.o obj-$(CONFIG_PDA_POWER) += pda_power.o obj-$(CONFIG_APM_POWER) += apm_power.o +obj-$(CONFIG_WM831X_POWER) += wm831x_power.o obj-$(CONFIG_WM8350_POWER) += wm8350_power.o obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c new file mode 100644 index 000000000000..2a4c8b0b829c --- /dev/null +++ b/drivers/power/wm831x_power.c @@ -0,0 +1,779 @@ +/* + * PMU driver for Wolfson Microelectronics wm831x PMICs + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +struct wm831x_power { + struct wm831x *wm831x; + struct power_supply wall; + struct power_supply backup; + struct power_supply usb; + struct power_supply battery; +}; + +static int wm831x_power_check_online(struct wm831x *wm831x, int supply, + union power_supply_propval *val) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS); + if (ret < 0) + return ret; + + if (ret & supply) + val->intval = 1; + else + val->intval = 0; + + return 0; +} + +static int wm831x_power_read_voltage(struct wm831x *wm831x, + enum wm831x_auxadc src, + union power_supply_propval *val) +{ + int ret; + + ret = wm831x_auxadc_read_uv(wm831x, src); + if (ret >= 0) + val->intval = ret; + + return ret; +} + +/********************************************************************* + * WALL Power + *********************************************************************/ +static int wm831x_wall_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_WALL, val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_WALL, val); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_wall_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, +}; + +/********************************************************************* + * USB Power + *********************************************************************/ +static int wm831x_usb_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_USB, val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_USB, val); + break; + default: + ret = -EINVAL; + 
break; + } + + return ret; +} + +static enum power_supply_property wm831x_usb_props[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, +}; + +/********************************************************************* + * Battery properties + *********************************************************************/ + +struct chg_map { + int val; + int reg_val; +}; + +static struct chg_map trickle_ilims[] = { + { 50, 0 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 100, 1 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 150, 2 << WM831X_CHG_TRKL_ILIM_SHIFT }, + { 200, 3 << WM831X_CHG_TRKL_ILIM_SHIFT }, +}; + +static struct chg_map vsels[] = { + { 4050, 0 << WM831X_CHG_VSEL_SHIFT }, + { 4100, 1 << WM831X_CHG_VSEL_SHIFT }, + { 4150, 2 << WM831X_CHG_VSEL_SHIFT }, + { 4200, 3 << WM831X_CHG_VSEL_SHIFT }, +}; + +static struct chg_map fast_ilims[] = { + { 0, 0 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 50, 1 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 100, 2 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 150, 3 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 200, 4 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 250, 5 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 300, 6 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 350, 7 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 400, 8 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 450, 9 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 500, 10 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 600, 11 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 700, 12 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 800, 13 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 900, 14 << WM831X_CHG_FAST_ILIM_SHIFT }, + { 1000, 15 << WM831X_CHG_FAST_ILIM_SHIFT }, +}; + +static struct chg_map eoc_iterms[] = { + { 20, 0 << WM831X_CHG_ITERM_SHIFT }, + { 30, 1 << WM831X_CHG_ITERM_SHIFT }, + { 40, 2 << WM831X_CHG_ITERM_SHIFT }, + { 50, 3 << WM831X_CHG_ITERM_SHIFT }, + { 60, 4 << WM831X_CHG_ITERM_SHIFT }, + { 70, 5 << WM831X_CHG_ITERM_SHIFT }, + { 80, 6 << WM831X_CHG_ITERM_SHIFT }, + { 90, 7 << WM831X_CHG_ITERM_SHIFT }, +}; + +static struct chg_map chg_times[] = { + { 60, 0 << WM831X_CHG_TIME_SHIFT }, + { 90, 1 << WM831X_CHG_TIME_SHIFT }, + { 120, 2 << WM831X_CHG_TIME_SHIFT }, + { 150, 3 << WM831X_CHG_TIME_SHIFT }, + { 180, 4 << WM831X_CHG_TIME_SHIFT }, + { 210, 5 << WM831X_CHG_TIME_SHIFT }, + { 240, 6 << WM831X_CHG_TIME_SHIFT }, + { 270, 7 << WM831X_CHG_TIME_SHIFT }, + { 300, 8 << WM831X_CHG_TIME_SHIFT }, + { 330, 9 << WM831X_CHG_TIME_SHIFT }, + { 360, 10 << WM831X_CHG_TIME_SHIFT }, + { 390, 11 << WM831X_CHG_TIME_SHIFT }, + { 420, 12 << WM831X_CHG_TIME_SHIFT }, + { 450, 13 << WM831X_CHG_TIME_SHIFT }, + { 480, 14 << WM831X_CHG_TIME_SHIFT }, + { 510, 15 << WM831X_CHG_TIME_SHIFT }, +}; + +static void wm831x_battey_apply_config(struct wm831x *wm831x, + struct chg_map *map, int count, int val, + int *reg, const char *name, + const char *units) +{ + int i; + + for (i = 0; i < count; i++) + if (val == map[i].val) + break; + if (i == count) { + dev_err(wm831x->dev, "Invalid %s %d%s\n", + name, val, units); + } else { + *reg |= map[i].reg_val; + dev_dbg(wm831x->dev, "Set %s of %d%s\n", name, val, units); + } +} + +static void wm831x_config_battery(struct wm831x *wm831x) +{ + struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; + struct wm831x_battery_pdata *pdata; + int ret, reg1, reg2; + + if (!wm831x_pdata || !wm831x_pdata->battery) { + dev_warn(wm831x->dev, + "No battery charger configuration\n"); + return; + } + + pdata = wm831x_pdata->battery; + + reg1 = 0; + reg2 = 0; + + if (!pdata->enable) { + dev_info(wm831x->dev, "Battery charger disabled\n"); + return; + } + + reg1 |= WM831X_CHG_ENA; + if (pdata->off_mask) + reg2 |= 
WM831X_CHG_OFF_MSK; + if (pdata->fast_enable) + reg1 |= WM831X_CHG_FAST; + + wm831x_battey_apply_config(wm831x, trickle_ilims, + ARRAY_SIZE(trickle_ilims), + pdata->trickle_ilim, ®2, + "trickle charge current limit", "mA"); + + wm831x_battey_apply_config(wm831x, vsels, ARRAY_SIZE(vsels), + pdata->vsel, ®2, + "target voltage", "mV"); + + wm831x_battey_apply_config(wm831x, fast_ilims, ARRAY_SIZE(fast_ilims), + pdata->fast_ilim, ®2, + "fast charge current limit", "mA"); + + wm831x_battey_apply_config(wm831x, eoc_iterms, ARRAY_SIZE(eoc_iterms), + pdata->eoc_iterm, ®1, + "end of charge current threshold", "mA"); + + wm831x_battey_apply_config(wm831x, chg_times, ARRAY_SIZE(chg_times), + pdata->timeout, ®2, + "charger timeout", "min"); + + ret = wm831x_reg_unlock(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); + return; + } + + ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_1, + WM831X_CHG_ENA_MASK | + WM831X_CHG_FAST_MASK | + WM831X_CHG_ITERM_MASK | + WM831X_CHG_ITERM_MASK, + reg1); + if (ret != 0) + dev_err(wm831x->dev, "Failed to set charger control 1: %d\n", + ret); + + ret = wm831x_set_bits(wm831x, WM831X_CHARGER_CONTROL_2, + WM831X_CHG_OFF_MSK | + WM831X_CHG_TIME_MASK | + WM831X_CHG_FAST_ILIM_MASK | + WM831X_CHG_TRKL_ILIM_MASK | + WM831X_CHG_VSEL_MASK, + reg2); + if (ret != 0) + dev_err(wm831x->dev, "Failed to set charger control 2: %d\n", + ret); + + wm831x_reg_lock(wm831x); +} + +static int wm831x_bat_check_status(struct wm831x *wm831x, int *status) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_SYSTEM_STATUS); + if (ret < 0) + return ret; + + if (ret & WM831X_PWR_SRC_BATT) { + *status = POWER_SUPPLY_STATUS_DISCHARGING; + return 0; + } + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_OFF: + *status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + case WM831X_CHG_STATE_TRICKLE: + case WM831X_CHG_STATE_FAST: + *status = POWER_SUPPLY_STATUS_CHARGING; + break; + + default: + *status = POWER_SUPPLY_STATUS_UNKNOWN; + break; + } + + return 0; +} + +static int wm831x_bat_check_type(struct wm831x *wm831x, int *type) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_TRICKLE: + case WM831X_CHG_STATE_TRICKLE_OT: + *type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + break; + case WM831X_CHG_STATE_FAST: + case WM831X_CHG_STATE_FAST_OT: + *type = POWER_SUPPLY_CHARGE_TYPE_FAST; + break; + default: + *type = POWER_SUPPLY_CHARGE_TYPE_NONE; + break; + } + + return 0; +} + +static int wm831x_bat_check_health(struct wm831x *wm831x, int *health) +{ + int ret; + + ret = wm831x_reg_read(wm831x, WM831X_CHARGER_STATUS); + if (ret < 0) + return ret; + + if (ret & WM831X_BATT_HOT_STS) { + *health = POWER_SUPPLY_HEALTH_OVERHEAT; + return 0; + } + + if (ret & WM831X_BATT_COLD_STS) { + *health = POWER_SUPPLY_HEALTH_COLD; + return 0; + } + + if (ret & WM831X_BATT_OV_STS) { + *health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; + return 0; + } + + switch (ret & WM831X_CHG_STATE_MASK) { + case WM831X_CHG_STATE_TRICKLE_OT: + case WM831X_CHG_STATE_FAST_OT: + *health = POWER_SUPPLY_HEALTH_OVERHEAT; + break; + case WM831X_CHG_STATE_DEFECTIVE: + *health = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; + break; + default: + *health = POWER_SUPPLY_HEALTH_GOOD; + break; + } + + return 0; +} + +static int wm831x_bat_get_prop(struct power_supply *psy, + enum power_supply_property psp, + 
union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + ret = wm831x_bat_check_status(wm831x, &val->intval); + break; + case POWER_SUPPLY_PROP_ONLINE: + ret = wm831x_power_check_online(wm831x, WM831X_PWR_SRC_BATT, + val); + break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BATT, val); + break; + case POWER_SUPPLY_PROP_HEALTH: + ret = wm831x_bat_check_health(wm831x, &val->intval); + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + ret = wm831x_bat_check_type(wm831x, &val->intval); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_bat_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CHARGE_TYPE, +}; + +static const char *wm831x_bat_irqs[] = { + "BATT HOT", + "BATT COLD", + "BATT FAIL", + "OV", + "END", + "TO", + "MODE", + "START", +}; + +static irqreturn_t wm831x_bat_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + dev_dbg(wm831x->dev, "Battery status changed: %d\n", irq); + + /* The battery charger is autonomous so we don't need to do + * anything except kick user space */ + power_supply_changed(&wm831x_power->battery); + + return IRQ_HANDLED; +} + + +/********************************************************************* + * Backup supply properties + *********************************************************************/ + +static void wm831x_config_backup(struct wm831x *wm831x) +{ + struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; + struct wm831x_backup_pdata *pdata; + int ret, reg; + + if (!wm831x_pdata || !wm831x_pdata->backup) { + dev_warn(wm831x->dev, + "No backup battery charger configuration\n"); + return; + } + + pdata = wm831x_pdata->backup; + + reg = 0; + + if (pdata->charger_enable) + reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; + if (pdata->no_constant_voltage) + reg |= WM831X_BKUP_CHG_MODE; + + switch (pdata->vlim) { + case 2500: + break; + case 3100: + reg |= WM831X_BKUP_CHG_VLIM; + break; + default: + dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", + pdata->vlim); + } + + switch (pdata->ilim) { + case 100: + break; + case 200: + reg |= 1; + break; + case 300: + reg |= 2; + break; + case 400: + reg |= 3; + break; + default: + dev_err(wm831x->dev, "Invalid backup current limit %duA\n", + pdata->ilim); + } + + ret = wm831x_reg_unlock(wm831x); + if (ret != 0) { + dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret); + return; + } + + ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, + WM831X_BKUP_CHG_ENA_MASK | + WM831X_BKUP_CHG_MODE_MASK | + WM831X_BKUP_BATT_DET_ENA_MASK | + WM831X_BKUP_CHG_VLIM_MASK | + WM831X_BKUP_CHG_ILIM_MASK, + reg); + if (ret != 0) + dev_err(wm831x->dev, + "Failed to set backup charger config: %d\n", ret); + + wm831x_reg_lock(wm831x); +} + +static int wm831x_backup_get_prop(struct power_supply *psy, + enum power_supply_property psp, + union power_supply_propval *val) +{ + struct wm831x_power *wm831x_power = dev_get_drvdata(psy->dev->parent); + struct wm831x *wm831x = wm831x_power->wm831x; + int ret = 0; + + ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); + if (ret < 0) + return ret; + + switch (psp) { + case POWER_SUPPLY_PROP_STATUS: + if 
(ret & WM831X_BKUP_CHG_STS) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = wm831x_power_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, + val); + break; + + case POWER_SUPPLY_PROP_PRESENT: + if (ret & WM831X_BKUP_CHG_STS) + val->intval = 1; + else + val->intval = 0; + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static enum power_supply_property wm831x_backup_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_PRESENT, +}; + +/********************************************************************* + * Initialisation + *********************************************************************/ + +static irqreturn_t wm831x_syslo_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + /* Not much we can actually *do* but tell people for + * posterity, we're probably about to run out of power. */ + dev_crit(wm831x->dev, "SYSVDD under voltage\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t wm831x_pwr_src_irq(int irq, void *data) +{ + struct wm831x_power *wm831x_power = data; + struct wm831x *wm831x = wm831x_power->wm831x; + + dev_dbg(wm831x->dev, "Power source changed\n"); + + /* Just notify for everything - little harm in overnotifying. + * The backup battery is not a power source while the system + * is running so skip that. + */ + power_supply_changed(&wm831x_power->battery); + power_supply_changed(&wm831x_power->usb); + power_supply_changed(&wm831x_power->wall); + + return IRQ_HANDLED; +} + +static __devinit int wm831x_power_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_power *power; + struct power_supply *usb; + struct power_supply *battery; + struct power_supply *wall; + struct power_supply *backup; + int ret, irq, i; + + power = kzalloc(sizeof(struct wm831x_power), GFP_KERNEL); + if (power == NULL) + return -ENOMEM; + + power->wm831x = wm831x; + platform_set_drvdata(pdev, power); + + usb = &power->usb; + battery = &power->battery; + wall = &power->wall; + backup = &power->backup; + + /* We ignore configuration failures since we can still read back + * the status without enabling either of the chargers. 
+ */ + wm831x_config_battery(wm831x); + wm831x_config_backup(wm831x); + + wall->name = "wm831x-wall"; + wall->type = POWER_SUPPLY_TYPE_MAINS; + wall->properties = wm831x_wall_props; + wall->num_properties = ARRAY_SIZE(wm831x_wall_props); + wall->get_property = wm831x_wall_get_prop; + ret = power_supply_register(&pdev->dev, wall); + if (ret) + goto err_kmalloc; + + battery->name = "wm831x-battery"; + battery->properties = wm831x_bat_props; + battery->num_properties = ARRAY_SIZE(wm831x_bat_props); + battery->get_property = wm831x_bat_get_prop; + battery->use_for_apm = 1; + ret = power_supply_register(&pdev->dev, battery); + if (ret) + goto err_wall; + + usb->name = "wm831x-usb", + usb->type = POWER_SUPPLY_TYPE_USB; + usb->properties = wm831x_usb_props; + usb->num_properties = ARRAY_SIZE(wm831x_usb_props); + usb->get_property = wm831x_usb_get_prop; + ret = power_supply_register(&pdev->dev, usb); + if (ret) + goto err_battery; + + backup->name = "wm831x-backup"; + backup->type = POWER_SUPPLY_TYPE_BATTERY; + backup->properties = wm831x_backup_props; + backup->num_properties = ARRAY_SIZE(wm831x_backup_props); + backup->get_property = wm831x_backup_get_prop; + ret = power_supply_register(&pdev->dev, backup); + if (ret) + goto err_usb; + + irq = platform_get_irq_byname(pdev, "SYSLO"); + ret = wm831x_request_irq(wm831x, irq, wm831x_syslo_irq, + IRQF_TRIGGER_RISING, "SYSLO", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n", + irq, ret); + goto err_backup; + } + + irq = platform_get_irq_byname(pdev, "PWR SRC"); + ret = wm831x_request_irq(wm831x, irq, wm831x_pwr_src_irq, + IRQF_TRIGGER_RISING, "Power source", + power); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request PWR SRC IRQ %d: %d\n", + irq, ret); + goto err_syslo; + } + + for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + ret = wm831x_request_irq(wm831x, irq, wm831x_bat_irq, + IRQF_TRIGGER_RISING, + wm831x_bat_irqs[i], + power); + if (ret != 0) { + dev_err(&pdev->dev, + "Failed to request %s IRQ %d: %d\n", + wm831x_bat_irqs[i], irq, ret); + goto err_bat_irq; + } + } + + return ret; + +err_bat_irq: + for (; i >= 0; i--) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + wm831x_free_irq(wm831x, irq, power); + } + irq = platform_get_irq_byname(pdev, "PWR SRC"); + wm831x_free_irq(wm831x, irq, power); +err_syslo: + irq = platform_get_irq_byname(pdev, "SYSLO"); + wm831x_free_irq(wm831x, irq, power); +err_backup: + power_supply_unregister(backup); +err_usb: + power_supply_unregister(usb); +err_battery: + power_supply_unregister(battery); +err_wall: + power_supply_unregister(wall); +err_kmalloc: + kfree(power); + return ret; +} + +static __devexit int wm831x_power_remove(struct platform_device *pdev) +{ + struct wm831x_power *wm831x_power = platform_get_drvdata(pdev); + struct wm831x *wm831x = wm831x_power->wm831x; + int irq, i; + + for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { + irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); + wm831x_free_irq(wm831x, irq, wm831x_power); + } + + irq = platform_get_irq_byname(pdev, "PWR SRC"); + wm831x_free_irq(wm831x, irq, wm831x_power); + + irq = platform_get_irq_byname(pdev, "SYSLO"); + wm831x_free_irq(wm831x, irq, wm831x_power); + + power_supply_unregister(&wm831x_power->backup); + power_supply_unregister(&wm831x_power->battery); + power_supply_unregister(&wm831x_power->wall); + power_supply_unregister(&wm831x_power->usb); + return 0; +} + +static struct platform_driver 
wm831x_power_driver = { + .probe = wm831x_power_probe, + .remove = __devexit_p(wm831x_power_remove), + .driver = { + .name = "wm831x-power", + }, +}; + +static int __init wm831x_power_init(void) +{ + return platform_driver_register(&wm831x_power_driver); +} +module_init(wm831x_power_init); + +static void __exit wm831x_power_exit(void) +{ + platform_driver_unregister(&wm831x_power_driver); +} +module_exit(wm831x_power_exit); + +MODULE_DESCRIPTION("Power supply driver for WM831x PMICs"); +MODULE_AUTHOR("Mark Brown "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-power"); diff --git a/include/linux/mfd/wm831x/pmu.h b/include/linux/mfd/wm831x/pmu.h new file mode 100644 index 000000000000..b18cbb027bc3 --- /dev/null +++ b/include/linux/mfd/wm831x/pmu.h @@ -0,0 +1,189 @@ +/* + * include/linux/mfd/wm831x/pmu.h -- PMU for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + +#ifndef __MFD_WM831X_PMU_H__ +#define __MFD_WM831X_PMU_H__ + +/* + * R16387 (0x4003) - Power State + */ +#define WM831X_CHIP_ON 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_MASK 0x8000 /* CHIP_ON */ +#define WM831X_CHIP_ON_SHIFT 15 /* CHIP_ON */ +#define WM831X_CHIP_ON_WIDTH 1 /* CHIP_ON */ +#define WM831X_CHIP_SLP 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_MASK 0x4000 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_SHIFT 14 /* CHIP_SLP */ +#define WM831X_CHIP_SLP_WIDTH 1 /* CHIP_SLP */ +#define WM831X_REF_LP 0x1000 /* REF_LP */ +#define WM831X_REF_LP_MASK 0x1000 /* REF_LP */ +#define WM831X_REF_LP_SHIFT 12 /* REF_LP */ +#define WM831X_REF_LP_WIDTH 1 /* REF_LP */ +#define WM831X_PWRSTATE_DLY_MASK 0x0C00 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_SHIFT 10 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_PWRSTATE_DLY_WIDTH 2 /* PWRSTATE_DLY - [11:10] */ +#define WM831X_SWRST_DLY 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_MASK 0x0200 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_SHIFT 9 /* SWRST_DLY */ +#define WM831X_SWRST_DLY_WIDTH 1 /* SWRST_DLY */ +#define WM831X_USB100MA_STARTUP_MASK 0x0030 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_SHIFT 4 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB100MA_STARTUP_WIDTH 2 /* USB100MA_STARTUP - [5:4] */ +#define WM831X_USB_CURR_STS 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_MASK 0x0008 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_SHIFT 3 /* USB_CURR_STS */ +#define WM831X_USB_CURR_STS_WIDTH 1 /* USB_CURR_STS */ +#define WM831X_USB_ILIM_MASK 0x0007 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_SHIFT 0 /* USB_ILIM - [2:0] */ +#define WM831X_USB_ILIM_WIDTH 3 /* USB_ILIM - [2:0] */ + +/* + * R16397 (0x400D) - System Status + */ +#define WM831X_THW_STS 0x8000 /* THW_STS */ +#define WM831X_THW_STS_MASK 0x8000 /* THW_STS */ +#define WM831X_THW_STS_SHIFT 15 /* THW_STS */ +#define WM831X_THW_STS_WIDTH 1 /* THW_STS */ +#define WM831X_PWR_SRC_BATT 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_MASK 0x0400 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_SHIFT 10 /* PWR_SRC_BATT */ +#define WM831X_PWR_SRC_BATT_WIDTH 1 /* PWR_SRC_BATT */ +#define WM831X_PWR_WALL 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_MASK 0x0200 /* PWR_WALL */ +#define WM831X_PWR_WALL_SHIFT 9 /* PWR_WALL */ +#define WM831X_PWR_WALL_WIDTH 1 /* PWR_WALL */ +#define 
WM831X_PWR_USB 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_MASK 0x0100 /* PWR_USB */ +#define WM831X_PWR_USB_SHIFT 8 /* PWR_USB */ +#define WM831X_PWR_USB_WIDTH 1 /* PWR_USB */ +#define WM831X_MAIN_STATE_MASK 0x001F /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_SHIFT 0 /* MAIN_STATE - [4:0] */ +#define WM831X_MAIN_STATE_WIDTH 5 /* MAIN_STATE - [4:0] */ + +/* + * R16456 (0x4048) - Charger Control 1 + */ +#define WM831X_CHG_ENA 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_MASK 0x8000 /* CHG_ENA */ +#define WM831X_CHG_ENA_SHIFT 15 /* CHG_ENA */ +#define WM831X_CHG_ENA_WIDTH 1 /* CHG_ENA */ +#define WM831X_CHG_FRC 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_MASK 0x4000 /* CHG_FRC */ +#define WM831X_CHG_FRC_SHIFT 14 /* CHG_FRC */ +#define WM831X_CHG_FRC_WIDTH 1 /* CHG_FRC */ +#define WM831X_CHG_ITERM_MASK 0x1C00 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_SHIFT 10 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_ITERM_WIDTH 3 /* CHG_ITERM - [12:10] */ +#define WM831X_CHG_FAST 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_MASK 0x0020 /* CHG_FAST */ +#define WM831X_CHG_FAST_SHIFT 5 /* CHG_FAST */ +#define WM831X_CHG_FAST_WIDTH 1 /* CHG_FAST */ +#define WM831X_CHG_IMON_ENA 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_MASK 0x0002 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_SHIFT 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_IMON_ENA_WIDTH 1 /* CHG_IMON_ENA */ +#define WM831X_CHG_CHIP_TEMP_MON 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_MASK 0x0001 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_SHIFT 0 /* CHG_CHIP_TEMP_MON */ +#define WM831X_CHG_CHIP_TEMP_MON_WIDTH 1 /* CHG_CHIP_TEMP_MON */ + +/* + * R16457 (0x4049) - Charger Control 2 + */ +#define WM831X_CHG_OFF_MSK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_MASK 0x4000 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_SHIFT 14 /* CHG_OFF_MSK */ +#define WM831X_CHG_OFF_MSK_WIDTH 1 /* CHG_OFF_MSK */ +#define WM831X_CHG_TIME_MASK 0x0F00 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_SHIFT 8 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TIME_WIDTH 4 /* CHG_TIME - [11:8] */ +#define WM831X_CHG_TRKL_ILIM_MASK 0x00C0 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_SHIFT 6 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_TRKL_ILIM_WIDTH 2 /* CHG_TRKL_ILIM - [7:6] */ +#define WM831X_CHG_VSEL_MASK 0x0030 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_SHIFT 4 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_VSEL_WIDTH 2 /* CHG_VSEL - [5:4] */ +#define WM831X_CHG_FAST_ILIM_MASK 0x000F /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_SHIFT 0 /* CHG_FAST_ILIM - [3:0] */ +#define WM831X_CHG_FAST_ILIM_WIDTH 4 /* CHG_FAST_ILIM - [3:0] */ + +/* + * R16458 (0x404A) - Charger Status + */ +#define WM831X_BATT_OV_STS 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_MASK 0x8000 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_SHIFT 15 /* BATT_OV_STS */ +#define WM831X_BATT_OV_STS_WIDTH 1 /* BATT_OV_STS */ +#define WM831X_CHG_STATE_MASK 0x7000 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_SHIFT 12 /* CHG_STATE - [14:12] */ +#define WM831X_CHG_STATE_WIDTH 3 /* CHG_STATE - [14:12] */ +#define WM831X_BATT_HOT_STS 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_MASK 0x0800 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_SHIFT 11 /* BATT_HOT_STS */ +#define WM831X_BATT_HOT_STS_WIDTH 1 /* BATT_HOT_STS */ +#define WM831X_BATT_COLD_STS 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_MASK 0x0400 /* BATT_COLD_STS */ +#define WM831X_BATT_COLD_STS_SHIFT 10 /* BATT_COLD_STS */ +#define 
WM831X_BATT_COLD_STS_WIDTH 1 /* BATT_COLD_STS */ +#define WM831X_CHG_TOPOFF 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_MASK 0x0200 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_SHIFT 9 /* CHG_TOPOFF */ +#define WM831X_CHG_TOPOFF_WIDTH 1 /* CHG_TOPOFF */ +#define WM831X_CHG_ACTIVE 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_MASK 0x0100 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_SHIFT 8 /* CHG_ACTIVE */ +#define WM831X_CHG_ACTIVE_WIDTH 1 /* CHG_ACTIVE */ +#define WM831X_CHG_TIME_ELAPSED_MASK 0x00FF /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_SHIFT 0 /* CHG_TIME_ELAPSED - [7:0] */ +#define WM831X_CHG_TIME_ELAPSED_WIDTH 8 /* CHG_TIME_ELAPSED - [7:0] */ + +#define WM831X_CHG_STATE_OFF (0 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE (1 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST (2 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_TRICKLE_OT (3 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_FAST_OT (4 << WM831X_CHG_STATE_SHIFT) +#define WM831X_CHG_STATE_DEFECTIVE (5 << WM831X_CHG_STATE_SHIFT) + +/* + * R16459 (0x404B) - Backup Charger Control + */ +#define WM831X_BKUP_CHG_ENA 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_MASK 0x8000 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_SHIFT 15 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_ENA_WIDTH 1 /* BKUP_CHG_ENA */ +#define WM831X_BKUP_CHG_STS 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_MASK 0x4000 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_SHIFT 14 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_STS_WIDTH 1 /* BKUP_CHG_STS */ +#define WM831X_BKUP_CHG_MODE 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_MASK 0x1000 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_SHIFT 12 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_CHG_MODE_WIDTH 1 /* BKUP_CHG_MODE */ +#define WM831X_BKUP_BATT_DET_ENA 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_MASK 0x0800 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_SHIFT 11 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_DET_ENA_WIDTH 1 /* BKUP_BATT_DET_ENA */ +#define WM831X_BKUP_BATT_STS 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_MASK 0x0400 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_SHIFT 10 /* BKUP_BATT_STS */ +#define WM831X_BKUP_BATT_STS_WIDTH 1 /* BKUP_BATT_STS */ +#define WM831X_BKUP_CHG_VLIM 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_MASK 0x0010 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_SHIFT 4 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_VLIM_WIDTH 1 /* BKUP_CHG_VLIM */ +#define WM831X_BKUP_CHG_ILIM_MASK 0x0003 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_SHIFT 0 /* BKUP_CHG_ILIM - [1:0] */ +#define WM831X_BKUP_CHG_ILIM_WIDTH 2 /* BKUP_CHG_ILIM - [1:0] */ + +#endif From a4f3d55cb0b8e1199e352c30ea9a264cef9c1ae1 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 5 Sep 2009 14:09:20 +0100 Subject: [PATCH 0086/3409] backlight: Add WM831x backlight driver The WM831x series of PMICs provide DC-DC boost convertors and current sinks which can be used together to drive LEDs for use as backlights. Expose this functionality via the backlight API. Since when used in this configuration the current sink and boost convertor are very tightly coupled with a multi-stage startup for the current sink which overlaps with the boost convertor startup this driver bypasses the regulator API. Machine inititialisation is responsible for ensuring that the regulators are not accessed via both APIs. 
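Stepping back briefly to the pmu.h header introduced just before this patch: its field macros follow the kernel's usual mask/shift idiom, and the WM831X_CHG_STATE_* values are defined pre-shifted, so a masked register read can be compared against them directly. A minimal sketch, assuming the 0x404A address from the R16458 register comment and the wm831x_reg_read() helper that appears later in this series (example_chg_state is a hypothetical name):

/* Sketch only: report the charger state from R16458 (0x404A). */
static const char *example_chg_state(struct wm831x *wm831x)
{
	int val = wm831x_reg_read(wm831x, 0x404A);	/* Charger Status */

	if (val < 0)
		return "read error";

	switch (val & WM831X_CHG_STATE_MASK) {
	case WM831X_CHG_STATE_OFF:
		return "off";
	case WM831X_CHG_STATE_TRICKLE:
		return "trickle";
	case WM831X_CHG_STATE_FAST:
		return "fast";
	case WM831X_CHG_STATE_TRICKLE_OT:
		return "trickle (over temperature)";
	case WM831X_CHG_STATE_FAST_OT:
		return "fast (over temperature)";
	case WM831X_CHG_STATE_DEFECTIVE:
		return "battery defective";
	default:
		return "reserved";
	}
}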
Signed-off-by: Mark Brown Signed-off-by: Richard Purdie --- drivers/video/backlight/Kconfig | 7 + drivers/video/backlight/Makefile | 1 + drivers/video/backlight/wm831x_bl.c | 250 ++++++++++++++++++++++++++++ 3 files changed, 258 insertions(+) create mode 100644 drivers/video/backlight/wm831x_bl.c diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index f9d19be05540..f86dbfd209ad 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -229,3 +229,10 @@ config BACKLIGHT_SAHARA help If you have a Tabletkiosk Sahara Touch-iT, say y to enable the backlight driver. + +config BACKLIGHT_WM831X + tristate "WM831x PMIC Backlight Driver" + depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X + help + If you have a backlight driven by the ISINK and DCDC of a + WM831x PMIC say y to enable the backlight driver for it. diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 4eb178c1d684..df0b67ce88b6 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -24,4 +24,5 @@ obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o +obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c new file mode 100644 index 000000000000..467bdb7efb23 --- /dev/null +++ b/drivers/video/backlight/wm831x_bl.c @@ -0,0 +1,250 @@ +/* + * Backlight driver for Wolfson Microelectronics WM831x PMICs + * + * Copyright 2009 Wolfson Microelectonics plc + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +struct wm831x_backlight_data { + struct wm831x *wm831x; + int isink_reg; + int current_brightness; +}; + +static int wm831x_backlight_set(struct backlight_device *bl, int brightness) +{ + struct wm831x_backlight_data *data = bl_get_data(bl); + struct wm831x *wm831x = data->wm831x; + int power_up = !data->current_brightness && brightness; + int power_down = data->current_brightness && !brightness; + int ret; + + if (power_up) { + /* Enable the ISINK */ + ret = wm831x_set_bits(wm831x, data->isink_reg, + WM831X_CS1_ENA, WM831X_CS1_ENA); + if (ret < 0) + goto err; + + /* Enable the DC-DC */ + ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, + WM831X_DC4_ENA, WM831X_DC4_ENA); + if (ret < 0) + goto err; + } + + if (power_down) { + /* DCDC first */ + ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, + WM831X_DC4_ENA, 0); + if (ret < 0) + goto err; + + /* ISINK */ + ret = wm831x_set_bits(wm831x, data->isink_reg, + WM831X_CS1_DRIVE | WM831X_CS1_ENA, 0); + if (ret < 0) + goto err; + } + + /* Set the new brightness */ + ret = wm831x_set_bits(wm831x, data->isink_reg, + WM831X_CS1_ISEL_MASK, brightness); + if (ret < 0) + goto err; + + if (power_up) { + /* Drive current through the ISINK */ + ret = wm831x_set_bits(wm831x, data->isink_reg, + WM831X_CS1_DRIVE, WM831X_CS1_DRIVE); + if (ret < 0) + return ret; + } + + data->current_brightness = brightness; + + return 0; + +err: + /* If we were in the middle of a power transition always shut down + * for safety. 
+ */ + if (power_up || power_down) { + wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0); + wm831x_set_bits(wm831x, data->isink_reg, WM831X_CS1_ENA, 0); + } + + return ret; +} + +static int wm831x_backlight_update_status(struct backlight_device *bl) +{ + int brightness = bl->props.brightness; + + if (bl->props.power != FB_BLANK_UNBLANK) + brightness = 0; + + if (bl->props.fb_blank != FB_BLANK_UNBLANK) + brightness = 0; + + if (bl->props.state & BL_CORE_SUSPENDED) + brightness = 0; + + return wm831x_backlight_set(bl, brightness); +} + +static int wm831x_backlight_get_brightness(struct backlight_device *bl) +{ + struct wm831x_backlight_data *data = bl_get_data(bl); + return data->current_brightness; +} + +static struct backlight_ops wm831x_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, + .update_status = wm831x_backlight_update_status, + .get_brightness = wm831x_backlight_get_brightness, +}; + +static int wm831x_backlight_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *wm831x_pdata; + struct wm831x_backlight_pdata *pdata; + struct wm831x_backlight_data *data; + struct backlight_device *bl; + int ret, i, max_isel, isink_reg, dcdc_cfg; + + /* We need platform data */ + if (pdev->dev.parent->platform_data) { + wm831x_pdata = pdev->dev.parent->platform_data; + pdata = wm831x_pdata->backlight; + } else { + pdata = NULL; + } + + if (!pdata) { + dev_err(&pdev->dev, "No platform data supplied\n"); + return -EINVAL; + } + + /* Figure out the maximum current we can use */ + for (i = 0; i < WM831X_ISINK_MAX_ISEL; i++) { + if (wm831x_isinkv_values[i] > pdata->max_uA) + break; + } + + if (i == 0) { + dev_err(&pdev->dev, "Invalid max_uA: %duA\n", pdata->max_uA); + return -EINVAL; + } + max_isel = i - 1; + + if (pdata->max_uA != wm831x_isinkv_values[max_isel]) + dev_warn(&pdev->dev, + "Maximum current is %duA not %duA as requested\n", + wm831x_isinkv_values[max_isel], pdata->max_uA); + + switch (pdata->isink) { + case 1: + isink_reg = WM831X_CURRENT_SINK_1; + dcdc_cfg = 0; + break; + case 2: + isink_reg = WM831X_CURRENT_SINK_2; + dcdc_cfg = WM831X_DC4_FBSRC; + break; + default: + dev_err(&pdev->dev, "Invalid ISINK %d\n", pdata->isink); + return -EINVAL; + } + + /* Configure the ISINK to use for feedback */ + ret = wm831x_reg_unlock(wm831x); + if (ret < 0) + return ret; + + ret = wm831x_set_bits(wm831x, WM831X_DC4_CONTROL, WM831X_DC4_FBSRC, + dcdc_cfg); + + wm831x_reg_lock(wm831x); + if (ret < 0) + return ret; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + data->wm831x = wm831x; + data->current_brightness = 0; + data->isink_reg = isink_reg; + + bl = backlight_device_register("wm831x", &pdev->dev, + data, &wm831x_backlight_ops); + if (IS_ERR(bl)) { + dev_err(&pdev->dev, "failed to register backlight\n"); + kfree(data); + return PTR_ERR(bl); + } + + bl->props.max_brightness = max_isel; + bl->props.brightness = max_isel; + + platform_set_drvdata(pdev, bl); + + /* Disable the DCDC if it was started so we can bootstrap */ + wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0); + + + backlight_update_status(bl); + + return 0; +} + +static int wm831x_backlight_remove(struct platform_device *pdev) +{ + struct backlight_device *bl = platform_get_drvdata(pdev); + struct wm831x_backlight_data *data = bl_get_data(bl); + + backlight_device_unregister(bl); + kfree(data); + return 0; +} + +static struct platform_driver wm831x_backlight_driver = { + .driver = { + .name = 
"wm831x-backlight", + .owner = THIS_MODULE, + }, + .probe = wm831x_backlight_probe, + .remove = wm831x_backlight_remove, +}; + +static int __init wm831x_backlight_init(void) +{ + return platform_driver_register(&wm831x_backlight_driver); +} +module_init(wm831x_backlight_init); + +static void __exit wm831x_backlight_exit(void) +{ + platform_driver_unregister(&wm831x_backlight_driver); +} +module_exit(wm831x_backlight_exit); + +MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs"); +MODULE_AUTHOR("Mark Brown Date: Mon, 7 Sep 2009 14:19:54 +0100 Subject: [PATCH 0087/3409] backlight: mbp_nvidia_bl: add support for MacBookAir 1,1 Add LCD backlight support for the Intel GMA965 found in the MacBookAir 1,1. Signed-off-by: Henrik Rydberg Acked-by: Matthew Garrett Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/video/backlight/mbp_nvidia_bl.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 3bb4c0a50c62..9fc7a531718a 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -164,6 +164,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&intel_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBookAir 1,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"), + }, + .driver_data = (void *)&intel_chipset_data, + }, { .callback = mbp_dmi_match, .ident = "MacBook 5,1", From c746b5519a88b8803d43946ad06326ece4829116 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sat, 5 Sep 2009 14:09:21 +0100 Subject: [PATCH 0088/3409] leds: Add WM831x status LED driver The WM831x devices feature two software controlled status LEDs with hardware assisted blinking. The device can also autonomously control the LEDs based on a selection of sources. This can be configured at boot time using either platform data or the chip OTP. A sysfs file in the style of that for triggers allowing the control source to be configured at run time. Triggers can't be used here since they can't depend on the implementation details of a specific LED type. Signed-off-by: Mark Brown Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 7 + drivers/leds/Makefile | 1 + drivers/leds/leds-wm831x-status.c | 341 ++++++++++++++++++++++++++++++ include/linux/mfd/wm831x/status.h | 34 +++ 4 files changed, 383 insertions(+) create mode 100644 drivers/leds/leds-wm831x-status.c create mode 100644 include/linux/mfd/wm831x/status.h diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 7c8e7122aaa9..edfd4e302be7 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -195,6 +195,13 @@ config LEDS_PCA955X LED driver chips accessed via the I2C bus. Supported devices include PCA9550, PCA9551, PCA9552, and PCA9553. +config LEDS_WM831X_STATUS + tristate "LED support for status LEDs on WM831x PMICs" + depends on LEDS_CLASS && MFD_WM831X + help + This option enables support for the status LEDs of the WM831x + series of PMICs. 
+ config LEDS_WM8350 tristate "LED Support for WM8350 AudioPlus PMIC" depends on LEDS_CLASS && MFD_WM8350 diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index e8cdcf77a4c3..46d72704d606 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o obj-$(CONFIG_LEDS_FSG) += leds-fsg.o obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o +obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o obj-$(CONFIG_LEDS_PWM) += leds-pwm.o diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c new file mode 100644 index 000000000000..c586d05e336a --- /dev/null +++ b/drivers/leds/leds-wm831x-status.c @@ -0,0 +1,341 @@ +/* + * LED driver for WM831x status LEDs + * + * Copyright(C) 2009 Wolfson Microelectronics PLC. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +struct wm831x_status { + struct led_classdev cdev; + struct wm831x *wm831x; + struct work_struct work; + struct mutex mutex; + + spinlock_t value_lock; + int reg; /* Control register */ + int reg_val; /* Control register value */ + + int blink; + int blink_time; + int blink_cyc; + int src; + enum led_brightness brightness; +}; + +#define to_wm831x_status(led_cdev) \ + container_of(led_cdev, struct wm831x_status, cdev) + +static void wm831x_status_work(struct work_struct *work) +{ + struct wm831x_status *led = container_of(work, struct wm831x_status, + work); + unsigned long flags; + + mutex_lock(&led->mutex); + + led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK | + WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK); + + spin_lock_irqsave(&led->value_lock, flags); + + led->reg_val |= led->src << WM831X_LED_SRC_SHIFT; + if (led->blink) { + led->reg_val |= 2 << WM831X_LED_MODE_SHIFT; + led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT; + led->reg_val |= led->blink_cyc; + } else { + if (led->brightness != LED_OFF) + led->reg_val |= 1 << WM831X_LED_MODE_SHIFT; + } + + spin_unlock_irqrestore(&led->value_lock, flags); + + wm831x_reg_write(led->wm831x, led->reg, led->reg_val); + + mutex_unlock(&led->mutex); +} + +static void wm831x_status_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct wm831x_status *led = to_wm831x_status(led_cdev); + unsigned long flags; + + spin_lock_irqsave(&led->value_lock, flags); + led->brightness = value; + if (value == LED_OFF) + led->blink = 0; + schedule_work(&led->work); + spin_unlock_irqrestore(&led->value_lock, flags); +} + +static int wm831x_status_blink_set(struct led_classdev *led_cdev, + unsigned long *delay_on, + unsigned long *delay_off) +{ + struct wm831x_status *led = to_wm831x_status(led_cdev); + unsigned long flags; + int ret = 0; + + /* Pick some defaults if we've not been given times */ + if (*delay_on == 0 && *delay_off == 0) { + *delay_on = 250; + *delay_off = 250; + } + + spin_lock_irqsave(&led->value_lock, flags); + + /* We only have a limited selection of settings, see if we can + * support the configuration we're being given */ + switch (*delay_on) { + case 1000: + led->blink_time = 0; + break; + case 250: + led->blink_time = 1; + break; + case 125: + led->blink_time = 2; + break; + case 62: + case 63: + /* Actually 62.5ms */ + led->blink_time = 3; + 
break; + default: + ret = -EINVAL; + break; + } + + if (ret == 0) { + switch (*delay_off / *delay_on) { + case 1: + led->blink_cyc = 0; + break; + case 3: + led->blink_cyc = 1; + break; + case 4: + led->blink_cyc = 2; + break; + case 8: + led->blink_cyc = 3; + break; + default: + ret = -EINVAL; + break; + } + } + + if (ret == 0) + led->blink = 1; + else + led->blink = 0; + + /* Always update; if we fail turn off blinking since we expect + * a software fallback. */ + schedule_work(&led->work); + + spin_unlock_irqrestore(&led->value_lock, flags); + + return ret; +} + +static const char *led_src_texts[] = { + "otp", + "power", + "charger", + "soft", +}; + +static ssize_t wm831x_status_src_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct wm831x_status *led = to_wm831x_status(led_cdev); + int i; + ssize_t ret = 0; + + mutex_lock(&led->mutex); + + for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) + if (i == led->src) + ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]); + else + ret += sprintf(&buf[ret], "%s ", led_src_texts[i]); + + mutex_unlock(&led->mutex); + + ret += sprintf(&buf[ret], "\n"); + + return ret; +} + +static ssize_t wm831x_status_src_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct led_classdev *led_cdev = dev_get_drvdata(dev); + struct wm831x_status *led = to_wm831x_status(led_cdev); + char name[20]; + int i; + size_t len; + + name[sizeof(name) - 1] = '\0'; + strncpy(name, buf, sizeof(name) - 1); + len = strlen(name); + + if (len && name[len - 1] == '\n') + name[len - 1] = '\0'; + + for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) { + if (!strcmp(name, led_src_texts[i])) { + mutex_lock(&led->mutex); + + led->src = i; + schedule_work(&led->work); + + mutex_unlock(&led->mutex); + } + } + + return size; +} + +static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store); + +static int wm831x_status_probe(struct platform_device *pdev) +{ + struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); + struct wm831x_pdata *chip_pdata; + struct wm831x_status_pdata pdata; + struct wm831x_status *drvdata; + struct resource *res; + int id = pdev->id % ARRAY_SIZE(chip_pdata->status); + int ret; + + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res == NULL) { + dev_err(&pdev->dev, "No I/O resource\n"); + ret = -EINVAL; + goto err; + } + + drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL); + if (!drvdata) + return -ENOMEM; + dev_set_drvdata(&pdev->dev, drvdata); + + drvdata->wm831x = wm831x; + drvdata->reg = res->start; + + if (wm831x->dev->platform_data) + chip_pdata = wm831x->dev->platform_data; + else + chip_pdata = NULL; + + memset(&pdata, 0, sizeof(pdata)); + if (chip_pdata && chip_pdata->status[id]) + memcpy(&pdata, chip_pdata->status[id], sizeof(pdata)); + else + pdata.name = dev_name(&pdev->dev); + + mutex_init(&drvdata->mutex); + INIT_WORK(&drvdata->work, wm831x_status_work); + spin_lock_init(&drvdata->value_lock); + + /* We cache the configuration register and read startup values + * from it. */ + drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg); + + if (drvdata->reg_val & WM831X_LED_MODE_MASK) + drvdata->brightness = LED_FULL; + else + drvdata->brightness = LED_OFF; + + /* Set a default source if configured, otherwise leave the + * current hardware setting. 
+ */ + if (pdata.default_src == WM831X_STATUS_PRESERVE) { + drvdata->src = drvdata->reg_val; + drvdata->src &= WM831X_LED_SRC_MASK; + drvdata->src >>= WM831X_LED_SRC_SHIFT; + } else { + drvdata->src = pdata.default_src - 1; + } + + drvdata->cdev.name = pdata.name; + drvdata->cdev.default_trigger = pdata.default_trigger; + drvdata->cdev.brightness_set = wm831x_status_set; + drvdata->cdev.blink_set = wm831x_status_blink_set; + + ret = led_classdev_register(wm831x->dev, &drvdata->cdev); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to register LED: %d\n", ret); + goto err_led; + } + + ret = device_create_file(drvdata->cdev.dev, &dev_attr_src); + if (ret != 0) + dev_err(&pdev->dev, + "No source control for LED: %d\n", ret); + + return 0; + +err_led: + led_classdev_unregister(&drvdata->cdev); + kfree(drvdata); +err: + return ret; +} + +static int wm831x_status_remove(struct platform_device *pdev) +{ + struct wm831x_status *drvdata = platform_get_drvdata(pdev); + + device_remove_file(drvdata->cdev.dev, &dev_attr_src); + led_classdev_unregister(&drvdata->cdev); + kfree(drvdata); + + return 0; +} + +static struct platform_driver wm831x_status_driver = { + .driver = { + .name = "wm831x-status", + .owner = THIS_MODULE, + }, + .probe = wm831x_status_probe, + .remove = wm831x_status_remove, +}; + +static int __devinit wm831x_status_init(void) +{ + return platform_driver_register(&wm831x_status_driver); +} +module_init(wm831x_status_init); + +static void wm831x_status_exit(void) +{ + platform_driver_unregister(&wm831x_status_driver); +} +module_exit(wm831x_status_exit); + +MODULE_AUTHOR("Mark Brown "); +MODULE_DESCRIPTION("WM831x status LED driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:wm831x-status"); diff --git a/include/linux/mfd/wm831x/status.h b/include/linux/mfd/wm831x/status.h new file mode 100644 index 000000000000..6bc090d0e3ac --- /dev/null +++ b/include/linux/mfd/wm831x/status.h @@ -0,0 +1,34 @@ +/* + * include/linux/mfd/wm831x/status.h -- Status LEDs for WM831x + * + * Copyright 2009 Wolfson Microelectronics PLC. + * + * Author: Mark Brown + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + */ + +#ifndef __MFD_WM831X_STATUS_H__ +#define __MFD_WM831X_STATUS_H__ + +#define WM831X_LED_SRC_MASK 0xC000 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_SHIFT 14 /* LED_SRC - [15:14] */ +#define WM831X_LED_SRC_WIDTH 2 /* LED_SRC - [15:14] */ +#define WM831X_LED_MODE_MASK 0x0300 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_SHIFT 8 /* LED_MODE - [9:8] */ +#define WM831X_LED_MODE_WIDTH 2 /* LED_MODE - [9:8] */ +#define WM831X_LED_SEQ_LEN_MASK 0x0030 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_SHIFT 4 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_SEQ_LEN_WIDTH 2 /* LED_SEQ_LEN - [5:4] */ +#define WM831X_LED_DUR_MASK 0x000C /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_SHIFT 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUR_WIDTH 2 /* LED_DUR - [3:2] */ +#define WM831X_LED_DUTY_CYC_MASK 0x0003 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_SHIFT 0 /* LED_DUTY_CYC - [1:0] */ +#define WM831X_LED_DUTY_CYC_WIDTH 2 /* LED_DUTY_CYC - [1:0] */ + +#endif From 2fea09222ab48517d729a7fb4542092cf428f528 Mon Sep 17 00:00:00 2001 From: Michal Simek Date: Thu, 6 Aug 2009 16:04:51 -0700 Subject: [PATCH 0089/3409] leds: gpio-leds: fix typographics fault [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Michal Simek Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/leds/leds-gpio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c index 6b06638eb5b4..7467980b8cf9 100644 --- a/drivers/leds/leds-gpio.c +++ b/drivers/leds/leds-gpio.c @@ -80,7 +80,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template, /* skip leds that aren't available */ if (!gpio_is_valid(template->gpio)) { - printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", + printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n", template->gpio, template->name); return 0; } From 74cbe20294c9f54a0926d19235395cf8e22b7830 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Thu, 6 Aug 2009 16:04:51 -0700 Subject: [PATCH 0090/3409] leds: fix coding style in worker thread code for ledtrig-gpio. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: Samuel R. C. 
Vale Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/leds/ledtrig-gpio.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c index 1bc5db4ece0d..f5913372d691 100644 --- a/drivers/leds/ledtrig-gpio.c +++ b/drivers/leds/ledtrig-gpio.c @@ -44,22 +44,22 @@ static void gpio_trig_work(struct work_struct *work) struct gpio_trig_data, work); int tmp; - if (!gpio_data->gpio) - return; + if (!gpio_data->gpio) + return; - tmp = gpio_get_value(gpio_data->gpio); - if (gpio_data->inverted) - tmp = !tmp; + tmp = gpio_get_value(gpio_data->gpio); + if (gpio_data->inverted) + tmp = !tmp; - if (tmp) { - if (gpio_data->desired_brightness) - led_set_brightness(gpio_data->led, - gpio_data->desired_brightness); - else - led_set_brightness(gpio_data->led, LED_FULL); - } else { - led_set_brightness(gpio_data->led, LED_OFF); - } + if (tmp) { + if (gpio_data->desired_brightness) + led_set_brightness(gpio_data->led, + gpio_data->desired_brightness); + else + led_set_brightness(gpio_data->led, LED_FULL); + } else { + led_set_brightness(gpio_data->led, LED_OFF); + } } static ssize_t gpio_trig_brightness_show(struct device *dev, From 1e9bd9426c3dfc4a5383b224cebbced9c5e05998 Mon Sep 17 00:00:00 2001 From: Mario Schwalbe Date: Thu, 20 Aug 2009 12:39:48 +0200 Subject: [PATCH 0091/3409] backlight: Add support for new Apple machines. This patch adds support for the new Apple models MacBook 5,1, MacBook Pro 5,1, MacBook Pro 5,2, MacBook Pro 5,5. It's just a device table update. Signed-off-by: Mario Schwalbe Signed-off-by: Richard Purdie --- drivers/video/backlight/mbp_nvidia_bl.c | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c index 9fc7a531718a..9edb8d7c295f 100644 --- a/drivers/video/backlight/mbp_nvidia_bl.c +++ b/drivers/video/backlight/mbp_nvidia_bl.c @@ -182,6 +182,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&nvidia_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBook 5,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, { .callback = mbp_dmi_match, .ident = "MacBookAir 2,1", @@ -200,6 +209,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = { }, .driver_data = (void *)&nvidia_chipset_data, }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 5,2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, + { + .callback = mbp_dmi_match, + .ident = "MacBookPro 5,5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"), + }, + .driver_data = (void *)&nvidia_chipset_data, + }, { } }; From 85c5204a677bc1053cb636590859c32fd0cf6bf9 Mon Sep 17 00:00:00 2001 From: Richard Purdie Date: Mon, 7 Sep 2009 14:35:04 +0100 Subject: [PATCH 0092/3409] leds: Fix leds-pca9532 whitespace issues Signed-off-by: Richard Purdie 1690 || *delay_on < 6) @@ -227,7 +227,7 @@ static int pca9532_configure(struct i2c_client *client, break; case PCA9532_TYPE_LED: led->state = pled->state; - led->name = pled->name; + led->name = pled->name; led->ldev.name = led->name; led->ldev.brightness = LED_OFF; led->ldev.brightness_set = pca9532_set_brightness; @@ -254,7 
+254,7 @@ static int pca9532_configure(struct i2c_client *client, data->idev->name = pled->name; data->idev->phys = "i2c/pca9532"; data->idev->id.bustype = BUS_HOST; - data->idev->id.vendor = 0x001f; + data->idev->id.vendor = 0x001f; data->idev->id.product = 0x0001; data->idev->id.version = 0x0100; data->idev->evbit[0] = BIT_MASK(EV_SND); From db3f520738a8c5bf593e13d4ac71f8da9ffcb964 Mon Sep 17 00:00:00 2001 From: Olaf Hering Date: Mon, 7 Sep 2009 14:37:27 +0100 Subject: [PATCH 0093/3409] leds: Fix LED names This is needed to get kde-powersave to work properly on some g4 powerbooks. From: Olaf Hering Signed-off-by: Greg Kroah-Hartman Signed-off-by: Richard Purdie --- drivers/leds/leds-cobalt-qube.c | 2 +- drivers/leds/leds-cobalt-raq.c | 4 ++-- drivers/macintosh/via-pmu-led.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c index 059aa2924b1c..8816806accd2 100644 --- a/drivers/leds/leds-cobalt-qube.c +++ b/drivers/leds/leds-cobalt-qube.c @@ -28,7 +28,7 @@ static void qube_front_led_set(struct led_classdev *led_cdev, } static struct led_classdev qube_front_led = { - .name = "qube-front", + .name = "qube::front", .brightness = LED_FULL, .brightness_set = qube_front_led_set, .default_trigger = "ide-disk", diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c index 5f1ce810815f..defc212105f3 100644 --- a/drivers/leds/leds-cobalt-raq.c +++ b/drivers/leds/leds-cobalt-raq.c @@ -49,7 +49,7 @@ static void raq_web_led_set(struct led_classdev *led_cdev, } static struct led_classdev raq_web_led = { - .name = "raq-web", + .name = "raq::web", .brightness_set = raq_web_led_set, }; @@ -70,7 +70,7 @@ static void raq_power_off_led_set(struct led_classdev *led_cdev, } static struct led_classdev raq_power_off_led = { - .name = "raq-power-off", + .name = "raq::power-off", .brightness_set = raq_power_off_led_set, .default_trigger = "power-off", }; diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c index 55ad95671387..d242976bcfe7 100644 --- a/drivers/macintosh/via-pmu-led.c +++ b/drivers/macintosh/via-pmu-led.c @@ -72,7 +72,7 @@ static void pmu_led_set(struct led_classdev *led_cdev, } static struct led_classdev pmu_led = { - .name = "pmu-front-led", + .name = "pmu-led::front", #ifdef CONFIG_ADB_PMU_LED_IDE .default_trigger = "ide-disk", #endif From 7f1be819cccea65dcd2a8c86de6369946b91c224 Mon Sep 17 00:00:00 2001 From: Antonio Ospite Date: Fri, 3 Jul 2009 16:18:45 +0200 Subject: [PATCH 0094/3409] leds: Fix indentation in LEDS_LP3944 Kconfig entry Signed-off-by: Antonio Ospite Signed-off-by: Richard Purdie --- drivers/leds/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index edfd4e302be7..e4f599f20e38 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -150,9 +150,9 @@ config LEDS_LP3944 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" depends on LEDS_CLASS && I2C help - This option enables support for LEDs connected to the National - Semiconductor LP3944 Lighting Management Unit (LMU) also known as - Fun Light Chip. + This option enables support for LEDs connected to the National + Semiconductor LP3944 Lighting Management Unit (LMU) also known as + Fun Light Chip. To compile this driver as a module, choose M here: the module will be called leds-lp3944. 
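The renames in the leds-cobalt and via-pmu-led patch above follow the LED class naming convention of devicename:colour:function; an empty colour segment produces the double colon in names like "qube::front". A minimal sketch of a classdev named this way (the example_* identifiers are hypothetical):

#include <linux/leds.h>

static void example_led_set(struct led_classdev *cdev,
			    enum led_brightness value)
{
	/* drive the underlying hardware here */
}

static struct led_classdev example_led = {
	.name		 = "example::status",	/* devicename::function */
	.brightness_set	 = example_led_set,
	.default_trigger = "heartbeat",
};

/* registered from probe() with led_classdev_register(&pdev->dev, &example_led) */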
From 156ff0d473d3f5a11ba66e0b3debd9e50bd946e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sat, 11 Jul 2009 22:52:44 +0200 Subject: [PATCH 0095/3409] backlight: move hp680-bl's probe function to .devinit.text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A pointer to hp680bl_probe is passed to the core via platform_driver_register and so the function must not disappear when the .init sections are discarded. Otherwise (if also having HOTPLUG=y) unbinding and binding a device to the driver via sysfs will result in an oops as does a device being registered late. Signed-off-by: Uwe Kleine-König Acked-by: Kristoffer Ericson Signed-off-by: Richard Purdie --- drivers/video/backlight/hp680_bl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index 5be55a20d8c7..7fb4eefff80d 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c @@ -103,7 +103,7 @@ static struct backlight_ops hp680bl_ops = { .update_status = hp680bl_set_intensity, }; -static int __init hp680bl_probe(struct platform_device *pdev) +static int __devinit hp680bl_probe(struct platform_device *pdev) { struct backlight_device *bd; From f16a5dba01ed942f427f56b0d1128251721275a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Sat, 11 Jul 2009 22:52:34 +0200 Subject: [PATCH 0096/3409] leds: move leds-clevo-mail's probe function to .devinit.text MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A pointer to clevo_mail_led_probe is passed to the core via platform_driver_register and so the function must not disappear when the .init sections are discarded. Otherwise (if also having HOTPLUG=y) unbinding and binding a device to the driver via sysfs will result in an oops as does a device being registered late. Signed-off-by: Uwe Kleine-König Signed-off-by: Richard Purdie --- drivers/leds/leds-clevo-mail.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c index 1813c84ea5fc..8ee83ceb4a7d 100644 --- a/drivers/leds/leds-clevo-mail.c +++ b/drivers/leds/leds-clevo-mail.c @@ -145,7 +145,7 @@ static struct led_classdev clevo_mail_led = { .flags = LED_CORE_SUSPENDRESUME, }; -static int __init clevo_mail_led_probe(struct platform_device *pdev) +static int __devinit clevo_mail_led_probe(struct platform_device *pdev) { return led_classdev_register(&pdev->dev, &clevo_mail_led); } From 5036cc41e07d6614350e329666ee8a79cea6f793 Mon Sep 17 00:00:00 2001 From: Marek Vasut Date: Thu, 6 Aug 2009 16:07:10 -0700 Subject: [PATCH 0097/3409] backlight: spi driver for LMS283GF05 LCD Add support for the SPI part of the LMS283GF05 LCD. The LCD uses SPI for initialization and powerdown sequences. No further details are specified in the datasheet about the initialization/powerdown sequence, just the magic numbers that have to be sent over the SPI bus. This LCD can be found in the Aeronix Zipit Z2 handheld.
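A hedged sketch of the board-side hookup this enables (the lms283gf05_pdata struct is added at the end of this patch; the GPIO number and SPI bus geometry below are invented for illustration):

static struct lms283gf05_pdata zipit_lcd_pdata = {
	.reset_gpio	= 19,		/* hypothetical reset line */
	.reset_inverted	= false,
};

static struct spi_board_info zipit_spi_lcd __initdata = {
	.modalias	= "lms283gf05",	/* matches the spi_driver name below */
	.max_speed_hz	= 400000,
	.bus_num	= 1,
	.chip_select	= 0,
	.platform_data	= &zipit_lcd_pdata,
};

/* spi_register_board_info(&zipit_spi_lcd, 1); from the board init code */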
Signed-off-by: Marek Vasut Signed-off-by: Andrew Morton Signed-off-by: Richard Purdie --- drivers/video/backlight/Kconfig | 7 + drivers/video/backlight/Makefile | 1 + drivers/video/backlight/lms283gf05.c | 242 +++++++++++++++++++++++++++ include/linux/spi/lms283gf05.h | 28 ++++ 4 files changed, 278 insertions(+) create mode 100644 drivers/video/backlight/lms283gf05.c create mode 100644 include/linux/spi/lms283gf05.h diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index f86dbfd209ad..c0d4a536ea87 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -31,6 +31,13 @@ config LCD_CORGI Say y here to support the LCD panels usually found on SHARP corgi (C7x0) and spitz (Cxx00) models. +config LCD_LMS283GF05 + tristate "Samsung LMS283GF05 LCD" + depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO + help + SPI driver for Samsung LMS283GF05. This provides basic support + for powering the LCD up/down through a sysfs interface. + config LCD_LTV350QV tristate "Samsung LTV350QV LCD Panel" depends on LCD_CLASS_DEVICE && SPI_MASTER diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index df0b67ce88b6..0abc014215ba 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -3,6 +3,7 @@ obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o +obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o obj-$(CONFIG_LCD_ILI9320) += ili9320.o obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c new file mode 100644 index 000000000000..447b542a20ca --- /dev/null +++ b/drivers/video/backlight/lms283gf05.c @@ -0,0 +1,242 @@ +/* + * lms283gf05.c -- support for Samsung LMS283GF05 LCD + * + * Copyright (c) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include + +#include +#include + +struct lms283gf05_state { + struct spi_device *spi; + struct lcd_device *ld; +}; + +struct lms283gf05_seq { + unsigned char reg; + unsigned short value; + unsigned char delay; +}; + +/* Magic sequences supplied by manufacturer, for details refer to datasheet */ +static struct lms283gf05_seq disp_initseq[] = { + /* REG, VALUE, DELAY */ + { 0x07, 0x0000, 0 }, + { 0x13, 0x0000, 10 }, + + { 0x11, 0x3004, 0 }, + { 0x14, 0x200F, 0 }, + { 0x10, 0x1a20, 0 }, + { 0x13, 0x0040, 50 }, + + { 0x13, 0x0060, 0 }, + { 0x13, 0x0070, 200 }, + + { 0x01, 0x0127, 0 }, + { 0x02, 0x0700, 0 }, + { 0x03, 0x1030, 0 }, + { 0x08, 0x0208, 0 }, + { 0x0B, 0x0620, 0 }, + { 0x0C, 0x0110, 0 }, + { 0x30, 0x0120, 0 }, + { 0x31, 0x0127, 0 }, + { 0x32, 0x0000, 0 }, + { 0x33, 0x0503, 0 }, + { 0x34, 0x0727, 0 }, + { 0x35, 0x0124, 0 }, + { 0x36, 0x0706, 0 }, + { 0x37, 0x0701, 0 }, + { 0x38, 0x0F00, 0 }, + { 0x39, 0x0F00, 0 }, + { 0x40, 0x0000, 0 }, + { 0x41, 0x0000, 0 }, + { 0x42, 0x013f, 0 }, + { 0x43, 0x0000, 0 }, + { 0x44, 0x013f, 0 }, + { 0x45, 0x0000, 0 }, + { 0x46, 0xef00, 0 }, + { 0x47, 0x013f, 0 }, + { 0x48, 0x0000, 0 }, + { 0x07, 0x0015, 30 }, + + { 0x07, 0x0017, 0 }, + + { 0x20, 0x0000, 0 }, + { 0x21, 0x0000, 0 }, + { 0x22, 0x0000, 0 } +}; + +static struct lms283gf05_seq disp_pdwnseq[] = { + { 0x07, 0x0016, 30 }, + + { 0x07, 0x0004, 0 }, + { 0x10, 0x0220, 20 }, + + { 0x13, 0x0060, 50 }, + + { 0x13, 0x0040, 50 }, + + { 0x13, 0x0000, 0 }, + { 0x10, 0x0000, 0 } +}; + + +static void lms283gf05_reset(unsigned long gpio, bool inverted) +{ + gpio_set_value(gpio, !inverted); + mdelay(100); + gpio_set_value(gpio, inverted); + mdelay(20); + gpio_set_value(gpio, !inverted); + mdelay(20); +} + +static void lms283gf05_toggle(struct spi_device *spi, + struct lms283gf05_seq *seq, int sz) +{ + char buf[3]; + int i; + + for (i = 0; i < sz; i++) { + buf[0] = 0x74; + buf[1] = 0x00; + buf[2] = seq[i].reg; + spi_write(spi, buf, 3); + + buf[0] = 0x76; + buf[1] = seq[i].value >> 8; + buf[2] = seq[i].value & 0xff; + spi_write(spi, buf, 3); + + mdelay(seq[i].delay); + } +} + +static int lms283gf05_power_set(struct lcd_device *ld, int power) +{ + struct lms283gf05_state *st = lcd_get_data(ld); + struct spi_device *spi = st->spi; + struct lms283gf05_pdata *pdata = spi->dev.platform_data; + + if (power) { + if (pdata) + lms283gf05_reset(pdata->reset_gpio, + pdata->reset_inverted); + lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq)); + } else { + lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq)); + if (pdata) + gpio_set_value(pdata->reset_gpio, + pdata->reset_inverted); + } + + return 0; +} + +static struct lcd_ops lms_ops = { + .set_power = lms283gf05_power_set, + .get_power = NULL, +}; + +static int __devinit lms283gf05_probe(struct spi_device *spi) +{ + struct lms283gf05_state *st; + struct lms283gf05_pdata *pdata = spi->dev.platform_data; + struct lcd_device *ld; + int ret = 0; + + if (pdata != NULL) { + ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET"); + if (ret) + return ret; + + ret = gpio_direction_output(pdata->reset_gpio, + !pdata->reset_inverted); + if (ret) + goto err; + } + + st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL); + if (st == NULL) { + dev_err(&spi->dev, "No memory for device state\n"); + ret = -ENOMEM; + goto err; + } + + ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops); + if (IS_ERR(ld)) { + ret = PTR_ERR(ld); + goto err2; + } + + st->spi = spi; + st->ld = ld; + + 
dev_set_drvdata(&spi->dev, st); + + /* kick in the LCD */ + if (pdata) + lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted); + lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq)); + + return 0; + +err2: + kfree(st); +err: + if (pdata != NULL) + gpio_free(pdata->reset_gpio); + + return ret; +} + +static int __devexit lms283gf05_remove(struct spi_device *spi) +{ + struct lms283gf05_state *st = dev_get_drvdata(&spi->dev); + struct lms283gf05_pdata *pdata = st->spi->dev.platform_data; + + lcd_device_unregister(st->ld); + + if (pdata != NULL) + gpio_free(pdata->reset_gpio); + + kfree(st); + + return 0; +} + +static struct spi_driver lms283gf05_driver = { + .driver = { + .name = "lms283gf05", + .owner = THIS_MODULE, + }, + .probe = lms283gf05_probe, + .remove = __devexit_p(lms283gf05_remove), +}; + +static __init int lms283gf05_init(void) +{ + return spi_register_driver(&lms283gf05_driver); +} + +static __exit void lms283gf05_exit(void) +{ + spi_unregister_driver(&lms283gf05_driver); +} + +module_init(lms283gf05_init); +module_exit(lms283gf05_exit); + +MODULE_AUTHOR("Marek Vasut "); +MODULE_DESCRIPTION("LCD283GF05 LCD"); +MODULE_LICENSE("GPL v2"); diff --git a/include/linux/spi/lms283gf05.h b/include/linux/spi/lms283gf05.h new file mode 100644 index 000000000000..555d254e6606 --- /dev/null +++ b/include/linux/spi/lms283gf05.h @@ -0,0 +1,28 @@ +/* + * lms283gf05.h - Platform glue for Samsung LMS283GF05 LCD + * + * Copyright (C) 2009 Marek Vasut + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA +*/ + +#ifndef _INCLUDE_LINUX_SPI_LMS283GF05_H_ +#define _INCLUDE_LINUX_SPI_LMS283GF05_H_ + +struct lms283gf05_pdata { + unsigned long reset_gpio; + bool reset_inverted; +}; + +#endif /* _INCLUDE_LINUX_SPI_LMS283GF05_H_ */ From 3b96ea9ef837c010f2187e0618d823fbdd8eeb54 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Fri, 14 Aug 2009 15:58:56 +0200 Subject: [PATCH 0098/3409] backlight: Add support for the Avionic Design Xanthos backlight device. This patch adds support for the backlight device found on Avionic Design Xanthos-based boards. Signed-off-by: Thierry Reding Signed-off-by: Richard Purdie --- drivers/video/backlight/Kconfig | 8 ++ drivers/video/backlight/Makefile | 2 +- drivers/video/backlight/adx_bl.c | 178 +++++++++++++++++++++++++++++++ 3 files changed, 187 insertions(+), 1 deletion(-) create mode 100644 drivers/video/backlight/adx_bl.c diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index c0d4a536ea87..22a49944d98d 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -243,3 +243,11 @@ config BACKLIGHT_WM831X help If you have a backlight driven by the ISINK and DCDC of a WM831x PMIC say y to enable the backlight driver for it. 
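The ADX probe routine below expects a single memory resource covering its register block (the CONTROL/BRIGHTNESS/STATUS/ERROR offsets defined there run 0x00 through 0x18). A hedged sketch of the matching board registration; the base address and window size are invented for illustration:

static struct resource adx_bl_resources[] = {
	{
		.start	= 0x10000000,		/* hypothetical base */
		.end	= 0x10000000 + 0x1f,	/* covers CONTROL..ERROR */
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device adx_bl_device = {
	.name		= "adx-backlight",	/* matches the driver name */
	.id		= -1,
	.resource	= adx_bl_resources,
	.num_resources	= ARRAY_SIZE(adx_bl_resources),
};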
+ +config BACKLIGHT_ADX + tristate "Avionic Design Xanthos Backlight Driver" + depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX + default y + help + Say Y to enable the backlight driver on Avionic Design Xanthos-based + boards. diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 0abc014215ba..a483709205ac 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -26,4 +26,4 @@ obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o - +obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c new file mode 100644 index 000000000000..2c3bdfc620b7 --- /dev/null +++ b/drivers/video/backlight/adx_bl.c @@ -0,0 +1,178 @@ +/* + * linux/drivers/video/backlight/adx.c + * + * Copyright (C) 2009 Avionic Design GmbH + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Written by Thierry Reding + */ + +#include +#include +#include +#include +#include + +/* register definitions */ +#define ADX_BACKLIGHT_CONTROL 0x00 +#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0) +#define ADX_BACKLIGHT_BRIGHTNESS 0x08 +#define ADX_BACKLIGHT_STATUS 0x10 +#define ADX_BACKLIGHT_ERROR 0x18 + +struct adxbl { + void __iomem *base; +}; + +static int adx_backlight_update_status(struct backlight_device *bldev) +{ + struct adxbl *bl = bl_get_data(bldev); + u32 value; + + value = bldev->props.brightness; + writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS); + + value = readl(bl->base + ADX_BACKLIGHT_CONTROL); + + if (bldev->props.state & BL_CORE_FBBLANK) + value &= ~ADX_BACKLIGHT_CONTROL_ENABLE; + else + value |= ADX_BACKLIGHT_CONTROL_ENABLE; + + writel(value, bl->base + ADX_BACKLIGHT_CONTROL); + + return 0; +} + +static int adx_backlight_get_brightness(struct backlight_device *bldev) +{ + struct adxbl *bl = bl_get_data(bldev); + u32 brightness; + + brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS); + return brightness & 0xff; +} + +static int adx_backlight_check_fb(struct fb_info *fb) +{ + return 1; +} + +static struct backlight_ops adx_backlight_ops = { + .options = 0, + .update_status = adx_backlight_update_status, + .get_brightness = adx_backlight_get_brightness, + .check_fb = adx_backlight_check_fb, +}; + +static int __devinit adx_backlight_probe(struct platform_device *pdev) +{ + struct backlight_device *bldev; + struct resource *res; + struct adxbl *bl; + int ret = 0; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) { + ret = -ENXIO; + goto out; + } + + res = devm_request_mem_region(&pdev->dev, res->start, + resource_size(res), res->name); + if (!res) { + ret = -ENXIO; + goto out; + } + + bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL); + if (!bl) { + ret = -ENOMEM; + goto out; + } + + bl->base = devm_ioremap_nocache(&pdev->dev, res->start, + resource_size(res)); + if (!bl->base) { + ret = -ENXIO; + goto out; + } + + bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl, + &adx_backlight_ops); + if (!bldev) { + ret = -ENOMEM; + goto out; + } + + bldev->props.max_brightness = 0xff; + bldev->props.brightness = 0xff; + bldev->props.power = FB_BLANK_UNBLANK; + + platform_set_drvdata(pdev, bldev); + +out: + return ret; +} + +static int __devexit adx_backlight_remove(struct 
platform_device *pdev) +{ + struct backlight_device *bldev; + int ret = 0; + + bldev = platform_get_drvdata(pdev); + bldev->props.power = FB_BLANK_UNBLANK; + bldev->props.brightness = 0xff; + backlight_update_status(bldev); + backlight_device_unregister(bldev); + platform_set_drvdata(pdev, NULL); + + return ret; +} + +#ifdef CONFIG_PM +static int adx_backlight_suspend(struct platform_device *pdev, + pm_message_t state) +{ + return 0; +} + +static int adx_backlight_resume(struct platform_device *pdev) +{ + return 0; +} +#else +#define adx_backlight_suspend NULL +#define adx_backlight_resume NULL +#endif + +static struct platform_driver adx_backlight_driver = { + .probe = adx_backlight_probe, + .remove = __devexit_p(adx_backlight_remove), + .suspend = adx_backlight_suspend, + .resume = adx_backlight_resume, + .driver = { + .name = "adx-backlight", + .owner = THIS_MODULE, + }, +}; + +static int __init adx_backlight_init(void) +{ + return platform_driver_register(&adx_backlight_driver); +} + +static void __exit adx_backlight_exit(void) +{ + platform_driver_unregister(&adx_backlight_driver); +} + +module_init(adx_backlight_init); +module_exit(adx_backlight_exit); + +MODULE_AUTHOR("Thierry Reding "); +MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver"); +MODULE_LICENSE("GPL v2"); From 1f27adc2f050836c12deb4d99afe507636537a0b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:29:02 -0700 Subject: [PATCH 0099/3409] ioat: move definitions to dma.h Some of these defines may be useful outside of dma.c and the header is private so there are no namespace pollution concerns. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 14 -------------- drivers/dma/ioat/dma.h | 16 ++++++++++++++++ 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 648797e83295..16c080786a65 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,28 +38,14 @@ #include "registers.h" #include "hw.h" -#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) -#define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) -#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) - -#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) static int ioat_pending_level = 4; module_param(ioat_pending_level, int, 0644); MODULE_PARM_DESC(ioat_pending_level, "high-water mark for pushing ioat descriptors (default: 4)"); -#define RESET_DELAY msecs_to_jiffies(100) -#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) static void ioat_dma_chan_reset_part2(struct work_struct *work); static void ioat_dma_chan_watchdog(struct work_struct *work); -/* - * workaround for IOAT ver.3.0 null descriptor issue - * (channel returns error when size is 0) - */ -#define NULL_DESC_BUFFER_SIZE 1 - /* internal functions */ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e80e787fe64f..ccb400f5e279 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -43,6 +43,22 @@ enum ioat_interrupt { #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) +#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) +#define to_ioatdma_device(dev) container_of(dev, struct 
ioatdma_device, common) +#define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) + +#define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) + +#define RESET_DELAY msecs_to_jiffies(100) +#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) + +/* + * workaround for IOAT ver.3.0 null descriptor issue + * (channel returns error when size is 0) + */ +#define NULL_DESC_BUFFER_SIZE 1 + /** * struct ioatdma_device - internal representation of a IOAT device From e6c0b69a43150c1a37cf342ce5faedf12583bf79 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:29:44 -0700 Subject: [PATCH 0100/3409] ioat: convert ioat_probe to pcim/devm The driver currently duplicates much of what these routines offer, so just use the common code. For example ->irq_mode tracks what interrupt mode was initialized, which duplicates the ->msix_enabled and ->msi_enabled handling in pcim_release. This also adds a check to the return value of dma_async_device_register, which can fail. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 130 ++++++++++++++--------------------------- drivers/dma/ioat/dma.h | 11 ---- drivers/dma/ioat/hw.h | 1 + drivers/dma/ioat/pci.c | 65 ++++++++------------- 4 files changed, 67 insertions(+), 140 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 16c080786a65..65f8b7492a4d 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -121,6 +121,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) u32 xfercap; int i; struct ioat_dma_chan *ioat_chan; + struct device *dev = &device->pdev->dev; /* * IOAT ver.3 workarounds @@ -164,7 +165,7 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) } #endif for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) { device->common.chancnt = i; break; @@ -1450,7 +1451,11 @@ MODULE_PARM_DESC(ioat_interrupt_style, static int ioat_dma_setup_interrupts(struct ioatdma_device *device) { struct ioat_dma_chan *ioat_chan; - int err, i, j, msixcnt; + struct pci_dev *pdev = device->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + int i, j, msixcnt; + int err = -EINVAL; u8 intrctrl = 0; if (!strcmp(ioat_interrupt_style, "msix")) @@ -1461,8 +1466,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) goto msi; if (!strcmp(ioat_interrupt_style, "intx")) goto intx; - dev_err(&device->pdev->dev, "invalid ioat_interrupt_style %s\n", - ioat_interrupt_style); + dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style); goto err_no_irq; msix: @@ -1471,55 +1475,55 @@ msix: for (i = 0; i < msixcnt; i++) device->msix_entries[i].entry = i; - err = pci_enable_msix(device->pdev, device->msix_entries, msixcnt); + err = pci_enable_msix(pdev, device->msix_entries, msixcnt); if (err < 0) goto msi; if (err > 0) goto msix_single_vector; for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; ioat_chan = ioat_lookup_chan_by_index(device, i); - err = request_irq(device->msix_entries[i].vector, - ioat_dma_do_interrupt_msix, - 0, "ioat-msix", ioat_chan); + err = devm_request_irq(dev, msix->vector, + ioat_dma_do_interrupt_msix, 0, + "ioat-msix", ioat_chan); if (err) { for (j = 0; j < i; j++) { + msix = &device->msix_entries[j]; ioat_chan = 
ioat_lookup_chan_by_index(device, j); - free_irq(device->msix_entries[j].vector, - ioat_chan); + devm_free_irq(dev, msix->vector, ioat_chan); } goto msix_single_vector; } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; - device->irq_mode = msix_multi_vector; goto done; msix_single_vector: - device->msix_entries[0].entry = 0; - err = pci_enable_msix(device->pdev, device->msix_entries, 1); + msix = &device->msix_entries[0]; + msix->entry = 0; + err = pci_enable_msix(pdev, device->msix_entries, 1); if (err) goto msi; - err = request_irq(device->msix_entries[0].vector, ioat_dma_do_interrupt, - 0, "ioat-msix", device); + err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt, 0, + "ioat-msix", device); if (err) { - pci_disable_msix(device->pdev); + pci_disable_msix(pdev); goto msi; } - device->irq_mode = msix_single_vector; goto done; msi: - err = pci_enable_msi(device->pdev); + err = pci_enable_msi(pdev); if (err) goto intx; - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - 0, "ioat-msi", device); + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0, + "ioat-msi", device); if (err) { - pci_disable_msi(device->pdev); + pci_disable_msi(pdev); goto intx; } /* @@ -1527,21 +1531,17 @@ msi: */ if (device->version == IOAT_VER_1_2) { u32 dmactrl; - pci_read_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, &dmactrl); + pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMACTRL_OFFSET, dmactrl); + pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); } - device->irq_mode = msi; goto done; intx: - err = request_irq(device->pdev->irq, ioat_dma_do_interrupt, - IRQF_SHARED, "ioat-intx", device); + err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, + IRQF_SHARED, "ioat-intx", device); if (err) goto err_no_irq; - device->irq_mode = intx; done: intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; @@ -1551,60 +1551,26 @@ done: err_no_irq: /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - dev_err(&device->pdev->dev, "no usable interrupts\n"); - device->irq_mode = none; - return -1; + dev_err(dev, "no usable interrupts\n"); + return err; } -/** - * ioat_dma_remove_interrupts - remove whatever interrupts were set - * @device: ioat device - */ -static void ioat_dma_remove_interrupts(struct ioatdma_device *device) +static void ioat_disable_interrupts(struct ioatdma_device *device) { - struct ioat_dma_chan *ioat_chan; - int i; - /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); - - switch (device->irq_mode) { - case msix_multi_vector: - for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); - free_irq(device->msix_entries[i].vector, ioat_chan); - } - pci_disable_msix(device->pdev); - break; - case msix_single_vector: - free_irq(device->msix_entries[0].vector, device); - pci_disable_msix(device->pdev); - break; - case msi: - free_irq(device->pdev->irq, device); - pci_disable_msi(device->pdev); - break; - case intx: - free_irq(device->pdev->irq, device); - break; - case none: - dev_warn(&device->pdev->dev, - "call to %s without interrupts setup\n", __func__); - } - device->irq_mode = none; } struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) { int err; + struct device *dev = &pdev->dev; struct ioatdma_device *device; - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { + device = devm_kzalloc(dev, 
sizeof(*device), GFP_KERNEL); + if (!device) err = -ENOMEM; - goto err_kzalloc; - } device->pdev = pdev; device->reg_base = iobase; device->version = readb(device->reg_base + IOAT_VER_OFFSET); @@ -1651,14 +1617,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, break; } - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine found," + dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", device->common.chancnt, device->version, IOAT_DMA_VERSION); if (!device->common.chancnt) { - dev_err(&device->pdev->dev, - "Intel(R) I/OAT DMA Engine problem found: " + dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " "zero channels detected\n"); goto err_setup_interrupts; } @@ -1671,9 +1635,11 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, if (err) goto err_self_test; - ioat_set_tcp_copy_break(device); + err = dma_async_device_register(&device->common); + if (err) + goto err_self_test; - dma_async_device_register(&device->common); + ioat_set_tcp_copy_break(device); if (device->version != IOAT_VER_3_0) { INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); @@ -1684,16 +1650,12 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, return device; err_self_test: - ioat_dma_remove_interrupts(device); + ioat_disable_interrupts(device); err_setup_interrupts: pci_pool_destroy(device->completion_pool); err_completion_pool: pci_pool_destroy(device->dma_pool); err_dma_pool: - kfree(device); -err_kzalloc: - dev_err(&pdev->dev, - "Intel(R) I/OAT DMA Engine initialization failed\n"); return NULL; } @@ -1705,23 +1667,17 @@ void ioat_dma_remove(struct ioatdma_device *device) if (device->version != IOAT_VER_3_0) cancel_delayed_work(&device->work); - ioat_dma_remove_interrupts(device); + ioat_disable_interrupts(device); dma_async_device_unregister(&device->common); pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - iounmap(device->reg_base); - pci_release_regions(device->pdev); - pci_disable_device(device->pdev); - list_for_each_entry_safe(chan, _chan, &device->common.channels, device_node) { ioat_chan = to_ioat_chan(chan); list_del(&chan->device_node); - kfree(ioat_chan); } - kfree(device); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index ccb400f5e279..5e8d7cfabc21 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -31,14 +31,6 @@ #define IOAT_DMA_VERSION "3.64" -enum ioat_interrupt { - none = 0, - msix_multi_vector = 1, - msix_single_vector = 2, - msi = 3, - intx = 4, -}; - #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) @@ -59,7 +51,6 @@ enum ioat_interrupt { */ #define NULL_DESC_BUFFER_SIZE 1 - /** * struct ioatdma_device - internal representation of a IOAT device * @pdev: PCI-Express device @@ -67,7 +58,6 @@ enum ioat_interrupt { * @dma_pool: for allocating DMA descriptors * @common: embedded struct dma_device * @version: version of ioatdma device - * @irq_mode: which style irq to use * @msix_entries: irq handlers * @idx: per channel data */ @@ -79,7 +69,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; struct dma_device common; u8 version; - enum ioat_interrupt irq_mode; struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_dma_chan *idx[4]; diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index afa57eef86c9..1438fa5c4d1a 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -23,6 +23,7 @@ /* PCI Configuration Space Values */ 
#define IOAT_PCI_VID 0x8086 +#define IOAT_MMIO_BAR 0 /* CB device ID's */ #define IOAT_PCI_DID_5000 0x1A38 diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index d7948bfd8fba..982e38fd177c 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -62,7 +62,6 @@ static struct pci_device_id ioat_pci_tbl[] = { struct ioat_device { struct pci_dev *pdev; - void __iomem *iobase; struct ioatdma_device *dma; struct dca_provider *dca; }; @@ -75,8 +74,10 @@ static int ioat_dca_enabled = 1; module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); +#define DRV_NAME "ioatdma" + static struct pci_driver ioat_pci_driver = { - .name = "ioatdma", + .name = DRV_NAME, .id_table = ioat_pci_tbl, .probe = ioat_probe, .remove = __devexit_p(ioat_remove), @@ -85,47 +86,42 @@ static struct pci_driver ioat_pci_driver = { static int __devinit ioat_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + void __iomem * const *iomap; void __iomem *iobase; + struct device *dev = &pdev->dev; struct ioat_device *device; - unsigned long mmio_start, mmio_len; int err; - err = pci_enable_device(pdev); + err = pcim_enable_device(pdev); if (err) - goto err_enable_device; + return err; - err = pci_request_regions(pdev, ioat_pci_driver.name); + err = pcim_iomap_regions(pdev, 1 << IOAT_MMIO_BAR, DRV_NAME); if (err) - goto err_request_regions; + return err; + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) - goto err_set_dma_mask; + return err; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err) err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) - goto err_set_dma_mask; + return err; - mmio_start = pci_resource_start(pdev, 0); - mmio_len = pci_resource_len(pdev, 0); - iobase = ioremap(mmio_start, mmio_len); - if (!iobase) { - err = -ENOMEM; - goto err_ioremap; - } + device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); + if (!device) + return -ENOMEM; - device = kzalloc(sizeof(*device), GFP_KERNEL); - if (!device) { - err = -ENOMEM; - goto err_kzalloc; - } device->pdev = pdev; pci_set_drvdata(pdev, device); - device->iobase = iobase; + iobase = iomap[IOAT_MMIO_BAR]; pci_set_master(pdev); @@ -146,28 +142,15 @@ static int __devinit ioat_probe(struct pci_dev *pdev, device->dca = ioat3_dca_init(pdev, iobase); break; default: - err = -ENODEV; - break; + return -ENODEV; } - if (!device->dma) - err = -ENODEV; - if (err) - goto err_version; + if (!device->dma) { + dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); + return -ENODEV; + } return 0; - -err_version: - kfree(device); -err_kzalloc: - iounmap(iobase); -err_ioremap: -err_set_dma_mask: - pci_release_regions(pdev); - pci_disable_device(pdev); -err_request_regions: -err_enable_device: - return err; } static void __devexit ioat_remove(struct pci_dev *pdev) @@ -185,8 +168,6 @@ static void __devexit ioat_remove(struct pci_dev *pdev) ioat_dma_remove(device->dma); device->dma = NULL; } - - kfree(device); } static int __init ioat_init_module(void) From bc3c70258526a635325f1f15138a96297879bc1a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:33:42 -0700 Subject: [PATCH 0101/3409] ioat: cleanup some long deref chains and 80 column collisions * reduce device->common. 
to dma-> in ioat_dma_{probe,remove,selftest} * ioat_lookup_chan_by_index to ioat_chan_by_index * multi-line function definitions * ioat_desc_sw.async_tx to ioat_desc_sw.txd * desc->txd. to tx-> in cleanup routine Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 304 +++++++++++++++++++---------------------- drivers/dma/ioat/dma.h | 7 +- 2 files changed, 144 insertions(+), 167 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 65f8b7492a4d..462dae627191 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -55,9 +55,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); static struct ioat_desc_sw * ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); -static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( - struct ioatdma_device *device, - int index) +static inline struct ioat_dma_chan * +ioat_chan_by_index(struct ioatdma_device *device, int index) { return device->idx[index]; } @@ -87,7 +86,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_bit(bit, &attnstatus, BITS_PER_LONG) { - ioat_chan = ioat_lookup_chan_by_index(instance, bit); + ioat_chan = ioat_chan_by_index(instance, bit); tasklet_schedule(&ioat_chan->cleanup_task); } @@ -205,8 +204,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) * descriptors to hw * @chan: DMA channel handle */ -static inline void __ioat1_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) +static inline void +__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) { ioat_chan->pending = 0; writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); @@ -223,8 +222,8 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -static inline void __ioat2_dma_memcpy_issue_pending( - struct ioat_dma_chan *ioat_chan) +static inline void +__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) { ioat_chan->pending = 0; writew(ioat_chan->dmacount, @@ -279,18 +278,18 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) desc = to_ioat_desc(ioat_chan->used_desc.prev); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); break; case IOAT_VER_2_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); /* tell the engine to go with what's left to be done */ @@ -299,7 +298,7 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) break; } - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d reset - %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); @@ -322,7 +321,7 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) chansts = (ioat_chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); if (chanerr) { - 
dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", chan_num(ioat_chan), chansts, chanerr); writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); @@ -367,7 +366,7 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_lookup_chan_by_index(device, i); + ioat_chan = ioat_chan_by_index(device, i); if (ioat_chan->device->version == IOAT_VER_1_2 /* have we started processing anything yet */ @@ -475,7 +474,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) len = first->len; src = first->src; dst = first->dst; - orig_flags = first->async_tx.flags; + orig_flags = first->txd.flags; new = first; spin_lock_bh(&ioat_chan->desc_lock); @@ -484,7 +483,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) do { copy = min_t(size_t, len, ioat_chan->xfercap); - async_tx_ack(&new->async_tx); + async_tx_ack(&new->txd); hw = new->hw; hw->size = copy; @@ -495,7 +494,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) /* chain together the physical address list for the HW */ wmb(); - prev->hw->next = (u64) new->async_tx.phys; + prev->hw->next = (u64) new->txd.phys; len -= copy; dst += copy; @@ -507,27 +506,26 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); + dev_err(to_dev(ioat_chan), "tx submit failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return -ENOMEM; } hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { + if (first->txd.callback) { hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; if (first != new) { /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; + new->txd.callback = first->txd.callback; + new->txd.callback_param + = first->txd.callback_param; + first->txd.callback = NULL; + first->txd.callback_param = NULL; } } new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ + new->txd.flags = orig_flags; /* client is in control of this ack */ /* store the original values for use in later cleanup */ if (new != first) { @@ -541,11 +539,11 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; + ioat_chan->common.cookie = new->txd.cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = - first->async_tx.phys; + first->txd.phys; list_splice_tail(&new_chain, &ioat_chan->used_desc); ioat_chan->dmacount += desc_count; @@ -574,7 +572,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) len = first->len; src = first->src; dst = first->dst; - orig_flags = first->async_tx.flags; + orig_flags = first->txd.flags; new = first; /* @@ -584,7 +582,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) do { copy = min_t(size_t, len, ioat_chan->xfercap); - async_tx_ack(&new->async_tx); + async_tx_ack(&new->txd); hw = new->hw; hw->size = copy; @@ -599,27 +597,26 @@ static dma_cookie_t ioat2_tx_submit(struct 
dma_async_tx_descriptor *tx) } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "tx submit failed\n"); + dev_err(to_dev(ioat_chan), "tx submit failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return -ENOMEM; } hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - if (first->async_tx.callback) { + if (first->txd.callback) { hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; if (first != new) { /* move callback into to last desc */ - new->async_tx.callback = first->async_tx.callback; - new->async_tx.callback_param - = first->async_tx.callback_param; - first->async_tx.callback = NULL; - first->async_tx.callback_param = NULL; + new->txd.callback = first->txd.callback; + new->txd.callback_param + = first->txd.callback_param; + first->txd.callback = NULL; + first->txd.callback_param = NULL; } } new->tx_cnt = desc_count; - new->async_tx.flags = orig_flags; /* client is in control of this ack */ + new->txd.flags = orig_flags; /* client is in control of this ack */ /* store the original values for use in later cleanup */ if (new != first) { @@ -633,7 +630,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->async_tx.cookie = cookie; + ioat_chan->common.cookie = new->txd.cookie = cookie; ioat_chan->dmacount += desc_count; ioat_chan->pending += desc_count; @@ -649,9 +646,8 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) * @ioat_chan: the channel supplying the memory pool for the descriptors * @flags: allocation flags */ -static struct ioat_desc_sw *ioat_dma_alloc_descriptor( - struct ioat_dma_chan *ioat_chan, - gfp_t flags) +static struct ioat_desc_sw * +ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) { struct ioat_dma_descriptor *desc; struct ioat_desc_sw *desc_sw; @@ -670,19 +666,19 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - desc_sw->async_tx.tx_submit = ioat1_tx_submit; + desc_sw->txd.tx_submit = ioat1_tx_submit; break; case IOAT_VER_2_0: case IOAT_VER_3_0: - desc_sw->async_tx.tx_submit = ioat2_tx_submit; + desc_sw->txd.tx_submit = ioat2_tx_submit; break; } desc_sw->hw = desc; - desc_sw->async_tx.phys = phys; + desc_sw->txd.phys = phys; return desc_sw; } @@ -712,9 +708,9 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) /* circle link the hw descriptors */ desc = to_ioat_desc(ioat_chan->free_desc.next); - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { - desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; } } @@ -743,8 +739,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { - dev_err(&ioat_chan->device->pdev->dev, - "CHANERR = %x, clearing\n", chanerr); + dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr); writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); } @@ -752,7 +747,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) for (i = 0; i < ioat_initial_desc_count; i++) { desc = 
ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "Only %d initial descriptors\n", i); break; } @@ -819,14 +814,14 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } break; @@ -836,12 +831,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) ioat_chan->free_desc.next, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); } desc = to_ioat_desc(ioat_chan->free_desc.next); pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); + desc->txd.phys); kfree(desc); INIT_LIST_HEAD(&ioat_chan->free_desc); INIT_LIST_HEAD(&ioat_chan->used_desc); @@ -855,8 +850,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) - dev_err(&ioat_chan->device->pdev->dev, - "Freeing %d in use descriptors!\n", + dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); ioat_chan->last_completion = ioat_chan->completion_addr = 0; @@ -889,8 +883,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) /* try to get another desc */ new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); if (!new) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); + dev_err(to_dev(ioat_chan), "alloc failed\n"); return NULL; } } @@ -936,16 +929,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) for (i = 16; i; i--) { desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, - "alloc failed\n"); + dev_err(to_dev(ioat_chan), "alloc failed\n"); break; } list_add_tail(&desc->node, ioat_chan->used_desc.next); desc->hw->next - = to_ioat_desc(desc->node.next)->async_tx.phys; + = to_ioat_desc(desc->node.next)->txd.phys; to_ioat_desc(desc->node.prev)->hw->next - = desc->async_tx.phys; + = desc->txd.phys; ioat_chan->desccount++; } @@ -962,8 +954,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) return new; } -static struct ioat_desc_sw *ioat_dma_get_next_descriptor( - struct ioat_dma_chan *ioat_chan) +static struct ioat_desc_sw * +ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) { if (!ioat_chan) return NULL; @@ -978,12 +970,9 @@ static struct ioat_desc_sw *ioat_dma_get_next_descriptor( return NULL; } -static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( - struct dma_chan *chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) +static struct dma_async_tx_descriptor * +ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); struct ioat_desc_sw *new; @@ -996,22 +985,19 @@ static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; + new->txd.flags = flags; + return &new->txd; } else { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs 
waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); return NULL; } } -static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( - struct dma_chan *chan, - dma_addr_t dma_dest, - dma_addr_t dma_src, - size_t len, - unsigned long flags) +static struct dma_async_tx_descriptor * +ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); struct ioat_desc_sw *new; @@ -1028,11 +1014,11 @@ static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( new->len = len; new->dst = dma_dest; new->src = dma_src; - new->async_tx.flags = flags; - return &new->async_tx; + new->txd.flags = flags; + return &new->txd; } else { spin_unlock_bh(&ioat_chan->desc_lock); - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); return NULL; @@ -1050,8 +1036,8 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) static void ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) { - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) + if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) pci_unmap_single(ioat_chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), @@ -1063,8 +1049,8 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) PCI_DMA_FROMDEVICE); } - if (!(desc->async_tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->async_tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) + if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) pci_unmap_single(ioat_chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), @@ -1088,6 +1074,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) dma_cookie_t cookie = 0; unsigned long desc_phys; struct ioat_desc_sw *latest_desc; + struct dma_async_tx_descriptor *tx; prefetch(ioat_chan->completion_virt); @@ -1111,8 +1098,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) if ((ioat_chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { - dev_err(&ioat_chan->device->pdev->dev, - "Channel halted, chanerr = %x\n", + dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); /* TODO do something to salvage the situation */ @@ -1145,38 +1131,38 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) case IOAT_VER_1_2: list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { - + tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, * due to exceeding xfercap, perhaps. If so, only the * last one will have a cookie, and require unmapping. 
*/ - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; + if (tx->cookie) { + cookie = tx->cookie; ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } } - if (desc->async_tx.phys != phys_complete) { + if (tx->phys != phys_complete) { /* * a completed entry, but not the last, so clean * up if the client is done with the descriptor */ - if (async_tx_test_ack(&desc->async_tx)) { + if (async_tx_test_ack(tx)) { list_move_tail(&desc->node, &ioat_chan->free_desc); } else - desc->async_tx.cookie = 0; + tx->cookie = 0; } else { /* * last used desc. Do not remove, so we can * append from it, but don't look at it next * time, either */ - desc->async_tx.cookie = 0; + tx->cookie = 0; /* TODO check status bits? */ break; @@ -1191,10 +1177,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) /* work backwards to find latest finished desc */ desc = to_ioat_desc(ioat_chan->used_desc.next); + tx = &desc->txd; latest_desc = NULL; do { desc = to_ioat_desc(desc->node.prev); - desc_phys = (unsigned long)desc->async_tx.phys + desc_phys = (unsigned long)tx->phys & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; if (desc_phys == phys_complete) { latest_desc = desc; @@ -1203,19 +1190,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) } while (&desc->node != ioat_chan->used_desc.prev); if (latest_desc != NULL) { - /* work forwards to clear finished descriptors */ for (desc = to_ioat_desc(ioat_chan->used_desc.prev); &desc->node != latest_desc->node.next && &desc->node != ioat_chan->used_desc.next; desc = to_ioat_desc(desc->node.next)) { - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; - desc->async_tx.cookie = 0; + if (tx->cookie) { + cookie = tx->cookie; + tx->cookie = 0; ioat_dma_unmap(ioat_chan, desc); - if (desc->async_tx.callback) { - desc->async_tx.callback(desc->async_tx.callback_param); - desc->async_tx.callback = NULL; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } } } @@ -1245,10 +1231,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) * @done: if not %NULL, updated with last completed transaction * @used: if not %NULL, updated with last used transaction */ -static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, - dma_cookie_t cookie, - dma_cookie_t *done, - dma_cookie_t *used) +static enum dma_status +ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); dma_cookie_t last_used; @@ -1290,7 +1275,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) desc = ioat_dma_get_next_descriptor(ioat_chan); if (!desc) { - dev_err(&ioat_chan->device->pdev->dev, + dev_err(to_dev(ioat_chan), "Unable to start null desc - get next desc failed\n"); spin_unlock_bh(&ioat_chan->desc_lock); return; @@ -1303,15 +1288,15 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) desc->hw->size = NULL_DESC_BUFFER_SIZE; desc->hw->src_addr = 0; desc->hw->dst_addr = 0; - async_tx_ack(&desc->async_tx); + async_tx_ack(&desc->txd); switch (ioat_chan->device->version) { case IOAT_VER_1_2: desc->hw->next = 0; list_add_tail(&desc->node, &ioat_chan->used_desc); - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, 
ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); writeb(IOAT_CHANCMD_START, ioat_chan->reg_base @@ -1319,9 +1304,9 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) break; case IOAT_VER_2_0: case IOAT_VER_3_0: - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, + writel(((u64) desc->txd.phys) >> 32, ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); ioat_chan->dmacount++; @@ -1352,6 +1337,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) int i; u8 *src; u8 *dest; + struct dma_device *dma = &device->common; + struct device *dev = &device->pdev->dev; struct dma_chan *dma_chan; struct dma_async_tx_descriptor *tx; dma_addr_t dma_dest, dma_src; @@ -1375,26 +1362,21 @@ static int ioat_dma_self_test(struct ioatdma_device *device) src[i] = (u8)i; /* Start copy, using first DMA channel */ - dma_chan = container_of(device->common.channels.next, - struct dma_chan, + dma_chan = container_of(dma->channels.next, struct dma_chan, device_node); - if (device->common.device_alloc_chan_resources(dma_chan) < 1) { - dev_err(&device->pdev->dev, - "selftest cannot allocate chan resource\n"); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + dev_err(dev, "selftest cannot allocate chan resource\n"); err = -ENODEV; goto out; } - dma_src = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, - DMA_TO_DEVICE); - dma_dest = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, - DMA_FROM_DEVICE); + dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); + dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { - dev_err(&device->pdev->dev, - "Self-test prep failed, disabling\n"); + dev_err(dev, "Self-test prep failed, disabling\n"); err = -ENODEV; goto free_resources; } @@ -1405,32 +1387,29 @@ static int ioat_dma_self_test(struct ioatdma_device *device) tx->callback_param = &cmp; cookie = tx->tx_submit(tx); if (cookie < 0) { - dev_err(&device->pdev->dev, - "Self-test setup failed, disabling\n"); + dev_err(dev, "Self-test setup failed, disabling\n"); err = -ENODEV; goto free_resources; } - device->common.device_issue_pending(dma_chan); + dma->device_issue_pending(dma_chan); tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (tmo == 0 || - device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) + dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { - dev_err(&device->pdev->dev, - "Self-test copy timed out, disabling\n"); + dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; goto free_resources; } if (memcmp(src, dest, IOAT_TEST_SIZE)) { - dev_err(&device->pdev->dev, - "Self-test copy failed compare, disabling\n"); + dev_err(dev, "Self-test copy failed compare, disabling\n"); err = -ENODEV; goto free_resources; } free_resources: - device->common.device_free_chan_resources(dma_chan); + dma->device_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); @@ -1483,15 +1462,14 @@ msix: for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; - ioat_chan = ioat_lookup_chan_by_index(device, i); + ioat_chan 
= ioat_chan_by_index(device, i); err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt_msix, 0, "ioat-msix", ioat_chan); if (err) { for (j = 0; j < i; j++) { msix = &device->msix_entries[j]; - ioat_chan = - ioat_lookup_chan_by_index(device, j); + ioat_chan = ioat_chan_by_index(device, j); devm_free_irq(dev, msix->vector, ioat_chan); } goto msix_single_vector; @@ -1561,12 +1539,13 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase) +struct ioatdma_device * +ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) { int err; struct device *dev = &pdev->dev; struct ioatdma_device *device; + struct dma_device *dma; device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); if (!device) @@ -1574,6 +1553,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, device->pdev = pdev; device->reg_base = iobase; device->version = readb(device->reg_base + IOAT_VER_OFFSET); + dma = &device->common; /* DMA coherent memory pool for DMA descriptor allocations */ device->dma_pool = pci_pool_create("dma_desc_pool", pdev, @@ -1592,36 +1572,32 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, goto err_completion_pool; } - INIT_LIST_HEAD(&device->common.channels); + INIT_LIST_HEAD(&dma->channels); ioat_dma_enumerate_channels(device); - device->common.device_alloc_chan_resources = - ioat_dma_alloc_chan_resources; - device->common.device_free_chan_resources = - ioat_dma_free_chan_resources; - device->common.dev = &pdev->dev; + dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; + dma->device_free_chan_resources = ioat_dma_free_chan_resources; + dma->dev = &pdev->dev; - dma_cap_set(DMA_MEMCPY, device->common.cap_mask); - device->common.device_is_tx_complete = ioat_dma_is_complete; + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_is_tx_complete = ioat_dma_is_complete; switch (device->version) { case IOAT_VER_1_2: - device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - device->common.device_issue_pending = - ioat1_dma_memcpy_issue_pending; + dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; break; case IOAT_VER_2_0: case IOAT_VER_3_0: - device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - device->common.device_issue_pending = - ioat2_dma_memcpy_issue_pending; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; break; } dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", - device->common.chancnt, device->version, IOAT_DMA_VERSION); + dma->chancnt, device->version, IOAT_DMA_VERSION); - if (!device->common.chancnt) { + if (!dma->chancnt) { dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " "zero channels detected\n"); goto err_setup_interrupts; @@ -1635,7 +1611,7 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, if (err) goto err_self_test; - err = dma_async_device_register(&device->common); + err = dma_async_device_register(dma); if (err) goto err_self_test; @@ -1663,19 +1639,19 @@ void ioat_dma_remove(struct ioatdma_device *device) { struct dma_chan *chan, *_chan; struct ioat_dma_chan *ioat_chan; + struct dma_device *dma = &device->common; if (device->version != IOAT_VER_3_0) cancel_delayed_work(&device->work); ioat_disable_interrupts(device); - 
dma_async_device_unregister(&device->common); + dma_async_device_unregister(dma); pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - list_for_each_entry_safe(chan, _chan, - &device->common.channels, device_node) { + list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) { ioat_chan = to_ioat_chan(chan); list_del(&chan->device_node); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5e8d7cfabc21..c5eabae4c1b9 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -38,7 +38,8 @@ #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) -#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) +#define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) +#define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) @@ -123,7 +124,7 @@ struct ioat_dma_chan { * @node: this descriptor will either be on the free list, * or attached to a transaction list (async_tx.tx_list) * @tx_cnt: number of descriptors required to complete the transaction - * @async_tx: the generic software descriptor for all engines + * @txd: the generic software descriptor for all engines */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; @@ -132,7 +133,7 @@ struct ioat_desc_sw { size_t len; dma_addr_t src; dma_addr_t dst; - struct dma_async_tx_descriptor async_tx; + struct dma_async_tx_descriptor txd; }; static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) From b31b78f1ab7806759622b703357e39a21f757281 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:42:32 -0700 Subject: [PATCH 0102/3409] ioat: kill function prototype ifdef guards The only .c files that utilize these protected prototypes depend on CONFIG_INTEL_IOATDMA=y, so there is no value gained in providing empty prototypes. [ Impact: pure cleanup ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index c5eabae4c1b9..6e27ddb1e98a 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -153,19 +153,10 @@ static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) #endif } -#if defined(CONFIG_INTEL_IOATDMA) || defined(CONFIG_INTEL_IOATDMA_MODULE) struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -#else -#define ioat_dma_probe(pdev, iobase) NULL -#define ioat_dma_remove(device) do { } while (0) -#define ioat_dca_init(pdev, iobase) NULL -#define ioat2_dca_init(pdev, iobase) NULL -#define ioat3_dca_init(pdev, iobase) NULL -#endif - #endif /* IOATDMA_H */ From f2427e276ffec5ce599c6bc116e0927269a360ef Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:42:38 -0700 Subject: [PATCH 0103/3409] ioat: split ioat_dma_probe into core/version-specific routines Towards the removal of ioatdma_device.version split the initialization path into distinct versions. 
This conversion: 1/ moves version specific probe code to version specific routines 2/ removes the need for ioat_device 3/ turns off the ioat1 msi quirk if the device is reinitialized for intx Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 253 +++++++++++++++++++++++++---------------- drivers/dma/ioat/dma.h | 23 ++-- drivers/dma/ioat/pci.c | 81 +++++++------ 3 files changed, 201 insertions(+), 156 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 462dae627191..b7508041c6d7 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -121,52 +121,21 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) int i; struct ioat_dma_chan *ioat_chan; struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; - /* - * IOAT ver.3 workarounds - */ - if (device->version == IOAT_VER_3_0) { - u32 chan_err_mask; - u16 dev_id; - u32 dmauncerrsts; - - /* - * Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - chan_err_mask = 0x3E07; - pci_write_config_dword(device->pdev, - IOAT_PCI_CHANERRMASK_INT_OFFSET, - chan_err_mask); - - /* - * Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(device->pdev, - IOAT_PCI_DEVICE_ID_OFFSET, - &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { - dmauncerrsts = 0x10; - pci_write_config_dword(device->pdev, - IOAT_PCI_DMAUNCERRSTS_OFFSET, - dmauncerrsts); - } - } - - device->common.chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); xfercap = (xfercap_scale == 0 ? 
-1 : (1UL << xfercap_scale)); #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL - if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) { - device->common.chancnt--; - } + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) + dma->chancnt--; #endif - for (i = 0; i < device->common.chancnt; i++) { + for (i = 0; i < dma->chancnt; i++) { ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) { - device->common.chancnt = i; + dma->chancnt = i; break; } @@ -175,28 +144,20 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) ioat_chan->xfercap = xfercap; ioat_chan->desccount = 0; INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); - if (ioat_chan->device->version == IOAT_VER_2_0) - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | - IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); - else if (ioat_chan->device->version == IOAT_VER_3_0) - writel(IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); spin_lock_init(&ioat_chan->cleanup_lock); spin_lock_init(&ioat_chan->desc_lock); INIT_LIST_HEAD(&ioat_chan->free_desc); INIT_LIST_HEAD(&ioat_chan->used_desc); /* This should be made common somewhere in dmaengine.c */ ioat_chan->common.device = &device->common; - list_add_tail(&ioat_chan->common.device_node, - &device->common.channels); + list_add_tail(&ioat_chan->common.device_node, &dma->channels); device->idx[i] = ioat_chan; tasklet_init(&ioat_chan->cleanup_task, ioat_dma_cleanup_tasklet, (unsigned long) ioat_chan); tasklet_disable(&ioat_chan->cleanup_task); } - return device->common.chancnt; + return dma->chancnt; } /** @@ -1504,15 +1465,6 @@ msi: pci_disable_msi(pdev); goto intx; } - /* - * CB 1.2 devices need a bit set in configuration space to enable MSI - */ - if (device->version == IOAT_VER_1_2) { - u32 dmactrl; - pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); - dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; - pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); - } goto done; intx: @@ -1522,6 +1474,8 @@ intx: goto err_no_irq; done: + if (device->intr_quirk) + device->intr_quirk(device); intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN; writeb(intrctrl, device->reg_base + IOAT_INTRCTRL_OFFSET); return 0; @@ -1539,21 +1493,12 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -struct ioatdma_device * -ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) +static int ioat_probe(struct ioatdma_device *device) { - int err; + int err = -ENODEV; + struct dma_device *dma = &device->common; + struct pci_dev *pdev = device->pdev; struct device *dev = &pdev->dev; - struct ioatdma_device *device; - struct dma_device *dma; - - device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); - if (!device) - err = -ENOMEM; - device->pdev = pdev; - device->reg_base = iobase; - device->version = readb(device->reg_base + IOAT_VER_OFFSET); - dma = &device->common; /* DMA coherent memory pool for DMA descriptor allocations */ device->dma_pool = pci_pool_create("dma_desc_pool", pdev, @@ -1572,26 +1517,13 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) goto err_completion_pool; } - INIT_LIST_HEAD(&dma->channels); ioat_dma_enumerate_channels(device); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat_dma_free_chan_resources; - dma->dev = &pdev->dev; - - dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->device_is_tx_complete = ioat_dma_is_complete; - switch (device->version) 
{ - case IOAT_VER_1_2: - dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; - dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - break; - } + dma->dev = &pdev->dev; dev_err(dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", @@ -1611,19 +1543,7 @@ ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase) if (err) goto err_self_test; - err = dma_async_device_register(dma); - if (err) - goto err_self_test; - - ioat_set_tcp_copy_break(device); - - if (device->version != IOAT_VER_3_0) { - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); - schedule_delayed_work(&device->work, - WATCHDOG_DELAY); - } - - return device; + return 0; err_self_test: ioat_disable_interrupts(device); @@ -1632,7 +1552,142 @@ err_setup_interrupts: err_completion_pool: pci_pool_destroy(device->dma_pool); err_dma_pool: - return NULL; + return err; +} + +static int ioat_register(struct ioatdma_device *device) +{ + int err = dma_async_device_register(&device->common); + + if (err) { + ioat_disable_interrupts(device); + pci_pool_destroy(device->completion_pool); + pci_pool_destroy(device->dma_pool); + } + + return err; +} + +/* ioat1_intr_quirk - fix up dma ctrl register to enable / disable msi */ +static void ioat1_intr_quirk(struct ioatdma_device *device) +{ + struct pci_dev *pdev = device->pdev; + u32 dmactrl; + + pci_read_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, &dmactrl); + if (pdev->msi_enabled) + dmactrl |= IOAT_PCI_DMACTRL_MSI_EN; + else + dmactrl &= ~IOAT_PCI_DMACTRL_MSI_EN; + pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); +} + +int ioat1_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + int err; + + device->intr_quirk = ioat1_intr_quirk; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(4096); + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat2_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *chan; + struct ioat_dma_chan *ioat_chan; + int err; + + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(2048); + + list_for_each_entry(chan, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(chan); + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat2_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat3_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *chan; + struct ioat_dma_chan *ioat_chan; + 
int err; + u16 dev_id; + + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; + + /* -= IOAT ver.3 workarounds =- */ + /* Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) + pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(262144); + + list_for_each_entry(chan, &dma->channels, device_node) { + ioat_chan = to_ioat_chan(chan); + writel(IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat3_dca_init(pdev, device->reg_base); + + return err; } void ioat_dma_remove(struct ioatdma_device *device) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 6e27ddb1e98a..1226e35f2709 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -61,6 +61,8 @@ * @version: version of ioatdma device * @msix_entries: irq handlers * @idx: per channel data + * @dca: direct cache access context + * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) */ struct ioatdma_device { @@ -73,6 +75,8 @@ struct ioatdma_device { struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_dma_chan *idx[4]; + struct dca_provider *dca; + void (*intr_quirk)(struct ioatdma_device *device); }; /** @@ -136,25 +140,16 @@ struct ioat_desc_sw { struct dma_async_tx_descriptor txd; }; -static inline void ioat_set_tcp_copy_break(struct ioatdma_device *dev) +static inline void ioat_set_tcp_copy_break(unsigned long copybreak) { #ifdef CONFIG_NET_DMA - switch (dev->version) { - case IOAT_VER_1_2: - sysctl_tcp_dma_copybreak = 4096; - break; - case IOAT_VER_2_0: - sysctl_tcp_dma_copybreak = 2048; - break; - case IOAT_VER_3_0: - sysctl_tcp_dma_copybreak = 262144; - break; - } + sysctl_tcp_dma_copybreak = copybreak; #endif } -struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, - void __iomem *iobase); +int ioat1_dma_probe(struct ioatdma_device *dev, int dca); +int ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int ioat3_dma_probe(struct ioatdma_device *dev, int dca); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 982e38fd177c..55414d88ac1b 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -60,14 +60,8 @@ static struct pci_device_id ioat_pci_tbl[] = { { 0, } }; -struct ioat_device { - struct pci_dev *pdev; - struct ioatdma_device *dma; - struct dca_provider *dca; -}; - -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id); +static int __devinit ioat_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id); static void __devexit ioat_remove(struct pci_dev *pdev); static int ioat_dca_enabled = 1; @@ -79,17 +73,28 @@ MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)" static struct pci_driver ioat_pci_driver = { .name = 
DRV_NAME, .id_table = ioat_pci_tbl, - .probe = ioat_probe, + .probe = ioat_pci_probe, .remove = __devexit_p(ioat_remove), }; -static int __devinit ioat_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static struct ioatdma_device * +alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) +{ + struct device *dev = &pdev->dev; + struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + + if (!d) + return NULL; + d->pdev = pdev; + d->reg_base = iobase; + return d; +} + +static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { void __iomem * const *iomap; - void __iomem *iobase; struct device *dev = &pdev->dev; - struct ioat_device *device; + struct ioatdma_device *device; int err; err = pcim_enable_device(pdev); @@ -119,33 +124,24 @@ static int __devinit ioat_probe(struct pci_dev *pdev, if (!device) return -ENOMEM; - device->pdev = pdev; - pci_set_drvdata(pdev, device); - iobase = iomap[IOAT_MMIO_BAR]; - pci_set_master(pdev); - switch (readb(iobase + IOAT_VER_OFFSET)) { - case IOAT_VER_1_2: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat_dca_init(pdev, iobase); - break; - case IOAT_VER_2_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat2_dca_init(pdev, iobase); - break; - case IOAT_VER_3_0: - device->dma = ioat_dma_probe(pdev, iobase); - if (device->dma && ioat_dca_enabled) - device->dca = ioat3_dca_init(pdev, iobase); - break; - default: - return -ENODEV; - } + device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); + if (!device) + return -ENOMEM; + pci_set_drvdata(pdev, device); - if (!device->dma) { + device->version = readb(device->reg_base + IOAT_VER_OFFSET); + if (device->version == IOAT_VER_1_2) + err = ioat1_dma_probe(device, ioat_dca_enabled); + else if (device->version == IOAT_VER_2_0) + err = ioat2_dma_probe(device, ioat_dca_enabled); + else if (device->version >= IOAT_VER_3_0) + err = ioat3_dma_probe(device, ioat_dca_enabled); + else + return -ENODEV; + + if (err) { dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); return -ENODEV; } @@ -155,7 +151,10 @@ static int __devinit ioat_probe(struct pci_dev *pdev, static void __devexit ioat_remove(struct pci_dev *pdev) { - struct ioat_device *device = pci_get_drvdata(pdev); + struct ioatdma_device *device = pci_get_drvdata(pdev); + + if (!device) + return; dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { @@ -163,11 +162,7 @@ static void __devexit ioat_remove(struct pci_dev *pdev) free_dca_provider(device->dca); device->dca = NULL; } - - if (device->dma) { - ioat_dma_remove(device->dma); - device->dma = NULL; - } + ioat_dma_remove(device); } static int __init ioat_init_module(void) From 77867fff033ea549096c49d863c564ad7d8be36f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: [PATCH 0104/3409] ioat: fix type mismatch for ->dmacount ->dmacount tracks the sequence number of active descriptors. It is written to the DMACOUNT register to update the channel's view of pending descriptors in the chain. The register is 16-bits so ->dmacount should be unsigned and 16-bit as well. Also modify ->desccount to maintain alignment. This was never a problem in practice because we never compared dmacount values, but this is a bug waiting to happen. 
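[ Editor's illustrative aside, not driver code: a minimal user-space C
  sketch of the wraparound hazard described above. Two sequence counters
  only compare correctly across a 16-bit wrap when both sides use the
  same unsigned 16-bit arithmetic; mixing in a wider signed type gives
  the wrong answer, which is exactly the latent bug this patch removes. ]

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t head = 0x0005;	/* has wrapped past 0xffff */
	uint16_t tail = 0xfffb;	/* has not wrapped yet */

	/* modulo-2^16 subtraction correctly reports 10 descriptors in flight */
	printf("in flight: %u\n", (uint16_t)(head - tail));

	/* a comparison in a wider type wrongly claims head is behind tail */
	printf("head < tail? %d\n", (int)head < (int)tail);

	return 0;
}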
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1226e35f2709..9f0c853b6a77 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -102,8 +102,8 @@ struct ioat_dma_chan { struct delayed_work work; int pending; - int dmacount; - int desccount; + u16 dmacount; + u16 desccount; struct ioatdma_device *device; struct dma_chan common; From c7984f4e4e3af3bf8027d636283ea8658c7f80b9 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: [PATCH 0105/3409] ioat: define descriptor control bit-field This cleans up a mess of and'ing and or'ing bit definitions, and allows simple assignments from the specified dma_ctrl_flags parameter. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 28 ++++++++++++++++------------ drivers/dma/ioat/hw.h | 38 ++++++++++++++++++-------------------- 2 files changed, 34 insertions(+), 32 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index b7508041c6d7..4840d4805d8c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -472,9 +472,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) return -ENOMEM; } - hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw->ctl_f.compl_write = 1; if (first->txd.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + hw->ctl_f.int_en = 1; if (first != new) { /* move callback into to last desc */ new->txd.callback = first->txd.callback; @@ -563,9 +563,9 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) return -ENOMEM; } - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw->ctl_f.compl_write = 1; if (first->txd.callback) { - hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + hw->ctl_f.int_en = 1; if (first != new) { /* move callback into to last desc */ new->txd.callback = first->txd.callback; @@ -878,7 +878,8 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) noop_desc = to_ioat_desc(ioat_chan->used_desc.next); /* set size to non-zero value (channel returns error when size is 0) */ noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; - noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; + noop_desc->hw->ctl = 0; + noop_desc->hw->ctl_f.null = 1; noop_desc->hw->src_addr = 0; noop_desc->hw->dst_addr = 0; @@ -1230,6 +1231,7 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) { struct ioat_desc_sw *desc; + struct ioat_dma_descriptor *hw; spin_lock_bh(&ioat_chan->desc_lock); @@ -1242,17 +1244,19 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) return; } - desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL - | IOAT_DMA_DESCRIPTOR_CTL_INT_GN - | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.compl_write = 1; /* set size to non-zero value (channel returns error when size is 0) */ - desc->hw->size = NULL_DESC_BUFFER_SIZE; - desc->hw->src_addr = 0; - desc->hw->dst_addr = 0; + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; async_tx_ack(&desc->txd); switch (ioat_chan->device->version) { case IOAT_VER_1_2: - desc->hw->next = 0; + hw->next = 0; list_add_tail(&desc->node, &ioat_chan->used_desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 1438fa5c4d1a..e13f3ed47763 
100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -40,7 +40,24 @@ struct ioat_dma_descriptor { uint32_t size; - uint32_t ctl; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int src_snoop_dis:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int null:1; + unsigned int src_brk:1; + unsigned int dest_brk:1; + unsigned int bundle:1; + unsigned int dest_dca:1; + unsigned int hint:1; + unsigned int rsvd2:13; + unsigned int op:8; + } ctl_f; + }; uint64_t src_addr; uint64_t dst_addr; uint64_t next; @@ -49,23 +66,4 @@ struct ioat_dma_descriptor { uint64_t user1; uint64_t user2; }; - -#define IOAT_DMA_DESCRIPTOR_CTL_INT_GN 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_SRC_SN 0x00000002 -#define IOAT_DMA_DESCRIPTOR_CTL_DST_SN 0x00000004 -#define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 -#define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 -#define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 -#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 -#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 -#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 -#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 -#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 - -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 - -#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 -#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 - #endif From a0587bcf3e64029a4da2a5666cad18df38db0d56 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:04 -0700 Subject: [PATCH 0106/3409] ioat1: move descriptor allocation from submit to prep The async_tx api assumes that after a successful ->prep a subsequent ->submit will not fail due to a lack of resources. This also fixes a bug in the allocation failure case. Previously the descriptors allocated prior to the allocation failure would not be returned to the free list. 
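The contract being honored is worth spelling out: ->prep claims every resource the transfer needs and is allowed to fail, while ->submit only links finished work in and must be infallible. A minimal self-contained sketch of that pattern, using a toy free-list pool (all names here are made up, not the driver's):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical free-list pool standing in for the channel's free_desc
 * list; layout and names are illustrative only.
 */
struct desc {
	struct desc *next;
};

struct pool {
	struct desc *free;
};

static struct desc *pool_get(struct pool *p)
{
	struct desc *d = p->free;

	if (d)
		p->free = d->next;
	return d;
}

static void pool_put_chain(struct pool *p, struct desc *head)
{
	while (head) {
		struct desc *next = head->next;

		head->next = p->free;
		p->free = head;
		head = next;
	}
}

/* The ->prep side: claim every descriptor up front.  On a mid-chain
 * allocation failure the partial chain goes back to the pool, so
 * nothing leaks, and ->submit can later link the finished chain in
 * without allocating, which is why it cannot fail.
 */
static struct desc *prep_chain(struct pool *p, size_t len, size_t xfercap)
{
	struct desc *head = NULL, **tail = &head;

	while (len) {
		struct desc *d = pool_get(p);

		if (!d) {
			pool_put_chain(p, head);
			return NULL;	/* prep may fail; submit must not */
		}
		d->next = NULL;
		*tail = d;
		tail = &d->next;
		len -= len < xfercap ? len : xfercap;
	}
	return head;
}

int main(void)
{
	struct desc nodes[4];
	struct pool p = { .free = NULL };
	int i;

	for (i = 0; i < 4; i++) {
		nodes[i].next = p.free;
		p.free = &nodes[i];
	}
	/* 5 units at 2 per descriptor: needs 3 of the 4 descriptors */
	printf("%s\n", prep_chain(&p, 5, 2) ? "prepped" : "failed");
	/* 9 units would need 5: fails, and the partial chain is returned */
	printf("%s\n", prep_chain(&p, 9, 2) ? "prepped" : "failed");
	return 0;
}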
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 152 +++++++++++++++++------------------------ 1 file changed, 64 insertions(+), 88 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 4840d4805d8c..c4333be07608 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -420,95 +420,29 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *prev, *new; - struct ioat_dma_descriptor *hw; + struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); + struct ioat_desc_sw *first; + struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; - LIST_HEAD(new_chain); - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->txd.flags; - new = first; spin_lock_bh(&ioat_chan->desc_lock); - prev = to_ioat_desc(ioat_chan->used_desc.prev); - prefetch(prev->hw); - do { - copy = min_t(size_t, len, ioat_chan->xfercap); - - async_tx_ack(&new->txd); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - hw->next = 0; - - /* chain together the physical address list for the HW */ - wmb(); - prev->hw->next = (u64) new->txd.phys; - - len -= copy; - dst += copy; - src += copy; - - list_add_tail(&new->node, &new_chain); - desc_count++; - prev = new; - } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); - - if (!new) { - dev_err(to_dev(ioat_chan), "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); - return -ENOMEM; - } - - hw->ctl_f.compl_write = 1; - if (first->txd.callback) { - hw->ctl_f.int_en = 1; - if (first != new) { - /* move callback into to last desc */ - new->txd.callback = first->txd.callback; - new->txd.callback_param - = first->txd.callback_param; - first->txd.callback = NULL; - first->txd.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->txd.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - /* cookie incr and addition to used_list must be atomic */ cookie = ioat_chan->common.cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->txd.cookie = cookie; + ioat_chan->common.cookie = tx->cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ - to_ioat_desc(ioat_chan->used_desc.prev)->hw->next = - first->txd.phys; - list_splice_tail(&new_chain, &ioat_chan->used_desc); + first = to_ioat_desc(tx->tx_list.next); + chain_tail = to_ioat_desc(ioat_chan->used_desc.prev); + /* make descriptor updates globally visible before chaining */ + wmb(); + chain_tail->hw->next = first->txd.phys; + list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc); - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; + ioat_chan->dmacount += desc->tx_cnt; + ioat_chan->pending += desc->tx_cnt; if (ioat_chan->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat_chan); spin_unlock_bh(&ioat_chan->desc_lock); @@ -937,24 +871,66 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, 
unsigned long flags) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioat_desc_sw *new; + struct ioat_desc_sw *desc; + size_t copy; + LIST_HEAD(chain); + dma_addr_t src = dma_src; + dma_addr_t dest = dma_dest; + size_t total_len = len; + struct ioat_dma_descriptor *hw = NULL; + int tx_cnt = 0; spin_lock_bh(&ioat_chan->desc_lock); - new = ioat_dma_get_next_descriptor(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + desc = ioat_dma_get_next_descriptor(ioat_chan); + do { + if (!desc) + break; - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->txd.flags = flags; - return &new->txd; - } else { + tx_cnt++; + copy = min_t(size_t, len, ioat_chan->xfercap); + + hw = desc->hw; + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dest; + + list_add_tail(&desc->node, &chain); + + len -= copy; + dest += copy; + src += copy; + if (len) { + struct ioat_desc_sw *next; + + async_tx_ack(&desc->txd); + next = ioat_dma_get_next_descriptor(ioat_chan); + hw->next = next ? next->txd.phys : 0; + desc = next; + } else + hw->next = 0; + } while (len); + + if (!desc) { dev_err(to_dev(ioat_chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + list_splice(&chain, &ioat_chan->free_desc); + spin_unlock_bh(&ioat_chan->desc_lock); return NULL; } + spin_unlock_bh(&ioat_chan->desc_lock); + + desc->txd.flags = flags; + desc->tx_cnt = tx_cnt; + desc->src = dma_src; + desc->dst = dma_dest; + desc->len = total_len; + list_splice(&chain, &desc->txd.tx_list); + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + + return &desc->txd; } static struct dma_async_tx_descriptor * From a6a39ca1badbeafc16941fcf2c1010c8c65c8ddc Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:05 -0700 Subject: [PATCH 0107/3409] ioat: fix self test interrupts If a callback is to be attached to a descriptor the channel needs to know at ->prep time so it can set the interrupt enable bit. This is in preparation for moving ioat2 descriptor preparation from ->submit to ->prep. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index c4333be07608..cc5c557ddc83 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -1313,7 +1313,8 @@ static int ioat_dma_self_test(struct ioatdma_device *device) dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE); dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE); - flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE; + flags = DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_DEST_UNMAP_SINGLE | + DMA_PREP_INTERRUPT; tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, IOAT_TEST_SIZE, flags); if (!tx) { From dcbc853af6f0c056088e4df0794d9bf36184809e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 28 Jul 2009 14:44:50 -0700 Subject: [PATCH 0108/3409] ioat: prepare the code for ioat[12]_dma_chan split Prepare the code for the conversion of the ioat2 linked-list-ring into a native ring buffer. After this conversion ioat2 channels will share less of the ioat1 infrastructure, but there will still be places where sharing is possible. struct ioat_chan_common is created to house the channel attributes that will remain common between ioat1 and ioat2 channels.
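A standalone sketch of the embedding idiom this split relies on (struct and field names here are illustrative only; the real definitions appear in the dma.h hunk below): common state is embedded in a version-specific wrapper and recovered with the container_of pattern.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct chan_common {	/* fields shared by all hardware versions */
	int completed_cookie;
};

struct chan_v1 {	/* version-specific wrapper */
	struct chan_common base;
	int pending;
};

int main(void)
{
	struct chan_v1 ioat = { .base.completed_cookie = 7, .pending = 2 };
	struct chan_common *chan = &ioat.base;

	/* recover the wrapper from a pointer to the embedded common part */
	struct chan_v1 *back = container_of(chan, struct chan_v1, base);

	printf("%d %d\n", back->base.completed_cookie, back->pending);
	return 0;
}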
For every routine that accesses both common and hardware specific fields the old unified 'ioat_chan' pointer is split into an 'ioat' and 'chan' pointer. Where 'chan' references common fields and 'ioat' the hardware/version specific. [ Impact: pure structure member movement/variable renames, no logic changes ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 709 +++++++++++++++++++++-------------------- drivers/dma/ioat/dma.h | 49 ++- 2 files changed, 389 insertions(+), 369 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index cc5c557ddc83..2e81e0c76e61 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -47,15 +47,15 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work); static void ioat_dma_chan_watchdog(struct work_struct *work); /* internal functions */ -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat); +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat); static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat); static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat); -static inline struct ioat_dma_chan * +static inline struct ioat_chan_common * ioat_chan_by_index(struct ioatdma_device *device, int index) { return device->idx[index]; @@ -69,7 +69,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) { struct ioatdma_device *instance = data; - struct ioat_dma_chan *ioat_chan; + struct ioat_chan_common *chan; unsigned long attnstatus; int bit; u8 intrctrl; @@ -86,8 +86,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET); for_each_bit(bit, &attnstatus, BITS_PER_LONG) { - ioat_chan = ioat_chan_by_index(instance, bit); - tasklet_schedule(&ioat_chan->cleanup_task); + chan = ioat_chan_by_index(instance, bit); + tasklet_schedule(&chan->cleanup_task); } writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET); @@ -101,9 +101,9 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data) */ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) { - struct ioat_dma_chan *ioat_chan = data; + struct ioat_chan_common *chan = data; - tasklet_schedule(&ioat_chan->cleanup_task); + tasklet_schedule(&chan->cleanup_task); return IRQ_HANDLED; } @@ -119,7 +119,8 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) u8 xfercap_scale; u32 xfercap; int i; - struct ioat_dma_chan *ioat_chan; + struct ioat_chan_common *chan; + struct ioat_dma_chan *ioat; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; @@ -133,29 +134,30 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) dma->chancnt--; #endif for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); - if (!ioat_chan) { + ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); + if (!ioat) { dma->chancnt = i; break; } - ioat_chan->device = device; - ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); - ioat_chan->xfercap = xfercap; - ioat_chan->desccount = 0; - INIT_DELAYED_WORK(&ioat_chan->work, ioat_dma_chan_reset_part2); 
- spin_lock_init(&ioat_chan->cleanup_lock); - spin_lock_init(&ioat_chan->desc_lock); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); + chan = &ioat->base; + chan->device = device; + chan->reg_base = device->reg_base + (0x80 * (i + 1)); + ioat->xfercap = xfercap; + ioat->desccount = 0; + INIT_DELAYED_WORK(&chan->work, ioat_dma_chan_reset_part2); + spin_lock_init(&chan->cleanup_lock); + spin_lock_init(&ioat->desc_lock); + INIT_LIST_HEAD(&ioat->free_desc); + INIT_LIST_HEAD(&ioat->used_desc); /* This should be made common somewhere in dmaengine.c */ - ioat_chan->common.device = &device->common; - list_add_tail(&ioat_chan->common.device_node, &dma->channels); - device->idx[i] = ioat_chan; - tasklet_init(&ioat_chan->cleanup_task, + chan->common.device = &device->common; + list_add_tail(&chan->common.device_node, &dma->channels); + device->idx[i] = chan; + tasklet_init(&chan->cleanup_task, ioat_dma_cleanup_tasklet, - (unsigned long) ioat_chan); - tasklet_disable(&ioat_chan->cleanup_task); + (unsigned long) ioat); + tasklet_disable(&chan->cleanup_task); } return dma->chancnt; } @@ -166,39 +168,42 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) * @chan: DMA channel handle */ static inline void -__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) +__ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { - ioat_chan->pending = 0; - writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); + void __iomem *reg_base = ioat->base.reg_base; + + ioat->pending = 0; + writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); } static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(chan); - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + if (ioat->pending > 0) { + spin_lock_bh(&ioat->desc_lock); + __ioat1_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); } } static inline void -__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat_chan) +__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { - ioat_chan->pending = 0; - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + void __iomem *reg_base = ioat->base.reg_base; + + ioat->pending = 0; + writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); } static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(chan); - if (ioat_chan->pending > 0) { - spin_lock_bh(&ioat_chan->desc_lock); - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + if (ioat->pending > 0) { + spin_lock_bh(&ioat->desc_lock); + __ioat2_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); } } @@ -208,84 +213,88 @@ static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) */ static void ioat_dma_chan_reset_part2(struct work_struct *work) { - struct ioat_dma_chan *ioat_chan = - container_of(work, struct ioat_dma_chan, work.work); + struct ioat_chan_common *chan; + struct ioat_dma_chan *ioat; struct ioat_desc_sw *desc; - spin_lock_bh(&ioat_chan->cleanup_lock); - spin_lock_bh(&ioat_chan->desc_lock); + chan = container_of(work, struct ioat_chan_common, work.work); + ioat = container_of(chan, struct ioat_dma_chan, base); + 
spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->desc_lock); - ioat_chan->completion_virt->low = 0; - ioat_chan->completion_virt->high = 0; - ioat_chan->pending = 0; + chan->completion_virt->low = 0; + chan->completion_virt->high = 0; + ioat->pending = 0; /* * count the descriptors waiting, and be sure to do it * right for both the CB1 line and the CB2 ring */ - ioat_chan->dmacount = 0; - if (ioat_chan->used_desc.prev) { - desc = to_ioat_desc(ioat_chan->used_desc.prev); + ioat->dmacount = 0; + if (ioat->used_desc.prev) { + desc = to_ioat_desc(ioat->used_desc.prev); do { - ioat_chan->dmacount++; + ioat->dmacount++; desc = to_ioat_desc(desc->node.next); - } while (&desc->node != ioat_chan->used_desc.next); + } while (&desc->node != ioat->used_desc.next); } /* * write the new starting descriptor address * this puts channel engine into ARMED state */ - desc = to_ioat_desc(ioat_chan->used_desc.prev); - switch (ioat_chan->device->version) { + desc = to_ioat_desc(ioat->used_desc.prev); + switch (chan->device->version) { case IOAT_VER_1_2: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); break; case IOAT_VER_2_0: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); /* tell the engine to go with what's left to be done */ - writew(ioat_chan->dmacount, - ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + writew(ioat->dmacount, + chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); break; } - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + chan_num(chan), ioat->dmacount, ioat->desccount); - spin_unlock_bh(&ioat_chan->desc_lock); - spin_unlock_bh(&ioat_chan->cleanup_lock); + spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&chan->cleanup_lock); } /** * ioat_dma_reset_channel - restart a channel - * @ioat_chan: IOAT DMA channel handle + * @ioat: IOAT DMA channel handle */ -static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; + void __iomem *reg_base = chan->reg_base; u32 chansts, chanerr; - if (!ioat_chan->used_desc.prev) + if (!ioat->used_desc.prev) return; - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = (ioat_chan->completion_virt->low + chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); + chansts = (chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); if (chanerr) { - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(ioat_chan), chansts, chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + chan_num(chan), chansts, chanerr); + writel(chanerr, reg_base + IOAT_CHANERR_OFFSET); } /* @@ -296,15 +305,14 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan 
*ioat_chan) * while we're waiting. */ - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->pending = INT_MIN; + spin_lock_bh(&ioat->desc_lock); + ioat->pending = INT_MIN; writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); - spin_unlock_bh(&ioat_chan->desc_lock); + reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); + spin_unlock_bh(&ioat->desc_lock); /* schedule the 2nd half instead of sleeping a long time */ - schedule_delayed_work(&ioat_chan->work, RESET_DELAY); + schedule_delayed_work(&chan->work, RESET_DELAY); } /** @@ -314,7 +322,8 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) { struct ioatdma_device *device = container_of(work, struct ioatdma_device, work.work); - struct ioat_dma_chan *ioat_chan; + struct ioat_dma_chan *ioat; + struct ioat_chan_common *chan; int i; union { @@ -327,23 +336,21 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { - ioat_chan = ioat_chan_by_index(device, i); + chan = ioat_chan_by_index(device, i); + ioat = container_of(chan, struct ioat_dma_chan, base); - if (ioat_chan->device->version == IOAT_VER_1_2 + if (chan->device->version == IOAT_VER_1_2 /* have we started processing anything yet */ - && ioat_chan->last_completion + && chan->last_completion /* have we completed any since last watchdog cycle? */ - && (ioat_chan->last_completion == - ioat_chan->watchdog_completion) + && (chan->last_completion == chan->watchdog_completion) /* has TCP stuck on one cookie since last watchdog? */ - && (ioat_chan->watchdog_tcp_cookie == - ioat_chan->watchdog_last_tcp_cookie) - && (ioat_chan->watchdog_tcp_cookie != - ioat_chan->completed_cookie) + && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) + && (chan->watchdog_tcp_cookie != chan->completed_cookie) /* is there something in the chain to be processed? 
*/ /* CB1 chain always has at least the last one processed */ - && (ioat_chan->used_desc.prev != ioat_chan->used_desc.next) - && ioat_chan->pending == 0) { + && (ioat->used_desc.prev != ioat->used_desc.next) + && ioat->pending == 0) { /* * check CHANSTS register for completed @@ -360,10 +367,10 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) * try resetting the channel */ - completion_hw.low = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_LOW(ioat_chan->device->version)); - completion_hw.high = readl(ioat_chan->reg_base + - IOAT_CHANSTS_OFFSET_HIGH(ioat_chan->device->version)); + completion_hw.low = readl(chan->reg_base + + IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); + completion_hw.high = readl(chan->reg_base + + IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); #if (BITS_PER_LONG == 64) compl_desc_addr_hw = completion_hw.full @@ -374,15 +381,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) #endif if ((compl_desc_addr_hw != 0) - && (compl_desc_addr_hw != ioat_chan->watchdog_completion) - && (compl_desc_addr_hw != ioat_chan->last_compl_desc_addr_hw)) { - ioat_chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - ioat_chan->completion_virt->low = completion_hw.low; - ioat_chan->completion_virt->high = completion_hw.high; + && (compl_desc_addr_hw != chan->watchdog_completion) + && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { + chan->last_compl_desc_addr_hw = compl_desc_addr_hw; + chan->completion_virt->low = completion_hw.low; + chan->completion_virt->high = completion_hw.high; } else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; + ioat_dma_reset_channel(ioat); + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; } /* @@ -393,25 +400,22 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) * else * try resetting the channel */ - } else if (ioat_chan->device->version == IOAT_VER_2_0 - && ioat_chan->used_desc.prev - && ioat_chan->last_completion - && ioat_chan->last_completion == ioat_chan->watchdog_completion) { + } else if (chan->device->version == IOAT_VER_2_0 + && ioat->used_desc.prev + && chan->last_completion + && chan->last_completion == chan->watchdog_completion) { - if (ioat_chan->pending < ioat_pending_level) - ioat2_dma_memcpy_issue_pending(&ioat_chan->common); + if (ioat->pending < ioat_pending_level) + ioat2_dma_memcpy_issue_pending(&chan->common); else { - ioat_dma_reset_channel(ioat_chan); - ioat_chan->watchdog_completion = 0; + ioat_dma_reset_channel(ioat); + chan->watchdog_completion = 0; } } else { - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_completion - = ioat_chan->last_completion; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_completion = chan->last_completion; } - - ioat_chan->watchdog_last_tcp_cookie = - ioat_chan->watchdog_tcp_cookie; + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; } schedule_delayed_work(&device->work, WATCHDOG_DELAY); @@ -419,40 +423,42 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct dma_chan *c = tx->chan; + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); struct ioat_desc_sw *first; struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; - spin_lock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); /* cookie incr and addition to used_list must be 
atomic */ - cookie = ioat_chan->common.cookie; + cookie = c->cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = tx->cookie = cookie; + c->cookie = cookie; + tx->cookie = cookie; /* write address into NextDescriptor field of last desc in chain */ first = to_ioat_desc(tx->tx_list.next); - chain_tail = to_ioat_desc(ioat_chan->used_desc.prev); + chain_tail = to_ioat_desc(ioat->used_desc.prev); /* make descriptor updates globally visible before chaining */ wmb(); chain_tail->hw->next = first->txd.phys; - list_splice_tail_init(&tx->tx_list, &ioat_chan->used_desc); + list_splice_tail_init(&tx->tx_list, &ioat->used_desc); - ioat_chan->dmacount += desc->tx_cnt; - ioat_chan->pending += desc->tx_cnt; - if (ioat_chan->pending >= ioat_pending_level) - __ioat1_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + ioat->dmacount += desc->tx_cnt; + ioat->pending += desc->tx_cnt; + if (ioat->pending >= ioat_pending_level) + __ioat1_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); return cookie; } static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_dma_chan *ioat = to_ioat_chan(tx->chan); struct ioat_desc_sw *first = tx_to_ioat_desc(tx); struct ioat_desc_sw *new; struct ioat_dma_descriptor *hw; @@ -471,11 +477,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) new = first; /* - * ioat_chan->desc_lock is still in force in version 2 path + * ioat->desc_lock is still in force in version 2 path * it gets unlocked at end of this function */ do { - copy = min_t(size_t, len, ioat_chan->xfercap); + copy = min_t(size_t, len, ioat->xfercap); async_tx_ack(&new->txd); @@ -489,11 +495,11 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) dst += copy; src += copy; desc_count++; - } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); + } while (len && (new = ioat2_dma_get_next_descriptor(ioat))); if (!new) { - dev_err(to_dev(ioat_chan), "tx submit failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); + dev_err(to_dev(&ioat->base), "tx submit failed\n"); + spin_unlock_bh(&ioat->desc_lock); return -ENOMEM; } @@ -521,35 +527,35 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) } /* cookie incr and addition to used_list must be atomic */ - cookie = ioat_chan->common.cookie; + cookie = ioat->base.common.cookie; cookie++; if (cookie < 0) cookie = 1; - ioat_chan->common.cookie = new->txd.cookie = cookie; + ioat->base.common.cookie = new->txd.cookie = cookie; - ioat_chan->dmacount += desc_count; - ioat_chan->pending += desc_count; - if (ioat_chan->pending >= ioat_pending_level) - __ioat2_dma_memcpy_issue_pending(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + ioat->dmacount += desc_count; + ioat->pending += desc_count; + if (ioat->pending >= ioat_pending_level) + __ioat2_dma_memcpy_issue_pending(ioat); + spin_unlock_bh(&ioat->desc_lock); return cookie; } /** * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair - * @ioat_chan: the channel supplying the memory pool for the descriptors + * @ioat: the channel supplying the memory pool for the descriptors * @flags: allocation flags */ static struct ioat_desc_sw * -ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) +ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) { struct ioat_dma_descriptor *desc; struct ioat_desc_sw *desc_sw; struct ioatdma_device *ioatdma_device; 
dma_addr_t phys; - ioatdma_device = to_ioatdma_device(ioat_chan->common.device); + ioatdma_device = ioat->base.device; desc = pci_pool_alloc(ioatdma_device->dma_pool, flags, &phys); if (unlikely(!desc)) return NULL; @@ -561,8 +567,8 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat_chan, gfp_t flags) } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat_chan->common); - switch (ioat_chan->device->version) { + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); + switch (ioatdma_device->version) { case IOAT_VER_1_2: desc_sw->txd.tx_submit = ioat1_tx_submit; break; @@ -585,26 +591,26 @@ MODULE_PARM_DESC(ioat_initial_desc_count, /** * ioat2_dma_massage_chan_desc - link the descriptors into a circle - * @ioat_chan: the channel to be massaged + * @ioat: the channel to be massaged */ -static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) +static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *desc, *_desc; /* setup used_desc */ - ioat_chan->used_desc.next = ioat_chan->free_desc.next; - ioat_chan->used_desc.prev = NULL; + ioat->used_desc.next = ioat->free_desc.next; + ioat->used_desc.prev = NULL; /* pull free_desc out of the circle so that every node is a hw * descriptor, but leave it pointing to the list */ - ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; - ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; + ioat->free_desc.prev->next = ioat->free_desc.next; + ioat->free_desc.next->prev = ioat->free_desc.prev; /* circle link the hw descriptors */ - desc = to_ioat_desc(ioat_chan->free_desc.next); + desc = to_ioat_desc(ioat->free_desc.next); desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { + list_for_each_entry_safe(desc, _desc, ioat->free_desc.next, node) { desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; } } @@ -613,9 +619,10 @@ static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors * @chan: the channel to be filled out */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) +static int ioat_dma_alloc_chan_resources(struct dma_chan *c) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; u16 chanctrl; u32 chanerr; @@ -623,89 +630,87 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) LIST_HEAD(tmp_list); /* have we already been set up? 
*/ - if (!list_empty(&ioat_chan->free_desc)) - return ioat_chan->desccount; + if (!list_empty(&ioat->free_desc)) + return ioat->desccount; /* Setup register to interrupt and write completion status on error */ chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); - chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { - dev_err(to_dev(ioat_chan), "CHANERR = %x, clearing\n", chanerr); - writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); } /* Allocate descriptors */ for (i = 0; i < ioat_initial_desc_count; i++) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); + desc = ioat_dma_alloc_descriptor(ioat, GFP_KERNEL); if (!desc) { - dev_err(to_dev(ioat_chan), - "Only %d initial descriptors\n", i); + dev_err(to_dev(chan), "Only %d initial descriptors\n", i); break; } list_add_tail(&desc->node, &tmp_list); } - spin_lock_bh(&ioat_chan->desc_lock); - ioat_chan->desccount = i; - list_splice(&tmp_list, &ioat_chan->free_desc); - if (ioat_chan->device->version != IOAT_VER_1_2) - ioat2_dma_massage_chan_desc(ioat_chan); - spin_unlock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); + ioat->desccount = i; + list_splice(&tmp_list, &ioat->free_desc); + if (chan->device->version != IOAT_VER_1_2) + ioat2_dma_massage_chan_desc(ioat); + spin_unlock_bh(&ioat->desc_lock); /* allocate a completion writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - ioat_chan->completion_virt = - pci_pool_alloc(ioat_chan->device->completion_pool, - GFP_KERNEL, - &ioat_chan->completion_addr); - memset(ioat_chan->completion_virt, 0, - sizeof(*ioat_chan->completion_virt)); - writel(((u64) ioat_chan->completion_addr) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) ioat_chan->completion_addr) >> 32, - ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, + &chan->completion_addr); + memset(chan->completion_virt, 0, + sizeof(*chan->completion_virt)); + writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) chan->completion_addr) >> 32, + chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - tasklet_enable(&ioat_chan->cleanup_task); - ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ - return ioat_chan->desccount; + tasklet_enable(&chan->cleanup_task); + ioat_dma_start_null_desc(ioat); /* give chain to dma device */ + return ioat->desccount; } /** * ioat_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned */ -static void ioat_dma_free_chan_resources(struct dma_chan *chan) +static void ioat_dma_free_chan_resources(struct dma_chan *c) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); - struct ioatdma_device *ioatdma_device = to_ioatdma_device(chan->device); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *ioatdma_device = chan->device; struct ioat_desc_sw *desc, *_desc; int in_use_descs = 0; /* Before freeing channel resources first check * if they have been previously allocated for this channel. 
*/ - if (ioat_chan->desccount == 0) + if (ioat->desccount == 0) return; - tasklet_disable(&ioat_chan->cleanup_task); - ioat_dma_memcpy_cleanup(ioat_chan); + tasklet_disable(&chan->cleanup_task); + ioat_dma_memcpy_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. */ writeb(IOAT_CHANCMD_RESET, - ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); mdelay(100); - spin_lock_bh(&ioat_chan->desc_lock); - switch (ioat_chan->device->version) { + spin_lock_bh(&ioat->desc_lock); + switch (chan->device->version) { case IOAT_VER_1_2: list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { + &ioat->used_desc, node) { in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, @@ -713,7 +718,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) kfree(desc); } list_for_each_entry_safe(desc, _desc, - &ioat_chan->free_desc, node) { + &ioat->free_desc, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); @@ -723,62 +728,61 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) case IOAT_VER_2_0: case IOAT_VER_3_0: list_for_each_entry_safe(desc, _desc, - ioat_chan->free_desc.next, node) { + ioat->free_desc.next, node) { list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); } - desc = to_ioat_desc(ioat_chan->free_desc.next); + desc = to_ioat_desc(ioat->free_desc.next); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); - INIT_LIST_HEAD(&ioat_chan->free_desc); - INIT_LIST_HEAD(&ioat_chan->used_desc); + INIT_LIST_HEAD(&ioat->free_desc); + INIT_LIST_HEAD(&ioat->used_desc); break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); pci_pool_free(ioatdma_device->completion_pool, - ioat_chan->completion_virt, - ioat_chan->completion_addr); + chan->completion_virt, + chan->completion_addr); /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) - dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n", + dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); - ioat_chan->last_completion = ioat_chan->completion_addr = 0; - ioat_chan->pending = 0; - ioat_chan->dmacount = 0; - ioat_chan->desccount = 0; - ioat_chan->watchdog_completion = 0; - ioat_chan->last_compl_desc_addr_hw = 0; - ioat_chan->watchdog_tcp_cookie = - ioat_chan->watchdog_last_tcp_cookie = 0; + chan->last_completion = chan->completion_addr = 0; + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; + ioat->pending = 0; + ioat->dmacount = 0; + ioat->desccount = 0; } /** - * ioat_dma_get_next_descriptor - return the next available descriptor - * @ioat_chan: IOAT DMA channel handle + * ioat1_dma_get_next_descriptor - return the next available descriptor + * @ioat: IOAT DMA channel handle * * Gets the next descriptor from the chain, and must be called with the * channel's desc_lock held. Allocates more descriptors if the channel * has run out. 
*/ static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *new; - if (!list_empty(&ioat_chan->free_desc)) { - new = to_ioat_desc(ioat_chan->free_desc.next); + if (!list_empty(&ioat->free_desc)) { + new = to_ioat_desc(ioat->free_desc.next); list_del(&new->node); } else { /* try to get another desc */ - new = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + new = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); if (!new) { - dev_err(to_dev(ioat_chan), "alloc failed\n"); + dev_err(to_dev(&ioat->base), "alloc failed\n"); return NULL; } } @@ -788,7 +792,7 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) } static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { struct ioat_desc_sw *new; @@ -801,15 +805,15 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) * linking in a new set of descriptors, since the device * has probably already read the pointer to it */ - if (ioat_chan->used_desc.prev && - ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { + if (ioat->used_desc.prev && + ioat->used_desc.next == ioat->used_desc.prev->prev) { struct ioat_desc_sw *desc; struct ioat_desc_sw *noop_desc; int i; /* set up the noop descriptor */ - noop_desc = to_ioat_desc(ioat_chan->used_desc.next); + noop_desc = to_ioat_desc(ioat->used_desc.next); /* set size to non-zero value (channel returns error when size is 0) */ noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; noop_desc->hw->ctl = 0; @@ -817,60 +821,61 @@ ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) noop_desc->hw->src_addr = 0; noop_desc->hw->dst_addr = 0; - ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; - ioat_chan->pending++; - ioat_chan->dmacount++; + ioat->used_desc.next = ioat->used_desc.next->next; + ioat->pending++; + ioat->dmacount++; /* try to get a few more descriptors */ for (i = 16; i; i--) { - desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); if (!desc) { - dev_err(to_dev(ioat_chan), "alloc failed\n"); + dev_err(to_dev(&ioat->base), + "alloc failed\n"); break; } - list_add_tail(&desc->node, ioat_chan->used_desc.next); + list_add_tail(&desc->node, ioat->used_desc.next); desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; to_ioat_desc(desc->node.prev)->hw->next = desc->txd.phys; - ioat_chan->desccount++; + ioat->desccount++; } - ioat_chan->used_desc.next = noop_desc->node.next; + ioat->used_desc.next = noop_desc->node.next; } - new = to_ioat_desc(ioat_chan->used_desc.next); + new = to_ioat_desc(ioat->used_desc.next); prefetch(new); - ioat_chan->used_desc.next = new->node.next; + ioat->used_desc.next = new->node.next; - if (ioat_chan->used_desc.prev == NULL) - ioat_chan->used_desc.prev = &new->node; + if (ioat->used_desc.prev == NULL) + ioat->used_desc.prev = &new->node; prefetch(new->hw); return new; } static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat) { - if (!ioat_chan) + if (!ioat) return NULL; - switch (ioat_chan->device->version) { + switch (ioat->base.device->version) { case IOAT_VER_1_2: - return ioat1_dma_get_next_descriptor(ioat_chan); + return ioat1_dma_get_next_descriptor(ioat); case IOAT_VER_2_0: case IOAT_VER_3_0: - return ioat2_dma_get_next_descriptor(ioat_chan); + return 
ioat2_dma_get_next_descriptor(ioat); } return NULL; } static struct dma_async_tx_descriptor * -ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, +ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc; size_t copy; LIST_HEAD(chain); @@ -880,14 +885,14 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, struct ioat_dma_descriptor *hw = NULL; int tx_cnt = 0; - spin_lock_bh(&ioat_chan->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat_chan); + spin_lock_bh(&ioat->desc_lock); + desc = ioat_dma_get_next_descriptor(ioat); do { if (!desc) break; tx_cnt++; - copy = min_t(size_t, len, ioat_chan->xfercap); + copy = min_t(size_t, len, ioat->xfercap); hw = desc->hw; hw->size = copy; @@ -904,7 +909,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, struct ioat_desc_sw *next; async_tx_ack(&desc->txd); - next = ioat_dma_get_next_descriptor(ioat_chan); + next = ioat_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; desc = next; } else @@ -912,14 +917,16 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, } while (len); if (!desc) { - dev_err(to_dev(ioat_chan), + struct ioat_chan_common *chan = &ioat->base; + + dev_err(to_dev(chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); - list_splice(&chain, &ioat_chan->free_desc); - spin_unlock_bh(&ioat_chan->desc_lock); + chan_num(chan), ioat->dmacount, ioat->desccount); + list_splice(&chain, &ioat->free_desc); + spin_unlock_bh(&ioat->desc_lock); return NULL; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); desc->txd.flags = flags; desc->tx_cnt = tx_cnt; @@ -934,17 +941,17 @@ ioat1_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, } static struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, +ioat2_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *new; - spin_lock_bh(&ioat_chan->desc_lock); - new = ioat2_dma_get_next_descriptor(ioat_chan); + spin_lock_bh(&ioat->desc_lock); + new = ioat2_dma_get_next_descriptor(ioat); /* - * leave ioat_chan->desc_lock set in ioat 2 path + * leave ioat->desc_lock set in ioat 2 path * it will get unlocked at end of tx_submit */ @@ -955,10 +962,12 @@ ioat2_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, new->txd.flags = flags; return &new->txd; } else { - spin_unlock_bh(&ioat_chan->desc_lock); - dev_err(to_dev(ioat_chan), + struct ioat_chan_common *chan = &ioat->base; + + spin_unlock_bh(&ioat->desc_lock); + dev_err(to_dev(chan), "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(ioat_chan), ioat_chan->dmacount, ioat_chan->desccount); + chan_num(chan), ioat->dmacount, ioat->desccount); return NULL; } } @@ -968,20 +977,20 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) struct ioat_dma_chan *chan = (void *)data; ioat_dma_memcpy_cleanup(chan); writew(IOAT_CHANCTRL_INT_DISABLE, - chan->reg_base + IOAT_CHANCTRL_OFFSET); + chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void -ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw 
*desc) +ioat_dma_unmap(struct ioat_chan_common *chan, struct ioat_desc_sw *desc) { if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_single(chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE); else - pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_page(chan->device->pdev, pci_unmap_addr(desc, dst), pci_unmap_len(desc, len), PCI_DMA_FROMDEVICE); @@ -989,12 +998,12 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - pci_unmap_single(ioat_chan->device->pdev, + pci_unmap_single(chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), PCI_DMA_TODEVICE); else - pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_page(chan->device->pdev, pci_unmap_addr(desc, src), pci_unmap_len(desc, len), PCI_DMA_TODEVICE); @@ -1005,8 +1014,9 @@ ioat_dma_unmap(struct ioat_dma_chan *ioat_chan, struct ioat_desc_sw *desc) * ioat_dma_memcpy_cleanup - cleanup up finished descriptors * @chan: ioat channel to be cleaned up */ -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; struct ioat_desc_sw *desc, *_desc; dma_cookie_t cookie = 0; @@ -1014,9 +1024,9 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) struct ioat_desc_sw *latest_desc; struct dma_async_tx_descriptor *tx; - prefetch(ioat_chan->completion_virt); + prefetch(chan->completion_virt); - if (!spin_trylock_bh(&ioat_chan->cleanup_lock)) + if (!spin_trylock_bh(&chan->cleanup_lock)) return; /* The completion writeback can happen at any time, @@ -1026,49 +1036,47 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) #if (BITS_PER_LONG == 64) phys_complete = - ioat_chan->completion_virt->full + chan->completion_virt->full & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; #else - phys_complete = - ioat_chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; + phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; #endif - if ((ioat_chan->completion_virt->full + if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { - dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n", - readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET)); + dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", + readl(chan->reg_base + IOAT_CHANERR_OFFSET)); /* TODO do something to salvage the situation */ } - if (phys_complete == ioat_chan->last_completion) { - spin_unlock_bh(&ioat_chan->cleanup_lock); + if (phys_complete == chan->last_completion) { + spin_unlock_bh(&chan->cleanup_lock); /* * perhaps we're stuck so hard that the watchdog can't go off? 
* try to catch it after 2 seconds */ - if (ioat_chan->device->version != IOAT_VER_3_0) { + if (chan->device->version != IOAT_VER_3_0) { if (time_after(jiffies, - ioat_chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat_dma_chan_watchdog(&(ioat_chan->device->work.work)); - ioat_chan->last_completion_time = jiffies; + chan->last_completion_time + HZ*WATCHDOG_DELAY)) { + ioat_dma_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; } } return; } - ioat_chan->last_completion_time = jiffies; + chan->last_completion_time = jiffies; cookie = 0; - if (!spin_trylock_bh(&ioat_chan->desc_lock)) { - spin_unlock_bh(&ioat_chan->cleanup_lock); + if (!spin_trylock_bh(&ioat->desc_lock)) { + spin_unlock_bh(&chan->cleanup_lock); return; } - switch (ioat_chan->device->version) { + switch (chan->device->version) { case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat_chan->used_desc, node) { + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, @@ -1077,7 +1085,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) */ if (tx->cookie) { cookie = tx->cookie; - ioat_dma_unmap(ioat_chan, desc); + ioat_dma_unmap(chan, desc); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -1091,7 +1099,7 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) */ if (async_tx_test_ack(tx)) { list_move_tail(&desc->node, - &ioat_chan->free_desc); + &ioat->free_desc); } else tx->cookie = 0; } else { @@ -1110,11 +1118,11 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) case IOAT_VER_2_0: case IOAT_VER_3_0: /* has some other thread has already cleaned up? */ - if (ioat_chan->used_desc.prev == NULL) + if (ioat->used_desc.prev == NULL) break; /* work backwards to find latest finished desc */ - desc = to_ioat_desc(ioat_chan->used_desc.next); + desc = to_ioat_desc(ioat->used_desc.next); tx = &desc->txd; latest_desc = NULL; do { @@ -1125,18 +1133,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) latest_desc = desc; break; } - } while (&desc->node != ioat_chan->used_desc.prev); + } while (&desc->node != ioat->used_desc.prev); if (latest_desc != NULL) { /* work forwards to clear finished descriptors */ - for (desc = to_ioat_desc(ioat_chan->used_desc.prev); + for (desc = to_ioat_desc(ioat->used_desc.prev); &desc->node != latest_desc->node.next && - &desc->node != ioat_chan->used_desc.next; + &desc->node != ioat->used_desc.next; desc = to_ioat_desc(desc->node.next)) { if (tx->cookie) { cookie = tx->cookie; tx->cookie = 0; - ioat_dma_unmap(ioat_chan, desc); + ioat_dma_unmap(chan, desc); if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; @@ -1145,21 +1153,21 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) } /* move used.prev up beyond those that are finished */ - if (&desc->node == ioat_chan->used_desc.next) - ioat_chan->used_desc.prev = NULL; + if (&desc->node == ioat->used_desc.next) + ioat->used_desc.prev = NULL; else - ioat_chan->used_desc.prev = &desc->node; + ioat->used_desc.prev = &desc->node; } break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); - ioat_chan->last_completion = phys_complete; + chan->last_completion = phys_complete; if (cookie != 0) - ioat_chan->completed_cookie = cookie; + chan->completed_cookie = cookie; - spin_unlock_bh(&ioat_chan->cleanup_lock); + spin_unlock_bh(&chan->cleanup_lock); } /** @@ 
-1170,17 +1178,18 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) * @used: if not %NULL, updated with last used transaction */ static enum dma_status -ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, +ioat_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { - struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_dma_chan *ioat = to_ioat_chan(c); + struct ioat_chan_common *chan = &ioat->base; dma_cookie_t last_used; dma_cookie_t last_complete; enum dma_status ret; - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; - ioat_chan->watchdog_tcp_cookie = cookie; + last_used = c->cookie; + last_complete = chan->completed_cookie; + chan->watchdog_tcp_cookie = cookie; if (done) *done = last_complete; @@ -1191,10 +1200,10 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, if (ret == DMA_SUCCESS) return ret; - ioat_dma_memcpy_cleanup(ioat_chan); + ioat_dma_memcpy_cleanup(ioat); - last_used = chan->cookie; - last_complete = ioat_chan->completed_cookie; + last_used = c->cookie; + last_complete = chan->completed_cookie; if (done) *done = last_complete; @@ -1204,19 +1213,20 @@ ioat_dma_is_complete(struct dma_chan *chan, dma_cookie_t cookie, return dma_async_is_complete(cookie, last_complete, last_used); } -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) +static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) { + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; struct ioat_dma_descriptor *hw; - spin_lock_bh(&ioat_chan->desc_lock); + spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat_chan); + desc = ioat_dma_get_next_descriptor(ioat); if (!desc) { - dev_err(to_dev(ioat_chan), + dev_err(to_dev(chan), "Unable to start null desc - get next desc failed\n"); - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); return; } @@ -1230,31 +1240,31 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - switch (ioat_chan->device->version) { + switch (chan->device->version) { case IOAT_VER_1_2: hw->next = 0; - list_add_tail(&desc->node, &ioat_chan->used_desc); + list_add_tail(&desc->node, &ioat->used_desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base - + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); break; case IOAT_VER_2_0: case IOAT_VER_3_0: writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, - ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - ioat_chan->dmacount++; - __ioat2_dma_memcpy_issue_pending(ioat_chan); + ioat->dmacount++; + __ioat2_dma_memcpy_issue_pending(ioat); break; } - spin_unlock_bh(&ioat_chan->desc_lock); + spin_unlock_bh(&ioat->desc_lock); } /* @@ -1371,7 +1381,7 @@ MODULE_PARM_DESC(ioat_interrupt_style, */ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) { - struct ioat_dma_chan 
*ioat_chan; + struct ioat_chan_common *chan; struct pci_dev *pdev = device->pdev; struct device *dev = &pdev->dev; struct msix_entry *msix; @@ -1404,15 +1414,15 @@ msix: for (i = 0; i < msixcnt; i++) { msix = &device->msix_entries[i]; - ioat_chan = ioat_chan_by_index(device, i); + chan = ioat_chan_by_index(device, i); err = devm_request_irq(dev, msix->vector, ioat_dma_do_interrupt_msix, 0, - "ioat-msix", ioat_chan); + "ioat-msix", chan); if (err) { for (j = 0; j < i; j++) { msix = &device->msix_entries[j]; - ioat_chan = ioat_chan_by_index(device, j); - devm_free_irq(dev, msix->vector, ioat_chan); + chan = ioat_chan_by_index(device, j); + devm_free_irq(dev, msix->vector, chan); } goto msix_single_vector; } @@ -1594,8 +1604,8 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; - struct dma_chan *chan; - struct ioat_dma_chan *ioat_chan; + struct dma_chan *c; + struct ioat_chan_common *chan; int err; dma = &device->common; @@ -1607,10 +1617,10 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) return err; ioat_set_tcp_copy_break(2048); - list_for_each_entry(chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + chan->reg_base + IOAT_DCACTRL_OFFSET); } err = ioat_register(device); @@ -1629,8 +1639,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; - struct dma_chan *chan; - struct ioat_dma_chan *ioat_chan; + struct dma_chan *c; + struct ioat_chan_common *chan; int err; u16 dev_id; @@ -1656,10 +1666,10 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) return err; ioat_set_tcp_copy_break(262144); - list_for_each_entry(chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); writel(IOAT_DMA_DCA_ANY_CPU, - ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + chan->reg_base + IOAT_DCACTRL_OFFSET); } err = ioat_register(device); @@ -1673,8 +1683,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) void ioat_dma_remove(struct ioatdma_device *device) { - struct dma_chan *chan, *_chan; - struct ioat_dma_chan *ioat_chan; struct dma_device *dma = &device->common; if (device->version != IOAT_VER_3_0) @@ -1687,9 +1695,6 @@ void ioat_dma_remove(struct ioatdma_device *device) pci_pool_destroy(device->dma_pool); pci_pool_destroy(device->completion_pool); - list_for_each_entry_safe(chan, _chan, &dma->channels, device_node) { - ioat_chan = to_ioat_chan(chan); - list_del(&chan->device_node); - } + INIT_LIST_HEAD(&dma->channels); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 9f0c853b6a77..5b31db73ad8e 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -35,7 +35,6 @@ #define IOAT_DMA_DCA_ANY_CPU ~0 #define IOAT_WATCHDOG_PERIOD (2 * HZ) -#define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) @@ -74,37 +73,24 @@ struct ioatdma_device { u8 version; struct delayed_work work; struct msix_entry msix_entries[4]; - struct ioat_dma_chan *idx[4]; + struct 
ioat_chan_common *idx[4]; struct dca_provider *dca; void (*intr_quirk)(struct ioatdma_device *device); }; -/** - * struct ioat_dma_chan - internal representation of a DMA channel - */ -struct ioat_dma_chan { - +struct ioat_chan_common { void __iomem *reg_base; - dma_cookie_t completed_cookie; unsigned long last_completion; unsigned long last_completion_time; - size_t xfercap; /* XFERCAP register value expanded out */ - spinlock_t cleanup_lock; - spinlock_t desc_lock; - struct list_head free_desc; - struct list_head used_desc; + dma_cookie_t completed_cookie; unsigned long watchdog_completion; int watchdog_tcp_cookie; u32 watchdog_last_tcp_cookie; struct delayed_work work; - int pending; - u16 dmacount; - u16 desccount; - struct ioatdma_device *device; struct dma_chan common; @@ -120,6 +106,35 @@ struct ioat_dma_chan { struct tasklet_struct cleanup_task; }; +/** + * struct ioat_dma_chan - internal representation of a DMA channel + */ +struct ioat_dma_chan { + struct ioat_chan_common base; + + size_t xfercap; /* XFERCAP register value expanded out */ + + spinlock_t desc_lock; + struct list_head free_desc; + struct list_head used_desc; + + int pending; + u16 dmacount; + u16 desccount; +}; + +static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) +{ + return container_of(c, struct ioat_chan_common, common); +} + +static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) +{ + struct ioat_chan_common *chan = to_chan_common(c); + + return container_of(chan, struct ioat_dma_chan, base); +} + /* wrapper around hardware descriptor format + additional software fields */ /** From 5cbafa65b92ee4f5b8ba915cddf94b91f186b989 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Wed, 26 Aug 2009 13:01:44 -0700 Subject: [PATCH 0109/3409] ioat2,3: convert to a true ring buffer Replace the current linked list munged into a ring with a native ring buffer implementation. The benefit of this approach is reduced overhead as many parameters can be derived from ring position with simple pointer comparisons and descriptor allocation/freeing becomes just a manipulation of head/tail pointers. It requires a contiguous allocation for the software descriptor information. Since this arrangement is significantly different from the ioat1 chain, move ioat2,3 support into its own file and header. Common routines are exported from driver/dma/ioat/dma.[ch]. 
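For illustration only: the bookkeeping the ring conversion relies on reduces to three free-running u16 counters over a power-of-2 array, with everything else derived by subtraction and masking. The demo_* names below are invented for this sketch; the driver's real equivalents are the ioat2_ring_* helpers in dma_v2.[ch].

	struct demo_ring {
		u16 head;	 /* next slot to allocate */
		u16 issued;	 /* last slot pushed to the doorbell */
		u16 tail;	 /* next slot to clean up */
		u16 alloc_order; /* ring holds 1 << alloc_order entries */
	};

	/* the counters run free and wrap modulo 2^16, so plain u16
	 * subtraction stays correct across wraparound for any ring of
	 * at most 2^16 entries
	 */
	static inline u16 demo_ring_size(const struct demo_ring *r)
	{
		return 1 << r->alloc_order;
	}

	static inline u16 demo_ring_active(const struct demo_ring *r)
	{
		return r->head - r->tail;   /* issued or awaiting cleanup */
	}

	static inline u16 demo_ring_pending(const struct demo_ring *r)
	{
		return r->head - r->issued; /* allocated, doorbell not rung */
	}

	static inline u16 demo_ring_space(const struct demo_ring *r)
	{
		return demo_ring_size(r) - demo_ring_active(r);
	}

	/* a power-of-2 size lets indexing mask rather than divide */
	static inline u16 demo_ring_idx(const struct demo_ring *r, u16 pos)
	{
		return pos & (demo_ring_size(r) - 1);
	}

Descriptor allocation is then a space check plus a head advance, and cleanup just advances tail; no list splicing on the hot path, which is where the reduced overhead claimed above comes from.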
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/Makefile | 2 +- drivers/dma/ioat/dma.c | 886 +++++++++----------------------------- drivers/dma/ioat/dma.h | 50 ++- drivers/dma/ioat/dma_v2.c | 750 ++++++++++++++++++++++++++++++++ drivers/dma/ioat/dma_v2.h | 131 ++++++ drivers/dma/ioat/pci.c | 1 + 6 files changed, 1128 insertions(+), 692 deletions(-) create mode 100644 drivers/dma/ioat/dma_v2.c create mode 100644 drivers/dma/ioat/dma_v2.h diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 2ce3d3a4270b..205a639e84df 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-objs := pci.o dma.o dca.o +ioatdma-objs := pci.o dma.o dma_v2.o dca.o diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 2e81e0c76e61..64b4d75059aa 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -38,28 +38,14 @@ #include "registers.h" #include "hw.h" -static int ioat_pending_level = 4; +int ioat_pending_level = 4; module_param(ioat_pending_level, int, 0644); MODULE_PARM_DESC(ioat_pending_level, "high-water mark for pushing ioat descriptors (default: 4)"); -static void ioat_dma_chan_reset_part2(struct work_struct *work); -static void ioat_dma_chan_watchdog(struct work_struct *work); - /* internal functions */ -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat); -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat); - -static struct ioat_desc_sw * -ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat); -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat); - -static inline struct ioat_chan_common * -ioat_chan_by_index(struct ioatdma_device *device, int index) -{ - return device->idx[index]; -} +static void ioat1_cleanup(struct ioat_dma_chan *ioat); +static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat); /** * ioat_dma_do_interrupt - handler used for single vector interrupt mode @@ -108,18 +94,38 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data) return IRQ_HANDLED; } -static void ioat_dma_cleanup_tasklet(unsigned long data); +static void ioat1_cleanup_tasklet(unsigned long data); + +/* common channel initialization */ +void ioat_init_channel(struct ioatdma_device *device, + struct ioat_chan_common *chan, int idx, + work_func_t work_fn, void (*tasklet)(unsigned long), + unsigned long tasklet_data) +{ + struct dma_device *dma = &device->common; + + chan->device = device; + chan->reg_base = device->reg_base + (0x80 * (idx + 1)); + INIT_DELAYED_WORK(&chan->work, work_fn); + spin_lock_init(&chan->cleanup_lock); + chan->common.device = dma; + list_add_tail(&chan->common.device_node, &dma->channels); + device->idx[idx] = chan; + tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); + tasklet_disable(&chan->cleanup_task); +} + +static void ioat1_reset_part2(struct work_struct *work); /** - * ioat_dma_enumerate_channels - find and initialize the device's channels + * ioat1_dma_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated */ -static int ioat_dma_enumerate_channels(struct ioatdma_device *device) +static int ioat1_enumerate_channels(struct ioatdma_device *device) { u8 xfercap_scale; u32 xfercap; int i; - struct ioat_chan_common *chan; struct ioat_dma_chan *ioat; struct device *dev = &device->pdev->dev; struct dma_device *dma = &device->common; @@ -135,31 +141,20 @@ static int ioat_dma_enumerate_channels(struct 
ioatdma_device *device) #endif for (i = 0; i < dma->chancnt; i++) { ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); - if (!ioat) { - dma->chancnt = i; + if (!ioat) break; - } - chan = &ioat->base; - chan->device = device; - chan->reg_base = device->reg_base + (0x80 * (i + 1)); + ioat_init_channel(device, &ioat->base, i, + ioat1_reset_part2, + ioat1_cleanup_tasklet, + (unsigned long) ioat); ioat->xfercap = xfercap; - ioat->desccount = 0; - INIT_DELAYED_WORK(&chan->work, ioat_dma_chan_reset_part2); - spin_lock_init(&chan->cleanup_lock); spin_lock_init(&ioat->desc_lock); INIT_LIST_HEAD(&ioat->free_desc); INIT_LIST_HEAD(&ioat->used_desc); - /* This should be made common somewhere in dmaengine.c */ - chan->common.device = &device->common; - list_add_tail(&chan->common.device_node, &dma->channels); - device->idx[i] = chan; - tasklet_init(&chan->cleanup_task, - ioat_dma_cleanup_tasklet, - (unsigned long) ioat); - tasklet_disable(&chan->cleanup_task); } - return dma->chancnt; + dma->chancnt = i; + return i; } /** @@ -187,35 +182,16 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -static inline void -__ioat2_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) -{ - void __iomem *reg_base = ioat->base.reg_base; - - ioat->pending = 0; - writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); -} - -static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(chan); - - if (ioat->pending > 0) { - spin_lock_bh(&ioat->desc_lock); - __ioat2_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); - } -} - - /** - * ioat_dma_chan_reset_part2 - reinit the channel after a reset + * ioat1_reset_part2 - reinit the channel after a reset */ -static void ioat_dma_chan_reset_part2(struct work_struct *work) +static void ioat1_reset_part2(struct work_struct *work) { struct ioat_chan_common *chan; struct ioat_dma_chan *ioat; struct ioat_desc_sw *desc; + int dmacount; + bool start_null = false; chan = container_of(work, struct ioat_chan_common, work.work); ioat = container_of(chan, struct ioat_dma_chan, base); @@ -226,26 +202,22 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) chan->completion_virt->high = 0; ioat->pending = 0; - /* - * count the descriptors waiting, and be sure to do it - * right for both the CB1 line and the CB2 ring - */ - ioat->dmacount = 0; + /* count the descriptors waiting */ + dmacount = 0; if (ioat->used_desc.prev) { desc = to_ioat_desc(ioat->used_desc.prev); do { - ioat->dmacount++; + dmacount++; desc = to_ioat_desc(desc->node.next); } while (&desc->node != ioat->used_desc.next); } - /* - * write the new starting descriptor address - * this puts channel engine into ARMED state - */ - desc = to_ioat_desc(ioat->used_desc.prev); - switch (chan->device->version) { - case IOAT_VER_1_2: + if (dmacount) { + /* + * write the new starting descriptor address + * this puts channel engine into ARMED state + */ + desc = to_ioat_desc(ioat->used_desc.prev); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, @@ -253,32 +225,24 @@ static void ioat_dma_chan_reset_part2(struct work_struct *work) writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - break; - case IOAT_VER_2_0: - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - - /* tell 
the engine to go with what's left to be done */ - writew(ioat->dmacount, - chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - - break; - } - dev_err(to_dev(chan), - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); - + } else + start_null = true; spin_unlock_bh(&ioat->desc_lock); spin_unlock_bh(&chan->cleanup_lock); + + dev_err(to_dev(chan), + "chan%d reset - %d descs waiting, %d total desc\n", + chan_num(chan), dmacount, ioat->desccount); + + if (start_null) + ioat1_dma_start_null_desc(ioat); } /** - * ioat_dma_reset_channel - restart a channel + * ioat1_reset_channel - restart a channel * @ioat: IOAT DMA channel handle */ -static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) +static void ioat1_reset_channel(struct ioat_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; void __iomem *reg_base = chan->reg_base; @@ -316,9 +280,9 @@ static void ioat_dma_reset_channel(struct ioat_dma_chan *ioat) } /** - * ioat_dma_chan_watchdog - watch for stuck channels + * ioat1_chan_watchdog - watch for stuck channels */ -static void ioat_dma_chan_watchdog(struct work_struct *work) +static void ioat1_chan_watchdog(struct work_struct *work) { struct ioatdma_device *device = container_of(work, struct ioatdma_device, work.work); @@ -339,16 +303,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) chan = ioat_chan_by_index(device, i); ioat = container_of(chan, struct ioat_dma_chan, base); - if (chan->device->version == IOAT_VER_1_2 - /* have we started processing anything yet */ - && chan->last_completion - /* have we completed any since last watchdog cycle? */ + if (/* have we started processing anything yet */ + chan->last_completion + /* have we completed any since last watchdog cycle? */ && (chan->last_completion == chan->watchdog_completion) - /* has TCP stuck on one cookie since last watchdog? */ + /* has TCP stuck on one cookie since last watchdog? */ && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) && (chan->watchdog_tcp_cookie != chan->completed_cookie) - /* is there something in the chain to be processed? */ - /* CB1 chain always has at least the last one processed */ + /* is there something in the chain to be processed? 
*/ + /* CB1 chain always has at least the last one processed */ && (ioat->used_desc.prev != ioat->used_desc.next) && ioat->pending == 0) { @@ -387,34 +350,15 @@ static void ioat_dma_chan_watchdog(struct work_struct *work) chan->completion_virt->low = completion_hw.low; chan->completion_virt->high = completion_hw.high; } else { - ioat_dma_reset_channel(ioat); + ioat1_reset_channel(ioat); chan->watchdog_completion = 0; chan->last_compl_desc_addr_hw = 0; } - - /* - * for version 2.0 if there are descriptors yet to be processed - * and the last completed hasn't changed since the last watchdog - * if they haven't hit the pending level - * issue the pending to push them through - * else - * try resetting the channel - */ - } else if (chan->device->version == IOAT_VER_2_0 - && ioat->used_desc.prev - && chan->last_completion - && chan->last_completion == chan->watchdog_completion) { - - if (ioat->pending < ioat_pending_level) - ioat2_dma_memcpy_issue_pending(&chan->common); - else { - ioat_dma_reset_channel(ioat); - chan->watchdog_completion = 0; - } } else { chan->last_compl_desc_addr_hw = 0; chan->watchdog_completion = chan->last_completion; } + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; } @@ -447,7 +391,6 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) chain_tail->hw->next = first->txd.phys; list_splice_tail_init(&tx->tx_list, &ioat->used_desc); - ioat->dmacount += desc->tx_cnt; ioat->pending += desc->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); @@ -456,92 +399,6 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) return cookie; } -static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(tx->chan); - struct ioat_desc_sw *first = tx_to_ioat_desc(tx); - struct ioat_desc_sw *new; - struct ioat_dma_descriptor *hw; - dma_cookie_t cookie; - u32 copy; - size_t len; - dma_addr_t src, dst; - unsigned long orig_flags; - unsigned int desc_count = 0; - - /* src and dest and len are stored in the initial descriptor */ - len = first->len; - src = first->src; - dst = first->dst; - orig_flags = first->txd.flags; - new = first; - - /* - * ioat->desc_lock is still in force in version 2 path - * it gets unlocked at end of this function - */ - do { - copy = min_t(size_t, len, ioat->xfercap); - - async_tx_ack(&new->txd); - - hw = new->hw; - hw->size = copy; - hw->ctl = 0; - hw->src_addr = src; - hw->dst_addr = dst; - - len -= copy; - dst += copy; - src += copy; - desc_count++; - } while (len && (new = ioat2_dma_get_next_descriptor(ioat))); - - if (!new) { - dev_err(to_dev(&ioat->base), "tx submit failed\n"); - spin_unlock_bh(&ioat->desc_lock); - return -ENOMEM; - } - - hw->ctl_f.compl_write = 1; - if (first->txd.callback) { - hw->ctl_f.int_en = 1; - if (first != new) { - /* move callback into to last desc */ - new->txd.callback = first->txd.callback; - new->txd.callback_param - = first->txd.callback_param; - first->txd.callback = NULL; - first->txd.callback_param = NULL; - } - } - - new->tx_cnt = desc_count; - new->txd.flags = orig_flags; /* client is in control of this ack */ - - /* store the original values for use in later cleanup */ - if (new != first) { - new->src = first->src; - new->dst = first->dst; - new->len = first->len; - } - - /* cookie incr and addition to used_list must be atomic */ - cookie = ioat->base.common.cookie; - cookie++; - if (cookie < 0) - cookie = 1; - ioat->base.common.cookie = new->txd.cookie = cookie; - - 
ioat->dmacount += desc_count; - ioat->pending += desc_count; - if (ioat->pending >= ioat_pending_level) - __ioat2_dma_memcpy_issue_pending(ioat); - spin_unlock_bh(&ioat->desc_lock); - - return cookie; -} - /** * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair * @ioat: the channel supplying the memory pool for the descriptors @@ -567,17 +424,9 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) } memset(desc, 0, sizeof(*desc)); - dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); - switch (ioatdma_device->version) { - case IOAT_VER_1_2: - desc_sw->txd.tx_submit = ioat1_tx_submit; - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - desc_sw->txd.tx_submit = ioat2_tx_submit; - break; - } + dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); + desc_sw->txd.tx_submit = ioat1_tx_submit; desc_sw->hw = desc; desc_sw->txd.phys = phys; @@ -587,39 +436,12 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) static int ioat_initial_desc_count = 256; module_param(ioat_initial_desc_count, int, 0644); MODULE_PARM_DESC(ioat_initial_desc_count, - "initial descriptors per channel (default: 256)"); - + "ioat1: initial descriptors per channel (default: 256)"); /** - * ioat2_dma_massage_chan_desc - link the descriptors into a circle - * @ioat: the channel to be massaged - */ -static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat) -{ - struct ioat_desc_sw *desc, *_desc; - - /* setup used_desc */ - ioat->used_desc.next = ioat->free_desc.next; - ioat->used_desc.prev = NULL; - - /* pull free_desc out of the circle so that every node is a hw - * descriptor, but leave it pointing to the list - */ - ioat->free_desc.prev->next = ioat->free_desc.next; - ioat->free_desc.next->prev = ioat->free_desc.prev; - - /* circle link the hw descriptors */ - desc = to_ioat_desc(ioat->free_desc.next); - desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - list_for_each_entry_safe(desc, _desc, ioat->free_desc.next, node) { - desc->hw->next = to_ioat_desc(desc->node.next)->txd.phys; - } -} - -/** - * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors + * ioat1_dma_alloc_chan_resources - returns the number of allocated descriptors * @chan: the channel to be filled out */ -static int ioat_dma_alloc_chan_resources(struct dma_chan *c) +static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) { struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = &ioat->base; @@ -657,8 +479,6 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *c) spin_lock_bh(&ioat->desc_lock); ioat->desccount = i; list_splice(&tmp_list, &ioat->free_desc); - if (chan->device->version != IOAT_VER_1_2) - ioat2_dma_massage_chan_desc(ioat); spin_unlock_bh(&ioat->desc_lock); /* allocate a completion writeback area */ @@ -674,15 +494,15 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *c) chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); tasklet_enable(&chan->cleanup_task); - ioat_dma_start_null_desc(ioat); /* give chain to dma device */ + ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ return ioat->desccount; } /** - * ioat_dma_free_chan_resources - release all the descriptors + * ioat1_dma_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned */ -static void ioat_dma_free_chan_resources(struct dma_chan *c) +static void ioat1_dma_free_chan_resources(struct dma_chan *c) { struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = 
&ioat->base; @@ -697,7 +517,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); - ioat_dma_memcpy_cleanup(ioat); + ioat1_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. @@ -707,40 +527,20 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) mdelay(100); spin_lock_bh(&ioat->desc_lock); - switch (chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, - &ioat->used_desc, node) { - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, - &ioat->free_desc, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - list_for_each_entry_safe(desc, _desc, - ioat->free_desc.next, node) { - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->txd.phys); - kfree(desc); - } - desc = to_ioat_desc(ioat->free_desc.next); + list_for_each_entry_safe(desc, _desc, + &ioat->used_desc, node) { + in_use_descs++; + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->txd.phys); + kfree(desc); + } + list_for_each_entry_safe(desc, _desc, + &ioat->free_desc, node) { + list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->txd.phys); kfree(desc); - INIT_LIST_HEAD(&ioat->free_desc); - INIT_LIST_HEAD(&ioat->used_desc); - break; } spin_unlock_bh(&ioat->desc_lock); @@ -758,7 +558,6 @@ static void ioat_dma_free_chan_resources(struct dma_chan *c) chan->last_compl_desc_addr_hw = 0; chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; ioat->pending = 0; - ioat->dmacount = 0; ioat->desccount = 0; } @@ -791,86 +590,6 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) return new; } -static struct ioat_desc_sw * -ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat) -{ - struct ioat_desc_sw *new; - - /* - * used.prev points to where to start processing - * used.next points to next free descriptor - * if used.prev == NULL, there are none waiting to be processed - * if used.next == used.prev.prev, there is only one free descriptor, - * and we need to use it to as a noop descriptor before - * linking in a new set of descriptors, since the device - * has probably already read the pointer to it - */ - if (ioat->used_desc.prev && - ioat->used_desc.next == ioat->used_desc.prev->prev) { - - struct ioat_desc_sw *desc; - struct ioat_desc_sw *noop_desc; - int i; - - /* set up the noop descriptor */ - noop_desc = to_ioat_desc(ioat->used_desc.next); - /* set size to non-zero value (channel returns error when size is 0) */ - noop_desc->hw->size = NULL_DESC_BUFFER_SIZE; - noop_desc->hw->ctl = 0; - noop_desc->hw->ctl_f.null = 1; - noop_desc->hw->src_addr = 0; - noop_desc->hw->dst_addr = 0; - - ioat->used_desc.next = ioat->used_desc.next->next; - ioat->pending++; - ioat->dmacount++; - - /* try to get a few more descriptors */ - for (i = 16; i; i--) { - desc = ioat_dma_alloc_descriptor(ioat, GFP_ATOMIC); - if (!desc) { - dev_err(to_dev(&ioat->base), - "alloc failed\n"); - break; - } - list_add_tail(&desc->node, ioat->used_desc.next); - - desc->hw->next - = to_ioat_desc(desc->node.next)->txd.phys; - to_ioat_desc(desc->node.prev)->hw->next - = desc->txd.phys; - ioat->desccount++; - } - - ioat->used_desc.next = noop_desc->node.next; - } - 
new = to_ioat_desc(ioat->used_desc.next); - prefetch(new); - ioat->used_desc.next = new->node.next; - - if (ioat->used_desc.prev == NULL) - ioat->used_desc.prev = &new->node; - - prefetch(new->hw); - return new; -} - -static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat) -{ - if (!ioat) - return NULL; - - switch (ioat->base.device->version) { - case IOAT_VER_1_2: - return ioat1_dma_get_next_descriptor(ioat); - case IOAT_VER_2_0: - case IOAT_VER_3_0: - return ioat2_dma_get_next_descriptor(ioat); - } - return NULL; -} - static struct dma_async_tx_descriptor * ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) @@ -886,7 +605,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, int tx_cnt = 0; spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat); + desc = ioat1_dma_get_next_descriptor(ioat); do { if (!desc) break; @@ -909,7 +628,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, struct ioat_desc_sw *next; async_tx_ack(&desc->txd); - next = ioat_dma_get_next_descriptor(ioat); + next = ioat1_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; desc = next; } else @@ -920,8 +639,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, struct ioat_chan_common *chan = &ioat->base; dev_err(to_dev(chan), - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); + "chan%d - get_next_desc failed\n", chan_num(chan)); list_splice(&chain, &ioat->free_desc); spin_unlock_bh(&ioat->desc_lock); return NULL; @@ -940,94 +658,43 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, return &desc->txd; } -static struct dma_async_tx_descriptor * -ioat2_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, - dma_addr_t dma_src, size_t len, unsigned long flags) -{ - struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_desc_sw *new; - - spin_lock_bh(&ioat->desc_lock); - new = ioat2_dma_get_next_descriptor(ioat); - - /* - * leave ioat->desc_lock set in ioat 2 path - * it will get unlocked at end of tx_submit - */ - - if (new) { - new->len = len; - new->dst = dma_dest; - new->src = dma_src; - new->txd.flags = flags; - return &new->txd; - } else { - struct ioat_chan_common *chan = &ioat->base; - - spin_unlock_bh(&ioat->desc_lock); - dev_err(to_dev(chan), - "chan%d - get_next_desc failed: %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, ioat->desccount); - return NULL; - } -} - -static void ioat_dma_cleanup_tasklet(unsigned long data) +static void ioat1_cleanup_tasklet(unsigned long data) { struct ioat_dma_chan *chan = (void *)data; - ioat_dma_memcpy_cleanup(chan); + ioat1_cleanup(chan); writew(IOAT_CHANCTRL_INT_DISABLE, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } -static void -ioat_dma_unmap(struct ioat_chan_common *chan, struct ioat_desc_sw *desc) +static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, + int direction, enum dma_ctrl_flags flags, bool dst) { - if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - pci_unmap_single(chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - else - pci_unmap_page(chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - } - - if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - 
pci_unmap_single(chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - else - pci_unmap_page(chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - } + if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || + (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) + pci_unmap_single(pdev, addr, len, direction); + else + pci_unmap_page(pdev, addr, len, direction); } -/** - * ioat_dma_memcpy_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - */ -static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) + +void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, + size_t len, struct ioat_dma_descriptor *hw) +{ + struct pci_dev *pdev = chan->device->pdev; + size_t offset = len - hw->size; + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) + ioat_unmap(pdev, hw->dst_addr - offset, len, + PCI_DMA_FROMDEVICE, flags, 1); + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) + ioat_unmap(pdev, hw->src_addr - offset, len, + PCI_DMA_TODEVICE, flags, 0); +} + +unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) { - struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; - struct ioat_desc_sw *desc, *_desc; - dma_cookie_t cookie = 0; - unsigned long desc_phys; - struct ioat_desc_sw *latest_desc; - struct dma_async_tx_descriptor *tx; - - prefetch(chan->completion_virt); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; /* The completion writeback can happen at any time, so reads by the driver need to be atomic operations @@ -1051,18 +718,37 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) /* TODO do something to salvage the situation */ } + return phys_complete; +} + +/** + * ioat1_cleanup - cleanup up finished descriptors + * @chan: ioat channel to be cleaned up + */ +static void ioat1_cleanup(struct ioat_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + struct ioat_desc_sw *desc, *_desc; + dma_cookie_t cookie = 0; + struct dma_async_tx_descriptor *tx; + + prefetch(chan->completion_virt); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + phys_complete = ioat_get_current_completion(chan); if (phys_complete == chan->last_completion) { spin_unlock_bh(&chan->cleanup_lock); /* * perhaps we're stuck so hard that the watchdog can't go off? * try to catch it after 2 seconds */ - if (chan->device->version != IOAT_VER_3_0) { - if (time_after(jiffies, - chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat_dma_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } + if (time_after(jiffies, + chan->last_completion_time + HZ*WATCHDOG_DELAY)) { + ioat1_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; } return; } @@ -1074,91 +760,42 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) return; } - switch (chan->device->version) { - case IOAT_VER_1_2: - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { - tx = &desc->txd; - /* - * Incoming DMA requests may use multiple descriptors, - * due to exceeding xfercap, perhaps. If so, only the - * last one will have a cookie, and require unmapping. 
- */ - if (tx->cookie) { - cookie = tx->cookie; - ioat_dma_unmap(chan, desc); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - - if (tx->phys != phys_complete) { - /* - * a completed entry, but not the last, so clean - * up if the client is done with the descriptor - */ - if (async_tx_test_ack(tx)) { - list_move_tail(&desc->node, - &ioat->free_desc); - } else - tx->cookie = 0; - } else { - /* - * last used desc. Do not remove, so we can - * append from it, but don't look at it next - * time, either - */ - tx->cookie = 0; - - /* TODO check status bits? */ - break; - } - } - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - /* has some other thread has already cleaned up? */ - if (ioat->used_desc.prev == NULL) - break; - - /* work backwards to find latest finished desc */ - desc = to_ioat_desc(ioat->used_desc.next); + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { tx = &desc->txd; - latest_desc = NULL; - do { - desc = to_ioat_desc(desc->node.prev); - desc_phys = (unsigned long)tx->phys - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; - if (desc_phys == phys_complete) { - latest_desc = desc; - break; + /* + * Incoming DMA requests may use multiple descriptors, + * due to exceeding xfercap, perhaps. If so, only the + * last one will have a cookie, and require unmapping. + */ + if (tx->cookie) { + cookie = tx->cookie; + ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; } - } while (&desc->node != ioat->used_desc.prev); - - if (latest_desc != NULL) { - /* work forwards to clear finished descriptors */ - for (desc = to_ioat_desc(ioat->used_desc.prev); - &desc->node != latest_desc->node.next && - &desc->node != ioat->used_desc.next; - desc = to_ioat_desc(desc->node.next)) { - if (tx->cookie) { - cookie = tx->cookie; - tx->cookie = 0; - ioat_dma_unmap(chan, desc); - if (tx->callback) { - tx->callback(tx->callback_param); - tx->callback = NULL; - } - } - } - - /* move used.prev up beyond those that are finished */ - if (&desc->node == ioat->used_desc.next) - ioat->used_desc.prev = NULL; - else - ioat->used_desc.prev = &desc->node; } - break; + + if (tx->phys != phys_complete) { + /* + * a completed entry, but not the last, so clean + * up if the client is done with the descriptor + */ + if (async_tx_test_ack(tx)) + list_move_tail(&desc->node, &ioat->free_desc); + else + tx->cookie = 0; + } else { + /* + * last used desc. Do not remove, so we can + * append from it, but don't look at it next + * time, either + */ + tx->cookie = 0; + + /* TODO check status bits? 
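(e.g. the IOAT_CHANSTS_DMA_TRANSFER_STATUS field of the completion writeback could be consulted here to distinguish an active channel from a halted one)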
*/ + break; + } } spin_unlock_bh(&ioat->desc_lock); @@ -1170,50 +807,21 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -/** - * ioat_dma_is_complete - poll the status of a IOAT DMA transaction - * @chan: IOAT DMA channel handle - * @cookie: DMA transaction identifier - * @done: if not %NULL, updated with last completed transaction - * @used: if not %NULL, updated with last used transaction - */ static enum dma_status -ioat_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) { struct ioat_dma_chan *ioat = to_ioat_chan(c); - struct ioat_chan_common *chan = &ioat->base; - dma_cookie_t last_used; - dma_cookie_t last_complete; - enum dma_status ret; - last_used = c->cookie; - last_complete = chan->completed_cookie; - chan->watchdog_tcp_cookie = cookie; + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return DMA_SUCCESS; - if (done) - *done = last_complete; - if (used) - *used = last_used; + ioat1_cleanup(ioat); - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) - return ret; - - ioat_dma_memcpy_cleanup(ioat); - - last_used = c->cookie; - last_complete = chan->completed_cookie; - - if (done) - *done = last_complete; - if (used) - *used = last_used; - - return dma_async_is_complete(cookie, last_complete, last_used); + return ioat_is_complete(c, cookie, done, used); } -static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) +static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; @@ -1221,7 +829,7 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) spin_lock_bh(&ioat->desc_lock); - desc = ioat_dma_get_next_descriptor(ioat); + desc = ioat1_dma_get_next_descriptor(ioat); if (!desc) { dev_err(to_dev(chan), @@ -1240,30 +848,16 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - switch (chan->device->version) { - case IOAT_VER_1_2: - hw->next = 0; - list_add_tail(&desc->node, &ioat->used_desc); + hw->next = 0; + list_add_tail(&desc->node, &ioat->used_desc); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - break; - case IOAT_VER_2_0: - case IOAT_VER_3_0: - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - - ioat->dmacount++; - __ioat2_dma_memcpy_issue_pending(ioat); - break; - } + writeb(IOAT_CHANCMD_START, chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); spin_unlock_bh(&ioat->desc_lock); } @@ -1484,7 +1078,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -static int ioat_probe(struct ioatdma_device *device) +int ioat_probe(struct ioatdma_device *device) { int err = -ENODEV; struct dma_device *dma = 
&device->common; @@ -1503,17 +1097,15 @@ static int ioat_probe(struct ioatdma_device *device) device->completion_pool = pci_pool_create("completion_pool", pdev, sizeof(u64), SMP_CACHE_BYTES, SMP_CACHE_BYTES); + if (!device->completion_pool) { err = -ENOMEM; goto err_completion_pool; } - ioat_dma_enumerate_channels(device); + device->enumerate_channels(device); dma_cap_set(DMA_MEMCPY, dma->cap_mask); - dma->device_alloc_chan_resources = ioat_dma_alloc_chan_resources; - dma->device_free_chan_resources = ioat_dma_free_chan_resources; - dma->device_is_tx_complete = ioat_dma_is_complete; dma->dev = &pdev->dev; dev_err(dev, "Intel(R) I/OAT DMA Engine found," @@ -1546,7 +1138,7 @@ err_dma_pool: return err; } -static int ioat_register(struct ioatdma_device *device) +int ioat_register(struct ioatdma_device *device) { int err = dma_async_device_register(&device->common); @@ -1580,9 +1172,13 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) int err; device->intr_quirk = ioat1_intr_quirk; + device->enumerate_channels = ioat1_enumerate_channels; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; + dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; + dma->device_free_chan_resources = ioat1_dma_free_chan_resources; + dma->device_is_tx_complete = ioat1_dma_is_complete; err = ioat_probe(device); if (err) @@ -1594,93 +1190,12 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); + INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); schedule_delayed_work(&device->work, WATCHDOG_DELAY); return err; } -int ioat2_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - - err = ioat_probe(device); - if (err) - return err; - ioat_set_tcp_copy_break(2048); - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - if (dca) - device->dca = ioat2_dca_init(pdev, device->reg_base); - - INIT_DELAYED_WORK(&device->work, ioat_dma_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - - return err; -} - -int ioat3_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - u16 dev_id; - - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy; - dma->device_issue_pending = ioat2_dma_memcpy_issue_pending; - - /* -= IOAT ver.3 workarounds =- */ - /* Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); - - err = 
ioat_probe(device); - if (err) - return err; - ioat_set_tcp_copy_break(262144); - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - if (dca) - device->dca = ioat3_dca_init(pdev, device->reg_base); - - return err; -} - void ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; @@ -1697,4 +1212,3 @@ void ioat_dma_remove(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); } - diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5b31db73ad8e..84065dfa4d40 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -62,6 +62,7 @@ * @idx: per channel data * @dca: direct cache access context * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) + * @enumerate_channels: hw version specific channel enumeration */ struct ioatdma_device { @@ -76,6 +77,7 @@ struct ioatdma_device { struct ioat_chan_common *idx[4]; struct dca_provider *dca; void (*intr_quirk)(struct ioatdma_device *device); + int (*enumerate_channels)(struct ioatdma_device *device); }; struct ioat_chan_common { @@ -106,6 +108,7 @@ struct ioat_chan_common { struct tasklet_struct cleanup_task; }; + /** * struct ioat_dma_chan - internal representation of a DMA channel */ @@ -119,7 +122,6 @@ struct ioat_dma_chan { struct list_head used_desc; int pending; - u16 dmacount; u16 desccount; }; @@ -135,6 +137,33 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) return container_of(chan, struct ioat_dma_chan, base); } +/** + * ioat_is_complete - poll the status of an ioat transaction + * @c: channel handle + * @cookie: transaction identifier + * @done: if set, updated with last completed transaction + * @used: if set, updated with last used transaction + */ +static inline enum dma_status +ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat_chan_common *chan = to_chan_common(c); + dma_cookie_t last_used; + dma_cookie_t last_complete; + + last_used = c->cookie; + last_complete = chan->completed_cookie; + chan->watchdog_tcp_cookie = cookie; + + if (done) + *done = last_complete; + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + /* wrapper around hardware descriptor format + additional software fields */ /** @@ -162,11 +191,22 @@ static inline void ioat_set_tcp_copy_break(unsigned long copybreak) #endif } +static inline struct ioat_chan_common * +ioat_chan_by_index(struct ioatdma_device *device, int index) +{ + return device->idx[index]; +} + +int ioat_probe(struct ioatdma_device *device); +int ioat_register(struct ioatdma_device *device); int ioat1_dma_probe(struct ioatdma_device *dev, int dca); -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); void ioat_dma_remove(struct ioatdma_device *device); struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); +void ioat_init_channel(struct ioatdma_device *device, + struct ioat_chan_common *chan, int idx, + work_func_t work_fn, void (*tasklet)(unsigned long), + unsigned long tasklet_data); +void 
ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, + size_t len, struct ioat_dma_descriptor *hw); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c new file mode 100644 index 000000000000..49ba1c73d95e --- /dev/null +++ b/drivers/dma/ioat/dma_v2.c @@ -0,0 +1,750 @@ +/* + * Intel I/OAT DMA Linux driver + * Copyright(c) 2004 - 2009 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + */ + +/* + * This driver supports an Intel I/OAT DMA engine (versions >= 2), which + * does asynchronous data movement and checksumming operations. + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/dmaengine.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/i7300_idle.h> +#include "dma.h" +#include "dma_v2.h" +#include "registers.h" +#include "hw.h" + +static int ioat_ring_alloc_order = 8; +module_param(ioat_ring_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_alloc_order, + "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); + +static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) +{ + void * __iomem reg_base = ioat->base.reg_base; + + ioat->pending = 0; + ioat->dmacount += ioat2_ring_pending(ioat); + ioat->issued = ioat->head; + /* make descriptor updates globally visible before notifying channel */ + wmb(); + writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); + +} + +static void ioat2_issue_pending(struct dma_chan *chan) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); + + spin_lock_bh(&ioat->ring_lock); + if (ioat->pending == 1) + __ioat2_issue_pending(ioat); + spin_unlock_bh(&ioat->ring_lock); +} + +/** + * ioat2_update_pending - log pending descriptors + * @ioat: ioat2+ channel + * + * set pending to '1' unless pending is already set to '2', pending == 2 + * indicates that submission is temporarily blocked due to an in-flight + * reset. If we are already above the ioat_pending_level threshold then + * just issue pending.
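+ *
+ * A reader's summary of the three states, inferred from the code below:
+ *   pending == 0: doorbell is up to date; nothing waiting
+ *   pending == 1: descriptors logged; the doorbell write is deferred
+ *   pending == 2: reset in flight; issuing stays suppressed until
+ *                 ioat2_reset_part2() re-arms the channel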
+ * + * called with ring_lock held + */ +static void ioat2_update_pending(struct ioat2_dma_chan *ioat) +{ + if (unlikely(ioat->pending == 2)) + return; + else if (ioat2_ring_pending(ioat) > ioat_pending_level) + __ioat2_issue_pending(ioat); + else + ioat->pending = 1; +} + +static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) +{ + void __iomem *reg_base = ioat->base.reg_base; + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; + int idx; + + if (ioat2_ring_space(ioat) < 1) { + dev_err(to_dev(&ioat->base), + "Unable to start null desc - ring full\n"); + return; + } + + idx = ioat2_desc_alloc(ioat, 1); + desc = ioat2_get_ring_ent(ioat, idx); + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.compl_write = 1; + /* set size to non-zero value (channel returns error when size is 0) */ + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + async_tx_ack(&desc->txd); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + __ioat2_issue_pending(ioat); +} + +static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) +{ + spin_lock_bh(&ioat->ring_lock); + __ioat2_start_null_desc(ioat); + spin_unlock_bh(&ioat->ring_lock); +} + +static void ioat2_cleanup(struct ioat2_dma_chan *ioat); + +/** + * ioat2_reset_part2 - reinit the channel after a reset + */ +static void ioat2_reset_part2(struct work_struct *work) +{ + struct ioat_chan_common *chan; + struct ioat2_dma_chan *ioat; + + chan = container_of(work, struct ioat_chan_common, work.work); + ioat = container_of(chan, struct ioat2_dma_chan, base); + + /* ensure that ->tail points to the stalled descriptor + * (ioat->pending is set to 2 at this point so no new + * descriptors will be issued while we perform this cleanup) + */ + ioat2_cleanup(ioat); + + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->ring_lock); + + /* set the tail to be re-issued */ + ioat->issued = ioat->tail; + ioat->dmacount = 0; + + if (ioat2_ring_pending(ioat)) { + struct ioat_ring_ent *desc; + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->txd.phys) >> 32, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + __ioat2_issue_pending(ioat); + } else + __ioat2_start_null_desc(ioat); + + spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); + + dev_info(to_dev(chan), + "chan%d reset - %d descs waiting, %d total desc\n", + chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); +} + +/** + * ioat2_reset_channel - restart a channel + * @ioat: IOAT DMA channel handle + */ +static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) +{ + u32 chansts, chanerr; + struct ioat_chan_common *chan = &ioat->base; + u16 active; + + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + spin_unlock_bh(&ioat->ring_lock); + if (!active) + return; + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + chansts = (chan->completion_virt->low + & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + if (chanerr) { + dev_err(to_dev(chan), + "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", + chan_num(chan), chansts, chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + } + + spin_lock_bh(&ioat->ring_lock); + ioat->pending = 2; + writeb(IOAT_CHANCMD_RESET, + chan->reg_base + + IOAT_CHANCMD_OFFSET(chan->device->version)); + 
spin_unlock_bh(&ioat->ring_lock); + schedule_delayed_work(&chan->work, RESET_DELAY); +} + +/** + * ioat2_chan_watchdog - watch for stuck channels + */ +static void ioat2_chan_watchdog(struct work_struct *work) +{ + struct ioatdma_device *device = + container_of(work, struct ioatdma_device, work.work); + struct ioat2_dma_chan *ioat; + struct ioat_chan_common *chan; + u16 active; + int i; + + for (i = 0; i < device->common.chancnt; i++) { + chan = ioat_chan_by_index(device, i); + ioat = container_of(chan, struct ioat2_dma_chan, base); + + /* + * for version 2.0 if there are descriptors yet to be processed + * and the last completed hasn't changed since the last watchdog + * if they haven't hit the pending level + * issue the pending to push them through + * else + * try resetting the channel + */ + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + spin_unlock_bh(&ioat->ring_lock); + + if (active && + chan->last_completion && + chan->last_completion == chan->watchdog_completion) { + + if (ioat->pending == 1) + ioat2_issue_pending(&chan->common); + else { + ioat2_reset_channel(ioat); + chan->watchdog_completion = 0; + } + } else { + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_completion = chan->last_completion; + } + chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; + } + schedule_delayed_work(&device->work, WATCHDOG_DELAY); +} + +/** + * ioat2_cleanup - clean finished descriptors (advance tail pointer) + * @chan: ioat channel to be cleaned up + */ +static void ioat2_cleanup(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + struct ioat_ring_ent *desc; + bool seen_current = false; + u16 active; + int i; + struct dma_async_tx_descriptor *tx; + + prefetch(chan->completion_virt); + + spin_lock_bh(&chan->cleanup_lock); + phys_complete = ioat_get_current_completion(chan); + if (phys_complete == chan->last_completion) { + spin_unlock_bh(&chan->cleanup_lock); + /* + * perhaps we're stuck so hard that the watchdog can't go off? + * try to catch it after WATCHDOG_DELAY seconds + */ + if (chan->device->version < IOAT_VER_3_0) { + unsigned long tmo; + + tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; + if (time_after(jiffies, tmo)) { + ioat2_chan_watchdog(&(chan->device->work.work)); + chan->last_completion_time = jiffies; + } + } + return; + } + chan->last_completion_time = jiffies; + + spin_lock_bh(&ioat->ring_lock); + + active = ioat2_ring_active(ioat); + for (i = 0; i < active && !seen_current; i++) { + prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); + desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + tx = &desc->txd; + if (tx->cookie) { + ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + chan->completed_cookie = tx->cookie; + tx->cookie = 0; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } + + if (tx->phys == phys_complete) + seen_current = true; + } + ioat->tail += i; + BUG_ON(!seen_current); /* no active descs have written a completion? 
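(if this fires, the completion address the hardware wrote back matched none of the descriptors between tail and head)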
*/ + spin_unlock_bh(&ioat->ring_lock); + + chan->last_completion = phys_complete; + + spin_unlock_bh(&chan->cleanup_lock); +} + +static void ioat2_cleanup_tasklet(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + + ioat2_cleanup(ioat); + writew(IOAT_CHANCTRL_INT_DISABLE, + ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); +} + +/** + * ioat2_enumerate_channels - find and initialize the device's channels + * @device: the device to be enumerated + */ +static int ioat2_enumerate_channels(struct ioatdma_device *device) +{ + struct ioat2_dma_chan *ioat; + struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; + u8 xfercap_log; + int i; + + INIT_LIST_HEAD(&dma->channels); + dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + if (xfercap_log == 0) + return 0; + + /* FIXME which i/oat version is i7300? */ +#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL + if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) + dma->chancnt--; +#endif + for (i = 0; i < dma->chancnt; i++) { + ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL); + if (!ioat) + break; + + ioat_init_channel(device, &ioat->base, i, + ioat2_reset_part2, + ioat2_cleanup_tasklet, + (unsigned long) ioat); + ioat->xfercap_log = xfercap_log; + spin_lock_init(&ioat->ring_lock); + } + dma->chancnt = i; + return i; +} + +static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + dma_cookie_t cookie = c->cookie; + + cookie++; + if (cookie < 0) + cookie = 1; + tx->cookie = cookie; + c->cookie = cookie; + ioat2_update_pending(ioat); + spin_unlock_bh(&ioat->ring_lock); + + return cookie; +} + +static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) +{ + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + struct ioatdma_device *dma; + dma_addr_t phys; + + dma = to_ioatdma_device(chan->device); + hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys); + if (!hw) + return NULL; + memset(hw, 0, sizeof(*hw)); + + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) { + pci_pool_free(dma->dma_pool, hw, phys); + return NULL; + } + + dma_async_tx_descriptor_init(&desc->txd, chan); + desc->txd.tx_submit = ioat2_tx_submit_unlock; + desc->hw = hw; + desc->txd.phys = phys; + return desc; +} + +static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) +{ + struct ioatdma_device *dma; + + dma = to_ioatdma_device(chan->device); + pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); + kfree(desc); +} + +/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring + * @chan: channel to be initialized + */ +static int ioat2_alloc_chan_resources(struct dma_chan *c) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioat_ring_ent **ring; + u16 chanctrl; + u32 chanerr; + int descs; + int i; + + /* have we already been set up? 
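(the ring is kept once allocated, so a repeat call just reports the existing ring size)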
*/ + if (ioat->ring) + return 1 << ioat->alloc_order; + + /* Setup register to interrupt and write completion status on error */ + chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | + IOAT_CHANCTRL_ERR_COMPLETION_EN; + writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + if (chanerr) { + dev_err(to_dev(chan), "CHANERR = %x, clearing\n", chanerr); + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + } + + /* allocate a completion writeback area */ + /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ + chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, + &chan->completion_addr); + if (!chan->completion_virt) + return -ENOMEM; + + memset(chan->completion_virt, 0, + sizeof(*chan->completion_virt)); + writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); + writel(((u64) chan->completion_addr) >> 32, + chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); + + ioat->alloc_order = ioat_get_alloc_order(); + descs = 1 << ioat->alloc_order; + + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL); + if (!ring) + return -ENOMEM; + for (i = 0; i < descs; i++) { + ring[i] = ioat2_alloc_ring_ent(c); + if (!ring[i]) { + while (i--) + ioat2_free_ring_ent(ring[i], c); + kfree(ring); + return -ENOMEM; + } + } + + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; + } + ring[i]->hw->next = ring[0]->txd.phys; + + spin_lock_bh(&ioat->ring_lock); + ioat->ring = ring; + ioat->head = 0; + ioat->issued = 0; + ioat->tail = 0; + ioat->pending = 0; + spin_unlock_bh(&ioat->ring_lock); + + tasklet_enable(&chan->cleanup_task); + ioat2_start_null_desc(ioat); + + return descs; +} + +/** + * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops + * @idx: gets starting descriptor index on successful allocation + * @ioat: ioat2,3 channel (ring) to operate on + * @num_descs: allocation length + */ +static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) +{ + struct ioat_chan_common *chan = &ioat->base; + + spin_lock_bh(&ioat->ring_lock); + if (unlikely(ioat2_ring_space(ioat) < num_descs)) { + if (printk_ratelimit()) + dev_dbg(to_dev(chan), + "%s: ring full! 
num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, + ioat->issued); + spin_unlock_bh(&ioat->ring_lock); + + /* do direct reclaim in the allocation failure case */ + ioat2_cleanup(ioat); + + return -ENOMEM; + } + + dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); + + *idx = ioat2_desc_alloc(ioat, num_descs); + return 0; /* with ioat->ring_lock held */ +} + +static struct dma_async_tx_descriptor * +ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *desc; + dma_addr_t dst = dma_dest; + dma_addr_t src = dma_src; + size_t total_len = len; + int num_descs; + u16 idx; + int i; + + num_descs = ioat2_xferlen_to_descs(ioat, len); + if (likely(num_descs) && + ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) + /* pass */; + else + return NULL; + for (i = 0; i < num_descs; i++) { + size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + hw = desc->hw; + + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + } + + desc->txd.flags = flags; + desc->len = total_len; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + /* we leave the channel locked to ensure in order submission */ + + return &desc->txd; +} + +/** + * ioat2_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ +static void ioat2_free_chan_resources(struct dma_chan *c) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *ioatdma_device = chan->device; + struct ioat_ring_ent *desc; + const u16 total_descs = 1 << ioat->alloc_order; + int descs; + int i; + + /* Before freeing channel resources first check + * if they have been previously allocated for this channel. + */ + if (!ioat->ring) + return; + + tasklet_disable(&chan->cleanup_task); + ioat2_cleanup(ioat); + + /* Delay 100ms after reset to allow internal DMA logic to quiesce + * before removing DMA descriptor resources. 
+ */ + writeb(IOAT_CHANCMD_RESET, + chan->reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); + mdelay(100); + + spin_lock_bh(&ioat->ring_lock); + descs = ioat2_ring_space(ioat); + for (i = 0; i < descs; i++) { + desc = ioat2_get_ring_ent(ioat, ioat->head + i); + ioat2_free_ring_ent(desc, c); + } + + if (descs < total_descs) + dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", + total_descs - descs); + + for (i = 0; i < total_descs - descs; i++) { + desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + ioat2_free_ring_ent(desc, c); + } + + kfree(ioat->ring); + ioat->ring = NULL; + ioat->alloc_order = 0; + pci_pool_free(ioatdma_device->completion_pool, + chan->completion_virt, + chan->completion_addr); + spin_unlock_bh(&ioat->ring_lock); + + chan->last_completion = 0; + chan->completion_addr = 0; + ioat->pending = 0; + ioat->dmacount = 0; + chan->watchdog_completion = 0; + chan->last_compl_desc_addr_hw = 0; + chan->watchdog_tcp_cookie = 0; + chan->watchdog_last_tcp_cookie = 0; +} + +static enum dma_status +ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return DMA_SUCCESS; + + ioat2_cleanup(ioat); + + return ioat_is_complete(c, cookie, done, used); +} + +int ioat2_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + int err; + + device->enumerate_channels = ioat2_enumerate_channels; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat2_issue_pending; + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; + dma->device_free_chan_resources = ioat2_free_chan_resources; + dma->device_is_tx_complete = ioat2_is_complete; + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(2048); + + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU, + chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat2_dca_init(pdev, device->reg_base); + + INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); + schedule_delayed_work(&device->work, WATCHDOG_DELAY); + + return err; +} + +int ioat3_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + int err; + u16 dev_id; + + device->enumerate_channels = ioat2_enumerate_channels; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat2_issue_pending; + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; + dma->device_free_chan_resources = ioat2_free_chan_resources; + dma->device_is_tx_complete = ioat2_is_complete; + + /* -= IOAT ver.3 workarounds =- */ + /* Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) + pci_write_config_dword(pdev, 
IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(262144); + + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + writel(IOAT_DMA_DCA_ANY_CPU, + chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat3_dca_init(pdev, device->reg_base); + + return err; +} diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h new file mode 100644 index 000000000000..94a553eacdbd --- /dev/null +++ b/drivers/dma/ioat/dma_v2.h @@ -0,0 +1,131 @@ +/* + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + * The full GNU General Public License is included in this distribution in the + * file called COPYING. + */ +#ifndef IOATDMA_V2_H +#define IOATDMA_V2_H + +#include +#include "dma.h" +#include "hw.h" + + +extern int ioat_pending_level; + +/* + * workaround for IOAT ver.3.0 null descriptor issue + * (channel returns error when size is 0) + */ +#define NULL_DESC_BUFFER_SIZE 1 + +#define IOAT_MAX_ORDER 16 +#define ioat_get_alloc_order() \ + (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) + +/* struct ioat2_dma_chan - ioat v2 / v3 channel attributes + * @base: common ioat channel parameters + * @xfercap_log; log2 of channel max transfer length (for fast division) + * @head: allocated index + * @issued: hardware notification point + * @tail: cleanup index + * @pending: lock free indicator for issued != head + * @dmacount: identical to 'head' except for occasionally resetting to zero + * @alloc_order: log2 of the number of allocated descriptors + * @ring: software ring buffer implementation of hardware ring + * @ring_lock: protects ring attributes + */ +struct ioat2_dma_chan { + struct ioat_chan_common base; + size_t xfercap_log; + u16 head; + u16 issued; + u16 tail; + u16 dmacount; + u16 alloc_order; + int pending; + struct ioat_ring_ent **ring; + spinlock_t ring_lock; +}; + +static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) +{ + struct ioat_chan_common *chan = to_chan_common(c); + + return container_of(chan, struct ioat2_dma_chan, base); +} + +static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) +{ + return (1 << ioat->alloc_order) - 1; +} + +/* count of descriptors in flight with the engine */ +static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) +{ + return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); +} + +/* count of descriptors pending submission to hardware */ +static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) +{ + return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); +} + +static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) +{ + u16 num_descs = ioat2_ring_mask(ioat) + 1; + u16 active = ioat2_ring_active(ioat); 
+ + BUG_ON(active > num_descs); + + return num_descs - active; +} + +/* assumes caller already checked space */ +static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) +{ + ioat->head += len; + return ioat->head - len; +} + +static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) +{ + u16 num_descs = len >> ioat->xfercap_log; + + num_descs += !!(len & ((1 << ioat->xfercap_log) - 1)); + return num_descs; +} + +struct ioat_ring_ent { + struct ioat_dma_descriptor *hw; + struct dma_async_tx_descriptor txd; + size_t len; +}; + +static inline struct ioat_ring_ent * +ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) +{ + return ioat->ring[idx & ioat2_ring_mask(ioat)]; +} + +int ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int ioat3_dma_probe(struct ioatdma_device *dev, int dca); +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +#endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 55414d88ac1b..c4e432269252 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -31,6 +31,7 @@ #include #include #include "dma.h" +#include "dma_v2.h" #include "registers.h" #include "hw.h" From 38e12f64a165e83617c21dae3c15972fd8d639f5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:00:46 -0700 Subject: [PATCH 0110/3409] ioat1: kill unused unmap parameters The unified ioat1/ioat2 ioat_dma_unmap() implementation derives the source and dest addresses from the unmap descriptor. There is no longer a need to track this information in struct ioat_desc_sw. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 2 -- drivers/dma/ioat/dma.h | 2 -- 2 files changed, 4 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 64b4d75059aa..696d4de3bb8f 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -648,8 +648,6 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, desc->txd.flags = flags; desc->tx_cnt = tx_cnt; - desc->src = dma_src; - desc->dst = dma_dest; desc->len = total_len; list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 84065dfa4d40..fa15e77652a0 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -179,8 +179,6 @@ struct ioat_desc_sw { struct list_head node; int tx_cnt; size_t len; - dma_addr_t src; - dma_addr_t dst; struct dma_async_tx_descriptor txd; }; From 6df9183a153291a2585a8dfe67597fc18c201147 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:00:55 -0700 Subject: [PATCH 0111/3409] ioat: add some dev_dbg() calls Provide some output for debugging the driver. 
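For context, the debug instrumentation added below relies on a compile-time gate: each software descriptor grows an integer id that exists only in DEBUG builds, and the accessor macros expand to nothing (or a constant) otherwise, so release builds pay no size or runtime cost. A minimal standalone sketch of that pattern follows; the struct and demo_* names are illustrative stand-ins, not the driver's real definitions:

	/* Illustrative sketch only -- demo_* names are not part of the driver. */
	struct demo_desc {
		void *hw;
	#ifdef DEBUG
		int id;		/* which slot this descriptor was allocated as */
	#endif
	};

	#ifdef DEBUG
	#define demo_set_id(d, i)	((d)->id = (i))
	#define demo_id(d)		((d)->id)
	#else
	#define demo_set_id(d, i)	/* expands to nothing in release builds */
	#define demo_id(d)		(0)
	#endif

The dev_dbg() calls themselves are likewise compiled out unless DEBUG or dynamic debug is enabled, so the extra output is free in production builds.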
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 29 ++++++++++++++++++++++++++--- drivers/dma/ioat/dma.h | 28 ++++++++++++++++++++++++++++ drivers/dma/ioat/dma_v2.c | 25 ++++++++++++++++++++++++- drivers/dma/ioat/dma_v2.h | 3 +++ 4 files changed, 81 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 696d4de3bb8f..edf4f5e5de73 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -134,6 +134,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); xfercap = (xfercap_scale == 0 ? -1 : (1UL << xfercap_scale)); + dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL if (i7300_idle_platform_probe(NULL, NULL, 1) == 0) @@ -167,6 +168,8 @@ __ioat1_dma_memcpy_issue_pending(struct ioat_dma_chan *ioat) { void __iomem *reg_base = ioat->base.reg_base; + dev_dbg(to_dev(&ioat->base), "%s: pending: %d\n", + __func__, ioat->pending); ioat->pending = 0; writeb(IOAT_CHANCMD_APPEND, reg_base + IOAT1_CHANCMD_OFFSET); } @@ -251,6 +254,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) if (!ioat->used_desc.prev) return; + dev_dbg(to_dev(chan), "%s\n", __func__); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); chansts = (chan->completion_virt->low & IOAT_CHANSTS_DMA_TRANSFER_STATUS); @@ -382,6 +386,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) cookie = 1; c->cookie = cookie; tx->cookie = cookie; + dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); /* write address into NextDescriptor field of last desc in chain */ first = to_ioat_desc(tx->tx_list.next); @@ -390,6 +395,8 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) wmb(); chain_tail->hw->next = first->txd.phys; list_splice_tail_init(&tx->tx_list, &ioat->used_desc); + dump_desc_dbg(ioat, chain_tail); + dump_desc_dbg(ioat, first); ioat->pending += desc->tx_cnt; if (ioat->pending >= ioat_pending_level) @@ -429,6 +436,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) desc_sw->txd.tx_submit = ioat1_tx_submit; desc_sw->hw = desc; desc_sw->txd.phys = phys; + set_desc_id(desc_sw, -1); return desc_sw; } @@ -474,6 +482,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) dev_err(to_dev(chan), "Only %d initial descriptors\n", i); break; } + set_desc_id(desc, i); list_add_tail(&desc->node, &tmp_list); } spin_lock_bh(&ioat->desc_lock); @@ -495,6 +504,8 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) tasklet_enable(&chan->cleanup_task); ioat1_dma_start_null_desc(ioat); /* give chain to dma device */ + dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", + __func__, ioat->desccount); return ioat->desccount; } @@ -527,8 +538,10 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) mdelay(100); spin_lock_bh(&ioat->desc_lock); - list_for_each_entry_safe(desc, _desc, - &ioat->used_desc, node) { + list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { + dev_dbg(to_dev(chan), "%s: freeing %d from used list\n", + __func__, desc_id(desc)); + dump_desc_dbg(ioat, desc); in_use_descs++; list_del(&desc->node); pci_pool_free(ioatdma_device->dma_pool, desc->hw, @@ -585,7 +598,8 @@ ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat) return NULL; } } - + dev_dbg(to_dev(&ioat->base), "%s: allocated: %d\n", + __func__, desc_id(new)); 
prefetch(new->hw); return new; } @@ -630,6 +644,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, async_tx_ack(&desc->txd); next = ioat1_dma_get_next_descriptor(ioat); hw->next = next ? next->txd.phys : 0; + dump_desc_dbg(ioat, desc); desc = next; } else hw->next = 0; @@ -652,6 +667,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat, desc); return &desc->txd; } @@ -707,6 +723,9 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; #endif + dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + (unsigned long long) phys_complete); + if ((chan->completion_virt->full & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { @@ -758,6 +777,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) return; } + dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", + __func__, phys_complete); list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { tx = &desc->txd; /* @@ -765,6 +786,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) * due to exceeding xfercap, perhaps. If so, only the * last one will have a cookie, and require unmapping. */ + dump_desc_dbg(ioat, desc); if (tx->cookie) { cookie = tx->cookie; ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); @@ -848,6 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) async_tx_ack(&desc->txd); hw->next = 0; list_add_tail(&desc->node, &ioat->used_desc); + dump_desc_dbg(ioat, desc); writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index fa15e77652a0..9f9edc2cd079 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -173,6 +173,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, * or attached to a transaction list (async_tx.tx_list) * @tx_cnt: number of descriptors required to complete the transaction * @txd: the generic software descriptor for all engines + * @id: identifier for debug */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; @@ -180,8 +181,35 @@ struct ioat_desc_sw { int tx_cnt; size_t len; struct dma_async_tx_descriptor txd; + #ifdef DEBUG + int id; + #endif }; +#ifdef DEBUG +#define set_desc_id(desc, i) ((desc)->id = (i)) +#define desc_id(desc) ((desc)->id) +#else +#define set_desc_id(desc, i) +#define desc_id(desc) (0) +#endif + +static inline void +__dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, + struct dma_async_tx_descriptor *tx, int id) +{ + struct device *dev = to_dev(chan); + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" + " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, + (unsigned long long) tx->phys, + (unsigned long long) hw->next, tx->cookie, tx->flags, + hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); +} + +#define dump_desc_dbg(c, d) \ + ({ if (d) __dump_desc_dbg(&c->base, d->hw, &d->txd, desc_id(d)); 0; }) + static inline void ioat_set_tcp_copy_break(unsigned long copybreak) { #ifdef CONFIG_NET_DMA diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 49ba1c73d95e..58881860f400 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -54,7 +54,9 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) /* make descriptor updates globally 
visible before notifying channel */ wmb(); writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET); - + dev_dbg(to_dev(&ioat->base), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } static void ioat2_issue_pending(struct dma_chan *chan) @@ -101,6 +103,8 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) return; } + dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued); idx = ioat2_desc_alloc(ioat, 1); desc = ioat2_get_ring_ent(ioat, idx); @@ -118,6 +122,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) reg_base + IOAT2_CHAINADDR_OFFSET_LOW); writel(((u64) desc->txd.phys) >> 32, reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + dump_desc_dbg(ioat, desc); __ioat2_issue_pending(ioat); } @@ -154,6 +159,10 @@ static void ioat2_reset_part2(struct work_struct *work) ioat->issued = ioat->tail; ioat->dmacount = 0; + dev_dbg(to_dev(&ioat->base), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); + if (ioat2_ring_pending(ioat)) { struct ioat_ring_ent *desc; @@ -221,6 +230,8 @@ static void ioat2_chan_watchdog(struct work_struct *work) u16 active; int i; + dev_dbg(&device->pdev->dev, "%s\n", __func__); + for (i = 0; i < device->common.chancnt; i++) { chan = ioat_chan_by_index(device, i); ioat = container_of(chan, struct ioat2_dma_chan, base); @@ -295,11 +306,15 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_lock_bh(&ioat->ring_lock); + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued); + active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); desc = ioat2_get_ring_ent(ioat, ioat->tail + i); tx = &desc->txd; + dump_desc_dbg(ioat, desc); if (tx->cookie) { ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); chan->completed_cookie = tx->cookie; @@ -348,6 +363,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); if (xfercap_log == 0) return 0; + dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); /* FIXME which i/oat version is i7300? 
*/ #ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL @@ -381,6 +397,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) cookie = 1; tx->cookie = cookie; c->cookie = cookie; + dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + ioat2_update_pending(ioat); spin_unlock_bh(&ioat->ring_lock); @@ -480,6 +498,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) kfree(ring); return -ENOMEM; } + set_desc_id(ring[i], i); } /* link descs */ @@ -571,12 +590,14 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, len -= copy; dst += copy; src += copy; + dump_desc_dbg(ioat, desc); } desc->txd.flags = flags; desc->len = total_len; hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + dump_desc_dbg(ioat, desc); /* we leave the channel locked to ensure in order submission */ return &desc->txd; @@ -614,6 +635,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) spin_lock_bh(&ioat->ring_lock); descs = ioat2_ring_space(ioat); + dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); for (i = 0; i < descs; i++) { desc = ioat2_get_ring_ent(ioat, ioat->head + i); ioat2_free_ring_ent(desc, c); @@ -625,6 +647,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) for (i = 0; i < total_descs - descs; i++) { desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + dump_desc_dbg(ioat, desc); ioat2_free_ring_ent(desc, c); } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 94a553eacdbd..c72ccb5dfd5b 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -116,6 +116,9 @@ struct ioat_ring_ent { struct ioat_dma_descriptor *hw; struct dma_async_tx_descriptor txd; size_t len; + #ifdef DEBUG + int id; + #endif }; static inline struct ioat_ring_ent * From 4fb9b9e8d55880523db550043dfb204696dd0422 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:04 -0700 Subject: [PATCH 0112/3409] ioat: cleanup completion status reads The cleanup path makes an effort to only perform an atomic read of the 64-bit completion address. However in the 32-bit case it does not matter if we read the upper-32 and lower-32 non-atomically because the upper-32 will always be zero. 
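Put differently, the completion writeback area can now be a plain u64 that is read and masked directly, replacing the old union of two u32 halves and its #if BITS_PER_LONG == 64 special case. A simplified sketch of the resulting read, using the IOAT_CHANSTS_* constants this series defines in registers.h (the function name is an illustrative stand-in, not the driver's):

	/* On a 32-bit host the upper half of the writeback value is always
	 * zero, so a non-atomic read of the two halves cannot tear it.
	 */
	static unsigned long demo_phys_complete(const u64 *completion)
	{
		u64 status = *completion;	/* written back by the device */

		/* the low six bits carry channel status and error flags */
		return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; /* ~0x3fULL */
	}

The masked-off low bits are still inspected separately (IOAT_CHANSTS_DMA_TRANSFER_STATUS) to detect a halted channel before the address is compared against last_completion.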
Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 78 ++++++++++++++---------------------- drivers/dma/ioat/dma.h | 10 +---- drivers/dma/ioat/dma_v2.c | 25 +++++------- drivers/dma/ioat/registers.h | 8 ++-- 4 files changed, 46 insertions(+), 75 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index edf4f5e5de73..08417ad4edca 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -201,8 +201,7 @@ static void ioat1_reset_part2(struct work_struct *work) spin_lock_bh(&chan->cleanup_lock); spin_lock_bh(&ioat->desc_lock); - chan->completion_virt->low = 0; - chan->completion_virt->high = 0; + *chan->completion = 0; ioat->pending = 0; /* count the descriptors waiting */ @@ -256,8 +255,7 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) dev_dbg(to_dev(chan), "%s\n", __func__); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); - chansts = (chan->completion_virt->low - & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -293,14 +291,8 @@ static void ioat1_chan_watchdog(struct work_struct *work) struct ioat_dma_chan *ioat; struct ioat_chan_common *chan; int i; - - union { - u64 full; - struct { - u32 low; - u32 high; - }; - } completion_hw; + u64 completion; + u32 completion_low; unsigned long compl_desc_addr_hw; for (i = 0; i < device->common.chancnt; i++) { @@ -334,25 +326,24 @@ static void ioat1_chan_watchdog(struct work_struct *work) * try resetting the channel */ - completion_hw.low = readl(chan->reg_base + + /* we need to read the low address first as this + * causes the chipset to latch the upper bits + * for the subsequent read + */ + completion_low = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); - completion_hw.high = readl(chan->reg_base + + completion = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); -#if (BITS_PER_LONG == 64) - compl_desc_addr_hw = - completion_hw.full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - compl_desc_addr_hw = - completion_hw.low & IOAT_LOW_COMPLETION_MASK; -#endif + completion <<= 32; + completion |= completion_low; + compl_desc_addr_hw = completion & + IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; if ((compl_desc_addr_hw != 0) && (compl_desc_addr_hw != chan->watchdog_completion) && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - chan->completion_virt->low = completion_hw.low; - chan->completion_virt->high = completion_hw.high; + *chan->completion = completion; } else { ioat1_reset_channel(ioat); chan->watchdog_completion = 0; @@ -492,14 +483,12 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) /* allocate a completion writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, - &chan->completion_addr); - memset(chan->completion_virt, 0, - sizeof(*chan->completion_virt)); - writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + chan->completion = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, &chan->completion_dma); + memset(chan->completion, 0, sizeof(*chan->completion)); + writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_addr) >> 32, + writel(((u64) chan->completion_dma) >> 32, chan->reg_base 
+ IOAT_CHANCMP_OFFSET_HIGH); tasklet_enable(&chan->cleanup_task); @@ -558,15 +547,16 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) spin_unlock_bh(&ioat->desc_lock); pci_pool_free(ioatdma_device->completion_pool, - chan->completion_virt, - chan->completion_addr); + chan->completion, + chan->completion_dma); /* one is ok since we left it on there on purpose */ if (in_use_descs > 1) dev_err(to_dev(chan), "Freeing %d in use descriptors!\n", in_use_descs - 1); - chan->last_completion = chan->completion_addr = 0; + chan->last_completion = 0; + chan->completion_dma = 0; chan->watchdog_completion = 0; chan->last_compl_desc_addr_hw = 0; chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; @@ -709,25 +699,15 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) { unsigned long phys_complete; + u64 completion; - /* The completion writeback can happen at any time, - so reads by the driver need to be atomic operations - The descriptor physical addresses are limited to 32-bits - when the CPU can only do a 32-bit mov */ - -#if (BITS_PER_LONG == 64) - phys_complete = - chan->completion_virt->full - & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; -#else - phys_complete = chan->completion_virt->low & IOAT_LOW_COMPLETION_MASK; -#endif + completion = *chan->completion; + phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if ((chan->completion_virt->full - & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == + if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", readl(chan->reg_base + IOAT_CHANERR_OFFSET)); @@ -750,7 +730,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) dma_cookie_t cookie = 0; struct dma_async_tx_descriptor *tx; - prefetch(chan->completion_virt); + prefetch(chan->completion); if (!spin_trylock_bh(&chan->cleanup_lock)) return; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 9f9edc2cd079..5fd6e2de84db 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -96,14 +96,8 @@ struct ioat_chan_common { struct ioatdma_device *device; struct dma_chan common; - dma_addr_t completion_addr; - union { - u64 full; /* HW completion writeback */ - struct { - u32 low; - u32 high; - }; - } *completion_virt; + dma_addr_t completion_dma; + u64 *completion; unsigned long last_compl_desc_addr_hw; struct tasklet_struct cleanup_task; }; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 58881860f400..ca1134249341 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -200,8 +200,7 @@ static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) return; chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = (chan->completion_virt->low - & IOAT_CHANSTS_DMA_TRANSFER_STATUS); + chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -281,7 +280,7 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) int i; struct dma_async_tx_descriptor *tx; - prefetch(chan->completion_virt); + prefetch(chan->completion); spin_lock_bh(&chan->cleanup_lock); phys_complete = ioat_get_current_completion(chan); @@ -470,17 +469,15 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) /* allocate a completion 
writeback area */ /* doing 2 32bit writes to mmio since 1 64b write doesn't work */ - chan->completion_virt = pci_pool_alloc(chan->device->completion_pool, - GFP_KERNEL, - &chan->completion_addr); - if (!chan->completion_virt) + chan->completion = pci_pool_alloc(chan->device->completion_pool, + GFP_KERNEL, &chan->completion_dma); + if (!chan->completion) return -ENOMEM; - memset(chan->completion_virt, 0, - sizeof(*chan->completion_virt)); - writel(((u64) chan->completion_addr) & 0x00000000FFFFFFFF, + memset(chan->completion, 0, sizeof(*chan->completion)); + writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF, chan->reg_base + IOAT_CHANCMP_OFFSET_LOW); - writel(((u64) chan->completion_addr) >> 32, + writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); ioat->alloc_order = ioat_get_alloc_order(); @@ -655,12 +652,12 @@ static void ioat2_free_chan_resources(struct dma_chan *c) ioat->ring = NULL; ioat->alloc_order = 0; pci_pool_free(ioatdma_device->completion_pool, - chan->completion_virt, - chan->completion_addr); + chan->completion, + chan->completion_dma); spin_unlock_bh(&ioat->ring_lock); chan->last_completion = 0; - chan->completion_addr = 0; + chan->completion_dma = 0; ioat->pending = 0; ioat->dmacount = 0; chan->watchdog_completion = 0; diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 49bc277424f8..a83c7332125c 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -94,10 +94,10 @@ #define IOAT2_CHANSTS_OFFSET_HIGH 0x0C #define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) -#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F -#define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 -#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 +#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) +#define IOAT_CHANSTS_SOFT_ERR 0x10ULL +#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL +#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x7ULL #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 From bb3207863014c7310593146f11fbc6573eab43c8 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:14 -0700 Subject: [PATCH 0113/3409] ioat: ignore reserved bits for chancnt and xfercap Don't trust that the reserved bits are always zero, also sanity check the returned value. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 7 +++++++ drivers/dma/ioat/dma_v2.c | 7 +++++++ 2 files changed, 14 insertions(+) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 08417ad4edca..5173ba97ba31 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -132,7 +132,14 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(device->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(device->idx)); + dma->chancnt = ARRAY_SIZE(device->idx); + } xfercap_scale = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_scale &= 0x1f; /* bits [4:0] valid */ xfercap = (xfercap_scale == 0 ? 
-1 : (1UL << xfercap_scale)); dev_dbg(dev, "%s: xfercap = %d\n", __func__, xfercap); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index ca1134249341..137cf879265f 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -359,7 +359,14 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) INIT_LIST_HEAD(&dma->channels); dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET); + dma->chancnt &= 0x1f; /* bits [4:0] valid */ + if (dma->chancnt > ARRAY_SIZE(device->idx)) { + dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n", + dma->chancnt, ARRAY_SIZE(device->idx)); + dma->chancnt = ARRAY_SIZE(device->idx); + } xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET); + xfercap_log &= 0x1f; /* bits [4:0] valid */ if (xfercap_log == 0) return 0; dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); From f6ab95b55735fa03cad8d0f966647e5df206e207 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:21 -0700 Subject: [PATCH 0114/3409] ioat: preserve chanctrl bits when re-arming interrupts The register write in ioat_dma_cleanup_tasklet is unfortunate in two ways: 1/ It clears the extra 'enable' bits that we set at alloc_chan_resources time 2/ It gives the impression that it disables interrupts when it is in fact re-arming interrupts [ Impact: fix, persist the value of the chanctrl register when re-arming ] Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 10 +++------- drivers/dma/ioat/dma_v2.c | 8 ++------ drivers/dma/ioat/registers.h | 6 +++++- 3 files changed, 10 insertions(+), 14 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 5173ba97ba31..6dd0af194b8a 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -452,7 +452,6 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *desc; - u16 chanctrl; u32 chanerr; int i; LIST_HEAD(tmp_list); @@ -462,10 +461,7 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c) return ioat->desccount; /* Setup register to interrupt and write completion status on error */ - chanctrl = IOAT_CHANCTRL_ERR_INT_EN | - IOAT_CHANCTRL_ANY_ERR_ABORT_EN | - IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { @@ -672,9 +668,9 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, static void ioat1_cleanup_tasklet(unsigned long data) { struct ioat_dma_chan *chan = (void *)data; + ioat1_cleanup(chan); - writew(IOAT_CHANCTRL_INT_DISABLE, - chan->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 137cf879265f..2f34f290041e 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -341,8 +341,7 @@ static void ioat2_cleanup_tasklet(unsigned long data) struct ioat2_dma_chan *ioat = (void *) data; ioat2_cleanup(ioat); - writew(IOAT_CHANCTRL_INT_DISABLE, - ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } /** @@ -454,7 +453,6 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) struct 
ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; - u16 chanctrl; u32 chanerr; int descs; int i; @@ -464,9 +462,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) return 1 << ioat->alloc_order; /* Setup register to interrupt and write completion status on error */ - chanctrl = IOAT_CHANCTRL_ERR_INT_EN | IOAT_CHANCTRL_ANY_ERR_ABORT_EN | - IOAT_CHANCTRL_ERR_COMPLETION_EN; - writew(chanctrl, chan->reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET); chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); if (chanerr) { diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index a83c7332125c..4380f6fbf056 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -75,7 +75,11 @@ #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 #define IOAT_CHANCTRL_ANY_ERR_ABORT_EN 0x0008 #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 -#define IOAT_CHANCTRL_INT_DISABLE 0x0001 +#define IOAT_CHANCTRL_INT_REARM 0x0001 +#define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ + IOAT_CHANCTRL_ERR_COMPLETION_EN |\ + IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\ + IOAT_CHANCTRL_ERR_INT_EN) #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ #define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ From 345d852391cf3fdc73f23a9ca522c6e7b5eb5a52 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:30 -0700 Subject: [PATCH 0115/3409] ioat: ___devinit annotate the initialization paths Mark all single use initialization routines with __devinit. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dca.c | 9 ++++++--- drivers/dma/ioat/dma.c | 12 ++++++------ drivers/dma/ioat/dma.h | 11 ++++++----- drivers/dma/ioat/dma_v2.c | 4 ++-- drivers/dma/ioat/dma_v2.h | 8 ++++---- 5 files changed, 24 insertions(+), 20 deletions(-) diff --git a/drivers/dma/ioat/dca.c b/drivers/dma/ioat/dca.c index af1c762dd9d0..69d02615c4d6 100644 --- a/drivers/dma/ioat/dca.c +++ b/drivers/dma/ioat/dca.c @@ -242,7 +242,8 @@ static struct dca_ops ioat_dca_ops = { }; -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -407,7 +408,8 @@ static int ioat2_dca_count_dca_slots(void __iomem *iobase, u16 dca_offset) return slots; } -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; @@ -602,7 +604,8 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset) return slots; } -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) +struct dca_provider * __devinit +ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase) { struct dca_provider *dca; struct ioat_dca_priv *ioatdca; diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 6dd0af194b8a..abc96c4c0796 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -870,7 +870,7 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) */ #define IOAT_TEST_SIZE 2000 -static void ioat_dma_test_callback(void *dma_async_param) +static void __devinit ioat_dma_test_callback(void *dma_async_param) { struct completion *cmp = dma_async_param; @@ -881,7 +881,7 @@ static void 
ioat_dma_test_callback(void *dma_async_param) * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. * @device: device to be tested */ -static int ioat_dma_self_test(struct ioatdma_device *device) +static int __devinit ioat_dma_self_test(struct ioatdma_device *device) { int i; u8 *src; @@ -1082,7 +1082,7 @@ static void ioat_disable_interrupts(struct ioatdma_device *device) writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); } -int ioat_probe(struct ioatdma_device *device) +int __devinit ioat_probe(struct ioatdma_device *device) { int err = -ENODEV; struct dma_device *dma = &device->common; @@ -1142,7 +1142,7 @@ err_dma_pool: return err; } -int ioat_register(struct ioatdma_device *device) +int __devinit ioat_register(struct ioatdma_device *device) { int err = dma_async_device_register(&device->common); @@ -1169,7 +1169,7 @@ static void ioat1_intr_quirk(struct ioatdma_device *device) pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); } -int ioat1_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; @@ -1200,7 +1200,7 @@ int ioat1_dma_probe(struct ioatdma_device *device, int dca) return err; } -void ioat_dma_remove(struct ioatdma_device *device) +void __devexit ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 5fd6e2de84db..e47083b52ee7 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -217,11 +217,12 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } -int ioat_probe(struct ioatdma_device *device); -int ioat_register(struct ioatdma_device *device); -int ioat1_dma_probe(struct ioatdma_device *dev, int dca); -void ioat_dma_remove(struct ioatdma_device *device); -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); +int __devinit ioat_probe(struct ioatdma_device *device); +int __devinit ioat_register(struct ioatdma_device *device); +int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); +void __devexit ioat_dma_remove(struct ioatdma_device *device); +struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, + void __iomem *iobase); unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 2f34f290041e..1aa2974e7a93 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -683,7 +683,7 @@ ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, return ioat_is_complete(c, cookie, done, used); } -int ioat2_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; @@ -722,7 +722,7 @@ int ioat2_dma_probe(struct ioatdma_device *device, int dca) return err; } -int ioat3_dma_probe(struct ioatdma_device *device, int dca) +int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; struct dma_device *dma; diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index c72ccb5dfd5b..bdde5373cf66 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -127,8 +127,8 @@ ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) return 
ioat->ring[idx & ioat2_ring_mask(ioat)]; } -int ioat2_dma_probe(struct ioatdma_device *dev, int dca); -int ioat3_dma_probe(struct ioatdma_device *dev, int dca); -struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); -struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); +int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); +struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); #endif /* IOATDMA_V2_H */ From ad643f54c8514998333bc6c7b201fda2267496be Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:38 -0700 Subject: [PATCH 0116/3409] ioat1: trim ioat_dma_desc_sw Save 4 bytes per software descriptor by transmitting tx_cnt in an unused portion of the hardware descriptor. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 4 ++-- drivers/dma/ioat/dma.h | 2 -- drivers/dma/ioat/hw.h | 6 +++++- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index abc96c4c0796..f59b6f42f866 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -396,7 +396,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) dump_desc_dbg(ioat, chain_tail); dump_desc_dbg(ioat, first); - ioat->pending += desc->tx_cnt; + ioat->pending += desc->hw->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); spin_unlock_bh(&ioat->desc_lock); @@ -655,11 +655,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, spin_unlock_bh(&ioat->desc_lock); desc->txd.flags = flags; - desc->tx_cnt = tx_cnt; desc->len = total_len; list_splice(&chain, &desc->txd.tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; + hw->tx_cnt = tx_cnt; dump_desc_dbg(ioat, desc); return &desc->txd; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index e47083b52ee7..ec851cf5345c 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -165,14 +165,12 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, * @hw: hardware DMA descriptor * @node: this descriptor will either be on the free list, * or attached to a transaction list (async_tx.tx_list) - * @tx_cnt: number of descriptors required to complete the transaction * @txd: the generic software descriptor for all engines * @id: identifier for debug */ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; struct list_head node; - int tx_cnt; size_t len; struct dma_async_tx_descriptor txd; #ifdef DEBUG diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index e13f3ed47763..7481fb13ce00 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -63,7 +63,11 @@ struct ioat_dma_descriptor { uint64_t next; uint64_t rsv1; uint64_t rsv2; - uint64_t user1; + /* store some driver data in an unused portion of the descriptor */ + union { + uint64_t user1; + uint64_t tx_cnt; + }; uint64_t user2; }; #endif From 09c8a5b85e5f1e74a19bdd7c85547429d51df1cd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:01:49 -0700 Subject: [PATCH 0117/3409] ioat: switch watchdog and reset handler from workqueue to timer In order to support dynamic resizing of the descriptor ring or polling for a descriptor in the presence of a hung channel the reset handler needs to make progress while in a 
non-preemptible context. The current workqueue implementation precludes polling channel reset completion under spin_lock(). This conversion also allows us to return to opportunistic cleanup in the ioat2 case as the timer implementation guarantees at least one cleanup after every descriptor is submitted. This means the worst case completion latency becomes the timer frequency (for exceptional circumstances), but with the benefit of avoiding busy waiting when the lock is contended. Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 353 ++++++++++++++--------------------- drivers/dma/ioat/dma.h | 112 +++++++++-- drivers/dma/ioat/dma_v2.c | 321 ++++++++++++++----------------- drivers/dma/ioat/dma_v2.h | 10 + drivers/dma/ioat/registers.h | 22 +-- drivers/idle/i7300_idle.c | 16 +- 6 files changed, 397 insertions(+), 437 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f59b6f42f866..17a518d0386f 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -99,23 +99,26 @@ static void ioat1_cleanup_tasklet(unsigned long data); /* common channel initialization */ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, - work_func_t work_fn, void (*tasklet)(unsigned long), - unsigned long tasklet_data) + void (*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat) { struct dma_device *dma = &device->common; chan->device = device; chan->reg_base = device->reg_base + (0x80 * (idx + 1)); - INIT_DELAYED_WORK(&chan->work, work_fn); spin_lock_init(&chan->cleanup_lock); chan->common.device = dma; list_add_tail(&chan->common.device_node, &dma->channels); device->idx[idx] = chan; - tasklet_init(&chan->cleanup_task, tasklet, tasklet_data); + init_timer(&chan->timer); + chan->timer.function = timer_fn; + chan->timer.data = ioat; + tasklet_init(&chan->cleanup_task, tasklet, ioat); tasklet_disable(&chan->cleanup_task); } -static void ioat1_reset_part2(struct work_struct *work); +static void ioat1_timer_event(unsigned long data); /** * ioat1_dma_enumerate_channels - find and initialize the device's channels @@ -153,7 +156,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device) break; ioat_init_channel(device, &ioat->base, i, - ioat1_reset_part2, + ioat1_timer_event, ioat1_cleanup_tasklet, (unsigned long) ioat); ioat->xfercap = xfercap; @@ -192,61 +195,6 @@ static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) } } -/** - * ioat1_reset_part2 - reinit the channel after a reset - */ -static void ioat1_reset_part2(struct work_struct *work) -{ - struct ioat_chan_common *chan; - struct ioat_dma_chan *ioat; - struct ioat_desc_sw *desc; - int dmacount; - bool start_null = false; - - chan = container_of(work, struct ioat_chan_common, work.work); - ioat = container_of(chan, struct ioat_dma_chan, base); - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->desc_lock); - - *chan->completion = 0; - ioat->pending = 0; - - /* count the descriptors waiting */ - dmacount = 0; - if (ioat->used_desc.prev) { - desc = to_ioat_desc(ioat->used_desc.prev); - do { - dmacount++; - desc = to_ioat_desc(desc->node.next); - } while (&desc->node != ioat->used_desc.next); - } - - if (dmacount) { - /* - * write the new starting descriptor address - * this puts channel engine into ARMED state - */ - desc = to_ioat_desc(ioat->used_desc.prev); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 
32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - } else - start_null = true; - spin_unlock_bh(&ioat->desc_lock); - spin_unlock_bh(&chan->cleanup_lock); - - dev_err(to_dev(chan), - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), dmacount, ioat->desccount); - - if (start_null) - ioat1_dma_start_null_desc(ioat); -} - /** * ioat1_reset_channel - restart a channel * @ioat: IOAT DMA channel handle @@ -257,12 +205,9 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) void __iomem *reg_base = chan->reg_base; u32 chansts, chanerr; - if (!ioat->used_desc.prev) - return; - - dev_dbg(to_dev(chan), "%s\n", __func__); + dev_warn(to_dev(chan), "reset\n"); chanerr = readl(reg_base + IOAT_CHANERR_OFFSET); - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; + chansts = *chan->completion & IOAT_CHANSTS_STATUS; if (chanerr) { dev_err(to_dev(chan), "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", @@ -278,93 +223,11 @@ static void ioat1_reset_channel(struct ioat_dma_chan *ioat) * while we're waiting. */ - spin_lock_bh(&ioat->desc_lock); ioat->pending = INT_MIN; writeb(IOAT_CHANCMD_RESET, reg_base + IOAT_CHANCMD_OFFSET(chan->device->version)); - spin_unlock_bh(&ioat->desc_lock); - - /* schedule the 2nd half instead of sleeping a long time */ - schedule_delayed_work(&chan->work, RESET_DELAY); -} - -/** - * ioat1_chan_watchdog - watch for stuck channels - */ -static void ioat1_chan_watchdog(struct work_struct *work) -{ - struct ioatdma_device *device = - container_of(work, struct ioatdma_device, work.work); - struct ioat_dma_chan *ioat; - struct ioat_chan_common *chan; - int i; - u64 completion; - u32 completion_low; - unsigned long compl_desc_addr_hw; - - for (i = 0; i < device->common.chancnt; i++) { - chan = ioat_chan_by_index(device, i); - ioat = container_of(chan, struct ioat_dma_chan, base); - - if (/* have we started processing anything yet */ - chan->last_completion - /* have we completed any since last watchdog cycle? */ - && (chan->last_completion == chan->watchdog_completion) - /* has TCP stuck on one cookie since last watchdog? */ - && (chan->watchdog_tcp_cookie == chan->watchdog_last_tcp_cookie) - && (chan->watchdog_tcp_cookie != chan->completed_cookie) - /* is there something in the chain to be processed? */ - /* CB1 chain always has at least the last one processed */ - && (ioat->used_desc.prev != ioat->used_desc.next) - && ioat->pending == 0) { - - /* - * check CHANSTS register for completed - * descriptor address. - * if it is different than completion writeback, - * it is not zero - * and it has changed since the last watchdog - * we can assume that channel - * is still working correctly - * and the problem is in completion writeback. 
- * update completion writeback - * with actual CHANSTS value - * else - * try resetting the channel - */ - - /* we need to read the low address first as this - * causes the chipset to latch the upper bits - * for the subsequent read - */ - completion_low = readl(chan->reg_base + - IOAT_CHANSTS_OFFSET_LOW(chan->device->version)); - completion = readl(chan->reg_base + - IOAT_CHANSTS_OFFSET_HIGH(chan->device->version)); - completion <<= 32; - completion |= completion_low; - compl_desc_addr_hw = completion & - IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; - - if ((compl_desc_addr_hw != 0) - && (compl_desc_addr_hw != chan->watchdog_completion) - && (compl_desc_addr_hw != chan->last_compl_desc_addr_hw)) { - chan->last_compl_desc_addr_hw = compl_desc_addr_hw; - *chan->completion = completion; - } else { - ioat1_reset_channel(ioat); - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - } - } else { - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_completion = chan->last_completion; - } - - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; - } - - schedule_delayed_work(&device->work, WATCHDOG_DELAY); + set_bit(IOAT_RESET_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + RESET_DELAY); } static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) @@ -372,6 +235,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) struct dma_chan *c = tx->chan; struct ioat_dma_chan *ioat = to_ioat_chan(c); struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); + struct ioat_chan_common *chan = &ioat->base; struct ioat_desc_sw *first; struct ioat_desc_sw *chain_tail; dma_cookie_t cookie; @@ -396,6 +260,9 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) dump_desc_dbg(ioat, chain_tail); dump_desc_dbg(ioat, first); + if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat->pending += desc->hw->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); @@ -520,6 +387,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); + del_timer_sync(&chan->timer); ioat1_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce @@ -560,9 +428,6 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c) chan->last_completion = 0; chan->completion_dma = 0; - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_tcp_cookie = chan->watchdog_last_tcp_cookie = 0; ioat->pending = 0; ioat->desccount = 0; } @@ -705,15 +570,15 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) u64 completion; completion = *chan->completion; - phys_complete = completion & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; + phys_complete = ioat_chansts_to_addr(completion); dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, (unsigned long long) phys_complete); - if ((completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS) == - IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED) { + if (is_ioat_halted(completion)) { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); dev_err(to_dev(chan), "Channel halted, chanerr = %x\n", - readl(chan->reg_base + IOAT_CHANERR_OFFSET)); + chanerr); /* TODO do something to salvage the situation */ } @@ -721,48 +586,31 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) return phys_complete; } -/** - * ioat1_cleanup - cleanup up finished descriptors - * @chan: ioat channel to be cleaned up - 
*/ -static void ioat1_cleanup(struct ioat_dma_chan *ioat) +bool ioat_cleanup_preamble(struct ioat_chan_common *chan, + unsigned long *phys_complete) +{ + *phys_complete = ioat_get_current_completion(chan); + if (*phys_complete == chan->last_completion) + return false; + clear_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + return true; +} + +static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) { struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; - struct ioat_desc_sw *desc, *_desc; - dma_cookie_t cookie = 0; + struct list_head *_desc, *n; struct dma_async_tx_descriptor *tx; - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; - - phys_complete = ioat_get_current_completion(chan); - if (phys_complete == chan->last_completion) { - spin_unlock_bh(&chan->cleanup_lock); - /* - * perhaps we're stuck so hard that the watchdog can't go off? - * try to catch it after 2 seconds - */ - if (time_after(jiffies, - chan->last_completion_time + HZ*WATCHDOG_DELAY)) { - ioat1_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } - return; - } - chan->last_completion_time = jiffies; - - cookie = 0; - if (!spin_trylock_bh(&ioat->desc_lock)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", __func__, phys_complete); - list_for_each_entry_safe(desc, _desc, &ioat->used_desc, node) { + list_for_each_safe(_desc, n, &ioat->used_desc) { + struct ioat_desc_sw *desc; + + prefetch(n); + desc = list_entry(_desc, typeof(*desc), node); tx = &desc->txd; /* * Incoming DMA requests may use multiple descriptors, @@ -771,7 +619,8 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) */ dump_desc_dbg(ioat, desc); if (tx->cookie) { - cookie = tx->cookie; + chan->completed_cookie = tx->cookie; + tx->cookie = 0; ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); if (tx->callback) { tx->callback(tx->callback_param); @@ -786,27 +635,110 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat) */ if (async_tx_test_ack(tx)) list_move_tail(&desc->node, &ioat->free_desc); - else - tx->cookie = 0; } else { /* * last used desc. Do not remove, so we can - * append from it, but don't look at it next - * time, either + * append from it. */ - tx->cookie = 0; + + /* if nothing else is pending, cancel the + * completion timeout + */ + if (n == &ioat->used_desc) { + dev_dbg(to_dev(chan), + "%s cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + } /* TODO check status bits? */ break; } } - spin_unlock_bh(&ioat->desc_lock); - chan->last_completion = phys_complete; - if (cookie != 0) - chan->completed_cookie = cookie; +} +/** + * ioat1_cleanup - clean up finished descriptors + * @chan: ioat channel to be cleaned up + * + * To prevent lock contention we defer cleanup when the locks are + * contended, with a terminal timeout that forces cleanup and catches + * completion notification errors. 
+ */ +static void ioat1_cleanup(struct ioat_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + if (!spin_trylock_bh(&ioat->desc_lock)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->desc_lock); + spin_unlock_bh(&chan->cleanup_lock); +} + +static void ioat1_timer_event(unsigned long data) +{ + struct ioat_dma_chan *ioat = (void *) data; + struct ioat_chan_common *chan = &ioat->base; + + dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state); + + spin_lock_bh(&chan->cleanup_lock); + if (test_and_clear_bit(IOAT_RESET_PENDING, &chan->state)) { + struct ioat_desc_sw *desc; + + spin_lock_bh(&ioat->desc_lock); + + /* restart active descriptors */ + desc = to_ioat_desc(ioat->used_desc.prev); + ioat_set_chainaddr(ioat, desc->txd.phys); + ioat_start(chan); + + ioat->pending = 0; + set_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + spin_unlock_bh(&ioat->desc_lock); + } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { + unsigned long phys_complete; + + spin_lock_bh(&ioat->desc_lock); + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + ioat1_reset_channel(ioat); + else { + u64 status = ioat_chansts(chan); + + /* manually update the last completion address */ + if (ioat_chansts_to_addr(status) != 0) + *chan->completion = status; + + set_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + } + spin_unlock_bh(&ioat->desc_lock); + } spin_unlock_bh(&chan->cleanup_lock); } @@ -855,13 +787,8 @@ static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) list_add_tail(&desc->node, &ioat->used_desc); dump_desc_dbg(ioat, desc); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); + ioat_set_chainaddr(ioat, desc->txd.phys); + ioat_start(chan); spin_unlock_bh(&ioat->desc_lock); } @@ -1194,9 +1121,6 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat1_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - return err; } @@ -1204,9 +1128,6 @@ void __devexit ioat_dma_remove(struct ioatdma_device *device) { struct dma_device *dma = &device->common; - if (device->version != IOAT_VER_3_0) - cancel_delayed_work(&device->work); - ioat_disable_interrupts(device); dma_async_device_unregister(dma); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index ec851cf5345c..dbfccac3e80c 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -23,6 +23,7 @@ #include #include "hw.h" +#include "registers.h" #include #include #include @@ -33,7 +34,6 @@ #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 #define IOAT_DMA_DCA_ANY_CPU ~0 -#define IOAT_WATCHDOG_PERIOD (2 * HZ) #define 
to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) @@ -42,9 +42,6 @@ #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) -#define RESET_DELAY msecs_to_jiffies(100) -#define WATCHDOG_DELAY round_jiffies(msecs_to_jiffies(2000)) - /* * workaround for IOAT ver.3.0 null descriptor issue * (channel returns error when size is 0) @@ -72,7 +69,6 @@ struct ioatdma_device { struct pci_pool *completion_pool; struct dma_device common; u8 version; - struct delayed_work work; struct msix_entry msix_entries[4]; struct ioat_chan_common *idx[4]; struct dca_provider *dca; @@ -81,24 +77,21 @@ struct ioatdma_device { }; struct ioat_chan_common { + struct dma_chan common; void __iomem *reg_base; - unsigned long last_completion; - unsigned long last_completion_time; - spinlock_t cleanup_lock; dma_cookie_t completed_cookie; - unsigned long watchdog_completion; - int watchdog_tcp_cookie; - u32 watchdog_last_tcp_cookie; - struct delayed_work work; - + unsigned long state; + #define IOAT_COMPLETION_PENDING 0 + #define IOAT_COMPLETION_ACK 1 + #define IOAT_RESET_PENDING 2 + struct timer_list timer; + #define COMPLETION_TIMEOUT msecs_to_jiffies(100) + #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *device; - struct dma_chan common; - dma_addr_t completion_dma; u64 *completion; - unsigned long last_compl_desc_addr_hw; struct tasklet_struct cleanup_task; }; @@ -148,7 +141,6 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, last_used = c->cookie; last_complete = chan->completed_cookie; - chan->watchdog_tcp_cookie = cookie; if (done) *done = last_complete; @@ -215,6 +207,85 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } +static inline u64 ioat_chansts(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + u64 status; + u32 status_lo; + + /* We need to read the low address first as this causes the + * chipset to latch the upper bits for the subsequent read + */ + status_lo = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_LOW(ver)); + status = readl(chan->reg_base + IOAT_CHANSTS_OFFSET_HIGH(ver)); + status <<= 32; + status |= status_lo; + + return status; +} + +static inline void ioat_start(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + + writeb(IOAT_CHANCMD_START, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); +} + +static inline u64 ioat_chansts_to_addr(u64 status) +{ + return status & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; +} + +static inline u32 ioat_chanerr(struct ioat_chan_common *chan) +{ + return readl(chan->reg_base + IOAT_CHANERR_OFFSET); +} + +static inline void ioat_suspend(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + + writeb(IOAT_CHANCMD_SUSPEND, chan->reg_base + IOAT_CHANCMD_OFFSET(ver)); +} + +static inline void ioat_set_chainaddr(struct ioat_dma_chan *ioat, u64 addr) +{ + struct ioat_chan_common *chan = &ioat->base; + + writel(addr & 0x00000000FFFFFFFF, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); +} + +static inline bool is_ioat_active(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); +} + +static inline bool is_ioat_idle(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_DONE); +} + +static inline bool is_ioat_halted(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); +} + +static 
inline bool is_ioat_suspended(unsigned long status) +{ + return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); +} + +/* channel was fatally programmed */ +static inline bool is_ioat_bug(unsigned long err) +{ + return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR| + IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR| + IOAT_CHANERR_LENGTH_ERR)); +} + int __devinit ioat_probe(struct ioatdma_device *device); int __devinit ioat_register(struct ioatdma_device *device); int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); @@ -224,8 +295,11 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx, - work_func_t work_fn, void (*tasklet)(unsigned long), - unsigned long tasklet_data); + void (*timer_fn)(unsigned long), + void (*tasklet)(unsigned long), + unsigned long ioat); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); +bool ioat_cleanup_preamble(struct ioat_chan_common *chan, + unsigned long *phys_complete); #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 1aa2974e7a93..72e59a0d0f2e 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -49,7 +49,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) void * __iomem reg_base = ioat->base.reg_base; ioat->pending = 0; - ioat->dmacount += ioat2_ring_pending(ioat); + ioat->dmacount += ioat2_ring_pending(ioat); ioat->issued = ioat->head; /* make descriptor updates globally visible before notifying channel */ wmb(); @@ -92,7 +92,6 @@ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) { - void __iomem *reg_base = ioat->base.reg_base; struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; int idx; @@ -118,10 +117,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) hw->src_addr = 0; hw->dst_addr = 0; async_tx_ack(&desc->txd); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + ioat2_set_chainaddr(ioat, desc->txd.phys); dump_desc_dbg(ioat, desc); __ioat2_issue_pending(ioat); } @@ -133,177 +129,14 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) spin_unlock_bh(&ioat->ring_lock); } -static void ioat2_cleanup(struct ioat2_dma_chan *ioat); - -/** - * ioat2_reset_part2 - reinit the channel after a reset - */ -static void ioat2_reset_part2(struct work_struct *work) -{ - struct ioat_chan_common *chan; - struct ioat2_dma_chan *ioat; - - chan = container_of(work, struct ioat_chan_common, work.work); - ioat = container_of(chan, struct ioat2_dma_chan, base); - - /* ensure that ->tail points to the stalled descriptor - * (ioat->pending is set to 2 at this point so no new - * descriptors will be issued while we perform this cleanup) - */ - ioat2_cleanup(ioat); - - spin_lock_bh(&chan->cleanup_lock); - spin_lock_bh(&ioat->ring_lock); - - /* set the tail to be re-issued */ - ioat->issued = ioat->tail; - ioat->dmacount = 0; - - dev_dbg(to_dev(&ioat->base), - "%s: head: %#x tail: %#x issued: %#x count: %#x\n", - __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); - - if (ioat2_ring_pending(ioat)) { - struct ioat_ring_ent *desc; - - desc = 
ioat2_get_ring_ent(ioat, ioat->tail); - writel(((u64) desc->txd.phys) & 0x00000000FFFFFFFF, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->txd.phys) >> 32, - chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); - __ioat2_issue_pending(ioat); - } else - __ioat2_start_null_desc(ioat); - - spin_unlock_bh(&ioat->ring_lock); - spin_unlock_bh(&chan->cleanup_lock); - - dev_info(to_dev(chan), - "chan%d reset - %d descs waiting, %d total desc\n", - chan_num(chan), ioat->dmacount, 1 << ioat->alloc_order); -} - -/** - * ioat2_reset_channel - restart a channel - * @ioat: IOAT DMA channel handle - */ -static void ioat2_reset_channel(struct ioat2_dma_chan *ioat) -{ - u32 chansts, chanerr; - struct ioat_chan_common *chan = &ioat->base; - u16 active; - - spin_lock_bh(&ioat->ring_lock); - active = ioat2_ring_active(ioat); - spin_unlock_bh(&ioat->ring_lock); - if (!active) - return; - - chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); - chansts = *chan->completion & IOAT_CHANSTS_DMA_TRANSFER_STATUS; - if (chanerr) { - dev_err(to_dev(chan), - "chan%d, CHANSTS = 0x%08x CHANERR = 0x%04x, clearing\n", - chan_num(chan), chansts, chanerr); - writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - } - - spin_lock_bh(&ioat->ring_lock); - ioat->pending = 2; - writeb(IOAT_CHANCMD_RESET, - chan->reg_base - + IOAT_CHANCMD_OFFSET(chan->device->version)); - spin_unlock_bh(&ioat->ring_lock); - schedule_delayed_work(&chan->work, RESET_DELAY); -} - -/** - * ioat2_chan_watchdog - watch for stuck channels - */ -static void ioat2_chan_watchdog(struct work_struct *work) -{ - struct ioatdma_device *device = - container_of(work, struct ioatdma_device, work.work); - struct ioat2_dma_chan *ioat; - struct ioat_chan_common *chan; - u16 active; - int i; - - dev_dbg(&device->pdev->dev, "%s\n", __func__); - - for (i = 0; i < device->common.chancnt; i++) { - chan = ioat_chan_by_index(device, i); - ioat = container_of(chan, struct ioat2_dma_chan, base); - - /* - * for version 2.0 if there are descriptors yet to be processed - * and the last completed hasn't changed since the last watchdog - * if they haven't hit the pending level - * issue the pending to push them through - * else - * try resetting the channel - */ - spin_lock_bh(&ioat->ring_lock); - active = ioat2_ring_active(ioat); - spin_unlock_bh(&ioat->ring_lock); - - if (active && - chan->last_completion && - chan->last_completion == chan->watchdog_completion) { - - if (ioat->pending == 1) - ioat2_issue_pending(&chan->common); - else { - ioat2_reset_channel(ioat); - chan->watchdog_completion = 0; - } - } else { - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_completion = chan->last_completion; - } - chan->watchdog_last_tcp_cookie = chan->watchdog_tcp_cookie; - } - schedule_delayed_work(&device->work, WATCHDOG_DELAY); -} - -/** - * ioat2_cleanup - clean finished descriptors (advance tail pointer) - * @chan: ioat channel to be cleaned up - */ -static void ioat2_cleanup(struct ioat2_dma_chan *ioat) +static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) { struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; + struct dma_async_tx_descriptor *tx; struct ioat_ring_ent *desc; bool seen_current = false; u16 active; int i; - struct dma_async_tx_descriptor *tx; - - prefetch(chan->completion); - - spin_lock_bh(&chan->cleanup_lock); - phys_complete = ioat_get_current_completion(chan); - if (phys_complete == chan->last_completion) { - spin_unlock_bh(&chan->cleanup_lock); - /* - * perhaps we're stuck so hard that the 
watchdog can't go off? - * try to catch it after WATCHDOG_DELAY seconds - */ - if (chan->device->version < IOAT_VER_3_0) { - unsigned long tmo; - - tmo = chan->last_completion_time + HZ*WATCHDOG_DELAY; - if (time_after(jiffies, tmo)) { - ioat2_chan_watchdog(&(chan->device->work.work)); - chan->last_completion_time = jiffies; - } - } - return; - } - chan->last_completion_time = jiffies; - - spin_lock_bh(&ioat->ring_lock); dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); @@ -329,10 +162,42 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) } ioat->tail += i; BUG_ON(!seen_current); /* no active descs have written a completion? */ - spin_unlock_bh(&ioat->ring_lock); chan->last_completion = phys_complete; + if (ioat->head == ioat->tail) { + dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + } +} +/** + * ioat2_cleanup - clean finished descriptors (advance tail pointer) + * @chan: ioat channel to be cleaned up + */ +static void ioat2_cleanup(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + if (!spin_trylock_bh(&ioat->ring_lock)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->ring_lock); spin_unlock_bh(&chan->cleanup_lock); } @@ -344,6 +209,90 @@ static void ioat2_cleanup_tasklet(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } +static void __restart_chan(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + + /* set the tail to be re-issued */ + ioat->issued = ioat->tail; + ioat->dmacount = 0; + set_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + dev_dbg(to_dev(chan), + "%s: head: %#x tail: %#x issued: %#x count: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); + + if (ioat2_ring_pending(ioat)) { + struct ioat_ring_ent *desc; + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + ioat2_set_chainaddr(ioat, desc->txd.phys); + __ioat2_issue_pending(ioat); + } else + __ioat2_start_null_desc(ioat); +} + +static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + u32 status; + + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) + ioat_suspend(chan); + while (is_ioat_active(status) || is_ioat_idle(status)) { + status = ioat_chansts(chan); + cpu_relax(); + } + + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + + __restart_chan(ioat); +} + +static void ioat2_timer_event(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + struct ioat_chan_common *chan = &ioat->base; + + spin_lock_bh(&chan->cleanup_lock); + if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { + unsigned long phys_complete; + u64 status; + + spin_lock_bh(&ioat->ring_lock); + status = ioat_chansts(chan); + + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if (is_ioat_halted(status)) { + u32 chanerr; + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + 
BUG_ON(is_ioat_bug(chanerr)); + } + + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + ioat2_restart_channel(ioat); + else { + set_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + } + spin_unlock_bh(&ioat->ring_lock); + } + spin_unlock_bh(&chan->cleanup_lock); +} + /** * ioat2_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated @@ -381,7 +330,7 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) break; ioat_init_channel(device, &ioat->base, i, - ioat2_reset_part2, + ioat2_timer_event, ioat2_cleanup_tasklet, (unsigned long) ioat); ioat->xfercap_log = xfercap_log; @@ -395,6 +344,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) { struct dma_chan *c = tx->chan; struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; dma_cookie_t cookie = c->cookie; cookie++; @@ -404,6 +354,8 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) c->cookie = cookie; dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); + if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); ioat2_update_pending(ioat); spin_unlock_bh(&ioat->ring_lock); @@ -543,9 +495,18 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d ioat->issued); spin_unlock_bh(&ioat->ring_lock); - /* do direct reclaim in the allocation failure case */ - ioat2_cleanup(ioat); - + /* progress reclaim in the allocation failure case; we + * may be called under bh_disabled so we need to trigger + * the timer event directly + */ + spin_lock_bh(&chan->cleanup_lock); + if (jiffies > chan->timer.expires && + timer_pending(&chan->timer)) { + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + spin_unlock_bh(&chan->cleanup_lock); + ioat2_timer_event((unsigned long) ioat); + } else + spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; } @@ -624,6 +585,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) return; tasklet_disable(&chan->cleanup_task); + del_timer_sync(&chan->timer); ioat2_cleanup(ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce @@ -663,10 +625,6 @@ static void ioat2_free_chan_resources(struct dma_chan *c) chan->completion_dma = 0; ioat->pending = 0; ioat->dmacount = 0; - chan->watchdog_completion = 0; - chan->last_compl_desc_addr_hw = 0; - chan->watchdog_tcp_cookie = 0; - chan->watchdog_last_tcp_cookie = 0; } static enum dma_status @@ -716,9 +674,6 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) if (dca) device->dca = ioat2_dca_init(pdev, device->reg_base); - INIT_DELAYED_WORK(&device->work, ioat2_chan_watchdog); - schedule_delayed_work(&device->work, WATCHDOG_DELAY); - return err; } diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index bdde5373cf66..73b04a2eb4b0 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -127,6 +127,16 @@ ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) return ioat->ring[idx & ioat2_ring_mask(ioat)]; } +static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) +{ + struct ioat_chan_common *chan = &ioat->base; + + writel(addr & 
0x00000000FFFFFFFF, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(addr >> 32, + chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); +} + int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 4380f6fbf056..e4334a195380 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -101,11 +101,11 @@ #define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR (~0x3fULL) #define IOAT_CHANSTS_SOFT_ERR 0x10ULL #define IOAT_CHANSTS_UNAFFILIATED_ERR 0x8ULL -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x7ULL -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 -#define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 +#define IOAT_CHANSTS_STATUS 0x7ULL +#define IOAT_CHANSTS_ACTIVE 0x0 +#define IOAT_CHANSTS_DONE 0x1 +#define IOAT_CHANSTS_SUSPENDED 0x2 +#define IOAT_CHANSTS_HALTED 0x3 @@ -208,18 +208,18 @@ #define IOAT_CDAR_OFFSET_HIGH 0x24 #define IOAT_CHANERR_OFFSET 0x28 /* 32-bit Channel Error Register */ -#define IOAT_CHANERR_DMA_TRANSFER_SRC_ADDR_ERR 0x0001 -#define IOAT_CHANERR_DMA_TRANSFER_DEST_ADDR_ERR 0x0002 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ADDR_ERR 0x0004 -#define IOAT_CHANERR_NEXT_DESCRIPTOR_ALIGNMENT_ERR 0x0008 +#define IOAT_CHANERR_SRC_ADDR_ERR 0x0001 +#define IOAT_CHANERR_DEST_ADDR_ERR 0x0002 +#define IOAT_CHANERR_NEXT_ADDR_ERR 0x0004 +#define IOAT_CHANERR_NEXT_DESC_ALIGN_ERR 0x0008 #define IOAT_CHANERR_CHAIN_ADDR_VALUE_ERR 0x0010 #define IOAT_CHANERR_CHANCMD_ERR 0x0020 #define IOAT_CHANERR_CHIPSET_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0040 #define IOAT_CHANERR_DMA_UNCORRECTABLE_DATA_INTEGRITY_ERR 0x0080 #define IOAT_CHANERR_READ_DATA_ERR 0x0100 #define IOAT_CHANERR_WRITE_DATA_ERR 0x0200 -#define IOAT_CHANERR_DESCRIPTOR_CONTROL_ERR 0x0400 -#define IOAT_CHANERR_DESCRIPTOR_LENGTH_ERR 0x0800 +#define IOAT_CHANERR_CONTROL_ERR 0x0400 +#define IOAT_CHANERR_LENGTH_ERR 0x0800 #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 #define IOAT_CHANERR_SOFT_ERR 0x4000 diff --git a/drivers/idle/i7300_idle.c b/drivers/idle/i7300_idle.c index f2ec7243549e..1f20a042a4f5 100644 --- a/drivers/idle/i7300_idle.c +++ b/drivers/idle/i7300_idle.c @@ -126,9 +126,9 @@ static void i7300_idle_ioat_stop(void) udelay(10); sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & - IOAT_CHANSTS_DMA_TRANSFER_STATUS; + IOAT_CHANSTS_STATUS; - if (sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) + if (sts != IOAT_CHANSTS_ACTIVE) break; } @@ -160,9 +160,9 @@ static int __init i7300_idle_ioat_selftest(u8 *ctl, udelay(1000); chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & - IOAT_CHANSTS_DMA_TRANSFER_STATUS; + IOAT_CHANSTS_STATUS; - if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE) { + if (chan_sts != IOAT_CHANSTS_DONE) { /* Not complete, reset the channel */ writeb(IOAT_CHANCMD_RESET, ioat_chanbase + IOAT1_CHANCMD_OFFSET); @@ -288,9 +288,9 @@ static void __exit i7300_idle_ioat_exit(void) ioat_chanbase + IOAT1_CHANCMD_OFFSET); chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & - IOAT_CHANSTS_DMA_TRANSFER_STATUS; + IOAT_CHANSTS_STATUS; - if (chan_sts != IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { + if (chan_sts != IOAT_CHANSTS_ACTIVE) { writew(0, ioat_chanbase + IOAT_CHANCTRL_OFFSET); break; } @@ -298,14 
+298,14 @@ static void __exit i7300_idle_ioat_exit(void) } chan_sts = readq(ioat_chanbase + IOAT1_CHANSTS_OFFSET) & - IOAT_CHANSTS_DMA_TRANSFER_STATUS; + IOAT_CHANSTS_STATUS; /* * We tried to reset multiple times. If IO A/T channel is still active * flag an error and return without cleanup. Memory leak is better * than random corruption in that extreme error situation. */ - if (chan_sts == IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE) { + if (chan_sts == IOAT_CHANSTS_ACTIVE) { printk(KERN_ERR I7300_PRINT "Unable to stop IO A/T channels." " Not freeing resources\n"); return; From a309218acee8606f7e235da20cc826eb06d9b0f6 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:02:01 -0700 Subject: [PATCH 0118/3409] ioat2,3: dynamically resize descriptor ring Increment the allocation order of the descriptor ring every time we run out of descriptors, up to the maximum allocation order specified by the module parameter 'ioat_ring_max_alloc_order'. After each idle period, decrement the allocation order to a minimum order of 'ioat_ring_alloc_order' (i.e. the default ring size, tunable as a module parameter). Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 1 + drivers/dma/ioat/dma_v2.c | 215 ++++++++++++++++++++++++++++++------ drivers/dma/ioat/dma_v2.h | 2 + 3 files changed, 187 insertions(+), 31 deletions(-) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index dbfccac3e80c..d9d6a7e3cd76 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -88,6 +88,7 @@ struct ioat_chan_common { #define IOAT_RESET_PENDING 2 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) + #define IDLE_TIMEOUT msecs_to_jiffies(2000) #define RESET_DELAY msecs_to_jiffies(100) struct ioatdma_device *device; dma_addr_t completion_dma; diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 72e59a0d0f2e..460b77301332 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -43,6 +43,10 @@ static int ioat_ring_alloc_order = 8; module_param(ioat_ring_alloc_order, int, 0644); MODULE_PARM_DESC(ioat_ring_alloc_order, "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); +static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER; +module_param(ioat_ring_max_alloc_order, int, 0644); +MODULE_PARM_DESC(ioat_ring_max_alloc_order, + "ioat2+: upper limit for dynamic ring resizing (default: n=16)"); static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { @@ -168,6 +172,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } } @@ -253,6 +258,8 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) __restart_chan(ioat); } +static bool reshape_ring(struct ioat2_dma_chan *ioat, int order); + static void ioat2_timer_event(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; @@ -289,6 +296,23 @@ static void ioat2_timer_event(unsigned long data) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); } spin_unlock_bh(&ioat->ring_lock); + } else { + u16 active; + + /* if the ring is idle, empty, and oversized, try to step + * down the size + */ + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) + reshape_ring(ioat, ioat->alloc_order-1); + spin_unlock_bh(&ioat->ring_lock); + + /* keep shrinking until we get 
back to our minimum + * default size + */ + if (ioat->alloc_order > ioat_get_alloc_order()) + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } spin_unlock_bh(&chan->cleanup_lock); } @@ -362,7 +386,7 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) return cookie; } -static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) +static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags) { struct ioat_dma_descriptor *hw; struct ioat_ring_ent *desc; @@ -370,12 +394,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan) dma_addr_t phys; dma = to_ioatdma_device(chan->device); - hw = pci_pool_alloc(dma->dma_pool, GFP_KERNEL, &phys); + hw = pci_pool_alloc(dma->dma_pool, flags, &phys); if (!hw) return NULL; memset(hw, 0, sizeof(*hw)); - desc = kzalloc(sizeof(*desc), GFP_KERNEL); + desc = kzalloc(sizeof(*desc), flags); if (!desc) { pci_pool_free(dma->dma_pool, hw, phys); return NULL; @@ -397,6 +421,42 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *cha kfree(desc); } +static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) +{ + struct ioat_ring_ent **ring; + int descs = 1 << order; + int i; + + if (order > ioat_get_max_alloc_order()) + return NULL; + + /* allocate the array to hold the software ring */ + ring = kcalloc(descs, sizeof(*ring), flags); + if (!ring) + return NULL; + for (i = 0; i < descs; i++) { + ring[i] = ioat2_alloc_ring_ent(c, flags); + if (!ring[i]) { + while (i--) + ioat2_free_ring_ent(ring[i], c); + kfree(ring); + return NULL; + } + set_desc_id(ring[i], i); + } + + /* link descs */ + for (i = 0; i < descs-1; i++) { + struct ioat_ring_ent *next = ring[i+1]; + struct ioat_dma_descriptor *hw = ring[i]->hw; + + hw->next = next->txd.phys; + } + ring[i]->hw->next = ring[0]->txd.phys; + + return ring; +} + /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring * @chan: channel to be initialized */ @@ -406,8 +466,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent **ring; u32 chanerr; - int descs; - int i; + int order; /* have we already been set up? 
*/ if (ioat->ring) @@ -435,32 +494,10 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) writel(((u64) chan->completion_dma) >> 32, chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); - ioat->alloc_order = ioat_get_alloc_order(); - descs = 1 << ioat->alloc_order; - - /* allocate the array to hold the software ring */ - ring = kcalloc(descs, sizeof(*ring), GFP_KERNEL); + order = ioat_get_alloc_order(); + ring = ioat2_alloc_ring(c, order, GFP_KERNEL); if (!ring) return -ENOMEM; - for (i = 0; i < descs; i++) { - ring[i] = ioat2_alloc_ring_ent(c); - if (!ring[i]) { - while (i--) - ioat2_free_ring_ent(ring[i], c); - kfree(ring); - return -ENOMEM; - } - set_desc_id(ring[i], i); - } - - /* link descs */ - for (i = 0; i < descs-1; i++) { - struct ioat_ring_ent *next = ring[i+1]; - struct ioat_dma_descriptor *hw = ring[i]->hw; - - hw->next = next->txd.phys; - } - ring[i]->hw->next = ring[0]->txd.phys; spin_lock_bh(&ioat->ring_lock); ioat->ring = ring; @@ -468,12 +505,120 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) ioat->issued = 0; ioat->tail = 0; ioat->pending = 0; + ioat->alloc_order = order; spin_unlock_bh(&ioat->ring_lock); tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); - return descs; + return 1 << ioat->alloc_order; +} + +static bool reshape_ring(struct ioat2_dma_chan *ioat, int order) +{ + /* reshape differs from normal ring allocation in that we want + * to allocate a new software ring while only + * extending/truncating the hardware ring + */ + struct ioat_chan_common *chan = &ioat->base; + struct dma_chan *c = &chan->common; + const u16 curr_size = ioat2_ring_mask(ioat) + 1; + const u16 active = ioat2_ring_active(ioat); + const u16 new_size = 1 << order; + struct ioat_ring_ent **ring; + u16 i; + + if (order > ioat_get_max_alloc_order()) + return false; + + /* double check that we have at least 1 free descriptor */ + if (active == curr_size) + return false; + + /* when shrinking, verify that we can hold the current active + * set in the new ring + */ + if (active >= new_size) + return false; + + /* allocate the array to hold the software ring */ + ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT); + if (!ring) + return false; + + /* allocate/trim descriptors as needed */ + if (new_size > curr_size) { + /* copy current descriptors to the new ring */ + for (i = 0; i < curr_size; i++) { + u16 curr_idx = (ioat->tail+i) & (curr_size-1); + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = ioat->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* add new descriptors to the ring */ + for (i = curr_size; i < new_size; i++) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT); + if (!ring[new_idx]) { + while (i--) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ioat2_free_ring_ent(ring[new_idx], c); + } + kfree(ring); + return false; + } + set_desc_id(ring[new_idx], new_idx); + } + + /* hw link new descriptors */ + for (i = curr_size-1; i < new_size; i++) { + u16 new_idx = (ioat->tail+i) & (new_size-1); + struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)]; + struct ioat_dma_descriptor *hw = ring[new_idx]->hw; + + hw->next = next->txd.phys; + } + } else { + struct ioat_dma_descriptor *hw; + struct ioat_ring_ent *next; + + /* copy current descriptors to the new ring, dropping the + * removed descriptors + */ + for (i = 0; i < new_size; i++) { + u16 curr_idx = (ioat->tail+i) & (curr_size-1); + u16 new_idx = (ioat->tail+i) & (new_size-1); + + ring[new_idx] = 
ioat->ring[curr_idx]; + set_desc_id(ring[new_idx], new_idx); + } + + /* free deleted descriptors */ + for (i = new_size; i < curr_size; i++) { + struct ioat_ring_ent *ent; + + ent = ioat2_get_ring_ent(ioat, ioat->tail+i); + ioat2_free_ring_ent(ent, c); + } + + /* fix up hardware ring */ + hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; + next = ring[(ioat->tail+new_size) & (new_size-1)]; + hw->next = next->txd.phys; + } + + dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n", + __func__, new_size); + + kfree(ioat->ring); + ioat->ring = ring; + ioat->alloc_order = order; + + return true; } /** @@ -487,7 +632,15 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d struct ioat_chan_common *chan = &ioat->base; spin_lock_bh(&ioat->ring_lock); - if (unlikely(ioat2_ring_space(ioat) < num_descs)) { + /* never allow the last descriptor to be consumed, we need at + * least one free at all times to allow for on-the-fly ring + * resizing. + */ + while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { + if (reshape_ring(ioat, ioat->alloc_order + 1) && + ioat2_ring_space(ioat) > num_descs) + break; + if (printk_ratelimit()) dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n", diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 73b04a2eb4b0..9baa3d6065ff 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -37,6 +37,8 @@ extern int ioat_pending_level; #define IOAT_MAX_ORDER 16 #define ioat_get_alloc_order() \ (min(ioat_ring_alloc_order, IOAT_MAX_ORDER)) +#define ioat_get_max_alloc_order() \ + (min(ioat_ring_max_alloc_order, IOAT_MAX_ORDER)) /* struct ioat2_dma_chan - ioat v2 / v3 channel attributes * @base: common ioat channel parameters From 4b652f0db3be891c7b76b109c3b55003b920fc96 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 12:02:15 -0700 Subject: [PATCH 0119/3409] net_dma: poll for a descriptor after allocation failure Handle descriptor allocation failures by polling for a descriptor. The driver will force forward progress when polled. In the best case this polling interval will be the time it takes for one dma memcpy transaction to complete. In the worst case, channel hang, we will need to wait 100ms for the cleanup watchdog to fire (ioatdma driver). Signed-off-by: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/iovlock.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c index 9f6fe46a9b87..c0a272c73682 100644 --- a/drivers/dma/iovlock.c +++ b/drivers/dma/iovlock.c @@ -183,6 +183,11 @@ dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov, iov_byte_offset, kdata, copy); + /* poll for a descriptor slot */ + if (unlikely(dma_cookie < 0)) { + dma_async_issue_pending(chan); + continue; + } len -= copy; iov[iovec_idx].iov_len -= copy; @@ -248,6 +253,11 @@ dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov, page, offset, copy); + /* poll for a descriptor slot */ + if (unlikely(dma_cookie < 0)) { + dma_async_issue_pending(chan); + continue; + } len -= copy; iov[iovec_idx].iov_len -= copy; From 0403e3827788d878163f9ef0541b748b0f88ca5d Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:50 -0700 Subject: [PATCH 0120/3409] dmaengine: add fence support Some engines optimize operation by reading ahead in the descriptor chain such that descriptor2 may start execution before descriptor1 completes. 
If descriptor2 depends on the result from descriptor1 then a fence is required (on descriptor2) to disable this optimization. The async_tx api could implicitly identify dependencies via the 'depend_tx' parameter, but that would constrain cases where the dependency chain only specifies a completion order rather than a data dependency. So, provide an ASYNC_TX_FENCE to explicitly identify data dependencies. Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 7 +++-- crypto/async_tx/async_memset.c | 7 +++-- crypto/async_tx/async_pq.c | 5 +++ crypto/async_tx/async_raid6_recov.c | 47 +++++++++++++++++------------ crypto/async_tx/async_xor.c | 11 +++++-- drivers/md/raid5.c | 37 ++++++++++++++--------- include/linux/async_tx.h | 3 ++ include/linux/dmaengine.h | 3 ++ 8 files changed, 79 insertions(+), 41 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index 98e15bd0dcb5..b38cbb3fd527 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -52,9 +52,12 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, if (device) { dma_addr_t dma_dest, dma_src; - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; - dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, dest_offset, len, DMA_FROM_DEVICE); diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index b896a6e5f673..a374784e3329 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -49,9 +49,12 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len, if (device) { dma_addr_t dma_dest; - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; - dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; dma_dest = dma_map_page(device->dev, dest, offset, len, DMA_FROM_DEVICE); diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index 108b21efb499..a25e290c39fb 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -101,6 +101,7 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, */ if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; + submit->flags |= ASYNC_TX_FENCE; dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; @@ -111,6 +112,8 @@ do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks, if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Since we have clobbered the src_list we are committed * to doing this asynchronously. 
Drivers force forward @@ -282,6 +285,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, dma_flags |= DMA_PREP_PQ_DISABLE_P; if (!Q(blocks, disks)) dma_flags |= DMA_PREP_PQ_DISABLE_Q; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; for (i = 0; i < disks; i++) if (likely(blocks[i])) { BUG_ON(is_raid6_zero_block(blocks[i])); diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c index 0c14d48c9896..822a42d10061 100644 --- a/crypto/async_tx/async_raid6_recov.c +++ b/crypto/async_tx/async_raid6_recov.c @@ -44,6 +44,8 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef, struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, srcs[0], 0, len, DMA_TO_DEVICE); dma_src[1] = dma_map_page(dev, srcs[1], 0, len, DMA_TO_DEVICE); @@ -89,6 +91,8 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len, struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = DMA_PREP_PQ_DISABLE_P; + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; dma_dest[1] = dma_map_page(dev, dest, 0, len, DMA_BIDIRECTIONAL); dma_src[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE); tx = dma->device_prep_dma_pq(chan, dma_dest, dma_src, 1, &coef, @@ -138,7 +142,7 @@ __2data_recov_4(size_t bytes, int faila, int failb, struct page **blocks, srcs[1] = q; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(b, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -188,23 +192,23 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, dp = blocks[faila]; dq = blocks[failb]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_memcpy(dp, g, 0, 0, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ @@ -212,7 +216,7 @@ __2data_recov_5(size_t bytes, int faila, int failb, struct page **blocks, srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -252,7 +256,7 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, blocks[failb] = (void *)raid6_empty_zero_page; blocks[disks-1] = dq; - init_async_submit(submit, 0, tx, NULL, 
NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); /* Restore pointer table */ @@ -264,15 +268,15 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, /* compute P + Pxy */ srcs[0] = dp; srcs[1] = p; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dp, srcs, 0, 2, bytes, submit); /* compute Q + Qxy */ srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); /* Dx = A*(P+Pxy) + B*(Q+Qxy) */ @@ -280,7 +284,7 @@ __2data_recov_n(int disks, size_t bytes, int faila, int failb, srcs[1] = dq; coef[0] = raid6_gfexi[failb-faila]; coef[1] = raid6_gfinv[raid6_gfexp[faila]^raid6_gfexp[failb]]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_sum_product(dq, srcs, coef, bytes, submit); /* Dy = P+Pxy+Dx */ @@ -407,13 +411,16 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, int good = faila == 0 ? 1 : 0; struct page *g = blocks[good]; - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_memcpy(p, g, 0, 0, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); } else { - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, + scribble); tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); } @@ -426,11 +433,11 @@ async_raid6_datap_recov(int disks, size_t bytes, int faila, srcs[0] = dq; srcs[1] = q; - init_async_submit(submit, ASYNC_TX_XOR_DROP_DST, tx, NULL, NULL, - scribble); + init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, + NULL, NULL, scribble); tx = async_xor(dq, srcs, 0, 2, bytes, submit); - init_async_submit(submit, 0, tx, NULL, NULL, scribble); + init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); tx = async_mult(dq, dq, coef, bytes, submit); srcs[0] = p; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index 56b5f98da463..db279872ef3d 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -69,6 +69,7 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, */ if (src_cnt > xor_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; + submit->flags |= ASYNC_TX_FENCE; dma_flags = DMA_COMPL_SKIP_DEST_UNMAP; submit->cb_fn = NULL; submit->cb_param = NULL; @@ -78,7 +79,8 @@ do_async_xor(struct dma_chan *chan, struct page *dest, struct page **src_list, } if (submit->cb_fn) dma_flags |= DMA_PREP_INTERRUPT; - + if (submit->flags & ASYNC_TX_FENCE) + dma_flags |= DMA_PREP_FENCE; /* Since we have clobbered the src_list we are committed * to doing this asynchronously. 
Drivers force forward progress * in case they can not provide a descriptor @@ -264,12 +266,15 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, dma_src = (dma_addr_t *) src_list; if (dma_src && device && src_cnt <= device->max_xor) { - unsigned long dma_prep_flags; + unsigned long dma_prep_flags = 0; int i; pr_debug("%s: (async) len: %zu\n", __func__, len); - dma_prep_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; + if (submit->cb_fn) + dma_prep_flags |= DMA_PREP_INTERRUPT; + if (submit->flags & ASYNC_TX_FENCE) + dma_prep_flags |= DMA_PREP_FENCE; for (i = 0; i < src_cnt; i++) dma_src[i] = dma_map_page(device->dev, src_list[i], offset, len, DMA_TO_DEVICE); diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 0a5cf2171214..54ef8d75541d 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -502,13 +502,17 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, int i; int page_offset; struct async_submit_ctl submit; + enum async_tx_flags flags = 0; if (bio->bi_sector >= sector) page_offset = (signed)(bio->bi_sector - sector) * 512; else page_offset = (signed)(sector - bio->bi_sector) * -512; - init_async_submit(&submit, 0, tx, NULL, NULL, NULL); + if (frombio) + flags |= ASYNC_TX_FENCE; + init_async_submit(&submit, flags, tx, NULL, NULL, NULL); + bio_for_each_segment(bvl, bio, i) { int len = bio_iovec_idx(bio, i)->bv_len; int clen; @@ -685,7 +689,7 @@ ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) atomic_inc(&sh->count); - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, ops_complete_compute, sh, to_addr_conv(sh, percpu)); if (unlikely(count == 1)) tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); @@ -763,7 +767,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) count = set_syndrome_sources(blocks, sh); blocks[count] = NULL; /* regenerating p is not necessary */ BUG_ON(blocks[count+1] != dest); /* q should already be set */ - init_async_submit(&submit, 0, NULL, ops_complete_compute, sh, + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, + ops_complete_compute, sh, to_addr_conv(sh, percpu)); tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } else { @@ -775,8 +780,8 @@ ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) blocks[count++] = sh->dev[i].page; } - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - ops_complete_compute, sh, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, + NULL, ops_complete_compute, sh, to_addr_conv(sh, percpu)); tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); } @@ -837,8 +842,9 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) /* Q disk is one of the missing disks */ if (faila == syndrome_disks) { /* Missing P+Q, just recompute */ - init_async_submit(&submit, 0, NULL, ops_complete_compute, - sh, to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, + ops_complete_compute, sh, + to_addr_conv(sh, percpu)); return async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } else { @@ -859,21 +865,24 @@ ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) blocks[count++] = sh->dev[i].page; } dest = sh->dev[data_target].page; - init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, - NULL, NULL, to_addr_conv(sh, percpu)); + init_async_submit(&submit, + ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, + NULL, NULL, NULL, + 
to_addr_conv(sh, percpu)); tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); count = set_syndrome_sources(blocks, sh); - init_async_submit(&submit, 0, tx, ops_complete_compute, - sh, to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, tx, + ops_complete_compute, sh, + to_addr_conv(sh, percpu)); return async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); } } - init_async_submit(&submit, 0, NULL, ops_complete_compute, sh, - to_addr_conv(sh, percpu)); + init_async_submit(&submit, ASYNC_TX_FENCE, NULL, ops_complete_compute, + sh, to_addr_conv(sh, percpu)); if (failb == syndrome_disks) { /* We're missing D+P. */ return async_raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, @@ -916,7 +925,7 @@ ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, xor_srcs[count++] = dev->page; } - init_async_submit(&submit, ASYNC_TX_XOR_DROP_DST, tx, + init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, ops_complete_prexor, sh, to_addr_conv(sh, percpu)); tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); diff --git a/include/linux/async_tx.h b/include/linux/async_tx.h index 866e61c4e2e0..a1c486a88e88 100644 --- a/include/linux/async_tx.h +++ b/include/linux/async_tx.h @@ -58,11 +58,14 @@ struct dma_chan_ref { * array. * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a * dependency chain + * @ASYNC_TX_FENCE: specify that the next operation in the dependency + * chain uses this operation's result as an input */ enum async_tx_flags { ASYNC_TX_XOR_ZERO_DST = (1 << 0), ASYNC_TX_XOR_DROP_DST = (1 << 1), ASYNC_TX_ACK = (1 << 2), + ASYNC_TX_FENCE = (1 << 3), }; /** diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1012f1abcb54..4d6c1c925fd4 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -87,6 +87,8 @@ enum dma_transaction_type { * @DMA_PREP_CONTINUE - indicate to a driver that it is reusing buffers as * sources that were the result of a previous operation, in the case of a PQ * operation it continues the calculation with new sources + * @DMA_PREP_FENCE - tell the driver that subsequent operations depend + * on the result of this operation */ enum dma_ctrl_flags { DMA_PREP_INTERRUPT = (1 << 0), @@ -98,6 +100,7 @@ enum dma_ctrl_flags { DMA_PREP_PQ_DISABLE_P = (1 << 6), DMA_PREP_PQ_DISABLE_Q = (1 << 7), DMA_PREP_CONTINUE = (1 << 8), + DMA_PREP_FENCE = (1 << 9), }; /** From 138f4c359d23d2ec38d18bd70dd9613ae515fe93 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:51 -0700 Subject: [PATCH 0121/3409] dmaengine, async_tx: add a "no channel switch" allocator Channel switching is problematic for some dmaengine drivers as the architecture precludes separating the ->prep from ->submit. In these cases the driver can select ASYNC_TX_DISABLE_CHANNEL_SWITCH to modify the async_tx allocator to only return channels that support all of the required asynchronous operations. For example MD_RAID456=y selects support for asynchronous xor, xor validate, pq, pq validate, and memcpy. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=y any channel with all these capabilities is marked DMA_ASYNC_TX allowing async_tx_find_channel() to quickly locate compatible channels with the guarantee that dependency chains will remain on one channel. When ASYNC_TX_DISABLE_CHANNEL_SWITCH=n async_tx_find_channel() may select channels that lead to operation chains that need to cross channel boundaries using the async_tx channel switch capability. 
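As a minimal sketch of the selection policy described above (plain userspace C; the capability names and the helper are illustrative stand-ins, not the kernel's dma_cap_mask_t API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capability bits; the kernel tracks these in a
 * dma_cap_mask_t and tests them with dma_has_cap(). */
enum {
        CAP_MEMCPY   = 1 << 0,
        CAP_XOR      = 1 << 1,
        CAP_PQ       = 1 << 2,
        CAP_ASYNC_TX = 1 << 3,  /* set only when all required ops are supported */
};

/* With channel switching disabled, only channels marked CAP_ASYNC_TX
 * qualify, so a dependency chain is guaranteed to stay on one channel.
 * Otherwise any channel with the requested capability qualifies, and
 * chains may need to cross channel boundaries. */
static bool channel_eligible(unsigned int caps, unsigned int wanted,
                             bool no_channel_switch)
{
        if (no_channel_switch)
                return caps & CAP_ASYNC_TX;
        return caps & wanted;
}

int main(void)
{
        unsigned int chan = CAP_MEMCPY | CAP_XOR;       /* lacks pq, so no CAP_ASYNC_TX */

        printf("%d\n", channel_eligible(chan, CAP_XOR, false));        /* 1: eligible */
        printf("%d\n", channel_eligible(chan, CAP_XOR, true));         /* 0: rejected */
        return 0;
}

This mirrors the async_dma_find_channel() definition below, which collapses to dma_find_channel(DMA_ASYNC_TX) when the config option is set.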
Signed-off-by: Dan Williams --- crypto/async_tx/async_tx.c | 4 ++++ drivers/dma/Kconfig | 4 ++++ drivers/dma/dmaengine.c | 40 ++++++++++++++++++++++++++++++++++++++ include/linux/dmaengine.h | 10 +++++++++- 4 files changed, 57 insertions(+), 1 deletion(-) diff --git a/crypto/async_tx/async_tx.c b/crypto/async_tx/async_tx.c index 60615fedcf5e..f9cdf04fe7c0 100644 --- a/crypto/async_tx/async_tx.c +++ b/crypto/async_tx/async_tx.c @@ -81,6 +81,10 @@ async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_device *device = chan->device; struct dma_async_tx_descriptor *intr_tx = (void *) ~0; + #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH + BUG(); + #endif + /* first check to see if we can still append to depend_tx */ spin_lock_bh(&depend_tx->lock); if (depend_tx->parent && depend_tx->chan == tx->chan) { diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 912a51b5cbd3..ddcd9793b25c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -17,11 +17,15 @@ if DMADEVICES comment "DMA Devices" +config ASYNC_TX_DISABLE_CHANNEL_SWITCH + bool + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86 select DMA_ENGINE select DCA + select ASYNC_TX_DISABLE_CHANNEL_SWITCH help Enable support for the Intel(R) I/OAT DMA engine present in recent Intel Xeon chipsets. diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 96598479eece..d5bc628d207c 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -608,6 +608,40 @@ void dmaengine_put(void) } EXPORT_SYMBOL(dmaengine_put); +static bool device_has_all_tx_types(struct dma_device *device) +{ + /* A device that satisfies this test has channels that will never cause + * an async_tx channel switch event as all possible operation types can + * be handled. + */ + #ifdef CONFIG_ASYNC_TX_DMA + if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE) + if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE) + if (!dma_has_cap(DMA_MEMSET, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE) + if (!dma_has_cap(DMA_XOR, device->cap_mask)) + return false; + #endif + + #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE) + if (!dma_has_cap(DMA_PQ, device->cap_mask)) + return false; + #endif + + return true; +} + static int get_dma_id(struct dma_device *device) { int rc; @@ -665,6 +699,12 @@ int dma_async_device_register(struct dma_device *device) BUG_ON(!device->device_issue_pending); BUG_ON(!device->dev); + /* note: this only matters in the + * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case + */ + if (device_has_all_tx_types(device)) + dma_cap_set(DMA_ASYNC_TX, device->cap_mask); + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); if (!idr_ref) return -ENOMEM; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 4d6c1c925fd4..86853ed7970b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -48,6 +48,9 @@ enum dma_status { /** * enum dma_transaction_type - DMA transaction types/indexes + * + * Note: The DMA_ASYNC_TX capability is not to be set by drivers. It is + * automatically set as dma devices are registered. 
*/ enum dma_transaction_type { DMA_MEMCPY, @@ -61,6 +64,7 @@ enum dma_transaction_type { DMA_MEMCPY_CRC32C, DMA_INTERRUPT, DMA_PRIVATE, + DMA_ASYNC_TX, DMA_SLAVE, }; @@ -396,7 +400,11 @@ static inline void net_dmaengine_put(void) #ifdef CONFIG_ASYNC_TX_DMA #define async_dmaengine_get() dmaengine_get() #define async_dmaengine_put() dmaengine_put() +#ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH +#define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX) +#else #define async_dma_find_channel(type) dma_find_channel(type) +#endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */ #else static inline void async_dmaengine_get(void) { @@ -409,7 +417,7 @@ async_dma_find_channel(enum dma_transaction_type type) { return NULL; } -#endif +#endif /* CONFIG_ASYNC_TX_DMA */ dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest, void *src, size_t len); From 9308add6ea4fedeba37b0d7c4630a542bd34f214 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:52 -0700 Subject: [PATCH 0122/3409] dmaengine: cleanup unused transaction types No drivers currently implement these operation types, so they can be deleted. Signed-off-by: Dan Williams --- arch/arm/mach-iop13xx/setup.c | 7 ------- arch/arm/plat-iop/adma.c | 2 -- drivers/dma/iop-adma.c | 5 +---- include/linux/dmaengine.h | 3 --- 4 files changed, 1 insertion(+), 16 deletions(-) diff --git a/arch/arm/mach-iop13xx/setup.c b/arch/arm/mach-iop13xx/setup.c index faaef95342b6..5c147fb66a01 100644 --- a/arch/arm/mach-iop13xx/setup.c +++ b/arch/arm/mach-iop13xx/setup.c @@ -477,10 +477,8 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_0_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_1: @@ -489,10 +487,8 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_1_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); break; case IOP13XX_INIT_ADMA_2: @@ -501,13 +497,10 @@ void __init iop13xx_platform_init(void) plat_data = &iop13xx_adma_2_data; dma_cap_set(DMA_MEMCPY, plat_data->cap_mask); dma_cap_set(DMA_XOR, plat_data->cap_mask); - dma_cap_set(DMA_DUAL_XOR, plat_data->cap_mask); dma_cap_set(DMA_XOR_VAL, plat_data->cap_mask); dma_cap_set(DMA_MEMSET, plat_data->cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, plat_data->cap_mask); dma_cap_set(DMA_INTERRUPT, plat_data->cap_mask); dma_cap_set(DMA_PQ, plat_data->cap_mask); - dma_cap_set(DMA_PQ_UPDATE, plat_data->cap_mask); dma_cap_set(DMA_PQ_VAL, plat_data->cap_mask); break; } diff --git a/arch/arm/plat-iop/adma.c b/arch/arm/plat-iop/adma.c index da1dd0dab07c..1ff6a37e893c 100644 --- a/arch/arm/plat-iop/adma.c +++ b/arch/arm/plat-iop/adma.c @@ -179,7 +179,6 @@ static int __init iop3xx_adma_cap_init(void) dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); #else dma_cap_set(DMA_MEMCPY, iop3xx_dma_0_data.cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_0_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_0_data.cap_mask); #endif @@ -188,7 +187,6 
@@ static int __init iop3xx_adma_cap_init(void) dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); #else dma_cap_set(DMA_MEMCPY, iop3xx_dma_1_data.cap_mask); - dma_cap_set(DMA_MEMCPY_CRC32C, iop3xx_dma_1_data.cap_mask); dma_cap_set(DMA_INTERRUPT, iop3xx_dma_1_data.cap_mask); #endif diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 4496bc606662..cecb6d657d55 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -1256,15 +1256,12 @@ static int __devinit iop_adma_probe(struct platform_device *pdev) } dev_printk(KERN_INFO, &pdev->dev, "Intel(R) IOP: " - "( %s%s%s%s%s%s%s%s%s%s)\n", + "( %s%s%s%s%s%s%s)\n", dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "", - dma_has_cap(DMA_PQ_UPDATE, dma_dev->cap_mask) ? "pq_update " : "", dma_has_cap(DMA_PQ_VAL, dma_dev->cap_mask) ? "pq_val " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "", - dma_has_cap(DMA_DUAL_XOR, dma_dev->cap_mask) ? "dual_xor " : "", dma_has_cap(DMA_XOR_VAL, dma_dev->cap_mask) ? "xor_val " : "", dma_has_cap(DMA_MEMSET, dma_dev->cap_mask) ? "fill " : "", - dma_has_cap(DMA_MEMCPY_CRC32C, dma_dev->cap_mask) ? "cpy+crc " : "", dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "cpy " : "", dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "intr " : ""); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 86853ed7970b..db23fd583f98 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -56,12 +56,9 @@ enum dma_transaction_type { DMA_MEMCPY, DMA_XOR, DMA_PQ, - DMA_DUAL_XOR, - DMA_PQ_UPDATE, DMA_XOR_VAL, DMA_PQ_VAL, DMA_MEMSET, - DMA_MEMCPY_CRC32C, DMA_INTERRUPT, DMA_PRIVATE, DMA_ASYNC_TX, From 83544ae9f3991bfc7d5e0fe9a3008cd05a8d57b7 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:53 -0700 Subject: [PATCH 0123/3409] dmaengine, async_tx: support alignment checks Some engines have transfer size and address alignment restrictions. Add a per-operation alignment property to struct dma_device that the async routines and dmatest can use to check alignment capabilities. Signed-off-by: Dan Williams --- crypto/async_tx/async_memcpy.c | 2 +- crypto/async_tx/async_memset.c | 2 +- crypto/async_tx/async_pq.c | 6 +++-- crypto/async_tx/async_xor.c | 5 ++-- drivers/dma/dmatest.c | 14 +++++++++++ include/linux/dmaengine.h | 44 ++++++++++++++++++++++++++++++++++ 6 files changed, 67 insertions(+), 6 deletions(-) diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c index b38cbb3fd527..0ec1fb69d4ea 100644 --- a/crypto/async_tx/async_memcpy.c +++ b/crypto/async_tx/async_memcpy.c @@ -50,7 +50,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset, struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - if (device) { + if (device && is_dma_copy_aligned(device, src_offset, dest_offset, len)) { dma_addr_t dma_dest, dma_src; unsigned long dma_prep_flags = 0; diff --git a/crypto/async_tx/async_memset.c b/crypto/async_tx/async_memset.c index a374784e3329..58e4a8752aee 100644 --- a/crypto/async_tx/async_memset.c +++ b/crypto/async_tx/async_memset.c @@ -47,7 +47,7 @@ async_memset(struct page *dest, int val, unsigned int offset, size_t len, struct dma_device *device = chan ? 
chan->device : NULL; struct dma_async_tx_descriptor *tx = NULL; - if (device) { + if (device && is_dma_fill_aligned(device, offset, 0, len)) { dma_addr_t dma_dest; unsigned long dma_prep_flags = 0; diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c index a25e290c39fb..b88db6d1dc65 100644 --- a/crypto/async_tx/async_pq.c +++ b/crypto/async_tx/async_pq.c @@ -211,7 +211,8 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks, if (dma_src && device && (src_cnt <= dma_maxpq(device, 0) || - dma_maxpq(device, DMA_PREP_CONTINUE) > 0)) { + dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && + is_dma_pq_aligned(device, offset, 0, len)) { /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); @@ -274,7 +275,8 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) blocks; - if (dma_src && device && disks <= dma_maxpq(device, 0)) { + if (dma_src && device && disks <= dma_maxpq(device, 0) && + is_dma_pq_aligned(device, offset, 0, len)) { struct device *dev = device->dev; dma_addr_t *pq = &dma_src[disks-2]; int i; diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c index db279872ef3d..b459a9034aac 100644 --- a/crypto/async_tx/async_xor.c +++ b/crypto/async_tx/async_xor.c @@ -193,7 +193,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) src_list; - if (dma_src && chan) { + if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) { /* run the xor asynchronously */ pr_debug("%s (async): len: %zu\n", __func__, len); @@ -265,7 +265,8 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, else if (sizeof(dma_addr_t) <= sizeof(struct page *)) dma_src = (dma_addr_t *) src_list; - if (dma_src && device && src_cnt <= device->max_xor) { + if (dma_src && device && src_cnt <= device->max_xor && + is_dma_xor_aligned(device, offset, 0, len)) { unsigned long dma_prep_flags = 0; int i; diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 58e49e41c7a3..a3722a7384b5 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -288,6 +288,7 @@ static int dmatest_func(void *data) dma_addr_t dma_dsts[dst_cnt]; struct completion cmp; unsigned long tmo = msecs_to_jiffies(3000); + u8 align = 0; total_tests++; @@ -295,6 +296,18 @@ static int dmatest_func(void *data) src_off = dmatest_random() % (test_buf_size - len + 1); dst_off = dmatest_random() % (test_buf_size - len + 1); + /* honor alignment restrictions */ + if (thread->type == DMA_MEMCPY) + align = dev->copy_align; + else if (thread->type == DMA_XOR) + align = dev->xor_align; + else if (thread->type == DMA_PQ) + align = dev->pq_align; + + len = (len >> align) << align; + src_off = (src_off >> align) << align; + dst_off = (dst_off >> align) << align; + dmatest_init_srcs(thread->srcs, src_off, len); dmatest_init_dsts(thread->dsts, dst_off, len); @@ -311,6 +324,7 @@ static int dmatest_func(void *data) DMA_BIDIRECTIONAL); } + if (thread->type == DMA_MEMCPY) tx = dev->device_prep_dma_memcpy(chan, dma_dsts[0] + dst_off, diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index db23fd583f98..835b9c7bf1c2 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -245,6 +245,10 @@ struct dma_async_tx_descriptor { * @cap_mask: one or more dma_capability flags * @max_xor: maximum number of xor 
sources, 0 if no capability * @max_pq: maximum number of PQ sources and PQ-continue capability + * @copy_align: alignment shift for memcpy operations + * @xor_align: alignment shift for xor operations + * @pq_align: alignment shift for pq operations + * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api * @device_alloc_chan_resources: allocate resources and return the @@ -271,6 +275,10 @@ struct dma_device { dma_cap_mask_t cap_mask; unsigned short max_xor; unsigned short max_pq; + u8 copy_align; + u8 xor_align; + u8 pq_align; + u8 fill_align; #define DMA_HAS_PQ_CONTINUE (1 << 15) int dev_id; @@ -314,6 +322,42 @@ struct dma_device { void (*device_issue_pending)(struct dma_chan *chan); }; +static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) +{ + size_t mask; + + if (!align) + return true; + mask = (1 << align) - 1; + if (mask & (off1 | off2 | len)) + return false; + return true; +} + +static inline bool is_dma_copy_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->copy_align, off1, off2, len); +} + +static inline bool is_dma_xor_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->xor_align, off1, off2, len); +} + +static inline bool is_dma_pq_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->pq_align, off1, off2, len); +} + +static inline bool is_dma_fill_aligned(struct dma_device *dev, size_t off1, + size_t off2, size_t len) +{ + return dmaengine_check_align(dev->fill_align, off1, off2, len); +} + static inline void dma_set_maxpq(struct dma_device *dma, int maxpq, int has_pq_continue) { From 128f2d567f906d38b11d993d8d97b9b988848e26 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:53 -0700 Subject: [PATCH 0124/3409] ioat2+: add fence support In preparation for adding more operation types to the ioat3 path the driver needs to honor the DMA_PREP_FENCE flag. For example the async_tx api will hand xor->memcpy->xor chains to the driver with the 'fence' flag set on the first xor and the memcpy operation. This flag in turn sets the 'fence' flag in the descriptor control field telling the hardware that future descriptors in the chain depend on the result of the current descriptor, so wait for all writes to complete before starting the next operation. Note that ioat1 does not prefetch the descriptor chain, so does not require/support fenced operations. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 460b77301332..568923c5ddec 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -710,6 +710,7 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, desc->txd.flags = flags; desc->len = total_len; hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); hw->ctl_f.compl_write = 1; dump_desc_dbg(ioat, desc); /* we leave the channel locked to ensure in order submission */ From 2aec048cdc4a5a81163a42a61df903f76a27e737 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:54 -0700 Subject: [PATCH 0125/3409] ioat3: hardware version 3.2 register / descriptor definitions ioat3.2 adds raid5 and raid6 offload capabilities. 
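As a quick sketch, the new capability bits added to registers.h in this patch can be decoded like so (userspace C; the constants are copied from the hunk below, while the decode helper itself is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Capability bits read from IOAT_DMA_CAP_OFFSET; values as defined in
 * the registers.h hunk of this patch. */
#define IOAT_CAP_FILL_BLOCK     0x00000040
#define IOAT_CAP_XOR            0x00000100
#define IOAT_CAP_PQ             0x00000200

/* Hypothetical helper: report which offload engines a channel exposes. */
static void decode_cap(uint32_t cap)
{
        printf("fill:%c xor:%c pq:%c\n",
               (cap & IOAT_CAP_FILL_BLOCK) ? 'y' : 'n',
               (cap & IOAT_CAP_XOR) ? 'y' : 'n',
               (cap & IOAT_CAP_PQ) ? 'y' : 'n');
}

int main(void)
{
        decode_cap(IOAT_CAP_XOR | IOAT_CAP_PQ); /* prints: fill:n xor:y pq:y */
        return 0;
}

A later patch in this series reads exactly this register in ioat3_dma_probe() to decide which dmaengine capabilities to advertise.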
Signed-off-by: Tom Picard Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.h | 2 +- drivers/dma/ioat/dma_v2.h | 26 ++++++- drivers/dma/ioat/hw.h | 142 +++++++++++++++++++++++++++++++++++ drivers/dma/ioat/registers.h | 17 +++++ 4 files changed, 185 insertions(+), 2 deletions(-) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index d9d6a7e3cd76..0d94e7804c13 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -155,7 +155,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, /** * struct ioat_desc_sw - wrapper around hardware descriptor - * @hw: hardware DMA descriptor + * @hw: hardware DMA descriptor (for memcpy) * @node: this descriptor will either be on the free list, * or attached to a transaction list (async_tx.tx_list) * @txd: the generic software descriptor for all engines diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 9baa3d6065ff..ed4bb82a283d 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -114,8 +114,32 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len return num_descs; } +/** + * struct ioat_ring_ent - wrapper around hardware descriptor + * @hw: hardware DMA descriptor (for memcpy) + * @fill: hardware fill descriptor + * @xor: hardware xor descriptor + * @xor_ex: hardware xor extension descriptor + * @pq: hardware pq descriptor + * @pq_ex: hardware pq extension descriptor + * @pqu: hardware pq update descriptor + * @raw: hardware raw (un-typed) descriptor + * @txd: the generic software descriptor for all engines + * @len: total transaction length for unmap + * @id: identifier for debug + */ + struct ioat_ring_ent { - struct ioat_dma_descriptor *hw; + union { + struct ioat_dma_descriptor *hw; + struct ioat_fill_descriptor *fill; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex; + struct ioat_pq_update_descriptor *pqu; + struct ioat_raw_descriptor *raw; + }; struct dma_async_tx_descriptor txd; size_t len; #ifdef DEBUG diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 7481fb13ce00..99afb12bd409 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -37,6 +37,7 @@ #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ +#define IOAT_VER_3_2 0x32 /* Version 3.2 */ struct ioat_dma_descriptor { uint32_t size; @@ -55,6 +56,7 @@ struct ioat_dma_descriptor { unsigned int dest_dca:1; unsigned int hint:1; unsigned int rsvd2:13; + #define IOAT_OP_COPY 0x00 unsigned int op:8; } ctl_f; }; @@ -70,4 +72,144 @@ struct ioat_dma_descriptor { }; uint64_t user2; }; + +struct ioat_fill_descriptor { + uint32_t size; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int rsvd:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int rsvd2:2; + unsigned int dest_brk:1; + unsigned int bundle:1; + unsigned int rsvd4:15; + #define IOAT_OP_FILL 0x01 + unsigned int op:8; + } ctl_f; + }; + uint64_t src_data; + uint64_t dst_addr; + uint64_t next; + uint64_t rsv1; + uint64_t next_dst_addr; + uint64_t user1; + uint64_t user2; +}; + +struct ioat_xor_descriptor { + uint32_t size; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int src_snoop_dis:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int src_cnt:3; + unsigned int bundle:1; + unsigned int 
dest_dca:1; + unsigned int hint:1; + unsigned int rsvd:13; + #define IOAT_OP_XOR 0x87 + #define IOAT_OP_XOR_VAL 0x88 + unsigned int op:8; + } ctl_f; + }; + uint64_t src_addr; + uint64_t dst_addr; + uint64_t next; + uint64_t src_addr2; + uint64_t src_addr3; + uint64_t src_addr4; + uint64_t src_addr5; +}; + +struct ioat_xor_ext_descriptor { + uint64_t src_addr6; + uint64_t src_addr7; + uint64_t src_addr8; + uint64_t next; + uint64_t rsvd[4]; +}; + +struct ioat_pq_descriptor { + uint32_t size; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int src_snoop_dis:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int src_cnt:3; + unsigned int bundle:1; + unsigned int dest_dca:1; + unsigned int hint:1; + unsigned int p_disable:1; + unsigned int q_disable:1; + unsigned int rsvd:11; + #define IOAT_OP_PQ 0x89 + #define IOAT_OP_PQ_VAL 0x8a + unsigned int op:8; + } ctl_f; + }; + uint64_t src_addr; + uint64_t p_addr; + uint64_t next; + uint64_t src_addr2; + uint64_t src_addr3; + uint8_t coef[8]; + uint64_t q_addr; +}; + +struct ioat_pq_ext_descriptor { + uint64_t src_addr4; + uint64_t src_addr5; + uint64_t src_addr6; + uint64_t next; + uint64_t src_addr7; + uint64_t src_addr8; + uint64_t rsvd[2]; +}; + +struct ioat_pq_update_descriptor { + uint32_t size; + union { + uint32_t ctl; + struct { + unsigned int int_en:1; + unsigned int src_snoop_dis:1; + unsigned int dest_snoop_dis:1; + unsigned int compl_write:1; + unsigned int fence:1; + unsigned int src_cnt:3; + unsigned int bundle:1; + unsigned int dest_dca:1; + unsigned int hint:1; + unsigned int p_disable:1; + unsigned int q_disable:1; + unsigned int rsvd:3; + unsigned int coef:8; + #define IOAT_OP_PQ_UP 0x8b + unsigned int op:8; + } ctl_f; + }; + uint64_t src_addr; + uint64_t p_addr; + uint64_t next; + uint64_t src_addr2; + uint64_t p_src; + uint64_t q_src; + uint64_t q_addr; +}; + +struct ioat_raw_descriptor { + uint64_t field[8]; +}; #endif diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index e4334a195380..85d04b8c563c 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -64,6 +64,20 @@ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ #define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 +#define IOAT_DEVICE_MMIO_RESTRICTED 0x0002 +#define IOAT_DEVICE_MEMORY_BYPASS 0x0004 +#define IOAT_DEVICE_ADDRESS_REMAPPING 0x0008 + +#define IOAT_DMA_CAP_OFFSET 0x10 /* 32-bit */ +#define IOAT_CAP_PAGE_BREAK 0x00000001 +#define IOAT_CAP_CRC 0x00000002 +#define IOAT_CAP_SKIP_MARKER 0x00000004 +#define IOAT_CAP_DCA 0x00000010 +#define IOAT_CAP_CRC_MOVE 0x00000020 +#define IOAT_CAP_FILL_BLOCK 0x00000040 +#define IOAT_CAP_APIC 0x00000080 +#define IOAT_CAP_XOR 0x00000100 +#define IOAT_CAP_PQ 0x00000200 #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ @@ -224,6 +238,9 @@ #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 #define IOAT_CHANERR_SOFT_ERR 0x4000 #define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 +#define IOAT_CHANERR_XOR_P_OR_CRC_ERR 0x10000 +#define IOAT_CHANERR_XOR_Q_ERR 0x20000 +#define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000 #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ From bf40a6869c9198bdf56fe173961feb89e9f0d961 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:55 -0700 Subject: [PATCH 0126/3409] ioat3: split ioat3 support to its own file, add memset Up until this point the driver for Intel(R) QuickData Technology engines, specification versions 2 and 3, 
was mostly identical for both versions, save for a few quirks. Version 3.2 hardware adds many new capabilities (like raid offload support) requiring some infrastructure that is not relevant for v2. For better code organization of the new functionality, move v3 and v3.2 support to its own file, dma_v3.c, and export some routines from the base files (dma.c and dma_v2.c) that can be reused directly. The first new capability included in this code reorganization is support for v3.2 memset operations. Signed-off-by: Dan Williams --- drivers/dma/ioat/Makefile | 2 +- drivers/dma/ioat/dma.c | 11 -- drivers/dma/ioat/dma.h | 16 ++ drivers/dma/ioat/dma_v2.c | 94 +++------- drivers/dma/ioat/dma_v2.h | 13 ++ drivers/dma/ioat/dma_v3.c | 367 ++++++++++++++++++++++++++++++++++++++ drivers/dma/ioat/pci.c | 2 +- 7 files changed, 421 insertions(+), 84 deletions(-) create mode 100644 drivers/dma/ioat/dma_v3.c diff --git a/drivers/dma/ioat/Makefile b/drivers/dma/ioat/Makefile index 205a639e84df..8997d3fb9051 100644 --- a/drivers/dma/ioat/Makefile +++ b/drivers/dma/ioat/Makefile @@ -1,2 +1,2 @@ obj-$(CONFIG_INTEL_IOATDMA) += ioatdma.o -ioatdma-objs := pci.o dma.o dma_v2.o dca.o +ioatdma-objs := pci.o dma.o dma_v2.o dma_v3.o dca.o diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 17a518d0386f..70262c0131d9 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -538,17 +538,6 @@ static void ioat1_cleanup_tasklet(unsigned long data) writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET); } -static void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, - int direction, enum dma_ctrl_flags flags, bool dst) -{ - if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || - (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) - pci_unmap_single(pdev, addr, len, direction); - else - pci_unmap_page(pdev, addr, len, direction); -} - - void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw) { diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 0d94e7804c13..c6d58bf541d1 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -60,6 +60,10 @@ * @dca: direct cache access context * @intr_quirk: interrupt setup quirk (for ioat_v1 devices) * @enumerate_channels: hw version specific channel enumeration + * @cleanup_tasklet: select between the v2 and v3 cleanup routines + * @timer_fn: select between the v2 and v3 timer watchdog routines + * + * Note: the v3 cleanup routine supports raid operations */ struct ioatdma_device { @@ -74,6 +78,8 @@ struct ioatdma_device { struct dca_provider *dca; void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); + void (*cleanup_tasklet)(unsigned long data); + void (*timer_fn)(unsigned long data); }; struct ioat_chan_common { @@ -287,6 +293,16 @@ static inline bool is_ioat_bug(unsigned long err) IOAT_CHANERR_LENGTH_ERR)); } +static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, + int direction, enum dma_ctrl_flags flags, bool dst) +{ + if ((dst && (flags & DMA_COMPL_DEST_UNMAP_SINGLE)) || + (!dst && (flags & DMA_COMPL_SRC_UNMAP_SINGLE))) + pci_unmap_single(pdev, addr, len, direction); + else + pci_unmap_page(pdev, addr, len, direction); +} + int __devinit ioat_probe(struct ioatdma_device *device); int __devinit ioat_register(struct ioatdma_device *device); int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index
568923c5ddec..7492e9165e08 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -39,7 +39,7 @@ #include "registers.h" #include "hw.h" -static int ioat_ring_alloc_order = 8; +int ioat_ring_alloc_order = 8; module_param(ioat_ring_alloc_order, int, 0644); MODULE_PARM_DESC(ioat_ring_alloc_order, "ioat2+: allocate 2^n descriptors per channel (default: n=8)"); @@ -63,7 +63,7 @@ static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); } -static void ioat2_issue_pending(struct dma_chan *chan) +void ioat2_issue_pending(struct dma_chan *chan) { struct ioat2_dma_chan *ioat = to_ioat2_chan(chan); @@ -214,7 +214,7 @@ static void ioat2_cleanup_tasklet(unsigned long data) writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } -static void __restart_chan(struct ioat2_dma_chan *ioat) +void __ioat2_restart_chan(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; @@ -255,11 +255,9 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); - __restart_chan(ioat); + __ioat2_restart_chan(ioat); } -static bool reshape_ring(struct ioat2_dma_chan *ioat, int order); - static void ioat2_timer_event(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; @@ -321,7 +319,7 @@ static void ioat2_timer_event(unsigned long data) * ioat2_enumerate_channels - find and initialize the device's channels * @device: the device to be enumerated */ -static int ioat2_enumerate_channels(struct ioatdma_device *device) +int ioat2_enumerate_channels(struct ioatdma_device *device) { struct ioat2_dma_chan *ioat; struct device *dev = &device->pdev->dev; @@ -354,8 +352,8 @@ static int ioat2_enumerate_channels(struct ioatdma_device *device) break; ioat_init_channel(device, &ioat->base, i, - ioat2_timer_event, - ioat2_cleanup_tasklet, + device->timer_fn, + device->cleanup_tasklet, (unsigned long) ioat); ioat->xfercap_log = xfercap_log; spin_lock_init(&ioat->ring_lock); @@ -460,7 +458,7 @@ static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gf /* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring * @chan: channel to be initialized */ -static int ioat2_alloc_chan_resources(struct dma_chan *c) +int ioat2_alloc_chan_resources(struct dma_chan *c) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; @@ -514,7 +512,7 @@ static int ioat2_alloc_chan_resources(struct dma_chan *c) return 1 << ioat->alloc_order; } -static bool reshape_ring(struct ioat2_dma_chan *ioat, int order) +bool reshape_ring(struct ioat2_dma_chan *ioat, int order) { /* reshape differs from normal ring allocation in that we want * to allocate a new software ring while only @@ -627,7 +625,7 @@ static bool reshape_ring(struct ioat2_dma_chan *ioat, int order) * @ioat: ioat2,3 channel (ring) to operate on * @num_descs: allocation length */ -static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) +int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) { struct ioat_chan_common *chan = &ioat->base; @@ -655,9 +653,11 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d spin_lock_bh(&chan->cleanup_lock); if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) { + struct ioatdma_device *device = chan->device; + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 
spin_unlock_bh(&chan->cleanup_lock); - ioat2_timer_event((unsigned long) ioat); + device->timer_fn((unsigned long) ioat); } else spin_unlock_bh(&chan->cleanup_lock); return -ENOMEM; @@ -670,7 +670,7 @@ static int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_d return 0; /* with ioat->ring_lock held */ } -static struct dma_async_tx_descriptor * +struct dma_async_tx_descriptor * ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dma_src, size_t len, unsigned long flags) { @@ -722,11 +722,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, * ioat2_free_chan_resources - release all the descriptors * @chan: the channel to be cleaned */ -static void ioat2_free_chan_resources(struct dma_chan *c) +void ioat2_free_chan_resources(struct dma_chan *c) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; - struct ioatdma_device *ioatdma_device = chan->device; + struct ioatdma_device *device = chan->device; struct ioat_ring_ent *desc; const u16 total_descs = 1 << ioat->alloc_order; int descs; @@ -740,7 +740,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) tasklet_disable(&chan->cleanup_task); del_timer_sync(&chan->timer); - ioat2_cleanup(ioat); + device->cleanup_tasklet((unsigned long) ioat); /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. @@ -770,8 +770,7 @@ static void ioat2_free_chan_resources(struct dma_chan *c) kfree(ioat->ring); ioat->ring = NULL; ioat->alloc_order = 0; - pci_pool_free(ioatdma_device->completion_pool, - chan->completion, + pci_pool_free(device->completion_pool, chan->completion, chan->completion_dma); spin_unlock_bh(&ioat->ring_lock); @@ -781,16 +780,17 @@ static void ioat2_free_chan_resources(struct dma_chan *c) ioat->dmacount = 0; } -static enum dma_status +enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioatdma_device *device = ioat->base.device; if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) return DMA_SUCCESS; - ioat2_cleanup(ioat); + device->cleanup_tasklet((unsigned long) ioat); return ioat_is_complete(c, cookie, done, used); } @@ -804,6 +804,8 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) int err; device->enumerate_channels = ioat2_enumerate_channels; + device->cleanup_tasklet = ioat2_cleanup_tasklet; + device->timer_fn = ioat2_timer_event; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; @@ -830,53 +832,3 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) return err; } - -int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) -{ - struct pci_dev *pdev = device->pdev; - struct dma_device *dma; - struct dma_chan *c; - struct ioat_chan_common *chan; - int err; - u16 dev_id; - - device->enumerate_channels = ioat2_enumerate_channels; - dma = &device->common; - dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; - dma->device_issue_pending = ioat2_issue_pending; - dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; - dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat2_is_complete; - - /* -= IOAT ver.3 workarounds =- */ - /* Write CHANERRMSK_INT with 3E07h to mask out the errors - * that can cause stability issues for IOAT ver.3 - */ - 
pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); - - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); - - err = ioat_probe(device); - if (err) - return err; - ioat_set_tcp_copy_break(262144); - - list_for_each_entry(c, &dma->channels, device_node) { - chan = to_chan_common(c); - writel(IOAT_DMA_DCA_ANY_CPU, - chan->reg_base + IOAT_DCACTRL_OFFSET); - } - - err = ioat_register(device); - if (err) - return err; - if (dca) - device->dca = ioat3_dca_init(pdev, device->reg_base); - - return err; -} diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index ed4bb82a283d..bde57ddf555d 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -27,6 +27,7 @@ extern int ioat_pending_level; +extern int ioat_ring_alloc_order; /* * workaround for IOAT ver.3.0 null descriptor issue @@ -167,4 +168,16 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs); +int ioat2_enumerate_channels(struct ioatdma_device *device); +struct dma_async_tx_descriptor * +ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags); +void ioat2_issue_pending(struct dma_chan *chan); +int ioat2_alloc_chan_resources(struct dma_chan *c); +void ioat2_free_chan_resources(struct dma_chan *c); +enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used); +void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); +bool reshape_ring(struct ioat2_dma_chan *ioat, int order); #endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c new file mode 100644 index 000000000000..b223d66b97e9 --- /dev/null +++ b/drivers/dma/ioat/dma_v3.c @@ -0,0 +1,367 @@ +/* + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2004 - 2009 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * BSD LICENSE + * + * Copyright(c) 2004-2009 Intel Corporation. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * Support routines for v3+ hardware + */ + +#include <linux/pci.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include "registers.h" +#include "hw.h" +#include "dma.h" +#include "dma_v2.h" + +static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, + struct ioat_ring_ent *desc) +{ + struct ioat_chan_common *chan = &ioat->base; + struct pci_dev *pdev = chan->device->pdev; + size_t len = desc->len; + size_t offset = len - desc->hw->size; + struct dma_async_tx_descriptor *tx = &desc->txd; + enum dma_ctrl_flags flags = tx->flags; + + switch (desc->hw->ctl_f.op) { + case IOAT_OP_COPY: + ioat_dma_unmap(chan, flags, len, desc->hw); + break; + case IOAT_OP_FILL: { + struct ioat_fill_descriptor *hw = desc->fill; + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) + ioat_unmap(pdev, hw->dst_addr - offset, len, + PCI_DMA_FROMDEVICE, flags, 1); + break; + } + default: + dev_err(&pdev->dev, "%s: unknown op type: %#x\n", + __func__, desc->hw->ctl_f.op); + } +} + + +static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) +{ + struct ioat_chan_common *chan = &ioat->base; + struct ioat_ring_ent *desc; + bool seen_current = false; + u16 active; + int i; + + dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", + __func__, ioat->head, ioat->tail, ioat->issued); + + active = ioat2_ring_active(ioat); + for (i = 0; i < active && !seen_current; i++) { + struct dma_async_tx_descriptor *tx; + + prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); + desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + dump_desc_dbg(ioat, desc); + tx = &desc->txd; + if (tx->cookie) { + chan->completed_cookie = tx->cookie; + ioat3_dma_unmap(ioat, desc); + tx->cookie = 0; + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } + + if (tx->phys == phys_complete) + seen_current = true; + } + ioat->tail += i; + BUG_ON(!seen_current); /* no active descs have written a completion?
*/ + chan->last_completion = phys_complete; + if (ioat->head == ioat->tail) { + dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", + __func__); + clear_bit(IOAT_COMPLETION_PENDING, &chan->state); + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); + } +} + +static void ioat3_cleanup(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + + prefetch(chan->completion); + + if (!spin_trylock_bh(&chan->cleanup_lock)) + return; + + if (!ioat_cleanup_preamble(chan, &phys_complete)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + if (!spin_trylock_bh(&ioat->ring_lock)) { + spin_unlock_bh(&chan->cleanup_lock); + return; + } + + __cleanup(ioat, phys_complete); + + spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); +} + +static void ioat3_cleanup_tasklet(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + + ioat3_cleanup(ioat); + writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); +} + +static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + unsigned long phys_complete; + u32 status; + + status = ioat_chansts(chan); + if (is_ioat_active(status) || is_ioat_idle(status)) + ioat_suspend(chan); + while (is_ioat_active(status) || is_ioat_idle(status)) { + status = ioat_chansts(chan); + cpu_relax(); + } + + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + + __ioat2_restart_chan(ioat); +} + +static void ioat3_timer_event(unsigned long data) +{ + struct ioat2_dma_chan *ioat = (void *) data; + struct ioat_chan_common *chan = &ioat->base; + + spin_lock_bh(&chan->cleanup_lock); + if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { + unsigned long phys_complete; + u64 status; + + spin_lock_bh(&ioat->ring_lock); + status = ioat_chansts(chan); + + /* when halted due to errors check for channel + * programming errors before advancing the completion state + */ + if (is_ioat_halted(status)) { + u32 chanerr; + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + BUG_ON(is_ioat_bug(chanerr)); + } + + /* if we haven't made progress and we have already + * acknowledged a pending completion once, then be more + * forceful with a restart + */ + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + ioat3_restart_channel(ioat); + else { + set_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + } + spin_unlock_bh(&ioat->ring_lock); + } else { + u16 active; + + /* if the ring is idle, empty, and oversized try to step + * down the size + */ + spin_lock_bh(&ioat->ring_lock); + active = ioat2_ring_active(ioat); + if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) + reshape_ring(ioat, ioat->alloc_order-1); + spin_unlock_bh(&ioat->ring_lock); + + /* keep shrinking until we get back to our minimum + * default size + */ + if (ioat->alloc_order > ioat_get_alloc_order()) + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); + } + spin_unlock_bh(&chan->cleanup_lock); +} + +static enum dma_status +ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, + dma_cookie_t *done, dma_cookie_t *used) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + + if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + return DMA_SUCCESS; + + ioat3_cleanup(ioat); + + return ioat_is_complete(c, cookie, done, used); +} + +static struct dma_async_tx_descriptor * 
+ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, + size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_ring_ent *desc; + size_t total_len = len; + struct ioat_fill_descriptor *fill; + int num_descs; + u64 src_data = (0x0101010101010101ULL) * (value & 0xff); + u16 idx; + int i; + + num_descs = ioat2_xferlen_to_descs(ioat, len); + if (likely(num_descs) && + ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) + /* pass */; + else + return NULL; + for (i = 0; i < num_descs; i++) { + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + fill = desc->fill; + + fill->size = xfer_size; + fill->src_data = src_data; + fill->dst_addr = dest; + fill->ctl = 0; + fill->ctl_f.op = IOAT_OP_FILL; + + len -= xfer_size; + dest += xfer_size; + dump_desc_dbg(ioat, desc); + } + + desc->txd.flags = flags; + desc->len = total_len; + fill->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + fill->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + fill->ctl_f.compl_write = 1; + dump_desc_dbg(ioat, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) +{ + struct pci_dev *pdev = device->pdev; + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + int err; + u16 dev_id; + u32 cap; + + device->enumerate_channels = ioat2_enumerate_channels; + device->cleanup_tasklet = ioat3_cleanup_tasklet; + device->timer_fn = ioat3_timer_event; + dma = &device->common; + dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; + dma->device_issue_pending = ioat2_issue_pending; + dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; + dma->device_free_chan_resources = ioat2_free_chan_resources; + dma->device_is_tx_complete = ioat3_is_complete; + cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); + if (cap & IOAT_CAP_FILL_BLOCK) { + dma_cap_set(DMA_MEMSET, dma->cap_mask); + dma->device_prep_dma_memset = ioat3_prep_memset_lock; + } + + /* -= IOAT ver.3 workarounds =- */ + /* Write CHANERRMSK_INT with 3E07h to mask out the errors + * that can cause stability issues for IOAT ver.3 + */ + pci_write_config_dword(pdev, IOAT_PCI_CHANERRMASK_INT_OFFSET, 0x3e07); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) + pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + + err = ioat_probe(device); + if (err) + return err; + ioat_set_tcp_copy_break(262144); + + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + writel(IOAT_DMA_DCA_ANY_CPU, + chan->reg_base + IOAT_DCACTRL_OFFSET); + } + + err = ioat_register(device); + if (err) + return err; + if (dca) + device->dca = ioat3_dca_init(pdev, device->reg_base); + + return 0; +} diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index c4e432269252..0f3ec6e97fe9 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -36,7 +36,7 @@ #include "hw.h" MODULE_VERSION(IOAT_DMA_VERSION); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel Corporation"); static struct pci_device_id ioat_pci_tbl[] = { From 5669e31c5a4874f1634bc0ffba268a6e2fa0cdd2 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:56 -0700 Subject: [PATCH 0127/3409] ioat: add 
'ioat' sysfs attributes Export driver attributes for diagnostic purposes: 'ring_size': total number of descriptors available to the engine 'ring_active': number of descriptors in-flight 'capabilities': supported operation types for this channel 'version': Intel(R) QuickData specification revision This also allows some chattiness to be removed from the driver startup as this information is now available via sysfs. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 120 ++++++++++++++++++++++++++++++++++++-- drivers/dma/ioat/dma.h | 12 ++++ drivers/dma/ioat/dma_v2.c | 33 +++++++++++ drivers/dma/ioat/dma_v2.h | 1 + drivers/dma/ioat/dma_v3.c | 3 + drivers/dma/ioat/pci.c | 3 + 6 files changed, 166 insertions(+), 6 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 70262c0131d9..cb08f8108496 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -263,6 +263,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + ioat->active += desc->hw->tx_cnt; ioat->pending += desc->hw->tx_cnt; if (ioat->pending >= ioat_pending_level) __ioat1_dma_memcpy_issue_pending(ioat); @@ -611,6 +612,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) chan->completed_cookie = tx->cookie; tx->cookie = 0; ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw); + ioat->active -= desc->hw->tx_cnt; if (tx->callback) { tx->callback(tx->callback_param); tx->callback = NULL; } @@ -1028,13 +1030,8 @@ int __devinit ioat_probe(struct ioatdma_device *device) dma_cap_set(DMA_MEMCPY, dma->cap_mask); dma->dev = &pdev->dev; - dev_err(dev, "Intel(R) I/OAT DMA Engine found," - " %d channels, device version 0x%02x, driver version %s\n", - dma->chancnt, device->version, IOAT_DMA_VERSION); - if (!dma->chancnt) { - dev_err(dev, "Intel(R) I/OAT DMA Engine problem found: " - "zero channels detected\n"); + dev_err(dev, "zero channels detected\n"); goto err_setup_interrupts; } @@ -1085,6 +1082,113 @@ static void ioat1_intr_quirk(struct ioatdma_device *device) pci_write_config_dword(pdev, IOAT_PCI_DMACTRL_OFFSET, dmactrl); } +static ssize_t ring_size_show(struct dma_chan *c, char *page) +{ + struct ioat_dma_chan *ioat = to_ioat_chan(c); + + return sprintf(page, "%d\n", ioat->desccount); +} +static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); + +static ssize_t ring_active_show(struct dma_chan *c, char *page) +{ + struct ioat_dma_chan *ioat = to_ioat_chan(c); + + return sprintf(page, "%d\n", ioat->active); +} +static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); + +static ssize_t cap_show(struct dma_chan *c, char *page) +{ + struct dma_device *dma = c->device; + + return sprintf(page, "copy%s%s%s%s%s%s\n", + dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "", + dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "", + dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "", + dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "", + dma_has_cap(DMA_MEMSET, dma->cap_mask) ? " fill" : "", + dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ?
" intr" : ""); + +} +struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap); + +static ssize_t version_show(struct dma_chan *c, char *page) +{ + struct dma_device *dma = c->device; + struct ioatdma_device *device = to_ioatdma_device(dma); + + return sprintf(page, "%d.%d\n", + device->version >> 4, device->version & 0xf); +} +struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version); + +static struct attribute *ioat1_attrs[] = { + &ring_size_attr.attr, + &ring_active_attr.attr, + &ioat_cap_attr.attr, + &ioat_version_attr.attr, + NULL, +}; + +static ssize_t +ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page) +{ + struct ioat_sysfs_entry *entry; + struct ioat_chan_common *chan; + + entry = container_of(attr, struct ioat_sysfs_entry, attr); + chan = container_of(kobj, struct ioat_chan_common, kobj); + + if (!entry->show) + return -EIO; + return entry->show(&chan->common, page); +} + +struct sysfs_ops ioat_sysfs_ops = { + .show = ioat_attr_show, +}; + +static struct kobj_type ioat1_ktype = { + .sysfs_ops = &ioat_sysfs_ops, + .default_attrs = ioat1_attrs, +}; + +void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type) +{ + struct dma_device *dma = &device->common; + struct dma_chan *c; + + list_for_each_entry(c, &dma->channels, device_node) { + struct ioat_chan_common *chan = to_chan_common(c); + struct kobject *parent = &c->dev->device.kobj; + int err; + + err = kobject_init_and_add(&chan->kobj, type, parent, "quickdata"); + if (err) { + dev_warn(to_dev(chan), + "sysfs init error (%d), continuing...\n", err); + kobject_put(&chan->kobj); + set_bit(IOAT_KOBJ_INIT_FAIL, &chan->state); + } + } +} + +void ioat_kobject_del(struct ioatdma_device *device) +{ + struct dma_device *dma = &device->common; + struct dma_chan *c; + + list_for_each_entry(c, &dma->channels, device_node) { + struct ioat_chan_common *chan = to_chan_common(c); + + if (!test_bit(IOAT_KOBJ_INIT_FAIL, &chan->state)) { + kobject_del(&chan->kobj); + kobject_put(&chan->kobj); + } + } +} + int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; @@ -1107,6 +1211,8 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) err = ioat_register(device); if (err) return err; + ioat_kobject_add(device, &ioat1_ktype); + if (dca) device->dca = ioat_dca_init(pdev, device->reg_base); @@ -1119,6 +1225,8 @@ void __devexit ioat_dma_remove(struct ioatdma_device *device) ioat_disable_interrupts(device); + ioat_kobject_del(device); + dma_async_device_unregister(dma); pci_pool_destroy(device->dma_pool); diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index c6d58bf541d1..c2939b289185 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -92,6 +92,7 @@ struct ioat_chan_common { #define IOAT_COMPLETION_PENDING 0 #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 + #define IOAT_KOBJ_INIT_FAIL 3 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) @@ -100,8 +101,13 @@ struct ioat_chan_common { dma_addr_t completion_dma; u64 *completion; struct tasklet_struct cleanup_task; + struct kobject kobj; }; +struct ioat_sysfs_entry { + struct attribute attr; + ssize_t (*show)(struct dma_chan *, char *); +}; /** * struct ioat_dma_chan - internal representation of a DMA channel @@ -117,6 +123,7 @@ struct ioat_dma_chan { int pending; u16 desccount; + u16 active; }; static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) @@ -319,4 +326,9 @@ void 
ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, unsigned long *phys_complete); +void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); +void ioat_kobject_del(struct ioatdma_device *device); +extern struct sysfs_ops ioat_sysfs_ops; +extern struct ioat_sysfs_entry ioat_version_attr; +extern struct ioat_sysfs_entry ioat_cap_attr; #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 7492e9165e08..80ce32de8d32 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -795,6 +795,36 @@ ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, return ioat_is_complete(c, cookie, done, used); } +static ssize_t ring_size_show(struct dma_chan *c, char *page) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + + return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1); +} +static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size); + +static ssize_t ring_active_show(struct dma_chan *c, char *page) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + + /* ...taken outside the lock, no need to be precise */ + return sprintf(page, "%d\n", ioat2_ring_active(ioat)); +} +static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active); + +static struct attribute *ioat2_attrs[] = { + &ring_size_attr.attr, + &ring_active_attr.attr, + &ioat_cap_attr.attr, + &ioat_version_attr.attr, + NULL, +}; + +struct kobj_type ioat2_ktype = { + .sysfs_ops = &ioat_sysfs_ops, + .default_attrs = ioat2_attrs, +}; + int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; @@ -827,6 +857,9 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) err = ioat_register(device); if (err) return err; + + ioat_kobject_add(device, &ioat2_ktype); + if (dca) device->dca = ioat2_dca_init(pdev, device->reg_base); diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index bde57ddf555d..fa030f8e1f27 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -180,4 +180,5 @@ enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); bool reshape_ring(struct ioat2_dma_chan *ioat, int order); +extern struct kobj_type ioat2_ktype; #endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index b223d66b97e9..22af78ec2573 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -360,6 +360,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) err = ioat_register(device); if (err) return err; + + ioat_kobject_add(device, &ioat2_ktype); + if (dca) device->dca = ioat3_dca_init(pdev, device->reg_base); diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 0f3ec6e97fe9..626a508a4f8b 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -168,6 +168,9 @@ static void __devexit ioat_remove(struct pci_dev *pdev) static int __init ioat_init_module(void) { + pr_info("%s: Intel(R) QuickData Technology Driver %s\n", + DRV_NAME, IOAT_DMA_VERSION); + return pci_register_driver(&ioat_pci_driver); } module_init(ioat_init_module); From e61dacaeb3918cd00cd642e8fb0828324ac59819 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:57 -0700 Subject: [PATCH 0128/3409] ioat3: enable dca for completion writes Tag 
completion writes for direct cache access to reduce the latency of checking for descriptor completions. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 3 ++- drivers/dma/ioat/registers.h | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 22af78ec2573..0913d11e09ee 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -167,7 +167,8 @@ static void ioat3_cleanup_tasklet(unsigned long data) struct ioat2_dma_chan *ioat = (void *) data; ioat3_cleanup(ioat); - writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); + writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN, + ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 85d04b8c563c..97d26ea6d72f 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -84,6 +84,7 @@ /* DMA Channel Registers */ #define IOAT_CHANCTRL_OFFSET 0x00 /* 16-bit Channel Control Register */ #define IOAT_CHANCTRL_CHANNEL_PRIORITY_MASK 0xF000 +#define IOAT3_CHANCTRL_COMPL_DCA_EN 0x0200 #define IOAT_CHANCTRL_CHANNEL_IN_USE 0x0100 #define IOAT_CHANCTRL_DESCRIPTOR_ADDR_SNOOP_CONTROL 0x0020 #define IOAT_CHANCTRL_ERR_INT_EN 0x0010 From b094ad3be564e7cc59cca4ff0256550d3a55dd3b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:57 -0700 Subject: [PATCH 0129/3409] ioat3: xor support ioat3.2 adds xor offload support for up to 8 sources. It can also perform an xor-zero-sum operation to validate whether all given sources sum to zero, without writing to a destination. Xor descriptors differ from memcpy in that one operation may require multiple descriptors depending on the number of sources. When the number of sources exceeds 5 an extended descriptor is needed. These descriptors need to be accounted for when updating the DMA_COUNT register. 
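To make that accounting concrete, here is a small helper (illustration only, not part of the patch; the function name is hypothetical) that mirrors the slot arithmetic used by __ioat3_prep_xor_lock below:

	static int ioat3_xor_slots_needed(int num_descs, int src_cnt)
	{
		/* more than 5 sources spill into an extended descriptor,
		 * doubling the slots consumed per transfer chunk
		 */
		if (src_cnt > 5)
			num_descs *= 2;
		/* one extra null descriptor keeps completion writes from
		 * the raid engine ordered behind the legacy engine
		 */
		return num_descs + 1;
	}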
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 2 +- drivers/dma/ioat/dma_v2.h | 3 + drivers/dma/ioat/dma_v3.c | 218 ++++++++++++++++++++++++++++++++++- drivers/dma/ioat/registers.h | 2 + 4 files changed, 222 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 80ce32de8d32..ee295d48ba2c 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -48,7 +48,7 @@ module_param(ioat_ring_max_alloc_order, int, 0644); MODULE_PARM_DESC(ioat_ring_max_alloc_order, "ioat2+: upper limit for dynamic ring resizing (default: n=16)"); -static void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) +void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) { void * __iomem reg_base = ioat->base.reg_base; diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index fa030f8e1f27..e23027d3dcbd 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -127,6 +127,7 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len * @raw: hardware raw (un-typed) descriptor * @txd: the generic software descriptor for all engines * @len: total transaction length for unmap + * @result: asynchronous result of validate operations * @id: identifier for debug */ @@ -143,6 +144,7 @@ struct ioat_ring_ent { }; struct dma_async_tx_descriptor txd; size_t len; + enum sum_check_flags *result; #ifdef DEBUG int id; #endif @@ -180,5 +182,6 @@ enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used); void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); bool reshape_ring(struct ioat2_dma_chan *ioat, int order); +void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); extern struct kobj_type ioat2_ktype; #endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 0913d11e09ee..957c205f91d0 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -64,8 +64,33 @@ #include "dma.h" #include "dma_v2.h" +/* ioat hardware assumes at least two sources for raid operations */ +#define src_cnt_to_sw(x) ((x) + 2) +#define src_cnt_to_hw(x) ((x) - 2) + +/* provide a lookup table for setting the source address in the base or + * extended descriptor of an xor descriptor + */ +static const u8 xor_idx_to_desc __read_mostly = 0xd0; +static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; + +static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) +{ + struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; + + return raw->field[xor_idx_to_field[idx]]; +} + +static void xor_set_src(struct ioat_raw_descriptor *descs[2], + dma_addr_t addr, u32 offset, int idx) +{ + struct ioat_raw_descriptor *raw = descs[xor_idx_to_desc >> idx & 1]; + + raw->field[xor_idx_to_field[idx]] = addr + offset; +} + static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, - struct ioat_ring_ent *desc) + struct ioat_ring_ent *desc, int idx) { struct ioat_chan_common *chan = &ioat->base; struct pci_dev *pdev = chan->device->pdev; @@ -86,13 +111,71 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, PCI_DMA_FROMDEVICE, flags, 1); break; } + case IOAT_OP_XOR_VAL: + case IOAT_OP_XOR: { + struct ioat_xor_descriptor *xor = desc->xor; + struct ioat_ring_ent *ext; + struct ioat_xor_ext_descriptor *xor_ex = NULL; + int src_cnt = src_cnt_to_sw(xor->ctl_f.src_cnt); + struct ioat_raw_descriptor *descs[2]; + int i; + + if (src_cnt > 5) { + ext = ioat2_get_ring_ent(ioat, idx + 1); + xor_ex = 
ext->xor_ex; + } + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + descs[0] = (struct ioat_raw_descriptor *) xor; + descs[1] = (struct ioat_raw_descriptor *) xor_ex; + for (i = 0; i < src_cnt; i++) { + dma_addr_t src = xor_get_src(descs, i); + + ioat_unmap(pdev, src - offset, len, + PCI_DMA_TODEVICE, flags, 0); + } + + /* dest is a source in xor validate operations */ + if (xor->ctl_f.op == IOAT_OP_XOR_VAL) { + ioat_unmap(pdev, xor->dst_addr - offset, len, + PCI_DMA_TODEVICE, flags, 1); + break; + } + } + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) + ioat_unmap(pdev, xor->dst_addr - offset, len, + PCI_DMA_FROMDEVICE, flags, 1); + break; + } default: dev_err(&pdev->dev, "%s: unknown op type: %#x\n", __func__, desc->hw->ctl_f.op); } } +static bool desc_has_ext(struct ioat_ring_ent *desc) +{ + struct ioat_dma_descriptor *hw = desc->hw; + if (hw->ctl_f.op == IOAT_OP_XOR || + hw->ctl_f.op == IOAT_OP_XOR_VAL) { + struct ioat_xor_descriptor *xor = desc->xor; + + if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) + return true; + } + + return false; +} + +/** + * __cleanup - reclaim used descriptors + * @ioat: channel (ring) to clean + * + * The difference from the dma_v2.c __cleanup() is that this routine + * handles extended descriptors and dma-unmapping raid operations. + */ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) { struct ioat_chan_common *chan = &ioat->base; @@ -114,7 +197,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) tx = &desc->txd; if (tx->cookie) { chan->completed_cookie = tx->cookie; - ioat3_dma_unmap(ioat, desc); + ioat3_dma_unmap(ioat, desc, ioat->tail + i); tx->cookie = 0; if (tx->callback) { tx->callback(tx->callback_param); @@ -124,6 +207,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) if (tx->phys == phys_complete) seen_current = true; + + /* skip extended descriptors */ + if (desc_has_ext(desc)) { + BUG_ON(i + 1 >= active); + i++; + } } ioat->tail += i; BUG_ON(!seen_current); /* no active descs have written a completion? */ @@ -309,6 +398,121 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, return &desc->txd; } +static struct dma_async_tx_descriptor * +__ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, + dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, + size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_xor_descriptor *xor; + struct ioat_xor_ext_descriptor *xor_ex = NULL; + struct ioat_dma_descriptor *hw; + u32 offset = 0; + int num_descs; + int with_ext; + int i; + u16 idx; + u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; + + BUG_ON(src_cnt < 2); + + num_descs = ioat2_xferlen_to_descs(ioat, len); + /* we need 2x the number of descriptors to cover greater than 5 + * sources + */ + if (src_cnt > 5) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
+ */ + if (likely(num_descs) && + ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) + /* pass */; + else + return NULL; + for (i = 0; i < num_descs; i += 1 + with_ext) { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); + int s; + + desc = ioat2_get_ring_ent(ioat, idx + i); + xor = desc->xor; + + /* save a branch by unconditionally retrieving the + * extended descriptor xor_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat2_get_ring_ent(ioat, idx + i + 1); + xor_ex = ext->xor_ex; + + descs[0] = (struct ioat_raw_descriptor *) xor; + descs[1] = (struct ioat_raw_descriptor *) xor_ex; + for (s = 0; s < src_cnt; s++) + xor_set_src(descs, src[s], offset, s); + xor->size = xfer_size; + xor->dst_addr = dest + offset; + xor->ctl = 0; + xor->ctl_f.op = op; + xor->ctl_f.src_cnt = src_cnt_to_hw(src_cnt); + + len -= xfer_size; + offset += xfer_size; + dump_desc_dbg(ioat, desc); + } + + /* last xor descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + xor->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* completion descriptor carries interrupt bit */ + compl_desc = ioat2_get_ring_ent(ioat, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat, compl_desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +static struct dma_async_tx_descriptor * +ioat3_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + return __ioat3_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); +} + +struct dma_async_tx_descriptor * +ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... 
so clear it here + */ + *result = 0; + + return __ioat3_prep_xor_lock(chan, result, src[0], &src[1], + src_cnt - 1, len, flags); +} + int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; @@ -333,6 +537,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma_cap_set(DMA_MEMSET, dma->cap_mask); dma->device_prep_dma_memset = ioat3_prep_memset_lock; } + if (cap & IOAT_CAP_XOR) { + dma->max_xor = 8; + dma->xor_align = 2; + + dma_cap_set(DMA_XOR, dma->cap_mask); + dma->device_prep_dma_xor = ioat3_prep_xor; + + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = ioat3_prep_xor_val; + } /* -= IOAT ver.3 workarounds =- */ /* Write CHANERRMSK_INT with 3E07h to mask out the errors diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h index 97d26ea6d72f..63038e18ab03 100644 --- a/drivers/dma/ioat/registers.h +++ b/drivers/dma/ioat/registers.h @@ -243,6 +243,8 @@ #define IOAT_CHANERR_XOR_Q_ERR 0x20000 #define IOAT_CHANERR_DESCRIPTOR_COUNT_ERR 0x40000 +#define IOAT_CHANERR_HANDLE_MASK (IOAT_CHANERR_XOR_P_OR_CRC_ERR | IOAT_CHANERR_XOR_Q_ERR) + #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ #endif /* _IOAT_REGISTERS_H_ */ From 9de6fc717bdc574cf5faf9d46ce0f9d6265c7952 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:58 -0700 Subject: [PATCH 0130/3409] ioat3: xor self test This adds a hardware specific self test to be called from ioat_probe. In the ioat3 case we will have tests for all the different raid operations, while ioat1 and ioat2 will continue to just test memcpy. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 5 +- drivers/dma/ioat/dma.h | 4 +- drivers/dma/ioat/dma_v2.c | 1 + drivers/dma/ioat/dma_v3.c | 275 ++++++++++++++++++++++++++++++++++++++ 4 files changed, 282 insertions(+), 3 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index cb08f8108496..32a757be75c1 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -799,7 +799,7 @@ static void __devinit ioat_dma_test_callback(void *dma_async_param) * ioat_dma_self_test - Perform a IOAT transaction to verify the HW works. 
* @device: device to be tested */ -static int __devinit ioat_dma_self_test(struct ioatdma_device *device) +int __devinit ioat_dma_self_test(struct ioatdma_device *device) { int i; u8 *src; @@ -1039,7 +1039,7 @@ int __devinit ioat_probe(struct ioatdma_device *device) if (err) goto err_setup_interrupts; - err = ioat_dma_self_test(device); + err = device->self_test(device); if (err) goto err_self_test; @@ -1197,6 +1197,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) device->intr_quirk = ioat1_intr_quirk; device->enumerate_channels = ioat1_enumerate_channels; + device->self_test = ioat_dma_self_test; dma = &device->common; dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy; dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index c2939b289185..0e37e426c729 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -62,10 +62,10 @@ * @enumerate_channels: hw version specific channel enumeration * @cleanup_tasklet: select between the v2 and v3 cleanup routines * @timer_fn: select between the v2 and v3 timer watchdog routines + * @self_test: hardware version specific self test for each supported op type * * Note: the v3 cleanup routine supports raid operations */ - struct ioatdma_device { struct pci_dev *pdev; void __iomem *reg_base; @@ -80,6 +80,7 @@ struct ioatdma_device { int (*enumerate_channels)(struct ioatdma_device *device); void (*cleanup_tasklet)(unsigned long data); void (*timer_fn)(unsigned long data); + int (*self_test)(struct ioatdma_device *device); }; struct ioat_chan_common { @@ -313,6 +314,7 @@ static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len, int __devinit ioat_probe(struct ioatdma_device *device); int __devinit ioat_register(struct ioatdma_device *device); int __devinit ioat1_dma_probe(struct ioatdma_device *dev, int dca); +int __devinit ioat_dma_self_test(struct ioatdma_device *device); void __devexit ioat_dma_remove(struct ioatdma_device *device); struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index ee295d48ba2c..12c64e1a7e31 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -836,6 +836,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) device->enumerate_channels = ioat2_enumerate_channels; device->cleanup_tasklet = ioat2_cleanup_tasklet; device->timer_fn = ioat2_timer_event; + device->self_test = ioat_dma_self_test; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 957c205f91d0..927c08b08861 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -513,6 +513,280 @@ ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, src_cnt - 1, len, flags); } +static void __devinit ioat3_dma_test_callback(void *dma_async_param) +{ + struct completion *cmp = dma_async_param; + + complete(cmp); +} + +#define IOAT_NUM_SRC_TEST 6 /* must be <= 8 */ +static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) +{ + int i, src_idx; + struct page *dest; + struct page *xor_srcs[IOAT_NUM_SRC_TEST]; + struct page *xor_val_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dma_srcs[IOAT_NUM_SRC_TEST + 1]; + dma_addr_t dma_addr, dest_dma; + struct dma_async_tx_descriptor *tx; + struct dma_chan *dma_chan; + dma_cookie_t cookie; + u8 
cmp_byte = 0; + u32 cmp_word; + u32 xor_val_result; + int err = 0; + struct completion cmp; + unsigned long tmo; + struct device *dev = &device->pdev->dev; + struct dma_device *dma = &device->common; + + dev_dbg(dev, "%s\n", __func__); + + if (!dma_has_cap(DMA_XOR, dma->cap_mask)) + return 0; + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + xor_srcs[src_idx] = alloc_page(GFP_KERNEL); + if (!xor_srcs[src_idx]) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + } + + dest = alloc_page(GFP_KERNEL); + if (!dest) { + while (src_idx--) + __free_page(xor_srcs[src_idx]); + return -ENOMEM; + } + + /* Fill in src buffers */ + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) { + u8 *ptr = page_address(xor_srcs[src_idx]); + for (i = 0; i < PAGE_SIZE; i++) + ptr[i] = (1 << src_idx); + } + + for (src_idx = 0; src_idx < IOAT_NUM_SRC_TEST; src_idx++) + cmp_byte ^= (u8) (1 << src_idx); + + cmp_word = (cmp_byte << 24) | (cmp_byte << 16) | + (cmp_byte << 8) | cmp_byte; + + memset(page_address(dest), 0, PAGE_SIZE); + + dma_chan = container_of(dma->channels.next, struct dma_chan, + device_node); + if (dma->device_alloc_chan_resources(dma_chan) < 1) { + err = -ENODEV; + goto out; + } + + /* test xor */ + dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, + IOAT_NUM_SRC_TEST, PAGE_SIZE, + DMA_PREP_INTERRUPT); + + if (!tx) { + dev_err(dev, "Self-test xor prep failed\n"); + err = -ENODEV; + goto free_resources; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat3_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test xor setup failed\n"); + err = -ENODEV; + goto free_resources; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(dev, "Self-test xor timed out\n"); + err = -ENODEV; + goto free_resources; + } + + dma_sync_single_for_cpu(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE); + for (i = 0; i < (PAGE_SIZE / sizeof(u32)); i++) { + u32 *ptr = page_address(dest); + if (ptr[i] != cmp_word) { + dev_err(dev, "Self-test xor failed compare\n"); + err = -ENODEV; + goto free_resources; + } + } + dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_TO_DEVICE); + + /* skip validate if the capability is not present */ + if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask)) + goto free_resources; + + /* validate the sources with the destination page */ + for (i = 0; i < IOAT_NUM_SRC_TEST; i++) + xor_val_srcs[i] = xor_srcs[i]; + xor_val_srcs[i] = dest; + + xor_val_result = 1; + + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test zero prep failed\n"); + err = -ENODEV; + goto free_resources; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat3_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test zero setup failed\n"); + err = -ENODEV; + goto free_resources; + } + dma->device_issue_pending(dma_chan);
+ + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(dev, "Self-test validate timed out\n"); + err = -ENODEV; + goto free_resources; + } + + if (xor_val_result != 0) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto free_resources; + } + + /* skip memset if the capability is not present */ + if (!dma_has_cap(DMA_MEMSET, dma_chan->device->cap_mask)) + goto free_resources; + + /* test memset */ + dma_addr = dma_map_page(dev, dest, 0, + PAGE_SIZE, DMA_FROM_DEVICE); + tx = dma->device_prep_dma_memset(dma_chan, dma_addr, 0, PAGE_SIZE, + DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test memset prep failed\n"); + err = -ENODEV; + goto free_resources; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat3_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test memset setup failed\n"); + err = -ENODEV; + goto free_resources; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(dev, "Self-test memset timed out\n"); + err = -ENODEV; + goto free_resources; + } + + for (i = 0; i < PAGE_SIZE/sizeof(u32); i++) { + u32 *ptr = page_address(dest); + if (ptr[i]) { + dev_err(dev, "Self-test memset failed compare\n"); + err = -ENODEV; + goto free_resources; + } + } + + /* test for non-zero parity sum */ + xor_val_result = 0; + for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) + dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE, + DMA_TO_DEVICE); + tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, + IOAT_NUM_SRC_TEST + 1, PAGE_SIZE, + &xor_val_result, DMA_PREP_INTERRUPT); + if (!tx) { + dev_err(dev, "Self-test 2nd zero prep failed\n"); + err = -ENODEV; + goto free_resources; + } + + async_tx_ack(tx); + init_completion(&cmp); + tx->callback = ioat3_dma_test_callback; + tx->callback_param = &cmp; + cookie = tx->tx_submit(tx); + if (cookie < 0) { + dev_err(dev, "Self-test 2nd zero setup failed\n"); + err = -ENODEV; + goto free_resources; + } + dma->device_issue_pending(dma_chan); + + tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); + + if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + dev_err(dev, "Self-test 2nd validate timed out\n"); + err = -ENODEV; + goto free_resources; + } + + if (xor_val_result != SUM_CHECK_P_RESULT) { + dev_err(dev, "Self-test validate failed compare\n"); + err = -ENODEV; + goto free_resources; + } + +free_resources: + dma->device_free_chan_resources(dma_chan); +out: + src_idx = IOAT_NUM_SRC_TEST; + while (src_idx--) + __free_page(xor_srcs[src_idx]); + __free_page(dest); + return err; +} + +static int __devinit ioat3_dma_self_test(struct ioatdma_device *device) +{ + int rc = ioat_dma_self_test(device); + + if (rc) + return rc; + + rc = ioat_xor_val_self_test(device); + if (rc) + return rc; + + return 0; +} + int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) { struct pci_dev *pdev = device->pdev; @@ -526,6 +800,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) device->enumerate_channels = ioat2_enumerate_channels; device->cleanup_tasklet = ioat3_cleanup_tasklet; device->timer_fn = ioat3_timer_event; + device->self_test = ioat3_dma_self_test; dma = &device->common; dma->device_prep_dma_memcpy = 
ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; From d69d235b7da2778891640ee95efcd68075978904 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:42:59 -0700 Subject: [PATCH 0131/3409] ioat3: pq support ioat3.2 adds support for raid6 syndrome generation (xor sum of Galois field multiplication products) using up to 8 sources. It can also perform a pq-zero-sum operation to validate whether the syndrome for a given set of sources matches a previously computed syndrome. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 265 +++++++++++++++++++++++++++++++++++++- 1 file changed, 264 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 927c08b08861..ca2af0fa1c36 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -69,10 +69,12 @@ #define src_cnt_to_hw(x) ((x) - 2) /* provide a lookup table for setting the source address in the base or - * extended descriptor of an xor descriptor + * extended descriptor of an xor or pq descriptor */ static const u8 xor_idx_to_desc __read_mostly = 0xd0; static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 }; +static const u8 pq_idx_to_desc __read_mostly = 0xf8; +static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 }; static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) { @@ -89,6 +91,23 @@ static void xor_set_src(struct ioat_raw_descriptor *descs[2], raw->field[xor_idx_to_field[idx]] = addr + offset; } +static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) +{ + struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; + + return raw->field[pq_idx_to_field[idx]]; +} + +static void pq_set_src(struct ioat_raw_descriptor *descs[2], + dma_addr_t addr, u32 offset, u8 coef, int idx) +{ + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0]; + struct ioat_raw_descriptor *raw = descs[pq_idx_to_desc >> idx & 1]; + + raw->field[pq_idx_to_field[idx]] = addr + offset; + pq->coef[idx] = coef; +} + static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, int idx) { @@ -148,6 +167,58 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, PCI_DMA_FROMDEVICE, flags, 1); break; } + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ: { + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_ring_ent *ext; + struct ioat_pq_ext_descriptor *pq_ex = NULL; + int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); + struct ioat_raw_descriptor *descs[2]; + int i; + + if (src_cnt > 3) { + ext = ioat2_get_ring_ent(ioat, idx + 1); + pq_ex = ext->pq_ex; + } + + /* in the 'continue' case don't unmap the dests as sources */ + if (dmaf_p_disabled_continue(flags)) + src_cnt--; + else if (dmaf_continue(flags)) + src_cnt -= 3; + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + descs[0] = (struct ioat_raw_descriptor *) pq; + descs[1] = (struct ioat_raw_descriptor *) pq_ex; + for (i = 0; i < src_cnt; i++) { + dma_addr_t src = pq_get_src(descs, i); + + ioat_unmap(pdev, src - offset, len, + PCI_DMA_TODEVICE, flags, 0); + } + + /* the dests are sources in pq validate operations */ + if (pq->ctl_f.op == IOAT_OP_PQ_VAL) { + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + ioat_unmap(pdev, pq->p_addr - offset, + len, PCI_DMA_TODEVICE, flags, 0); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, + len, PCI_DMA_TODEVICE, flags, 0); + break; + } + } + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (!(flags & DMA_PREP_PQ_DISABLE_P))
+ ioat_unmap(pdev, pq->p_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + } + break; + } default: dev_err(&pdev->dev, "%s: unknown op type: %#x\n", __func__, desc->hw->ctl_f.op); @@ -164,6 +235,12 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5) return true; + } else if (hw->ctl_f.op == IOAT_OP_PQ || + hw->ctl_f.op == IOAT_OP_PQ_VAL) { + struct ioat_pq_descriptor *pq = desc->pq; + + if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3) + return true; } return false; @@ -513,6 +590,182 @@ ioat3_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, src_cnt - 1, len, flags); } +static void +dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct ioat_ring_ent *ext) +{ + struct device *dev = to_dev(&ioat->base); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_pq_ext_descriptor *pq_ex = ext ? ext->pq_ex : NULL; + struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex }; + int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) (pq_ex ? pq_ex->next : pq->next), + desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, + pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq_get_src(descs, i), pq->coef[i]); + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); +} + +static struct dma_async_tx_descriptor * +__ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioat_ring_ent *compl_desc; + struct ioat_ring_ent *desc; + struct ioat_ring_ent *ext; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + struct ioat_pq_ext_descriptor *pq_ex = NULL; + struct ioat_dma_descriptor *hw; + u32 offset = 0; + int num_descs; + int with_ext; + int i, s; + u16 idx; + u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; + + dev_dbg(to_dev(chan), "%s\n", __func__); + /* the engine requires at least two sources (we provide + * at least 1 implied source in the DMA_PREP_CONTINUE case) + */ + BUG_ON(src_cnt + dmaf_continue(flags) < 2); + + num_descs = ioat2_xferlen_to_descs(ioat, len); + /* we need 2x the number of descriptors to cover greater than 3 + * sources + */ + if (src_cnt > 3 || flags & DMA_PREP_CONTINUE) { + with_ext = 1; + num_descs *= 2; + } else + with_ext = 0; + + /* completion writes from the raid engine may pass completion + * writes from the legacy engine, so we need one extra null + * (legacy) descriptor to ensure all completion writes arrive in + * order. 
+ */ + if (likely(num_descs) && + ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) + /* pass */; + else + return NULL; + for (i = 0; i < num_descs; i += 1 + with_ext) { + struct ioat_raw_descriptor *descs[2]; + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + pq = desc->pq; + + /* save a branch by unconditionally retrieving the + * extended descriptor pq_set_src() knows to not write + * to it in the single descriptor case + */ + ext = ioat2_get_ring_ent(ioat, idx + i + with_ext); + pq_ex = ext->pq_ex; + + descs[0] = (struct ioat_raw_descriptor *) pq; + descs[1] = (struct ioat_raw_descriptor *) pq_ex; + + for (s = 0; s < src_cnt; s++) + pq_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq_set_src(descs, dst[0], offset, 0, s++); + pq_set_src(descs, dst[1], offset, 1, s++); + pq_set_src(descs, dst[1], offset, 0, s++); + } + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + pq->ctl_f.src_cnt = src_cnt_to_hw(s); + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + dump_pq_desc_dbg(ioat, desc, ext); + + /* completion descriptor carries interrupt bit */ + compl_desc = ioat2_get_ring_ent(ioat, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat, compl_desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + +static struct dma_async_tx_descriptor * +ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + unsigned long flags) +{ + /* handle the single source multiply case from the raid6 + * recovery path + */ + if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) { + dma_addr_t single_source[2]; + unsigned char single_source_coef[2]; + + BUG_ON(flags & DMA_PREP_PQ_DISABLE_Q); + single_source[0] = src[0]; + single_source[1] = src[0]; + single_source_coef[0] = scf[0]; + single_source_coef[1] = 0; + + return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, + single_source_coef, len, flags); + } else + return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, + len, flags); +} + +struct dma_async_tx_descriptor * +ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, size_t len, + enum sum_check_flags *pqres, unsigned long flags) +{ + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... 
so clear it here + */ + *pqres = 0; + + return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags); +} + static void __devinit ioat3_dma_test_callback(void *dma_async_param) { struct completion *cmp = dma_async_param; @@ -822,6 +1075,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_xor_val; } + if (cap & IOAT_CAP_PQ) { + dma_set_maxpq(dma, 8, 0); + dma->pq_align = 2; + + dma_cap_set(DMA_PQ, dma->cap_mask); + dma->device_prep_dma_pq = ioat3_prep_pq; + + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); + dma->device_prep_dma_pq_val = ioat3_prep_pq_val; + } /* -= IOAT ver.3 workarounds =- */ /* Write CHANERRMSK_INT with 3E07h to mask out the errors From ae786624c27411c1d38823f640b39f3d97412d5a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:43:00 -0700 Subject: [PATCH 0132/3409] ioat3: support xor via pq descriptors If a platform advertises pq capabilities, but not xor, then use ioat3_prep_pqxor and ioat3_prep_pqxor_val to simulate xor support. Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 49 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index ca2af0fa1c36..bb57491f3fb3 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -766,6 +766,44 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, flags); } +static struct dma_async_tx_descriptor * +ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, + unsigned int src_cnt, size_t len, unsigned long flags) +{ + unsigned char scf[src_cnt]; + dma_addr_t pq[2]; + + memset(scf, 0, src_cnt); + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[0] = dst; + pq[1] = ~0; + + return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags); +} + +struct dma_async_tx_descriptor * +ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, + unsigned int src_cnt, size_t len, + enum sum_check_flags *result, unsigned long flags) +{ + unsigned char scf[src_cnt]; + dma_addr_t pq[2]; + + /* the cleanup routine only sets bits on validate failure, it + * does not clear bits on validate success... so clear it here + */ + *result = 0; + + memset(scf, 0, src_cnt); + flags |= DMA_PREP_PQ_DISABLE_Q; + pq[0] = src[0]; + pq[1] = ~0; + + return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, + len, flags); +} + static void __devinit ioat3_dma_test_callback(void *dma_async_param) { struct completion *cmp = dma_async_param; @@ -1084,6 +1122,17 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma_cap_set(DMA_PQ_VAL, dma->cap_mask); dma->device_prep_dma_pq_val = ioat3_prep_pq_val; + + if (!(cap & IOAT_CAP_XOR)) { + dma->max_xor = 8; + dma->xor_align = 2; + + dma_cap_set(DMA_XOR, dma->cap_mask); + dma->device_prep_dma_xor = ioat3_prep_pqxor; + + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; + } } /* -= IOAT ver.3 workarounds =- */ From 58c8649e0e25de511c4a66ce3fa38891e2ec4e9e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:43:00 -0700 Subject: [PATCH 0133/3409] ioat3: interrupt descriptor support The async_tx api uses the DMA_INTERRUPT operation type to terminate a chain of issued operations with a callback routine. 
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v3.c | 39 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index bb57491f3fb3..ff4afdc8e59b 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -120,7 +120,8 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, switch (desc->hw->ctl_f.op) { case IOAT_OP_COPY: - ioat_dma_unmap(chan, flags, len, desc->hw); + if (!desc->hw->ctl_f.null) /* skip 'interrupt' ops */ + ioat_dma_unmap(chan, flags, len, desc->hw); break; case IOAT_OP_FILL: { struct ioat_fill_descriptor *hw = desc->fill; @@ -804,6 +805,38 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, len, flags); } +static struct dma_async_tx_descriptor * +ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_ring_ent *desc; + struct ioat_dma_descriptor *hw; + u16 idx; + + if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0) + desc = ioat2_get_ring_ent(ioat, idx); + else + return NULL; + + hw = desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = 1; + hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + hw->src_addr = 0; + hw->dst_addr = 0; + + desc->txd.flags = flags; + desc->len = 1; + + dump_desc_dbg(ioat, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + static void __devinit ioat3_dma_test_callback(void *dma_async_param) { struct completion *cmp = dma_async_param; @@ -1098,6 +1131,10 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; dma->device_is_tx_complete = ioat3_is_complete; + + dma_cap_set(DMA_INTERRUPT, dma->cap_mask); + dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; + cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); if (cap & IOAT_CAP_FILL_BLOCK) { dma_cap_set(DMA_MEMSET, dma->cap_mask); From b265b11fc1a0bd6ae5a7fde12e374583a52ab326 Mon Sep 17 00:00:00 2001 From: Tom Picard Date: Tue, 8 Sep 2009 17:43:01 -0700 Subject: [PATCH 0134/3409] ioat3: ioat3.2 pci ids for Jasper Forest Jasper Forest introduces raid offload support via ioat3.2 support. When raid offload is enabled two (out of 8 channels) will report raid5/raid6 offload capabilities. The remaining channels will only report ioat3.0 capabilities (memcpy). 
Signed-off-by: Tom Picard Signed-off-by: Dan Williams --- drivers/dma/ioat/pci.c | 13 +++++++++++++ include/linux/pci_ids.h | 10 ++++++++++ 2 files changed, 23 insertions(+) diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 626a508a4f8b..6c1aac5b3598 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -58,6 +58,19 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + + /* I/OAT v3.2 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + { 0, } }; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 0f71812d67d3..2b4b8ce53256 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2529,6 +2529,16 @@ #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c +#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718 +#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719 #define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14 #define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16 #define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18 From e3232714d465c42ac631929b990f5e35e2d8a955 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:43:02 -0700 Subject: [PATCH 0135/3409] ioat3: segregate raid engines The cleanup routine for the raid cases imposes extra checks for handling raid descriptors and extended descriptors. If the channel does not support raid it can avoid this extra overhead by using the ioat2 cleanup path. 
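The selection happens once at probe; the shared channel-init path then presumably dispatches through the chosen pointer along these lines (sketch, the exact init site isn't shown in these hunks):

	/* common init: run whichever cleanup routine probe selected */
	tasklet_init(&chan->cleanup_task, device->cleanup_tasklet,
		     (unsigned long) ioat);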
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 4 ++-- drivers/dma/ioat/dma_v2.h | 2 ++ drivers/dma/ioat/dma_v3.c | 25 ++++++++++++++++++------- 3 files changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 12c64e1a7e31..7bbbd83d12e6 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -206,7 +206,7 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) spin_unlock_bh(&chan->cleanup_lock); } -static void ioat2_cleanup_tasklet(unsigned long data) +void ioat2_cleanup_tasklet(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; @@ -258,7 +258,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) __ioat2_restart_chan(ioat); } -static void ioat2_timer_event(unsigned long data) +void ioat2_timer_event(unsigned long data) { struct ioat2_dma_chan *ioat = (void *) data; struct ioat_chan_common *chan = &ioat->base; diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index e23027d3dcbd..246e646b1904 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -183,5 +183,7 @@ enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie, void __ioat2_restart_chan(struct ioat2_dma_chan *ioat); bool reshape_ring(struct ioat2_dma_chan *ioat, int order); void __ioat2_issue_pending(struct ioat2_dma_chan *ioat); +void ioat2_cleanup_tasklet(unsigned long data); +void ioat2_timer_event(unsigned long data); extern struct kobj_type ioat2_ktype; #endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index ff4afdc8e59b..3686dddf6bff 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -1117,30 +1117,25 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) struct dma_device *dma; struct dma_chan *c; struct ioat_chan_common *chan; + bool is_raid_device = false; int err; u16 dev_id; u32 cap; device->enumerate_channels = ioat2_enumerate_channels; - device->cleanup_tasklet = ioat3_cleanup_tasklet; - device->timer_fn = ioat3_timer_event; device->self_test = ioat3_dma_self_test; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat3_is_complete; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); - if (cap & IOAT_CAP_FILL_BLOCK) { - dma_cap_set(DMA_MEMSET, dma->cap_mask); - dma->device_prep_dma_memset = ioat3_prep_memset_lock; - } if (cap & IOAT_CAP_XOR) { + is_raid_device = true; dma->max_xor = 8; dma->xor_align = 2; @@ -1151,6 +1146,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma->device_prep_dma_xor_val = ioat3_prep_xor_val; } if (cap & IOAT_CAP_PQ) { + is_raid_device = true; dma_set_maxpq(dma, 8, 0); dma->pq_align = 2; @@ -1171,6 +1167,21 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; } } + if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { + dma_cap_set(DMA_MEMSET, dma->cap_mask); + dma->device_prep_dma_memset = ioat3_prep_memset_lock; + } + + + if (is_raid_device) { + dma->device_is_tx_complete = ioat3_is_complete; + device->cleanup_tasklet = ioat3_cleanup_tasklet; + device->timer_fn = ioat3_timer_event; + } else { + 
dma->device_is_tx_complete = ioat2_is_complete; + device->cleanup_tasklet = ioat2_cleanup_tasklet; + device->timer_fn = ioat2_timer_event; + } /* -= IOAT ver.3 workarounds =- */ /* Write CHANERRMSK_INT with 3E07h to mask out the errors From 6506cbca6b5b36d682bd39afcbf3f575c81dddb6 Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 8 Sep 2009 17:43:03 -0700 Subject: [PATCH 0136/3409] Add MODULE_DEVICE_TABLE() so ioatdma module is autoloaded The ioatdma module is missing aliases for the PCI devices it supports, so it is not autoloaded on boot. Add a MODULE_DEVICE_TABLE() to get these aliases. Signed-off-by: Roland Dreier Signed-off-by: Dan Williams --- drivers/dma/ioat/pci.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 6c1aac5b3598..3b2f40e9b3bd 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -73,6 +73,7 @@ static struct pci_device_id ioat_pci_tbl[] = { { 0, } }; +MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id); From a6417dd58d6832f123f36c6f22c63ec1ab62ce1c Mon Sep 17 00:00:00 2001 From: Roland Dreier Date: Tue, 8 Sep 2009 17:43:03 -0700 Subject: [PATCH 0137/3409] I/OAT: Convert to PCI_VDEVICE() Trivial cleanup to make the PCI ID table easier to read. [dan.j.williams@intel.com: extended to v3.2 devices] Signed-off-by: Roland Dreier Signed-off-by: Dan Williams --- drivers/dma/ioat/pci.c | 46 +++++++++++++++++++++--------------------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 3b2f40e9b3bd..b77d3a2864ad 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -41,35 +41,35 @@ MODULE_AUTHOR("Intel Corporation"); static struct pci_device_id ioat_pci_tbl[] = { /* I/OAT v1 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, - { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, + { PCI_VDEVICE(UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, /* I/OAT v2 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, /* I/OAT v3 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_TBG7) }, 
/* I/OAT v3.2 platforms */ - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, - { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_JSF9) }, { 0, } }; From e0bd0f8cb09cf3ccac1425f0f3a6705106c4d65c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:02 -0700 Subject: [PATCH 0138/3409] dw_dmac: implement a private tx_list Drop dw_dmac's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. Cc: Haavard Skinnemoen Signed-off-by: Dan Williams --- drivers/dma/dw_dmac.c | 19 ++++++++++--------- drivers/dma/dw_dmac_regs.h | 1 + 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 98c9a847bf51..514ef7d71bcf 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -116,7 +116,7 @@ static void dwc_sync_desc_for_cpu(struct dw_dma_chan *dwc, struct dw_desc *desc) { struct dw_desc *child; - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) dma_sync_single_for_cpu(chan2parent(&dwc->chan), child->txd.phys, sizeof(child->lli), DMA_TO_DEVICE); @@ -137,11 +137,11 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc_sync_desc_for_cpu(dwc, desc); spin_lock_bh(&dwc->lock); - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dwc->chan), "moving child desc %p to freelist\n", child); - list_splice_init(&desc->txd.tx_list, &dwc->free_list); + list_splice_init(&desc->tx_list, &dwc->free_list); dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &dwc->free_list); spin_unlock_bh(&dwc->lock); @@ -209,7 +209,7 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) param = txd->callback_param; dwc_sync_desc_for_cpu(dwc, desc); - list_splice_init(&txd->tx_list, &dwc->free_list); + list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); /* @@ -289,7 +289,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) /* This one is currently in progress */ return; - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) if (child->lli.llp == llp) /* Currently in progress */ return; @@ -356,7 +356,7 @@ static void 
dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) dev_printk(KERN_CRIT, chan2dev(&dwc->chan), " cookie: %d\n", bad_desc->txd.cookie); dwc_dump_lli(dwc, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + list_for_each_entry(child, &bad_desc->tx_list, desc_node) dwc_dump_lli(dwc, &child->lli); /* Pretend the descriptor completed successfully */ @@ -608,7 +608,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, prev->txd.phys, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; } @@ -700,7 +700,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; total_len += len; @@ -746,7 +746,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, sizeof(prev->lli), DMA_TO_DEVICE); list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; total_len += len; @@ -902,6 +902,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) break; } + INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = dwc_tx_submit; desc->txd.flags = DMA_CTRL_ACK; diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h index 13a580767031..d9a939f67f46 100644 --- a/drivers/dma/dw_dmac_regs.h +++ b/drivers/dma/dw_dmac_regs.h @@ -217,6 +217,7 @@ struct dw_desc { /* THEN values for driver housekeeping */ struct list_head desc_node; + struct list_head tx_list; struct dma_async_tx_descriptor txd; size_t len; }; From eda34234578fd822c950fd06b5c5ff7ac08b3001 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:02 -0700 Subject: [PATCH 0139/3409] fsldma: implement a private tx_list Drop fsldma's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. 
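The conversion has the same shape in every driver touched by this series; schematically, using fsldma's names from the hunks below (sketch):

	struct fsl_desc_sw {
		struct fsl_dma_ld_hw hw;
		struct list_head node;
		struct list_head tx_list;	/* private chain, was txd.tx_list */
		struct dma_async_tx_descriptor async_tx;
		/* ... */
	};

	/* allocation: initialize the private list */
	INIT_LIST_HEAD(&desc_sw->tx_list);

	/* chaining and submission now touch only the driver's list */
	list_add_tail(&new->node, &first->tx_list);
	list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);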
Cc: Li Yang Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 16 +++++++++------- drivers/dma/fsldma.h | 1 + 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index ef87a8984145..73dd74823195 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -326,7 +326,8 @@ static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) { struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); - struct fsl_desc_sw *desc; + struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); + struct fsl_desc_sw *child; unsigned long flags; dma_cookie_t cookie; @@ -334,7 +335,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&fsl_chan->desc_lock, flags); cookie = fsl_chan->common.cookie; - list_for_each_entry(desc, &tx->tx_list, node) { + list_for_each_entry(child, &desc->tx_list, node) { cookie++; if (cookie < 0) cookie = 1; @@ -343,8 +344,8 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) } fsl_chan->common.cookie = cookie; - append_ld_queue(fsl_chan, tx_to_fsl_desc(tx)); - list_splice_init(&tx->tx_list, fsl_chan->ld_queue.prev); + append_ld_queue(fsl_chan, desc); + list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev); spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); @@ -366,6 +367,7 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor( desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); if (desc_sw) { memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); + INIT_LIST_HEAD(&desc_sw->tx_list); dma_async_tx_descriptor_init(&desc_sw->async_tx, &fsl_chan->common); desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; @@ -455,7 +457,7 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) new->async_tx.flags = flags; /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &new->async_tx.tx_list); + list_add_tail(&new->node, &new->tx_list); /* Set End-of-link to the last link descriptor of new list*/ set_ld_eol(fsl_chan, new); @@ -513,7 +515,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( dma_dest += copy; /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &first->async_tx.tx_list); + list_add_tail(&new->node, &first->tx_list); } while (len); new->async_tx.flags = flags; /* client is in control of this ack */ @@ -528,7 +530,7 @@ fail: if (!first) return NULL; - list = &first->async_tx.tx_list; + list = &first->tx_list; list_for_each_entry_safe_reverse(new, prev, list, node) { list_del(&new->node); dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index dc7f26865797..4493afed53f0 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -90,6 +90,7 @@ struct fsl_dma_ld_hw { struct fsl_desc_sw { struct fsl_dma_ld_hw hw; struct list_head node; + struct list_head tx_list; struct dma_async_tx_descriptor async_tx; struct list_head *ld; void *priv; From 308136d1abcb2d759bac40ed4f5d42ac4af59d8b Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:02 -0700 Subject: [PATCH 0140/3409] iop-adma: implement a private tx_list Drop iop-adma's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. 
Signed-off-by: Dan Williams --- arch/arm/include/asm/hardware/iop_adma.h | 2 ++ drivers/dma/iop-adma.c | 9 +++++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/arm/include/asm/hardware/iop_adma.h b/arch/arm/include/asm/hardware/iop_adma.h index 385c6e8cbbd2..95dc133d0a7f 100644 --- a/arch/arm/include/asm/hardware/iop_adma.h +++ b/arch/arm/include/asm/hardware/iop_adma.h @@ -86,6 +86,7 @@ struct iop_adma_chan { * @idx: pool index * @unmap_src_cnt: number of xor sources * @unmap_len: transaction bytecount + * @tx_list: list of descriptors that are associated with one operation * @async_tx: support for the async_tx api * @group_list: list of slots that make up a multi-descriptor transaction * for example transfer lengths larger than the supported hw max @@ -102,6 +103,7 @@ struct iop_adma_desc_slot { u16 idx; u16 unmap_src_cnt; size_t unmap_len; + struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { u32 *xor_check_result; diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c index 2f052265122f..9f6c16f8e2be 100644 --- a/drivers/dma/iop-adma.c +++ b/drivers/dma/iop-adma.c @@ -370,7 +370,7 @@ retry: } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; - list_splice(&chain, &alloc_tail->async_tx.tx_list); + list_splice(&chain, &alloc_tail->tx_list); iop_chan->last_used = last_used; iop_desc_clear_next_desc(alloc_start); iop_desc_clear_next_desc(alloc_tail); @@ -429,7 +429,7 @@ iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) old_chain_tail = list_entry(iop_chan->chain.prev, struct iop_adma_desc_slot, chain_node); - list_splice_init(&sw_desc->async_tx.tx_list, + list_splice_init(&sw_desc->tx_list, &old_chain_tail->chain_node); /* fix up the hardware chain */ @@ -496,6 +496,7 @@ static int iop_adma_alloc_chan_resources(struct dma_chan *chan) dma_async_tx_descriptor_init(&slot->async_tx, chan); slot->async_tx.tx_submit = iop_adma_tx_submit; + INIT_LIST_HEAD(&slot->tx_list); INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); hw_desc = (char *) iop_chan->device->dma_desc_pool; @@ -1296,7 +1297,7 @@ static void iop_chan_start_null_memcpy(struct iop_adma_chan *iop_chan) if (sw_desc) { grp_start = sw_desc->group_head; - list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); + list_splice_init(&sw_desc->tx_list, &iop_chan->chain); async_tx_ack(&sw_desc->async_tx); iop_desc_init_memcpy(grp_start, 0); iop_desc_set_byte_count(grp_start, iop_chan, 0); @@ -1352,7 +1353,7 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan) sw_desc = iop_adma_alloc_slots(iop_chan, slot_cnt, slots_per_op); if (sw_desc) { grp_start = sw_desc->group_head; - list_splice_init(&sw_desc->async_tx.tx_list, &iop_chan->chain); + list_splice_init(&sw_desc->tx_list, &iop_chan->chain); async_tx_ack(&sw_desc->async_tx); iop_desc_init_null_xor(grp_start, 2, 0); iop_desc_set_byte_count(grp_start, iop_chan, 0); From ea25968a32a621b02c3715d6b649f0c6ef53c24e Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:02 -0700 Subject: [PATCH 0141/3409] ioat: implement a private tx_list Drop ioatdma's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. 
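The submit path changes the same way: the chain of child descriptors is spliced from the driver-private list instead of from tx->tx_list. Schematically, for the hypothetical "foo" driver sketched earlier (mirroring the ioat1_tx_submit hunk below):

#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_chan {
	spinlock_t lock;
	struct list_head used_desc;	/* chain queued to hardware */
	struct dma_chan common;
};

#define to_foo_chan(c) container_of(c, struct foo_chan, common)

static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_desc *desc = container_of(tx, struct foo_desc, txd);
	struct foo_chan *fc = to_foo_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_bh(&fc->lock);
	cookie = fc->common.cookie + 1;
	if (cookie < 0)
		cookie = 1;
	fc->common.cookie = tx->cookie = cookie;
	/* splice the driver-private chain, not tx->tx_list */
	list_splice_tail_init(&desc->tx_list, &fc->used_desc);
	spin_unlock_bh(&fc->lock);

	return cookie;
}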
Cc: Maciej Sosnowski Signed-off-by: Dan Williams --- drivers/dma/ioat/dma.c | 7 ++++--- drivers/dma/ioat/dma.h | 3 ++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 17a518d0386f..21527b89590c 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -251,12 +251,12 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie); /* write address into NextDescriptor field of last desc in chain */ - first = to_ioat_desc(tx->tx_list.next); + first = to_ioat_desc(desc->tx_list.next); chain_tail = to_ioat_desc(ioat->used_desc.prev); /* make descriptor updates globally visible before chaining */ wmb(); chain_tail->hw->next = first->txd.phys; - list_splice_tail_init(&tx->tx_list, &ioat->used_desc); + list_splice_tail_init(&desc->tx_list, &ioat->used_desc); dump_desc_dbg(ioat, chain_tail); dump_desc_dbg(ioat, first); @@ -297,6 +297,7 @@ ioat_dma_alloc_descriptor(struct ioat_dma_chan *ioat, gfp_t flags) memset(desc, 0, sizeof(*desc)); + INIT_LIST_HEAD(&desc_sw->tx_list); dma_async_tx_descriptor_init(&desc_sw->txd, &ioat->base.common); desc_sw->txd.tx_submit = ioat1_tx_submit; desc_sw->hw = desc; @@ -521,7 +522,7 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest, desc->txd.flags = flags; desc->len = total_len; - list_splice(&chain, &desc->txd.tx_list); + list_splice(&chain, &desc->tx_list); hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); hw->ctl_f.compl_write = 1; hw->tx_cnt = tx_cnt; diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index d9d6a7e3cd76..8966fa5453a7 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -157,7 +157,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, * struct ioat_desc_sw - wrapper around hardware descriptor * @hw: hardware DMA descriptor * @node: this descriptor will either be on the free list, - * or attached to a transaction list (async_tx.tx_list) + * or attached to a transaction list (tx_list) * @txd: the generic software descriptor for all engines * @id: identifier for debug */ @@ -165,6 +165,7 @@ struct ioat_desc_sw { struct ioat_dma_descriptor *hw; struct list_head node; size_t len; + struct list_head tx_list; struct dma_async_tx_descriptor txd; #ifdef DEBUG int id; From 64203b67274680e95e0c2eec935a22fc94e9ecb5 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:03 -0700 Subject: [PATCH 0142/3409] mv_xor: implement a private tx_list Drop mv_xor's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. 
Cc: Saeed Bishara Signed-off-by: Dan Williams --- drivers/dma/mv_xor.c | 7 ++++--- drivers/dma/mv_xor.h | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index 3f23eabe09f2..466ab10c1ff1 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -517,7 +517,7 @@ retry: } alloc_tail->group_head = alloc_start; alloc_tail->async_tx.cookie = -EBUSY; - list_splice(&chain, &alloc_tail->async_tx.tx_list); + list_splice(&chain, &alloc_tail->tx_list); mv_chan->last_used = last_used; mv_desc_clear_next_desc(alloc_start); mv_desc_clear_next_desc(alloc_tail); @@ -565,14 +565,14 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx) cookie = mv_desc_assign_cookie(mv_chan, sw_desc); if (list_empty(&mv_chan->chain)) - list_splice_init(&sw_desc->async_tx.tx_list, &mv_chan->chain); + list_splice_init(&sw_desc->tx_list, &mv_chan->chain); else { new_hw_chain = 0; old_chain_tail = list_entry(mv_chan->chain.prev, struct mv_xor_desc_slot, chain_node); - list_splice_init(&grp_start->async_tx.tx_list, + list_splice_init(&grp_start->tx_list, &old_chain_tail->chain_node); if (!mv_can_chain(grp_start)) @@ -632,6 +632,7 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan) slot->async_tx.tx_submit = mv_xor_tx_submit; INIT_LIST_HEAD(&slot->chain_node); INIT_LIST_HEAD(&slot->slot_node); + INIT_LIST_HEAD(&slot->tx_list); hw_desc = (char *) mv_chan->device->dma_desc_pool; slot->async_tx.phys = (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE]; diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h index 06cafe1ef521..977b592e976b 100644 --- a/drivers/dma/mv_xor.h +++ b/drivers/dma/mv_xor.h @@ -126,9 +126,8 @@ struct mv_xor_chan { * @idx: pool index * @unmap_src_cnt: number of xor sources * @unmap_len: transaction bytecount + * @tx_list: list of slots that make up a multi-descriptor transaction * @async_tx: support for the async_tx api - * @group_list: list of slots that make up a multi-descriptor transaction - * for example transfer lengths larger than the supported hw max * @xor_check_result: result of zero sum * @crc32_result: result crc calculation */ @@ -145,6 +144,7 @@ struct mv_xor_desc_slot { u16 unmap_src_cnt; u32 value; size_t unmap_len; + struct list_head tx_list; struct dma_async_tx_descriptor async_tx; union { u32 *xor_check_result; From 285a3c71640ad7101b7237b8fbaa4ead22c6551c Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:03 -0700 Subject: [PATCH 0143/3409] at_hdmac: implement a private tx_list Drop at_hdmac's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. 
Cc: Nicolas Ferre Signed-off-by: Dan Williams --- drivers/dma/at_hdmac.c | 17 +++++++++-------- drivers/dma/at_hdmac_regs.h | 1 + 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 9a1e5fb412ed..da4e8b710a9b 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -87,6 +87,7 @@ static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan, desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); if (desc) { memset(desc, 0, sizeof(struct at_desc)); + INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, chan); /* txd.flags will be overwritten in prep functions */ desc->txd.flags = DMA_CTRL_ACK; @@ -150,11 +151,11 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) struct at_desc *child; spin_lock_bh(&atchan->lock); - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&atchan->chan_common), "moving child desc %p to freelist\n", child); - list_splice_init(&desc->txd.tx_list, &atchan->free_list); + list_splice_init(&desc->tx_list, &atchan->free_list); dev_vdbg(chan2dev(&atchan->chan_common), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &atchan->free_list); @@ -247,7 +248,7 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) param = txd->callback_param; /* move children to free_list */ - list_splice_init(&txd->tx_list, &atchan->free_list); + list_splice_init(&desc->tx_list, &atchan->free_list); /* move myself to free_list */ list_move(&desc->desc_node, &atchan->free_list); @@ -334,7 +335,7 @@ static void atc_cleanup_descriptors(struct at_dma_chan *atchan) /* This one is currently in progress */ return; - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) if (!(child->lli.ctrla & ATC_DONE)) /* Currently in progress */ return; @@ -407,7 +408,7 @@ static void atc_handle_error(struct at_dma_chan *atchan) dev_crit(chan2dev(&atchan->chan_common), " cookie: %d\n", bad_desc->txd.cookie); atc_dump_lli(atchan, &bad_desc->lli); - list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + list_for_each_entry(child, &bad_desc->tx_list, desc_node) atc_dump_lli(atchan, &child->lli); /* Pretend the descriptor completed successfully */ @@ -587,7 +588,7 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, prev->lli.dscr = desc->txd.phys; /* insert the link descriptor to the LD ring */ list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; } @@ -687,7 +688,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, prev->lli.dscr = desc->txd.phys; /* insert the link descriptor to the LD ring */ list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; total_len += len; @@ -729,7 +730,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, prev->lli.dscr = desc->txd.phys; /* insert the link descriptor to the LD ring */ list_add_tail(&desc->desc_node, - &first->txd.tx_list); + &first->tx_list); } prev = desc; total_len += len; diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 4c972afc49ec..495457e3dc4b 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -165,6 +165,7 @@ struct at_desc { struct at_lli lli; /* THEN values for driver housekeeping */ + struct list_head tx_list; struct dma_async_tx_descriptor txd; struct list_head 
desc_node; size_t len; From 1979b186b80449ac6574d97c254b694c8a99b703 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:03 -0700 Subject: [PATCH 0144/3409] txx9dmac: implement a private tx_list Drop txx9dmac's use of tx_list from struct dma_async_tx_descriptor in preparation for removal of this field. Cc: Atsushi Nemoto Signed-off-by: Dan Williams --- drivers/dma/txx9dmac.c | 24 +++++++++++------------- drivers/dma/txx9dmac.h | 1 + 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 88dab52926f4..197c7bc37895 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -180,9 +180,8 @@ static struct txx9dmac_desc *txx9dmac_first_queued(struct txx9dmac_chan *dc) static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) { - if (!list_empty(&desc->txd.tx_list)) - desc = list_entry(desc->txd.tx_list.prev, - struct txx9dmac_desc, desc_node); + if (!list_empty(&desc->tx_list)) + desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); return desc; } @@ -197,6 +196,7 @@ static struct txx9dmac_desc *txx9dmac_desc_alloc(struct txx9dmac_chan *dc, desc = kzalloc(sizeof(*desc), flags); if (!desc) return NULL; + INIT_LIST_HEAD(&desc->tx_list); dma_async_tx_descriptor_init(&desc->txd, &dc->chan); desc->txd.tx_submit = txx9dmac_tx_submit; /* txd.flags will be overwritten in prep funcs */ @@ -245,7 +245,7 @@ static void txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, struct txx9dmac_dev *ddev = dc->ddev; struct txx9dmac_desc *child; - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) dma_sync_single_for_cpu(chan2parent(&dc->chan), child->txd.phys, ddev->descsize, DMA_TO_DEVICE); @@ -267,11 +267,11 @@ static void txx9dmac_desc_put(struct txx9dmac_chan *dc, txx9dmac_sync_desc_for_cpu(dc, desc); spin_lock_bh(&dc->lock); - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) dev_vdbg(chan2dev(&dc->chan), "moving child desc %p to freelist\n", child); - list_splice_init(&desc->txd.tx_list, &dc->free_list); + list_splice_init(&desc->tx_list, &dc->free_list); dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", desc); list_add(&desc->desc_node, &dc->free_list); @@ -429,7 +429,7 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, param = txd->callback_param; txx9dmac_sync_desc_for_cpu(dc, desc); - list_splice_init(&txd->tx_list, &dc->free_list); + list_splice_init(&desc->tx_list, &dc->free_list); list_move(&desc->desc_node, &dc->free_list); if (!ds) { @@ -571,7 +571,7 @@ static void txx9dmac_handle_error(struct txx9dmac_chan *dc, u32 csr) "Bad descriptor submitted for DMA! 
(cookie: %d)\n", bad_desc->txd.cookie); txx9dmac_dump_desc(dc, &bad_desc->hwdesc); - list_for_each_entry(child, &bad_desc->txd.tx_list, desc_node) + list_for_each_entry(child, &bad_desc->tx_list, desc_node) txx9dmac_dump_desc(dc, &child->hwdesc); /* Pretend the descriptor completed successfully */ txx9dmac_descriptor_complete(dc, bad_desc); @@ -613,7 +613,7 @@ static void txx9dmac_scan_descriptors(struct txx9dmac_chan *dc) return; } - list_for_each_entry(child, &desc->txd.tx_list, desc_node) + list_for_each_entry(child, &desc->tx_list, desc_node) if (desc_read_CHAR(dc, child) == chain) { /* Currently in progress */ if (csr & TXX9_DMA_CSR_ABCHC) @@ -823,8 +823,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dma_sync_single_for_device(chan2parent(&dc->chan), prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); - list_add_tail(&desc->desc_node, - &first->txd.tx_list); + list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } @@ -919,8 +918,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, prev->txd.phys, ddev->descsize, DMA_TO_DEVICE); - list_add_tail(&desc->desc_node, - &first->txd.tx_list); + list_add_tail(&desc->desc_node, &first->tx_list); } prev = desc; } diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h index c907ff01d276..365d42366b9f 100644 --- a/drivers/dma/txx9dmac.h +++ b/drivers/dma/txx9dmac.h @@ -231,6 +231,7 @@ struct txx9dmac_desc { /* THEN values for driver housekeeping */ struct list_head desc_node ____cacheline_aligned; + struct list_head tx_list; struct dma_async_tx_descriptor txd; size_t len; }; From 0803172778901e24a75ab074798d98c2b7411559 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: [PATCH 0145/3409] dmaengine: kill tx_list The tx_list attribute of struct dma_async_tx_descriptor is common to most, but not all dma driver implementations. None of the upper level code (dmaengine/async_tx) uses it, so allow drivers to implement it locally if they need it. This saves sizeof(struct list_head) bytes for drivers that do not manage descriptors with a linked list (e.g.: ioatdma v2,3). 
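With the field gone, the core initializer shrinks to channel assignment plus lock setup, and any driver that still chains descriptors owns both the storage and its initialization. The resulting function, as produced by the hunk below:

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	/* INIT_LIST_HEAD(&tx->tx_list) is gone along with the field */
}

On a 64-bit build this trims two pointers, i.e. 16 bytes, from every descriptor in drivers that do not need the list.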
Signed-off-by: Dan Williams --- drivers/dma/dmaengine.c | 1 - include/linux/dmaengine.h | 3 --- 2 files changed, 4 deletions(-) diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 5a87384ea4ff..562d182eae66 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -933,7 +933,6 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, { tx->chan = chan; spin_lock_init(&tx->lock); - INIT_LIST_HEAD(&tx->tx_list); } EXPORT_SYMBOL(dma_async_tx_descriptor_init); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ffefba81c818..f114bc7790bc 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -180,8 +180,6 @@ typedef void (*dma_async_tx_callback)(void *dma_async_param); * @flags: flags to augment operation preparation, control completion, and * communicate status * @phys: physical address of the descriptor - * @tx_list: driver common field for operations that require multiple - * descriptors * @chan: target channel for this operation * @tx_submit: set the prepared descriptor(s) to be executed by the engine * @callback: routine to call after this operation is complete @@ -195,7 +193,6 @@ struct dma_async_tx_descriptor { dma_cookie_t cookie; enum dma_ctrl_flags flags; /* not a 'long' to pack with cookie */ dma_addr_t phys; - struct list_head tx_list; struct dma_chan *chan; dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); dma_async_tx_callback callback; From 162b96e63e518aa6ff029ce23de12d7f027483bf Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: [PATCH 0146/3409] ioat2,3: cacheline align software descriptor allocations All the necessary fields for handling an ioat2,3 ring entry can fit into one cacheline. Move ->len prior to ->txd in struct ioat_ring_ent, and move allocation of these entries to a hw-cache-aligned kmem cache to reduce the number of cachelines dirtied for descriptor management. 
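The allocation change in miniature: a hardware-cacheline-aligned kmem cache replaces kzalloc(), and the hot fields sit ahead of ->txd so the whole working set shares one line. A sketch with hypothetical names only (the real cache is ioat2_cache, created in ioat_init_module() in the pci.c hunk below):

#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/slab.h>

struct ent {
	void *hw;			/* hardware descriptor */
	size_t len;			/* hot field, placed ahead of txd */
	struct dma_async_tx_descriptor txd;
};

static struct kmem_cache *ent_cache;

static int __init ent_module_init(void)
{
	/* SLAB_HWCACHE_ALIGN starts each object on a cacheline boundary */
	ent_cache = kmem_cache_create("ent", sizeof(struct ent), 0,
				      SLAB_HWCACHE_ALIGN, NULL);
	return ent_cache ? 0 : -ENOMEM;
}

static void __exit ent_module_exit(void)
{
	kmem_cache_destroy(ent_cache);
}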
Signed-off-by: Dan Williams --- drivers/dma/ioat/dma_v2.c | 5 +++-- drivers/dma/ioat/dma_v2.h | 3 ++- drivers/dma/ioat/pci.c | 16 +++++++++++++++- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index 460b77301332..fa3d6db6624c 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -399,11 +399,12 @@ static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t f return NULL; memset(hw, 0, sizeof(*hw)); - desc = kzalloc(sizeof(*desc), flags); + desc = kmem_cache_alloc(ioat2_cache, flags); if (!desc) { pci_pool_free(dma->dma_pool, hw, phys); return NULL; } + memset(desc, 0, sizeof(*desc)); dma_async_tx_descriptor_init(&desc->txd, chan); desc->txd.tx_submit = ioat2_tx_submit_unlock; @@ -418,7 +419,7 @@ static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *cha dma = to_ioatdma_device(chan->device); pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys); - kfree(desc); + kmem_cache_free(ioat2_cache, desc); } static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags) diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index 9baa3d6065ff..ac00adc81974 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -116,8 +116,8 @@ static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len struct ioat_ring_ent { struct ioat_dma_descriptor *hw; - struct dma_async_tx_descriptor txd; size_t len; + struct dma_async_tx_descriptor txd; #ifdef DEBUG int id; #endif @@ -143,4 +143,5 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); +extern struct kmem_cache *ioat2_cache; #endif /* IOATDMA_V2_H */ diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index c4e432269252..61086c6bbf42 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -69,6 +69,8 @@ static int ioat_dca_enabled = 1; module_param(ioat_dca_enabled, int, 0644); MODULE_PARM_DESC(ioat_dca_enabled, "control support of dca service (default: 1)"); +struct kmem_cache *ioat2_cache; + #define DRV_NAME "ioatdma" static struct pci_driver ioat_pci_driver = { @@ -168,12 +170,24 @@ static void __devexit ioat_remove(struct pci_dev *pdev) static int __init ioat_init_module(void) { - return pci_register_driver(&ioat_pci_driver); + int err; + + ioat2_cache = kmem_cache_create("ioat2", sizeof(struct ioat_ring_ent), + 0, SLAB_HWCACHE_ALIGN, NULL); + if (!ioat2_cache) + return -ENOMEM; + + err = pci_register_driver(&ioat_pci_driver); + if (err) + kmem_cache_destroy(ioat2_cache); + + return err; } module_init(ioat_init_module); static void __exit ioat_exit_module(void) { pci_unregister_driver(&ioat_pci_driver); + kmem_cache_destroy(ioat2_cache); } module_exit(ioat_exit_module); From e6c7ecb64e08ef346cb7062b4a5421f00bc602bd Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: [PATCH 0147/3409] fsldma: split apart external pause and request count features When using the Freescale DMA controller in external control mode, both the request count and external pause bits need to be setup correctly. This was being done with the same function. The 83xx controller lacks the external pause feature, but has a similar feature called external start. 
This feature requires that the request count bits be setup correctly. Split the function into two parts, to make it possible to use the external start feature on the 83xx controller. Signed-off-by: Ira W. Snyder Signed-off-by: Dan Williams --- drivers/dma/fsldma.c | 45 ++++++++++++++++++++++++++++---------------- drivers/dma/fsldma.h | 3 ++- 2 files changed, 31 insertions(+), 17 deletions(-) diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 73dd74823195..7a0cb6064f83 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -279,29 +279,41 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size) } } +/** + * fsl_chan_set_request_count - Set DMA Request Count for external control + * @fsl_chan : Freescale DMA channel + * @size : Number of bytes to transfer in a single request + * + * The Freescale DMA channel can be controlled by the external signal DREQ#. + * The DMA request count is how many bytes are allowed to transfer before + * pausing the channel, after which a new assertion of DREQ# resumes channel + * operation. + * + * A size of 0 disables external pause control. The maximum size is 1024. + */ +static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size) +{ + BUG_ON(size > 1024); + DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, + DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) + | ((__ilog2(size) << 24) & 0x0f000000), + 32); +} + /** * fsl_chan_toggle_ext_pause - Toggle channel external pause status * @fsl_chan : Freescale DMA channel - * @size : Pause control size, 0 for disable external pause control. - * The maximum is 1024. + * @enable : 0 is disabled, 1 is enabled. * - * The Freescale DMA channel can be controlled by the external - * signal DREQ#. The pause control size is how many bytes are allowed - * to transfer before pausing the channel, after which a new assertion - * of DREQ# resumes channel operation. + * The Freescale DMA channel can be controlled by the external signal DREQ#. + * The DMA Request Count feature should be used in addition to this feature + * to set the number of bytes to transfer before pausing the channel. 
*/ -static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) +static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable) { - if (size > 1024) - return; - - if (size) { - DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, - DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) - | ((__ilog2(size) << 24) & 0x0f000000), - 32); + if (enable) fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; - } else + else fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; } @@ -885,6 +897,7 @@ static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev, new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start; new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size; new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size; + new_fsl_chan->set_request_count = fsl_chan_set_request_count; } spin_lock_init(&new_fsl_chan->desc_lock); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 4493afed53f0..0df14cbb8ca3 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -144,10 +144,11 @@ struct fsl_dma_chan { struct tasklet_struct tasklet; u32 feature; - void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int size); + void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable); void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size); void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size); + void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size); }; #define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common) From bbea0b6e0d214ef1511b9c6ccf3af26b38f0af7d Mon Sep 17 00:00:00 2001 From: Ira Snyder Date: Tue, 8 Sep 2009 17:53:04 -0700 Subject: [PATCH 0148/3409] fsldma: Add DMA_SLAVE support Use the DMA_SLAVE capability of the DMAEngine API to copy/from a scatterlist into an arbitrary list of hardware address/length pairs. This allows a single DMA transaction to copy data from several different devices into a scatterlist at the same time. This also adds support to enable some controller-specific features such as external start and external pause for a DMA transaction. [dan.j.williams@intel.com: rebased on tx_list movement] Signed-off-by: Ira W. Snyder Acked-by: Li Yang Acked-by: Kumar Gala Signed-off-by: Dan Williams --- arch/powerpc/include/asm/fsldma.h | 136 ++++++++++++++++++ drivers/dma/fsldma.c | 227 ++++++++++++++++++++++++++++++ 2 files changed, 363 insertions(+) create mode 100644 arch/powerpc/include/asm/fsldma.h diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h new file mode 100644 index 000000000000..a67aeed17d40 --- /dev/null +++ b/arch/powerpc/include/asm/fsldma.h @@ -0,0 +1,136 @@ +/* + * Freescale MPC83XX / MPC85XX DMA Controller + * + * Copyright (c) 2009 Ira W. Snyder + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + */ + +#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ +#define __ARCH_POWERPC_ASM_FSLDMA_H__ + +#include + +/* + * Definitions for the Freescale DMA controller's DMA_SLAVE implemention + * + * The Freescale DMA_SLAVE implementation was designed to handle many-to-many + * transfers. An example usage would be an accelerated copy between two + * scatterlists. Another example use would be an accelerated copy from + * multiple non-contiguous device buffers into a single scatterlist. 
+ * + * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This + * structure contains a list of hardware addresses that should be copied + * to/from the scatterlist passed into device_prep_slave_sg(). The structure + * also has some fields to enable hardware-specific features. + */ + +/** + * struct fsl_dma_hw_addr + * @entry: linked list entry + * @address: the hardware address + * @length: length to transfer + * + * Holds a single physical hardware address / length pair for use + * with the DMAEngine DMA_SLAVE API. + */ +struct fsl_dma_hw_addr { + struct list_head entry; + + dma_addr_t address; + size_t length; +}; + +/** + * struct fsl_dma_slave + * @addresses: a linked list of struct fsl_dma_hw_addr structures + * @request_count: value for DMA request count + * @src_loop_size: setup and enable constant source-address DMA transfers + * @dst_loop_size: setup and enable constant destination address DMA transfers + * @external_start: enable externally started DMA transfers + * @external_pause: enable externally paused DMA transfers + * + * Holds a list of address / length pairs for use with the DMAEngine + * DMA_SLAVE API implementation for the Freescale DMA controller. + */ +struct fsl_dma_slave { + + /* List of hardware address/length pairs */ + struct list_head addresses; + + /* Support for extra controller features */ + unsigned int request_count; + unsigned int src_loop_size; + unsigned int dst_loop_size; + bool external_start; + bool external_pause; +}; + +/** + * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave + * @slave: the &struct fsl_dma_slave to add to + * @address: the hardware address to add + * @length: the length of bytes to transfer from @address + * + * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on + * success, -ERRNO otherwise. + */ +static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave, + dma_addr_t address, size_t length) +{ + struct fsl_dma_hw_addr *addr; + + addr = kzalloc(sizeof(*addr), GFP_ATOMIC); + if (!addr) + return -ENOMEM; + + INIT_LIST_HEAD(&addr->entry); + addr->address = address; + addr->length = length; + + list_add_tail(&addr->entry, &slave->addresses); + return 0; +} + +/** + * fsl_dma_slave_free - free a struct fsl_dma_slave + * @slave: the struct fsl_dma_slave to free + * + * Free a struct fsl_dma_slave and all associated address/length pairs + */ +static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave) +{ + struct fsl_dma_hw_addr *addr, *tmp; + + if (slave) { + list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) { + list_del(&addr->entry); + kfree(addr); + } + + kfree(slave); + } +} + +/** + * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave + * @gfp: the flags to pass to kmalloc when allocating this structure + * + * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new + * struct fsl_dma_slave on success, or NULL on failure. 
+ */ +static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp) +{ + struct fsl_dma_slave *slave; + + slave = kzalloc(sizeof(*slave), gfp); + if (!slave) + return NULL; + + INIT_LIST_HEAD(&slave->addresses); + return slave; +} + +#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */ diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 7a0cb6064f83..296f9e747fac 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -34,6 +34,7 @@ #include #include +#include #include "fsldma.h" static void dma_init(struct fsl_dma_chan *fsl_chan) @@ -551,6 +552,229 @@ fail: return NULL; } +/** + * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction + * @chan: DMA channel + * @sgl: scatterlist to transfer to/from + * @sg_len: number of entries in @scatterlist + * @direction: DMA direction + * @flags: DMAEngine flags + * + * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the + * DMA_SLAVE API, this gets the device-specific information from the + * chan->private variable. + */ +static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_data_direction direction, unsigned long flags) +{ + struct fsl_dma_chan *fsl_chan; + struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; + struct fsl_dma_slave *slave; + struct list_head *tx_list; + size_t copy; + + int i; + struct scatterlist *sg; + size_t sg_used; + size_t hw_used; + struct fsl_dma_hw_addr *hw; + dma_addr_t dma_dst, dma_src; + + if (!chan) + return NULL; + + if (!chan->private) + return NULL; + + fsl_chan = to_fsl_chan(chan); + slave = chan->private; + + if (list_empty(&slave->addresses)) + return NULL; + + hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); + hw_used = 0; + + /* + * Build the hardware transaction to copy from the scatterlist to + * the hardware, or from the hardware to the scatterlist + * + * If you are copying from the hardware to the scatterlist and it + * takes two hardware entries to fill an entire page, then both + * hardware entries will be coalesced into the same page + * + * If you are copying from the scatterlist to the hardware and a + * single page can fill two hardware entries, then the data will + * be read out of the page into the first hardware entry, and so on + */ + for_each_sg(sgl, sg, sg_len, i) { + sg_used = 0; + + /* Loop until the entire scatterlist entry is used */ + while (sg_used < sg_dma_len(sg)) { + + /* + * If we've used up the current hardware address/length + * pair, we need to load a new one + * + * This is done in a while loop so that descriptors with + * length == 0 will be skipped + */ + while (hw_used >= hw->length) { + + /* + * If the current hardware entry is the last + * entry in the list, we're finished + */ + if (list_is_last(&hw->entry, &slave->addresses)) + goto finished; + + /* Get the next hardware address/length pair */ + hw = list_entry(hw->entry.next, + struct fsl_dma_hw_addr, entry); + hw_used = 0; + } + + /* Allocate the link descriptor from DMA pool */ + new = fsl_dma_alloc_descriptor(fsl_chan); + if (!new) { + dev_err(fsl_chan->dev, "No free memory for " + "link descriptor\n"); + goto fail; + } +#ifdef FSL_DMA_LD_DEBUG + dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new); +#endif + + /* + * Calculate the maximum number of bytes to transfer, + * making sure it is less than the DMA controller limit + */ + copy = min_t(size_t, sg_dma_len(sg) - sg_used, + hw->length - hw_used); + copy = min_t(size_t, copy, 
FSL_DMA_BCR_MAX_CNT); + + /* + * DMA_FROM_DEVICE + * from the hardware to the scatterlist + * + * DMA_TO_DEVICE + * from the scatterlist to the hardware + */ + if (direction == DMA_FROM_DEVICE) { + dma_src = hw->address + hw_used; + dma_dst = sg_dma_address(sg) + sg_used; + } else { + dma_src = sg_dma_address(sg) + sg_used; + dma_dst = hw->address + hw_used; + } + + /* Fill in the descriptor */ + set_desc_cnt(fsl_chan, &new->hw, copy); + set_desc_src(fsl_chan, &new->hw, dma_src); + set_desc_dest(fsl_chan, &new->hw, dma_dst); + + /* + * If this is not the first descriptor, chain the + * current descriptor after the previous descriptor + */ + if (!first) { + first = new; + } else { + set_desc_next(fsl_chan, &prev->hw, + new->async_tx.phys); + } + + new->async_tx.cookie = 0; + async_tx_ack(&new->async_tx); + + prev = new; + sg_used += copy; + hw_used += copy; + + /* Insert the link descriptor into the LD ring */ + list_add_tail(&new->node, &first->tx_list); + } + } + +finished: + + /* All of the hardware address/length pairs had length == 0 */ + if (!first || !new) + return NULL; + + new->async_tx.flags = flags; + new->async_tx.cookie = -EBUSY; + + /* Set End-of-link to the last link descriptor of new list */ + set_ld_eol(fsl_chan, new); + + /* Enable extra controller features */ + if (fsl_chan->set_src_loop_size) + fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size); + + if (fsl_chan->set_dest_loop_size) + fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size); + + if (fsl_chan->toggle_ext_start) + fsl_chan->toggle_ext_start(fsl_chan, slave->external_start); + + if (fsl_chan->toggle_ext_pause) + fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause); + + if (fsl_chan->set_request_count) + fsl_chan->set_request_count(fsl_chan, slave->request_count); + + return &first->async_tx; + +fail: + /* If first was not set, then we failed to allocate the very first + * descriptor, and we're done */ + if (!first) + return NULL; + + /* + * First is set, so all of the descriptors we allocated have been added + * to first->tx_list, INCLUDING "first" itself. Therefore we + * must traverse the list backwards freeing each descriptor in turn + * + * We're re-using variables for the loop, oh well + */ + tx_list = &first->tx_list; + list_for_each_entry_safe_reverse(new, prev, tx_list, node) { + list_del_init(&new->node); + dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys); + } + + return NULL; +} + +static void fsl_dma_device_terminate_all(struct dma_chan *chan) +{ + struct fsl_dma_chan *fsl_chan; + struct fsl_desc_sw *desc, *tmp; + unsigned long flags; + + if (!chan) + return; + + fsl_chan = to_fsl_chan(chan); + + /* Halt the DMA engine */ + dma_halt(fsl_chan); + + spin_lock_irqsave(&fsl_chan->desc_lock, flags); + + /* Remove and free all of the descriptors in the LD queue */ + list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) { + list_del(&desc->node); + dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); + } + + spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); +} + /** * fsl_dma_update_completed_cookie - Update the completed cookie. 
* @fsl_chan : Freescale DMA channel @@ -977,12 +1201,15 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev, dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); + dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; fdev->common.device_is_tx_complete = fsl_dma_is_complete; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; + fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; + fdev->common.device_terminate_all = fsl_dma_device_terminate_all; fdev->common.dev = &dev->dev; fdev->irq = irq_of_parse_and_map(dev->node, 0); From 657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 Mon Sep 17 00:00:00 2001 From: Atsushi Nemoto Date: Tue, 8 Sep 2009 17:53:05 -0700 Subject: [PATCH 0149/3409] dmaengine: Move all map_sg/unmap_sg for slave channel to its client Dan Williams wrote: ... DMA-slave clients request specific channels and know the hardware details at a low level, so it should not be too high an expectation to push dma mapping responsibility to the client. Also this patch includes DMA_COMPL_{SRC,DEST}_UNMAP_SINGLE support for dw_dmac driver. Acked-by: Maciej Sosnowski Acked-by: Nicolas Ferre Signed-off-by: Atsushi Nemoto Signed-off-by: Dan Williams --- drivers/dma/at_hdmac.c | 43 ++++++++++++++++++------------------ drivers/dma/dw_dmac.c | 31 ++++++++++++++++---------- drivers/mmc/host/atmel-mci.c | 9 +++++++- 3 files changed, 49 insertions(+), 34 deletions(-) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index da4e8b710a9b..0aeb578a24e3 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -253,25 +253,28 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) list_move(&desc->desc_node, &atchan->free_list); /* unmap dma addresses */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&atchan->chan_common), - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - else - dma_unmap_page(chan2parent(&atchan->chan_common), - desc->lli.daddr, - desc->len, DMA_FROM_DEVICE); - } - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) - dma_unmap_single(chan2parent(&atchan->chan_common), - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); - else - dma_unmap_page(chan2parent(&atchan->chan_common), - desc->lli.saddr, - desc->len, DMA_TO_DEVICE); + if (!atchan->chan_common.private) { + struct device *parent = chan2parent(&atchan->chan_common); + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(parent, + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + else + dma_unmap_page(parent, + desc->lli.daddr, + desc->len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(parent, + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + else + dma_unmap_page(parent, + desc->lli.saddr, + desc->len, DMA_TO_DEVICE); + } } /* @@ -647,8 +650,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, reg_width = atslave->reg_width; - sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); - ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN; diff 
--git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 514ef7d71bcf..8fb748280361 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c @@ -212,16 +212,25 @@ dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc) list_splice_init(&desc->tx_list, &dwc->free_list); list_move(&desc->desc_node, &dwc->free_list); - /* - * We use dma_unmap_page() regardless of how the buffers were - * mapped before they were submitted... - */ - if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) - dma_unmap_page(chan2parent(&dwc->chan), desc->lli.dar, - desc->len, DMA_FROM_DEVICE); - if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) - dma_unmap_page(chan2parent(&dwc->chan), desc->lli.sar, - desc->len, DMA_TO_DEVICE); + if (!dwc->chan.private) { + struct device *parent = chan2parent(&dwc->chan); + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.dar, + desc->len, DMA_FROM_DEVICE); + else + dma_unmap_page(parent, desc->lli.dar, + desc->len, DMA_FROM_DEVICE); + } + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) + dma_unmap_single(parent, desc->lli.sar, + desc->len, DMA_TO_DEVICE); + else + dma_unmap_page(parent, desc->lli.sar, + desc->len, DMA_TO_DEVICE); + } + } /* * The API requires that no submissions are done from a @@ -658,8 +667,6 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, reg_width = dws->reg_width; prev = first = NULL; - sg_len = dma_map_sg(chan2parent(chan), sgl, sg_len, direction); - switch (direction) { case DMA_TO_DEVICE: ctllo = (DWC_DEFAULT_CTLLO diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 7b603e4b41db..5e10d3663ab5 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -576,6 +576,7 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) struct scatterlist *sg; unsigned int i; enum dma_data_direction direction; + unsigned int sglen; /* * We don't do DMA on "complex" transfers, i.e. with @@ -605,11 +606,14 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) else direction = DMA_TO_DEVICE; + sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); + if (sglen != data->sg_len) + goto unmap_exit; desc = chan->device->device_prep_slave_sg(chan, data->sg, data->sg_len, direction, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) - return -ENOMEM; + goto unmap_exit; host->dma.data_desc = desc; desc->callback = atmci_dma_complete; @@ -620,6 +624,9 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) chan->device->device_issue_pending(chan); return 0; +unmap_exit: + dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); + return -ENOMEM; } #else /* CONFIG_MMC_ATMELMCI_DMA */ From d8902adcc1a9fd484c8cb5e575152e32192c1ff8 Mon Sep 17 00:00:00 2001 From: Nobuhiro Iwamatsu Date: Mon, 7 Sep 2009 03:26:23 +0000 Subject: [PATCH 0150/3409] dmaengine: sh: Add Support SuperH DMA Engine driver This supported all DMA channels, and it was tested in SH7722, SH7780, SH7785 and SH7763. This can not use with SH DMA API. 
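For context, a client sees this controller through the generic dmaengine DMA_MEMCPY interface. A minimal polling user might look like the following; this is a sketch only, with a hypothetical caller, and is not part of the driver:

#include <linux/dmaengine.h>
#include <linux/sched.h>

static struct dma_chan *copy_chan;

static int copy_offload(void *dst, void *src, size_t len)
{
	dma_cookie_t cookie, last, used;
	dma_cap_mask_t mask;

	if (!copy_chan) {
		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);
		copy_chan = dma_request_channel(mask, NULL, NULL);
		if (!copy_chan)
			return -ENODEV;
	}

	cookie = dma_async_memcpy_buf_to_buf(copy_chan, dst, src, len);
	if (cookie < 0)
		return cookie;

	dma_async_issue_pending(copy_chan);
	/* crude poll; real clients would use a completion callback */
	while (dma_async_is_tx_complete(copy_chan, cookie, &last, &used) ==
	       DMA_IN_PROGRESS)
		cpu_relax();

	return 0;
}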
Signed-off-by: Nobuhiro Iwamatsu Reviewed-by: Matt Fleming Acked-by: Maciej Sosnowski Acked-by: Paul Mundt Signed-off-by: Dan Williams --- arch/sh/drivers/dma/Kconfig | 12 +- arch/sh/drivers/dma/Makefile | 3 +- arch/sh/include/asm/dma-sh.h | 13 + drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/shdma.c | 786 +++++++++++++++++++++++++++++++++++ drivers/dma/shdma.h | 64 +++ 7 files changed, 882 insertions(+), 5 deletions(-) create mode 100644 drivers/dma/shdma.c create mode 100644 drivers/dma/shdma.h diff --git a/arch/sh/drivers/dma/Kconfig b/arch/sh/drivers/dma/Kconfig index 63e9dd30b41c..f935a7e5cbe5 100644 --- a/arch/sh/drivers/dma/Kconfig +++ b/arch/sh/drivers/dma/Kconfig @@ -1,12 +1,9 @@ menu "DMA support" -config SH_DMA_API - bool config SH_DMA bool "SuperH on-chip DMA controller (DMAC) support" depends on CPU_SH3 || CPU_SH4 - select SH_DMA_API default n config SH_DMA_IRQ_MULTI @@ -19,6 +16,15 @@ config SH_DMA_IRQ_MULTI CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785 || \ CPU_SUBTYPE_SH7760 +config SH_DMA_API + depends on SH_DMA + bool "SuperH DMA API support" + default n + help + SH_DMA_API always enabled DMA API of used SuperH. + If you want to use DMA ENGINE, you must not enable this. + Please enable DMA_ENGINE and SH_DMAE. + config NR_ONCHIP_DMA_CHANNELS int depends on SH_DMA diff --git a/arch/sh/drivers/dma/Makefile b/arch/sh/drivers/dma/Makefile index c6068137b46f..d88c9484762c 100644 --- a/arch/sh/drivers/dma/Makefile +++ b/arch/sh/drivers/dma/Makefile @@ -2,8 +2,7 @@ # Makefile for the SuperH DMA specific kernel interface routines under Linux. # -obj-$(CONFIG_SH_DMA_API) += dma-api.o dma-sysfs.o -obj-$(CONFIG_SH_DMA) += dma-sh.o +obj-$(CONFIG_SH_DMA_API) += dma-sh.o dma-api.o dma-sysfs.o obj-$(CONFIG_PVR2_DMA) += dma-pvr2.o obj-$(CONFIG_G2_DMA) += dma-g2.o obj-$(CONFIG_SH_DMABRG) += dmabrg.o diff --git a/arch/sh/include/asm/dma-sh.h b/arch/sh/include/asm/dma-sh.h index 0c8f8e14622a..89bdac558dbc 100644 --- a/arch/sh/include/asm/dma-sh.h +++ b/arch/sh/include/asm/dma-sh.h @@ -115,4 +115,17 @@ static u32 dma_base_addr[] __maybe_unused = { #define CHCR 0x0C #define DMAOR 0x40 +/* + * for dma engine + * + * SuperH DMA mode + */ +#define SHDMA_MIX_IRQ (1 << 1) +#define SHDMA_DMAOR1 (1 << 2) +#define SHDMA_DMAE1 (1 << 3) + +struct sh_dmae_pdata { + unsigned int mode; +}; + #endif /* __DMA_SH_H */ diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index fe1f3717b1ff..3230a780c3de 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -101,6 +101,14 @@ config TXX9_DMAC Support the TXx9 SoC internal DMA controller. This can be integrated in chips such as the Toshiba TX4927/38/39. +config SH_DMAE + tristate "Renesas SuperH DMAC support" + depends on SUPERH && SH_DMA + depends on !SH_DMA_API + select DMA_ENGINE + help + Enable support for the Renesas SuperH DMA controllers. + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 8f115e93b4a1..eca71ba78ae9 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o +obj-$(CONFIG_SH_DMAE) += shdma.o diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c new file mode 100644 index 000000000000..b3b065c4e5c1 --- /dev/null +++ b/drivers/dma/shdma.c @@ -0,0 +1,786 @@ +/* + * Renesas SuperH DMA Engine support + * + * base is drivers/dma/flsdma.c + * + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. 
All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * - DMA of SuperH does not have Hardware DMA chain mode. + * - MAX DMA size is 16MB. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "shdma.h" + +/* DMA descriptor control */ +#define DESC_LAST (-1) +#define DESC_COMP (1) +#define DESC_NCOMP (0) + +#define NR_DESCS_PER_CHANNEL 32 +/* + * Define the default configuration for dual address memory-memory transfer. + * The 0x400 value represents auto-request, external->external. + * + * And this driver set 4byte burst mode. + * If you want to change mode, you need to change RS_DEFAULT of value. + * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) + */ +#define RS_DEFAULT (RS_DUAL) + +#define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) +static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) +{ + ctrl_outl(data, (SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); +} + +static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) +{ + return ctrl_inl((SH_DMAC_CHAN_BASE(sh_dc->id) + reg)); +} + +static void dmae_init(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = RS_DEFAULT; /* default is DUAL mode */ + sh_dmae_writel(sh_chan, chcr, CHCR); +} + +/* + * Reset DMA controller + * + * SH7780 has two DMAOR register + */ +static void sh_dmae_ctl_stop(int id) +{ + unsigned short dmaor = dmaor_read_reg(id); + + dmaor &= ~(DMAOR_NMIF | DMAOR_AE); + dmaor_write_reg(id, dmaor); +} + +static int sh_dmae_rst(int id) +{ + unsigned short dmaor; + + sh_dmae_ctl_stop(id); + dmaor = (dmaor_read_reg(id)|DMAOR_INIT); + + dmaor_write_reg(id, dmaor); + if ((dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF))) { + pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); + return -EINVAL; + } + return 0; +} + +static int dmae_is_idle(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = sh_dmae_readl(sh_chan, CHCR); + if (chcr & CHCR_DE) { + if (!(chcr & CHCR_TE)) + return -EBUSY; /* working */ + } + return 0; /* waiting */ +} + +static inline unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = sh_dmae_readl(sh_chan, CHCR); + return ts_shift[(chcr & CHCR_TS_MASK) >> CHCR_TS_SHIFT]; +} + +static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs hw) +{ + sh_dmae_writel(sh_chan, hw.sar, SAR); + sh_dmae_writel(sh_chan, hw.dar, DAR); + sh_dmae_writel(sh_chan, + (hw.tcr >> calc_xmit_shift(sh_chan)), TCR); +} + +static void dmae_start(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = sh_dmae_readl(sh_chan, CHCR); + + chcr |= (CHCR_DE|CHCR_IE); + sh_dmae_writel(sh_chan, chcr, CHCR); +} + +static void dmae_halt(struct sh_dmae_chan *sh_chan) +{ + u32 chcr = sh_dmae_readl(sh_chan, CHCR); + + chcr &= ~(CHCR_DE | CHCR_TE | CHCR_IE); + sh_dmae_writel(sh_chan, chcr, CHCR); +} + +static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) +{ + int ret = dmae_is_idle(sh_chan); + /* When DMA was working, can not set data to CHCR */ + if (ret) + return ret; + + sh_dmae_writel(sh_chan, val, CHCR); + return 0; +} + +#define DMARS1_ADDR 0x04 +#define DMARS2_ADDR 0x08 +#define DMARS_SHIFT 8 +#define DMARS_CHAN_MSK 0x01 +static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) +{ + u32 addr; + int shift = 0; + int ret = dmae_is_idle(sh_chan); + 
if (ret) + return ret; + + if (sh_chan->id & DMARS_CHAN_MSK) + shift = DMARS_SHIFT; + + switch (sh_chan->id) { + /* DMARS0 */ + case 0: + case 1: + addr = SH_DMARS_BASE; + break; + /* DMARS1 */ + case 2: + case 3: + addr = (SH_DMARS_BASE + DMARS1_ADDR); + break; + /* DMARS2 */ + case 4: + case 5: + addr = (SH_DMARS_BASE + DMARS2_ADDR); + break; + default: + return -EINVAL; + } + + ctrl_outw((val << shift) | + (ctrl_inw(addr) & (shift ? 0xFF00 : 0x00FF)), + addr); + + return 0; +} + +static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct sh_desc *desc = tx_to_sh_desc(tx); + struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); + dma_cookie_t cookie; + + spin_lock_bh(&sh_chan->desc_lock); + + cookie = sh_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + + /* If desc only in the case of 1 */ + if (desc->async_tx.cookie != -EBUSY) + desc->async_tx.cookie = cookie; + sh_chan->common.cookie = desc->async_tx.cookie; + + list_splice_init(&desc->tx_list, sh_chan->ld_queue.prev); + + spin_unlock_bh(&sh_chan->desc_lock); + + return cookie; +} + +static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) +{ + struct sh_desc *desc, *_desc, *ret = NULL; + + spin_lock_bh(&sh_chan->desc_lock); + list_for_each_entry_safe(desc, _desc, &sh_chan->ld_free, node) { + if (async_tx_test_ack(&desc->async_tx)) { + list_del(&desc->node); + ret = desc; + break; + } + } + spin_unlock_bh(&sh_chan->desc_lock); + + return ret; +} + +static void sh_dmae_put_desc(struct sh_dmae_chan *sh_chan, struct sh_desc *desc) +{ + if (desc) { + spin_lock_bh(&sh_chan->desc_lock); + + list_splice_init(&desc->tx_list, &sh_chan->ld_free); + list_add(&desc->node, &sh_chan->ld_free); + + spin_unlock_bh(&sh_chan->desc_lock); + } +} + +static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) +{ + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); + struct sh_desc *desc; + + spin_lock_bh(&sh_chan->desc_lock); + while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { + spin_unlock_bh(&sh_chan->desc_lock); + desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); + if (!desc) { + spin_lock_bh(&sh_chan->desc_lock); + break; + } + dma_async_tx_descriptor_init(&desc->async_tx, + &sh_chan->common); + desc->async_tx.tx_submit = sh_dmae_tx_submit; + desc->async_tx.flags = DMA_CTRL_ACK; + INIT_LIST_HEAD(&desc->tx_list); + sh_dmae_put_desc(sh_chan, desc); + + spin_lock_bh(&sh_chan->desc_lock); + sh_chan->descs_allocated++; + } + spin_unlock_bh(&sh_chan->desc_lock); + + return sh_chan->descs_allocated; +} + +/* + * sh_dma_free_chan_resources - Free all resources of the channel. 
+ */ +static void sh_dmae_free_chan_resources(struct dma_chan *chan) +{ + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); + struct sh_desc *desc, *_desc; + LIST_HEAD(list); + + BUG_ON(!list_empty(&sh_chan->ld_queue)); + spin_lock_bh(&sh_chan->desc_lock); + + list_splice_init(&sh_chan->ld_free, &list); + sh_chan->descs_allocated = 0; + + spin_unlock_bh(&sh_chan->desc_lock); + + list_for_each_entry_safe(desc, _desc, &list, node) + kfree(desc); +} + +static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( + struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, + size_t len, unsigned long flags) +{ + struct sh_dmae_chan *sh_chan; + struct sh_desc *first = NULL, *prev = NULL, *new; + size_t copy_size; + + if (!chan) + return NULL; + + if (!len) + return NULL; + + sh_chan = to_sh_chan(chan); + + do { + /* Allocate the link descriptor from DMA pool */ + new = sh_dmae_get_desc(sh_chan); + if (!new) { + dev_err(sh_chan->dev, + "No free memory for link descriptor\n"); + goto err_get_desc; + } + + copy_size = min(len, (size_t)SH_DMA_TCR_MAX); + + new->hw.sar = dma_src; + new->hw.dar = dma_dest; + new->hw.tcr = copy_size; + if (!first) + first = new; + + new->mark = DESC_NCOMP; + async_tx_ack(&new->async_tx); + + prev = new; + len -= copy_size; + dma_src += copy_size; + dma_dest += copy_size; + /* Insert the link descriptor to the LD ring */ + list_add_tail(&new->node, &first->tx_list); + } while (len); + + new->async_tx.flags = flags; /* client is in control of this ack */ + new->async_tx.cookie = -EBUSY; /* Last desc */ + + return &first->async_tx; + +err_get_desc: + sh_dmae_put_desc(sh_chan, first); + return NULL; + +} + +/* + * sh_chan_ld_cleanup - Clean up link descriptors + * + * This function clean up the ld_queue of DMA channel. + */ +static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan) +{ + struct sh_desc *desc, *_desc; + + spin_lock_bh(&sh_chan->desc_lock); + list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { + dma_async_tx_callback callback; + void *callback_param; + + /* non send data */ + if (desc->mark == DESC_NCOMP) + break; + + /* send data sesc */ + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + + /* Remove from ld_queue list */ + list_splice_init(&desc->tx_list, &sh_chan->ld_free); + + dev_dbg(sh_chan->dev, "link descriptor %p will be recycle.\n", + desc); + + list_move(&desc->node, &sh_chan->ld_free); + /* Run the link descriptor callback function */ + if (callback) { + spin_unlock_bh(&sh_chan->desc_lock); + dev_dbg(sh_chan->dev, "link descriptor %p callback\n", + desc); + callback(callback_param); + spin_lock_bh(&sh_chan->desc_lock); + } + } + spin_unlock_bh(&sh_chan->desc_lock); +} + +static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) +{ + struct list_head *ld_node; + struct sh_dmae_regs hw; + + /* DMA work check */ + if (dmae_is_idle(sh_chan)) + return; + + /* Find the first un-transfer desciptor */ + for (ld_node = sh_chan->ld_queue.next; + (ld_node != &sh_chan->ld_queue) + && (to_sh_desc(ld_node)->mark == DESC_COMP); + ld_node = ld_node->next) + cpu_relax(); + + if (ld_node != &sh_chan->ld_queue) { + /* Get the ld start address from ld_queue */ + hw = to_sh_desc(ld_node)->hw; + dmae_set_reg(sh_chan, hw); + dmae_start(sh_chan); + } +} + +static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) +{ + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); + sh_chan_xfer_ld_queue(sh_chan); +} + +static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, + dma_cookie_t cookie, + 
dma_cookie_t *done, + dma_cookie_t *used) +{ + struct sh_dmae_chan *sh_chan = to_sh_chan(chan); + dma_cookie_t last_used; + dma_cookie_t last_complete; + + sh_dmae_chan_ld_cleanup(sh_chan); + + last_used = chan->cookie; + last_complete = sh_chan->completed_cookie; + if (last_complete == -EBUSY) + last_complete = last_used; + + if (done) + *done = last_complete; + + if (used) + *used = last_used; + + return dma_async_is_complete(cookie, last_complete, last_used); +} + +static irqreturn_t sh_dmae_interrupt(int irq, void *data) +{ + irqreturn_t ret = IRQ_NONE; + struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; + u32 chcr = sh_dmae_readl(sh_chan, CHCR); + + if (chcr & CHCR_TE) { + /* DMA stop */ + dmae_halt(sh_chan); + + ret = IRQ_HANDLED; + tasklet_schedule(&sh_chan->tasklet); + } + + return ret; +} + +#if defined(CONFIG_CPU_SH4) +static irqreturn_t sh_dmae_err(int irq, void *data) +{ + int err = 0; + struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; + + /* IRQ Multi */ + if (shdev->pdata.mode & SHDMA_MIX_IRQ) { + int cnt = 0; + switch (irq) { +#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) + case DMTE6_IRQ: + cnt++; +#endif + case DMTE0_IRQ: + if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { + disable_irq(irq); + return IRQ_HANDLED; + } + default: + return IRQ_NONE; + } + } else { + /* reset dma controller */ + err = sh_dmae_rst(0); + if (err) + return err; + if (shdev->pdata.mode & SHDMA_DMAOR1) { + err = sh_dmae_rst(1); + if (err) + return err; + } + disable_irq(irq); + return IRQ_HANDLED; + } +} +#endif + +static void dmae_do_tasklet(unsigned long data) +{ + struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; + struct sh_desc *desc, *_desc, *cur_desc = NULL; + u32 sar_buf = sh_dmae_readl(sh_chan, SAR); + list_for_each_entry_safe(desc, _desc, + &sh_chan->ld_queue, node) { + if ((desc->hw.sar + desc->hw.tcr) == sar_buf) { + cur_desc = desc; + break; + } + } + + if (cur_desc) { + switch (cur_desc->async_tx.cookie) { + case 0: /* other desc data */ + break; + case -EBUSY: /* last desc */ + sh_chan->completed_cookie = + cur_desc->async_tx.cookie; + break; + default: /* first desc ( 0 < )*/ + sh_chan->completed_cookie = + cur_desc->async_tx.cookie - 1; + break; + } + cur_desc->mark = DESC_COMP; + } + /* Next desc */ + sh_chan_xfer_ld_queue(sh_chan); + sh_dmae_chan_ld_cleanup(sh_chan); +} + +static unsigned int get_dmae_irq(unsigned int id) +{ + unsigned int irq = 0; + if (id < ARRAY_SIZE(dmte_irq_map)) + irq = dmte_irq_map[id]; + return irq; +} + +static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) +{ + int err; + unsigned int irq = get_dmae_irq(id); + unsigned long irqflags = IRQF_DISABLED; + struct sh_dmae_chan *new_sh_chan; + + /* alloc channel */ + new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); + if (!new_sh_chan) { + dev_err(shdev->common.dev, "No free memory for allocating " + "dma channels!\n"); + return -ENOMEM; + } + + new_sh_chan->dev = shdev->common.dev; + new_sh_chan->id = id; + + /* Init DMA tasklet */ + tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, + (unsigned long)new_sh_chan); + + /* Init the channel */ + dmae_init(new_sh_chan); + + spin_lock_init(&new_sh_chan->desc_lock); + + /* Init descripter manage list */ + INIT_LIST_HEAD(&new_sh_chan->ld_queue); + INIT_LIST_HEAD(&new_sh_chan->ld_free); + + /* copy struct dma_device */ + new_sh_chan->common.device = &shdev->common; + + /* Add the channel to DMA device channel list */ + list_add_tail(&new_sh_chan->common.device_node, + 
&shdev->common.channels); + shdev->common.chancnt++; + + if (shdev->pdata.mode & SHDMA_MIX_IRQ) { + irqflags = IRQF_SHARED; +#if defined(DMTE6_IRQ) + if (irq >= DMTE6_IRQ) + irq = DMTE6_IRQ; + else +#endif + irq = DMTE0_IRQ; + } + + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), + "sh-dmae%d", new_sh_chan->id); + + /* set up channel irq */ + err = request_irq(irq, &sh_dmae_interrupt, + irqflags, new_sh_chan->dev_id, new_sh_chan); + if (err) { + dev_err(shdev->common.dev, "DMA channel %d request_irq error " + "with return %d\n", id, err); + goto err_no_irq; + } + + /* CHCR register control function */ + new_sh_chan->set_chcr = dmae_set_chcr; + /* DMARS register control function */ + new_sh_chan->set_dmars = dmae_set_dmars; + + shdev->chan[id] = new_sh_chan; + return 0; + +err_no_irq: + /* remove from dmaengine device node */ + list_del(&new_sh_chan->common.device_node); + kfree(new_sh_chan); + return err; +} + +static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) +{ + int i; + + for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { + if (shdev->chan[i]) { + struct sh_dmae_chan *shchan = shdev->chan[i]; + if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) + free_irq(dmte_irq_map[i], shchan); + + list_del(&shchan->common.device_node); + kfree(shchan); + shdev->chan[i] = NULL; + } + } + shdev->common.chancnt = 0; +} + +static int __init sh_dmae_probe(struct platform_device *pdev) +{ + int err = 0, cnt, ecnt; + unsigned long irqflags = IRQF_DISABLED; +#if defined(CONFIG_CPU_SH4) + int eirq[] = { DMAE0_IRQ, +#if defined(DMAE1_IRQ) + DMAE1_IRQ +#endif + }; +#endif + struct sh_dmae_device *shdev; + + shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); + if (!shdev) { + dev_err(&pdev->dev, "No enough memory\n"); + err = -ENOMEM; + goto shdev_err; + } + + /* get platform data */ + if (!pdev->dev.platform_data) + goto shdev_err; + + /* platform data */ + memcpy(&shdev->pdata, pdev->dev.platform_data, + sizeof(struct sh_dmae_pdata)); + + /* reset dma controller */ + err = sh_dmae_rst(0); + if (err) + goto rst_err; + + /* SH7780/85/23 has DMAOR1 */ + if (shdev->pdata.mode & SHDMA_DMAOR1) { + err = sh_dmae_rst(1); + if (err) + goto rst_err; + } + + INIT_LIST_HEAD(&shdev->common.channels); + + dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); + shdev->common.device_alloc_chan_resources + = sh_dmae_alloc_chan_resources; + shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; + shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; + shdev->common.device_is_tx_complete = sh_dmae_is_complete; + shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; + shdev->common.dev = &pdev->dev; + +#if defined(CONFIG_CPU_SH4) + /* Non Mix IRQ mode SH7722/SH7730 etc... 
*/ + if (shdev->pdata.mode & SHDMA_MIX_IRQ) { + irqflags = IRQF_SHARED; + eirq[0] = DMTE0_IRQ; +#if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) + eirq[1] = DMTE6_IRQ; +#endif + } + + for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { + err = request_irq(eirq[ecnt], sh_dmae_err, + irqflags, "DMAC Address Error", shdev); + if (err) { + dev_err(&pdev->dev, "DMA device request_irq" + "error (irq %d) with return %d\n", + eirq[ecnt], err); + goto eirq_err; + } + } +#endif /* CONFIG_CPU_SH4 */ + + /* Create DMA Channel */ + for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { + err = sh_dmae_chan_probe(shdev, cnt); + if (err) + goto chan_probe_err; + } + + platform_set_drvdata(pdev, shdev); + dma_async_device_register(&shdev->common); + + return err; + +chan_probe_err: + sh_dmae_chan_remove(shdev); + +eirq_err: + for (ecnt-- ; ecnt >= 0; ecnt--) + free_irq(eirq[ecnt], shdev); + +rst_err: + kfree(shdev); + +shdev_err: + return err; +} + +static int __exit sh_dmae_remove(struct platform_device *pdev) +{ + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); + + dma_async_device_unregister(&shdev->common); + + if (shdev->pdata.mode & SHDMA_MIX_IRQ) { + free_irq(DMTE0_IRQ, shdev); +#if defined(DMTE6_IRQ) + free_irq(DMTE6_IRQ, shdev); +#endif + } + + /* channel data remove */ + sh_dmae_chan_remove(shdev); + + if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { + free_irq(DMAE0_IRQ, shdev); +#if defined(DMAE1_IRQ) + free_irq(DMAE1_IRQ, shdev); +#endif + } + kfree(shdev); + + return 0; +} + +static void sh_dmae_shutdown(struct platform_device *pdev) +{ + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); + sh_dmae_ctl_stop(0); + if (shdev->pdata.mode & SHDMA_DMAOR1) + sh_dmae_ctl_stop(1); +} + +static struct platform_driver sh_dmae_driver = { + .remove = __exit_p(sh_dmae_remove), + .shutdown = sh_dmae_shutdown, + .driver = { + .name = "sh-dma-engine", + }, +}; + +static int __init sh_dmae_init(void) +{ + return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); +} +module_init(sh_dmae_init); + +static void __exit sh_dmae_exit(void) +{ + platform_driver_unregister(&sh_dmae_driver); +} +module_exit(sh_dmae_exit); + +MODULE_AUTHOR("Nobuhiro Iwamatsu "); +MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h new file mode 100644 index 000000000000..2b4bc15a2c0a --- /dev/null +++ b/drivers/dma/shdma.h @@ -0,0 +1,64 @@ +/* + * Renesas SuperH DMA Engine support + * + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + */ +#ifndef __DMA_SHDMA_H +#define __DMA_SHDMA_H + +#include +#include +#include + +#define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ + +struct sh_dmae_regs { + u32 sar; /* SAR / source address */ + u32 dar; /* DAR / destination address */ + u32 tcr; /* TCR / transfer count */ +}; + +struct sh_desc { + struct list_head tx_list; + struct sh_dmae_regs hw; + struct list_head node; + struct dma_async_tx_descriptor async_tx; + int mark; +}; + +struct sh_dmae_chan { + dma_cookie_t completed_cookie; /* The maximum cookie completed */ + spinlock_t desc_lock; /* Descriptor operation lock */ + struct list_head ld_queue; /* Link descriptors queue */ + struct list_head ld_free; /* Link descriptors free */ + struct dma_chan common; /* DMA common channel */ + struct device *dev; /* Channel device */ + struct tasklet_struct tasklet; /* Tasklet */ + int descs_allocated; /* desc count */ + int id; /* Raw id of this channel */ + char dev_id[16]; /* unique name per DMAC of channel */ + + /* Set chcr */ + int (*set_chcr)(struct sh_dmae_chan *sh_chan, u32 regs); + /* Set DMA resource */ + int (*set_dmars)(struct sh_dmae_chan *sh_chan, u16 res); +}; + +struct sh_dmae_device { + struct dma_device common; + struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; + struct sh_dmae_pdata pdata; +}; + +#define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) +#define to_sh_desc(lh) container_of(lh, struct sh_desc, node) +#define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) + +#endif /* __DMA_SHDMA_H */ From 9a8de639f35ca3951b910d5e3a2f92f4cf3afc8f Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 8 Sep 2009 15:06:10 -0700 Subject: [PATCH 0151/3409] async_tx: remove HIGHMEM64G restriction This restriction prevented ASYNC_TX_DMA from being enabled on platform configurations where DMA address conversion could not be performed in place on the stack. Since commit 04ce9ab3 ("async_xor: permit callers to pass in a 'dma/page scribble' region") the async_tx api now either uses a caller provided 'scribble' buffer, or performs the conversion in place when sizeof(dma_addr_t) <= sizeof(struct page *). Signed-off-by: Dan Williams --- drivers/dma/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 3230a780c3de..5903a88351bf 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -128,7 +128,7 @@ config NET_DMA config ASYNC_TX_DMA bool "Async_tx: Offload support for the async_tx api" - depends on DMA_ENGINE && !HIGHMEM64G + depends on DMA_ENGINE help This allows the async_tx api to take advantage of offload engines for memcpy, memset, xor, and raid6 p+q operations. 
If your platform has From 38d8a95621b20ed7868e232a35a26ee61bdcae6f Mon Sep 17 00:00:00 2001 From: Fabian Henze Date: Tue, 8 Sep 2009 00:59:58 +0800 Subject: [PATCH 0152/3409] agp/intel: Add B43 chipset support Signed-off-by: Fabian Henze [Fix reversed HB & IG ids for B43] Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/char/agp/intel-agp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c index c58557790585..c17291715031 100644 --- a/drivers/char/agp/intel-agp.c +++ b/drivers/char/agp/intel-agp.c @@ -36,6 +36,8 @@ #define PCI_DEVICE_ID_INTEL_Q35_IG 0x29B2 #define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0 #define PCI_DEVICE_ID_INTEL_Q33_IG 0x29D2 +#define PCI_DEVICE_ID_INTEL_B43_HB 0x2E40 +#define PCI_DEVICE_ID_INTEL_B43_IG 0x2E42 #define PCI_DEVICE_ID_INTEL_GM45_HB 0x2A40 #define PCI_DEVICE_ID_INTEL_GM45_IG 0x2A42 #define PCI_DEVICE_ID_INTEL_IGD_E_HB 0x2E00 @@ -81,6 +83,7 @@ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \ + agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB) @@ -1216,6 +1219,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size) case PCI_DEVICE_ID_INTEL_Q45_HB: case PCI_DEVICE_ID_INTEL_G45_HB: case PCI_DEVICE_ID_INTEL_G41_HB: + case PCI_DEVICE_ID_INTEL_B43_HB: case PCI_DEVICE_ID_INTEL_IGDNG_D_HB: case PCI_DEVICE_ID_INTEL_IGDNG_M_HB: case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB: @@ -2192,6 +2196,8 @@ static const struct intel_driver_description { "Q45/Q43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0, "G45/G43", NULL, &intel_i965_driver }, + { PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG, 0, + "B43", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0, "G41", NULL, &intel_i965_driver }, { PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0, @@ -2401,6 +2407,7 @@ static struct pci_device_id agp_intel_pci_table[] = { ID(PCI_DEVICE_ID_INTEL_Q45_HB), ID(PCI_DEVICE_ID_INTEL_G45_HB), ID(PCI_DEVICE_ID_INTEL_G41_HB), + ID(PCI_DEVICE_ID_INTEL_B43_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB), ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB), From 7839c5d5519b6d9e2ccf3cdbf1c39e3817ad0835 Mon Sep 17 00:00:00 2001 From: Fabian Henze Date: Tue, 8 Sep 2009 00:59:59 +0800 Subject: [PATCH 0153/3409] drm/i915: add B43 chipset support Signed-off-by: Fabian Henze Signed-off-by: Zhenyu Wang Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_drv.h | 2 ++ include/drm/drm_pciids.h | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 77ed060b4292..e5f20e40ac59 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -863,6 +863,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ (dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ (dev)->pci_device == 0x0042 || \ (dev)->pci_device == 0x0046) @@ -875,6 +876,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); (dev)->pci_device == 0x2E12 || \ (dev)->pci_device == 0x2E22 || \ 
(dev)->pci_device == 0x2E32 || \ + (dev)->pci_device == 0x2E42 || \ IS_GM45(dev)) #define IS_IGDG(dev) ((dev)->pci_device == 0xa001) diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h index 853508499d20..3f6e545609be 100644 --- a/include/drm/drm_pciids.h +++ b/include/drm/drm_pciids.h @@ -552,6 +552,7 @@ {0x8086, 0x2e12, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e22, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x2e32, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x2e42, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ {0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ From 5323fd042f89164927ee8c311f0a975e8c846412 Mon Sep 17 00:00:00 2001 From: Eric Anholt Date: Wed, 9 Sep 2009 11:50:45 -0700 Subject: [PATCH 0154/3409] drm/i915: Zap mmaps of objects before unbinding them from the GTT. Otherwise, some other userland writing into its buffer may race to land writes either after the CPU thinks it's got a coherent view, or after its GTT entries have been redirected to point at the scratch page. Either result is unpleasant. Signed-off-by: Eric Anholt --- drivers/gpu/drm/i915/i915_gem.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 954fb699131b..f3758f9fc979 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1915,6 +1915,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return -EINVAL; } + /* blow away mappings if mapped through GTT */ + i915_gem_release_mmap(obj); + + if (obj_priv->fence_reg != I915_FENCE_REG_NONE) + i915_gem_clear_fence_reg(obj); + /* Move the object to the CPU domain to ensure that * any possible CPU writes while it's not in the GTT * are flushed when we go to remap it. This will @@ -1928,20 +1934,14 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return ret; } + BUG_ON(obj_priv->active); + if (obj_priv->agp_mem != NULL) { drm_unbind_agp(obj_priv->agp_mem); drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); obj_priv->agp_mem = NULL; } - BUG_ON(obj_priv->active); - - /* blow away mappings if mapped through GTT */ - i915_gem_release_mmap(obj); - - if (obj_priv->fence_reg != I915_FENCE_REG_NONE) - i915_gem_clear_fence_reg(obj); - i915_gem_object_put_pages(obj); if (obj_priv->gtt_space) { From ee297936a7ed7e8477a200eb9c6de13150adbf0c Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Fri, 7 Aug 2009 15:51:12 +0200 Subject: [PATCH 0155/3409] [ARM] pxa: remove left-over struct clk *other field from struct clk Its removal was omitted when all its uses were removed in 8c3abc7d... 
"[ARM] pxa: convert to clkdev and match clocks by struct device where possible" Signed-off-by: Philipp Zabel Signed-off-by: Eric Miao --- arch/arm/mach-pxa/clock.h | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/mach-pxa/clock.h b/arch/arm/mach-pxa/clock.h index 5599bceff738..978a3667e90d 100644 --- a/arch/arm/mach-pxa/clock.h +++ b/arch/arm/mach-pxa/clock.h @@ -12,7 +12,6 @@ struct clk { unsigned int cken; unsigned int delay; unsigned int enabled; - struct clk *other; }; #define INIT_CLKREG(_clk,_devname,_conname) \ From 9304d008d6ba41ff52ef6eae44337398e83b4436 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Mon, 31 Aug 2009 20:16:54 +0800 Subject: [PATCH 0156/3409] [ARM] pxa: remove duplicate mfp definition in mach Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/mfp.h | 301 +-------------------------- 1 file changed, 1 insertion(+), 300 deletions(-) diff --git a/arch/arm/mach-pxa/include/mach/mfp.h b/arch/arm/mach-pxa/include/mach/mfp.h index 482185053a92..271e249ae34f 100644 --- a/arch/arm/mach-pxa/include/mach/mfp.h +++ b/arch/arm/mach-pxa/include/mach/mfp.h @@ -16,305 +16,6 @@ #ifndef __ASM_ARCH_MFP_H #define __ASM_ARCH_MFP_H -#define mfp_to_gpio(m) ((m) % 128) - -/* list of all the configurable MFP pins */ -enum { - MFP_PIN_INVALID = -1, - - MFP_PIN_GPIO0 = 0, - MFP_PIN_GPIO1, - MFP_PIN_GPIO2, - MFP_PIN_GPIO3, - MFP_PIN_GPIO4, - MFP_PIN_GPIO5, - MFP_PIN_GPIO6, - MFP_PIN_GPIO7, - MFP_PIN_GPIO8, - MFP_PIN_GPIO9, - MFP_PIN_GPIO10, - MFP_PIN_GPIO11, - MFP_PIN_GPIO12, - MFP_PIN_GPIO13, - MFP_PIN_GPIO14, - MFP_PIN_GPIO15, - MFP_PIN_GPIO16, - MFP_PIN_GPIO17, - MFP_PIN_GPIO18, - MFP_PIN_GPIO19, - MFP_PIN_GPIO20, - MFP_PIN_GPIO21, - MFP_PIN_GPIO22, - MFP_PIN_GPIO23, - MFP_PIN_GPIO24, - MFP_PIN_GPIO25, - MFP_PIN_GPIO26, - MFP_PIN_GPIO27, - MFP_PIN_GPIO28, - MFP_PIN_GPIO29, - MFP_PIN_GPIO30, - MFP_PIN_GPIO31, - MFP_PIN_GPIO32, - MFP_PIN_GPIO33, - MFP_PIN_GPIO34, - MFP_PIN_GPIO35, - MFP_PIN_GPIO36, - MFP_PIN_GPIO37, - MFP_PIN_GPIO38, - MFP_PIN_GPIO39, - MFP_PIN_GPIO40, - MFP_PIN_GPIO41, - MFP_PIN_GPIO42, - MFP_PIN_GPIO43, - MFP_PIN_GPIO44, - MFP_PIN_GPIO45, - MFP_PIN_GPIO46, - MFP_PIN_GPIO47, - MFP_PIN_GPIO48, - MFP_PIN_GPIO49, - MFP_PIN_GPIO50, - MFP_PIN_GPIO51, - MFP_PIN_GPIO52, - MFP_PIN_GPIO53, - MFP_PIN_GPIO54, - MFP_PIN_GPIO55, - MFP_PIN_GPIO56, - MFP_PIN_GPIO57, - MFP_PIN_GPIO58, - MFP_PIN_GPIO59, - MFP_PIN_GPIO60, - MFP_PIN_GPIO61, - MFP_PIN_GPIO62, - MFP_PIN_GPIO63, - MFP_PIN_GPIO64, - MFP_PIN_GPIO65, - MFP_PIN_GPIO66, - MFP_PIN_GPIO67, - MFP_PIN_GPIO68, - MFP_PIN_GPIO69, - MFP_PIN_GPIO70, - MFP_PIN_GPIO71, - MFP_PIN_GPIO72, - MFP_PIN_GPIO73, - MFP_PIN_GPIO74, - MFP_PIN_GPIO75, - MFP_PIN_GPIO76, - MFP_PIN_GPIO77, - MFP_PIN_GPIO78, - MFP_PIN_GPIO79, - MFP_PIN_GPIO80, - MFP_PIN_GPIO81, - MFP_PIN_GPIO82, - MFP_PIN_GPIO83, - MFP_PIN_GPIO84, - MFP_PIN_GPIO85, - MFP_PIN_GPIO86, - MFP_PIN_GPIO87, - MFP_PIN_GPIO88, - MFP_PIN_GPIO89, - MFP_PIN_GPIO90, - MFP_PIN_GPIO91, - MFP_PIN_GPIO92, - MFP_PIN_GPIO93, - MFP_PIN_GPIO94, - MFP_PIN_GPIO95, - MFP_PIN_GPIO96, - MFP_PIN_GPIO97, - MFP_PIN_GPIO98, - MFP_PIN_GPIO99, - MFP_PIN_GPIO100, - MFP_PIN_GPIO101, - MFP_PIN_GPIO102, - MFP_PIN_GPIO103, - MFP_PIN_GPIO104, - MFP_PIN_GPIO105, - MFP_PIN_GPIO106, - MFP_PIN_GPIO107, - MFP_PIN_GPIO108, - MFP_PIN_GPIO109, - MFP_PIN_GPIO110, - MFP_PIN_GPIO111, - MFP_PIN_GPIO112, - MFP_PIN_GPIO113, - MFP_PIN_GPIO114, - MFP_PIN_GPIO115, - MFP_PIN_GPIO116, - MFP_PIN_GPIO117, - MFP_PIN_GPIO118, - MFP_PIN_GPIO119, - MFP_PIN_GPIO120, - MFP_PIN_GPIO121, - MFP_PIN_GPIO122, - 
MFP_PIN_GPIO123, - MFP_PIN_GPIO124, - MFP_PIN_GPIO125, - MFP_PIN_GPIO126, - MFP_PIN_GPIO127, - MFP_PIN_GPIO0_2, - MFP_PIN_GPIO1_2, - MFP_PIN_GPIO2_2, - MFP_PIN_GPIO3_2, - MFP_PIN_GPIO4_2, - MFP_PIN_GPIO5_2, - MFP_PIN_GPIO6_2, - MFP_PIN_GPIO7_2, - MFP_PIN_GPIO8_2, - MFP_PIN_GPIO9_2, - MFP_PIN_GPIO10_2, - MFP_PIN_GPIO11_2, - MFP_PIN_GPIO12_2, - MFP_PIN_GPIO13_2, - MFP_PIN_GPIO14_2, - MFP_PIN_GPIO15_2, - MFP_PIN_GPIO16_2, - MFP_PIN_GPIO17_2, - - MFP_PIN_ULPI_STP, - MFP_PIN_ULPI_NXT, - MFP_PIN_ULPI_DIR, - - MFP_PIN_nXCVREN, - MFP_PIN_DF_CLE_nOE, - MFP_PIN_DF_nADV1_ALE, - MFP_PIN_DF_SCLK_E, - MFP_PIN_DF_SCLK_S, - MFP_PIN_nBE0, - MFP_PIN_nBE1, - MFP_PIN_DF_nADV2_ALE, - MFP_PIN_DF_INT_RnB, - MFP_PIN_DF_nCS0, - MFP_PIN_DF_nCS1, - MFP_PIN_nLUA, - MFP_PIN_nLLA, - MFP_PIN_DF_nWE, - MFP_PIN_DF_ALE_nWE, - MFP_PIN_DF_nRE_nOE, - MFP_PIN_DF_ADDR0, - MFP_PIN_DF_ADDR1, - MFP_PIN_DF_ADDR2, - MFP_PIN_DF_ADDR3, - MFP_PIN_DF_IO0, - MFP_PIN_DF_IO1, - MFP_PIN_DF_IO2, - MFP_PIN_DF_IO3, - MFP_PIN_DF_IO4, - MFP_PIN_DF_IO5, - MFP_PIN_DF_IO6, - MFP_PIN_DF_IO7, - MFP_PIN_DF_IO8, - MFP_PIN_DF_IO9, - MFP_PIN_DF_IO10, - MFP_PIN_DF_IO11, - MFP_PIN_DF_IO12, - MFP_PIN_DF_IO13, - MFP_PIN_DF_IO14, - MFP_PIN_DF_IO15, - - /* additional pins on PXA930 */ - MFP_PIN_GSIM_UIO, - MFP_PIN_GSIM_UCLK, - MFP_PIN_GSIM_UDET, - MFP_PIN_GSIM_nURST, - MFP_PIN_PMIC_INT, - MFP_PIN_RDY, - - MFP_PIN_MAX, -}; - -/* - * a possible MFP configuration is represented by a 32-bit integer - * - * bit 0.. 9 - MFP Pin Number (1024 Pins Maximum) - * bit 10..12 - Alternate Function Selection - * bit 13..15 - Drive Strength - * bit 16..18 - Low Power Mode State - * bit 19..20 - Low Power Mode Edge Detection - * bit 21..22 - Run Mode Pull State - * - * to facilitate the definition, the following macros are provided - * - * MFP_CFG_DEFAULT - default MFP configuration value, with - * alternate function = 0, - * drive strength = fast 3mA (MFP_DS03X) - * low power mode = default - * edge detection = none - * - * MFP_CFG - default MFPR value with alternate function - * MFP_CFG_DRV - default MFPR value with alternate function and - * pin drive strength - * MFP_CFG_LPM - default MFPR value with alternate function and - * low power mode - * MFP_CFG_X - default MFPR value with alternate function, - * pin drive strength and low power mode - */ - -typedef unsigned long mfp_cfg_t; - -#define MFP_PIN(x) ((x) & 0x3ff) - -#define MFP_AF0 (0x0 << 10) -#define MFP_AF1 (0x1 << 10) -#define MFP_AF2 (0x2 << 10) -#define MFP_AF3 (0x3 << 10) -#define MFP_AF4 (0x4 << 10) -#define MFP_AF5 (0x5 << 10) -#define MFP_AF6 (0x6 << 10) -#define MFP_AF7 (0x7 << 10) -#define MFP_AF_MASK (0x7 << 10) -#define MFP_AF(x) (((x) >> 10) & 0x7) - -#define MFP_DS01X (0x0 << 13) -#define MFP_DS02X (0x1 << 13) -#define MFP_DS03X (0x2 << 13) -#define MFP_DS04X (0x3 << 13) -#define MFP_DS06X (0x4 << 13) -#define MFP_DS08X (0x5 << 13) -#define MFP_DS10X (0x6 << 13) -#define MFP_DS13X (0x7 << 13) -#define MFP_DS_MASK (0x7 << 13) -#define MFP_DS(x) (((x) >> 13) & 0x7) - -#define MFP_LPM_DEFAULT (0x0 << 16) -#define MFP_LPM_DRIVE_LOW (0x1 << 16) -#define MFP_LPM_DRIVE_HIGH (0x2 << 16) -#define MFP_LPM_PULL_LOW (0x3 << 16) -#define MFP_LPM_PULL_HIGH (0x4 << 16) -#define MFP_LPM_FLOAT (0x5 << 16) -#define MFP_LPM_INPUT (0x6 << 16) -#define MFP_LPM_STATE_MASK (0x7 << 16) -#define MFP_LPM_STATE(x) (((x) >> 16) & 0x7) - -#define MFP_LPM_EDGE_NONE (0x0 << 19) -#define MFP_LPM_EDGE_RISE (0x1 << 19) -#define MFP_LPM_EDGE_FALL (0x2 << 19) -#define MFP_LPM_EDGE_BOTH (0x3 << 19) -#define MFP_LPM_EDGE_MASK (0x3 << 19) 
-#define MFP_LPM_EDGE(x) (((x) >> 19) & 0x3) - -#define MFP_PULL_NONE (0x0 << 21) -#define MFP_PULL_LOW (0x1 << 21) -#define MFP_PULL_HIGH (0x2 << 21) -#define MFP_PULL_BOTH (0x3 << 21) -#define MFP_PULL_MASK (0x3 << 21) -#define MFP_PULL(x) (((x) >> 21) & 0x3) - -#define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\ - MFP_LPM_EDGE_NONE | MFP_PULL_NONE) - -#define MFP_CFG(pin, af) \ - ((MFP_CFG_DEFAULT & ~MFP_AF_MASK) |\ - (MFP_PIN(MFP_PIN_##pin) | MFP_##af)) - -#define MFP_CFG_DRV(pin, af, drv) \ - ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK)) |\ - (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv)) - -#define MFP_CFG_LPM(pin, af, lpm) \ - ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_LPM_STATE_MASK)) |\ - (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_LPM_##lpm)) - -#define MFP_CFG_X(pin, af, drv, lpm) \ - ((MFP_CFG_DEFAULT & ~(MFP_AF_MASK | MFP_DS_MASK | MFP_LPM_STATE_MASK)) |\ - (MFP_PIN(MFP_PIN_##pin) | MFP_##af | MFP_##drv | MFP_LPM_##lpm)) +#include #endif /* __ASM_ARCH_MFP_H */ From fb60870f630f926d2299b6ebd31fc94714dcd734 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Tue, 11 Aug 2009 14:39:09 +0200 Subject: [PATCH 0157/3409] [ARM] pxa: add MFP_PULL_FLOAT There is currently an uncovered case for MFP configuration on PXAs which is selected by setting the PULL_SEL bit but none of the PULL{UP,DOWN}_EN bits. This case is needed to explicitly let pins float, even if the selected alternate function would default to a configuration with a pull resistor enabled. Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/plat-pxa/include/plat/mfp.h | 5 +++-- arch/arm/plat-pxa/mfp.c | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/arch/arm/plat-pxa/include/plat/mfp.h index 64019464c8db..08ad665460ba 100644 --- a/arch/arm/plat-pxa/include/plat/mfp.h +++ b/arch/arm/plat-pxa/include/plat/mfp.h @@ -325,8 +325,9 @@ typedef unsigned long mfp_cfg_t; #define MFP_PULL_LOW (0x1 << 21) #define MFP_PULL_HIGH (0x2 << 21) #define MFP_PULL_BOTH (0x3 << 21) -#define MFP_PULL_MASK (0x3 << 21) -#define MFP_PULL(x) (((x) >> 21) & 0x3) +#define MFP_PULL_FLOAT (0x4 << 21) +#define MFP_PULL_MASK (0x7 << 21) +#define MFP_PULL(x) (((x) >> 21) & 0x7) #define MFP_CFG_DEFAULT (MFP_AF0 | MFP_DS03X | MFP_LPM_DEFAULT |\ MFP_LPM_EDGE_NONE | MFP_PULL_NONE) diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c index e716c622a17c..9405d0379c85 100644 --- a/arch/arm/plat-pxa/mfp.c +++ b/arch/arm/plat-pxa/mfp.c @@ -77,11 +77,13 @@ * MFPR_PULL_LOW 1 0 1 * MFPR_PULL_HIGH 1 1 0 * MFPR_PULL_BOTH 1 1 1 + * MFPR_PULL_FLOAT 1 0 0 */ #define MFPR_PULL_NONE (0) #define MFPR_PULL_LOW (MFPR_PULL_SEL | MFPR_PULLDOWN_EN) #define MFPR_PULL_BOTH (MFPR_PULL_LOW | MFPR_PULLUP_EN) #define MFPR_PULL_HIGH (MFPR_PULL_SEL | MFPR_PULLUP_EN) +#define MFPR_PULL_FLOAT (MFPR_PULL_SEL) /* mfp_spin_lock is used to ensure that MFP register configuration * (most likely a read-modify-write operation) is atomic, and that @@ -116,6 +118,7 @@ static const unsigned long mfpr_pull[] = { MFPR_PULL_LOW, MFPR_PULL_HIGH, MFPR_PULL_BOTH, + MFPR_PULL_FLOAT, }; /* mapping of MFP_LPM_EDGE_* definitions to MFPR_EDGE_* register bits */ From 063936df925f54a32649490f828af9af66ef8c8e Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 17 Jun 2009 13:57:40 +0800 Subject: [PATCH 0158/3409] [ARM] pxa: support mfp of pxa935 Add and initialize the mfp setting of pxa935 chip. 
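The MFP_PULL_FLOAT flag introduced in the patch above is consumed by board pin tables. A minimal, hypothetical board-support sketch follows (the pin name GPIO20_GPIO and the init hook are placeholders, not part of this series; the mfp_config(unsigned long *, int) helper is the one in arch/arm/plat-pxa/mfp.c):

#include <linux/kernel.h>
#include <plat/mfp.h>

/* Force one pin to float: PULL_SEL is set but neither PULLUP_EN nor
 * PULLDOWN_EN, overriding the alternate function's default pull. */
static mfp_cfg_t example_float_pins[] __initdata = {
	GPIO20_GPIO | MFP_PULL_FLOAT,	/* hypothetical pin */
};

static void __init example_board_init_mfp(void)
{
	/* mfp_config() translates each entry into one MFPR write */
	mfp_config(example_float_pins, ARRAY_SIZE(example_float_pins));
}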
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/pxa930.c | 19 +++++++- arch/arm/plat-pxa/include/plat/mfp.h | 68 ++++++++++++++++++++++++++++ 2 files changed, 86 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/pxa930.c b/arch/arm/mach-pxa/pxa930.c index 71131742fffd..064292008288 100644 --- a/arch/arm/mach-pxa/pxa930.c +++ b/arch/arm/mach-pxa/pxa930.c @@ -176,13 +176,30 @@ static struct mfp_addr_map pxa930_mfp_addr_map[] __initdata = { MFP_ADDR_END, }; +static struct mfp_addr_map pxa935_mfp_addr_map[] __initdata = { + MFP_ADDR(GPIO159, 0x0524), + MFP_ADDR(GPIO163, 0x0534), + MFP_ADDR(GPIO167, 0x0544), + MFP_ADDR(GPIO168, 0x0548), + MFP_ADDR(GPIO169, 0x054c), + MFP_ADDR(GPIO170, 0x0550), + MFP_ADDR(GPIO171, 0x0554), + MFP_ADDR(GPIO172, 0x0558), + MFP_ADDR(GPIO173, 0x055c), + + MFP_ADDR_END, +}; + static int __init pxa930_init(void) { - if (cpu_is_pxa930()) { + if (cpu_is_pxa930() || cpu_is_pxa935()) { mfp_init_base(io_p2v(MFPR_BASE)); mfp_init_addr(pxa930_mfp_addr_map); } + if (cpu_is_pxa935()) + mfp_init_addr(pxa935_mfp_addr_map); + return 0; } diff --git a/arch/arm/plat-pxa/include/plat/mfp.h b/arch/arm/plat-pxa/include/plat/mfp.h index 08ad665460ba..22086e696e8e 100644 --- a/arch/arm/plat-pxa/include/plat/mfp.h +++ b/arch/arm/plat-pxa/include/plat/mfp.h @@ -150,6 +150,74 @@ enum { MFP_PIN_GPIO125, MFP_PIN_GPIO126, MFP_PIN_GPIO127, + + MFP_PIN_GPIO128, + MFP_PIN_GPIO129, + MFP_PIN_GPIO130, + MFP_PIN_GPIO131, + MFP_PIN_GPIO132, + MFP_PIN_GPIO133, + MFP_PIN_GPIO134, + MFP_PIN_GPIO135, + MFP_PIN_GPIO136, + MFP_PIN_GPIO137, + MFP_PIN_GPIO138, + MFP_PIN_GPIO139, + MFP_PIN_GPIO140, + MFP_PIN_GPIO141, + MFP_PIN_GPIO142, + MFP_PIN_GPIO143, + MFP_PIN_GPIO144, + MFP_PIN_GPIO145, + MFP_PIN_GPIO146, + MFP_PIN_GPIO147, + MFP_PIN_GPIO148, + MFP_PIN_GPIO149, + MFP_PIN_GPIO150, + MFP_PIN_GPIO151, + MFP_PIN_GPIO152, + MFP_PIN_GPIO153, + MFP_PIN_GPIO154, + MFP_PIN_GPIO155, + MFP_PIN_GPIO156, + MFP_PIN_GPIO157, + MFP_PIN_GPIO158, + MFP_PIN_GPIO159, + MFP_PIN_GPIO160, + MFP_PIN_GPIO161, + MFP_PIN_GPIO162, + MFP_PIN_GPIO163, + MFP_PIN_GPIO164, + MFP_PIN_GPIO165, + MFP_PIN_GPIO166, + MFP_PIN_GPIO167, + MFP_PIN_GPIO168, + MFP_PIN_GPIO169, + MFP_PIN_GPIO170, + MFP_PIN_GPIO171, + MFP_PIN_GPIO172, + MFP_PIN_GPIO173, + MFP_PIN_GPIO174, + MFP_PIN_GPIO175, + MFP_PIN_GPIO176, + MFP_PIN_GPIO177, + MFP_PIN_GPIO178, + MFP_PIN_GPIO179, + MFP_PIN_GPIO180, + MFP_PIN_GPIO181, + MFP_PIN_GPIO182, + MFP_PIN_GPIO183, + MFP_PIN_GPIO184, + MFP_PIN_GPIO185, + MFP_PIN_GPIO186, + MFP_PIN_GPIO187, + MFP_PIN_GPIO188, + MFP_PIN_GPIO189, + MFP_PIN_GPIO190, + MFP_PIN_GPIO191, + + MFP_PIN_GPIO255 = 255, + MFP_PIN_GPIO0_2, MFP_PIN_GPIO1_2, MFP_PIN_GPIO2_2, From 9db95cb6c430b3d9b8abbd5870e0d1e69b884ba0 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Mon, 31 Aug 2009 17:23:44 +0800 Subject: [PATCH 0159/3409] [ARM] pxa: expand irq support for PXA93x and PXA950 PXA93x/950 has additional 64 GPIOs, each is a secondary interrupt source for IRQ_GPIO_2_x, extend PXA_GPIO_IRQ_{BASE,NUM}. PXA93x/950 specific IRQ definitions are added as well. 
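The diff that follows routes GPIO0 and GPIO1 to their dedicated first-level IRQs and every other GPIO into the enlarged second-level range. A standalone sketch of the resulting arithmetic (the mapping macros are copied from the patch; PXA_IRQ() reduced to the identity and IRQ_GPIO0 pinned to 8 are assumptions made so the demo compiles on its own):

#include <stdio.h>

#define PXA_IRQ(x)		(x)
#define IRQ_GPIO0		PXA_IRQ(8)	/* demo value */
#define PXA_GPIO_IRQ_BASE	PXA_IRQ(96)
#define GPIO_2_x_TO_IRQ(x)	(PXA_GPIO_IRQ_BASE + (x))
#define IRQ_GPIO(x)	(((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x))

int main(void)
{
	/* GPIO0/1 keep their first-level IRQs; GPIO2..191 share the
	 * second-level range that now starts at PXA_IRQ(96). */
	printf("GPIO1   -> IRQ %d\n", IRQ_GPIO(1));	/* 9 */
	printf("GPIO2   -> IRQ %d\n", IRQ_GPIO(2));	/* 98 */
	printf("GPIO150 -> IRQ %d\n", IRQ_GPIO(150));	/* 246 */
	return 0;
}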
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/irqs.h | 28 +++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h index 6a1d95993342..abd2c2cf213c 100644 --- a/arch/arm/mach-pxa/include/mach/irqs.h +++ b/arch/arm/mach-pxa/include/mach/irqs.h @@ -68,6 +68,7 @@ #ifdef CONFIG_PXA3xx #define IRQ_SSP4 PXA_IRQ(13) /* SSP4 service request */ #define IRQ_CIR PXA_IRQ(34) /* Consumer IR */ +#define IRQ_COMM_WDT PXA_IRQ(35) /* Comm WDT interrupt */ #define IRQ_TSI PXA_IRQ(36) /* Touch Screen Interface (PXA320) */ #define IRQ_USIM2 PXA_IRQ(38) /* USIM2 Controller */ #define IRQ_GRPHICS PXA_IRQ(39) /* Graphics Controller */ @@ -81,8 +82,31 @@ #define IRQ_MMC3 PXA_IRQ(55) /* MMC3 Controller (PXA310) */ #endif -#define PXA_GPIO_IRQ_BASE PXA_IRQ(64) -#define PXA_GPIO_IRQ_NUM (128) +#ifdef CONFIG_CPU_PXA935 +#define IRQ_U2O PXA_IRQ(64) /* USB OTG 2.0 Controller (PXA935) */ +#define IRQ_U2H PXA_IRQ(65) /* USB Host 2.0 Controller (PXA935) */ + +#define IRQ_MMC3_PXA935 PXA_IRQ(72) /* MMC3 Controller (PXA935) */ +#define IRQ_MMC4_PXA935 PXA_IRQ(73) /* MMC4 Controller (PXA935) */ +#define IRQ_MMC5_PXA935 PXA_IRQ(74) /* MMC5 Controller (PXA935) */ + +#define IRQ_U2P PXA_IRQ(93) /* USB PHY D+/D- Lines (PXA935) */ +#endif + +#ifdef CONFIG_CPU_PXA930 +#define IRQ_ENHROT PXA_IRQ(37) /* Enhanced Rotary (PXA930) */ +#define IRQ_ACIPC0 PXA_IRQ(5) +#define IRQ_ACIPC1 PXA_IRQ(40) +#define IRQ_ACIPC2 PXA_IRQ(19) +#define IRQ_TRKBALL PXA_IRQ(43) /* Track Ball */ +#endif + +#ifdef CONFIG_CPU_PXA950 +#define IRQ_GC500 PXA_IRQ(70) /* Graphics Controller (PXA950) */ +#endif + +#define PXA_GPIO_IRQ_BASE PXA_IRQ(96) +#define PXA_GPIO_IRQ_NUM (192) #define GPIO_2_x_TO_IRQ(x) (PXA_GPIO_IRQ_BASE + (x)) #define IRQ_GPIO(x) (((x) < 2) ? (IRQ_GPIO0 + (x)) : GPIO_2_x_TO_IRQ(x)) From 337c1db645bce3f5a832129c4a803dc157ac7e9a Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Fri, 21 Aug 2009 16:10:41 +0800 Subject: [PATCH 0160/3409] [ARM] pxa: update cpu_is_xsc3() to include Marvell CPUID The CPU ID is different on Marvell chips, so update the check in cpu_is_xsc3() to cover both IDs.
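Both constants matched by the updated check below differ only in bits 31..24, the ARM implementer code; the remaining masked bits identify the same XSC3 core. A quick standalone illustration (constants taken from the patch):

#include <stdio.h>

int main(void)
{
	/* 0x69 is 'i' (Intel), 0x56 is 'V' (Marvell) */
	unsigned int xsc3_ids[] = { 0x69056000, 0x56056000 };
	int i;

	for (i = 0; i < 2; i++)
		printf("id=0x%08x implementer='%c'\n",
		       xsc3_ids[i], xsc3_ids[i] >> 24);
	return 0;
}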
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/include/asm/cputype.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index b3e656c6fb78..7c6e208a7de7 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -73,7 +73,10 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) #else static inline int cpu_is_xsc3(void) { - if ((read_cpuid_id() & 0xffffe000) == 0x69056000) + unsigned int id; + id = read_cpuid_id() & 0xffffe000; + /* It covers both Intel ID and Marvell ID */ + if ((id == 0x69056000) || (id == 0x56056000)) return 1; return 0; From 4646dd2795e793f97b7fd40206567bf72e6cdf21 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 26 Aug 2009 10:32:00 +0800 Subject: [PATCH 0161/3409] [ARM] pxa: add cpu_is_pxa950() and Kconfig options Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/Kconfig | 5 +++++ arch/arm/mach-pxa/include/mach/hardware.h | 17 +++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig index 89c992b8f75b..626cecb6e739 100644 --- a/arch/arm/mach-pxa/Kconfig +++ b/arch/arm/mach-pxa/Kconfig @@ -21,6 +21,11 @@ config CPU_PXA930 config CPU_PXA935 bool "PXA935 (codename Tavor-P65)" + select CPU_PXA930 + +config CPU_PXA950 + bool "PXA950 (codename Tavor-PV2)" + select CPU_PXA930 endmenu diff --git a/arch/arm/mach-pxa/include/mach/hardware.h b/arch/arm/mach-pxa/include/mach/hardware.h index 16ab79547dae..aa3d9f70a08a 100644 --- a/arch/arm/mach-pxa/include/mach/hardware.h +++ b/arch/arm/mach-pxa/include/mach/hardware.h @@ -197,6 +197,16 @@ #define __cpu_is_pxa935(id) (0) #endif +#ifdef CONFIG_CPU_PXA950 +#define __cpu_is_pxa950(id) \ + ({ \ + unsigned int _id = (id) >> 4 & 0xfff; \ + _id == 0x697; \ + }) +#else +#define __cpu_is_pxa950(id) (0) +#endif + #define cpu_is_pxa210() \ ({ \ __cpu_is_pxa210(read_cpuid_id()); \ @@ -249,6 +259,13 @@ __cpu_is_pxa935(id); \ }) +#define cpu_is_pxa950() \ + ({ \ + unsigned int id = read_cpuid(CPUID_ID); \ + __cpu_is_pxa950(id); \ + }) + + /* * CPUID Core Generation Bit * <= 0x2 for pxa21x/pxa25x/pxa26x/pxa27x From 6ba39282bb3ee486a142ee3fd61196d329622ed9 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 19 Aug 2009 19:30:24 +0800 Subject: [PATCH 0162/3409] [ARM] pxa: add more registers in interrupt controller Add priority registers and new registers of pxa935.
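The IPR(x) macro added in the diff that follows packs three discontiguous banks of priority registers into one accessor, switching base offset at IRQ 32 and IRQ 64; the 0x130..0x140 gap between IPR(63) and IPR(64) holds the new ICIP3/ICMR3/ICLR3/ICFP3/ICPR3 registers. A standalone check of the arithmetic, mirroring the macro without the __REG() MMIO wrapper:

#include <stdio.h>

static unsigned long ipr_addr(int x)
{
	return 0x40D0001C + (x < 32 ? (x << 2)
			   : (x < 64 ? 0x94 + ((x - 32) << 2)
				     : 0x128 + ((x - 64) << 2)));
}

int main(void)
{
	/* expected: 0x40D0001C, 0x40D00098, 0x40D000B0,
	 *           0x40D0012C, 0x40D00144 */
	int irqs[] = { 0, 31, 32, 63, 64 };
	int i;

	for (i = 0; i < 5; i++)
		printf("IPR(%2d) @ 0x%08lx\n", irqs[i], ipr_addr(irqs[i]));
	return 0;
}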
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/regs-intc.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/arm/mach-pxa/include/mach/regs-intc.h b/arch/arm/mach-pxa/include/mach/regs-intc.h index ad23e74b762f..68464ce1c1ea 100644 --- a/arch/arm/mach-pxa/include/mach/regs-intc.h +++ b/arch/arm/mach-pxa/include/mach/regs-intc.h @@ -13,6 +13,7 @@ #define ICFP __REG(0x40D0000C) /* Interrupt Controller FIQ Pending Register */ #define ICPR __REG(0x40D00010) /* Interrupt Controller Pending Register */ #define ICCR __REG(0x40D00014) /* Interrupt Controller Control Register */ +#define ICHP __REG(0x40D00018) /* Interrupt Controller Highest Priority Register */ #define ICIP2 __REG(0x40D0009C) /* Interrupt Controller IRQ Pending Register 2 */ #define ICMR2 __REG(0x40D000A0) /* Interrupt Controller Mask Register 2 */ @@ -20,4 +21,14 @@ #define ICFP2 __REG(0x40D000A8) /* Interrupt Controller FIQ Pending Register 2 */ #define ICPR2 __REG(0x40D000AC) /* Interrupt Controller Pending Register 2 */ +#define ICIP3 __REG(0x40D00130) /* Interrupt Controller IRQ Pending Register 3 */ +#define ICMR3 __REG(0x40D00134) /* Interrupt Controller Mask Register 3 */ +#define ICLR3 __REG(0x40D00138) /* Interrupt Controller Level Register 3 */ +#define ICFP3 __REG(0x40D0013C) /* Interrupt Controller FIQ Pending Register 3 */ +#define ICPR3 __REG(0x40D00140) /* Interrupt Controller Pending Register 3 */ + +#define IPR(x) __REG(0x40D0001C + (x < 32 ? (x << 2) \ + : (x < 64 ? (0x94 + ((x - 32) << 2)) \ + : (0x128 + ((x - 64) << 2))))) + #endif /* __ASM_MACH_REGS_INTC_H */ From d2c37068429b29d6549cf3486fc84b836689e122 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 19 Aug 2009 19:49:31 +0800 Subject: [PATCH 0163/3409] [ARM] pxa: initialize default interrupt priority and use ICHP for IRQ handling Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/entry-macro.S | 27 ++++++++------------ arch/arm/mach-pxa/irq.c | 8 +++++- 2 files changed, 17 insertions(+), 18 deletions(-) diff --git a/arch/arm/mach-pxa/include/mach/entry-macro.S b/arch/arm/mach-pxa/include/mach/entry-macro.S index f6b4bf3e73d2..241880608ac6 100644 --- a/arch/arm/mach-pxa/include/mach/entry-macro.S +++ b/arch/arm/mach-pxa/include/mach/entry-macro.S @@ -24,34 +24,27 @@ mov \tmp, \tmp, lsr #13 and \tmp, \tmp, #0x7 @ Core G cmp \tmp, #1 - bhi 1004f + bhi 1002f + @ Core Generation 1 (PXA25x) mov \base, #io_p2v(0x40000000) @ IIR Ctl = 0x40d00000 add \base, \base, #0x00d00000 ldr \irqstat, [\base, #0] @ ICIP ldr \irqnr, [\base, #4] @ ICMR - b 1002f -1004: - mrc p6, 0, \irqstat, c6, c0, 0 @ ICIP2 - mrc p6, 0, \irqnr, c7, c0, 0 @ ICMR2 - ands \irqnr, \irqstat, \irqnr - beq 1003f - rsb \irqstat, \irqnr, #0 - and \irqstat, \irqstat, \irqnr - clz \irqnr, \irqstat - rsb \irqnr, \irqnr, #31 - add \irqnr, \irqnr, #(32 + PXA_IRQ(0)) - b 1001f -1003: - mrc p6, 0, \irqstat, c0, c0, 0 @ ICIP - mrc p6, 0, \irqnr, c1, c0, 0 @ ICMR -1002: ands \irqnr, \irqstat, \irqnr beq 1001f rsb \irqstat, \irqnr, #0 and \irqstat, \irqstat, \irqnr clz \irqnr, \irqstat rsb \irqnr, \irqnr, #(31 + PXA_IRQ(0)) + b 1001f +1002: + @ Core Generation 2 (PXA27x) or Core Generation 3 (PXA3xx) + mrc p6, 0, \irqstat, c5, c0, 0 @ ICHP + tst \irqstat, #0x80000000 + beq 1001f + bic \irqstat, \irqstat, #0x80000000 + mov \irqnr, \irqstat, lsr #16 1001: .endm diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index f6e0300e4f64..d694ce289668 100644 --- a/arch/arm/mach-pxa/irq.c +++ 
b/arch/arm/mach-pxa/irq.c @@ -120,7 +120,7 @@ static void __init pxa_init_low_gpio_irq(set_wake_t fn) void __init pxa_init_irq(int irq_nr, set_wake_t fn) { - int irq; + int irq, i; pxa_internal_irq_nr = irq_nr; @@ -129,6 +129,12 @@ void __init pxa_init_irq(int irq_nr, set_wake_t fn) _ICLR(irq) = 0; /* all IRQs are IRQ, not FIQ */ } + /* initialize interrupt priority */ + if (cpu_is_pxa27x() || cpu_is_pxa3xx()) { + for (i = 0; i < irq_nr; i++) + IPR(i) = i | (1 << 31); + } + /* only unmasked interrupts kick us out of idle */ ICCR = 1; From f0f04f0840e41411895056bece50bfbd5dfc81cc Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 9 Jul 2009 19:04:48 +0200 Subject: [PATCH 0164/3409] [ARM] pxa: add clock definition for graphics controller Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/pxa3xx-regs.h | 4 ++-- arch/arm/mach-pxa/pxa300.c | 2 ++ arch/arm/mach-pxa/pxa320.c | 2 ++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h index 7d1a059b3d43..e91d63cfe811 100644 --- a/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h +++ b/arch/arm/mach-pxa/include/mach/pxa3xx-regs.h @@ -208,7 +208,7 @@ #define CKEN_MVED 43 /* < MVED clock enable */ /* Note: GCU clock enable bit differs on PXA300/PXA310 and PXA320 */ -#define PXA300_CKEN_GRAPHICS 42 /* Graphics controller clock enable */ -#define PXA320_CKEN_GRAPHICS 7 /* Graphics controller clock enable */ +#define CKEN_PXA300_GCU 42 /* Graphics controller clock enable */ +#define CKEN_PXA320_GCU 7 /* Graphics controller clock enable */ #endif /* __ASM_ARCH_PXA3XX_REGS_H */ diff --git a/arch/arm/mach-pxa/pxa300.c b/arch/arm/mach-pxa/pxa300.c index 4ba6d21f851c..f4af6e2bef89 100644 --- a/arch/arm/mach-pxa/pxa300.c +++ b/arch/arm/mach-pxa/pxa300.c @@ -84,9 +84,11 @@ static struct mfp_addr_map pxa310_mfp_addr_map[] __initdata = { }; static DEFINE_PXA3_CKEN(common_nand, NAND, 156000000, 0); +static DEFINE_PXA3_CKEN(gcu, PXA300_GCU, 0, 0); static struct clk_lookup common_clkregs[] = { INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", NULL), + INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL), }; static DEFINE_PXA3_CKEN(pxa310_mmc3, MMC3, 19500000, 0); diff --git a/arch/arm/mach-pxa/pxa320.c b/arch/arm/mach-pxa/pxa320.c index 8b3d97efadab..c7373e74a109 100644 --- a/arch/arm/mach-pxa/pxa320.c +++ b/arch/arm/mach-pxa/pxa320.c @@ -78,9 +78,11 @@ static struct mfp_addr_map pxa320_mfp_addr_map[] __initdata = { }; static DEFINE_PXA3_CKEN(pxa320_nand, NAND, 104000000, 0); +static DEFINE_PXA3_CKEN(gcu, PXA320_GCU, 0, 0); static struct clk_lookup pxa320_clkregs[] = { INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", NULL), + INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL), }; static int __init pxa320_init(void) From 1ff2c33e197f0a65e4641b2dbac1a8c85f5204dd Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 9 Jul 2009 19:04:49 +0200 Subject: [PATCH 0165/3409] [ARM] pxa: add device definition for graphics controller Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/devices.c | 27 +++++++++++++++++++++++++++ arch/arm/mach-pxa/devices.h | 2 ++ 2 files changed, 29 insertions(+) diff --git a/arch/arm/mach-pxa/devices.c b/arch/arm/mach-pxa/devices.c index ecc08f360b68..46fabe1cca11 100644 --- a/arch/arm/mach-pxa/devices.c +++ b/arch/arm/mach-pxa/devices.c @@ -935,6 +935,33 @@ void __init pxa3xx_set_nand_info(struct pxa3xx_nand_platform_data *info) { pxa_register_device(&pxa3xx_device_nand, info); } + +static struct resource 
pxa3xx_resources_gcu[] = { + { + .start = 0x54000000, + .end = 0x54000fff, + .flags = IORESOURCE_MEM, + }, + { + .start = IRQ_GCU, + .end = IRQ_GCU, + .flags = IORESOURCE_IRQ, + }, +}; + +static u64 pxa3xx_gcu_dmamask = DMA_BIT_MASK(32); + +struct platform_device pxa3xx_device_gcu = { + .name = "pxa3xx-gcu", + .id = -1, + .num_resources = ARRAY_SIZE(pxa3xx_resources_gcu), + .resource = pxa3xx_resources_gcu, + .dev = { + .dma_mask = &pxa3xx_gcu_dmamask, + .coherent_dma_mask = 0xffffffff, + }, +}; + #endif /* CONFIG_PXA3xx */ /* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1. diff --git a/arch/arm/mach-pxa/devices.h b/arch/arm/mach-pxa/devices.h index ecc24a4dca6d..93817d99761e 100644 --- a/arch/arm/mach-pxa/devices.h +++ b/arch/arm/mach-pxa/devices.h @@ -35,4 +35,6 @@ extern struct platform_device pxa27x_device_pwm1; extern struct platform_device pxa3xx_device_nand; extern struct platform_device pxa3xx_device_i2c_power; +extern struct platform_device pxa3xx_device_gcu; + void __init pxa_register_device(struct platform_device *dev, void *data); From 17e513ec79f9524f737f367a1efe97489aff9546 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Thu, 9 Jul 2009 19:04:50 +0200 Subject: [PATCH 0166/3409] [ARM] pxa: rename GCU IRQ for consistency For consistency reasons, rename IRQ_GRPHICS to IRQ_GCU. Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/irqs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h index abd2c2cf213c..7af0de4bf657 100644 --- a/arch/arm/mach-pxa/include/mach/irqs.h +++ b/arch/arm/mach-pxa/include/mach/irqs.h @@ -71,7 +71,7 @@ #define IRQ_COMM_WDT PXA_IRQ(35) /* Comm WDT interrupt */ #define IRQ_TSI PXA_IRQ(36) /* Touch Screen Interface (PXA320) */ #define IRQ_USIM2 PXA_IRQ(38) /* USIM2 Controller */ -#define IRQ_GRPHICS PXA_IRQ(39) /* Graphics Controller */ +#define IRQ_GCU PXA_IRQ(39) /* Graphics Controller */ #define IRQ_MMC2 PXA_IRQ(41) /* MMC2 Controller */ #define IRQ_1WIRE PXA_IRQ(44) /* 1-Wire Controller */ #define IRQ_NAND PXA_IRQ(45) /* NAND Controller */ From d7c46ddde3fc51e81b094f4cfdbf9097386d3044 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Fri, 4 Sep 2009 17:37:17 +0800 Subject: [PATCH 0167/3409] [ARM] pxa: update dependency of pxa i2c module The PXA I2C module is also used in the Marvell PXA168 and PXA910 series, so update the module dependency of PXA I2C accordingly. Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- drivers/i2c/busses/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 8206442fbabd..834a03414900 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -460,8 +460,8 @@ config I2C_PNX will be called i2c-pnx. config I2C_PXA - tristate "Intel PXA2XX I2C adapter (EXPERIMENTAL)" - depends on EXPERIMENTAL && ARCH_PXA + tristate "Intel PXA2XX I2C adapter" + depends on ARCH_PXA || ARCH_MMP help If you have devices in the PXA I2C bus, say yes to this option. This driver can also be built as a module. If so, the module From 36714c935a5d4125c6ccd013484de725dc09b387 Mon Sep 17 00:00:00 2001 From: Haojian Zhuang Date: Wed, 1 Jul 2009 14:17:16 +0800 Subject: [PATCH 0168/3409] [ARM] pxa: merge {zylonite,littleton}_defconfig into pxa3xx_defconfig Merge zylonite_defconfig and littleton_defconfig into pxa3xx_defconfig, since they are similar platforms serving the same SoC family.
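Tying the GCU patches above together: the platform device and the clk_lookup entries are both keyed on the device name "pxa3xx-gcu", so a driver can fetch its clock with a NULL con_id. A hypothetical probe sketch, not part of this series (error paths trimmed; clk_enable() matches the kernel API of this era):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int example_gcu_probe(struct platform_device *pdev)
{
	/* matched against INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL) */
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);
	/* ... ioremap pxa3xx_resources_gcu[0], request IRQ_GCU ... */
	return 0;
}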
Signed-off-by: Haojian Zhuang Signed-off-by: Eric Miao --- arch/arm/configs/littleton_defconfig | 783 --------------- arch/arm/configs/pxa3xx_defconfig | 1332 ++++++++++++++++++++++++++ arch/arm/configs/zylonite_defconfig | 736 -------------- 3 files changed, 1332 insertions(+), 1519 deletions(-) delete mode 100644 arch/arm/configs/littleton_defconfig create mode 100644 arch/arm/configs/pxa3xx_defconfig delete mode 100644 arch/arm/configs/zylonite_defconfig diff --git a/arch/arm/configs/littleton_defconfig b/arch/arm/configs/littleton_defconfig deleted file mode 100644 index 1db496908052..000000000000 --- a/arch/arm/configs/littleton_defconfig +++ /dev/null @@ -1,783 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux kernel version: 2.6.24-rc5 -# Fri Dec 21 11:06:19 2007 -# -CONFIG_ARM=y -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -CONFIG_GENERIC_GPIO=y -CONFIG_GENERIC_TIME=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_MMU=y -# CONFIG_NO_IOPORT is not set -CONFIG_GENERIC_HARDIRQS=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y -CONFIG_ARCH_MTD_XIP=y -CONFIG_VECTORS_BASE=0xffff0000 -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_LOCK_KERNEL=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -# CONFIG_POSIX_MQUEUE is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set -# CONFIG_AUDIT is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_CGROUPS is not set -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_FAIR_USER_SCHED=y -# CONFIG_FAIR_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y -# CONFIG_RELAY is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -# CONFIG_EMBEDDED is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -# CONFIG_KALLSYMS_EXTRA_PASS is not set -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_ANON_INODES=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_KMOD is not set -CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set -# CONFIG_BLK_DEV_BSG is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" - -# -# System Type -# -# CONFIG_ARCH_AAEC2000 is not set -# CONFIG_ARCH_INTEGRATOR is not set -# CONFIG_ARCH_REALVIEW is not set -# CONFIG_ARCH_VERSATILE is not set -# CONFIG_ARCH_AT91 is not set -# CONFIG_ARCH_CLPS7500 is not set 
-# CONFIG_ARCH_CLPS711X is not set -# CONFIG_ARCH_CO285 is not set -# CONFIG_ARCH_EBSA110 is not set -# CONFIG_ARCH_EP93XX is not set -# CONFIG_ARCH_FOOTBRIDGE is not set -# CONFIG_ARCH_NETX is not set -# CONFIG_ARCH_H720X is not set -# CONFIG_ARCH_IMX is not set -# CONFIG_ARCH_IOP13XX is not set -# CONFIG_ARCH_IOP32X is not set -# CONFIG_ARCH_IOP33X is not set -# CONFIG_ARCH_IXP23XX is not set -# CONFIG_ARCH_IXP2000 is not set -# CONFIG_ARCH_IXP4XX is not set -# CONFIG_ARCH_L7200 is not set -# CONFIG_ARCH_KS8695 is not set -# CONFIG_ARCH_NS9XXX is not set -# CONFIG_ARCH_MXC is not set -# CONFIG_ARCH_PNX4008 is not set -CONFIG_ARCH_PXA=y -# CONFIG_ARCH_RPC is not set -# CONFIG_ARCH_SA1100 is not set -# CONFIG_ARCH_S3C2410 is not set -# CONFIG_ARCH_SHARK is not set -# CONFIG_ARCH_LH7A40X is not set -# CONFIG_ARCH_DAVINCI is not set -# CONFIG_ARCH_OMAP is not set - -# -# Intel PXA2xx/PXA3xx Implementations -# - -# -# Supported PXA3xx Processor Variants -# -CONFIG_CPU_PXA300=y -CONFIG_CPU_PXA310=y -# CONFIG_CPU_PXA320 is not set -# CONFIG_ARCH_LUBBOCK is not set -# CONFIG_MACH_LOGICPD_PXA270 is not set -# CONFIG_MACH_MAINSTONE is not set -# CONFIG_ARCH_PXA_IDP is not set -# CONFIG_PXA_SHARPSL is not set -# CONFIG_MACH_TRIZEPS4 is not set -# CONFIG_MACH_EM_X270 is not set -# CONFIG_MACH_ZYLONITE is not set -CONFIG_MACH_LITTLETON=y -# CONFIG_MACH_ARMCORE is not set -CONFIG_PXA3xx=y -CONFIG_PXA_SSP=y - -# -# Boot options -# - -# -# Power management -# - -# -# Processor Type -# -CONFIG_CPU_32=y -CONFIG_CPU_XSC3=y -CONFIG_CPU_32v5=y -CONFIG_CPU_ABRT_EV5T=y -CONFIG_CPU_CACHE_VIVT=y -CONFIG_CPU_TLB_V4WBI=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_IO_36=y - -# -# Processor Features -# -# CONFIG_ARM_THUMB is not set -# CONFIG_CPU_DCACHE_DISABLE is not set -# CONFIG_CPU_BPREDICT_DISABLE is not set -# CONFIG_OUTER_CACHE is not set -CONFIG_IWMMXT=y - -# -# Bus support -# -# CONFIG_PCI_SYSCALL is not set -# CONFIG_ARCH_SUPPORTS_MSI is not set -# CONFIG_PCCARD is not set - -# -# Kernel Features -# -CONFIG_TICK_ONESHOT=y -# CONFIG_NO_HZ is not set -# CONFIG_HIGH_RES_TIMERS is not set -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_PREEMPT=y -CONFIG_HZ=100 -CONFIG_AEABI=y -CONFIG_OABI_COMPAT=y -# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set -CONFIG_SPLIT_PTLOCK_CPUS=4096 -# CONFIG_RESOURCES_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_ALIGNMENT_TRAP=y - -# -# Boot options -# -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS2,38400 mem=64M" -# CONFIG_XIP_KERNEL is not set -# CONFIG_KEXEC is not set - -# -# CPU Frequency scaling -# -# CONFIG_CPU_FREQ is not set - -# -# Floating point emulation -# - -# -# At least one emulation must be selected -# -CONFIG_FPE_NWFPE=y -# CONFIG_FPE_NWFPE_XP is not set -# CONFIG_FPE_FASTFPE is not set - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_AOUT is not set -# CONFIG_BINFMT_MISC is not set - -# -# Power management options -# -# CONFIG_PM is not set -CONFIG_SUSPEND_UP_POSSIBLE=y - -# -# Networking -# -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set 
-CONFIG_UNIX=y -CONFIG_XFRM=y -# CONFIG_XFRM_USER is not set -# CONFIG_XFRM_SUB_POLICY is not set -# CONFIG_XFRM_MIGRATE is not set -# CONFIG_NET_KEY is not set -CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set -CONFIG_IP_FIB_HASH=y -CONFIG_IP_PNP=y -# CONFIG_IP_PNP_DHCP is not set -# CONFIG_IP_PNP_BOOTP is not set -# CONFIG_IP_PNP_RARP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y -# CONFIG_INET_LRO is not set -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_TCP_CONG_ADVANCED is not set -CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_BRIDGE is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_NET_SCHED is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set - -# -# Wireless -# -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -# CONFIG_STANDALONE is not set -# CONFIG_PREVENT_FIRMWARE_BUILD is not set -CONFIG_FW_LOADER=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_CONNECTOR is not set -# CONFIG_MTD is not set -# CONFIG_PARPORT is not set -# CONFIG_BLK_DEV is not set -# CONFIG_MISC_DEVICES is not set -# CONFIG_IDE is not set - -# -# SCSI device support -# -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_NETLINK is not set -# CONFIG_ATA is not set -# CONFIG_MD is not set -CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set -# CONFIG_DUMMY is not set -# CONFIG_BONDING is not set -# CONFIG_MACVLAN is not set -# CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set -# CONFIG_VETH is not set -# CONFIG_PHYLIB is not set -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -# CONFIG_AX88796 is not set -CONFIG_SMC91X=y -# CONFIG_DM9000 is not set -# CONFIG_SMC911X is not set -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_B44 is not set -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_WAN is not set -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -# CONFIG_SHAPER is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -# 
CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -# CONFIG_INPUT_EVDEV is not set -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -# CONFIG_VT_HW_CONSOLE_BINDING is not set -# CONFIG_SERIAL_NONSTANDARD is not set - -# -# Serial drivers -# -# CONFIG_SERIAL_8250 is not set - -# -# Non-8250 serial port support -# -CONFIG_SERIAL_PXA=y -CONFIG_SERIAL_PXA_CONSOLE=y -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_IPMI_HANDLER is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_NVRAM is not set -# CONFIG_R3964 is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set - -# -# SPI support -# -# CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set -# CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set -# CONFIG_HWMON is not set -# CONFIG_WATCHDOG is not set - -# -# Sonics Silicon Backplane -# -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set - -# -# Multifunction device drivers -# -# CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set - -# -# Graphics support -# -# CONFIG_VGASTATE is not set -# CONFIG_VIDEO_OUTPUT_CONTROL is not set -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -# CONFIG_FB_DDC is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_SYS_FOPS is not set -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_S1D13XXX is not set -CONFIG_FB_PXA=y -# CONFIG_FB_PXA_PARAMETERS is not set -# CONFIG_FB_MBX is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set - -# -# Console display driver support -# -# CONFIG_VGA_CONSOLE is not set -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -CONFIG_FONTS=y -# CONFIG_FONT_8x8 is not set -CONFIG_FONT_8x16=y -# CONFIG_FONT_6x11 is not set -# CONFIG_FONT_7x14 is not set -# CONFIG_FONT_PEARL_8x8 is not set -# CONFIG_FONT_ACORN_8x8 is not set -# CONFIG_FONT_MINI_4x6 is not set -# CONFIG_FONT_SUN8x16 is not set -# CONFIG_FONT_SUN12x22 is not set -# CONFIG_FONT_10x18 is not set -CONFIG_LOGO=y -CONFIG_LOGO_LINUX_MONO=y -CONFIG_LOGO_LINUX_VGA16=y -CONFIG_LOGO_LINUX_CLUT224=y - -# -# Sound -# -# CONFIG_SOUND is not set -# CONFIG_HID_SUPPORT is not set -# CONFIG_USB_SUPPORT 
is not set -# CONFIG_MMC is not set -# CONFIG_NEW_LEDS is not set -CONFIG_RTC_LIB=y -# CONFIG_RTC_CLASS is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4DEV_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_FS_POSIX_ACL=y -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_INOTIFY is not set -# CONFIG_QUOTA is not set -# CONFIG_DNOTIFY is not set -# CONFIG_AUTOFS_FS is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -# CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_SYSFS=y -# CONFIG_TMPFS is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is not set - -# -# Miscellaneous filesystems -# -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_CRAMFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_DIRECTIO=y -# CONFIG_NFSD is not set -CONFIG_ROOT_NFS=y -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -# CONFIG_SUNRPC_BIND34 is not set -CONFIG_RPCSEC_GSS_KRB5=y -# CONFIG_RPCSEC_GSS_SPKM3 is not set -# CONFIG_SMB_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_NLS is not set -# CONFIG_DLM is not set -# CONFIG_INSTRUMENTATION is not set - -# -# Kernel hacking -# -CONFIG_PRINTK_TIME=y -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_MAGIC_SYSRQ=y -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set -# CONFIG_HEADERS_CHECK is not set -CONFIG_DEBUG_KERNEL=y -# CONFIG_DEBUG_SHIRQ is not set -CONFIG_DETECT_SOFTLOCKUP=y -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_TIMER_STATS is not set -# CONFIG_DEBUG_SLAB is not set -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_RT_MUTEX_TESTER is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_SPINLOCK_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_SG is not set -CONFIG_FRAME_POINTER=y -CONFIG_FORCED_INLINING=y -# CONFIG_BOOT_PRINTK_DELAY is not set -# CONFIG_RCU_TORTURE_TEST is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_SAMPLES is not set -CONFIG_DEBUG_USER=y -CONFIG_DEBUG_ERRORS=y -CONFIG_DEBUG_LL=y -# CONFIG_DEBUG_ICEDCC is not set - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is 
not set -CONFIG_CRYPTO=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_MANAGER=y -# CONFIG_CRYPTO_HMAC is not set -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_MD4 is not set -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_WP512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_ECB is not set -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_XTS is not set -# CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_AES is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_TEST is not set -# CONFIG_CRYPTO_AUTHENC is not set -CONFIG_CRYPTO_HW=y - -# -# Library routines -# -CONFIG_BITREVERSE=y -CONFIG_CRC_CCITT=y -# CONFIG_CRC16 is not set -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -CONFIG_PLIST=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y diff --git a/arch/arm/configs/pxa3xx_defconfig b/arch/arm/configs/pxa3xx_defconfig new file mode 100644 index 000000000000..733b851e5b7e --- /dev/null +++ b/arch/arm/configs/pxa3xx_defconfig @@ -0,0 +1,1332 @@ +# +# Automatically generated make config: don't edit +# Linux kernel version: 2.6.31-rc1 +# Mon Jul 13 22:48:49 2009 +# +CONFIG_ARM=y +CONFIG_HAVE_PWM=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_GENERIC_GPIO=y +CONFIG_GENERIC_TIME=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_MMU=y +CONFIG_GENERIC_HARDIRQS=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_HAVE_LATENCYTOP_SUPPORT=y +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_RWSEM_GENERIC_SPINLOCK=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_MTD_XIP=y +CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" +CONFIG_CONSTRUCTORS=y + +# +# General setup +# +CONFIG_EXPERIMENTAL=y +CONFIG_BROKEN_ON_SMP=y +CONFIG_LOCK_KERNEL=y +CONFIG_INIT_ENV_ARG_LIMIT=32 +CONFIG_LOCALVERSION="" +CONFIG_LOCALVERSION_AUTO=y +CONFIG_SWAP=y +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set +# CONFIG_AUDIT is not set + +# +# RCU Subsystem +# +# CONFIG_CLASSIC_RCU is not set +CONFIG_TREE_RCU=y +# CONFIG_PREEMPT_RCU is not set +# CONFIG_RCU_TRACE is not set +CONFIG_RCU_FANOUT=32 +# CONFIG_RCU_FANOUT_EXACT is not set +# CONFIG_TREE_RCU_TRACE is not set +# CONFIG_PREEMPT_RCU_TRACE is not set +# CONFIG_IKCONFIG is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_GROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +# CONFIG_RT_GROUP_SCHED is not set +CONFIG_USER_SCHED=y +# CONFIG_CGROUP_SCHED is not set +# CONFIG_CGROUPS is not set +CONFIG_SYSFS_DEPRECATED=y +CONFIG_SYSFS_DEPRECATED_V2=y +# CONFIG_RELAY is not set +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not 
set +# CONFIG_IPC_NS is not set +# CONFIG_USER_NS is not set +# CONFIG_PID_NS is not set +# CONFIG_NET_NS is not set +# CONFIG_BLK_DEV_INITRD is not set +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_SYSCTL=y +CONFIG_ANON_INODES=y +# CONFIG_EMBEDDED is not set +CONFIG_UID16=y +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_EXTRA_PASS is not set +CONFIG_HOTPLUG=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y + +# +# Performance Counters +# +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_STRIP_ASM_SYMS is not set +CONFIG_COMPAT_BRK=y +CONFIG_SLAB=y +# CONFIG_SLUB is not set +# CONFIG_SLOB is not set +# CONFIG_PROFILING is not set +# CONFIG_MARKERS is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_CLK=y + +# +# GCOV-based kernel profiling +# +# CONFIG_SLOW_WORK is not set +CONFIG_HAVE_GENERIC_DMA_COHERENT=y +CONFIG_SLABINFO=y +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULES=y +# CONFIG_MODULE_FORCE_LOAD is not set +# CONFIG_MODULE_UNLOAD is not set +# CONFIG_MODVERSIONS is not set +# CONFIG_MODULE_SRCVERSION_ALL is not set +CONFIG_BLOCK=y +CONFIG_LBDAF=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_BLK_DEV_INTEGRITY is not set + +# +# IO Schedulers +# +CONFIG_IOSCHED_NOOP=y +CONFIG_IOSCHED_AS=y +CONFIG_IOSCHED_DEADLINE=y +CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_AS is not set +# CONFIG_DEFAULT_DEADLINE is not set +CONFIG_DEFAULT_CFQ=y +# CONFIG_DEFAULT_NOOP is not set +CONFIG_DEFAULT_IOSCHED="cfq" +# CONFIG_FREEZER is not set + +# +# System Type +# +# CONFIG_ARCH_AAEC2000 is not set +# CONFIG_ARCH_INTEGRATOR is not set +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_STMP3XXX is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_H720X is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP23XX is not set +# CONFIG_ARCH_IXP2000 is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_L7200 is not set +# CONFIG_ARCH_KIRKWOOD is not set +# CONFIG_ARCH_LOKI is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 is not set +# CONFIG_ARCH_NS9XXX is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_PNX4008 is not set +CONFIG_ARCH_PXA=y +# CONFIG_ARCH_MSM is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C2410 is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_SHARK is not set +# CONFIG_ARCH_LH7A40X is not set +# CONFIG_ARCH_U300 is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP is not set + +# +# Intel PXA2xx/PXA3xx Implementations +# + +# +# Supported PXA3xx Processor Variants +# +CONFIG_CPU_PXA300=y +CONFIG_CPU_PXA310=y +CONFIG_CPU_PXA320=y +CONFIG_CPU_PXA930=y +CONFIG_CPU_PXA935=y +# CONFIG_ARCH_GUMSTIX is not set +# CONFIG_MACH_INTELMOTE2 is not set +# CONFIG_MACH_STARGATE2 is not set +# CONFIG_ARCH_LUBBOCK is not set +# CONFIG_MACH_LOGICPD_PXA270 is not set +# CONFIG_MACH_MAINSTONE is not set +# CONFIG_MACH_MP900C is not set +# CONFIG_ARCH_PXA_IDP is not set 
+# CONFIG_PXA_SHARPSL is not set +# CONFIG_ARCH_VIPER is not set +# CONFIG_ARCH_PXA_ESERIES is not set +# CONFIG_TRIZEPS_PXA is not set +# CONFIG_MACH_H5000 is not set +# CONFIG_MACH_EM_X270 is not set +# CONFIG_MACH_EXEDA is not set +# CONFIG_MACH_COLIBRI is not set +# CONFIG_MACH_COLIBRI300 is not set +# CONFIG_MACH_COLIBRI320 is not set +CONFIG_MACH_ZYLONITE=y +CONFIG_MACH_LITTLETON=y +CONFIG_MACH_TAVOREVB=y +CONFIG_MACH_SAAR=y +# CONFIG_MACH_ARMCORE is not set +# CONFIG_MACH_CM_X300 is not set +# CONFIG_MACH_H4700 is not set +# CONFIG_MACH_MAGICIAN is not set +# CONFIG_MACH_HIMALAYA is not set +# CONFIG_MACH_MIOA701 is not set +# CONFIG_MACH_PCM027 is not set +# CONFIG_ARCH_PXA_PALM is not set +# CONFIG_MACH_CSB726 is not set +# CONFIG_PXA_EZX is not set +CONFIG_PXA3xx=y +CONFIG_PXA_SSP=y +CONFIG_PXA_HAVE_BOARD_IRQS=y +CONFIG_PLAT_PXA=y + +# +# Processor Type +# +CONFIG_CPU_32=y +CONFIG_CPU_XSC3=y +CONFIG_CPU_32v5=y +CONFIG_CPU_ABRT_EV5T=y +CONFIG_CPU_PABRT_NOIFAR=y +CONFIG_CPU_CACHE_VIVT=y +CONFIG_CPU_TLB_V4WBI=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y +CONFIG_IO_36=y + +# +# Processor Features +# +CONFIG_ARM_THUMB=y +# CONFIG_CPU_DCACHE_DISABLE is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_OUTER_CACHE=y +CONFIG_CACHE_XSC3L2=y +CONFIG_IWMMXT=y +CONFIG_COMMON_CLKDEV=y + +# +# Bus support +# +# CONFIG_PCI_SYSCALL is not set +# CONFIG_ARCH_SUPPORTS_MSI is not set +# CONFIG_PCCARD is not set + +# +# Kernel Features +# +CONFIG_TICK_ONESHOT=y +# CONFIG_NO_HZ is not set +# CONFIG_HIGH_RES_TIMERS is not set +CONFIG_GENERIC_CLOCKEVENTS_BUILD=y +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_PREEMPT=y +CONFIG_HZ=100 +CONFIG_AEABI=y +CONFIG_OABI_COMPAT=y +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set +# CONFIG_HIGHMEM is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_FLATMEM_MANUAL=y +# CONFIG_DISCONTIGMEM_MANUAL is not set +# CONFIG_SPARSEMEM_MANUAL is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y +CONFIG_PAGEFLAGS_EXTENDED=y +CONFIG_SPLIT_PTLOCK_CPUS=4096 +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +CONFIG_VIRT_TO_BUS=y +CONFIG_HAVE_MLOCK=y +CONFIG_HAVE_MLOCKED_PAGE_BIT=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set + +# +# Boot options +# +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfsroot/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,115200 mem=64M debug" +# CONFIG_XIP_KERNEL is not set +# CONFIG_KEXEC is not set + +# +# CPU Power Management +# +# CONFIG_CPU_FREQ is not set +# CONFIG_CPU_IDLE is not set + +# +# Floating point emulation +# + +# +# At least one emulation must be selected +# +CONFIG_FPE_NWFPE=y +# CONFIG_FPE_NWFPE_XP is not set +# CONFIG_FPE_FASTFPE is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_HAVE_AOUT=y +# CONFIG_BINFMT_AOUT is not set +# CONFIG_BINFMT_MISC is not set + +# +# Power management options +# +# CONFIG_PM is not set +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_NET=y + +# +# Networking options +# +CONFIG_PACKET=y +# CONFIG_PACKET_MMAP is not set +CONFIG_UNIX=y +# CONFIG_NET_KEY is not set +CONFIG_INET=y +# CONFIG_IP_MULTICAST is not set +# CONFIG_IP_ADVANCED_ROUTER is not set +CONFIG_IP_FIB_HASH=y +CONFIG_IP_PNP=y +# CONFIG_IP_PNP_DHCP is not set +# CONFIG_IP_PNP_BOOTP is not 
set +# CONFIG_IP_PNP_RARP is not set +# CONFIG_NET_IPIP is not set +# CONFIG_NET_IPGRE is not set +# CONFIG_ARPD is not set +# CONFIG_SYN_COOKIES is not set +# CONFIG_INET_AH is not set +# CONFIG_INET_ESP is not set +# CONFIG_INET_IPCOMP is not set +# CONFIG_INET_XFRM_TUNNEL is not set +# CONFIG_INET_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set +CONFIG_TCP_CONG_CUBIC=y +CONFIG_DEFAULT_TCP_CONG="cubic" +# CONFIG_TCP_MD5SIG is not set +# CONFIG_IPV6 is not set +# CONFIG_NETWORK_SECMARK is not set +# CONFIG_NETFILTER is not set +# CONFIG_IP_DCCP is not set +# CONFIG_IP_SCTP is not set +# CONFIG_TIPC is not set +# CONFIG_ATM is not set +# CONFIG_BRIDGE is not set +# CONFIG_NET_DSA is not set +# CONFIG_VLAN_8021Q is not set +# CONFIG_DECNET is not set +# CONFIG_LLC2 is not set +# CONFIG_IPX is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_ECONET is not set +# CONFIG_WAN_ROUTER is not set +# CONFIG_PHONET is not set +# CONFIG_IEEE802154 is not set +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set + +# +# Network testing +# +# CONFIG_NET_PKTGEN is not set +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_IRDA is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_WIRELESS is not set +# CONFIG_WIMAX is not set +# CONFIG_RFKILL is not set +# CONFIG_NET_9P is not set + +# +# Device Drivers +# + +# +# Generic Driver Options +# +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_FW_LOADER=y +CONFIG_FIRMWARE_IN_KERNEL=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_SYS_HYPERVISOR is not set +# CONFIG_CONNECTOR is not set +CONFIG_MTD=y +# CONFIG_MTD_DEBUG is not set +CONFIG_MTD_CONCAT=y +CONFIG_MTD_PARTITIONS=y +# CONFIG_MTD_TESTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_AR7_PARTS is not set + +# +# User Modules And Translation Layers +# +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLKDEVS=y +CONFIG_MTD_BLOCK=y +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_MTD_OOPS is not set + +# +# RAM/ROM/Flash chip drivers +# +# CONFIG_MTD_CFI is not set +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set +# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +# CONFIG_MTD_CFI_I4 is not set +# CONFIG_MTD_CFI_I8 is not set +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PLATRAM is not set + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_M25P80 is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOC2000 is not set +# CONFIG_MTD_DOC2001 is not set +# CONFIG_MTD_DOC2001PLUS is not set +CONFIG_MTD_NAND=y 
+CONFIG_MTD_NAND_VERIFY_WRITE=y +# CONFIG_MTD_NAND_ECC_SMC is not set +# CONFIG_MTD_NAND_MUSEUM_IDS is not set +# CONFIG_MTD_NAND_H1900 is not set +# CONFIG_MTD_NAND_GPIO is not set +CONFIG_MTD_NAND_IDS=y +# CONFIG_MTD_NAND_DISKONCHIP is not set +# CONFIG_MTD_NAND_SHARPSL is not set +CONFIG_MTD_NAND_PXA3xx=y +CONFIG_MTD_NAND_PXA3xx_BUILTIN=y +# CONFIG_MTD_NAND_NANDSIM is not set +# CONFIG_MTD_NAND_PLATFORM is not set +CONFIG_MTD_ONENAND=y +CONFIG_MTD_ONENAND_VERIFY_WRITE=y +CONFIG_MTD_ONENAND_GENERIC=y +# CONFIG_MTD_ONENAND_OTP is not set +# CONFIG_MTD_ONENAND_2X_PROGRAM is not set +# CONFIG_MTD_ONENAND_SIM is not set + +# +# LPDDR flash memory drivers +# +# CONFIG_MTD_LPDDR is not set + +# +# UBI - Unsorted block images +# +# CONFIG_MTD_UBI is not set +# CONFIG_PARPORT is not set +CONFIG_BLK_DEV=y +# CONFIG_BLK_DEV_COW_COMMON is not set +CONFIG_BLK_DEV_LOOP=y +# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_NBD is not set +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=4096 +# CONFIG_BLK_DEV_XIP is not set +# CONFIG_CDROM_PKTCDVD is not set +# CONFIG_ATA_OVER_ETH is not set +# CONFIG_MG_DISK is not set +# CONFIG_MISC_DEVICES is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set + +# +# SCSI device support +# +# CONFIG_RAID_ATTRS is not set +# CONFIG_SCSI is not set +# CONFIG_SCSI_DMA is not set +# CONFIG_SCSI_NETLINK is not set +# CONFIG_ATA is not set +# CONFIG_MD is not set +CONFIG_NETDEVICES=y +# CONFIG_DUMMY is not set +# CONFIG_BONDING is not set +# CONFIG_MACVLAN is not set +# CONFIG_EQUALIZER is not set +# CONFIG_TUN is not set +# CONFIG_VETH is not set +# CONFIG_PHYLIB is not set +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +# CONFIG_AX88796 is not set +CONFIG_SMC91X=y +# CONFIG_DM9000 is not set +# CONFIG_ENC28J60 is not set +# CONFIG_ETHOC is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set +# CONFIG_DNET is not set +# CONFIG_IBM_NEW_EMAC_ZMII is not set +# CONFIG_IBM_NEW_EMAC_RGMII is not set +# CONFIG_IBM_NEW_EMAC_TAH is not set +# CONFIG_IBM_NEW_EMAC_EMAC4 is not set +# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set +# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set +# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set +# CONFIG_B44 is not set +# CONFIG_KS8842 is not set +# CONFIG_NETDEV_1000 is not set +# CONFIG_NETDEV_10000 is not set + +# +# Wireless LAN +# +# CONFIG_WLAN_PRE80211 is not set +# CONFIG_WLAN_80211 is not set + +# +# Enable WiMAX (Networking options) to see the WiMAX drivers +# +# CONFIG_WAN is not set +# CONFIG_PPP is not set +# CONFIG_SLIP is not set +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +# CONFIG_INPUT_FF_MEMLESS is not set +# CONFIG_INPUT_POLLDEV is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +# CONFIG_INPUT_EVDEV is not set +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_LM8323 is not set +CONFIG_KEYBOARD_PXA27x=y +CONFIG_KEYBOARD_PXA930_ROTARY=y +CONFIG_KEYBOARD_GPIO=y +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y 
+CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +# CONFIG_MOUSE_PS2_ELANTECH is not set +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +# CONFIG_MOUSE_SERIAL is not set +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_PXA930_TRKBALL=y +# CONFIG_MOUSE_SYNAPTICS_I2C is not set +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879_I2C is not set +# CONFIG_TOUCHSCREEN_AD7879_SPI is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +CONFIG_TOUCHSCREEN_DA9034=y +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_ELO is not set +# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_MK712 is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_W90X900 is not set +# CONFIG_INPUT_MISC is not set + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_LIBPS2=y +# CONFIG_SERIO_RAW is not set +# CONFIG_GAMEPORT is not set + +# +# Character devices +# +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_HW_CONSOLE=y +# CONFIG_VT_HW_CONSOLE_BINDING is not set +CONFIG_DEVKMEM=y +# CONFIG_SERIAL_NONSTANDARD is not set + +# +# Serial drivers +# +# CONFIG_SERIAL_8250 is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +CONFIG_SERIAL_PXA=y +CONFIG_SERIAL_PXA_CONSOLE=y +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_UNIX98_PTYS=y +# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set +# CONFIG_LEGACY_PTYS is not set +# CONFIG_IPMI_HANDLER is not set +# CONFIG_HW_RANDOM is not set +# CONFIG_R3964 is not set +# CONFIG_RAW_DRIVER is not set +# CONFIG_TCG_TPM is not set +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +# CONFIG_I2C_CHARDEV is not set +# CONFIG_I2C_HELPER_AUTO is not set + +# +# I2C Algorithms +# +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set + +# +# I2C Hardware Bus support +# + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_DESIGNWARE is not set +# CONFIG_I2C_GPIO is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PXA=y +# CONFIG_I2C_PXA_SLAVE is not set +# CONFIG_I2C_SIMTEC is not set + +# +# External I2C/SMBus adapter drivers +# +# CONFIG_I2C_PARPORT_LIGHT is not set +# CONFIG_I2C_TAOS_EVM is not set + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_PCA_PLATFORM is not set +# CONFIG_I2C_STUB is not set + +# +# Miscellaneous I2C Chip support +# +# CONFIG_DS1682 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# CONFIG_I2C_DEBUG_CHIP is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_GPIO is not set +CONFIG_SPI_PXA2XX=y + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_TLE62X0 is not set +CONFIG_ARCH_REQUIRE_GPIOLIB=y +CONFIG_GPIOLIB=y +# CONFIG_DEBUG_GPIO is not set +# CONFIG_GPIO_SYSFS is not set + +# +# 
Memory mapped GPIO expanders: +# + +# +# I2C GPIO expanders: +# +CONFIG_GPIO_MAX732X=y +CONFIG_GPIO_PCA953X=y +CONFIG_GPIO_PCF857X=y + +# +# PCI GPIO expanders: +# + +# +# SPI GPIO expanders: +# +CONFIG_GPIO_MAX7301=y +# CONFIG_GPIO_MCP23S08 is not set +# CONFIG_W1 is not set +CONFIG_POWER_SUPPLY=y +CONFIG_POWER_SUPPLY_DEBUG=y +CONFIG_PDA_POWER=y +# CONFIG_BATTERY_DS2760 is not set +# CONFIG_BATTERY_BQ27x00 is not set +CONFIG_BATTERY_DA9030=y +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_HWMON is not set +# CONFIG_THERMAL is not set +# CONFIG_THERMAL_HWMON is not set +# CONFIG_WATCHDOG is not set +CONFIG_SSB_POSSIBLE=y + +# +# Sonics Silicon Backplane +# +# CONFIG_SSB is not set + +# +# Multifunction device drivers +# +# CONFIG_MFD_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_ASIC3 is not set +# CONFIG_HTC_EGPIO is not set +# CONFIG_HTC_PASIC3 is not set +# CONFIG_TPS65010 is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set +CONFIG_PMIC_DA903X=y +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_AB3100_CORE is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +# CONFIG_VGASTATE is not set +# CONFIG_VIDEO_OUTPUT_CONTROL is not set +CONFIG_FB=y +# CONFIG_FIRMWARE_EDID is not set +# CONFIG_FB_DDC is not set +# CONFIG_FB_BOOT_VESA_SUPPORT is not set +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set +# CONFIG_FB_SYS_FILLRECT is not set +# CONFIG_FB_SYS_COPYAREA is not set +# CONFIG_FB_SYS_IMAGEBLIT is not set +# CONFIG_FB_FOREIGN_ENDIAN is not set +# CONFIG_FB_SYS_FOPS is not set +# CONFIG_FB_SVGALIB is not set +# CONFIG_FB_MACMODES is not set +# CONFIG_FB_BACKLIGHT is not set +# CONFIG_FB_MODE_HELPERS is not set +# CONFIG_FB_TILEBLITTING is not set + +# +# Frame buffer hardware drivers +# +# CONFIG_FB_S1D13XXX is not set +CONFIG_FB_PXA=y +# CONFIG_FB_PXA_OVERLAY is not set +# CONFIG_FB_PXA_SMARTPANEL is not set +# CONFIG_FB_PXA_PARAMETERS is not set +# CONFIG_FB_MBX is not set +# CONFIG_FB_W100 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_BROADSHEET is not set +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI9320 is not set +CONFIG_LCD_TDO24M=y +# CONFIG_LCD_VGG2432A4 is not set +# CONFIG_LCD_PLATFORM is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_GENERIC is not set +CONFIG_BACKLIGHT_PWM=y +CONFIG_BACKLIGHT_DA903X=y + +# +# Display device support +# +# CONFIG_DISPLAY_SUPPORT is not set + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set +CONFIG_FONTS=y +# CONFIG_FONT_8x8 is not set +# CONFIG_FONT_8x16 is not set +CONFIG_FONT_6x11=y +# CONFIG_FONT_7x14 is not set +# CONFIG_FONT_PEARL_8x8 is not set +# CONFIG_FONT_ACORN_8x8 is not set +# CONFIG_FONT_MINI_4x6 is not set +# CONFIG_FONT_SUN8x16 is not set +# CONFIG_FONT_SUN12x22 is not set +# CONFIG_FONT_10x18 is not set +CONFIG_LOGO=y +CONFIG_LOGO_LINUX_MONO=y +CONFIG_LOGO_LINUX_VGA16=y +CONFIG_LOGO_LINUX_CLUT224=y +# CONFIG_SOUND is not set +# CONFIG_HID_SUPPORT is not set +# CONFIG_USB_SUPPORT is not 
set +CONFIG_MMC=y +# CONFIG_MMC_DEBUG is not set +# CONFIG_MMC_UNSAFE_RESUME is not set + +# +# MMC/SD/SDIO Card Drivers +# +CONFIG_MMC_BLOCK=y +CONFIG_MMC_BLOCK_BOUNCE=y +# CONFIG_SDIO_UART is not set +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +CONFIG_MMC_PXA=y +# CONFIG_MMC_SDHCI is not set +# CONFIG_MMC_SPI is not set +# CONFIG_MEMSTICK is not set +# CONFIG_ACCESSIBILITY is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=m + +# +# LED drivers +# +# CONFIG_LEDS_PCA9532 is not set +CONFIG_LEDS_GPIO=m +CONFIG_LEDS_GPIO_PLATFORM=y +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_PCA955X is not set +CONFIG_LEDS_DA903X=m +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2802 is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_GPIO=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_RTC_LIB=y +# CONFIG_RTC_CLASS is not set +# CONFIG_DMADEVICES is not set +# CONFIG_AUXDISPLAY is not set +CONFIG_REGULATOR=y +CONFIG_REGULATOR_DEBUG=y +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +CONFIG_REGULATOR_VIRTUAL_CONSUMER=y +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_BQ24022 is not set +# CONFIG_REGULATOR_MAX1586 is not set +CONFIG_REGULATOR_DA903X=y +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_UIO is not set +# CONFIG_STAGING is not set + +# +# File systems +# +# CONFIG_EXT2_FS is not set +# CONFIG_EXT3_FS is not set +# CONFIG_EXT4_FS is not set +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_FS_POSIX_ACL=y +# CONFIG_XFS_FS is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +# CONFIG_BTRFS_FS is not set +CONFIG_FILE_LOCKING=y +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +# CONFIG_INOTIFY is not set +CONFIG_INOTIFY_USER=y +# CONFIG_QUOTA is not set +# CONFIG_AUTOFS_FS is not set +# CONFIG_AUTOFS4_FS is not set +# CONFIG_FUSE_FS is not set + +# +# Caches +# +# CONFIG_FSCACHE is not set + +# +# CD-ROM/DVD Filesystems +# +# CONFIG_ISO9660_FS is not set +# CONFIG_UDF_FS is not set + +# +# DOS/FAT/NT Filesystems +# +# CONFIG_MSDOS_FS is not set +# CONFIG_VFAT_FS is not set +# CONFIG_NTFS_FS is not set + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_SYSFS=y +# CONFIG_TMPFS is not set +# CONFIG_HUGETLB_PAGE is not set +# CONFIG_CONFIGFS_FS is not set +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +CONFIG_JFFS2_FS=y +CONFIG_JFFS2_FS_DEBUG=0 +CONFIG_JFFS2_FS_WRITEBUFFER=y +CONFIG_JFFS2_FS_WBUF_VERIFY=y +# CONFIG_JFFS2_SUMMARY is not set +# CONFIG_JFFS2_FS_XATTR is not set +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_ZLIB=y +CONFIG_JFFS2_LZO=y +CONFIG_JFFS2_RTIME=y +CONFIG_JFFS2_RUBIN=y +# CONFIG_JFFS2_CMODE_NONE is not set +CONFIG_JFFS2_CMODE_PRIORITY=y +# CONFIG_JFFS2_CMODE_SIZE is not set +# CONFIG_JFFS2_CMODE_FAVOURLZO is not set +# CONFIG_CRAMFS is not set +# CONFIG_SQUASHFS is not set +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_ROMFS_FS is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +# CONFIG_NILFS2_FS is not set 
+CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +# CONFIG_NFS_V4_1 is not set +CONFIG_ROOT_NFS=y +# CONFIG_NFSD is not set +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_RPCSEC_GSS_KRB5=y +# CONFIG_RPCSEC_GSS_SPKM3 is not set +# CONFIG_SMB_FS is not set +# CONFIG_CIFS is not set +# CONFIG_NCP_FS is not set +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set + +# +# Partition Types +# +# CONFIG_PARTITION_ADVANCED is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="iso8859-1" +# CONFIG_NLS_CODEPAGE_437 is not set +# CONFIG_NLS_CODEPAGE_737 is not set +# CONFIG_NLS_CODEPAGE_775 is not set +# CONFIG_NLS_CODEPAGE_850 is not set +# CONFIG_NLS_CODEPAGE_852 is not set +# CONFIG_NLS_CODEPAGE_855 is not set +# CONFIG_NLS_CODEPAGE_857 is not set +# CONFIG_NLS_CODEPAGE_860 is not set +# CONFIG_NLS_CODEPAGE_861 is not set +# CONFIG_NLS_CODEPAGE_862 is not set +# CONFIG_NLS_CODEPAGE_863 is not set +# CONFIG_NLS_CODEPAGE_864 is not set +# CONFIG_NLS_CODEPAGE_865 is not set +# CONFIG_NLS_CODEPAGE_866 is not set +# CONFIG_NLS_CODEPAGE_869 is not set +# CONFIG_NLS_CODEPAGE_936 is not set +# CONFIG_NLS_CODEPAGE_950 is not set +# CONFIG_NLS_CODEPAGE_932 is not set +# CONFIG_NLS_CODEPAGE_949 is not set +# CONFIG_NLS_CODEPAGE_874 is not set +# CONFIG_NLS_ISO8859_8 is not set +# CONFIG_NLS_CODEPAGE_1250 is not set +# CONFIG_NLS_CODEPAGE_1251 is not set +# CONFIG_NLS_ASCII is not set +# CONFIG_NLS_ISO8859_1 is not set +# CONFIG_NLS_ISO8859_2 is not set +# CONFIG_NLS_ISO8859_3 is not set +# CONFIG_NLS_ISO8859_4 is not set +# CONFIG_NLS_ISO8859_5 is not set +# CONFIG_NLS_ISO8859_6 is not set +# CONFIG_NLS_ISO8859_7 is not set +# CONFIG_NLS_ISO8859_9 is not set +# CONFIG_NLS_ISO8859_13 is not set +# CONFIG_NLS_ISO8859_14 is not set +# CONFIG_NLS_ISO8859_15 is not set +# CONFIG_NLS_KOI8_R is not set +# CONFIG_NLS_KOI8_U is not set +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set + +# +# Kernel hacking +# +CONFIG_PRINTK_TIME=y +CONFIG_ENABLE_WARN_DEPRECATED=y +CONFIG_ENABLE_MUST_CHECK=y +CONFIG_FRAME_WARN=1024 +CONFIG_MAGIC_SYSRQ=y +# CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_DEBUG_FS is not set +# CONFIG_HEADERS_CHECK is not set +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_DETECT_SOFTLOCKUP=y +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y +CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=1 +CONFIG_DETECT_HUNG_TASK=y +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHEDSTATS is not set +# CONFIG_TIMER_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_DEBUG_SLAB is not set +# CONFIG_DEBUG_KMEMLEAK is not set +CONFIG_DEBUG_PREEMPT=y +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_RT_MUTEX_TESTER is not set +CONFIG_DEBUG_SPINLOCK=y +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_SPINLOCK_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_DEBUG_KOBJECT is not set +CONFIG_DEBUG_BUGVERBOSE=y +# CONFIG_DEBUG_INFO is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_WRITECOUNT is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_LIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_BOOT_PRINTK_DELAY is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_CPU_STALL_DETECTOR is not set +# 
CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set +# CONFIG_FAULT_INJECTION is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_SYSCTL_SYSCALL_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_TRACING_SUPPORT=y +# CONFIG_FTRACE is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_KMEMCHECK is not set +CONFIG_ARM_UNWIND=y +CONFIG_DEBUG_USER=y +# CONFIG_DEBUG_ERRORS is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_DEBUG_LL is not set + +# +# Security options +# +# CONFIG_KEYS is not set +# CONFIG_SECURITY is not set +# CONFIG_SECURITYFS is not set +# CONFIG_SECURITY_FILE_CAPABILITIES is not set +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +# CONFIG_CRYPTO_FIPS is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_BLKCIPHER=y +CONFIG_CRYPTO_BLKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_PCOMP=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +# CONFIG_CRYPTO_GF128MUL is not set +# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_WORKQUEUE=y +# CONFIG_CRYPTO_CRYPTD is not set +# CONFIG_CRYPTO_AUTHENC is not set +# CONFIG_CRYPTO_TEST is not set + +# +# Authenticated Encryption with Associated Data +# +# CONFIG_CRYPTO_CCM is not set +# CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_SEQIV is not set + +# +# Block modes +# +CONFIG_CRYPTO_CBC=y +# CONFIG_CRYPTO_CTR is not set +# CONFIG_CRYPTO_CTS is not set +# CONFIG_CRYPTO_ECB is not set +# CONFIG_CRYPTO_LRW is not set +# CONFIG_CRYPTO_PCBC is not set +# CONFIG_CRYPTO_XTS is not set + +# +# Hash modes +# +# CONFIG_CRYPTO_HMAC is not set +# CONFIG_CRYPTO_XCBC is not set + +# +# Digest +# +# CONFIG_CRYPTO_CRC32C is not set +# CONFIG_CRYPTO_MD4 is not set +CONFIG_CRYPTO_MD5=y +# CONFIG_CRYPTO_MICHAEL_MIC is not set +# CONFIG_CRYPTO_RMD128 is not set +# CONFIG_CRYPTO_RMD160 is not set +# CONFIG_CRYPTO_RMD256 is not set +# CONFIG_CRYPTO_RMD320 is not set +# CONFIG_CRYPTO_SHA1 is not set +# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA512 is not set +# CONFIG_CRYPTO_TGR192 is not set +# CONFIG_CRYPTO_WP512 is not set + +# +# Ciphers +# +# CONFIG_CRYPTO_AES is not set +# CONFIG_CRYPTO_ANUBIS is not set +# CONFIG_CRYPTO_ARC4 is not set +# CONFIG_CRYPTO_BLOWFISH is not set +# CONFIG_CRYPTO_CAMELLIA is not set +# CONFIG_CRYPTO_CAST5 is not set +# CONFIG_CRYPTO_CAST6 is not set +CONFIG_CRYPTO_DES=y +# CONFIG_CRYPTO_FCRYPT is not set +# CONFIG_CRYPTO_KHAZAD is not set +# CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_SEED is not set +# CONFIG_CRYPTO_SERPENT is not set +# CONFIG_CRYPTO_TEA is not set +# CONFIG_CRYPTO_TWOFISH is not set + +# +# Compression +# +# CONFIG_CRYPTO_DEFLATE is not set +# CONFIG_CRYPTO_ZLIB is not set +# CONFIG_CRYPTO_LZO is not set + +# +# Random Number Generation +# +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set +# CONFIG_BINARY_PRINTF is not set + +# +# Library routines +# +CONFIG_BITREVERSE=y +CONFIG_GENERIC_FIND_LAST_BIT=y +# CONFIG_CRC_CCITT is not set +# CONFIG_CRC16 is not set +# CONFIG_CRC_T10DIF is not set +# CONFIG_CRC_ITU_T is not set +CONFIG_CRC32=y +# CONFIG_CRC7 is not set +# CONFIG_LIBCRC32C is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_DMA=y +CONFIG_NLATTR=y diff --git a/arch/arm/configs/zylonite_defconfig b/arch/arm/configs/zylonite_defconfig deleted 
file mode 100644 index 7949d04a3602..000000000000 --- a/arch/arm/configs/zylonite_defconfig +++ /dev/null @@ -1,736 +0,0 @@ -# -# Automatically generated make config: don't edit -# Linux kernel version: 2.6.23 -# Tue Oct 23 13:33:20 2007 -# -CONFIG_ARM=y -CONFIG_SYS_SUPPORTS_APM_EMULATION=y -CONFIG_GENERIC_GPIO=y -CONFIG_GENERIC_TIME=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_MMU=y -# CONFIG_NO_IOPORT is not set -CONFIG_GENERIC_HARDIRQS=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_TRACE_IRQFLAGS_SUPPORT=y -CONFIG_HARDIRQS_SW_RESEND=y -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_RWSEM_GENERIC_SPINLOCK=y -# CONFIG_ARCH_HAS_ILOG2_U32 is not set -# CONFIG_ARCH_HAS_ILOG2_U64 is not set -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y -CONFIG_ARCH_MTD_XIP=y -CONFIG_VECTORS_BASE=0xffff0000 -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" - -# -# General setup -# -CONFIG_EXPERIMENTAL=y -CONFIG_BROKEN_ON_SMP=y -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="" -CONFIG_LOCALVERSION_AUTO=y -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -# CONFIG_POSIX_MQUEUE is not set -# CONFIG_BSD_PROCESS_ACCT is not set -# CONFIG_TASKSTATS is not set -# CONFIG_USER_NS is not set -# CONFIG_AUDIT is not set -# CONFIG_IKCONFIG is not set -CONFIG_LOG_BUF_SHIFT=18 -# CONFIG_CGROUPS is not set -CONFIG_FAIR_GROUP_SCHED=y -CONFIG_FAIR_USER_SCHED=y -# CONFIG_FAIR_CGROUP_SCHED is not set -CONFIG_SYSFS_DEPRECATED=y -# CONFIG_RELAY is not set -# CONFIG_BLK_DEV_INITRD is not set -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -CONFIG_SYSCTL=y -# CONFIG_EMBEDDED is not set -CONFIG_UID16=y -CONFIG_SYSCTL_SYSCALL=y -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_EXTRA_PASS is not set -CONFIG_HOTPLUG=y -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_ANON_INODES=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_EVENTFD=y -CONFIG_SHMEM=y -CONFIG_VM_EVENT_COUNTERS=y -CONFIG_SLUB_DEBUG=y -# CONFIG_SLAB is not set -CONFIG_SLUB=y -# CONFIG_SLOB is not set -CONFIG_RT_MUTEXES=y -# CONFIG_TINY_SHMEM is not set -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_KMOD is not set -CONFIG_BLOCK=y -# CONFIG_LBD is not set -# CONFIG_BLK_DEV_IO_TRACE is not set -# CONFIG_LSF is not set -# CONFIG_BLK_DEV_BSG is not set - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_AS=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -# CONFIG_DEFAULT_AS is not set -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" - -# -# System Type -# -# CONFIG_ARCH_AAEC2000 is not set -# CONFIG_ARCH_INTEGRATOR is not set -# CONFIG_ARCH_REALVIEW is not set -# CONFIG_ARCH_VERSATILE is not set -# CONFIG_ARCH_AT91 is not set -# CONFIG_ARCH_CLPS7500 is not set -# CONFIG_ARCH_CLPS711X is not set -# CONFIG_ARCH_CO285 is not set -# CONFIG_ARCH_EBSA110 is not set -# CONFIG_ARCH_EP93XX is not set -# CONFIG_ARCH_FOOTBRIDGE is not set -# CONFIG_ARCH_NETX is not set -# CONFIG_ARCH_H720X is not set -# CONFIG_ARCH_IMX is not set -# CONFIG_ARCH_IOP13XX is not set -# CONFIG_ARCH_IOP32X is not set -# CONFIG_ARCH_IOP33X is not set -# CONFIG_ARCH_IXP23XX is not set -# CONFIG_ARCH_IXP2000 is not set -# CONFIG_ARCH_IXP4XX is not set -# CONFIG_ARCH_L7200 is not set -# CONFIG_ARCH_KS8695 is not set -# CONFIG_ARCH_NS9XXX is not set -# CONFIG_ARCH_MXC is not set -# CONFIG_ARCH_PNX4008 is not set -CONFIG_ARCH_PXA=y -# 
CONFIG_ARCH_RPC is not set -# CONFIG_ARCH_SA1100 is not set -# CONFIG_ARCH_S3C2410 is not set -# CONFIG_ARCH_SHARK is not set -# CONFIG_ARCH_LH7A40X is not set -# CONFIG_ARCH_DAVINCI is not set -# CONFIG_ARCH_OMAP is not set - -# -# Intel PXA2xx/PXA3xx Implementations -# - -# -# Supported PXA3xx Processor Variants -# -CONFIG_CPU_PXA300=y -CONFIG_CPU_PXA310=y -CONFIG_CPU_PXA320=y -# CONFIG_ARCH_LUBBOCK is not set -# CONFIG_MACH_LOGICPD_PXA270 is not set -# CONFIG_MACH_MAINSTONE is not set -# CONFIG_ARCH_PXA_IDP is not set -# CONFIG_PXA_SHARPSL is not set -# CONFIG_MACH_TRIZEPS4 is not set -# CONFIG_MACH_EM_X270 is not set -CONFIG_MACH_ZYLONITE=y -# CONFIG_MACH_ARMCORE is not set -CONFIG_PXA3xx=y - -# -# Boot options -# - -# -# Power management -# - -# -# Processor Type -# -CONFIG_CPU_32=y -CONFIG_CPU_XSC3=y -CONFIG_CPU_32v5=y -CONFIG_CPU_ABRT_EV5T=y -CONFIG_CPU_CACHE_VIVT=y -CONFIG_CPU_TLB_V4WBI=y -CONFIG_CPU_CP15=y -CONFIG_CPU_CP15_MMU=y -CONFIG_IO_36=y - -# -# Processor Features -# -# CONFIG_ARM_THUMB is not set -# CONFIG_CPU_DCACHE_DISABLE is not set -# CONFIG_CPU_BPREDICT_DISABLE is not set -# CONFIG_OUTER_CACHE is not set -CONFIG_IWMMXT=y - -# -# Bus support -# -# CONFIG_PCI_SYSCALL is not set -# CONFIG_ARCH_SUPPORTS_MSI is not set -# CONFIG_PCCARD is not set - -# -# Kernel Features -# -# CONFIG_TICK_ONESHOT is not set -# CONFIG_NO_HZ is not set -# CONFIG_HIGH_RES_TIMERS is not set -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -# CONFIG_PREEMPT is not set -CONFIG_HZ=100 -CONFIG_AEABI=y -CONFIG_OABI_COMPAT=y -# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_FLATMEM_MANUAL=y -# CONFIG_DISCONTIGMEM_MANUAL is not set -# CONFIG_SPARSEMEM_MANUAL is not set -CONFIG_FLATMEM=y -CONFIG_FLAT_NODE_MEM_MAP=y -# CONFIG_SPARSEMEM_STATIC is not set -# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set -CONFIG_SPLIT_PTLOCK_CPUS=4096 -# CONFIG_RESOURCES_64BIT is not set -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_VIRT_TO_BUS=y -CONFIG_ALIGNMENT_TRAP=y - -# -# Boot options -# -CONFIG_ZBOOT_ROM_TEXT=0x0 -CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="root=/dev/nfs rootfstype=nfs nfsroot=192.168.1.100:/nfs/rootfs/ ip=192.168.1.101:192.168.1.100::255.255.255.0::eth0:on console=ttyS0,38400 mem=64M debug" -# CONFIG_XIP_KERNEL is not set -# CONFIG_KEXEC is not set - -# -# Floating point emulation -# - -# -# At least one emulation must be selected -# -CONFIG_FPE_NWFPE=y -# CONFIG_FPE_NWFPE_XP is not set -# CONFIG_FPE_FASTFPE is not set - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -# CONFIG_BINFMT_AOUT is not set -# CONFIG_BINFMT_MISC is not set - -# -# Power management options -# -# CONFIG_PM is not set -CONFIG_SUSPEND_UP_POSSIBLE=y - -# -# Networking -# -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -# CONFIG_PACKET_MMAP is not set -CONFIG_UNIX=y -# CONFIG_NET_KEY is not set -CONFIG_INET=y -# CONFIG_IP_MULTICAST is not set -# CONFIG_IP_ADVANCED_ROUTER is not set -CONFIG_IP_FIB_HASH=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE is not set -# CONFIG_ARPD is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -# CONFIG_INET_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -# CONFIG_INET_LRO is not set -# CONFIG_INET_DIAG is not set -# CONFIG_TCP_CONG_ADVANCED is not set 
-CONFIG_TCP_CONG_CUBIC=y -CONFIG_DEFAULT_TCP_CONG="cubic" -# CONFIG_TCP_MD5SIG is not set -# CONFIG_IPV6 is not set -# CONFIG_INET6_XFRM_TUNNEL is not set -# CONFIG_INET6_TUNNEL is not set -# CONFIG_NETWORK_SECMARK is not set -# CONFIG_NETFILTER is not set -# CONFIG_IP_DCCP is not set -# CONFIG_IP_SCTP is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_BRIDGE is not set -# CONFIG_VLAN_8021Q is not set -# CONFIG_DECNET is not set -# CONFIG_LLC2 is not set -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_ECONET is not set -# CONFIG_WAN_ROUTER is not set -# CONFIG_NET_SCHED is not set - -# -# Network testing -# -# CONFIG_NET_PKTGEN is not set -# CONFIG_HAMRADIO is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -# CONFIG_AF_RXRPC is not set - -# -# Wireless -# -# CONFIG_CFG80211 is not set -# CONFIG_WIRELESS_EXT is not set -# CONFIG_MAC80211 is not set -# CONFIG_IEEE80211 is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_CONNECTOR is not set -# CONFIG_MTD is not set -# CONFIG_PARPORT is not set -# CONFIG_BLK_DEV is not set -# CONFIG_MISC_DEVICES is not set -# CONFIG_IDE is not set - -# -# SCSI device support -# -# CONFIG_RAID_ATTRS is not set -# CONFIG_SCSI is not set -# CONFIG_SCSI_DMA is not set -# CONFIG_SCSI_NETLINK is not set -# CONFIG_ATA is not set -# CONFIG_MD is not set -CONFIG_NETDEVICES=y -# CONFIG_NETDEVICES_MULTIQUEUE is not set -# CONFIG_DUMMY is not set -# CONFIG_BONDING is not set -# CONFIG_MACVLAN is not set -# CONFIG_EQUALIZER is not set -# CONFIG_TUN is not set -# CONFIG_VETH is not set -# CONFIG_PHYLIB is not set -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -# CONFIG_AX88796 is not set -CONFIG_SMC91X=y -# CONFIG_DM9000 is not set -# CONFIG_SMC911X is not set -# CONFIG_IBM_NEW_EMAC_ZMII is not set -# CONFIG_IBM_NEW_EMAC_RGMII is not set -# CONFIG_IBM_NEW_EMAC_TAH is not set -# CONFIG_IBM_NEW_EMAC_EMAC4 is not set -# CONFIG_B44 is not set -# CONFIG_NETDEV_1000 is not set -# CONFIG_NETDEV_10000 is not set - -# -# Wireless LAN -# -# CONFIG_WLAN_PRE80211 is not set -# CONFIG_WLAN_80211 is not set -# CONFIG_WAN is not set -# CONFIG_PPP is not set -# CONFIG_SLIP is not set -# CONFIG_SHAPER is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -# CONFIG_INPUT_MOUSEDEV_PSAUX is not set -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -# CONFIG_INPUT_EVDEV is not set -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -# CONFIG_GAMEPORT is not set - -# -# Character devices -# -CONFIG_VT=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -# CONFIG_VT_HW_CONSOLE_BINDING is not set -# CONFIG_SERIAL_NONSTANDARD is not set - -# -# Serial drivers -# -# CONFIG_SERIAL_8250 is not set - -# 
-# Non-8250 serial port support -# -CONFIG_SERIAL_PXA=y -CONFIG_SERIAL_PXA_CONSOLE=y -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_UNIX98_PTYS=y -# CONFIG_LEGACY_PTYS is not set -# CONFIG_IPMI_HANDLER is not set -# CONFIG_HW_RANDOM is not set -# CONFIG_NVRAM is not set -# CONFIG_R3964 is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_I2C is not set - -# -# SPI support -# -# CONFIG_SPI is not set -# CONFIG_SPI_MASTER is not set -# CONFIG_W1 is not set -# CONFIG_POWER_SUPPLY is not set -# CONFIG_HWMON is not set - -# -# Sonics Silicon Backplane -# -CONFIG_SSB_POSSIBLE=y -# CONFIG_SSB is not set - -# -# Multifunction device drivers -# -# CONFIG_MFD_SM501 is not set - -# -# Multimedia devices -# -# CONFIG_VIDEO_DEV is not set -# CONFIG_DVB_CORE is not set -# CONFIG_DAB is not set - -# -# Graphics support -# -# CONFIG_VGASTATE is not set -# CONFIG_VIDEO_OUTPUT_CONTROL is not set -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -# CONFIG_FB_DDC is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_SYS_FOPS is not set -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -# CONFIG_FB_MODE_HELPERS is not set -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_S1D13XXX is not set -CONFIG_FB_PXA=y -# CONFIG_FB_PXA_PARAMETERS is not set -# CONFIG_FB_MBX is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set - -# -# Display device support -# -# CONFIG_DISPLAY_SUPPORT is not set - -# -# Console display driver support -# -# CONFIG_VGA_CONSOLE is not set -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -CONFIG_FONTS=y -# CONFIG_FONT_8x8 is not set -# CONFIG_FONT_8x16 is not set -CONFIG_FONT_6x11=y -# CONFIG_FONT_7x14 is not set -# CONFIG_FONT_PEARL_8x8 is not set -# CONFIG_FONT_ACORN_8x8 is not set -# CONFIG_FONT_MINI_4x6 is not set -# CONFIG_FONT_SUN8x16 is not set -# CONFIG_FONT_SUN12x22 is not set -# CONFIG_FONT_10x18 is not set -CONFIG_LOGO=y -CONFIG_LOGO_LINUX_MONO=y -CONFIG_LOGO_LINUX_VGA16=y -CONFIG_LOGO_LINUX_CLUT224=y - -# -# Sound -# -# CONFIG_SOUND is not set -# CONFIG_HID_SUPPORT is not set -# CONFIG_USB_SUPPORT is not set -# CONFIG_MMC is not set -# CONFIG_NEW_LEDS is not set -CONFIG_RTC_LIB=y -# CONFIG_RTC_CLASS is not set - -# -# File systems -# -# CONFIG_EXT2_FS is not set -# CONFIG_EXT3_FS is not set -# CONFIG_EXT4DEV_FS is not set -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -CONFIG_FS_POSIX_ACL=y -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_INOTIFY is not set -# CONFIG_QUOTA is not set -CONFIG_DNOTIFY=y -# CONFIG_AUTOFS_FS is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set - -# -# CD-ROM/DVD Filesystems -# -# CONFIG_ISO9660_FS is not set -# CONFIG_UDF_FS is not set - -# -# DOS/FAT/NT Filesystems -# -# CONFIG_MSDOS_FS is not set -# CONFIG_VFAT_FS is not set -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_SYSCTL=y -CONFIG_SYSFS=y -# CONFIG_TMPFS is not set -# CONFIG_HUGETLB_PAGE is not set -# CONFIG_CONFIGFS_FS is 
not set - -# -# Miscellaneous filesystems -# -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_CRAMFS is not set -# CONFIG_VXFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_DIRECTIO=y -# CONFIG_NFSD is not set -CONFIG_ROOT_NFS=y -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -# CONFIG_SUNRPC_BIND34 is not set -CONFIG_RPCSEC_GSS_KRB5=y -# CONFIG_RPCSEC_GSS_SPKM3 is not set -# CONFIG_SMB_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set - -# -# Partition Types -# -# CONFIG_PARTITION_ADVANCED is not set -CONFIG_MSDOS_PARTITION=y -# CONFIG_NLS is not set -# CONFIG_DLM is not set -# CONFIG_INSTRUMENTATION is not set - -# -# Kernel hacking -# -# CONFIG_PRINTK_TIME is not set -CONFIG_ENABLE_MUST_CHECK=y -# CONFIG_MAGIC_SYSRQ is not set -# CONFIG_UNUSED_SYMBOLS is not set -# CONFIG_DEBUG_FS is not set -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_KERNEL is not set -# CONFIG_SLUB_DEBUG_ON is not set -CONFIG_DEBUG_BUGVERBOSE=y -CONFIG_FRAME_POINTER=y -# CONFIG_SAMPLES is not set -CONFIG_DEBUG_USER=y - -# -# Security options -# -# CONFIG_KEYS is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITY_FILE_CAPABILITIES is not set -CONFIG_CRYPTO=y -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_MANAGER=y -# CONFIG_CRYPTO_HMAC is not set -# CONFIG_CRYPTO_XCBC is not set -# CONFIG_CRYPTO_NULL is not set -# CONFIG_CRYPTO_MD4 is not set -CONFIG_CRYPTO_MD5=y -# CONFIG_CRYPTO_SHA1 is not set -# CONFIG_CRYPTO_SHA256 is not set -# CONFIG_CRYPTO_SHA512 is not set -# CONFIG_CRYPTO_WP512 is not set -# CONFIG_CRYPTO_TGR192 is not set -# CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_ECB is not set -CONFIG_CRYPTO_CBC=y -# CONFIG_CRYPTO_PCBC is not set -# CONFIG_CRYPTO_LRW is not set -# CONFIG_CRYPTO_XTS is not set -# CONFIG_CRYPTO_CRYPTD is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_FCRYPT is not set -# CONFIG_CRYPTO_BLOWFISH is not set -# CONFIG_CRYPTO_TWOFISH is not set -# CONFIG_CRYPTO_SERPENT is not set -# CONFIG_CRYPTO_AES is not set -# CONFIG_CRYPTO_CAST5 is not set -# CONFIG_CRYPTO_CAST6 is not set -# CONFIG_CRYPTO_TEA is not set -# CONFIG_CRYPTO_ARC4 is not set -# CONFIG_CRYPTO_KHAZAD is not set -# CONFIG_CRYPTO_ANUBIS is not set -# CONFIG_CRYPTO_SEED is not set -# CONFIG_CRYPTO_DEFLATE is not set -# CONFIG_CRYPTO_MICHAEL_MIC is not set -# CONFIG_CRYPTO_CRC32C is not set -# CONFIG_CRYPTO_CAMELLIA is not set -# CONFIG_CRYPTO_TEST is not set -# CONFIG_CRYPTO_AUTHENC is not set -# CONFIG_CRYPTO_HW is not set - -# -# Library routines -# -CONFIG_BITREVERSE=y -# CONFIG_CRC_CCITT is not set -# CONFIG_CRC16 is not set -# CONFIG_CRC_ITU_T is not set -CONFIG_CRC32=y -# CONFIG_CRC7 is not set -# CONFIG_LIBCRC32C is not set -CONFIG_PLIST=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT=y -CONFIG_HAS_DMA=y From 43c6342b1562cc27d8ba1240220cb887ad0e36f0 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Sat, 8 Aug 2009 23:07:20 +0200 Subject: [PATCH 0169/3409] [ARM] pxa/dma: cosmetic move of EXPORT_SYMBOL under their functions Signed-off-by: Robert Jarzmik Signed-off-by: Eric Miao --- 
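For context on the convention this patch applies (the commit message states none): kernel style keeps each EXPORT_SYMBOL() directly under the definition of the function it exports, so the export is visible where the code is read and cannot be orphaned if the function later moves or is deleted. A minimal sketch of the resulting shape, with an illustrative function name rather than anything from this driver:

    #include <linux/module.h>

    int example_request_channel(int prio)
    {
            /* ... claim and return a channel number ... */
            return 0;
    }
    EXPORT_SYMBOL(example_request_channel);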
arch/arm/plat-pxa/dma.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index 70aeee407f7d..897663dbc5f4 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c @@ -71,6 +71,7 @@ int pxa_request_dma (char *name, pxa_dma_prio prio, local_irq_restore(flags); return i; } +EXPORT_SYMBOL(pxa_request_dma); void pxa_free_dma (int dma_ch) { @@ -88,6 +89,7 @@ void pxa_free_dma (int dma_ch) dma_channels[dma_ch].name = NULL; local_irq_restore(flags); } +EXPORT_SYMBOL(pxa_free_dma); static irqreturn_t dma_irq_handler(int irq, void *dev_id) { @@ -139,6 +141,3 @@ int __init pxa_init_dma(int irq, int num_ch) num_dma_channels = num_ch; return 0; } - -EXPORT_SYMBOL(pxa_request_dma); -EXPORT_SYMBOL(pxa_free_dma); From d46f5e4a20867da84d12a35407cf7dcc8713c9cc Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Sat, 8 Aug 2009 23:07:21 +0200 Subject: [PATCH 0170/3409] [ARM] pxa/dma: optimize irq handler loop Reduce the loop over dma irq handler callbacks to the minimum required. Since V1: included a suggestion from Nicolas Pitre to improve the loop even further. Signed-off-by: Robert Jarzmik Signed-off-by: Eric Miao --- arch/arm/plat-pxa/dma.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index 897663dbc5f4..56b51cf0718b 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c @@ -94,20 +94,21 @@ EXPORT_SYMBOL(pxa_free_dma); static irqreturn_t dma_irq_handler(int irq, void *dev_id) { int i, dint = DINT; + struct dma_channel *channel; - for (i = 0; i < num_dma_channels; i++) { - if (dint & (1 << i)) { - struct dma_channel *channel = &dma_channels[i]; - if (channel->name && channel->irq_handler) { - channel->irq_handler(i, channel->data); - } else { - /* - * IRQ for an unregistered DMA channel: - * let's clear the interrupts and disable it. - */ - printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i); - DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; - } + while (dint) { + i = __ffs(dint); + dint &= (dint - 1); + channel = &dma_channels[i]; + if (channel->name && channel->irq_handler) { + channel->irq_handler(i, channel->data); + } else { + /* + * IRQ for an unregistered DMA channel: + * let's clear the interrupts and disable it. + */ + printk (KERN_WARNING "spurious IRQ for DMA channel %d\n", i); + DCSR(i) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR; } } return IRQ_HANDLED; From d294948c2ce4e1c85f452154469752cc9b8e876d Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Sat, 8 Aug 2009 23:07:22 +0200 Subject: [PATCH 0171/3409] [ARM] pxa/dma: add debugfs entries Add some debug information for PXA DMA: - descriptors queued - channel state - global state -- Since V1: reverted to old register access (no more dma_readl() or dma_writel()).
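Two asides may help here. First, on the loop rewrite in the previous patch: while (dint) { i = __ffs(dint); dint &= (dint - 1); ... } is the standard set-bit iteration idiom. __ffs() returns the index of the lowest set bit, and dint &= (dint - 1) clears exactly that bit, so the body runs once per pending channel instead of once per possible channel. A freestanding sketch of the idiom (plain C; __ffs() replaced by a portable builtin purely for illustration):

    unsigned int pending = 0x0a;               /* bits 1 and 3 set */
    while (pending) {
            int i = __builtin_ctz(pending);    /* lowest set bit: 1, then 3 */
            pending &= (pending - 1);          /* clear exactly that bit */
            /* handle channel i */
    }

Second, the debugfs code in the diff below stamps out one read-only seq_file per entry via the DBGFS_FUNC_DECL() macro. For readers unfamiliar with that idiom, a minimal sketch of a single such file, with hypothetical names (single_open() binds a show callback to the file; inode->i_private carries per-file data, the channel number in this patch):

    #include <linux/debugfs.h>
    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* show callback: emits the whole file content into the seq_file buffer */
    static int example_show(struct seq_file *s, void *p)
    {
            seq_printf(s, "example state: %d\n", (int)(long)s->private);
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, inode->i_private);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* created with, e.g.:
     * debugfs_create_file("state", 0400, parent, NULL, &example_fops);
     */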
Signed-off-by: Robert Jarzmik Signed-off-by: Eric Miao --- arch/arm/plat-pxa/dma.c | 249 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 248 insertions(+), 1 deletion(-) diff --git a/arch/arm/plat-pxa/dma.c b/arch/arm/plat-pxa/dma.c index 56b51cf0718b..2975798d411f 100644 --- a/arch/arm/plat-pxa/dma.c +++ b/arch/arm/plat-pxa/dma.c @@ -17,22 +17,266 @@ #include #include #include +#include #include #include +#include #include #include +#define DMA_DEBUG_NAME "pxa_dma" +#define DMA_MAX_REQUESTERS 64 + struct dma_channel { char *name; pxa_dma_prio prio; void (*irq_handler)(int, void *); void *data; + spinlock_t lock; }; static struct dma_channel *dma_channels; static int num_dma_channels; +/* + * Debug fs + */ +#ifdef CONFIG_DEBUG_FS +#include +#include +#include + +static struct dentry *dbgfs_root, *dbgfs_state, **dbgfs_chan; + +static int dbg_show_requester_chan(struct seq_file *s, void *p) +{ + int pos = 0; + int chan = (int)s->private; + int i; + u32 drcmr; + + pos += seq_printf(s, "DMA channel %d requesters list :\n", chan); + for (i = 0; i < DMA_MAX_REQUESTERS; i++) { + drcmr = DRCMR(i); + if ((drcmr & DRCMR_CHLNUM) == chan) + pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i, + !!(drcmr & DRCMR_MAPVLD)); + } + return pos; +} + +static inline int dbg_burst_from_dcmd(u32 dcmd) +{ + int burst = (dcmd >> 16) & 0x3; + + return burst ? 4 << burst : 0; +} + +static int is_phys_valid(unsigned long addr) +{ + return pfn_valid(__phys_to_pfn(addr)); +} + +#define DCSR_STR(flag) (dcsr & DCSR_##flag ? #flag" " : "") +#define DCMD_STR(flag) (dcmd & DCMD_##flag ? #flag" " : "") + +static int dbg_show_descriptors(struct seq_file *s, void *p) +{ + int pos = 0; + int chan = (int)s->private; + int i, max_show = 20, burst, width; + u32 dcmd; + unsigned long phys_desc; + struct pxa_dma_desc *desc; + unsigned long flags; + + spin_lock_irqsave(&dma_channels[chan].lock, flags); + phys_desc = DDADR(chan); + + pos += seq_printf(s, "DMA channel %d descriptors :\n", chan); + pos += seq_printf(s, "[%03d] First descriptor unknown\n", 0); + for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) { + desc = phys_to_virt(phys_desc); + dcmd = desc->dcmd; + burst = dbg_burst_from_dcmd(dcmd); + width = (1 << ((dcmd >> 14) & 0x3)) >> 1; + + pos += seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n", + i, phys_desc, desc); + pos += seq_printf(s, "\tDDADR = %08x\n", desc->ddadr); + pos += seq_printf(s, "\tDSADR = %08x\n", desc->dsadr); + pos += seq_printf(s, "\tDTADR = %08x\n", desc->dtadr); + pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d" + " width=%d len=%d)\n", + dcmd, + DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR), + DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG), + DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN), + DCMD_STR(ENDIAN), burst, width, + dcmd & DCMD_LENGTH); + phys_desc = desc->ddadr; + } + if (i == max_show) + pos += seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n", + i, phys_desc); + else + pos += seq_printf(s, "[%03d] Desc at %08lx is %s\n", + i, phys_desc, phys_desc == DDADR_STOP ? 
+ "DDADR_STOP" : "invalid"); + + spin_unlock_irqrestore(&dma_channels[chan].lock, flags); + return pos; +} + +static int dbg_show_chan_state(struct seq_file *s, void *p) +{ + int pos = 0; + int chan = (int)s->private; + u32 dcsr, dcmd; + int burst, width; + static char *str_prio[] = { "high", "normal", "low" }; + + dcsr = DCSR(chan); + dcmd = DCMD(chan); + burst = dbg_burst_from_dcmd(dcmd); + width = (1 << ((dcmd >> 14) & 0x3)) >> 1; + + pos += seq_printf(s, "DMA channel %d\n", chan); + pos += seq_printf(s, "\tPriority : %s\n", + str_prio[dma_channels[chan].prio]); + pos += seq_printf(s, "\tUnaligned transfer bit: %s\n", + DALGN & (1 << chan) ? "yes" : "no"); + pos += seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", + dcsr, DCSR_STR(RUN), DCSR_STR(NODESC), + DCSR_STR(STOPIRQEN), DCSR_STR(EORIRQEN), + DCSR_STR(EORJMPEN), DCSR_STR(EORSTOPEN), + DCSR_STR(SETCMPST), DCSR_STR(CLRCMPST), + DCSR_STR(CMPST), DCSR_STR(EORINTR), DCSR_STR(REQPEND), + DCSR_STR(STOPSTATE), DCSR_STR(ENDINTR), + DCSR_STR(STARTINTR), DCSR_STR(BUSERR)); + + pos += seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d" + " len=%d)\n", + dcmd, + DCMD_STR(INCSRCADDR), DCMD_STR(INCTRGADDR), + DCMD_STR(FLOWSRC), DCMD_STR(FLOWTRG), + DCMD_STR(STARTIRQEN), DCMD_STR(ENDIRQEN), + DCMD_STR(ENDIAN), burst, width, dcmd & DCMD_LENGTH); + pos += seq_printf(s, "\tDSADR = %08x\n", DSADR(chan)); + pos += seq_printf(s, "\tDTADR = %08x\n", DTADR(chan)); + pos += seq_printf(s, "\tDDADR = %08x\n", DDADR(chan)); + return pos; +} + +static int dbg_show_state(struct seq_file *s, void *p) +{ + int pos = 0; + + /* basic device status */ + pos += seq_printf(s, "DMA engine status\n"); + pos += seq_printf(s, "\tChannel number: %d\n", num_dma_channels); + + return pos; +} + +#define DBGFS_FUNC_DECL(name) \ +static int dbg_open_##name(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, dbg_show_##name, inode->i_private); \ +} \ +static const struct file_operations dbg_fops_##name = { \ + .owner = THIS_MODULE, \ + .open = dbg_open_##name, \ + .llseek = seq_lseek, \ + .read = seq_read, \ + .release = single_release, \ +} + +DBGFS_FUNC_DECL(state); +DBGFS_FUNC_DECL(chan_state); +DBGFS_FUNC_DECL(descriptors); +DBGFS_FUNC_DECL(requester_chan); + +static struct dentry *pxa_dma_dbg_alloc_chan(int ch, struct dentry *chandir) +{ + char chan_name[11]; + struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; + struct dentry *chan_reqs = NULL; + void *dt; + + scnprintf(chan_name, sizeof(chan_name), "%d", ch); + chan = debugfs_create_dir(chan_name, chandir); + dt = (void *)ch; + + if (chan) + chan_state = debugfs_create_file("state", 0400, chan, dt, + &dbg_fops_chan_state); + if (chan_state) + chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, + &dbg_fops_descriptors); + if (chan_descr) + chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, + &dbg_fops_requester_chan); + if (!chan_reqs) + goto err_state; + + return chan; + +err_state: + debugfs_remove_recursive(chan); + return NULL; +} + +static void pxa_dma_init_debugfs(void) +{ + int i; + struct dentry *chandir; + + dbgfs_root = debugfs_create_dir(DMA_DEBUG_NAME, NULL); + if (IS_ERR(dbgfs_root) || !dbgfs_root) + goto err_root; + + dbgfs_state = debugfs_create_file("state", 0400, dbgfs_root, NULL, + &dbg_fops_state); + if (!dbgfs_state) + goto err_state; + + dbgfs_chan = kmalloc(sizeof(*dbgfs_state) * num_dma_channels, + GFP_KERNEL); + if (!dbgfs_state) + goto err_alloc; + + chandir = debugfs_create_dir("channels", dbgfs_root); + if 
(!chandir) + goto err_chandir; + + for (i = 0; i < num_dma_channels; i++) { + dbgfs_chan[i] = pxa_dma_dbg_alloc_chan(i, chandir); + if (!dbgfs_chan[i]) + goto err_chans; + } + + return; +err_chans: +err_chandir: + kfree(dbgfs_chan); +err_alloc: +err_state: + debugfs_remove_recursive(dbgfs_root); +err_root: + pr_err("pxa_dma: debugfs is not available\n"); +} + +static void __exit pxa_dma_cleanup_debugfs(void) +{ + debugfs_remove_recursive(dbgfs_root); +} +#else +static inline void pxa_dma_init_debugfs(void) {} +static inline void pxa_dma_cleanup_debugfs(void) {} +#endif + int pxa_request_dma (char *name, pxa_dma_prio prio, void (*irq_handler)(int, void *), void *data) @@ -130,6 +374,7 @@ int __init pxa_init_dma(int irq, int num_ch) for (i = 0; i < num_ch; i++) { DCSR(i) = 0; dma_channels[i].prio = min((i & 0xf) >> 2, DMA_PRIO_LOW); + spin_lock_init(&dma_channels[i].lock); } ret = request_irq(irq, dma_irq_handler, IRQF_DISABLED, "DMA", NULL); @@ -138,7 +383,9 @@ int __init pxa_init_dma(int irq, int num_ch) kfree(dma_channels); return ret; } - num_dma_channels = num_ch; + + pxa_dma_init_debugfs(); + return 0; } From 53eff4175f1eb25f97425b1526774fecb81444d4 Mon Sep 17 00:00:00 2001 From: Daniel Mack Date: Mon, 22 Jun 2009 21:08:04 +0200 Subject: [PATCH 0172/3409] [ARM] pxafb: use resource_size() function Cleanup only, no functional change. Signed-off-by: Daniel Mack Signed-off-by: Eric Miao --- drivers/video/pxafb.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 6506117c134b..0a301003d928 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c @@ -2091,14 +2091,14 @@ static int __devinit pxafb_probe(struct platform_device *dev) goto failed_fbi; } - r = request_mem_region(r->start, r->end - r->start + 1, dev->name); + r = request_mem_region(r->start, resource_size(r), dev->name); if (r == NULL) { dev_err(&dev->dev, "failed to request I/O memory\n"); ret = -EBUSY; goto failed_fbi; } - fbi->mmio_base = ioremap(r->start, r->end - r->start + 1); + fbi->mmio_base = ioremap(r->start, resource_size(r)); if (fbi->mmio_base == NULL) { dev_err(&dev->dev, "failed to map I/O memory\n"); ret = -EBUSY; @@ -2197,7 +2197,7 @@ failed_free_dma: failed_free_io: iounmap(fbi->mmio_base); failed_free_res: - release_mem_region(r->start, r->end - r->start + 1); + release_mem_region(r->start, resource_size(r)); failed_fbi: clk_put(fbi->clk); platform_set_drvdata(dev, NULL); @@ -2237,7 +2237,7 @@ static int __devexit pxafb_remove(struct platform_device *dev) iounmap(fbi->mmio_base); r = platform_get_resource(dev, IORESOURCE_MEM, 0); - release_mem_region(r->start, r->end - r->start + 1); + release_mem_region(r->start, resource_size(r)); clk_put(fbi->clk); kfree(fbi); From b405db6c015fe8e4c9d8199a0355bb16d95d7049 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Tue, 23 Jun 2009 23:21:03 +0200 Subject: [PATCH 0173/3409] [ARM] pxamci: add simple gpio controls The MMC block needs three external inputs to work: - is the MMC card in "read-only mode"? - has an MMC card been inserted or removed? - enable power towards the MMC card Several platforms provide these controls through gpios. Expand the platform_data so that these gpios, set up by board code, can be requested and used.
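A usage sketch of the new fields from the board side (board name and gpio numbers are hypothetical): a board fills in whichever controls it wires through gpios and sets the rest to -1, which fails gpio_is_valid() and is therefore skipped by the driver.

    static struct pxamci_platform_data exampleboard_mci_platform_data = {
            .ocr_mask            = MMC_VDD_32_33 | MMC_VDD_33_34,
            .gpio_card_detect    = 15,   /* irq source for insert/remove events */
            .gpio_card_ro        = 78,   /* write-protect switch */
            .gpio_card_ro_invert = 1,    /* switch reads low when protected */
            .gpio_power          = -1,   /* power is not gpio-controlled here */
    };

The board's init code would then hand this to the driver with pxa_set_mci_info(&exampleboard_mci_platform_data), as the board files converted in the following patch do.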
Signed-off-by: Robert Jarzmik Acked-by: Pierre.Ossman Signed-off-by: Eric Miao --- arch/arm/mach-pxa/include/mach/mmc.h | 5 ++ drivers/mmc/host/pxamci.c | 81 +++++++++++++++++++++++++++- 2 files changed, 85 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-pxa/include/mach/mmc.h b/arch/arm/mach-pxa/include/mach/mmc.h index 6d1304c9270f..02a69dc2ee63 100644 --- a/arch/arm/mach-pxa/include/mach/mmc.h +++ b/arch/arm/mach-pxa/include/mach/mmc.h @@ -14,6 +14,11 @@ struct pxamci_platform_data { int (*get_ro)(struct device *); void (*setpower)(struct device *, unsigned int); void (*exit)(struct device *, void *); + int gpio_card_detect; /* gpio detecting card insertion */ + int gpio_card_ro; /* gpio detecting read only toggle */ + bool gpio_card_ro_invert; /* gpio ro is inverted */ + int gpio_power; /* gpio powering up MMC bus */ + bool gpio_power_invert; /* gpio power is inverted */ }; extern void pxa_set_mci_info(struct pxamci_platform_data *info); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index e55ac792d68c..972efa8d2c8e 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -28,6 +28,7 @@ #include #include #include +#include #include @@ -96,10 +97,18 @@ static inline void pxamci_init_ocr(struct pxamci_host *host) static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) { + int on; + #ifdef CONFIG_REGULATOR if (host->vcc) mmc_regulator_set_ocr(host->vcc, vdd); #endif + if (!host->vcc && host->pdata && + gpio_is_valid(host->pdata->gpio_power)) { + on = ((1 << vdd) & host->pdata->ocr_mask); + gpio_set_value(host->pdata->gpio_power, + !!on ^ host->pdata->gpio_power_invert); + } if (!host->vcc && host->pdata && host->pdata->setpower) host->pdata->setpower(mmc_dev(host->mmc), vdd); } @@ -421,6 +430,12 @@ static int pxamci_get_ro(struct mmc_host *mmc) { struct pxamci_host *host = mmc_priv(mmc); + if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) { + if (host->pdata->gpio_card_ro_invert) + return !gpio_get_value(host->pdata->gpio_card_ro); + else + return gpio_get_value(host->pdata->gpio_card_ro); + } if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* @@ -534,7 +549,7 @@ static int pxamci_probe(struct platform_device *pdev) struct mmc_host *mmc; struct pxamci_host *host = NULL; struct resource *r, *dmarx, *dmatx; - int ret, irq; + int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); @@ -661,13 +676,63 @@ static int pxamci_probe(struct platform_device *pdev) } host->dma_drcmrtx = dmatx->start; + if (host->pdata) { + gpio_cd = host->pdata->gpio_card_detect; + gpio_ro = host->pdata->gpio_card_ro; + gpio_power = host->pdata->gpio_power; + } + if (gpio_is_valid(gpio_power)) { + ret = gpio_request(gpio_power, "mmc card power"); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power); + goto out; + } + gpio_direction_output(gpio_power, + host->pdata->gpio_power_invert); + } + if (gpio_is_valid(gpio_ro)) { + ret = gpio_request(gpio_ro, "mmc card read only"); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power); + goto err_gpio_ro; + } + gpio_direction_input(gpio_ro); + } + if (gpio_is_valid(gpio_cd)) { + ret = gpio_request(gpio_cd, "mmc card detect"); + if (ret) { + dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power); + goto err_gpio_cd; + } + gpio_direction_input(gpio_cd); + + ret = request_irq(gpio_to_irq(gpio_cd), 
pxamci_detect_irq, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + "mmc card detect", mmc); + if (ret) { + dev_err(&pdev->dev, "failed to request card detect IRQ\n"); + goto err_request_irq; + } + } + if (host->pdata && host->pdata->init) host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); + if (gpio_is_valid(gpio_power) && host->pdata->setpower) + dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n"); + if (gpio_is_valid(gpio_ro) && host->pdata->get_ro) + dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n"); + mmc_add_host(mmc); return 0; +err_request_irq: + gpio_free(gpio_cd); +err_gpio_cd: + gpio_free(gpio_ro); +err_gpio_ro: + gpio_free(gpio_power); out: if (host) { if (host->dma >= 0) @@ -688,12 +753,26 @@ static int pxamci_probe(struct platform_device *pdev) static int pxamci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); + int gpio_cd = -1, gpio_ro = -1, gpio_power = -1; platform_set_drvdata(pdev, NULL); if (mmc) { struct pxamci_host *host = mmc_priv(mmc); + if (host->pdata) { + gpio_cd = host->pdata->gpio_card_detect; + gpio_ro = host->pdata->gpio_card_ro; + gpio_power = host->pdata->gpio_power; + } + if (gpio_is_valid(gpio_cd)) { + free_irq(gpio_to_irq(gpio_cd), mmc); + gpio_free(gpio_cd); + } + if (gpio_is_valid(gpio_ro)) + gpio_free(gpio_ro); + if (gpio_is_valid(gpio_power)) + gpio_free(gpio_power); if (host->vcc) regulator_put(host->vcc); From 7a648256b20c493c99757fe1d248daf7954647bc Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Mon, 6 Jul 2009 22:16:42 +0200 Subject: [PATCH 0174/3409] [ARM] pxa: factor pxamci gpio handling Several boards always use the same pattern with pxamci: request a gpio, request an irq for that gpio to detect MMC card insertion, request a gpio for read-only mode detection, and so on. Now that pxamci provides platform_data to describe simple gpio management of the MMC external controls, use it.
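One detail worth spelling out, since several boards converted below rely on it: on the driver side (previous patch), pxamci_set_power() computes on = (1 << vdd) & ocr_mask and drives the gpio to !!on ^ gpio_power_invert. A board such as palmz72, whose "SD_POWER" line is really an inverted chip select, therefore only sets .gpio_power_invert = 1 instead of carrying its own setpower() callback. A small sketch of that computation (helper name hypothetical; MMC_VDD_32_33 is OCR bit 20):

    #include <linux/gpio.h>

    static void example_gpio_power(int gpio_power, bool invert,
                                   unsigned int vdd, unsigned int ocr_mask)
    {
            int on = !!((1 << vdd) & ocr_mask);  /* 1: the card should be powered */

            /* the XOR flips the level for active-low power/chip-select lines */
            gpio_set_value(gpio_power, on ^ invert);
    }

With vdd = 20 (MMC_VDD_32_33) present in ocr_mask, on is 1, and an inverted board drives the line low to power the card.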
Signed-off-by: Robert Jarzmik Acked-by: Mike Rapoport Acked-by: Philipp Zabel Acked-by: Dmitry Eremin-Solenikov Cc: rpurdie@rpsys.net Cc: drwyrm@gmail.com Cc: sakoman@gmail.com Cc: marek.vasut@gmail.com Cc: s.hauer@pengutronix.de Signed-off-by: Eric Miao --- arch/arm/mach-pxa/cm-x270.c | 54 ++--------------- arch/arm/mach-pxa/cm-x300.c | 71 ++++------------------ arch/arm/mach-pxa/colibri-pxa3xx.c | 11 ++-- arch/arm/mach-pxa/corgi.c | 75 ++---------------------- arch/arm/mach-pxa/csb726.c | 54 ++--------------- arch/arm/mach-pxa/em-x270.c | 17 +++--- arch/arm/mach-pxa/gumstix.c | 5 +- arch/arm/mach-pxa/idp.c | 5 +- arch/arm/mach-pxa/imote2.c | 3 + arch/arm/mach-pxa/lubbock.c | 13 ++-- arch/arm/mach-pxa/magician.c | 49 +++------------- arch/arm/mach-pxa/mainstone.c | 11 ++-- arch/arm/mach-pxa/mioa701.c | 68 ++------------------- arch/arm/mach-pxa/palmld.c | 80 ++----------------------- arch/arm/mach-pxa/palmt5.c | 80 ++----------------------- arch/arm/mach-pxa/palmte2.c | 80 ++----------------------- arch/arm/mach-pxa/palmtx.c | 80 ++----------------------- arch/arm/mach-pxa/palmz72.c | 88 +++------------------------- arch/arm/mach-pxa/pcm990-baseboard.c | 13 ++-- arch/arm/mach-pxa/poodle.c | 53 +++-------------- arch/arm/mach-pxa/spitz.c | 62 ++------------------ arch/arm/mach-pxa/tosa.c | 82 +++----------------------- arch/arm/mach-pxa/trizeps4.c | 3 + arch/arm/mach-pxa/zylonite.c | 3 + 24 files changed, 143 insertions(+), 917 deletions(-) diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c index 1d2cec25391d..59292181088c 100644 --- a/arch/arm/mach-pxa/cm-x270.c +++ b/arch/arm/mach-pxa/cm-x270.c @@ -271,56 +271,12 @@ static inline void cmx270_init_ohci(void) {} #endif #if defined(CONFIG_MMC) || defined(CONFIG_MMC_MODULE) -static int cmx270_mci_init(struct device *dev, - irq_handler_t cmx270_detect_int, - void *data) -{ - int err; - - err = gpio_request(GPIO105_MMC_POWER, "MMC/SD power"); - if (err) { - dev_warn(dev, "power gpio unavailable\n"); - return err; - } - - gpio_direction_output(GPIO105_MMC_POWER, 0); - - err = request_irq(CMX270_MMC_IRQ, cmx270_detect_int, - IRQF_DISABLED | IRQF_TRIGGER_FALLING, - "MMC card detect", data); - if (err) { - gpio_free(GPIO105_MMC_POWER); - dev_err(dev, "cmx270_mci_init: MMC/SD: can't" - " request MMC card detect IRQ\n"); - } - - return err; -} - -static void cmx270_mci_setpower(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - - if ((1 << vdd) & p_d->ocr_mask) { - dev_dbg(dev, "power on\n"); - gpio_set_value(GPIO105_MMC_POWER, 0); - } else { - gpio_set_value(GPIO105_MMC_POWER, 1); - dev_dbg(dev, "power off\n"); - } -} - -static void cmx270_mci_exit(struct device *dev, void *data) -{ - free_irq(CMX270_MMC_IRQ, data); - gpio_free(GPIO105_MMC_POWER); -} - static struct pxamci_platform_data cmx270_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = cmx270_mci_init, - .setpower = cmx270_mci_setpower, - .exit = cmx270_mci_exit, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .gpio_card_detect = GPIO83_MMC_IRQ, + .gpio_card_ro = -1, + .gpio_power = GPIO105_MMC_POWER, + .gpio_power_invert = 1, }; static void __init cmx270_init_mmc(void) diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c index 465da26591bd..aac2cda60e09 100644 --- a/arch/arm/mach-pxa/cm-x300.c +++ b/arch/arm/mach-pxa/cm-x300.c @@ -306,68 +306,21 @@ static void cm_x300_mci_exit(struct device *dev, void *data) } static struct pxamci_platform_data cm_x300_mci_platform_data = { - 
.detect_delay = 20, - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = cm_x300_mci_init, - .exit = cm_x300_mci_exit, + .detect_delay = 20, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .init = cm_x300_mci_init, + .exit = cm_x300_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; -static int cm_x300_mci2_ro(struct device *dev) -{ - return gpio_get_value(GPIO85_MMC2_WP); -} - -static int cm_x300_mci2_init(struct device *dev, - irq_handler_t cm_x300_detect_int, - void *data) -{ - int err; - - /* - * setup GPIO for CM-X300 MMC controller - */ - err = gpio_request(GPIO82_MMC2_IRQ, "mmc card detect"); - if (err) - goto err_request_cd; - gpio_direction_input(GPIO82_MMC2_IRQ); - - err = gpio_request(GPIO85_MMC2_WP, "mmc write protect"); - if (err) - goto err_request_wp; - gpio_direction_input(GPIO85_MMC2_WP); - - err = request_irq(CM_X300_MMC2_IRQ, cm_x300_detect_int, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "MMC card detect", data); - if (err) { - printk(KERN_ERR "%s: MMC/SD/SDIO: " - "can't request card detect IRQ\n", __func__); - goto err_request_irq; - } - - return 0; - -err_request_irq: - gpio_free(GPIO85_MMC2_WP); -err_request_wp: - gpio_free(GPIO82_MMC2_IRQ); -err_request_cd: - return err; -} - -static void cm_x300_mci2_exit(struct device *dev, void *data) -{ - free_irq(CM_X300_MMC2_IRQ, data); - gpio_free(GPIO82_MMC2_IRQ); - gpio_free(GPIO85_MMC2_WP); -} - static struct pxamci_platform_data cm_x300_mci2_platform_data = { - .detect_delay = 20, - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = cm_x300_mci2_init, - .exit = cm_x300_mci2_exit, - .get_ro = cm_x300_mci2_ro, + .detect_delay = 20, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .gpio_card_detect = GPIO82_MMC2_IRQ, + .gpio_card_ro = GPIO85_MMC2_WP, + .gpio_power = -1, }; static void __init cm_x300_init_mmc(void) diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c index ea34e34f8cd8..2c846bbfc435 100644 --- a/arch/arm/mach-pxa/colibri-pxa3xx.c +++ b/arch/arm/mach-pxa/colibri-pxa3xx.c @@ -95,10 +95,13 @@ static void colibri_pxa3xx_mci_exit(struct device *dev, void *data) } static struct pxamci_platform_data colibri_pxa3xx_mci_platform_data = { - .detect_delay = 20, - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .init = colibri_pxa3xx_mci_init, - .exit = colibri_pxa3xx_mci_exit, + .detect_delay = 20, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .init = colibri_pxa3xx_mci_init, + .exit = colibri_pxa3xx_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; void __init colibri_pxa3xx_init_mmc(mfp_cfg_t *pins, int len, int detect_pin) diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c index 5363e1aea3fb..39a94585f97b 100644 --- a/arch/arm/mach-pxa/corgi.c +++ b/arch/arm/mach-pxa/corgi.c @@ -307,77 +307,11 @@ static struct platform_device corgiled_device = { * The card detect interrupt isn't debounced so we delay it by 250ms * to give the card a chance to fully insert/eject. 
*/ -static struct pxamci_platform_data corgi_mci_platform_data; - -static int corgi_mci_init(struct device *dev, irq_handler_t corgi_detect_int, void *data) -{ - int err; - - err = gpio_request(CORGI_GPIO_nSD_DETECT, "nSD_DETECT"); - if (err) - goto err_out; - - err = gpio_request(CORGI_GPIO_nSD_WP, "nSD_WP"); - if (err) - goto err_free_1; - - err = gpio_request(CORGI_GPIO_SD_PWR, "SD_PWR"); - if (err) - goto err_free_2; - - gpio_direction_input(CORGI_GPIO_nSD_DETECT); - gpio_direction_input(CORGI_GPIO_nSD_WP); - gpio_direction_output(CORGI_GPIO_SD_PWR, 0); - - corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250); - - err = request_irq(CORGI_IRQ_GPIO_nSD_DETECT, corgi_detect_int, - IRQF_DISABLED | IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "MMC card detect", data); - if (err) { - pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n", - __func__); - goto err_free_3; - } - return 0; - -err_free_3: - gpio_free(CORGI_GPIO_SD_PWR); -err_free_2: - gpio_free(CORGI_GPIO_nSD_WP); -err_free_1: - gpio_free(CORGI_GPIO_nSD_DETECT); -err_out: - return err; -} - -static void corgi_mci_setpower(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data* p_d = dev->platform_data; - - gpio_set_value(CORGI_GPIO_SD_PWR, ((1 << vdd) & p_d->ocr_mask)); -} - -static int corgi_mci_get_ro(struct device *dev) -{ - return gpio_get_value(CORGI_GPIO_nSD_WP); -} - -static void corgi_mci_exit(struct device *dev, void *data) -{ - free_irq(CORGI_IRQ_GPIO_nSD_DETECT, data); - gpio_free(CORGI_GPIO_SD_PWR); - gpio_free(CORGI_GPIO_nSD_WP); - gpio_free(CORGI_GPIO_nSD_DETECT); -} - static struct pxamci_platform_data corgi_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = corgi_mci_init, - .get_ro = corgi_mci_get_ro, - .setpower = corgi_mci_setpower, - .exit = corgi_mci_exit, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .gpio_card_detect = -1, + .gpio_card_ro = CORGI_GPIO_nSD_WP, + .gpio_power = CORGI_GPIO_SD_PWR, }; @@ -636,6 +570,7 @@ static void __init corgi_init(void) corgi_init_spi(); pxa_set_udc_info(&udc_info); + corgi_mci_platform_data.detect_delay = msecs_to_jiffies(250); pxa_set_mci_info(&corgi_mci_platform_data); pxa_set_ficp_info(&corgi_ficp_platform_data); pxa_set_i2c_info(NULL); diff --git a/arch/arm/mach-pxa/csb726.c b/arch/arm/mach-pxa/csb726.c index 7d3e1b46e550..79141f862728 100644 --- a/arch/arm/mach-pxa/csb726.c +++ b/arch/arm/mach-pxa/csb726.c @@ -130,61 +130,17 @@ static struct pxamci_platform_data csb726_mci_data; static int csb726_mci_init(struct device *dev, irq_handler_t detect, void *data) { - int err; - csb726_mci_data.detect_delay = msecs_to_jiffies(500); - - err = gpio_request(CSB726_GPIO_MMC_DETECT, "MMC detect"); - if (err) - goto err_det_req; - - err = gpio_direction_input(CSB726_GPIO_MMC_DETECT); - if (err) - goto err_det_dir; - - err = gpio_request(CSB726_GPIO_MMC_RO, "MMC ro"); - if (err) - goto err_ro_req; - - err = gpio_direction_input(CSB726_GPIO_MMC_RO); - if (err) - goto err_ro_dir; - - err = request_irq(gpio_to_irq(CSB726_GPIO_MMC_DETECT), detect, - IRQF_DISABLED, "MMC card detect", data); - if (err) - goto err_irq; - return 0; - -err_irq: -err_ro_dir: - gpio_free(CSB726_GPIO_MMC_RO); -err_ro_req: -err_det_dir: - gpio_free(CSB726_GPIO_MMC_DETECT); -err_det_req: - return err; -} - -static int csb726_mci_get_ro(struct device *dev) -{ - return gpio_get_value(CSB726_GPIO_MMC_RO); -} - -static void csb726_mci_exit(struct device *dev, void *data) -{ - free_irq(gpio_to_irq(CSB726_GPIO_MMC_DETECT), data); - gpio_free(CSB726_GPIO_MMC_RO); - 
gpio_free(CSB726_GPIO_MMC_DETECT); } static struct pxamci_platform_data csb726_mci = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = csb726_mci_init, - .get_ro = csb726_mci_get_ro, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .init = csb726_mci_init, /* FIXME setpower */ - .exit = csb726_mci_exit, + .gpio_card_detect = CSB726_GPIO_MMC_DETECT, + .gpio_card_ro = CSB726_GPIO_MMC_RO, + .gpio_power = -1, }; static struct pxaohci_platform_data csb726_ohci_platform_data = { diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c index 9cd09465a0e8..e0121f4fcaf4 100644 --- a/arch/arm/mach-pxa/em-x270.c +++ b/arch/arm/mach-pxa/em-x270.c @@ -646,13 +646,16 @@ static int em_x270_mci_get_ro(struct device *dev) } static struct pxamci_platform_data em_x270_mci_platform_data = { - .ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23| - MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27| - MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30| - MMC_VDD_30_31|MMC_VDD_31_32, - .init = em_x270_mci_init, - .setpower = em_x270_mci_setpower, - .exit = em_x270_mci_exit, + .ocr_mask = MMC_VDD_20_21|MMC_VDD_21_22|MMC_VDD_22_23| + MMC_VDD_24_25|MMC_VDD_25_26|MMC_VDD_26_27| + MMC_VDD_27_28|MMC_VDD_28_29|MMC_VDD_29_30| + MMC_VDD_30_31|MMC_VDD_31_32, + .init = em_x270_mci_init, + .setpower = em_x270_mci_setpower, + .exit = em_x270_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static void __init em_x270_init_mmc(void) diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c index ca9912ea78d9..1708c0109844 100644 --- a/arch/arm/mach-pxa/gumstix.c +++ b/arch/arm/mach-pxa/gumstix.c @@ -88,7 +88,10 @@ static struct platform_device *devices[] __initdata = { #ifdef CONFIG_MMC_PXA static struct pxamci_platform_data gumstix_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static void __init gumstix_mmc_init(void) diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index b6243b59d9be..b6486ef20b17 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -168,7 +168,10 @@ static struct pxafb_mach_info sharp_lm8v31 = { }; static struct pxamci_platform_data idp_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static void __init idp_init(void) diff --git a/arch/arm/mach-pxa/imote2.c b/arch/arm/mach-pxa/imote2.c index 961807dc6467..2a4945db31c5 100644 --- a/arch/arm/mach-pxa/imote2.c +++ b/arch/arm/mach-pxa/imote2.c @@ -389,6 +389,9 @@ static int imote2_mci_get_ro(struct device *dev) static struct pxamci_platform_data imote2_mci_platform_data = { .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, /* default anyway */ .get_ro = imote2_mci_get_ro, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static struct mtd_partition imote2flash_partitions[] = { diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c index f04c8333dff7..1785cc9494d9 100644 --- a/arch/arm/mach-pxa/lubbock.c +++ b/arch/arm/mach-pxa/lubbock.c @@ -482,11 +482,14 @@ static void lubbock_mci_exit(struct device *dev, void *data) } static struct pxamci_platform_data lubbock_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .detect_delay = 1, - .init = lubbock_mci_init, - .get_ro = lubbock_mci_get_ro, - .exit = lubbock_mci_exit, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .detect_delay = 1, + 
.init = lubbock_mci_init, + .get_ro = lubbock_mci_get_ro, + .exit = lubbock_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static void lubbock_irda_transceiver_mode(struct device *dev, int mode) diff --git a/arch/arm/mach-pxa/magician.c b/arch/arm/mach-pxa/magician.c index ca39669cffc5..0daba43d7ca1 100644 --- a/arch/arm/mach-pxa/magician.c +++ b/arch/arm/mach-pxa/magician.c @@ -651,55 +651,24 @@ static struct platform_device bq24022 = { static int magician_mci_init(struct device *dev, irq_handler_t detect_irq, void *data) { - int err; - - err = request_irq(IRQ_MAGICIAN_SD, detect_irq, + return request_irq(IRQ_MAGICIAN_SD, detect_irq, IRQF_DISABLED | IRQF_SAMPLE_RANDOM, - "MMC card detect", data); - if (err) - goto err_request_irq; - err = gpio_request(EGPIO_MAGICIAN_SD_POWER, "SD_POWER"); - if (err) - goto err_request_power; - err = gpio_request(EGPIO_MAGICIAN_nSD_READONLY, "nSD_READONLY"); - if (err) - goto err_request_readonly; - - return 0; - -err_request_readonly: - gpio_free(EGPIO_MAGICIAN_SD_POWER); -err_request_power: - free_irq(IRQ_MAGICIAN_SD, data); -err_request_irq: - return err; -} - -static void magician_mci_setpower(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *pdata = dev->platform_data; - - gpio_set_value(EGPIO_MAGICIAN_SD_POWER, (1 << vdd) & pdata->ocr_mask); -} - -static int magician_mci_get_ro(struct device *dev) -{ - return (!gpio_get_value(EGPIO_MAGICIAN_nSD_READONLY)); + "mmc card detect", data); } static void magician_mci_exit(struct device *dev, void *data) { - gpio_free(EGPIO_MAGICIAN_nSD_READONLY); - gpio_free(EGPIO_MAGICIAN_SD_POWER); free_irq(IRQ_MAGICIAN_SD, data); } static struct pxamci_platform_data magician_mci_info = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = magician_mci_init, - .get_ro = magician_mci_get_ro, - .setpower = magician_mci_setpower, - .exit = magician_mci_exit, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .init = magician_mci_init, + .exit = magician_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = EGPIO_MAGICIAN_nSD_READONLY, + .gpio_card_ro_invert = 1, + .gpio_power = EGPIO_MAGICIAN_SD_POWER, }; diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index f4dabf0273ca..f7dc23078ce3 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c @@ -450,10 +450,13 @@ static void mainstone_mci_exit(struct device *dev, void *data) } static struct pxamci_platform_data mainstone_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, - .init = mainstone_mci_init, - .setpower = mainstone_mci_setpower, - .exit = mainstone_mci_exit, + .ocr_mask = MMC_VDD_32_33|MMC_VDD_33_34, + .init = mainstone_mci_init, + .setpower = mainstone_mci_setpower, + .exit = mainstone_mci_exit, + .gpio_card_detect = -1, + .gpio_card_ro = -1, + .gpio_power = -1, }; static void mainstone_irda_transceiver_mode(struct device *dev, int mode) diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c index 2d28132c725b..a8122ecd2a3a 100644 --- a/arch/arm/mach-pxa/mioa701.c +++ b/arch/arm/mach-pxa/mioa701.c @@ -434,72 +434,15 @@ struct gpio_vbus_mach_info gpio_vbus_data = { /* * SDIO/MMC Card controller */ -static void mci_setpower(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - - if ((1 << vdd) & p_d->ocr_mask) - gpio_set_value(GPIO91_SDIO_EN, 1); /* enable SDIO power */ - else - gpio_set_value(GPIO91_SDIO_EN, 0); /* disable SDIO power */ -} - -static int mci_get_ro(struct device *dev) -{ - 
return gpio_get_value(GPIO78_SDIO_RO); -} - -struct gpio_ress mci_gpios[] = { - MIO_GPIO_IN(GPIO78_SDIO_RO, "SDIO readonly detect"), - MIO_GPIO_IN(GPIO15_SDIO_INSERT, "SDIO insertion detect"), - MIO_GPIO_OUT(GPIO91_SDIO_EN, 0, "SDIO power enable") -}; - -static void mci_exit(struct device *dev, void *data) -{ - mio_gpio_free(ARRAY_AND_SIZE(mci_gpios)); - free_irq(gpio_to_irq(GPIO15_SDIO_INSERT), data); -} - -static struct pxamci_platform_data mioa701_mci_info; - /** * The card detect interrupt isn't debounced so we delay it by 250ms * to give the card a chance to fully insert/eject. */ -static int mci_init(struct device *dev, irq_handler_t detect_int, void *data) -{ - int rc; - int irq = gpio_to_irq(GPIO15_SDIO_INSERT); - - rc = mio_gpio_request(ARRAY_AND_SIZE(mci_gpios)); - if (rc) - goto err_gpio; - /* enable RE/FE interrupt on card insertion and removal */ - rc = request_irq(irq, detect_int, - IRQF_DISABLED | IRQF_TRIGGER_RISING | - IRQF_TRIGGER_FALLING, - "MMC card detect", data); - if (rc) - goto err_irq; - - mioa701_mci_info.detect_delay = msecs_to_jiffies(250); - return 0; - -err_irq: - dev_err(dev, "mioa701_mci_init: MMC/SD:" - " can't request MMC card detect IRQ\n"); - mio_gpio_free(ARRAY_AND_SIZE(mci_gpios)); -err_gpio: - return rc; -} - static struct pxamci_platform_data mioa701_mci_info = { - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .init = mci_init, - .get_ro = mci_get_ro, - .setpower = mci_setpower, - .exit = mci_exit, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_card_detect = GPIO15_SDIO_INSERT, + .gpio_card_ro = GPIO78_SDIO_RO, + .gpio_power = GPIO91_SDIO_EN, }; /* FlashRAM */ @@ -841,7 +784,7 @@ static void mioa701_restart(char c, const char *cmd) static struct gpio_ress global_gpios[] = { MIO_GPIO_OUT(GPIO9_CHARGE_EN, 1, "Charger enable"), MIO_GPIO_OUT(GPIO18_POWEROFF, 0, "Power Off"), - MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power") + MIO_GPIO_OUT(GPIO87_LCD_POWER, 0, "LCD Power"), }; static void __init mioa701_machine_init(void) @@ -855,6 +798,7 @@ static void __init mioa701_machine_init(void) mio_gpio_request(ARRAY_AND_SIZE(global_gpios)); bootstrap_init(); set_pxa_fb_info(&mioa701_pxafb_info); + mioa701_mci_info.detect_delay = msecs_to_jiffies(250); pxa_set_mci_info(&mioa701_mci_info); pxa_set_keypad_info(&mioa701_keypad_info); wm97xx_bat_set_pdata(&mioa701_battery_data); diff --git a/arch/arm/mach-pxa/palmld.c b/arch/arm/mach-pxa/palmld.c index 169fcc18154e..445749198cd8 100644 --- a/arch/arm/mach-pxa/palmld.c +++ b/arch/arm/mach-pxa/palmld.c @@ -143,83 +143,11 @@ static unsigned long palmld_pin_config[] __initdata = { /****************************************************************************** * SD/MMC card controller ******************************************************************************/ -static int palmld_mci_init(struct device *dev, irq_handler_t palmld_detect_int, - void *data) -{ - int err = 0; - - /* Setup an interrupt for detecting card insert/remove events */ - err = gpio_request(GPIO_NR_PALMLD_SD_DETECT_N, "SD IRQ"); - if (err) - goto err; - err = gpio_direction_input(GPIO_NR_PALMLD_SD_DETECT_N); - if (err) - goto err2; - err = request_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N), - palmld_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM | - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "SD/MMC card detect", data); - if (err) { - printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n", - __func__); - goto err2; - } - - err = gpio_request(GPIO_NR_PALMLD_SD_POWER, "SD_POWER"); - if (err) - goto err3; - err = 
gpio_direction_output(GPIO_NR_PALMLD_SD_POWER, 0); - if (err) - goto err4; - - err = gpio_request(GPIO_NR_PALMLD_SD_READONLY, "SD_READONLY"); - if (err) - goto err4; - err = gpio_direction_input(GPIO_NR_PALMLD_SD_READONLY); - if (err) - goto err5; - - printk(KERN_DEBUG "%s: irq registered\n", __func__); - - return 0; - -err5: - gpio_free(GPIO_NR_PALMLD_SD_READONLY); -err4: - gpio_free(GPIO_NR_PALMLD_SD_POWER); -err3: - free_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N), data); -err2: - gpio_free(GPIO_NR_PALMLD_SD_DETECT_N); -err: - return err; -} - -static void palmld_mci_exit(struct device *dev, void *data) -{ - gpio_free(GPIO_NR_PALMLD_SD_READONLY); - gpio_free(GPIO_NR_PALMLD_SD_POWER); - free_irq(gpio_to_irq(GPIO_NR_PALMLD_SD_DETECT_N), data); - gpio_free(GPIO_NR_PALMLD_SD_DETECT_N); -} - -static void palmld_mci_power(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - gpio_set_value(GPIO_NR_PALMLD_SD_POWER, p_d->ocr_mask & (1 << vdd)); -} - -static int palmld_mci_get_ro(struct device *dev) -{ - return gpio_get_value(GPIO_NR_PALMLD_SD_READONLY); -} - static struct pxamci_platform_data palmld_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .setpower = palmld_mci_power, - .get_ro = palmld_mci_get_ro, - .init = palmld_mci_init, - .exit = palmld_mci_exit, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_card_detect = GPIO_NR_PALMLD_SD_DETECT_N, + .gpio_card_ro = GPIO_NR_PALMLD_SD_READONLY, + .gpio_power = GPIO_NR_PALMLD_SD_POWER, }; /****************************************************************************** diff --git a/arch/arm/mach-pxa/palmt5.c b/arch/arm/mach-pxa/palmt5.c index 33f726ff55e5..42d0528d3e9e 100644 --- a/arch/arm/mach-pxa/palmt5.c +++ b/arch/arm/mach-pxa/palmt5.c @@ -124,83 +124,11 @@ static unsigned long palmt5_pin_config[] __initdata = { /****************************************************************************** * SD/MMC card controller ******************************************************************************/ -static int palmt5_mci_init(struct device *dev, irq_handler_t palmt5_detect_int, - void *data) -{ - int err = 0; - - /* Setup an interrupt for detecting card insert/remove events */ - err = gpio_request(GPIO_NR_PALMT5_SD_DETECT_N, "SD IRQ"); - if (err) - goto err; - err = gpio_direction_input(GPIO_NR_PALMT5_SD_DETECT_N); - if (err) - goto err2; - err = request_irq(gpio_to_irq(GPIO_NR_PALMT5_SD_DETECT_N), - palmt5_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM | - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "SD/MMC card detect", data); - if (err) { - printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n", - __func__); - goto err2; - } - - err = gpio_request(GPIO_NR_PALMT5_SD_POWER, "SD_POWER"); - if (err) - goto err3; - err = gpio_direction_output(GPIO_NR_PALMT5_SD_POWER, 0); - if (err) - goto err4; - - err = gpio_request(GPIO_NR_PALMT5_SD_READONLY, "SD_READONLY"); - if (err) - goto err4; - err = gpio_direction_input(GPIO_NR_PALMT5_SD_READONLY); - if (err) - goto err5; - - printk(KERN_DEBUG "%s: irq registered\n", __func__); - - return 0; - -err5: - gpio_free(GPIO_NR_PALMT5_SD_READONLY); -err4: - gpio_free(GPIO_NR_PALMT5_SD_POWER); -err3: - free_irq(gpio_to_irq(GPIO_NR_PALMT5_SD_DETECT_N), data); -err2: - gpio_free(GPIO_NR_PALMT5_SD_DETECT_N); -err: - return err; -} - -static void palmt5_mci_exit(struct device *dev, void *data) -{ - gpio_free(GPIO_NR_PALMT5_SD_READONLY); - gpio_free(GPIO_NR_PALMT5_SD_POWER); - free_irq(IRQ_GPIO_PALMT5_SD_DETECT_N, data); - 
gpio_free(GPIO_NR_PALMT5_SD_DETECT_N); -} - -static void palmt5_mci_power(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - gpio_set_value(GPIO_NR_PALMT5_SD_POWER, p_d->ocr_mask & (1 << vdd)); -} - -static int palmt5_mci_get_ro(struct device *dev) -{ - return gpio_get_value(GPIO_NR_PALMT5_SD_READONLY); -} - static struct pxamci_platform_data palmt5_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .setpower = palmt5_mci_power, - .get_ro = palmt5_mci_get_ro, - .init = palmt5_mci_init, - .exit = palmt5_mci_exit, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_card_detect = GPIO_NR_PALMT5_SD_DETECT_N, + .gpio_card_ro = GPIO_NR_PALMT5_SD_READONLY, + .gpio_power = GPIO_NR_PALMT5_SD_POWER, }; /****************************************************************************** diff --git a/arch/arm/mach-pxa/palmte2.c b/arch/arm/mach-pxa/palmte2.c index d823b09801df..d9ef76c9278f 100644 --- a/arch/arm/mach-pxa/palmte2.c +++ b/arch/arm/mach-pxa/palmte2.c @@ -117,83 +117,11 @@ static unsigned long palmte2_pin_config[] __initdata = { /****************************************************************************** * SD/MMC card controller ******************************************************************************/ -static int palmte2_mci_init(struct device *dev, - irq_handler_t palmte2_detect_int, void *data) -{ - int err = 0; - - /* Setup an interrupt for detecting card insert/remove events */ - err = gpio_request(GPIO_NR_PALMTE2_SD_DETECT_N, "SD IRQ"); - if (err) - goto err; - err = gpio_direction_input(GPIO_NR_PALMTE2_SD_DETECT_N); - if (err) - goto err2; - err = request_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N), - palmte2_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM | - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "SD/MMC card detect", data); - if (err) { - printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n", - __func__); - goto err2; - } - - err = gpio_request(GPIO_NR_PALMTE2_SD_POWER, "SD_POWER"); - if (err) - goto err3; - err = gpio_direction_output(GPIO_NR_PALMTE2_SD_POWER, 0); - if (err) - goto err4; - - err = gpio_request(GPIO_NR_PALMTE2_SD_READONLY, "SD_READONLY"); - if (err) - goto err4; - err = gpio_direction_input(GPIO_NR_PALMTE2_SD_READONLY); - if (err) - goto err5; - - printk(KERN_DEBUG "%s: irq registered\n", __func__); - - return 0; - -err5: - gpio_free(GPIO_NR_PALMTE2_SD_READONLY); -err4: - gpio_free(GPIO_NR_PALMTE2_SD_POWER); -err3: - free_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N), data); -err2: - gpio_free(GPIO_NR_PALMTE2_SD_DETECT_N); -err: - return err; -} - -static void palmte2_mci_exit(struct device *dev, void *data) -{ - gpio_free(GPIO_NR_PALMTE2_SD_READONLY); - gpio_free(GPIO_NR_PALMTE2_SD_POWER); - free_irq(gpio_to_irq(GPIO_NR_PALMTE2_SD_DETECT_N), data); - gpio_free(GPIO_NR_PALMTE2_SD_DETECT_N); -} - -static void palmte2_mci_power(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - gpio_set_value(GPIO_NR_PALMTE2_SD_POWER, p_d->ocr_mask & (1 << vdd)); -} - -static int palmte2_mci_get_ro(struct device *dev) -{ - return gpio_get_value(GPIO_NR_PALMTE2_SD_READONLY); -} - static struct pxamci_platform_data palmte2_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .setpower = palmte2_mci_power, - .get_ro = palmte2_mci_get_ro, - .init = palmte2_mci_init, - .exit = palmte2_mci_exit, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_card_detect = GPIO_NR_PALMTE2_SD_DETECT_N, + .gpio_card_ro = GPIO_NR_PALMTE2_SD_READONLY, + 
.gpio_power = GPIO_NR_PALMTE2_SD_POWER, }; /****************************************************************************** diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c index 83d020879581..4a9ca718962f 100644 --- a/arch/arm/mach-pxa/palmtx.c +++ b/arch/arm/mach-pxa/palmtx.c @@ -140,83 +140,11 @@ static unsigned long palmtx_pin_config[] __initdata = { /****************************************************************************** * SD/MMC card controller ******************************************************************************/ -static int palmtx_mci_init(struct device *dev, irq_handler_t palmtx_detect_int, - void *data) -{ - int err = 0; - - /* Setup an interrupt for detecting card insert/remove events */ - err = gpio_request(GPIO_NR_PALMTX_SD_DETECT_N, "SD IRQ"); - if (err) - goto err; - err = gpio_direction_input(GPIO_NR_PALMTX_SD_DETECT_N); - if (err) - goto err2; - err = request_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N), - palmtx_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM | - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - "SD/MMC card detect", data); - if (err) { - printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n", - __func__); - goto err2; - } - - err = gpio_request(GPIO_NR_PALMTX_SD_POWER, "SD_POWER"); - if (err) - goto err3; - err = gpio_direction_output(GPIO_NR_PALMTX_SD_POWER, 0); - if (err) - goto err4; - - err = gpio_request(GPIO_NR_PALMTX_SD_READONLY, "SD_READONLY"); - if (err) - goto err4; - err = gpio_direction_input(GPIO_NR_PALMTX_SD_READONLY); - if (err) - goto err5; - - printk(KERN_DEBUG "%s: irq registered\n", __func__); - - return 0; - -err5: - gpio_free(GPIO_NR_PALMTX_SD_READONLY); -err4: - gpio_free(GPIO_NR_PALMTX_SD_POWER); -err3: - free_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N), data); -err2: - gpio_free(GPIO_NR_PALMTX_SD_DETECT_N); -err: - return err; -} - -static void palmtx_mci_exit(struct device *dev, void *data) -{ - gpio_free(GPIO_NR_PALMTX_SD_READONLY); - gpio_free(GPIO_NR_PALMTX_SD_POWER); - free_irq(gpio_to_irq(GPIO_NR_PALMTX_SD_DETECT_N), data); - gpio_free(GPIO_NR_PALMTX_SD_DETECT_N); -} - -static void palmtx_mci_power(struct device *dev, unsigned int vdd) -{ - struct pxamci_platform_data *p_d = dev->platform_data; - gpio_set_value(GPIO_NR_PALMTX_SD_POWER, p_d->ocr_mask & (1 << vdd)); -} - -static int palmtx_mci_get_ro(struct device *dev) -{ - return gpio_get_value(GPIO_NR_PALMTX_SD_READONLY); -} - static struct pxamci_platform_data palmtx_mci_platform_data = { - .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, - .setpower = palmtx_mci_power, - .get_ro = palmtx_mci_get_ro, - .init = palmtx_mci_init, - .exit = palmtx_mci_exit, + .ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, + .gpio_card_detect = GPIO_NR_PALMTX_SD_DETECT_N, + .gpio_card_ro = GPIO_NR_PALMTX_SD_READONLY, + .gpio_power = GPIO_NR_PALMTX_SD_POWER, }; /****************************************************************************** diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c index c3645aa3fa3d..d8fa53c19178 100644 --- a/arch/arm/mach-pxa/palmz72.c +++ b/arch/arm/mach-pxa/palmz72.c @@ -129,88 +129,14 @@ static unsigned long palmz72_pin_config[] __initdata = { /****************************************************************************** * SD/MMC card controller ******************************************************************************/ -static int palmz72_mci_init(struct device *dev, - irq_handler_t palmz72_detect_int, void *data) -{ - int err = 0; - - /* Setup an interrupt for detecting card insert/remove events */ - err = 
gpio_request(GPIO_NR_PALMZ72_SD_DETECT_N, "SD IRQ");
-	if (err)
-		goto err;
-	err = gpio_direction_input(GPIO_NR_PALMZ72_SD_DETECT_N);
-	if (err)
-		goto err2;
-	err = request_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N),
-			palmz72_detect_int, IRQF_DISABLED | IRQF_SAMPLE_RANDOM |
-			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
-			"SD/MMC card detect", data);
-	if (err) {
-		printk(KERN_ERR "%s: cannot request SD/MMC card detect IRQ\n",
-				__func__);
-		goto err2;
-	}
-
-	/* SD_POWER is not actually power, but it is more like chip
-	 * select, i.e. it is inverted */
-
-	err = gpio_request(GPIO_NR_PALMZ72_SD_POWER_N, "SD_POWER");
-	if (err)
-		goto err3;
-	err = gpio_direction_output(GPIO_NR_PALMZ72_SD_POWER_N, 0);
-	if (err)
-		goto err4;
-	err = gpio_request(GPIO_NR_PALMZ72_SD_RO, "SD_RO");
-	if (err)
-		goto err4;
-	err = gpio_direction_input(GPIO_NR_PALMZ72_SD_RO);
-	if (err)
-		goto err5;
-
-	printk(KERN_DEBUG "%s: irq registered\n", __func__);
-
-	return 0;
-
-err5:
-	gpio_free(GPIO_NR_PALMZ72_SD_RO);
-err4:
-	gpio_free(GPIO_NR_PALMZ72_SD_POWER_N);
-err3:
-	free_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N), data);
-err2:
-	gpio_free(GPIO_NR_PALMZ72_SD_DETECT_N);
-err:
-	return err;
-}
-
-static void palmz72_mci_exit(struct device *dev, void *data)
-{
-	gpio_free(GPIO_NR_PALMZ72_SD_POWER_N);
-	free_irq(gpio_to_irq(GPIO_NR_PALMZ72_SD_DETECT_N), data);
-	gpio_free(GPIO_NR_PALMZ72_SD_DETECT_N);
-	gpio_free(GPIO_NR_PALMZ72_SD_RO);
-}
-
-static void palmz72_mci_power(struct device *dev, unsigned int vdd)
-{
-	struct pxamci_platform_data *p_d = dev->platform_data;
-	if (p_d->ocr_mask & (1 << vdd))
-		gpio_set_value(GPIO_NR_PALMZ72_SD_POWER_N, 0);
-	else
-		gpio_set_value(GPIO_NR_PALMZ72_SD_POWER_N, 1);
-}
-
-static int palmz72_mci_ro(struct device *dev)
-{
-	return gpio_get_value(GPIO_NR_PALMZ72_SD_RO);
-}
-
+/* SD_POWER is not actually power, but it is more like chip
+ * select, i.e. it is inverted */
 static struct pxamci_platform_data palmz72_mci_platform_data = {
-	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
-	.setpower	= palmz72_mci_power,
-	.get_ro		= palmz72_mci_ro,
-	.init		= palmz72_mci_init,
-	.exit		= palmz72_mci_exit,
+	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.gpio_card_detect	= GPIO_NR_PALMZ72_SD_DETECT_N,
+	.gpio_card_ro		= GPIO_NR_PALMZ72_SD_RO,
+	.gpio_power		= GPIO_NR_PALMZ72_SD_POWER_N,
+	.gpio_power_invert	= 1,
 };
 
 /******************************************************************************
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index 01791d74e08e..e211633471f7 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -321,11 +321,14 @@ static void pcm990_mci_exit(struct device *dev, void *data)
 #define MSECS_PER_JIFFY (1000/HZ)
 
 static struct pxamci_platform_data pcm990_mci_platform_data = {
-	.detect_delay	= 250 / MSECS_PER_JIFFY,
-	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
-	.init		= pcm990_mci_init,
-	.setpower	= pcm990_mci_setpower,
-	.exit		= pcm990_mci_exit,
+	.detect_delay		= 250 / MSECS_PER_JIFFY,
+	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.init			= pcm990_mci_init,
+	.setpower		= pcm990_mci_setpower,
+	.exit			= pcm990_mci_exit,
+	.gpio_card_detect	= -1,
+	.gpio_card_ro		= -1,
+	.gpio_power		= -1,
 };
 
 static struct pxaohci_platform_data pcm990_ohci_platform_data = {
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 9352d4a34837..333b1dc2dd3e 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -245,20 +245,10 @@ static inline void poodle_init_spi(void) {}
  * The card detect interrupt isn't debounced so we delay it by 250ms
  * to give the card a chance to fully insert/eject.
  */
-static struct pxamci_platform_data poodle_mci_platform_data;
-
 static int poodle_mci_init(struct device *dev, irq_handler_t poodle_detect_int, void *data)
 {
 	int err;
 
-	err = gpio_request(POODLE_GPIO_nSD_DETECT, "nSD_DETECT");
-	if (err)
-		goto err_out;
-
-	err = gpio_request(POODLE_GPIO_nSD_WP, "nSD_WP");
-	if (err)
-		goto err_free_1;
-
 	err = gpio_request(POODLE_GPIO_SD_PWR, "SD_PWR");
 	if (err)
 		goto err_free_2;
@@ -267,34 +257,14 @@ static int poodle_mci_init(struct device *dev, irq_handler_t poodle_detect_int,
 	if (err)
 		goto err_free_3;
 
-	gpio_direction_input(POODLE_GPIO_nSD_DETECT);
-	gpio_direction_input(POODLE_GPIO_nSD_WP);
-
 	gpio_direction_output(POODLE_GPIO_SD_PWR, 0);
 	gpio_direction_output(POODLE_GPIO_SD_PWR1, 0);
 
-	poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
-	err = request_irq(POODLE_IRQ_GPIO_nSD_DETECT, poodle_detect_int,
-			  IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-			  "MMC card detect", data);
-	if (err) {
-		pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n",
-				__func__);
-		goto err_free_4;
-	}
-
 	return 0;
 
-err_free_4:
-	gpio_free(POODLE_GPIO_SD_PWR1);
 err_free_3:
 	gpio_free(POODLE_GPIO_SD_PWR);
 err_free_2:
-	gpio_free(POODLE_GPIO_nSD_WP);
-err_free_1:
-	gpio_free(POODLE_GPIO_nSD_DETECT);
-err_out:
 	return err;
 }
 
@@ -312,28 +282,20 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
 	}
 }
 
-static int poodle_mci_get_ro(struct device *dev)
-{
-	return !!gpio_get_value(POODLE_GPIO_nSD_WP);
-	return GPLR(POODLE_GPIO_nSD_WP) & GPIO_bit(POODLE_GPIO_nSD_WP);
-}
-
-
 static void poodle_mci_exit(struct device *dev, void *data)
 {
-	free_irq(POODLE_IRQ_GPIO_nSD_DETECT, data);
 	gpio_free(POODLE_GPIO_SD_PWR1);
 	gpio_free(POODLE_GPIO_SD_PWR);
-	gpio_free(POODLE_GPIO_nSD_WP);
-	gpio_free(POODLE_GPIO_nSD_DETECT);
 }
 
 static struct pxamci_platform_data poodle_mci_platform_data = {
-	.ocr_mask	= MMC_VDD_32_33|MMC_VDD_33_34,
-	.init		= poodle_mci_init,
-	.get_ro		= poodle_mci_get_ro,
-	.setpower	= poodle_mci_setpower,
-	.exit		= poodle_mci_exit,
+	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
+	.init			= poodle_mci_init,
+	.setpower		= poodle_mci_setpower,
+	.exit			= poodle_mci_exit,
+	.gpio_card_detect	= POODLE_IRQ_GPIO_nSD_DETECT,
+	.gpio_card_ro		= POODLE_GPIO_nSD_WP,
+	.gpio_power		= -1,
 };
 
@@ -521,6 +483,7 @@ static void __init poodle_init(void)
 	set_pxa_fb_parent(&poodle_locomo_device.dev);
 	set_pxa_fb_info(&poodle_fb_info);
 	pxa_set_udc_info(&udc_info);
+	poodle_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 	pxa_set_mci_info(&poodle_mci_platform_data);
 	pxa_set_ficp_info(&poodle_ficp_platform_data);
 	pxa_set_i2c_info(NULL);
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index dda310fe71c8..da85327e630c 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -378,45 +378,6 @@ static inline void spitz_init_spi(void) {}
  * The card detect interrupt isn't debounced so we delay it by 250ms
  * to give the card a chance to fully insert/eject.
  */
-
-static struct pxamci_platform_data spitz_mci_platform_data;
-
-static int spitz_mci_init(struct device *dev, irq_handler_t spitz_detect_int, void *data)
-{
-	int err;
-
-	err = gpio_request(SPITZ_GPIO_nSD_DETECT, "nSD_DETECT");
-	if (err)
-		goto err_out;
-
-	err = gpio_request(SPITZ_GPIO_nSD_WP, "nSD_WP");
-	if (err)
-		goto err_free_1;
-
-	gpio_direction_input(SPITZ_GPIO_nSD_DETECT);
-	gpio_direction_input(SPITZ_GPIO_nSD_WP);
-
-	spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
-	err = request_irq(SPITZ_IRQ_GPIO_nSD_DETECT, spitz_detect_int,
-			  IRQF_DISABLED | IRQF_TRIGGER_RISING |
-			  IRQF_TRIGGER_FALLING,
-			  "MMC card detect", data);
-	if (err) {
-		pr_err("%s: MMC/SD: can't request MMC card detect IRQ\n",
-			__func__);
-		goto err_free_2;
-	}
-	return 0;
-
-err_free_2:
-	gpio_free(SPITZ_GPIO_nSD_WP);
-err_free_1:
-	gpio_free(SPITZ_GPIO_nSD_DETECT);
-err_out:
-	return err;
-}
-
 static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
 {
 	struct pxamci_platform_data* p_d = dev->platform_data;
@@ -427,24 +388,12 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
 		spitz_card_pwr_ctrl(SPITZ_PWR_SD, 0x0000);
 }
 
-static int spitz_mci_get_ro(struct device *dev)
-{
-	return gpio_get_value(SPITZ_GPIO_nSD_WP);
-}
-
-static void spitz_mci_exit(struct device *dev, void *data)
-{
-	free_irq(SPITZ_IRQ_GPIO_nSD_DETECT, data);
-	gpio_free(SPITZ_GPIO_nSD_WP);
-	gpio_free(SPITZ_GPIO_nSD_DETECT);
-}
-
 static struct pxamci_platform_data spitz_mci_platform_data = {
-	.ocr_mask	= MMC_VDD_32_33|MMC_VDD_33_34,
-	.init		= spitz_mci_init,
-	.get_ro		= spitz_mci_get_ro,
-	.setpower	= spitz_mci_setpower,
-	.exit		= spitz_mci_exit,
+	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
+	.setpower		= spitz_mci_setpower,
+	.gpio_card_detect	= SPITZ_GPIO_nSD_DETECT,
+	.gpio_card_ro		= SPITZ_GPIO_nSD_WP,
+	.gpio_power		= -1,
 };
 
@@ -695,6 +644,7 @@ static void __init common_init(void)
 	spitz_init_spi();
 
 	platform_add_devices(devices, ARRAY_SIZE(devices));
+	spitz_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 	pxa_set_mci_info(&spitz_mci_platform_data);
 	pxa_set_ohci_info(&spitz_ohci_platform_data);
 	pxa_set_ficp_info(&spitz_ficp_platform_data);
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 117ad5920e53..b56cc5667bb4 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -247,49 +247,10 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
 /*
  * MMC/SD Device
  */
-static struct pxamci_platform_data tosa_mci_platform_data;
-
 static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void *data)
 {
 	int err;
 
-	tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
-
-	err = gpio_request(TOSA_GPIO_nSD_DETECT, "MMC/SD card detect");
-	if (err) {
-		printk(KERN_ERR "tosa_mci_init: can't request nSD_DETECT gpio\n");
-		goto err_gpio_detect;
-	}
-	err = gpio_direction_input(TOSA_GPIO_nSD_DETECT);
-	if (err)
-		goto err_gpio_detect_dir;
-
-	err = request_irq(TOSA_IRQ_GPIO_nSD_DETECT, tosa_detect_int,
-			  IRQF_DISABLED | IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-			  "MMC/SD card detect", data);
-	if (err) {
-		printk(KERN_ERR "tosa_mci_init: MMC/SD: can't request MMC card detect IRQ\n");
-		goto err_irq;
-	}
-
-	err = gpio_request(TOSA_GPIO_SD_WP, "SD Write Protect");
-	if (err) {
-		printk(KERN_ERR "tosa_mci_init: can't request SD_WP gpio\n");
-		goto err_gpio_wp;
-	}
-	err = gpio_direction_input(TOSA_GPIO_SD_WP);
-	if (err)
-		goto err_gpio_wp_dir;
-
-	err = gpio_request(TOSA_GPIO_PWR_ON, "SD Power");
-	if (err) {
-		printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n");
-		goto err_gpio_pwr;
-	}
-	err = gpio_direction_output(TOSA_GPIO_PWR_ON, 0);
-	if (err)
-		goto err_gpio_pwr_dir;
-
 	err = gpio_request(TOSA_GPIO_nSD_INT, "SD Int");
 	if (err) {
 		printk(KERN_ERR "tosa_mci_init: can't request SD_PWR gpio\n");
@@ -304,51 +265,21 @@ static int tosa_mci_init(struct device *dev, irq_handler_t tosa_detect_int, void
 err_gpio_int_dir:
 	gpio_free(TOSA_GPIO_nSD_INT);
 err_gpio_int:
-err_gpio_pwr_dir:
-	gpio_free(TOSA_GPIO_PWR_ON);
-err_gpio_pwr:
-err_gpio_wp_dir:
-	gpio_free(TOSA_GPIO_SD_WP);
-err_gpio_wp:
-	free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
-err_irq:
-err_gpio_detect_dir:
-	gpio_free(TOSA_GPIO_nSD_DETECT);
-err_gpio_detect:
 	return err;
 }
 
-static void tosa_mci_setpower(struct device *dev, unsigned int vdd)
-{
-	struct pxamci_platform_data* p_d = dev->platform_data;
-
-	if (( 1 << vdd) & p_d->ocr_mask) {
-		gpio_set_value(TOSA_GPIO_PWR_ON, 1);
-	} else {
-		gpio_set_value(TOSA_GPIO_PWR_ON, 0);
-	}
-}
-
-static int tosa_mci_get_ro(struct device *dev)
-{
-	return gpio_get_value(TOSA_GPIO_SD_WP);
-}
-
 static void tosa_mci_exit(struct device *dev, void *data)
 {
 	gpio_free(TOSA_GPIO_nSD_INT);
-	gpio_free(TOSA_GPIO_PWR_ON);
-	gpio_free(TOSA_GPIO_SD_WP);
-	free_irq(TOSA_IRQ_GPIO_nSD_DETECT, data);
-	gpio_free(TOSA_GPIO_nSD_DETECT);
 }
 
 static struct pxamci_platform_data tosa_mci_platform_data = {
-	.ocr_mask	= MMC_VDD_32_33|MMC_VDD_33_34,
-	.init		= tosa_mci_init,
-	.get_ro		= tosa_mci_get_ro,
-	.setpower	= tosa_mci_setpower,
-	.exit		= tosa_mci_exit,
+	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
+	.init			= tosa_mci_init,
+	.exit			= tosa_mci_exit,
+	.gpio_card_detect	= TOSA_GPIO_nSD_DETECT,
+	.gpio_card_ro		= TOSA_GPIO_SD_WP,
+	.gpio_power		= TOSA_GPIO_PWR_ON,
 };
 
 /*
@@ -910,6 +841,7 @@ static void __init tosa_init(void)
 	dummy = gpiochip_reserve(TOSA_SCOOP_JC_GPIO_BASE, 12);
 	dummy = gpiochip_reserve(TOSA_TC6393XB_GPIO_BASE, 16);
 
+	tosa_mci_platform_data.detect_delay = msecs_to_jiffies(250);
 	pxa_set_mci_info(&tosa_mci_platform_data);
 	pxa_set_udc_info(&udc_info);
 	pxa_set_ficp_info(&tosa_ficp_platform_data);
diff --git a/arch/arm/mach-pxa/trizeps4.c b/arch/arm/mach-pxa/trizeps4.c
index 825f540176d2..32299869b352 100644
--- a/arch/arm/mach-pxa/trizeps4.c
+++ b/arch/arm/mach-pxa/trizeps4.c
@@ -367,6 +367,9 @@ static struct pxamci_platform_data trizeps4_mci_platform_data = {
 	.exit		= trizeps4_mci_exit,
 	.get_ro		= NULL,	/* write-protection not supported */
 	.setpower	= NULL,	/* power-switching not supported */
+	.gpio_card_detect = -1,
+	.gpio_card_ro	= -1,
+	.gpio_power	= -1,
 };
 
 /****************************************************************************
diff --git a/arch/arm/mach-pxa/zylonite.c b/arch/arm/mach-pxa/zylonite.c
index 218d2001f1df..09784d3954e4 100644
--- a/arch/arm/mach-pxa/zylonite.c
+++ b/arch/arm/mach-pxa/zylonite.c
@@ -290,6 +290,9 @@ static struct pxamci_platform_data zylonite_mci_platform_data = {
 	.init		= zylonite_mci_init,
 	.exit		= zylonite_mci_exit,
 	.get_ro		= zylonite_mci_ro,
+	.gpio_card_detect = -1,
+	.gpio_card_ro	= -1,
+	.gpio_power	= -1,
 };
 
 static struct pxamci_platform_data zylonite_mci2_platform_data = {
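[Editor's note: the conversion above repeats the same recipe on every board, so one hedged sketch may help. A board that previously open-coded gpio_request()/request_irq() for card detect, write protect and power now only fills in the new pxamci_platform_data GPIO fields and lets the core driver claim the lines; -1 marks a signal that is not wired up. The GPIO numbers below are hypothetical, not taken from any of the boards above.]

/* Minimal post-conversion board file, assuming hypothetical GPIOs 23/24. */
static struct pxamci_platform_data example_mci_platform_data = {
	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
	.gpio_card_detect	= 23,	/* hypothetical card-detect GPIO */
	.gpio_card_ro		= 24,	/* hypothetical write-protect GPIO */
	.gpio_power		= -1,	/* no power-switch GPIO on this board */
};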
From 70f6d3e8a6670d1cccab5e206315b935eb62a08e Mon Sep 17 00:00:00 2001
From: Eric Miao
Date: Mon, 13 Jul 2009 22:57:56 +0800
Subject: [PATCH 0175/3409] [ARM] pxa/littleton: update littleton to use the new gpio_card_detect for mmc

Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/littleton.c | 43 ++++------------------------------
 1 file changed, 5 insertions(+), 38 deletions(-)

diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 55b3788fd1ae..13848955d133 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -265,45 +265,12 @@ static inline void littleton_init_keypad(void) {}
 #endif
 
 #if defined(CONFIG_MMC_PXA) || defined(CONFIG_MMC_PXA_MODULE)
-static int littleton_mci_init(struct device *dev,
-			      irq_handler_t littleton_detect_int, void *data)
-{
-	int err, gpio_cd = GPIO_MMC1_CARD_DETECT;
-
-	err = gpio_request(gpio_cd, "mmc card detect");
-	if (err)
-		goto err_request_cd;
-
-	gpio_direction_input(gpio_cd);
-
-	err = request_irq(gpio_to_irq(gpio_cd), littleton_detect_int,
-			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
-			  "mmc card detect", data);
-	if (err) {
-		dev_err(dev, "failed to request card detect IRQ\n");
-		goto err_request_irq;
-	}
-
-	return 0;
-
-err_request_irq:
-	gpio_free(gpio_cd);
-err_request_cd:
-	return err;
-}
-
-static void littleton_mci_exit(struct device *dev, void *data)
-{
-	int gpio_cd = GPIO_MMC1_CARD_DETECT;
-
-	free_irq(gpio_to_irq(gpio_cd), data);
-	gpio_free(gpio_cd);
-}
-
 static struct pxamci_platform_data littleton_mci_platform_data = {
-	.detect_delay	= 20,
-	.ocr_mask	= MMC_VDD_32_33 | MMC_VDD_33_34,
-	.init		= littleton_mci_init,
-	.exit		= littleton_mci_exit,
+	.detect_delay		= 20,
+	.ocr_mask		= MMC_VDD_32_33 | MMC_VDD_33_34,
+	.gpio_card_detect	= GPIO_MMC1_CARD_DETECT,
+	.gpio_card_ro		= -1,
+	.gpio_power		= -1,
 };
 
 static void __init littleton_init_mmc(void)
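[Editor's note: detect_delay is measured in jiffies, which is why the boards in this series use either a compile-time division by MSECS_PER_JIFFY or a runtime msecs_to_jiffies() call. Since msecs_to_jiffies() cannot appear in a static initializer, the runtime idiom sets the field just before registering the platform data. A sketch with hypothetical names:]

static void __init example_board_init(void)
{
	/* 250 ms debounce for the undebounced card-detect line */
	example_mci_platform_data.detect_delay = msecs_to_jiffies(250);
	pxa_set_mci_info(&example_mci_platform_data);
}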
From 0d95c1fa5661b11cf7777af2613563cc3c683d8d Mon Sep 17 00:00:00 2001
From: Daniel Ribeiro
Date: Tue, 23 Jun 2009 12:39:25 -0300
Subject: [PATCH 0176/3409] [ARM] pxa/ezx: set IRQ_BOARD_END for EZX

Signed-off-by: Daniel Ribeiro
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/Kconfig             | 1 +
 arch/arm/mach-pxa/include/mach/irqs.h | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 626cecb6e739..32e44b3a8fbe 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -463,6 +463,7 @@ config PXA_EZX
 	select PXA27x
 	select IWMMXT
 	select HAVE_PWM
+	select PXA_HAVE_BOARD_IRQS
 
 config MACH_EZX_A780
 	bool "Motorola EZX A780"
diff --git a/arch/arm/mach-pxa/include/mach/irqs.h b/arch/arm/mach-pxa/include/mach/irqs.h
index 7af0de4bf657..567204c3959b 100644
--- a/arch/arm/mach-pxa/include/mach/irqs.h
+++ b/arch/arm/mach-pxa/include/mach/irqs.h
@@ -129,6 +129,8 @@
 #define IRQ_BOARD_END		(IRQ_BOARD_START + 70)
 #elif defined(CONFIG_MACH_ZYLONITE)
 #define IRQ_BOARD_END		(IRQ_BOARD_START + 32)
+#elif defined(CONFIG_PXA_EZX)
+#define IRQ_BOARD_END		(IRQ_BOARD_START + 23)
 #else
 #define IRQ_BOARD_END		(IRQ_BOARD_START + 16)
 #endif
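[Editor's note: the IRQ_BOARD_END bump reserves 23 interrupt numbers in the board window for EZX. A board header then allocates its IRQs relative to IRQ_BOARD_START; the define below is purely illustrative and not taken from the EZX code:]

/* Hypothetical board IRQ, allocated inside [IRQ_BOARD_START, IRQ_BOARD_END) */
#define EXAMPLE_EZX_IRQ		(IRQ_BOARD_START + 0)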
From 53740df16f42163255031d24d99ba207a9ac81a9 Mon Sep 17 00:00:00 2001
From: Daniel Mack
Date: Wed, 17 Jun 2009 10:32:54 +0200
Subject: [PATCH 0177/3409] [ARM] pxa/colibri: add NAND config for Colibri PXA3xx boards

NAND support is enabled when the appropriate config option is set.

Signed-off-by: Daniel Mack
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/colibri-pxa300.c       |  1 +
 arch/arm/mach-pxa/colibri-pxa320.c       |  1 +
 arch/arm/mach-pxa/colibri-pxa3xx.c       | 41 ++++++++++++++++++++++++
 arch/arm/mach-pxa/include/mach/colibri.h |  6 ++++
 4 files changed, 49 insertions(+)

diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index 7c9c34c19ae2..37c239c56568 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -172,6 +172,7 @@ void __init colibri_pxa300_init(void)
 {
 	colibri_pxa300_init_eth();
 	colibri_pxa300_init_ohci();
+	colibri_pxa3xx_init_nand();
 	colibri_pxa300_init_lcd();
 	colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO39_GPIO));
 	colibri_pxa310_init_ac97();
diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index a18d37b3c5e6..458c1a147b6c 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -168,6 +168,7 @@ void __init colibri_pxa320_init(void)
 {
 	colibri_pxa320_init_eth();
 	colibri_pxa320_init_ohci();
+	colibri_pxa3xx_init_nand();
 	colibri_pxa320_init_lcd();
 	colibri_pxa3xx_init_lcd(mfp_to_gpio(GPIO49_GPIO));
 	colibri_pxa320_init_ac97();
diff --git a/arch/arm/mach-pxa/colibri-pxa3xx.c b/arch/arm/mach-pxa/colibri-pxa3xx.c
index 2c846bbfc435..efebaf4d734d 100644
--- a/arch/arm/mach-pxa/colibri-pxa3xx.c
+++ b/arch/arm/mach-pxa/colibri-pxa3xx.c
@@ -25,6 +25,7 @@
 #include
 #include
 #include
+#include
 
 #include "generic.h"
 #include "devices.h"
@@ -157,3 +158,43 @@ void __init colibri_pxa3xx_init_lcd(int bl_pin)
 }
 #endif
 
+#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
+static struct mtd_partition colibri_nand_partitions[] = {
+	{
+		.name        = "bootloader",
+		.offset      = 0,
+		.size        = SZ_512K,
+		.mask_flags  = MTD_WRITEABLE, /* force read-only */
+	},
+	{
+		.name        = "kernel",
+		.offset      = MTDPART_OFS_APPEND,
+		.size        = SZ_4M,
+		.mask_flags  = MTD_WRITEABLE, /* force read-only */
+	},
+	{
+		.name        = "reserved",
+		.offset      = MTDPART_OFS_APPEND,
+		.size        = SZ_1M,
+		.mask_flags  = MTD_WRITEABLE, /* force read-only */
+	},
+	{
+		.name        = "fs",
+		.offset      = MTDPART_OFS_APPEND,
+		.size        = MTDPART_SIZ_FULL,
+	},
+};
+
+static struct pxa3xx_nand_platform_data colibri_nand_info = {
+	.enable_arbiter	= 1,
+	.keep_config	= 1,
+	.parts		= colibri_nand_partitions,
+	.nr_parts	= ARRAY_SIZE(colibri_nand_partitions),
+};
+
+void __init colibri_pxa3xx_init_nand(void)
+{
+	pxa3xx_set_nand_info(&colibri_nand_info);
+}
+#endif
+
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index a88d7caff0d1..811743c56147 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -23,6 +23,12 @@ static inline void colibri_pxa3xx_init_lcd(int bl_pin) {}
 extern void colibri_pxa3xx_init_eth(struct ax_plat_data *plat_data);
 #endif
 
+#if defined(CONFIG_MTD_NAND_PXA3xx) || defined(CONFIG_MTD_NAND_PXA3xx_MODULE)
+extern void colibri_pxa3xx_init_nand(void);
+#else
+static inline void colibri_pxa3xx_init_nand(void) {}
+#endif
+
 /* physical memory regions */
 #define COLIBRI_SDRAM_BASE	0xa0000000	/* SDRAM region */

From ecc85e166fbec0378215aab13925b0e980a48d1e Mon Sep 17 00:00:00 2001
From: Alex Roman
Date: Fri, 3 Jul 2009 14:58:59 +0800
Subject: [PATCH 0178/3409] [ARM] pxa/colibri: add UART MFP configuration for the PXA320 module

This patch configures the MFP pads for UART1, UART2 and UART3 on the
Toradex Colibri PXA320 module. Previously the pads were left
unconfigured, so only the first UART worked: it was the only one set up
by the bootloader (Toradex EBOOT in our case).

This patch is against vanilla 2.6.30 and has been tested on the Toradex
Orchid carrier board; all three UARTs are functional.

Signed-off-by: Alex Roman
Acked-by: Daniel Mack
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/colibri-pxa320.c | 32 ++++++++++++++++++++++++++++++
 1 file changed, 32 insertions(+)

diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
index 458c1a147b6c..494572825c7d 100644
--- a/arch/arm/mach-pxa/colibri-pxa320.c
+++ b/arch/arm/mach-pxa/colibri-pxa320.c
@@ -164,6 +164,37 @@ static inline void __init colibri_pxa320_init_ac97(void)
 static inline void colibri_pxa320_init_ac97(void) {}
 #endif
 
+/*
+ * The following configuration is verified to work with the Toradex Orchid
+ * carrier board
+ */
+static mfp_cfg_t colibri_pxa320_uart_pin_config[] __initdata = {
+	/* UART 1 configuration (may be set by bootloader) */
+	GPIO99_UART1_CTS,
+	GPIO104_UART1_RTS,
+	GPIO97_UART1_RXD,
+	GPIO98_UART1_TXD,
+	GPIO101_UART1_DTR,
+	GPIO103_UART1_DSR,
+	GPIO100_UART1_DCD,
+	GPIO102_UART1_RI,
+
+	/* UART 2 configuration */
+	GPIO109_UART2_CTS,
+	GPIO112_UART2_RTS,
+	GPIO110_UART2_RXD,
+	GPIO111_UART2_TXD,
+
+	/* UART 3 configuration */
+	GPIO30_UART3_RXD,
+	GPIO31_UART3_TXD,
+};
+
+static void __init colibri_pxa320_init_uart(void)
+{
+	pxa3xx_mfp_config(ARRAY_AND_SIZE(colibri_pxa320_uart_pin_config));
+}
+
 void __init colibri_pxa320_init(void)
 {
 	colibri_pxa320_init_eth();
@@ -174,6 +205,7 @@ void __init colibri_pxa320_init(void)
 	colibri_pxa320_init_ac97();
 	colibri_pxa3xx_init_mmc(ARRAY_AND_SIZE(colibri_pxa320_mmc_pin_config),
 				mfp_to_gpio(MFP_PIN_GPIO28));
+	colibri_pxa320_init_uart();
 }
 
 MACHINE_START(COLIBRI320, "Toradex Colibri PXA320")
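[Editor's note: in the colibri NAND table above, MTDPART_OFS_APPEND makes each partition start where the previous one ends, so the offsets are implicit. Spelled out, the resulting flash layout is:]

/*
 * bootloader  0x00000000 - 0x0007ffff  (512 KiB, MTD_WRITEABLE masked => read-only)
 * kernel      0x00080000 - 0x0047ffff  (4 MiB, read-only)
 * reserved    0x00480000 - 0x0057ffff  (1 MiB, read-only)
 * fs          0x00580000 - end of chip (MTDPART_SIZ_FULL)
 */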
From 4a3d3abe91963d7b3e562e70fc6c5c9d3f76fb30 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 13 May 2009 11:33:43 +0200
Subject: [PATCH 0179/3409] [ARM] pxa/pcm990: convert pcm990 to soc-camera as platform-device

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/pcm990-baseboard.c | 54 ++++++++++++++++++++++------
 1 file changed, 44 insertions(+), 10 deletions(-)

diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index e211633471f7..bbda57078e0f 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -430,25 +430,56 @@ static void pcm990_camera_free_bus(struct soc_camera_link *link)
 	gpio_bus_switch = -EINVAL;
 }
 
-static struct soc_camera_link iclink = {
-	.bus_id	= 0, /* Must match with the camera ID above */
-	.query_bus_param = pcm990_camera_query_bus_param,
-	.set_bus_param = pcm990_camera_set_bus_param,
-	.free_bus = pcm990_camera_free_bus,
-};
-
 /* Board I2C devices. */
 static struct i2c_board_info __initdata pcm990_i2c_devices[] = {
 	{
 		/* Must initialize before the camera(s) */
 		I2C_BOARD_INFO("pca9536", 0x41),
 		.platform_data = &pca9536_data,
-	}, {
+	},
+};
+
+static struct i2c_board_info pcm990_camera_i2c[] = {
+	{
 		I2C_BOARD_INFO("mt9v022", 0x48),
-		.platform_data = &iclink, /* With extender */
 	}, {
 		I2C_BOARD_INFO("mt9m001", 0x5d),
-		.platform_data = &iclink, /* With extender */
+	},
+};
+
+static struct soc_camera_link iclink[] = {
+	{
+		.bus_id			= 0, /* Must match with the camera ID */
+		.board_info		= &pcm990_camera_i2c[0],
+		.i2c_adapter_id		= 0,
+		.query_bus_param	= pcm990_camera_query_bus_param,
+		.set_bus_param		= pcm990_camera_set_bus_param,
+		.free_bus		= pcm990_camera_free_bus,
+		.module_name		= "mt9v022",
+	}, {
+		.bus_id			= 0, /* Must match with the camera ID */
+		.board_info		= &pcm990_camera_i2c[1],
+		.i2c_adapter_id		= 0,
+		.query_bus_param	= pcm990_camera_query_bus_param,
+		.set_bus_param		= pcm990_camera_set_bus_param,
+		.free_bus		= pcm990_camera_free_bus,
+		.module_name		= "mt9m001",
+	},
+};
+
+static struct platform_device pcm990_camera[] = {
+	{
+		.name	= "soc-camera-pdrv",
+		.id	= 0,
+		.dev	= {
+			.platform_data = &iclink[0],
+		},
+	}, {
+		.name	= "soc-camera-pdrv",
+		.id	= 1,
+		.dev	= {
+			.platform_data = &iclink[1],
+		},
 	},
 };
 #endif /* CONFIG_VIDEO_PXA27x ||CONFIG_VIDEO_PXA27x_MODULE */
@@ -504,6 +535,9 @@ void __init pcm990_baseboard_init(void)
 	pxa_set_camera_info(&pcm990_pxacamera_platform_data);
 
 	i2c_register_board_info(0, ARRAY_AND_SIZE(pcm990_i2c_devices));
+
+	platform_device_register(&pcm990_camera[0]);
+	platform_device_register(&pcm990_camera[1]);
 #endif
 	printk(KERN_INFO "PCM-990 Evaluation baseboard initialized\n");
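[Editor's note: the "soc-camera-pdrv" pattern gives each sensor its own platform device whose platform_data is a soc_camera_link carrying the sensor's i2c_board_info; the soc-camera core then instantiates the I2C client itself when the device binds, which is why the explicit camera entries leave the board's i2c_register_board_info() list in these conversions. The two registrations above could equally be written as a loop; a sketch with equivalent behaviour:]

static void __init pcm990_register_cameras(void)
{
	int i;

	/* pcm990_camera[] is an array of devices, not of pointers, so
	 * platform_add_devices() does not apply here. */
	for (i = 0; i < ARRAY_SIZE(pcm990_camera); i++)
		platform_device_register(&pcm990_camera[i]);
}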
From a48c24a696f0d93c49f913b7818e9819612b1f4e Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Fri, 24 Apr 2009 18:40:27 +0200
Subject: [PATCH 0180/3409] [ARM] pxa/mioa701: convert mioa701 to the new platform-device soc-camera interface

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/mioa701.c | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index a8122ecd2a3a..3cab452e5567 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -708,19 +708,20 @@ static struct i2c_board_info __initdata mioa701_pi2c_devices[] = {
 	},
 };
 
-static struct soc_camera_link iclink = {
-	.bus_id	= 0, /* Must match id in pxa27x_device_camera in device.c */
-};
-
 /* Board I2C devices. */
 static struct i2c_board_info __initdata mioa701_i2c_devices[] = {
 	{
-		/* Must initialize before the camera(s) */
 		I2C_BOARD_INFO("mt9m111", 0x5d),
-		.platform_data = &iclink,
 	},
 };
 
+static struct soc_camera_link iclink = {
+	.bus_id		= 0, /* Match id in pxa27x_device_camera in device.c */
+	.board_info	= &mioa701_i2c_devices[0],
+	.i2c_adapter_id	= 0,
+	.module_name	= "mt9m111",
+};
+
 struct i2c_pxa_platform_data i2c_pdata = {
 	.fast_mode = 1,
 };
@@ -754,6 +755,7 @@ MIO_SIMPLE_DEV(pxa2xx_pcm,	"pxa2xx-pcm",	NULL)
 MIO_SIMPLE_DEV(mioa701_sound,	"mioa701-wm9713", NULL)
 MIO_SIMPLE_DEV(mioa701_board,	"mioa701-board",  NULL)
 MIO_SIMPLE_DEV(gpio_vbus,	"gpio-vbus", &gpio_vbus_data);
+MIO_SIMPLE_DEV(mioa701_camera,	"soc-camera-pdrv",&iclink);
 
 static struct platform_device *devices[] __initdata = {
 	&mioa701_gpio_keys,
@@ -764,6 +766,7 @@ static struct platform_device *devices[] __initdata = {
 	&power_dev,
 	&strataflash,
 	&gpio_vbus,
+	&mioa701_camera,
 	&mioa701_board,
 };
 
@@ -813,7 +816,6 @@ static void __init mioa701_machine_init(void)
 	pxa_set_i2c_info(&i2c_pdata);
 	pxa27x_set_i2c_power_info(NULL);
 	pxa_set_camera_info(&mioa701_pxacamera_platform_data);
-	i2c_register_board_info(0, ARRAY_AND_SIZE(mioa701_i2c_devices));
 }
 
 static void mioa701_machine_exit(void)

From 489db6a662af5ca1b6574bae2d974017346bb983 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 12 May 2009 18:12:29 +0200
Subject: [PATCH 0181/3409] [ARM] pxa/em-x270: convert em-x270 to soc-camera as platform-device

Signed-off-by: Guennadi Liakhovetski
Acked-by: Mike Rapoport
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/em-x270.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index e0121f4fcaf4..2ed1dfe194b3 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1025,22 +1025,32 @@ static int em_x270_sensor_power(struct device *dev, int on)
 	return 0;
 }
 
-static struct soc_camera_link iclink = {
-	.bus_id	= 0,
-	.power = em_x270_sensor_power,
-};
-
 static struct i2c_board_info em_x270_i2c_cam_info[] = {
 	{
 		I2C_BOARD_INFO("mt9m111", 0x48),
+	},
+};
+
+static struct soc_camera_link iclink = {
+	.bus_id		= 0,
+	.power		= em_x270_sensor_power,
+	.board_info	= &em_x270_i2c_cam_info[0],
+	.i2c_adapter_id	= 0,
+	.module_name	= "mt9m111",
+};
+
+static struct platform_device em_x270_camera = {
+	.name	= "soc-camera-pdrv",
+	.id	= -1,
+	.dev	= {
 		.platform_data = &iclink,
 	},
 };
 
 static void __init em_x270_init_camera(void)
 {
-	i2c_register_board_info(0, ARRAY_AND_SIZE(em_x270_i2c_cam_info));
 	pxa_set_camera_info(&em_x270_camera_platform_data);
+	platform_device_register(&em_x270_camera);
 }
 #else
 static inline void em_x270_init_camera(void) {}
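[Editor's note: the MIO_SIMPLE_DEV() helper used in the mioa701 patch above is a local convenience macro for declaring a platform device with platform_data. Its definition is not shown in this series; presumably it expands to something like the sketch below, which has not been verified against the actual source:]

#define MIO_SIMPLE_DEV(var, strname, pdata)		\
static struct platform_device var = {			\
	.name		= strname,			\
	.id		= -1,				\
	.dev		= {				\
		.platform_data = pdata,			\
	},						\
};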
From 9cf97b1beeb8596b1e923642e3f2af949fc68082 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Sun, 26 Jul 2009 13:48:39 +0300
Subject: [PATCH 0182/3409] [ARM] pxa/em-x270: remove debug leftovers

Signed-off-by: Mike Rapoport
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/em-x270.c | 2 --
 1 file changed, 2 deletions(-)

diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 2ed1dfe194b3..8614cabd2ddf 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1258,7 +1258,6 @@ static void __init em_x270_init_i2c(void)
 
 static void __init em_x270_module_init(void)
 {
-	pr_info("%s\n", __func__);
 	pxa2xx_mfp_config(ARRAY_AND_SIZE(em_x270_pin_config));
 
 	mmc_cd = GPIO13_MMC_CD;
@@ -1270,7 +1269,6 @@ static void __init em_x270_module_init(void)
 
 static void __init em_x270_exeda_init(void)
 {
-	pr_info("%s\n", __func__);
 	pxa2xx_mfp_config(ARRAY_AND_SIZE(exeda_pin_config));
 
 	mmc_cd = GPIO114_MMC_CD;

From 100a1d250d3806caa97b08d04cb558372c8e8097 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Sun, 26 Jul 2009 13:48:40 +0300
Subject: [PATCH 0183/3409] [ARM] pxa/em-x270: add vcc_core regulator

Signed-off-by: Mike Rapoport
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/em-x270.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index 8614cabd2ddf..aec7f4214b14 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -1116,6 +1116,7 @@ REGULATOR_CONSUMER(ldo5, NULL, "vcc cam");
 REGULATOR_CONSUMER(ldo10, &pxa_device_mci.dev, "vcc sdio");
 REGULATOR_CONSUMER(ldo12, NULL, "vcc usb");
 REGULATOR_CONSUMER(ldo19, &em_x270_gprs_userspace_consumer.dev, "vcc gprs");
+REGULATOR_CONSUMER(buck2, NULL, "vcc_core");
 
 #define REGULATOR_INIT(_ldo, _min_uV, _max_uV, _ops_mask)		\
 static struct regulator_init_data _ldo##_data = {			\
@@ -1138,6 +1139,7 @@ REGULATOR_INIT(ldo10, 2000000, 3200000,
 	       REGULATOR_CHANGE_STATUS | REGULATOR_CHANGE_VOLTAGE);
 REGULATOR_INIT(ldo12, 3000000, 3000000, REGULATOR_CHANGE_STATUS);
 REGULATOR_INIT(ldo19, 3200000, 3200000, REGULATOR_CHANGE_STATUS);
+REGULATOR_INIT(buck2, 1000000, 1650000, REGULATOR_CHANGE_VOLTAGE);
 
 struct led_info em_x270_led_info = {
 	.name = "em-x270:orange",
@@ -1207,6 +1209,8 @@ struct da903x_subdev_info em_x270_da9030_subdevs[] = {
 	DA9030_LDO(12),
 	DA9030_LDO(19),
 
+	DA9030_SUBDEV(regulator, BUCK2, &buck2_data),
+
 	DA9030_SUBDEV(led, LED_PC, &em_x270_led_info),
 	DA9030_SUBDEV(backlight, WLED, &em_x270_led_info),
 	DA9030_SUBDEV(battery, BAT, &em_x270_batterty_info),

From a2099e49f4f8728b0c0301a4e3eb650644ee9206 Mon Sep 17 00:00:00 2001
From: Mike Rapoport
Date: Mon, 3 Aug 2009 12:07:52 +0300
Subject: [PATCH 0184/3409] [ARM] pxa/cm-x270: add libertas device registration

v2 changes:
- use gpio_to_irq instead of IRQ_GPIO
- check gpio_direction_output return value to be on the safe side :)

Signed-off-by: Mike Rapoport
Signed-off-by: Eric Miao
---
 arch/arm/mach-pxa/cm-x270.c | 110 +++++++++++++++++++++++++++++++++++-
 1 file changed, 108 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mach-pxa/cm-x270.c b/arch/arm/mach-pxa/cm-x270.c
index 59292181088c..eea78b6c2bc5 100644
--- a/arch/arm/mach-pxa/cm-x270.c
+++ b/arch/arm/mach-pxa/cm-x270.c
@@ -13,13 +13,18 @@
 #include
 #include
 #include
+#include
 #include
 #include