diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index e515b8a6f590..25c9bd409a87 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -381,7 +381,7 @@ static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
 #else
 
 static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
-			unsigned len, dma_addr_t buf)
+			struct sg_table *sgt)
 {
 	struct s3c64xx_spi_driver_data *sdd;
 	struct dma_slave_config config;
@@ -407,8 +407,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
 		dmaengine_slave_config(dma->ch, &config);
 	}
 
-	desc = dmaengine_prep_slave_single(dma->ch, buf, len,
-					dma->direction, DMA_PREP_INTERRUPT);
+	desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+				       dma->direction, DMA_PREP_INTERRUPT);
 
 	desc->callback = s3c64xx_spi_dmacb;
 	desc->callback_param = dma;
@@ -515,7 +515,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 		chcfg |= S3C64XX_SPI_CH_TXCH_ON;
 		if (dma_mode) {
 			modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+#else
 			prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+#endif
 		} else {
 			switch (sdd->cur_bpw) {
 			case 32:
@@ -547,7 +551,11 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
 			writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
 					| S3C64XX_SPI_PACKET_CNT_EN,
 					regs + S3C64XX_SPI_PACKET_CNT);
+#ifndef CONFIG_S3C_DMA
+			prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+#else
 			prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+#endif
 		}
 	}
 
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index bb7cf561c311..49313dd0a144 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -582,13 +582,70 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
 		spi->master->set_cs(spi, !enable);
 }
 
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+		       struct sg_table *sgt, void *buf, size_t len,
+		       enum dma_data_direction dir)
+{
+	const bool vmalloced_buf = is_vmalloc_addr(buf);
+	const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+	const int sgs = DIV_ROUND_UP(len, desc_len);
+	struct page *vm_page;
+	void *sg_buf;
+	size_t min;
+	int i, ret;
+
+	ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+	if (ret != 0)
+		return ret;
+
+	for (i = 0; i < sgs; i++) {
+		min = min_t(size_t, len, desc_len);
+
+		if (vmalloced_buf) {
+			vm_page = vmalloc_to_page(buf);
+			if (!vm_page) {
+				sg_free_table(sgt);
+				return -ENOMEM;
+			}
+			sg_buf = page_address(vm_page) +
+				((size_t)buf & ~PAGE_MASK);
+		} else {
+			sg_buf = buf;
+		}
+
+		sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+		buf += min;
+		len -= min;
+	}
+
+	ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+	if (ret < 0) {
+		sg_free_table(sgt);
+		return ret;
+	}
+
+	sgt->nents = ret;
+
+	return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+			  struct sg_table *sgt, enum dma_data_direction dir)
+{
+	if (sgt->orig_nents) {
+		dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+		sg_free_table(sgt);
+	}
+}
+
 static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 {
-	struct device *dev = master->dev.parent;
 	struct device *tx_dev, *rx_dev;
 	struct spi_transfer *xfer;
 	void *tmp;
 	size_t max_tx, max_rx;
+	int ret;
 
 	if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
 		max_tx = 0;
@@ -631,7 +688,7 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 		}
 	}
 
-	if (msg->is_dma_mapped || !master->can_dma)
+	if (!master->can_dma)
 		return 0;
 
 	tx_dev = &master->dma_tx->dev->device;
@@ -642,25 +699,21 @@ static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
 			continue;
 
 		if (xfer->tx_buf != NULL) {
-			xfer->tx_dma = dma_map_single(tx_dev,
-						(void *)xfer->tx_buf,
-						xfer->len,
-						DMA_TO_DEVICE);
-			if (dma_mapping_error(dev, xfer->tx_dma)) {
-				dev_err(dev, "dma_map_single Tx failed\n");
-				return -ENOMEM;
-			}
+			ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+					  (void *)xfer->tx_buf, xfer->len,
+					  DMA_TO_DEVICE);
+			if (ret != 0)
+				return ret;
 		}
 
 		if (xfer->rx_buf != NULL) {
-			xfer->rx_dma = dma_map_single(rx_dev,
-					xfer->rx_buf, xfer->len,
-					DMA_FROM_DEVICE);
-			if (dma_mapping_error(dev, xfer->rx_dma)) {
-				dev_err(dev, "dma_map_single Rx failed\n");
-				dma_unmap_single(tx_dev, xfer->tx_dma,
-						xfer->len, DMA_TO_DEVICE);
-				return -ENOMEM;
+			ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+					  xfer->rx_buf, xfer->len,
+					  DMA_FROM_DEVICE);
+			if (ret != 0) {
+				spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+					      DMA_TO_DEVICE);
+				return ret;
 			}
 		}
 	}
@@ -675,7 +728,7 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 	struct spi_transfer *xfer;
 	struct device *tx_dev, *rx_dev;
 
-	if (!master->cur_msg_mapped || msg->is_dma_mapped || !master->can_dma)
+	if (!master->cur_msg_mapped || !master->can_dma)
 		return 0;
 
 	tx_dev = &master->dma_tx->dev->device;
@@ -685,12 +738,8 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
 		if (!master->can_dma(master, msg->spi, xfer))
 			continue;
 
-		if (xfer->rx_buf)
-			dma_unmap_single(rx_dev, xfer->rx_dma, xfer->len,
-					 DMA_FROM_DEVICE);
-		if (xfer->tx_buf)
-			dma_unmap_single(tx_dev, xfer->tx_dma, xfer->len,
-					 DMA_TO_DEVICE);
+		spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+		spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 	}
 
 	return 0;
@@ -1503,6 +1552,8 @@ int spi_register_master(struct spi_master *master)
 	mutex_init(&master->bus_lock_mutex);
 	master->bus_lock_flag = 0;
 	init_completion(&master->xfer_completion);
+	if (!master->max_dma_len)
+		master->max_dma_len = INT_MAX;
 
 	/* register the device, then userspace will see it.
 	 * registration fails if the bus ID is in use.
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 31a5b0ee93ec..0c23c835d48b 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
+#include <linux/scatterlist.h>
 
 struct dma_chan;
 
@@ -268,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  * @auto_runtime_pm: the core should ensure a runtime PM reference is held
  *		while the hardware is prepared, using the parent
  *		device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
  * @prepare_transfer_hardware: a message will soon arrive from the queue
  *	so the subsystem requests the driver to prepare the transfer hardware
  *	by issuing this call
@@ -421,6 +423,7 @@ struct spi_master {
 	bool			cur_msg_prepared;
 	bool			cur_msg_mapped;
 	struct completion	xfer_completion;
+	size_t			max_dma_len;
 
 	int			(*prepare_transfer_hardware)(struct spi_master *master);
 	int			(*transfer_one_message)(struct spi_master *master,
@@ -533,6 +536,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
  *	(optionally) changing the chipselect status, then starting
  *	the next transfer or completing this @spi_message.
  * @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
  *
  * SPI transfers always write the same number of bytes as they read.
  * Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -600,6 +605,8 @@ struct spi_transfer {
 
 	dma_addr_t	tx_dma;
 	dma_addr_t	rx_dma;
+	struct sg_table tx_sg;
+	struct sg_table rx_sg;
 
 	unsigned	cs_change:1;
 	unsigned	tx_nbits:3;
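
As a usage sketch (not part of the patch), a controller driver opts into the core's scatterlist mapping roughly as follows. The foo_* identifiers, the FIFO threshold and the channel setup are hypothetical; can_dma, max_dma_len, dma_tx, transfer_one and the per-transfer sg_tables are the interfaces touched above:

#include <linux/dmaengine.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>

#define FOO_SPI_FIFO_LEN	64	/* hypothetical FIFO depth */

struct foo_spi {
	struct dma_chan	*tx_ch;		/* assumed acquired in probe */
};

/*
 * Called by the core for each transfer; returning true makes
 * spi_map_msg() build xfer->tx_sg/rx_sg before transfer_one() runs.
 */
static bool foo_spi_can_dma(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	return xfer->len > FOO_SPI_FIFO_LEN;
}

static int foo_spi_transfer_one(struct spi_master *master,
				struct spi_device *spi,
				struct spi_transfer *xfer)
{
	struct foo_spi *fs = spi_master_get_devdata(master);
	struct dma_async_tx_descriptor *desc;

	/*
	 * PIO path for short transfers omitted in this sketch; the core
	 * has already mapped tx_buf into xfer->tx_sg for any transfer
	 * that can_dma() accepted.
	 */
	desc = dmaengine_prep_slave_sg(fs->tx_ch, xfer->tx_sg.sgl,
				       xfer->tx_sg.nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	dmaengine_submit(desc);
	dma_async_issue_pending(fs->tx_ch);

	/*
	 * Positive return: transfer still in flight; the driver's DMA
	 * completion callback ends it via spi_finalize_current_transfer().
	 */
	return 1;
}

static void foo_spi_init_master(struct spi_master *master, struct foo_spi *fs)
{
	master->can_dma = foo_spi_can_dma;
	master->dma_tx = fs->tx_ch;
	master->transfer_one = foo_spi_transfer_one;
	/* Optional: spi_register_master() defaults this to INT_MAX */
	master->max_dma_len = SZ_64K;
}

Because spi_map_buf() walks vmalloc'ed buffers page by page and caps each segment at max_dma_len, such a driver no longer depends on the physically contiguous buffers that the old dmaengine_prep_slave_single() path required.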