-rw-r--r--  Documentation/spi/pxa2xx.rst          3
-rw-r--r--  Documentation/spi/spi-summary.rst     4
-rw-r--r--  drivers/spi/spi-amd.c               112
-rw-r--r--  drivers/spi/spi-atmel.c               8
-rw-r--r--  drivers/spi/spi-mt65xx.c             32
-rw-r--r--  drivers/spi/spi-mt7621.c             95
-rw-r--r--  drivers/spi/spi-pxa2xx.c             11
-rw-r--r--  drivers/spi/spi-rspi.c               12
-rw-r--r--  drivers/spi/spi.c                     7
-rw-r--r--  include/linux/spi/rspi.h             18
-rw-r--r--  include/linux/spi/spi.h              11
11 files changed, 178 insertions, 135 deletions
diff --git a/Documentation/spi/pxa2xx.rst b/Documentation/spi/pxa2xx.rst
index 0997d8eaf190..33d2ad95901e 100644
--- a/Documentation/spi/pxa2xx.rst
+++ b/Documentation/spi/pxa2xx.rst
@@ -194,9 +194,6 @@ The following logic is used to determine the type of I/O to be used on
a per "spi_transfer" basis::
if spi_message.len > 65536 then
- if spi_message.is_dma_mapped or rx_dma_buf != 0 or tx_dma_buf != 0 then
- reject premapped transfers
-
print "rate limited" warning
use PIO transfers
diff --git a/Documentation/spi/spi-summary.rst b/Documentation/spi/spi-summary.rst
index 546de37d6caf..f7f8b1573f25 100644
--- a/Documentation/spi/spi-summary.rst
+++ b/Documentation/spi/spi-summary.rst
@@ -419,10 +419,6 @@ any more such messages.
to make extra copies unless the hardware requires it (e.g. working
around hardware errata that force the use of bounce buffering).
- If standard dma_map_single() handling of these buffers is inappropriate,
- you can use spi_message.is_dma_mapped to tell the controller driver
- that you've already provided the relevant DMA addresses.
-
- The basic I/O primitive is spi_async(). Async requests may be
issued in any context (irq handler, task, etc) and completion
is reported using a callback provided with the message.
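The spi-summary.rst text above still stands on its own once the is_dma_mapped paragraph is dropped: callers hand the core plain DMA-safe CPU buffers and learn about completion through a callback. Below is a minimal sketch of that spi_async() pattern, not taken from this series; the demo_* names are hypothetical and a real driver would embed the message in its private state and free the buffer from the callback.

	#include <linux/spi/spi.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* Completion callback: may run in interrupt context. */
	static void demo_complete(void *context)
	{
		struct spi_message *msg = context;

		pr_info("spi_async() done, status %d\n", msg->status);
	}

	/*
	 * Static so the transfer and message outlive this function until the
	 * callback fires; real code would keep them in driver-private state.
	 */
	static struct spi_transfer demo_xfer;
	static struct spi_message demo_msg;

	static int demo_async_write(struct spi_device *spi, const void *data, size_t len)
	{
		/* DMA-safe copy of the payload -- never point tx_buf at the stack. */
		void *tx = kmemdup(data, len, GFP_KERNEL);

		if (!tx)
			return -ENOMEM;

		demo_xfer.tx_buf = tx;		/* freed in the callback in real code */
		demo_xfer.len = len;

		spi_message_init(&demo_msg);
		spi_message_add_tail(&demo_xfer, &demo_msg);
		demo_msg.complete = demo_complete;
		demo_msg.context = &demo_msg;

		/* Non-blocking: may be called from irq handler, task, etc. */
		return spi_async(spi, &demo_msg);
	}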
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 5d9b246b6963..2245ad54b03a 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/spi/spi.h>
#include <linux/iopoll.h>
+#include <linux/spi/spi-mem.h>
#define AMD_SPI_CTRL0_REG 0x00
#define AMD_SPI_EXEC_CMD BIT(16)
@@ -35,6 +36,7 @@
#define AMD_SPI_FIFO_SIZE 70
#define AMD_SPI_MEM_SIZE 200
+#define AMD_SPI_MAX_DATA 64
#define AMD_SPI_ENA_REG 0x20
#define AMD_SPI_ALT_SPD_SHIFT 20
@@ -358,6 +360,115 @@ fin_msg:
return message->status;
}
+static bool amd_spi_supports_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ /* bus width is number of IO lines used to transmit */
+ if (op->cmd.buswidth > 1 || op->addr.buswidth > 1 ||
+ op->data.buswidth > 1 || op->data.nbytes > AMD_SPI_MAX_DATA)
+ return false;
+
+ return spi_mem_default_supports_op(mem, op);
+}
+
+static int amd_spi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, AMD_SPI_MAX_DATA);
+ return 0;
+}
+
+static void amd_spi_set_addr(struct amd_spi *amd_spi,
+ const struct spi_mem_op *op)
+{
+ u8 nbytes = op->addr.nbytes;
+ u64 addr_val = op->addr.val;
+ int base_addr, i;
+
+ base_addr = AMD_SPI_FIFO_BASE + nbytes;
+
+ for (i = 0; i < nbytes; i++) {
+ amd_spi_writereg8(amd_spi, base_addr - i - 1, addr_val &
+ GENMASK(7, 0));
+ addr_val >>= 8;
+ }
+}
+
+static void amd_spi_mem_data_out(struct amd_spi *amd_spi,
+ const struct spi_mem_op *op)
+{
+ int base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes;
+ u8 *buf = (u8 *)op->data.buf.out;
+ u32 nbytes = op->data.nbytes;
+ int i;
+
+ amd_spi_set_opcode(amd_spi, op->cmd.opcode);
+ amd_spi_set_addr(amd_spi, op);
+
+ for (i = 0; i < nbytes; i++)
+ amd_spi_writereg8(amd_spi, (base_addr + i), buf[i]);
+
+ amd_spi_set_tx_count(amd_spi, op->addr.nbytes + op->data.nbytes);
+ amd_spi_set_rx_count(amd_spi, 0);
+ amd_spi_clear_fifo_ptr(amd_spi);
+ amd_spi_execute_opcode(amd_spi);
+}
+
+static void amd_spi_mem_data_in(struct amd_spi *amd_spi,
+ const struct spi_mem_op *op)
+{
+ int offset = (op->addr.nbytes == 0) ? 0 : 1;
+ u8 *buf = (u8 *)op->data.buf.in;
+ u32 nbytes = op->data.nbytes;
+ int base_addr, i;
+
+ base_addr = AMD_SPI_FIFO_BASE + op->addr.nbytes + offset;
+
+ amd_spi_set_opcode(amd_spi, op->cmd.opcode);
+ amd_spi_set_addr(amd_spi, op);
+ amd_spi_set_tx_count(amd_spi, op->addr.nbytes);
+ amd_spi_set_rx_count(amd_spi, op->data.nbytes + 1);
+ amd_spi_clear_fifo_ptr(amd_spi);
+ amd_spi_execute_opcode(amd_spi);
+ amd_spi_busy_wait(amd_spi);
+
+ for (i = 0; i < nbytes; i++)
+ buf[i] = amd_spi_readreg8(amd_spi, base_addr + i);
+}
+
+static int amd_spi_exec_mem_op(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct amd_spi *amd_spi;
+ int ret;
+
+ amd_spi = spi_controller_get_devdata(mem->spi->controller);
+
+ ret = amd_set_spi_freq(amd_spi, mem->spi->max_speed_hz);
+ if (ret)
+ return ret;
+
+ switch (op->data.dir) {
+ case SPI_MEM_DATA_IN:
+ amd_spi_mem_data_in(amd_spi, op);
+ break;
+ case SPI_MEM_DATA_OUT:
+ fallthrough;
+ case SPI_MEM_NO_DATA:
+ amd_spi_mem_data_out(amd_spi, op);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static const struct spi_controller_mem_ops amd_spi_mem_ops = {
+ .exec_op = amd_spi_exec_mem_op,
+ .adjust_op_size = amd_spi_adjust_op_size,
+ .supports_op = amd_spi_supports_op,
+};
+
static int amd_spi_host_transfer(struct spi_controller *host,
struct spi_message *msg)
{
@@ -409,6 +520,7 @@ static int amd_spi_probe(struct platform_device *pdev)
host->min_speed_hz = AMD_SPI_MIN_HZ;
host->setup = amd_spi_host_setup;
host->transfer_one_message = amd_spi_host_transfer;
+ host->mem_ops = &amd_spi_mem_ops;
host->max_transfer_size = amd_spi_max_transfer_size;
host->max_message_size = amd_spi_max_transfer_size;
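For context on what the new amd_spi_mem_ops path accepts: amd_spi_supports_op() turns away anything wider than one I/O line or carrying more than AMD_SPI_MAX_DATA bytes, and amd_spi_adjust_op_size() clamps the data length to that limit. The following is a hedged sketch of a spi-mem client building an op that fits those constraints, using the generic spi-mem helpers rather than anything from this patch; the opcode and demo_* name are illustrative only.

	#include <linux/spi/spi-mem.h>

	/* Hypothetical read of up to 64 data bytes over single-bit I/O. */
	static int demo_read_id(struct spi_mem *mem, u8 *buf, size_t len)
	{
		struct spi_mem_op op =
			SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),	/* 1-bit opcode phase */
				   SPI_MEM_OP_NO_ADDR,
				   SPI_MEM_OP_NO_DUMMY,
				   SPI_MEM_OP_DATA_IN(len, buf, 1));
		int ret;

		/* Let the controller shrink the op to its FIFO room (adjust_op_size). */
		ret = spi_mem_adjust_op_size(mem, &op);
		if (ret)
			return ret;

		/* Controllers such as spi-amd reject multi-line or oversized ops here. */
		if (!spi_mem_supports_op(mem, &op))
			return -EOPNOTSUPP;

		return spi_mem_exec_op(mem, &op);
	}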
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bad34998454a..b62f57390d8f 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -987,8 +987,6 @@ static void atmel_spi_pdc_next_xfer(struct spi_controller *host,
* For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
* - The buffer is either valid for CPU access, else NULL
* - If the buffer is valid, so is its DMA address
- *
- * This driver manages the dma address unless message->is_dma_mapped.
*/
static int
atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
@@ -1374,8 +1372,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
*/
- if ((!host->cur_msg->is_dma_mapped)
- && as->use_pdc) {
+ if (as->use_pdc) {
if (atmel_spi_dma_map_xfer(as, xfer) < 0)
return -ENOMEM;
}
@@ -1454,8 +1451,7 @@ static int atmel_spi_one_transfer(struct spi_controller *host,
}
}
- if (!host->cur_msg->is_dma_mapped
- && as->use_pdc)
+ if (as->use_pdc)
atmel_spi_dma_unmap_xfer(host, xfer);
if (as->use_pdc)
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index e4cb22fe0075..36c2f52cd6b8 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -748,7 +748,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
u32 cmd, reg_val, cnt, remainder, len;
struct spi_controller *host = dev_id;
struct mtk_spi *mdata = spi_controller_get_devdata(host);
- struct spi_transfer *trans = mdata->cur_transfer;
+ struct spi_transfer *xfer = mdata->cur_transfer;
reg_val = readl(mdata->base + SPI_STATUS0_REG);
if (reg_val & MTK_SPI_PAUSE_INT_STATUS)
@@ -762,42 +762,40 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
- if (!host->can_dma(host, NULL, trans)) {
- if (trans->rx_buf) {
+ if (!host->can_dma(host, NULL, xfer)) {
+ if (xfer->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
- trans->rx_buf + mdata->num_xfered, cnt);
+ xfer->rx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = readl(mdata->base + SPI_RX_DATA_REG);
- memcpy(trans->rx_buf +
- mdata->num_xfered +
- (cnt * 4),
+ memcpy(xfer->rx_buf + (cnt * 4) + mdata->num_xfered,
&reg_val,
remainder);
}
}
mdata->num_xfered += mdata->xfer_len;
- if (mdata->num_xfered == trans->len) {
+ if (mdata->num_xfered == xfer->len) {
spi_finalize_current_transfer(host);
return IRQ_HANDLED;
}
- len = trans->len - mdata->num_xfered;
+ len = xfer->len - mdata->num_xfered;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(host);
- if (trans->tx_buf) {
+ if (xfer->tx_buf) {
cnt = mdata->xfer_len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ xfer->tx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = 0;
memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ xfer->tx_buf + (cnt * 4) + mdata->num_xfered,
remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
@@ -809,21 +807,21 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
}
if (mdata->tx_sgl)
- trans->tx_dma += mdata->xfer_len;
+ xfer->tx_dma += mdata->xfer_len;
if (mdata->rx_sgl)
- trans->rx_dma += mdata->xfer_len;
+ xfer->rx_dma += mdata->xfer_len;
if (mdata->tx_sgl && (mdata->tx_sgl_len == 0)) {
mdata->tx_sgl = sg_next(mdata->tx_sgl);
if (mdata->tx_sgl) {
- trans->tx_dma = sg_dma_address(mdata->tx_sgl);
+ xfer->tx_dma = sg_dma_address(mdata->tx_sgl);
mdata->tx_sgl_len = sg_dma_len(mdata->tx_sgl);
}
}
if (mdata->rx_sgl && (mdata->rx_sgl_len == 0)) {
mdata->rx_sgl = sg_next(mdata->rx_sgl);
if (mdata->rx_sgl) {
- trans->rx_dma = sg_dma_address(mdata->rx_sgl);
+ xfer->rx_dma = sg_dma_address(mdata->rx_sgl);
mdata->rx_sgl_len = sg_dma_len(mdata->rx_sgl);
}
}
@@ -841,7 +839,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mtk_spi_update_mdata_len(host);
mtk_spi_setup_packet(host);
- mtk_spi_setup_dma_addr(host, trans);
+ mtk_spi_setup_dma_addr(host, xfer);
mtk_spi_enable_transfer(host);
return IRQ_HANDLED;
diff --git a/drivers/spi/spi-mt7621.c b/drivers/spi/spi-mt7621.c
index 4e9053d03d5a..3770b8e096a4 100644
--- a/drivers/spi/spi-mt7621.c
+++ b/drivers/spi/spi-mt7621.c
@@ -52,6 +52,8 @@
#define MT7621_CPOL BIT(4)
#define MT7621_LSB_FIRST BIT(3)
+#define MT7621_NATIVE_CS_COUNT 2
+
struct mt7621_spi {
struct spi_controller *host;
void __iomem *base;
@@ -75,10 +77,11 @@ static inline void mt7621_spi_write(struct mt7621_spi *rs, u32 reg, u32 val)
iowrite32(val, rs->base + reg);
}
-static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
+static void mt7621_spi_set_native_cs(struct spi_device *spi, bool enable)
{
struct mt7621_spi *rs = spidev_to_mt7621_spi(spi);
int cs = spi_get_chipselect(spi, 0);
+ bool active = spi->mode & SPI_CS_HIGH ? enable : !enable;
u32 polar = 0;
u32 host;
@@ -94,7 +97,7 @@ static void mt7621_spi_set_cs(struct spi_device *spi, int enable)
rs->pending_write = 0;
- if (enable)
+ if (active)
polar = BIT(cs);
mt7621_spi_write(rs, MT7621_SPI_POLAR, polar);
}
@@ -154,6 +157,23 @@ static inline int mt7621_spi_wait_till_ready(struct mt7621_spi *rs)
return -ETIMEDOUT;
}
+static int mt7621_spi_prepare_message(struct spi_controller *host,
+ struct spi_message *m)
+{
+ struct mt7621_spi *rs = spi_controller_get_devdata(host);
+ struct spi_device *spi = m->spi;
+ unsigned int speed = spi->max_speed_hz;
+ struct spi_transfer *t = NULL;
+
+ mt7621_spi_wait_till_ready(rs);
+
+ list_for_each_entry(t, &m->transfers, transfer_list)
+ if (t->speed_hz < speed)
+ speed = t->speed_hz;
+
+ return mt7621_spi_prepare(spi, speed);
+}
+
static void mt7621_spi_read_half_duplex(struct mt7621_spi *rs,
int rx_len, u8 *buf)
{
@@ -243,59 +263,30 @@ static void mt7621_spi_write_half_duplex(struct mt7621_spi *rs,
}
rs->pending_write = len;
+ mt7621_spi_flush(rs);
}
-static int mt7621_spi_transfer_one_message(struct spi_controller *host,
- struct spi_message *m)
+static int mt7621_spi_transfer_one(struct spi_controller *host,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
struct mt7621_spi *rs = spi_controller_get_devdata(host);
- struct spi_device *spi = m->spi;
- unsigned int speed = spi->max_speed_hz;
- struct spi_transfer *t = NULL;
- int status = 0;
-
- mt7621_spi_wait_till_ready(rs);
- list_for_each_entry(t, &m->transfers, transfer_list)
- if (t->speed_hz < speed)
- speed = t->speed_hz;
-
- if (mt7621_spi_prepare(spi, speed)) {
- status = -EIO;
- goto msg_done;
- }
-
- /* Assert CS */
- mt7621_spi_set_cs(spi, 1);
-
- m->actual_length = 0;
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if ((t->rx_buf) && (t->tx_buf)) {
- /*
- * This controller will shift some extra data out
- * of spi_opcode if (mosi_bit_cnt > 0) &&
- * (cmd_bit_cnt == 0). So the claimed full-duplex
- * support is broken since we have no way to read
- * the MISO value during that bit.
- */
- status = -EIO;
- goto msg_done;
- } else if (t->rx_buf) {
- mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
- } else if (t->tx_buf) {
- mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
- }
- m->actual_length += t->len;
+ if ((t->rx_buf) && (t->tx_buf)) {
+ /*
+ * This controller will shift some extra data out
+ * of spi_opcode if (mosi_bit_cnt > 0) &&
+ * (cmd_bit_cnt == 0). So the claimed full-duplex
+ * support is broken since we have no way to read
+ * the MISO value during that bit.
+ */
+ return -EIO;
+ } else if (t->rx_buf) {
+ mt7621_spi_read_half_duplex(rs, t->len, t->rx_buf);
+ } else if (t->tx_buf) {
+ mt7621_spi_write_half_duplex(rs, t->len, t->tx_buf);
}
- /* Flush data and deassert CS */
- mt7621_spi_flush(rs);
- mt7621_spi_set_cs(spi, 0);
-
-msg_done:
- m->status = status;
- spi_finalize_current_message(host);
-
return 0;
}
@@ -353,10 +344,14 @@ static int mt7621_spi_probe(struct platform_device *pdev)
host->mode_bits = SPI_LSB_FIRST;
host->flags = SPI_CONTROLLER_HALF_DUPLEX;
host->setup = mt7621_spi_setup;
- host->transfer_one_message = mt7621_spi_transfer_one_message;
+ host->prepare_message = mt7621_spi_prepare_message;
+ host->set_cs = mt7621_spi_set_native_cs;
+ host->transfer_one = mt7621_spi_transfer_one;
host->bits_per_word_mask = SPI_BPW_MASK(8);
host->dev.of_node = pdev->dev.of_node;
- host->num_chipselect = 2;
+ host->max_native_cs = MT7621_NATIVE_CS_COUNT;
+ host->num_chipselect = MT7621_NATIVE_CS_COUNT;
+ host->use_gpio_descriptors = true;
dev_set_drvdata(&pdev->dev, host);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f2a856f6a99e..6c2a14418972 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -944,7 +944,6 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
struct spi_transfer *transfer)
{
struct driver_data *drv_data = spi_controller_get_devdata(controller);
- struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
@@ -959,16 +958,6 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
/* Check if we can DMA this transfer */
if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {
-
- /* Reject already-mapped transfers; PIO won't always work */
- if (message->is_dma_mapped
- || transfer->rx_dma || transfer->tx_dma) {
- dev_err(&spi->dev,
- "Mapped transfer length of %u is greater than %d\n",
- transfer->len, MAX_DMA_LEN);
- return -EINVAL;
- }
-
/* Warn ... we force this to PIO mode */
dev_warn_ratelimited(&spi->dev,
"DMA disabled for transfer length %u greater than %d\n",
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 8e81f1a8623f..7f95d22fb1ac 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -24,7 +24,6 @@
#include <linux/reset.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
-#include <linux/spi/rspi.h>
#include <linux/spinlock.h>
#define RSPI_SPCR 0x00 /* Control Register */
@@ -1131,16 +1130,12 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
- const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
unsigned int dma_tx_id, dma_rx_id;
if (dev->of_node) {
/* In the OF case we will get the slave IDs from the DT */
dma_tx_id = 0;
dma_rx_id = 0;
- } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
- dma_tx_id = rspi_pd->dma_tx_id;
- dma_rx_id = rspi_pd->dma_rx_id;
} else {
/* The driver assumes no error. */
return 0;
@@ -1290,7 +1285,6 @@ static int rspi_probe(struct platform_device *pdev)
struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
- const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
unsigned long clksrc;
@@ -1305,11 +1299,7 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
- rspi_pd = dev_get_platdata(&pdev->dev);
- if (rspi_pd && rspi_pd->num_chipselect)
- ctlr->num_chipselect = rspi_pd->num_chipselect;
- else
- ctlr->num_chipselect = 2; /* default */
+ ctlr->num_chipselect = 2; /* default */
}
rspi = spi_controller_get_devdata(ctlr);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ff75838c1b5d..a2f01116ba09 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -3709,9 +3709,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
* to the same values as *xferp, so tx_buf, rx_buf and len
* are all identical (as well as most others)
* so we just have to fix up len and the pointers.
- *
- * This also includes support for the depreciated
- * spi_message.is_dma_mapped interface.
*/
/*
@@ -3725,12 +3722,8 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
/* Update rx_buf, tx_buf and DMA */
if (xfers[i].rx_buf)
xfers[i].rx_buf += offset;
- if (xfers[i].rx_dma)
- xfers[i].rx_dma += offset;
if (xfers[i].tx_buf)
xfers[i].tx_buf += offset;
- if (xfers[i].tx_dma)
- xfers[i].tx_dma += offset;
/* Update length */
xfers[i].len = min(maxsize, xfers[i].len - offset);
diff --git a/include/linux/spi/rspi.h b/include/linux/spi/rspi.h
deleted file mode 100644
index dbdfcc7a3db2..000000000000
--- a/include/linux/spi/rspi.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Renesas SPI driver
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- */
-
-#ifndef __LINUX_SPI_RENESAS_SPI_H__
-#define __LINUX_SPI_RENESAS_SPI_H__
-
-struct rspi_plat_data {
- unsigned int dma_tx_id;
- unsigned int dma_rx_id;
-
- u16 num_chipselect;
-};
-
-#endif
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c459809efee4..b589e2547439 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -955,8 +955,8 @@ struct spi_res {
* struct spi_transfer - a read/write buffer pair
* @tx_buf: data to be written (DMA-safe memory), or NULL
* @rx_buf: data to be read (DMA-safe memory), or NULL
- * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
- * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_dma: DMA address of tx_buf, currently not for client use
+ * @rx_dma: DMA address of rx_buf, currently not for client use
* @tx_nbits: number of bits used for writing. If 0 the default
* (SPI_NBITS_SINGLE) is used.
* @rx_nbits: number of bits used for reading. If 0 the default
@@ -1066,8 +1066,7 @@ struct spi_transfer {
/*
* It's okay if tx_buf == rx_buf (right?).
* For MicroWire, one buffer must be NULL.
- * Buffers must work with dma_*map_single() calls, unless
- * spi_message.is_dma_mapped reports a pre-existing mapping.
+ * Buffers must work with dma_*map_single() calls.
*/
const void *tx_buf;
void *rx_buf;
@@ -1111,8 +1110,6 @@ struct spi_transfer {
* struct spi_message - one multi-segment SPI transaction
* @transfers: list of transfer segments in this transaction
* @spi: SPI device to which the transaction is queued
- * @is_dma_mapped: if true, the caller provided both DMA and CPU virtual
- * addresses for each transfer buffer
* @pre_optimized: peripheral driver pre-optimized the message
* @optimized: the message is in the optimized state
* @prepared: spi_prepare_message was called for the this message
@@ -1146,8 +1143,6 @@ struct spi_message {
struct spi_device *spi;
- unsigned is_dma_mapped:1;
-
/* spi_optimize_message() was called for this message */
bool pre_optimized;
/* __spi_optimize_message() was called for this message */
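With spi_message.is_dma_mapped gone, the contract documented above is all that remains for peripheral drivers: supply DMA-safe CPU buffers and let the core or the controller driver do any mapping. A minimal spi_sync() sketch of that contract follows, under the assumption of a simple full-duplex 16-bit exchange; the demo_* name and register value are hypothetical.

	#include <linux/spi/spi.h>
	#include <linux/slab.h>

	/* Buffers come from kmalloc so dma_map_single() can handle them. */
	static int demo_xfer_word(struct spi_device *spi, u16 out, u16 *in)
	{
		struct spi_transfer xfer = { .len = sizeof(out) };
		struct spi_message msg;
		__le16 *tx, *rx;
		int ret;

		tx = kmalloc(sizeof(*tx), GFP_KERNEL);
		rx = kmalloc(sizeof(*rx), GFP_KERNEL);
		if (!tx || !rx) {
			ret = -ENOMEM;
			goto out;
		}

		*tx = cpu_to_le16(out);
		xfer.tx_buf = tx;
		xfer.rx_buf = rx;

		spi_message_init(&msg);
		spi_message_add_tail(&xfer, &msg);

		/* The core maps/unmaps the buffers if the controller uses DMA. */
		ret = spi_sync(spi, &msg);
		if (!ret)
			*in = le16_to_cpu(*rx);
	out:
		kfree(tx);
		kfree(rx);
		return ret;
	}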