author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /drivers/spi/spi.c
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1.
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c | 153
1 file changed, 80 insertions(+), 73 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index fcc39523d6..0f04e832f9 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -312,7 +312,7 @@ static const struct attribute_group *spi_master_groups[] = {
 
 static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pcpu_stats,
                                               struct spi_transfer *xfer,
-                                              struct spi_controller *ctlr)
+                                              struct spi_message *msg)
 {
         int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
         struct spi_statistics *stats;
@@ -328,11 +328,9 @@ static void spi_statistics_add_transfer_stats(struct spi_statistics __percpu *pc
         u64_stats_inc(&stats->transfer_bytes_histo[l2len]);
         u64_stats_add(&stats->bytes, xfer->len);
 
-        if ((xfer->tx_buf) &&
-            (xfer->tx_buf != ctlr->dummy_tx))
+        if (spi_valid_txbuf(msg, xfer))
                 u64_stats_add(&stats->bytes_tx, xfer->len);
 
-        if ((xfer->rx_buf) &&
-            (xfer->rx_buf != ctlr->dummy_rx))
+        if (spi_valid_rxbuf(msg, xfer))
                 u64_stats_add(&stats->bytes_rx, xfer->len);
 
         u64_stats_update_end(&stats->syncp);
@@ -597,10 +595,16 @@ EXPORT_SYMBOL_GPL(spi_alloc_device);
 
 static void spi_dev_set_name(struct spi_device *spi)
 {
-        struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+        struct device *dev = &spi->dev;
+        struct fwnode_handle *fwnode = dev_fwnode(dev);
 
-        if (adev) {
-                dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+        if (is_acpi_device_node(fwnode)) {
+                dev_set_name(dev, "spi-%s", acpi_dev_name(to_acpi_device_node(fwnode)));
+                return;
+        }
+
+        if (is_software_node(fwnode)) {
+                dev_set_name(dev, "spi-%pfwP", fwnode);
                 return;
         }
 
@@ -824,14 +828,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
         proxy->controller_data = chip->controller_data;
         proxy->controller_state = NULL;
         /*
-         * spi->chip_select[i] gives the corresponding physical CS for logical CS i
-         * logical CS number is represented by setting the ith bit in spi->cs_index_mask
-         * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
-         * spi->chip_select[0] will give the physical CS.
-         * By default spi->chip_select[0] will hold the physical CS number so, set
-         * spi->cs_index_mask as 0x01.
+         * By default spi->chip_select[0] will hold the physical CS number,
+         * so set bit 0 in spi->cs_index_mask.
          */
-        proxy->cs_index_mask = 0x01;
+        proxy->cs_index_mask = BIT(0);
 
         if (chip->swnode) {
                 status = device_add_software_node(&proxy->dev, chip->swnode);
@@ -1024,20 +1024,45 @@ static void spi_res_release(struct spi_controller *ctlr, struct spi_message *mes
 }
 /*-------------------------------------------------------------------------*/
 
+#define spi_for_each_valid_cs(spi, idx)                                \
+        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)                     \
+                if (!(spi->cs_index_mask & BIT(idx))) {} else
+
 static inline bool spi_is_last_cs(struct spi_device *spi)
 {
         u8 idx;
         bool last = false;
 
-        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-                if (spi->cs_index_mask & BIT(idx)) {
-                        if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
-                                last = true;
-                }
+        spi_for_each_valid_cs(spi, idx) {
+                if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
+                        last = true;
         }
         return last;
 }
 
+static void spi_toggle_csgpiod(struct spi_device *spi, u8 idx, bool enable, bool activate)
+{
+        /*
+         * Historically ACPI has no means of the GPIO polarity and
+         * thus the SPISerialBus() resource defines it on the per-chip
+         * basis. In order to avoid a chain of negations, the GPIO
+         * polarity is considered being Active High. Even for the cases
+         * when _DSD() is involved (in the updated versions of ACPI)
+         * the GPIO CS polarity must be defined Active High to avoid
+         * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
+         * into account.
+         */
+        if (has_acpi_companion(&spi->dev))
+                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), !enable);
+        else
+                /* Polarity handled by GPIO library */
+                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx), activate);
+
+        if (activate)
+                spi_delay_exec(&spi->cs_setup, NULL);
+        else
+                spi_delay_exec(&spi->cs_inactive, NULL);
+}
+
 static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 {
@@ -1074,31 +1099,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
 
         if (spi_is_csgpiod(spi)) {
                 if (!(spi->mode & SPI_NO_CS)) {
-                        /*
-                         * Historically ACPI has no means of the GPIO polarity and
-                         * thus the SPISerialBus() resource defines it on the per-chip
-                         * basis. In order to avoid a chain of negations, the GPIO
-                         * polarity is considered being Active High. Even for the cases
-                         * when _DSD() is involved (in the updated versions of ACPI)
-                         * the GPIO CS polarity must be defined Active High to avoid
-                         * ambiguity. That's why we use enable, that takes SPI_CS_HIGH
-                         * into account.
-                         */
-                        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
-                                if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) {
-                                        if (has_acpi_companion(&spi->dev))
-                                                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
-                                                                         !enable);
-                                        else
-                                                /* Polarity handled by GPIO library */
-                                                gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
-                                                                         activate);
-
-                                        if (activate)
-                                                spi_delay_exec(&spi->cs_setup, NULL);
-                                        else
-                                                spi_delay_exec(&spi->cs_inactive, NULL);
-                                }
+                        spi_for_each_valid_cs(spi, idx) {
+                                if (spi_get_csgpiod(spi, idx))
+                                        spi_toggle_csgpiod(spi, idx, enable, activate);
                         }
                 }
                 /* Some SPI masters need both GPIO CS & slave_select */
@@ -1207,12 +1210,10 @@ static void spi_unmap_buf_attrs(struct spi_controller *ctlr,
                                 enum dma_data_direction dir,
                                 unsigned long attrs)
 {
-        if (sgt->orig_nents) {
-                dma_unmap_sgtable(dev, sgt, dir, attrs);
-                sg_free_table(sgt);
-                sgt->orig_nents = 0;
-                sgt->nents = 0;
-        }
+        dma_unmap_sgtable(dev, sgt, dir, attrs);
+        sg_free_table(sgt);
+        sgt->orig_nents = 0;
+        sgt->nents = 0;
 }
 
 void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
@@ -1221,6 +1222,11 @@ void spi_unmap_buf(struct spi_controller *ctlr, struct device *dev,
         spi_unmap_buf_attrs(ctlr, dev, sgt, dir, 0);
 }
 
+/* Dummy SG for unidirect transfers */
+static struct scatterlist dummy_sg = {
+        .page_link = SG_END,
+};
+
 static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
 {
         struct device *tx_dev, *rx_dev;
@@ -1259,6 +1265,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                                 attrs);
                         if (ret != 0)
                                 return ret;
+                } else {
+                        xfer->tx_sg.sgl = &dummy_sg;
                 }
 
                 if (xfer->rx_buf != NULL) {
@@ -1272,6 +1280,8 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
                                 return ret;
                         }
 
+                } else {
+                        xfer->rx_sg.sgl = &dummy_sg;
                 }
         }
         /* No transfer has been mapped, bail out with success */
@@ -1312,7 +1322,7 @@ static int __spi_unmap_msg(struct spi_controller *ctlr, struct spi_message *msg)
         return 0;
 }
 
-static void spi_dma_sync_for_device(struct spi_controller *ctlr,
+static void spi_dma_sync_for_device(struct spi_controller *ctlr, struct spi_message *msg,
                                     struct spi_transfer *xfer)
 {
         struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1321,13 +1331,14 @@ static void spi_dma_sync_for_device(struct spi_controller *ctlr,
         if (!ctlr->cur_msg_mapped)
                 return;
 
-        if (xfer->tx_sg.orig_nents)
-                dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
-        if (xfer->rx_sg.orig_nents)
-                dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+        if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+                return;
+
+        dma_sync_sgtable_for_device(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+        dma_sync_sgtable_for_device(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
 }
 
-static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
+static void spi_dma_sync_for_cpu(struct spi_controller *ctlr, struct spi_message *msg,
                                  struct spi_transfer *xfer)
 {
         struct device *rx_dev = ctlr->cur_rx_dma_dev;
@@ -1336,10 +1347,11 @@ static void spi_dma_sync_for_cpu(struct spi_controller *ctlr,
         if (!ctlr->cur_msg_mapped)
                 return;
 
-        if (xfer->rx_sg.orig_nents)
-                dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
-        if (xfer->tx_sg.orig_nents)
-                dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+        if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+                return;
+
+        dma_sync_sgtable_for_cpu(rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+        dma_sync_sgtable_for_cpu(tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
 }
 #else /* !CONFIG_HAS_DMA */
 static inline int __spi_map_msg(struct spi_controller *ctlr,
@@ -1355,11 +1367,13 @@ static inline int __spi_unmap_msg(struct spi_controller *ctlr,
 }
 
 static void spi_dma_sync_for_device(struct spi_controller *ctrl,
+                                    struct spi_message *msg,
                                     struct spi_transfer *xfer)
 {
 }
 
 static void spi_dma_sync_for_cpu(struct spi_controller *ctrl,
+                                 struct spi_message *msg,
                                  struct spi_transfer *xfer)
 {
 }
@@ -1619,8 +1633,8 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
         list_for_each_entry(xfer, &msg->transfers, transfer_list) {
                 trace_spi_transfer_start(msg, xfer);
 
-                spi_statistics_add_transfer_stats(statm, xfer, ctlr);
-                spi_statistics_add_transfer_stats(stats, xfer, ctlr);
+                spi_statistics_add_transfer_stats(statm, xfer, msg);
+                spi_statistics_add_transfer_stats(stats, xfer, msg);
 
                 if (!ctlr->ptp_sts_supported) {
                         xfer->ptp_sts_word_pre = 0;
@@ -1631,10 +1645,10 @@ static int spi_transfer_one_message(struct spi_controller *ctlr,
                         reinit_completion(&ctlr->xfer_completion);
 
 fallback_pio:
-                        spi_dma_sync_for_device(ctlr, xfer);
+                        spi_dma_sync_for_device(ctlr, msg, xfer);
                         ret = ctlr->transfer_one(ctlr, msg->spi, xfer);
                         if (ret < 0) {
-                                spi_dma_sync_for_cpu(ctlr, xfer);
+                                spi_dma_sync_for_cpu(ctlr, msg, xfer);
 
                                 if (ctlr->cur_msg_mapped &&
                                    (xfer->error & SPI_TRANS_FAIL_NO_START)) {
@@ -1659,7 +1673,7 @@ fallback_pio:
                                 msg->status = ret;
                         }
 
-                        spi_dma_sync_for_cpu(ctlr, xfer);
+                        spi_dma_sync_for_cpu(ctlr, msg, xfer);
                 } else {
                         if (xfer->len)
                                 dev_err(&msg->spi->dev,
@@ -3716,9 +3730,6 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
          * to the same values as *xferp, so tx_buf, rx_buf and len
          * are all identical (as well as most others)
          * so we just have to fix up len and the pointers.
-         *
-         * This also includes support for the depreciated
-         * spi_message.is_dma_mapped interface.
          */
 
         /*
@@ -3732,12 +3743,8 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
                 /* Update rx_buf, tx_buf and DMA */
                 if (xfers[i].rx_buf)
                         xfers[i].rx_buf += offset;
-                if (xfers[i].rx_dma)
-                        xfers[i].rx_dma += offset;
                 if (xfers[i].tx_buf)
                         xfers[i].tx_buf += offset;
-                if (xfers[i].tx_dma)
-                        xfers[i].tx_dma += offset;
 
                 /* Update length */
                 xfers[i].len = min(maxsize, xfers[i].len - offset);
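The spi_for_each_valid_cs() macro added in this diff relies on the `if (!cond) {} else` construction: the statement or block the caller attaches to the macro runs only for chip selects whose bit is set in cs_index_mask, while the whole thing still behaves like a single statement (a trailing `else` in the caller cannot pair with the hidden `if`). Below is a minimal, standalone sketch of the same idiom; the plain integer mask and the for_each_valid_cs() name are stand-ins for this illustration, not part of the kernel API.

```c
#include <stdio.h>

#define SPI_CS_CNT_MAX  4
#define BIT(n)          (1U << (n))

/*
 * Same filtering-iterator idiom as spi_for_each_valid_cs(), but over a
 * plain mask instead of a struct spi_device (illustration only).
 */
#define for_each_valid_cs(mask, idx)                            \
        for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)              \
                if (!((mask) & BIT(idx))) {} else

int main(void)
{
        unsigned int cs_index_mask = BIT(0) | BIT(2);   /* logical CS 0 and 2 */
        unsigned int idx;

        /* The attached statement runs only for set bits: prints 0 and 2 */
        for_each_valid_cs(cs_index_mask, idx)
                printf("chip select %u is enabled in the mask\n", idx);

        return 0;
}
```

Compared with the open-coded loops that spi_is_last_cs() and spi_set_cs() used before this change, the iterator keeps the per-chip-select mask test in one place and lets callers drop one level of indentation.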