diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c index 930006aaba4ac..84d6d49a56378 100644 --- a/drivers/crypto/qce/common.c +++ b/drivers/crypto/qce/common.c @@ -597,7 +597,7 @@ int qce_bam_lock(struct qce_device *qce) { qce_clear_bam_transaction(qce); /* Dummy write to acquire the lock on the BAM pipe. */ - qce_write(qce, REG_AUTH_SEG_CFG, 0); + qce_write(qce, REG_VERSION, 0); return qce_submit_cmd_desc_lock(qce); } @@ -606,7 +606,7 @@ int qce_bam_unlock(struct qce_device *qce) { qce_clear_bam_transaction(qce); /* Dummy write to release the lock on the BAM pipe. */ - qce_write(qce, REG_AUTH_SEG_CFG, 0); + qce_write(qce, REG_VERSION, 0); return qce_submit_cmd_desc_unlock(qce); } diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c index 391a6bc6e6cbd..d1723a01be0ce 100644 --- a/drivers/crypto/qce/core.c +++ b/drivers/crypto/qce/core.c @@ -92,8 +92,9 @@ static int qce_handle_queue(struct qce_device *qce, struct crypto_async_request *async_req, *backlog; int ret = 0, err; - ret = pm_runtime_resume_and_get(qce->dev); - if (ret < 0) + ACQUIRE(pm_runtime_active_auto_try, pm)(qce->dev); + ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm); + if (ret) return ret; scoped_guard(mutex, &qce->lock) { @@ -102,7 +103,7 @@ static int qce_handle_queue(struct qce_device *qce, /* busy, do not dequeue request */ if (qce->req) - goto qce_suspend; + return ret; backlog = crypto_get_backlog(&qce->queue); async_req = crypto_dequeue_request(&qce->queue); @@ -111,7 +112,7 @@ static int qce_handle_queue(struct qce_device *qce, } if (!async_req) - goto qce_suspend; + return ret; if (backlog) { scoped_guard(mutex, &qce->lock) @@ -124,8 +125,6 @@ static int qce_handle_queue(struct qce_device *qce, schedule_work(&qce->done_work); } -qce_suspend: - pm_runtime_put_autosuspend(qce->dev); return ret; } @@ -250,21 +249,22 @@ static int qce_crypto_probe(struct platform_device *pdev) if (ret) return ret; - ret = pm_runtime_resume_and_get(dev); + 
ACQUIRE(pm_runtime_active_auto_try, pm)(dev); + ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm); if (ret) return ret; ret = devm_qce_dma_request(qce); if (ret) - goto err_pm; + return ret; ret = qce_check_version(qce); if (ret) - goto err_pm; + return ret; ret = devm_mutex_init(qce->dev, &qce->lock); if (ret) - goto err_pm; + return ret; INIT_WORK(&qce->done_work, qce_req_done_work); crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH); @@ -274,7 +274,7 @@ static int qce_crypto_probe(struct platform_device *pdev) ret = devm_qce_register_algs(qce); if (ret) - goto err_pm; + return ret; qce->dma_size = resource_size(res); qce->base_dma = dma_map_resource(dev, res->start, qce->dma_size, @@ -282,24 +282,19 @@ static int qce_crypto_probe(struct platform_device *pdev) qce->base_phys = res->start; ret = dma_mapping_error(dev, qce->base_dma); if (ret) - goto err_pm; + return ret; ret = devm_add_action_or_reset(qce->dev, qce_crypto_unmap_dma, qce); if (ret) - goto err_pm; + return ret; /* Configure autosuspend after successful init */ pm_runtime_set_autosuspend_delay(dev, 100); pm_runtime_use_autosuspend(dev); pm_runtime_mark_last_busy(dev); - pm_runtime_put_autosuspend(dev); return 0; -err_pm: - pm_runtime_put(dev); - - return ret; } static int __maybe_unused qce_runtime_suspend(struct device *dev) @@ -308,7 +303,7 @@ static int __maybe_unused qce_runtime_suspend(struct device *dev) icc_disable(qce->mem_path); - return 0; + return pm_clk_suspend(dev); } static int __maybe_unused qce_runtime_resume(struct device *dev) @@ -316,7 +311,7 @@ static int __maybe_unused qce_runtime_resume(struct device *dev) struct qce_device *qce = dev_get_drvdata(dev); int ret = 0; - ret = icc_enable(qce->mem_path); + ret = pm_clk_resume(dev); if (ret) return ret; @@ -327,7 +322,7 @@ static int __maybe_unused qce_runtime_resume(struct device *dev) return 0; err_icc: - icc_disable(qce->mem_path); + pm_clk_suspend(dev); return ret; } diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c index 
de13a49cb2754..885053955ac3d 100644 --- a/drivers/crypto/qce/dma.c +++ b/drivers/crypto/qce/dma.c @@ -41,16 +41,16 @@ void qce_clear_bam_transaction(struct qce_device *qce) bam_txn->pre_bam_ce_idx = 0; } -static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags) +static int qce_do_submit_cmd_desc(struct qce_device *qce, struct bam_desc_metadata *meta) { struct qce_desc_info *qce_desc = qce->dma.bam_txn->desc; struct qce_bam_transaction *bam_txn = qce->dma.bam_txn; struct dma_async_tx_descriptor *dma_desc; struct dma_chan *chan = qce->dma.rxchan; - unsigned long attrs = DMA_PREP_CMD | flags; + unsigned long attrs = DMA_PREP_CMD; dma_cookie_t cookie; unsigned int mapped; - int ret; + int ret = -ENOMEM; mapped = dma_map_sg_attrs(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE, attrs); @@ -59,9 +59,15 @@ static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags) dma_desc = dmaengine_prep_slave_sg(chan, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_MEM_TO_DEV, attrs); - if (!dma_desc) { - dma_unmap_sg(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE); - return -ENOMEM; + if (!dma_desc) + goto err_out; + + if (meta) { + meta->chan = chan; + + ret = dmaengine_desc_attach_metadata(dma_desc, meta, 0); + if (ret) + goto err_out; } qce_desc->dma_desc = dma_desc; @@ -74,21 +80,29 @@ static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags) qce_dma_issue_pending(&qce->dma); return 0; + +err_out: + dma_unmap_sg(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE); + return ret; } int qce_submit_cmd_desc(struct qce_device *qce) { - return qce_do_submit_cmd_desc(qce, 0); + return qce_do_submit_cmd_desc(qce, NULL); } int qce_submit_cmd_desc_lock(struct qce_device *qce) { - return qce_do_submit_cmd_desc(qce, DMA_PREP_LOCK); + struct bam_desc_metadata meta = { .op = BAM_META_CMD_LOCK, }; + + return qce_do_submit_cmd_desc(qce, &meta); } int qce_submit_cmd_desc_unlock(struct qce_device *qce) 
{ - return qce_do_submit_cmd_desc(qce, DMA_PREP_UNLOCK); + struct bam_desc_metadata meta = { .op = BAM_META_CMD_UNLOCK }; + + return qce_do_submit_cmd_desc(qce, &meta); } static void qce_prep_dma_cmd_desc(struct qce_device *qce, struct qce_dma_data *dma, @@ -133,7 +147,8 @@ int devm_qce_dma_request(struct qce_device *qce) dma->rxchan = devm_dma_request_chan(dev, "rx"); if (IS_ERR(dma->rxchan)) - return PTR_ERR(dma->rxchan); + return dev_err_probe(dev, PTR_ERR(dma->rxchan), + "Failed to get RX DMA channel\n"); dma->result_buf = devm_kmalloc(dev, QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, GFP_KERNEL); if (!dma->result_buf) diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c index 68921d22ad7ab..d371d095b62f5 100644 --- a/drivers/dma/qcom/bam_dma.c +++ b/drivers/dma/qcom/bam_dma.c @@ -392,8 +392,7 @@ struct bam_chan { struct list_head node; - /* Is the BAM currently locked? */ - bool locked; + bool bam_locked; }; static inline struct bam_chan *to_bam_chan(struct dma_chan *common) @@ -658,6 +657,56 @@ static int bam_slave_config(struct dma_chan *chan, return 0; } +static int bam_metadata_attach(struct dma_async_tx_descriptor *desc, void *data, size_t len) +{ + struct virt_dma_desc *vd = container_of(desc, struct virt_dma_desc, tx); + struct bam_async_desc *async_desc = container_of(vd, struct bam_async_desc, vd); + struct bam_desc_hw *hw_desc = async_desc->desc; + struct bam_desc_metadata *metadata = data; + struct bam_chan *bchan; + struct bam_device *bdev; + + if (!data) + return -EINVAL; + + bchan = to_bam_chan(metadata->chan); + bdev = bchan->bdev; + + if (metadata->op == BAM_META_CMD_LOCK || metadata->op == BAM_META_CMD_UNLOCK) { + if (!bdev->dev_data->bam_pipe_lock) + return -EOPNOTSUPP; + + /* Expecting a dummy write when locking, only one descriptor allowed. 
*/ + if (async_desc->num_desc != 1) + return -EINVAL; + } + + switch (metadata->op) { + case BAM_META_CMD_LOCK: + if (bchan->bam_locked) + return -EBUSY; + + hw_desc->flags |= cpu_to_le16(DESC_FLAG_LOCK); + bchan->bam_locked = true; + break; + case BAM_META_CMD_UNLOCK: + if (!bchan->bam_locked) + return -EPERM; + + hw_desc->flags |= cpu_to_le16(DESC_FLAG_UNLOCK); + bchan->bam_locked = false; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static const struct dma_descriptor_metadata_ops bam_metadata_ops = { + .attach = bam_metadata_attach, +}; + /** * bam_prep_slave_sg - Prep slave sg transaction * @@ -674,8 +720,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, void *context) { struct bam_chan *bchan = to_bam_chan(chan); + struct dma_async_tx_descriptor *tx_desc; struct bam_device *bdev = bchan->bdev; - const struct bam_device_data *bdata = bdev->dev_data; struct bam_async_desc *async_desc; struct scatterlist *sg; u32 i; @@ -716,34 +762,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, unsigned int curr_offset = 0; do { - if (flags & DMA_PREP_CMD) { - if (!bdata->bam_pipe_lock && - (flags & (DMA_PREP_LOCK | DMA_PREP_UNLOCK))) { - dev_err(bdev->dev, "Device doesn't support BAM locking\n"); - return NULL; - } - + if (flags & DMA_PREP_CMD) desc->flags |= cpu_to_le16(DESC_FLAG_CMD); - if (bdata->bam_pipe_lock && (flags & DMA_PREP_LOCK)) { - if (bchan->locked) { - dev_err(bdev->dev, "BAM already locked\n"); - return NULL; - } - - desc->flags |= cpu_to_le16(DESC_FLAG_LOCK); - bchan->locked = true; - } else if (bdata->bam_pipe_lock && (flags & DMA_PREP_UNLOCK)) { - if (!bchan->locked) { - dev_err(bdev->dev, "BAM is not locked\n"); - return NULL; - } - - desc->flags |= cpu_to_le16(DESC_FLAG_UNLOCK); - bchan->locked = false; - } - } - desc->addr = cpu_to_le32(sg_dma_address(sg) + curr_offset); @@ -761,7 +782,12 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, } while (remainder > 
0); } - return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); + tx_desc = vchan_tx_prep(&bchan->vc, &async_desc->vd, flags); + if (!tx_desc) + return NULL; + + tx_desc->metadata_ops = &bam_metadata_ops; + return tx_desc; } /** @@ -1401,6 +1427,7 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->common.device_terminate_all = bam_dma_terminate_all; bdev->common.device_issue_pending = bam_issue_pending; bdev->common.device_tx_status = bam_tx_status; + bdev->common.desc_metadata_modes = DESC_METADATA_CLIENT; bdev->common.dev = bdev->dev; ret = dma_async_device_register(&bdev->common); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index aa2dc762140f6..75c8eed063af8 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -3408,7 +3408,7 @@ static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, return 0; } -static struct dma_descriptor_metadata_ops metadata_ops = { +static const struct dma_descriptor_metadata_ops metadata_ops = { .attach = udma_attach_metadata, .get_ptr = udma_get_metadata_ptr, .set_len = udma_set_metadata_len, diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c index fabff602065f6..e2a40e8dd4b20 100644 --- a/drivers/dma/xilinx/xilinx_dma.c +++ b/drivers/dma/xilinx/xilinx_dma.c @@ -652,7 +652,7 @@ static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx, return seg->hw.app; } -static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = { +static const struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = { .get_ptr = xilinx_dma_get_metadata_ptr, }; diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h index 68fc0e643b1b9..dd30bb9c520fa 100644 --- a/include/linux/dma/qcom_bam_dma.h +++ b/include/linux/dma/qcom_bam_dma.h @@ -8,6 +8,8 @@ #include +struct dma_chan; + /* * This data type corresponds to the native Command Element * supported by BAM DMA Engine. 
@@ -34,6 +36,16 @@ enum bam_command_type { BAM_READ_COMMAND, }; +enum bam_desc_metadata_op { + BAM_META_CMD_LOCK = 1, + BAM_META_CMD_UNLOCK, +}; + +struct bam_desc_metadata { + enum bam_desc_metadata_op op; + struct dma_chan *chan; +}; + /* * prep_bam_ce_le32 - Wrapper function to prepare a single BAM command * element with the data already in le32 format. diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c02be4bc8ac4c..51fbe31a667eb 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -629,7 +629,7 @@ struct dma_async_tx_descriptor { void *callback_param; struct dmaengine_unmap_data *unmap; enum dma_desc_metadata_mode desc_metadata_mode; - struct dma_descriptor_metadata_ops *metadata_ops; + const struct dma_descriptor_metadata_ops *metadata_ops; #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH struct dma_async_tx_descriptor *next; struct dma_async_tx_descriptor *parent;