Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions drivers/crypto/qce/common.c
Original file line number Diff line number Diff line change
Expand Up @@ -597,7 +597,7 @@ int qce_bam_lock(struct qce_device *qce)
{
qce_clear_bam_transaction(qce);
/* Dummy write to acquire the lock on the BAM pipe. */
qce_write(qce, REG_AUTH_SEG_CFG, 0);
qce_write(qce, REG_VERSION, 0);

return qce_submit_cmd_desc_lock(qce);
}
Expand All @@ -606,7 +606,7 @@ int qce_bam_unlock(struct qce_device *qce)
{
qce_clear_bam_transaction(qce);
/* Dummy write to release the lock on the BAM pipe. */
qce_write(qce, REG_AUTH_SEG_CFG, 0);
qce_write(qce, REG_VERSION, 0);

return qce_submit_cmd_desc_unlock(qce);
}
37 changes: 16 additions & 21 deletions drivers/crypto/qce/core.c
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,9 @@ static int qce_handle_queue(struct qce_device *qce,
struct crypto_async_request *async_req, *backlog;
int ret = 0, err;

ret = pm_runtime_resume_and_get(qce->dev);
if (ret < 0)
ACQUIRE(pm_runtime_active_try, pm)(qce->dev);
ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm);
if (ret)
return ret;

scoped_guard(mutex, &qce->lock) {
Expand All @@ -102,7 +103,7 @@ static int qce_handle_queue(struct qce_device *qce,

/* busy, do not dequeue request */
if (qce->req)
goto qce_suspend;
return ret;

backlog = crypto_get_backlog(&qce->queue);
async_req = crypto_dequeue_request(&qce->queue);
Expand All @@ -111,7 +112,7 @@ static int qce_handle_queue(struct qce_device *qce,
}

if (!async_req)
goto qce_suspend;
return ret;

if (backlog) {
scoped_guard(mutex, &qce->lock)
Expand All @@ -124,8 +125,6 @@ static int qce_handle_queue(struct qce_device *qce,
schedule_work(&qce->done_work);
}

qce_suspend:
pm_runtime_put_autosuspend(qce->dev);
return ret;
}

Expand Down Expand Up @@ -250,21 +249,22 @@ static int qce_crypto_probe(struct platform_device *pdev)
if (ret)
return ret;

ret = pm_runtime_resume_and_get(dev);
ACQUIRE(pm_runtime_active_try, pm)(dev);
ret = ACQUIRE_ERR(pm_runtime_active_auto_try, &pm);
if (ret)
return ret;

ret = devm_qce_dma_request(qce);
if (ret)
goto err_pm;
return ret;

ret = qce_check_version(qce);
if (ret)
goto err_pm;
return ret;

ret = devm_mutex_init(qce->dev, &qce->lock);
if (ret)
goto err_pm;
return ret;

INIT_WORK(&qce->done_work, qce_req_done_work);
crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
Expand All @@ -274,32 +274,27 @@ static int qce_crypto_probe(struct platform_device *pdev)

ret = devm_qce_register_algs(qce);
if (ret)
goto err_pm;
return ret;

qce->dma_size = resource_size(res);
qce->base_dma = dma_map_resource(dev, res->start, qce->dma_size,
DMA_BIDIRECTIONAL, 0);
qce->base_phys = res->start;
ret = dma_mapping_error(dev, qce->base_dma);
if (ret)
goto err_pm;
return ret;

ret = devm_add_action_or_reset(qce->dev, qce_crypto_unmap_dma, qce);
if (ret)
goto err_pm;
return ret;

/* Configure autosuspend after successful init */
pm_runtime_set_autosuspend_delay(dev, 100);
pm_runtime_use_autosuspend(dev);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);

return 0;

err_pm:
pm_runtime_put(dev);

return ret;
}

static int __maybe_unused qce_runtime_suspend(struct device *dev)
Expand All @@ -308,15 +303,15 @@ static int __maybe_unused qce_runtime_suspend(struct device *dev)

icc_disable(qce->mem_path);

return 0;
return pm_clk_suspend(dev);
}

static int __maybe_unused qce_runtime_resume(struct device *dev)
{
struct qce_device *qce = dev_get_drvdata(dev);
int ret = 0;

ret = icc_enable(qce->mem_path);
ret = pm_clk_resume(dev);
if (ret)
return ret;

Expand All @@ -327,7 +322,7 @@ static int __maybe_unused qce_runtime_resume(struct device *dev)
return 0;

err_icc:
icc_disable(qce->mem_path);
pm_clk_suspend(dev);
return ret;
}

Expand Down
35 changes: 25 additions & 10 deletions drivers/crypto/qce/dma.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,16 @@ void qce_clear_bam_transaction(struct qce_device *qce)
bam_txn->pre_bam_ce_idx = 0;
}

static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags)
static int qce_do_submit_cmd_desc(struct qce_device *qce, struct bam_desc_metadata *meta)
{
struct qce_desc_info *qce_desc = qce->dma.bam_txn->desc;
struct qce_bam_transaction *bam_txn = qce->dma.bam_txn;
struct dma_async_tx_descriptor *dma_desc;
struct dma_chan *chan = qce->dma.rxchan;
unsigned long attrs = DMA_PREP_CMD | flags;
unsigned long attrs = DMA_PREP_CMD;
dma_cookie_t cookie;
unsigned int mapped;
int ret;
int ret = -ENOMEM;

mapped = dma_map_sg_attrs(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt,
DMA_TO_DEVICE, attrs);
Expand All @@ -59,9 +59,15 @@ static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags)

dma_desc = dmaengine_prep_slave_sg(chan, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt,
DMA_MEM_TO_DEV, attrs);
if (!dma_desc) {
dma_unmap_sg(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE);
return -ENOMEM;
if (!dma_desc)
goto err_out;

if (meta) {
meta->chan = chan;

ret = dmaengine_desc_attach_metadata(dma_desc, meta, 0);
if (ret)
goto err_out;
}

qce_desc->dma_desc = dma_desc;
Expand All @@ -74,21 +80,29 @@ static int qce_do_submit_cmd_desc(struct qce_device *qce, unsigned long flags)
qce_dma_issue_pending(&qce->dma);

return 0;

err_out:
dma_unmap_sg(qce->dev, bam_txn->wr_sgl, bam_txn->wr_sgl_cnt, DMA_TO_DEVICE);
return ret;
}

int qce_submit_cmd_desc(struct qce_device *qce)
{
return qce_do_submit_cmd_desc(qce, 0);
return qce_do_submit_cmd_desc(qce, NULL);
}

int qce_submit_cmd_desc_lock(struct qce_device *qce)
{
return qce_do_submit_cmd_desc(qce, DMA_PREP_LOCK);
struct bam_desc_metadata meta = { .op = BAM_META_CMD_LOCK, };

return qce_do_submit_cmd_desc(qce, &meta);
}

int qce_submit_cmd_desc_unlock(struct qce_device *qce)
{
return qce_do_submit_cmd_desc(qce, DMA_PREP_UNLOCK);
struct bam_desc_metadata meta = { .op = BAM_META_CMD_UNLOCK };

return qce_do_submit_cmd_desc(qce, &meta);
}

static void qce_prep_dma_cmd_desc(struct qce_device *qce, struct qce_dma_data *dma,
Expand Down Expand Up @@ -133,7 +147,8 @@ int devm_qce_dma_request(struct qce_device *qce)

dma->rxchan = devm_dma_request_chan(dev, "rx");
if (IS_ERR(dma->rxchan))
return PTR_ERR(dma->rxchan);
return dev_err_probe(dev, PTR_ERR(dma->rxchan),
"Failed to get RX DMA channel\n");

dma->result_buf = devm_kmalloc(dev, QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ, GFP_KERNEL);
if (!dma->result_buf)
Expand Down
87 changes: 57 additions & 30 deletions drivers/dma/qcom/bam_dma.c
Original file line number Diff line number Diff line change
Expand Up @@ -392,8 +392,7 @@ struct bam_chan {

struct list_head node;

/* Is the BAM currently locked? */
bool locked;
bool bam_locked;
};

static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
Expand Down Expand Up @@ -658,6 +657,53 @@ static int bam_slave_config(struct dma_chan *chan,
return 0;
}

/*
 * bam_metadata_attach - client-metadata hook used to request BAM pipe
 * lock/unlock on a prepared command descriptor.
 *
 * @desc: descriptor returned by bam_prep_slave_sg()
 * @data: pointer to a struct bam_desc_metadata describing the operation
 * @len:  unused (required by the dma_descriptor_metadata_ops signature)
 *
 * Returns 0 on success, -EINVAL on bad input, -EOPNOTSUPP when the
 * controller lacks pipe-lock support or the op is unknown, -EBUSY when
 * already locked, -EPERM when unlocking without holding the lock.
 */
static int bam_metadata_attach(struct dma_async_tx_descriptor *desc, void *data, size_t len)
{
	struct virt_dma_desc *vd = container_of(desc, struct virt_dma_desc, tx);
	struct bam_async_desc *async_desc = container_of(vd, struct bam_async_desc, vd);
	struct bam_desc_hw *hw_desc = async_desc->desc;
	struct bam_desc_metadata *metadata = data;
	struct bam_chan *bchan;
	struct bam_device *bdev;

	/*
	 * Validate @data before dereferencing it: metadata->chan is read
	 * below, so the NULL check must come first.
	 */
	if (!data)
		return -EINVAL;

	bchan = to_bam_chan(metadata->chan);
	bdev = bchan->bdev;

	if (metadata->op == BAM_META_CMD_LOCK || metadata->op == BAM_META_CMD_UNLOCK) {
		if (!bdev->dev_data->bam_pipe_lock)
			return -EOPNOTSUPP;

		/* Expecting a dummy write when locking, only one descriptor allowed. */
		if (async_desc->num_desc != 1)
			return -EINVAL;
	}

	switch (metadata->op) {
	case BAM_META_CMD_LOCK:
		if (bchan->bam_locked)
			return -EBUSY;

		/*
		 * Hardware descriptor flags are little-endian: match the
		 * cpu_to_le16() convention used everywhere else this file
		 * writes bam_desc_hw::flags.
		 */
		hw_desc->flags |= cpu_to_le16(DESC_FLAG_LOCK);
		bchan->bam_locked = true;
		break;
	case BAM_META_CMD_UNLOCK:
		if (!bchan->bam_locked)
			return -EPERM;

		hw_desc->flags |= cpu_to_le16(DESC_FLAG_UNLOCK);
		bchan->bam_locked = false;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

/*
 * Client-metadata ops exposed on every descriptor from bam_prep_slave_sg();
 * only .attach is implemented, carrying BAM pipe lock/unlock requests.
 */
static const struct dma_descriptor_metadata_ops bam_metadata_ops = {
.attach = bam_metadata_attach,
};

/**
* bam_prep_slave_sg - Prep slave sg transaction
*
Expand All @@ -674,8 +720,8 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
void *context)
{
struct bam_chan *bchan = to_bam_chan(chan);
struct dma_async_tx_descriptor *tx_desc;
struct bam_device *bdev = bchan->bdev;
const struct bam_device_data *bdata = bdev->dev_data;
struct bam_async_desc *async_desc;
struct scatterlist *sg;
u32 i;
Expand Down Expand Up @@ -716,34 +762,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
unsigned int curr_offset = 0;

do {
if (flags & DMA_PREP_CMD) {
if (!bdata->bam_pipe_lock &&
(flags & (DMA_PREP_LOCK | DMA_PREP_UNLOCK))) {
dev_err(bdev->dev, "Device doesn't support BAM locking\n");
return NULL;
}

if (flags & DMA_PREP_CMD)
desc->flags |= cpu_to_le16(DESC_FLAG_CMD);

if (bdata->bam_pipe_lock && (flags & DMA_PREP_LOCK)) {
if (bchan->locked) {
dev_err(bdev->dev, "BAM already locked\n");
return NULL;
}

desc->flags |= cpu_to_le16(DESC_FLAG_LOCK);
bchan->locked = true;
} else if (bdata->bam_pipe_lock && (flags & DMA_PREP_UNLOCK)) {
if (!bchan->locked) {
dev_err(bdev->dev, "BAM is not locked\n");
return NULL;
}

desc->flags |= cpu_to_le16(DESC_FLAG_UNLOCK);
bchan->locked = false;
}
}

desc->addr = cpu_to_le32(sg_dma_address(sg) +
curr_offset);

Expand All @@ -761,7 +782,12 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
} while (remainder > 0);
}

return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
tx_desc = vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
if (!tx_desc)
return NULL;

tx_desc->metadata_ops = &bam_metadata_ops;
return tx_desc;
}

/**
Expand Down Expand Up @@ -1401,6 +1427,7 @@ static int bam_dma_probe(struct platform_device *pdev)
bdev->common.device_terminate_all = bam_dma_terminate_all;
bdev->common.device_issue_pending = bam_issue_pending;
bdev->common.device_tx_status = bam_tx_status;
bdev->common.desc_metadata_modes = DESC_METADATA_CLIENT;
bdev->common.dev = bdev->dev;

ret = dma_async_device_register(&bdev->common);
Expand Down
2 changes: 1 addition & 1 deletion drivers/dma/ti/k3-udma.c
Original file line number Diff line number Diff line change
Expand Up @@ -3408,7 +3408,7 @@ static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
return 0;
}

static struct dma_descriptor_metadata_ops metadata_ops = {
static const struct dma_descriptor_metadata_ops metadata_ops = {
.attach = udma_attach_metadata,
.get_ptr = udma_get_metadata_ptr,
.set_len = udma_set_metadata_len,
Expand Down
2 changes: 1 addition & 1 deletion drivers/dma/xilinx/xilinx_dma.c
Original file line number Diff line number Diff line change
Expand Up @@ -652,7 +652,7 @@ static void *xilinx_dma_get_metadata_ptr(struct dma_async_tx_descriptor *tx,
return seg->hw.app;
}

static struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
/*
 * Client-metadata ops: only .get_ptr is provided, handing the client a
 * pointer into the segment's hardware app words (seg->hw.app).
 */
static const struct dma_descriptor_metadata_ops xilinx_dma_metadata_ops = {
.get_ptr = xilinx_dma_get_metadata_ptr,
};

Expand Down
Loading