// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <crypto/scatterwalk.h>

#include "dma.h"

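/* Device-managed teardown for devm_qce_dma_request() */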
static void qce_dma_release(void *data)
{
	struct qce_dma_data *dma = data;

	dma_release_channel(dma->txchan);
	dma_release_channel(dma->rxchan);
	kfree(dma->result_buf);
}

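/*
 * Request the "tx" and "rx" DMA channels and allocate a single buffer that
 * holds both the result area and the trailing "ignore" scratch area.
 * Cleanup is registered as a device-managed action, so callers do not need
 * to release anything explicitly.
 */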
int devm_qce_dma_request(struct device *dev, struct qce_dma_data *dma)
{
	int ret;

	dma->txchan = dma_request_chan(dev, "tx");
	if (IS_ERR(dma->txchan))
		return PTR_ERR(dma->txchan);

	dma->rxchan = dma_request_chan(dev, "rx");
	if (IS_ERR(dma->rxchan)) {
		ret = PTR_ERR(dma->rxchan);
		goto error_rx;
	}

	/* One allocation backs both buffers; ignore_buf starts after result_buf */
	dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
				  GFP_KERNEL);
	if (!dma->result_buf) {
		ret = -ENOMEM;
		goto error_nomem;
	}

	dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;

	return devm_add_action_or_reset(dev, qce_dma_release, dma);

error_nomem:
	dma_release_channel(dma->rxchan);
error_rx:
	dma_release_channel(dma->txchan);
	return ret;
}

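/*
 * Fill the unused entries of @sgt with the pages of @new_sgl, copying at
 * most @max_len bytes worth of segments.  Returns the last entry written,
 * or ERR_PTR(-EINVAL) if @sgt has no unused entries left.
 */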
struct scatterlist *
qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl,
		unsigned int max_len)
{
	struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
	unsigned int new_len;

	/* Find the first unused entry (no page set yet) */
	while (sg) {
		if (!sg_page(sg))
			break;
		sg = sg_next(sg);
	}

	if (!sg)
		return ERR_PTR(-EINVAL);

	while (new_sgl && sg && max_len) {
		new_len = new_sgl->length > max_len ? max_len : new_sgl->length;
		sg_set_page(sg, sg_page(new_sgl), new_len, new_sgl->offset);
		sg_last = sg;
		sg = sg_next(sg);
		new_sgl = sg_next(new_sgl);
		max_len -= new_len;
	}

	return sg_last;
}

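/*
 * Prepare a slave SG descriptor on @chan, attach the optional completion
 * callback and submit it to the dmaengine queue.
 */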
static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
			   int nents, unsigned long flags,
			   enum dma_transfer_direction dir,
			   dma_async_tx_callback cb, void *cb_param)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	if (!sg || !nents)
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
	if (!desc)
		return -EINVAL;

	desc->callback = cb;
	desc->callback_param = cb_param;
	cookie = dmaengine_submit(desc);

	return dma_submit_error(cookie);
}

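/*
 * Queue one crypto operation on both channels.  The names reflect the
 * engine's point of view: "rx" feeds input into the engine
 * (DMA_MEM_TO_DEV), "tx" drains results back to memory (DMA_DEV_TO_MEM).
 * Only the tx descriptor carries the completion callback.
 */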
int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
		     int rx_nents, struct scatterlist *tx_sg, int tx_nents,
		     dma_async_tx_callback cb, void *cb_param)
{
	struct dma_chan *rxchan = dma->rxchan;
	struct dma_chan *txchan = dma->txchan;
	unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
	int ret;

	ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
			      NULL, NULL);
	if (ret)
		return ret;

	return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
			       cb, cb_param);
}

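/* Start the queued descriptors on both channels */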
void qce_dma_issue_pending(struct qce_dma_data *dma)
{
	dma_async_issue_pending(dma->rxchan);
	dma_async_issue_pending(dma->txchan);
}

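/*
 * Terminate all activity on both channels.  An error from the rx channel
 * takes precedence; otherwise the tx result is returned.
 */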
int qce_dma_terminate_all(struct qce_dma_data *dma)
{
	int ret;

	ret = dmaengine_terminate_all(dma->rxchan);
	return ret ?: dmaengine_terminate_all(dma->txchan);
}