Skip to content

Commit c957f8b

Browse files
atenart authored and herbertx committed
crypto: inside-secure - avoid unmapping DMA memory that was not mapped
This patch adds a parameter in the SafeXcel ahash request structure to keep track of the number of SG entries mapped. This allows not to call dma_unmap_sg() when dma_map_sg() wasn't called in the first place. This also removes a warning when the debugging of the DMA-API is enabled in the kernel configuration: "DMA-API: device driver tries to free DMA memory it has not allocated". Cc: stable@vger.kernel.org Fixes: 1b44c5a ("crypto: inside-secure - add SafeXcel EIP197 crypto engine driver") Signed-off-by: Antoine Tenart <antoine.tenart@free-electrons.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 0b5a7f7 commit c957f8b

File tree

1 file changed

+12
-8
lines changed

1 file changed

+12
-8
lines changed

drivers/crypto/inside-secure/safexcel_hash.c

Lines changed: 12 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -33,6 +33,8 @@ struct safexcel_ahash_req {
3333
bool hmac;
3434
bool needs_inv;
3535

36+
int nents;
37+
3638
u8 state_sz; /* expected sate size, only set once */
3739
u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));
3840

@@ -151,8 +153,10 @@ static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int rin
151153
memcpy(areq->result, sreq->state,
152154
crypto_ahash_digestsize(ahash));
153155

154-
dma_unmap_sg(priv->dev, areq->src,
155-
sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
156+
if (sreq->nents) {
157+
dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
158+
sreq->nents = 0;
159+
}
156160

157161
safexcel_free_context(priv, async, sreq->state_sz);
158162

@@ -177,7 +181,7 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
177181
struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
178182
struct safexcel_result_desc *rdesc;
179183
struct scatterlist *sg;
180-
int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
184+
int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
181185

182186
queued = len = req->len - req->processed;
183187
if (queued < crypto_ahash_blocksize(ahash))
@@ -233,15 +237,15 @@ static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
233237
}
234238

235239
/* Now handle the current ahash request buffer(s) */
236-
nents = dma_map_sg(priv->dev, areq->src,
237-
sg_nents_for_len(areq->src, areq->nbytes),
238-
DMA_TO_DEVICE);
239-
if (!nents) {
240+
req->nents = dma_map_sg(priv->dev, areq->src,
241+
sg_nents_for_len(areq->src, areq->nbytes),
242+
DMA_TO_DEVICE);
243+
if (!req->nents) {
240244
ret = -ENOMEM;
241245
goto cdesc_rollback;
242246
}
243247

244-
for_each_sg(areq->src, sg, nents, i) {
248+
for_each_sg(areq->src, sg, req->nents, i) {
245249
int sglen = sg_dma_len(sg);
246250

247251
/* Do not overflow the request */

0 commit comments

Comments (0)