/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright(c) 2014 - 2022 Intel Corporation */
#ifndef QAT_BL_H
#define QAT_BL_H
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
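
/* Max number of buffer descriptors held in the pre-allocated lists below */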
#define QAT_MAX_BUFF_DESC 4
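
/* One flat buffer descriptor (DMA address + length) in firmware format */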
struct qat_alg_buf {
	u32 len;
	u32 resrvd;
	u64 addr;
} __packed;
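
/* Firmware buffer list: header followed by a flexible array of descriptors */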
struct qat_alg_buf_list {
	u64 resrvd;
	u32 num_bufs;
	u32 num_mapped_bufs;
	struct qat_alg_buf buffers[];
} __packed;
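
/*
 * Fixed-capacity variant of the buffer list, embedded in struct
 * qat_request_buffs so that small requests can avoid a heap allocation.
 */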
struct qat_alg_fixed_buf_list {
	struct qat_alg_buf_list sgl_hdr;
	struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC];
} __packed __aligned(64);
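
/*
 * Per-request mapping state: source (bl/blp) and destination
 * (blout/bloutp) buffer lists with their sizes, plus flags indicating
 * whether the embedded fixed-size lists are in use.
 */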
struct qat_request_buffs {
	struct qat_alg_buf_list *bl;
	dma_addr_t blp;
	struct qat_alg_buf_list *blout;
	dma_addr_t bloutp;
	size_t sz;
	size_t sz_out;
	bool sgl_src_valid;
	bool sgl_dst_valid;
	struct qat_alg_fixed_buf_list sgl_src;
	struct qat_alg_fixed_buf_list sgl_dst;
};
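
/*
 * Optional parameters for qat_bl_sgl_to_bufl(): an extra buffer to
 * append to the destination list and byte offsets (sskip/dskip) to
 * skip at the start of the source/destination scatterlists.
 */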
struct qat_sgl_to_bufl_params {
	dma_addr_t extra_dst_buff;
	size_t sz_extra_dst_buff;
	unsigned int sskip;
	unsigned int dskip;
};
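
/* Unmap and free the buffer lists built by qat_bl_sgl_to_bufl() */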
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf);
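
/* DMA-map src/dst scatterlists into firmware buffer lists */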
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags);
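
/* Pick GFP flags according to whether the crypto request may sleep */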
static inline gfp_t qat_algs_alloc_flags(struct crypto_async_request *req)
{
	return req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}
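
/* Allocate a new destination SGL of dlen bytes and map it in place of the old one */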
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **newd,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp);
#endif