drivers/crypto/virtio/virtio_crypto_common.h
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/authenc.h>
#include <crypto/engine.h>


/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	struct crypto_engine *engine;
};
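
/*
 * Illustrative sketch (not part of the original header): the per-queue lock
 * is what serializes virtqueue operations on a dataq, so a submission path
 * is expected to look roughly like the snippet below.  The request pointer
 * 'vc_req' and the descriptor counts 'num_out'/'num_in' are assumptions made
 * only for this example.
 *
 *	unsigned long flags;
 *	int err;
 *
 *	spin_lock_irqsave(&data_vq->lock, flags);
 *	err = virtqueue_add_sgs(data_vq->vq, vc_req->sgs, num_out, num_in,
 *				vc_req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(data_vq->vq);
 *	spin_unlock_irqrestore(&data_vq->lock, flags);
 */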

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	/* Control VQ buffers: protected by the ctrl_lock */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	atomic_t ref_count;
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
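
/*
 * Illustrative sketch (assumption about usage, not taken verbatim from the
 * driver): the ctrl/input/ctrl_status buffers above are shared by all
 * control-queue operations, so they may only be filled in and queued while
 * ctrl_lock is held, e.g.
 *
 *	spin_lock(&vcrypto->ctrl_lock);
 *	... fill vcrypto->ctrl / vcrypto->input here (field setup elided) ...
 *	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out, num_in,
 *				vcrypto, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vcrypto->ctrl_vq);
 *	spin_unlock(&vcrypto->ctrl_lock);
 */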

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

struct virtio_crypto_ablkcipher_ctx {
	struct virtio_crypto *vcrypto;
	struct crypto_tfm *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};

struct virtio_crypto_request {
	/* Cipher or aead */
	uint32_t type;
	uint8_t status;
	struct virtio_crypto_ablkcipher_ctx *ablkcipher_ctx;
	struct ablkcipher_request *ablkcipher_req;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
	struct data_queue *dataq;
};
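
/*
 * Illustrative sketch (assumption): a request is normally prepared before it
 * is queued on a dataq; the field values and the 'vcrypto' pointer below are
 * placeholders for this example only.
 *
 *	struct virtio_crypto_request *vc_req;
 *
 *	vc_req = kzalloc(sizeof(*vc_req), GFP_KERNEL);
 *	if (!vc_req)
 *		return -ENOMEM;
 *	vc_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
 *	vc_req->encrypt = true;
 *	vc_req->dataq = &vcrypto->data_vq[0];
 *
 * Once the device has completed the request, virtcrypto_clear_request()
 * below is expected to release the buffers it carries.
 */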

int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_get_dev_node(int node);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine,
	struct ablkcipher_request *req);
void virtio_crypto_ablkcipher_finalize_req(
	struct virtio_crypto_request *vc_req,
	struct ablkcipher_request *req,
	int err);

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);
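
/*
 * Illustrative sketch (assumption, based on the crypto_engine API of this
 * kernel generation): the two ablkcipher helpers above act as the engine's
 * request hook and completion path.  At probe time each dataq's engine would
 * be wired up roughly as follows ('vi', 'dev' and 'error' are placeholder
 * names for this example), and the finalize helper is invoked from the dataq
 * callback once the device has produced a result.
 *
 *	vi->data_vq[i].engine = crypto_engine_alloc_init(dev, true);
 *	if (!vi->data_vq[i].engine)
 *		return -ENOMEM;
 *	vi->data_vq[i].engine->cipher_one_request =
 *		virtio_crypto_ablkcipher_crypt_req;
 *
 * and later, from the completion path:
 *
 *	virtio_crypto_ablkcipher_finalize_req(vc_req,
 *			vc_req->ablkcipher_req, error);
 */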

static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
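
/*
 * Illustrative sketch (assumption): the helper above is typically paired
 * with virtcrypto_get_dev_node() so callers prefer a device on their own
 * NUMA node and drop the reference once they are done with it.
 *
 *	struct virtio_crypto *vcrypto;
 *
 *	vcrypto = virtcrypto_get_dev_node(virtio_crypto_get_current_node());
 *	if (!vcrypto)
 *		return -ENODEV;
 *	...
 *	virtcrypto_dev_put(vcrypto);
 */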

int virtio_crypto_algs_register(void);
void virtio_crypto_algs_unregister(void);

#endif /* _VIRTIO_CRYPTO_COMMON_H */