/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Common header for Virtio crypto device.
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#ifndef _VIRTIO_CRYPTO_COMMON_H
#define _VIRTIO_CRYPTO_COMMON_H

#include <linux/virtio.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/engine.h>
#include <uapi/linux/virtio_crypto.h>


/* Internal representation of a data virtqueue */
struct data_queue {
	/* Virtqueue associated with this send queue */
	struct virtqueue *vq;

	/* To protect the vq operations for the dataq */
	spinlock_t lock;

	/* Name of the tx queue: dataq.$index */
	char name[32];

	/* Crypto engine that feeds requests to this queue */
	struct crypto_engine *engine;
	/* Tasklet that completes finished requests for this queue */
	struct tasklet_struct done_task;
};

struct virtio_crypto {
	struct virtio_device *vdev;
	struct virtqueue *ctrl_vq;
	struct data_queue *data_vq;

	/* To protect the vq operations for the controlq */
	spinlock_t ctrl_lock;

	/* Maximum number of data queues supported by the device */
	u32 max_data_queues;

	/* Number of queues currently used by the driver */
	u32 curr_queue;

	/*
	 * Specifies the mask of services the device supports,
	 * see VIRTIO_CRYPTO_SERVICE_*
	 */
	u32 crypto_services;

	/* Detailed algorithms mask */
	u32 cipher_algo_l;
	u32 cipher_algo_h;
	u32 hash_algo;
	u32 mac_algo_l;
	u32 mac_algo_h;
	u32 aead_algo;

	/* Maximum length of cipher key */
	u32 max_cipher_key_len;
	/* Maximum length of authentication key */
	u32 max_auth_key_len;
	/* Maximum size of a single request */
	u64 max_size;

	/*
	 * Control VQ buffers: protected by the ctrl_lock.
	 * See the usage sketch after this struct.
	 */
	struct virtio_crypto_op_ctrl_req ctrl;
	struct virtio_crypto_session_input input;
	struct virtio_crypto_inhdr ctrl_status;

	unsigned long status;
	/* Reference count, see virtcrypto_dev_get()/virtcrypto_dev_put() */
	atomic_t ref_count;
	/* Entry in the device manager's list of devices */
	struct list_head list;
	struct module *owner;
	uint8_t dev_id;

	/* Is the affinity hint set for the virtqueues? */
	bool affinity_hint_set;
};
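
/*
 * Illustrative sketch only, not the driver's actual control path: a control
 * operation is expected to hand the device-readable 'ctrl' buffer and the
 * device-writable 'input' buffer (and typically 'ctrl_status' as well) to
 * ctrl_vq while holding ctrl_lock, roughly as below; completion handling and
 * error checking are omitted.
 *
 *	struct scatterlist hdr, result, *sgs[2];
 *	unsigned long flags;
 *
 *	sg_init_one(&hdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
 *	sgs[0] = &hdr;
 *	sg_init_one(&result, &vcrypto->input, sizeof(vcrypto->input));
 *	sgs[1] = &result;
 *
 *	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
 *	virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, 1, 1, vcrypto, GFP_ATOMIC);
 *	virtqueue_kick(vcrypto->ctrl_vq);
 *	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
 */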

struct virtio_crypto_sym_session_info {
	/* Backend session id, which comes from the host side */
	__u64 session_id;
};

struct virtio_crypto_request;
typedef void (*virtio_crypto_data_callback)
		(struct virtio_crypto_request *vc_req, int len);

struct virtio_crypto_request {
	uint8_t status;
	struct virtio_crypto_op_data_req *req_data;
	struct scatterlist **sgs;
	struct data_queue *dataq;
	virtio_crypto_data_callback alg_cb;
};
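
/*
 * Illustrative sketch only; the handler name below is hypothetical.  The
 * data-queue completion path is expected to invoke ->alg_cb for each finished
 * request, and a typical callback translates the device-written status and
 * releases the request resources, roughly:
 *
 *	static void virtio_crypto_example_done(struct virtio_crypto_request *vc_req,
 *					       int len)
 *	{
 *		int error = (vc_req->status == VIRTIO_CRYPTO_OK) ? 0 : -EIO;
 *
 *		virtcrypto_clear_request(vc_req);
 *		... complete the originating crypto API request with 'error' ...
 *	}
 */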

int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev);
struct list_head *virtcrypto_devmgr_get_head(void);
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev);
struct virtio_crypto *virtcrypto_devmgr_get_first(void);
int virtcrypto_dev_in_use(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev);
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev);
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev);
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto_dev,
				  uint32_t service,
				  uint32_t algo);
struct virtio_crypto *virtcrypto_get_dev_node(int node,
					      uint32_t service,
					      uint32_t algo);
int virtcrypto_dev_start(struct virtio_crypto *vcrypto);
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto);
int virtio_crypto_ablkcipher_crypt_req(
	struct crypto_engine *engine, void *vreq);

void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req);

static inline int virtio_crypto_get_current_node(void)
{
	int cpu, node;

	cpu = get_cpu();
	node = topology_physical_package_id(cpu);
	put_cpu();

	return node;
}
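
/*
 * Illustrative sketch only: an algorithm registration path could pick a
 * device close to the current CPU as below.  The service/algorithm constants
 * come from uapi/linux/virtio_crypto.h; error handling is trimmed.
 *
 *	struct virtio_crypto *vcrypto;
 *	int node = virtio_crypto_get_current_node();
 *
 *	vcrypto = virtcrypto_get_dev_node(node, VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *	... use the device, then drop the reference ...
 *	virtcrypto_dev_put(vcrypto);
 */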

int virtio_crypto_algs_register(struct virtio_crypto *vcrypto);
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto);

#endif /* _VIRTIO_CRYPTO_COMMON_H */