aboutsummaryrefslogtreecommitdiff
path: root/platform/linux-dpdk/odp_crypto.c
diff options
context:
space:
mode:
authorJanne Peltonen <janne.peltonen@nokia.com>2020-09-24 16:11:43 +0300
committerMatias Elo <matias.elo@nokia.com>2020-10-01 10:54:48 +0300
commit47c53cabf0c42656665b02b9fee785871fc1525d (patch)
treec1bb0ad8e5cbf831caca9582277d6c078a10b66d /platform/linux-dpdk/odp_crypto.c
parent79244082fb6b524944f1faf2def23edd26828d84 (diff)
linux-dpdk: crypto: fix locking around crypto operations
Current code may share the same cryptodev queue pair between different threads, leading to two problems: 1) rte_cryptodev enqueue and dequeue operations may be done simultaneously for the same queue pair by different threads, which is not allowed by DPDK. 2) Even if the races in enqueue and dequeue themselves would not cause problems, there is no guarantee that dequeue returns the same crypto operation that was submitted by the same thread. This in turn breaks the synchronous ODP crypto API. Sharing of the queue pairs can occur if a cryptodev supports a limited number of queue pairs or if the range of odp_cpu_id() is not from zero to odp_cpu_count() - 1. Fixes and improvements in this commit: Use thread specific cryptodev queue pairs when the maximum number of queue pairs is sufficient. This gets rid of locking around cryptodev enqueue and dequeue. Use odp_thread_id() instead of odp_cpu_id() to calculate cryptodev queue pair index, taking advantage of the well defined range of the former. Add global spinlocking around the enqueue and dequeue operations when thread specific queue pairs could not be allocated. Keep the lock for the whole enqueue + dequeue sequence to guarantee that the dequeued operation is the enqueued one. Signed-off-by: Janne Peltonen <janne.peltonen@nokia.com> Reviewed-by: Matias Elo <matias.elo@nokia.com>
Diffstat (limited to 'platform/linux-dpdk/odp_crypto.c')
-rw-r--r--platform/linux-dpdk/odp_crypto.c40
1 files changed, 37 insertions, 3 deletions
diff --git a/platform/linux-dpdk/odp_crypto.c b/platform/linux-dpdk/odp_crypto.c
index a700a7e15..27ae7fcd3 100644
--- a/platform/linux-dpdk/odp_crypto.c
+++ b/platform/linux-dpdk/odp_crypto.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2017-2018, Linaro Limited
+ * Copyright (c) 2020, Nokia
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -56,6 +57,7 @@ typedef struct crypto_session_entry_s {
struct rte_crypto_sym_xform cipher_xform;
struct rte_crypto_sym_xform auth_xform;
uint16_t cdev_nb_qpairs;
+ odp_bool_t cdev_qpairs_shared;
uint8_t cdev_id;
uint8_t cipher_iv_data[MAX_IV_LENGTH];
uint8_t auth_iv_data[MAX_IV_LENGTH];
@@ -66,6 +68,7 @@ typedef struct crypto_global_s {
uint8_t enabled_crypto_devs;
uint8_t enabled_crypto_dev_ids[RTE_CRYPTO_MAX_DEVS];
uint16_t enabled_crypto_dev_qpairs[RTE_CRYPTO_MAX_DEVS];
+ odp_bool_t enabled_crypto_dev_qpairs_shared[RTE_CRYPTO_MAX_DEVS];
crypto_session_entry_t *free;
crypto_session_entry_t sessions[MAX_SESSIONS];
int is_crypto_dev_initialized;
@@ -366,11 +369,17 @@ int _odp_crypto_init_global(void)
for (cdev_id = cdev_count - 1; cdev_id >= 0; cdev_id--) {
struct rte_cryptodev_info dev_info;
struct rte_mempool *mp;
+ odp_bool_t queue_pairs_shared = false;
rte_cryptodev_info_get(cdev_id, &dev_info);
- nb_queue_pairs = odp_cpu_count();
- if (nb_queue_pairs > dev_info.max_nb_queue_pairs)
+ nb_queue_pairs = odp_thread_count_max();
+ if (nb_queue_pairs > dev_info.max_nb_queue_pairs) {
nb_queue_pairs = dev_info.max_nb_queue_pairs;
+ queue_pairs_shared = true;
+ ODP_PRINT("Using shared queue pairs for crypto device %"
+ PRIu16 " (driver: %s)\n",
+ cdev_id, dev_info.driver_name);
+ }
struct rte_cryptodev_qp_conf qp_conf;
uint8_t socket_id = rte_cryptodev_socket_id(cdev_id);
@@ -452,6 +461,8 @@ int _odp_crypto_init_global(void)
global->enabled_crypto_dev_ids[global->enabled_crypto_devs] =
cdev_id;
global->enabled_crypto_dev_qpairs[cdev_id] = nb_queue_pairs;
+ global->enabled_crypto_dev_qpairs_shared[cdev_id] =
+ queue_pairs_shared;
global->enabled_crypto_devs++;
}
@@ -1473,6 +1484,7 @@ int odp_crypto_session_create(const odp_crypto_session_param_t *param,
}
session->cdev_nb_qpairs = global->enabled_crypto_dev_qpairs[cdev_id];
+ session->cdev_qpairs_shared = global->enabled_crypto_dev_qpairs_shared[cdev_id];
out_null:
session->rte_session = rte_session;
session->cdev_id = cdev_id;
@@ -1861,16 +1873,36 @@ int odp_crypto_int(odp_packet_t pkt_in,
if (rc_cipher == ODP_CRYPTO_ALG_ERR_NONE &&
rc_auth == ODP_CRYPTO_ALG_ERR_NONE) {
int retry_count = 0;
- int queue_pair = odp_cpu_id() % session->cdev_nb_qpairs;
+ int queue_pair;
int rc;
+ odp_bool_t queue_pairs_shared = session->cdev_qpairs_shared;
+
+ if (odp_unlikely(queue_pairs_shared))
+ queue_pair = odp_thread_id() % session->cdev_nb_qpairs;
+ else
+ queue_pair = odp_thread_id();
/* Set crypto operation data parameters */
rte_crypto_op_attach_sym_session(op, rte_session);
op->sym->m_src = (struct rte_mbuf *)(intptr_t)out_pkt;
+ /*
+ * If queue pairs are shared between multiple threads,
+ * we protect enqueue and dequeue using a lock. In addition,
+ * we keep the lock over the whole enqueue-dequeue sequence
+ * to guarantee that we get the same op back as what we
+ * enqueued. Otherwise synchronous ODP crypto operations
+ * could report the completion and status of an unrelated
+ * operation that was sent to the same queue pair from
+ * another thread.
+ */
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_lock(&global->lock);
rc = rte_cryptodev_enqueue_burst(session->cdev_id,
queue_pair, &op, 1);
if (rc == 0) {
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_unlock(&global->lock);
ODP_ERR("Failed to enqueue packet\n");
goto err_op_free;
}
@@ -1887,6 +1919,8 @@ int odp_crypto_int(odp_packet_t pkt_in,
}
break;
}
+ if (odp_unlikely(queue_pairs_shared))
+ odp_spinlock_unlock(&global->lock);
if (rc == 0) {
ODP_ERR("Failed to dequeue packet");
goto err_op_free;