Commit d6c4f029 authored by Aviad Yehezkel, committed by Saeed Mahameed
Browse files

net/mlx5: Refactor accel IPSec code



The current code has one layer that executed FPGA commands and
the Ethernet part directly used this code. Since downstream patches
introduce support for IPSec in mlx5_ib, we need to provide some
abstractions. This patch refactors the accel code into one layer
that creates a software IPSec transformation and another one which
creates the actual hardware context.
The internal command implementation is now hidden in the FPGA
core layer. The code also adds the ability to share FPGA hardware
contexts. If two contexts are the same, only a reference count
is taken.

Signed-off-by: Aviad Yehezkel <aviadye@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent af9fe19d
......@@ -37,27 +37,6 @@
#include "mlx5_core.h"
#include "fpga/ipsec.h"
/* Execute an IPSec SADB command against the device's FPGA.
 *
 * Returns a command context pointer on success (the caller must later pass
 * it to mlx5_accel_ipsec_sa_cmd_wait()), or ERR_PTR(-EOPNOTSUPP) when the
 * device does not offer IPSec offload.
 */
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
				   struct mlx5_accel_ipsec_sa *cmd)
{
	int cmd_size;

	if (!MLX5_IPSEC_DEV(mdev))
		return ERR_PTR(-EOPNOTSUPP);

	/* v2-capable devices take the full extended command layout;
	 * older devices only understand the v1 prefix of the struct.
	 */
	cmd_size = (mlx5_accel_ipsec_device_caps(mdev) &
		    MLX5_ACCEL_IPSEC_CAP_V2_CMD) ?
		   sizeof(*cmd) : sizeof(cmd->ipsec_sa_v1);

	return mlx5_fpga_ipsec_sa_cmd_exec(mdev, cmd, cmd_size);
}
/* Wait for completion of a command issued via mlx5_accel_ipsec_sa_cmd_exec()
 * and release its context; thin pass-through to the FPGA layer.
 */
int mlx5_accel_ipsec_sa_cmd_wait(void *ctx)
{
return mlx5_fpga_ipsec_sa_cmd_wait(ctx);
}
u32 mlx5_accel_ipsec_device_caps(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_device_caps(mdev);
......@@ -75,6 +54,21 @@ int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
return mlx5_fpga_ipsec_counters_read(mdev, counters, count);
}
/* Create a hardware SA context for the given ESP transform and address
 * tuple.  Delegates directly to the FPGA IPSec implementation; the
 * returned pointer is released with mlx5_accel_esp_free_hw_context().
 */
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
				       struct mlx5_accel_esp_xfrm *xfrm,
				       const __be32 saddr[4],
				       const __be32 daddr[4],
				       const __be32 spi, bool is_ipv6)
{
	void *sa_ctx;

	sa_ctx = mlx5_fpga_ipsec_create_sa_ctx(mdev, xfrm, saddr,
					       daddr, spi, is_ipv6);
	return sa_ctx;
}
/* Release a hardware SA context obtained from
 * mlx5_accel_esp_create_hw_context().
 */
void mlx5_accel_esp_free_hw_context(void *context)
{
mlx5_fpga_ipsec_delete_sa_ctx(context);
}
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return mlx5_fpga_ipsec_init(mdev);
......@@ -84,3 +78,25 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev)
{
mlx5_fpga_ipsec_cleanup(mdev);
}
/* Create a software ESP transform object.
 *
 * Allocation is done by the FPGA layer; on success the owning device is
 * recorded in the transform before it is handed back to the caller.
 * Returns an ERR_PTR on failure (propagated unchanged from the FPGA layer).
 */
struct mlx5_accel_esp_xfrm *
mlx5_accel_esp_create_xfrm(struct mlx5_core_dev *mdev,
			   const struct mlx5_accel_esp_xfrm_attrs *attrs,
			   u32 flags)
{
	struct mlx5_accel_esp_xfrm *xfrm =
		mlx5_fpga_esp_create_xfrm(mdev, attrs, flags);

	if (!IS_ERR(xfrm))
		xfrm->mdev = mdev;

	return xfrm;
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_create_xfrm);
/* Destroy a transform created by mlx5_accel_esp_create_xfrm(). */
void mlx5_accel_esp_destroy_xfrm(struct mlx5_accel_esp_xfrm *xfrm)
{
mlx5_fpga_esp_destroy_xfrm(xfrm);
}
EXPORT_SYMBOL_GPL(mlx5_accel_esp_destroy_xfrm);
......@@ -39,89 +39,20 @@
#ifdef CONFIG_MLX5_ACCEL
/* Flag bits for the hardware SADB entry 'flags' field
 * (NOTE(review): presumably matches the FPGA SADB layout -- confirm
 * against the device specification).
 */
#define MLX5_IPSEC_SADB_IP_AH BIT(7)
#define MLX5_IPSEC_SADB_IP_ESP BIT(6)
#define MLX5_IPSEC_SADB_SA_VALID BIT(5)
#define MLX5_IPSEC_SADB_SPI_EN BIT(4)
#define MLX5_IPSEC_SADB_DIR_SX BIT(3)
#define MLX5_IPSEC_SADB_IPV6 BIT(2)
/* Opcodes for the SADB command channel; the *_V2 variants pair with the
 * extended struct mlx5_accel_ipsec_sa layout.
 */
enum {
MLX5_IPSEC_CMD_ADD_SA = 0,
MLX5_IPSEC_CMD_DEL_SA = 1,
MLX5_IPSEC_CMD_ADD_SA_V2 = 2,
MLX5_IPSEC_CMD_DEL_SA_V2 = 3,
MLX5_IPSEC_CMD_MOD_SA_V2 = 4,
MLX5_IPSEC_CMD_SET_CAP = 5,
};
/* Encryption modes written into the SADB 'enc_mode' field. */
enum mlx5_accel_ipsec_enc_mode {
MLX5_IPSEC_SADB_MODE_NONE = 0,
MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128 = 1,
MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128 = 3,
};
/* True when the device advertises IPSec offload capability. */
#define MLX5_IPSEC_DEV(mdev) (mlx5_accel_ipsec_device_caps(mdev) & \
MLX5_ACCEL_IPSEC_CAP_DEVICE)
/* Version-1 hardware SA entry as transmitted to the device; multi-byte
 * fields are big-endian (device byte order).
 */
struct mlx5_accel_ipsec_sa_v1 {
__be32 cmd; /* one of the MLX5_IPSEC_CMD_* opcodes */
u8 key_enc[32];
u8 key_auth[32];
__be32 sip[4]; /* source IP; an IPv4 address is placed in sip[3] */
__be32 dip[4]; /* destination IP; an IPv4 address is placed in dip[3] */
union {
struct {
__be32 reserved;
u8 salt_iv[8];
__be32 salt;
} __packed gcm;
struct {
u8 salt[16];
} __packed cbc;
};
__be32 spi;
__be32 sw_sa_handle; /* software handle; echoed back in the response */
__be16 tfclen;
u8 enc_mode; /* enum mlx5_accel_ipsec_enc_mode */
u8 reserved1[2];
u8 flags; /* MLX5_IPSEC_SADB_* bits */
u8 reserved2[2];
};
/* Extended (v2) SA entry: the v1 layout followed by v2-only fields.
 * Only the v1 prefix is sent to devices without V2 command support.
 */
struct mlx5_accel_ipsec_sa {
struct mlx5_accel_ipsec_sa_v1 ipsec_sa_v1;
__be16 udp_sp;
__be16 udp_dp;
u8 reserved1[4];
__be32 esn;
__be16 vid; /* only 12 bits, rest is reserved */
__be16 reserved2;
} __packed;
/**
* mlx5_accel_ipsec_sa_cmd_exec - Execute an IPSec SADB command
* @mdev: mlx5 device
* @cmd: command to execute
* May be called from atomic context. Returns context pointer, or error
* Caller must eventually call mlx5_accel_ipsec_sa_cmd_wait from non-atomic
* context, to cleanup the context pointer
*/
void *mlx5_accel_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd);
/**
* mlx5_accel_ipsec_sa_cmd_wait - Wait for command execution completion
* @context: Context pointer returned from call to mlx5_accel_ipsec_sa_cmd_exec
* Sleeps (killable) until command execution is complete.
* Returns the command result, or -EINTR if killed
*/
int mlx5_accel_ipsec_sa_cmd_wait(void *context);
/* Number of hardware IPSec counters exposed by the device. */
unsigned int mlx5_accel_ipsec_counters_count(struct mlx5_core_dev *mdev);
/* Read up to @count hardware IPSec counters into @counters. */
int mlx5_accel_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
unsigned int count);
/* Create a hardware SA context for @xfrm with the given address/SPI tuple.
* Returns a context pointer (ERR_PTR on failure); release it with
* mlx5_accel_esp_free_hw_context().
*/
void *mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6);
void mlx5_accel_esp_free_hw_context(void *context);
/* Init/teardown of the accel IPSec layer for @mdev. */
int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev);
void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
......@@ -129,6 +60,20 @@ void mlx5_accel_ipsec_cleanup(struct mlx5_core_dev *mdev);
/* CONFIG_MLX5_ACCEL disabled: no device ever reports IPSec support. */
#define MLX5_IPSEC_DEV(mdev) false
static inline void *
mlx5_accel_esp_create_hw_context(struct mlx5_core_dev *mdev,
struct mlx5_accel_esp_xfrm *xfrm,
const __be32 saddr[4],
const __be32 daddr[4],
const __be32 spi, bool is_ipv6)
{
/* NOTE(review): returns NULL rather than ERR_PTR(-EOPNOTSUPP); a caller
* checking only IS_ERR() would treat this as success -- confirm callers
* cannot reach this path when the accel code is compiled out.
*/
return NULL;
}
static inline void mlx5_accel_esp_free_hw_context(void *context)
{
}
static inline int mlx5_accel_ipsec_init(struct mlx5_core_dev *mdev)
{
return 0;
......
......@@ -47,7 +47,8 @@ struct mlx5e_ipsec_sa_entry {
unsigned int handle; /* Handle in SADB_RX */
struct xfrm_state *x;
struct mlx5e_ipsec *ipsec;
void *context;
struct mlx5_accel_esp_xfrm *xfrm;
void *hw_context;
};
struct xfrm_state *mlx5e_ipsec_sadb_rx_lookup(struct mlx5e_ipsec *ipsec,
......@@ -105,74 +106,51 @@ static void mlx5e_ipsec_sadb_rx_free(struct mlx5e_ipsec_sa_entry *sa_entry)
ida_simple_remove(&ipsec->halloc, sa_entry->handle);
}
/* Map the xfrm AEAD key length to the hardware GCM encryption mode.
* The trailing 4 bytes of alg_key are the salt, hence the "- 4".
* NOTE(review): on an unsupported key length this returns -1, which the
* caller stores into the u8 enc_mode field -- the error is warned about
* but not propagated.
*/
static enum mlx5_accel_ipsec_enc_mode mlx5e_ipsec_enc_mode(struct xfrm_state *x)
{
unsigned int key_len = (x->aead->alg_key_len + 7) / 8 - 4;
switch (key_len) {
case 16:
return MLX5_IPSEC_SADB_MODE_AES_GCM_128_AUTH_128;
case 32:
return MLX5_IPSEC_SADB_MODE_AES_GCM_256_AUTH_128;
default:
netdev_warn(x->xso.dev, "Bad key len: %d for alg %s\n",
key_len, x->aead->alg_name);
return -1;
}
}
static void mlx5e_ipsec_build_hw_sa(u32 op, struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_ipsec_sa *hw_sa)
static void
mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_accel_esp_xfrm_attrs *attrs)
{
struct xfrm_state *x = sa_entry->x;
struct aes_gcm_keymat *aes_gcm = &attrs->keymat.aes_gcm;
struct aead_geniv_ctx *geniv_ctx;
unsigned int crypto_data_len;
struct crypto_aead *aead;
unsigned int key_len;
unsigned int crypto_data_len, key_len;
int ivsize;
memset(hw_sa, 0, sizeof(*hw_sa));
memset(attrs, 0, sizeof(*attrs));
/* key */
crypto_data_len = (x->aead->alg_key_len + 7) / 8;
key_len = crypto_data_len - 4; /* 4 bytes salt at end */
memcpy(aes_gcm->aes_key, x->aead->alg_key, key_len);
aes_gcm->key_len = key_len * 8;
/* salt and seq_iv */
aead = x->data;
geniv_ctx = crypto_aead_ctx(aead);
ivsize = crypto_aead_ivsize(aead);
memcpy(&hw_sa->ipsec_sa_v1.key_enc, x->aead->alg_key, key_len);
/* Duplicate 128 bit key twice according to HW layout */
if (key_len == 16)
memcpy(&hw_sa->ipsec_sa_v1.key_enc[16], x->aead->alg_key, key_len);
memcpy(&hw_sa->ipsec_sa_v1.gcm.salt_iv, geniv_ctx->salt, ivsize);
hw_sa->ipsec_sa_v1.gcm.salt = *((__be32 *)(x->aead->alg_key + key_len));
hw_sa->ipsec_sa_v1.cmd = htonl(op);
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_SA_VALID | MLX5_IPSEC_SADB_SPI_EN;
if (x->props.family == AF_INET) {
hw_sa->ipsec_sa_v1.sip[3] = x->props.saddr.a4;
hw_sa->ipsec_sa_v1.dip[3] = x->id.daddr.a4;
} else {
memcpy(hw_sa->ipsec_sa_v1.sip, x->props.saddr.a6,
sizeof(hw_sa->ipsec_sa_v1.sip));
memcpy(hw_sa->ipsec_sa_v1.dip, x->id.daddr.a6,
sizeof(hw_sa->ipsec_sa_v1.dip));
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IPV6;
}
hw_sa->ipsec_sa_v1.spi = x->id.spi;
hw_sa->ipsec_sa_v1.sw_sa_handle = htonl(sa_entry->handle);
switch (x->id.proto) {
case IPPROTO_ESP:
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_ESP;
break;
case IPPROTO_AH:
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_IP_AH;
break;
default:
break;
}
hw_sa->ipsec_sa_v1.enc_mode = mlx5e_ipsec_enc_mode(x);
if (!(x->xso.flags & XFRM_OFFLOAD_INBOUND))
hw_sa->ipsec_sa_v1.flags |= MLX5_IPSEC_SADB_DIR_SX;
memcpy(&aes_gcm->seq_iv, &geniv_ctx->salt, ivsize);
memcpy(&aes_gcm->salt, x->aead->alg_key + key_len,
sizeof(aes_gcm->salt));
/* iv len */
aes_gcm->icv_len = x->aead->alg_icv_len;
/* rx handle */
attrs->sa_handle = sa_entry->handle;
/* algo type */
attrs->keymat_type = MLX5_ACCEL_ESP_KEYMAT_AES_GCM;
/* action */
attrs->action = (!(x->xso.flags & XFRM_OFFLOAD_INBOUND)) ?
MLX5_ACCEL_ESP_ACTION_ENCRYPT :
MLX5_ACCEL_ESP_ACTION_DECRYPT;
/* flags */
attrs->flags |= (x->props.mode == XFRM_MODE_TRANSPORT) ?
MLX5_ACCEL_ESP_FLAGS_TRANSPORT :
MLX5_ACCEL_ESP_FLAGS_TUNNEL;
}
static inline int mlx5e_xfrm_validate_state(struct xfrm_state *x)
......@@ -254,9 +232,10 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry = NULL;
struct net_device *netdev = x->xso.dev;
struct mlx5_accel_ipsec_sa hw_sa;
struct mlx5_accel_esp_xfrm_attrs attrs;
struct mlx5e_priv *priv;
void *context;
__be32 saddr[4] = {0}, daddr[4] = {0}, spi;
bool is_ipv6 = false;
int err;
priv = netdev_priv(netdev);
......@@ -285,20 +264,41 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x)
}
}
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_ADD_SA, sa_entry, &hw_sa);
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
if (IS_ERR(context)) {
err = PTR_ERR(context);
/* create xfrm */
mlx5e_ipsec_build_accel_xfrm_attrs(sa_entry, &attrs);
sa_entry->xfrm =
mlx5_accel_esp_create_xfrm(priv->mdev, &attrs,
MLX5_ACCEL_XFRM_FLAG_REQUIRE_METADATA);
if (IS_ERR(sa_entry->xfrm)) {
err = PTR_ERR(sa_entry->xfrm);
goto err_sadb_rx;
}
err = mlx5_accel_ipsec_sa_cmd_wait(context);
if (err)
goto err_sadb_rx;
/* create hw context */
if (x->props.family == AF_INET) {
saddr[3] = x->props.saddr.a4;
daddr[3] = x->id.daddr.a4;
} else {
memcpy(saddr, x->props.saddr.a6, sizeof(saddr));
memcpy(daddr, x->id.daddr.a6, sizeof(daddr));
is_ipv6 = true;
}
spi = x->id.spi;
sa_entry->hw_context =
mlx5_accel_esp_create_hw_context(priv->mdev,
sa_entry->xfrm,
saddr, daddr, spi,
is_ipv6);
if (IS_ERR(sa_entry->hw_context)) {
err = PTR_ERR(sa_entry->hw_context);
goto err_xfrm;
}
x->xso.offload_handle = (unsigned long)sa_entry;
goto out;
err_xfrm:
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
err_sadb_rx:
if (x->xso.flags & XFRM_OFFLOAD_INBOUND) {
mlx5e_ipsec_sadb_rx_del(sa_entry);
......@@ -313,8 +313,6 @@ out:
static void mlx5e_xfrm_del_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
struct mlx5_accel_ipsec_sa hw_sa;
void *context;
if (!x->xso.offload_handle)
return;
......@@ -324,19 +322,11 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
mlx5e_ipsec_sadb_rx_del(sa_entry);
mlx5e_ipsec_build_hw_sa(MLX5_IPSEC_CMD_DEL_SA, sa_entry, &hw_sa);
context = mlx5_accel_ipsec_sa_cmd_exec(sa_entry->ipsec->en_priv->mdev, &hw_sa);
if (IS_ERR(context))
return;
sa_entry->context = context;
}
static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{
struct mlx5e_ipsec_sa_entry *sa_entry;
int res;
if (!x->xso.offload_handle)
return;
......@@ -344,11 +334,9 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
sa_entry = (struct mlx5e_ipsec_sa_entry *)x->xso.offload_handle;
WARN_ON(sa_entry->x != x);
res = mlx5_accel_ipsec_sa_cmd_wait(sa_entry->context);
sa_entry->context = NULL;
if (res) {
/* Leftover object will leak */
return;
if (sa_entry->hw_context) {
mlx5_accel_esp_free_hw_context(sa_entry->hw_context);
mlx5_accel_esp_destroy_xfrm(sa_entry->xfrm);
}
if (x->xso.flags & XFRM_OFFLOAD_INBOUND)
......
......@@ -31,6 +31,7 @@
*
*/
#include <linux/rhashtable.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
......@@ -47,7 +48,7 @@ enum mlx5_fpga_ipsec_cmd_status {
MLX5_FPGA_IPSEC_CMD_COMPLETE,
};
struct mlx5_ipsec_command_context {
struct mlx5_fpga_ipsec_cmd_context {
struct mlx5_fpga_dma_buf buf;
enum mlx5_fpga_ipsec_cmd_status status;
struct mlx5_ifc_fpga_ipsec_cmd_resp resp;
......@@ -58,11 +59,43 @@ struct mlx5_ipsec_command_context {
u8 command[0];
};
struct mlx5_fpga_esp_xfrm;
/* Hardware SA context: one per SA programmed into the FPGA.  Hashed by
* the raw hw_sa image so identical SAs can be detected and shared.
*/
struct mlx5_fpga_ipsec_sa_ctx {
struct rhash_head hash;
struct mlx5_ifc_fpga_ipsec_sa hw_sa; /* rhashtable key: full HW SA image */
struct mlx5_core_dev *dev;
struct mlx5_fpga_esp_xfrm *fpga_xfrm;
};
/* FPGA-side ESP transform; embeds the generic accel xfrm and points at
* a single SA context.  NOTE(review): num_rules presumably counts rules
* sharing sa_ctx -- confirm against the users of this struct.
*/
struct mlx5_fpga_esp_xfrm {
unsigned int num_rules;
struct mlx5_fpga_ipsec_sa_ctx *sa_ctx;
struct mutex lock; /* xfrm lock */
struct mlx5_accel_esp_xfrm accel_xfrm;
};
/* rhashtable parameters: key is the entire hw_sa blob of a sa_ctx. */
static const struct rhashtable_params rhash_sa = {
.key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa),
.head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash),
.automatic_shrinking = true,
.min_size = 1,
};
/* Per-device FPGA IPSec state. */
struct mlx5_fpga_ipsec {
struct list_head pending_cmds; /* commands sent, awaiting FPGA responses */
spinlock_t pending_cmds_lock; /* Protects pending_cmds */
u32 caps[MLX5_ST_SZ_DW(ipsec_extended_cap)];
struct mlx5_fpga_conn *conn;
/* Map hardware SA --> SA context
* (mlx5_fpga_ipsec_sa) (mlx5_fpga_ipsec_sa_ctx)
* We will use this hash to avoid SAs duplication in fpga which
* aren't allowed
*/
struct rhashtable sa_hash; /* hw_sa -> mlx5_fpga_ipsec_sa_ctx */
struct mutex sa_hash_lock;
};
static bool mlx5_fpga_is_ipsec_device(struct mlx5_core_dev *mdev)
......@@ -86,10 +119,10 @@ static void mlx5_fpga_ipsec_send_complete(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_dma_buf *buf,
u8 status)
{
struct mlx5_ipsec_command_context *context;
struct mlx5_fpga_ipsec_cmd_context *context;
if (status) {
context = container_of(buf, struct mlx5_ipsec_command_context,
context = container_of(buf, struct mlx5_fpga_ipsec_cmd_context,
buf);
mlx5_fpga_warn(fdev, "IPSec command send failed with status %u\n",
status);
......@@ -117,7 +150,7 @@ int syndrome_to_errno(enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome)
static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
{
struct mlx5_ifc_fpga_ipsec_cmd_resp *resp = buf->sg[0].data;
struct mlx5_ipsec_command_context *context;
struct mlx5_fpga_ipsec_cmd_context *context;
enum mlx5_ifc_fpga_ipsec_response_syndrome syndrome;
struct mlx5_fpga_device *fdev = cb_arg;
unsigned long flags;
......@@ -133,7 +166,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
spin_lock_irqsave(&fdev->ipsec->pending_cmds_lock, flags);
context = list_first_entry_or_null(&fdev->ipsec->pending_cmds,
struct mlx5_ipsec_command_context,
struct mlx5_fpga_ipsec_cmd_context,
list);
if (context)
list_del(&context->list);
......@@ -160,7 +193,7 @@ static void mlx5_fpga_ipsec_recv(void *cb_arg, struct mlx5_fpga_dma_buf *buf)
static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
const void *cmd, int cmd_size)
{
struct mlx5_ipsec_command_context *context;
struct mlx5_fpga_ipsec_cmd_context *context;
struct mlx5_fpga_device *fdev = mdev->fpga;
unsigned long flags;
int res;
......@@ -203,7 +236,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev,
static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
{
struct mlx5_ipsec_command_context *context = ctx;
struct mlx5_fpga_ipsec_cmd_context *context = ctx;
unsigned long timeout =
msecs_to_jiffies(MLX5_FPGA_IPSEC_CMD_TIMEOUT_MSEC);
int res;
......@@ -222,33 +255,49 @@ static int mlx5_fpga_ipsec_cmd_wait(void *ctx)
return res;
}
void *mlx5_fpga_ipsec_sa_cmd_exec(struct mlx5_core_dev *mdev,
struct mlx5_accel_ipsec_sa *cmd, int cmd_size)
static inline bool is_v2_sadb_supported(struct mlx5_fpga_ipsec *fipsec)
{
return mlx5_fpga_ipsec_cmd_exec(mdev, cmd, cmd_size);
if (MLX5_GET(ipsec_extended_cap, fipsec->caps, v2_command))
return true;
return false;
}
int mlx5_fpga_ipsec_sa_cmd_wait(void *ctx)
static int mlx5_fpga_ipsec_update_hw_sa(struct mlx5_fpga_device *fdev,
struct mlx5_ifc_fpga_ipsec_sa *hw_sa,
int opcode)
{
struct mlx5_ipsec_command_context *context = ctx;
struct mlx5_accel_ipsec_sa *sa;
int res;
struct mlx5_core_dev *dev = fdev->mdev;
struct mlx5_ifc_fpga_ipsec_sa *sa;
struct mlx5_fpga_ipsec_cmd_context *cmd_context;
size_t sa_cmd_size;
int err;
res = mlx5_fpga_ipsec_cmd_wait(ctx);
if (res)
hw_sa->ipsec_sa_v1.cmd = htonl(opcode);
if (is_v2_sadb_supported(fdev->ipsec))
sa_cmd_size = sizeof(*hw_sa);
else
sa_cmd_size = sizeof(hw_sa->ipsec_sa_v1);
cmd_context = (struct mlx5_fpga_ipsec_cmd_context *)
mlx5_fpga_ipsec_cmd_exec(dev, hw_sa, sa_cmd_size);
if (IS_ERR(cmd_context))
return PTR_ERR(cmd_context);
err = mlx5_fpga_ipsec_cmd_wait(cmd_context);
if (err)
goto out;
sa = (struct mlx5_accel_ipsec_sa *)&context->command;
if (sa->ipsec_sa_v1.sw_sa_handle != context->resp.sw_sa_handle) {
mlx5_fpga_err(context->dev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
sa = (struct mlx5_ifc_fpga_ipsec_sa *)&cmd_context->command;
if (sa->ipsec_sa_v1.sw_sa_handle != cmd_context->resp.sw_sa_handle) {
mlx5_fpga_err(fdev, "mismatch SA handle. cmd 0x%08x vs resp 0x%08x\n",
ntohl(sa->ipsec_sa_v1.sw_sa_handle),
ntohl(context->resp.sw_sa_handle));
res = -EIO;
ntohl(cmd_context->resp.sw_sa_handle));
err = -EIO;
}
out:
kfree(context);
return res;
kfree(cmd_context);
return err;
}
u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
......@@ -278,9 +327,6 @@ u32 mlx5_fpga_ipsec_device_caps(struct mlx5_core_dev *mdev)
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, rx_no_trailer))
ret |= MLX5_ACCEL_IPSEC_CAP_RX_NO_TRAILER;
if (MLX5_GET(ipsec_extended_cap, fdev->ipsec->caps, v2_command))
ret |= MLX5_ACCEL_IPSEC_CAP_V2_CMD;
return ret;
}
......