sriov.c 6.57 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
/*
 * Copyright (c) 2014, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/mlx5/driver.h>
35
#include <linux/mlx5/vport.h>
36
#include "mlx5_core.h"
37
#include "eswitch.h"
38

39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
/* Re-apply the admin-configured node/port GUIDs and state policy to a VF's
 * HCA vport context. Returns 0 when nothing was configured or on success,
 * -ENOMEM on allocation failure, or the modify-context error code.
 */
static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct mlx5_hca_vport_context *in;
	int err;

	/* Nothing was ever configured for this VF — nothing to restore. */
	if (!sriov->vfs_ctx[vf].node_guid &&
	    !sriov->vfs_ctx[vf].port_guid &&
	    sriov->vfs_ctx[vf].policy == MLX5_POLICY_INVALID)
		return 0;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	in->node_guid = sriov->vfs_ctx[vf].node_guid;
	in->port_guid = sriov->vfs_ctx[vf].port_guid;
	in->policy = sriov->vfs_ctx[vf].policy;

	/* Select only the fields that actually carry a value. */
	in->field_select = 0;
	if (in->port_guid)
		in->field_select |= MLX5_HCA_VPORT_SEL_PORT_GUID;
	if (in->node_guid)
		in->field_select |= MLX5_HCA_VPORT_SEL_NODE_GUID;
	if (in->policy)
		in->field_select |= MLX5_HCA_VPORT_SEL_STATE_POLICY;

	err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
	if (err)
		mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);

	kfree(in);
	return err;
}

71
static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
72
73
74
75
76
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int err;
	int vf;

77
78
79
	if (!MLX5_ESWITCH_MANAGER(dev))
		goto enable_vfs_hca;

80
81
	mlx5_eswitch_update_num_of_vfs(dev->priv.eswitch, num_vfs);
	err = mlx5_eswitch_enable(dev->priv.eswitch, MLX5_ESWITCH_LEGACY);
82
83
84
85
86
87
	if (err) {
		mlx5_core_warn(dev,
			       "failed to enable eswitch SRIOV (%d)\n", err);
		return err;
	}

88
enable_vfs_hca:
89
90
	for (vf = 0; vf < num_vfs; vf++) {
		err = mlx5_core_enable_hca(dev, vf + 1);
91
		if (err) {
92
93
			mlx5_core_warn(dev, "failed to enable VF %d (%d)\n", vf, err);
			continue;
94
		}
95
		sriov->vfs_ctx[vf].enabled = 1;
96
97
98
99
100
101
		if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
			err = sriov_restore_guids(dev, vf);
			if (err) {
				mlx5_core_warn(dev,
					       "failed to restore VF %d settings, err %d\n",
					       vf, err);
102
				continue;
103
104
			}
		}
105
		mlx5_core_dbg(dev, "successfully enabled VF* %d\n", vf);
106
	}
107
108

	return 0;
109
110
}

111
/* Device-level SRIOV teardown: disable each enabled VF HCA (in reverse
 * order), shut down the eswitch when managed here, and reclaim the pages
 * the firmware held on behalf of the VFs.
 */
static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev, bool clear_vf)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	int num_vfs = pci_num_vf(dev->pdev);
	int vf;

	for (vf = num_vfs - 1; vf >= 0; vf--) {
		if (!sriov->vfs_ctx[vf].enabled)
			continue;
		if (mlx5_core_disable_hca(dev, vf + 1)) {
			/* Leave the VF marked enabled so state stays accurate. */
			mlx5_core_warn(dev, "failed to disable VF %d\n", vf);
			continue;
		}
		sriov->vfs_ctx[vf].enabled = 0;
	}

	if (MLX5_ESWITCH_MANAGER(dev))
		mlx5_eswitch_disable(dev->priv.eswitch, clear_vf);

	if (mlx5_wait_for_pages(dev, &dev->priv.vfs_pages))
		mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}

136
static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
137
138
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
139
	int err;
140

141
142
143
144
145
	err = mlx5_device_enable_sriov(dev, num_vfs);
	if (err) {
		mlx5_core_warn(dev, "mlx5_device_enable_sriov failed : %d\n", err);
		return err;
	}
146

147
	err = pci_enable_sriov(pdev, num_vfs);
148
	if (err) {
149
		mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
150
		mlx5_device_disable_sriov(dev, true);
151
	}
152
	return err;
153
154
}

155
static void mlx5_sriov_disable(struct pci_dev *pdev)
156
{
157
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
158

159
	pci_disable_sriov(pdev);
160
	mlx5_device_disable_sriov(dev, true);
161
162
163
164
165
}

int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
166
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
167
	int err = 0;
168

Masanari Iida's avatar
Masanari Iida committed
169
	mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
170

171
	if (num_vfs)
172
		err = mlx5_sriov_enable(pdev, num_vfs);
173
	else
174
		mlx5_sriov_disable(pdev);
175

176
177
	if (!err)
		sriov->num_vfs = num_vfs;
178
	return err ? err : num_vfs;
179
180
}

181
182
int mlx5_sriov_attach(struct mlx5_core_dev *dev)
{
183
	if (!mlx5_core_is_pf(dev) || !pci_num_vf(dev->pdev))
184
185
186
		return 0;

	/* If sriov VFs exist in PCI level, enable them in device level */
187
	return mlx5_device_enable_sriov(dev, pci_num_vf(dev->pdev));
188
189
190
191
192
193
194
}

void mlx5_sriov_detach(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_is_pf(dev))
		return;

195
	mlx5_device_disable_sriov(dev, false);
196
197
}

198
199
200
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
{
	u16 host_total_vfs;
201
	const u32 *out;
202
203

	if (mlx5_core_is_ecpf_esw_manager(dev)) {
204
		out = mlx5_esw_query_functions(dev);
205
206
207
208

		/* Old FW doesn't support getting total_vfs from esw func
		 * but supports getting it from pci_sriov.
		 */
209
210
211
212
213
214
		if (IS_ERR(out))
			goto done;
		host_total_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_total_vfs);
		kvfree(out);
		if (host_total_vfs)
215
216
217
			return host_total_vfs;
	}

218
done:
219
220
221
	return pci_sriov_get_totalvfs(dev->pdev);
}

222
223
224
225
int mlx5_sriov_init(struct mlx5_core_dev *dev)
{
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
	struct pci_dev *pdev = dev->pdev;
226
	int total_vfs;
227
228
229
230

	if (!mlx5_core_is_pf(dev))
		return 0;

231
	total_vfs = pci_sriov_get_totalvfs(pdev);
232
	sriov->max_vfs = mlx5_get_max_vfs(dev);
233
234
	sriov->num_vfs = pci_num_vf(pdev);
	sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
235
236
237
	if (!sriov->vfs_ctx)
		return -ENOMEM;

238
	return 0;
239
240
}

241
void mlx5_sriov_cleanup(struct mlx5_core_dev *dev)
242
{
243
	struct mlx5_core_sriov *sriov = &dev->priv.sriov;
244
245

	if (!mlx5_core_is_pf(dev))
246
		return;
247

248
	kfree(sriov->vfs_ctx);
249
}