Commit 54654e14 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "Not too much going on here, though there are about four fixes related
  to stuff merged during the last merge window.

  We also see the return of a syzkaller instance with access to RDMA
  devices, and a few of the bugs it detected have been squashed.

   - Fix three crashers and a memory leak for HFI1

   - Several bugs found by syzkaller

   - A bug fix for the recent QP counters feature on older mlx5 HW

   - Locking inversion in cxgb4

   - Unnecessary WARN_ON in siw

   - A umad crasher regression during unload, from a bug fix for
     something else

   - Bugs introduced in the merge window:
       - Missed list_del in uverbs file rework, core and mlx5 devx
       - Unexpected integer math truncation in the mlx5 VAR patches
       - Compilation bug fix for the VAR patches on 32 bit"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/mlx5: Use div64_u64 for num_var_hw_entries calculation
  RDMA/core: Fix protection fault in get_pkey_idx_qp_list
  RDMA/rxe: Fix soft lockup problem due to using tasklets in softirq
  RDMA/mlx5: Prevent overflow in mmap offset calculations
  IB/umad: Fix kernel crash while unloading ib_umad
  RDMA/mlx5: Fix async events cleanup flows
  RDMA/core: Add missing list deletion on freeing event queue
  RDMA/siw: Remove unwanted WARN_ON in siw_cm_llp_data_ready()
  RDMA/iw_cxgb4: initiate CLOSE when entering TERM
  IB/mlx5: Return failure when rts2rts_qp_counters_set_id is not supported
  RDMA/core: Fix invalid memory access in spec_filter_size
  IB/rdmavt: Reset all QPs when the device is shut down
  IB/hfi1: Close window for pq and request colliding
  IB/hfi1: Acquire lock to release TID entries when user file is closed
  RDMA/hfi1: Fix memory leak in _dev_comp_vect_mappings_create
parents b719ae07 685eff51
@@ -339,22 +339,16 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
if (!new_pps)
return NULL;
if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
if (!qp_pps) {
new_pps->main.port_num = qp_attr->port_num;
new_pps->main.pkey_index = qp_attr->pkey_index;
} else {
new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
qp_attr->port_num :
qp_pps->main.port_num;
new_pps->main.pkey_index =
(qp_attr_mask & IB_QP_PKEY_INDEX) ?
qp_attr->pkey_index :
qp_pps->main.pkey_index;
}
if (qp_attr_mask & IB_QP_PORT)
new_pps->main.port_num =
(qp_pps) ? qp_pps->main.port_num : qp_attr->port_num;
if (qp_attr_mask & IB_QP_PKEY_INDEX)
new_pps->main.pkey_index = (qp_pps) ? qp_pps->main.pkey_index :
qp_attr->pkey_index;
if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
new_pps->main.state = IB_PORT_PKEY_VALID;
} else if (qp_pps) {
if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
......
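One subtlety in the mask test above: attribute flags must be combined with bitwise OR, because logical OR collapses its operands to 0 or 1 and the expression degenerates into a test of bit 0. A small userspace sketch (flag values mirroring enum ib_qp_attr_mask in <rdma/ib_verbs.h>):

    #include <assert.h>

    #define IB_QP_PKEY_INDEX (1 << 4)
    #define IB_QP_PORT       (1 << 5)

    int main(void)
    {
            int qp_attr_mask = IB_QP_PORT;

            /* Correct: tests both flag bits at once. */
            assert(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT));

            /* Wrong: (A || B) evaluates to 1, so only bit 0 is tested. */
            assert(!(qp_attr_mask & (IB_QP_PKEY_INDEX || IB_QP_PORT)));

            return 0;
    }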
@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
struct ib_umad_file *file;
int id;
cdev_device_del(&port->sm_cdev, &port->sm_dev);
cdev_device_del(&port->cdev, &port->dev);
mutex_lock(&port->file_mutex);
/* Mark ib_dev NULL and block ioctl or other file ops from progressing
@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
cdev_device_del(&port->sm_cdev, &port->sm_dev);
cdev_device_del(&port->cdev, &port->dev);
ida_free(&umad_ida, port->dev_num);
/* balances device_initialize() */
......
@@ -2745,12 +2745,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
return 0;
}
static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
{
/* Returns user space filter size, includes padding */
return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
}
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
u16 ib_real_filter_sz)
{
@@ -2894,11 +2888,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
ssize_t kern_filter_sz;
size_t kern_filter_sz;
void *kern_spec_mask;
void *kern_spec_val;
kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
if (check_sub_overflow((size_t)kern_spec->hdr.size,
sizeof(struct ib_uverbs_flow_spec_hdr),
&kern_filter_sz))
return -EINVAL;
kern_filter_sz /= 2;
kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr);
......
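check_sub_overflow() matters here because kern_spec->hdr.size is user-controlled: the removed helper subtracted the header size unconditionally, so a size smaller than the header wrapped around to an enormous filter length. A minimal sketch of the same guard (hypothetical helper name, real macro from <linux/overflow.h>):

    #include <linux/overflow.h>
    #include <rdma/ib_user_verbs.h>

    /* Returns 0 and the user filter size (half mask, half value), or
     * -EINVAL when the user-supplied size is smaller than the fixed
     * header and the subtraction would have wrapped. */
    static int user_filter_size(size_t user_size, size_t *out)
    {
            size_t payload;

            if (check_sub_overflow(user_size,
                                   sizeof(struct ib_uverbs_flow_spec_hdr),
                                   &payload))
                    return -EINVAL;

            *out = payload / 2;
            return 0;
    }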
@@ -220,6 +220,7 @@ void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
if (entry->counter)
list_del(&entry->obj_list);
list_del(&entry->list);
kfree(entry);
}
spin_unlock_irq(&event_queue->lock);
......
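The added list_del() matters because each entry is linked into up to two lists: kfree() on an entry still linked into event_queue->event_list leaves the neighbouring list_head pointers aimed at freed memory, which the next traversal dereferences. A generic sketch of the rule (self-contained, <linux/list.h> idiom):

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct event {
            struct list_head list;      /* on the queue's event list */
            struct list_head obj_list;  /* optionally on an object's list */
    };

    /* Unlink an entry from every list it is on before freeing it. */
    static void free_event(struct event *entry, bool on_obj_list)
    {
            if (on_obj_list)
                    list_del(&entry->obj_list);
            list_del(&entry->list);
            kfree(entry);
    }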
@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
/* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
* when entering the TERM state the RNIC MUST initiate a CLOSE.
*/
c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
} else
pr_warn("TERM received tid %u no ep/qp\n", tid);
......
@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
c4iw_get_ep(&ep->com);
disconnect = 1;
if (!internal) {
c4iw_get_ep(&ep->com);
terminate = 1;
disconnect = 1;
} else {
terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
......
@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
}
free_cpumask_var(available_cpus);
free_cpumask_var(non_intr_cpus);
return 0;
fail:
......
@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
if (fd) {
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd;
kobject_get(&fd->dd->kobj);
fp->private_data = fd;
} else {
fp->private_data = NULL;
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
return -ENOMEM;
}
if (!fd || init_srcu_struct(&fd->pq_srcu))
goto nomem;
spin_lock_init(&fd->pq_rcu_lock);
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->rec_cpu_num = -1; /* no cpu affinity by default */
fd->mm = current->mm;
mmgrab(fd->mm);
fd->dd = dd;
kobject_get(&fd->dd->kobj);
fp->private_data = fd;
return 0;
nomem:
kfree(fd);
fp->private_data = NULL;
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
return -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
int idx;
if (!cq || !pq)
idx = srcu_read_lock(&fd->pq_srcu);
pq = srcu_dereference(fd->pq, &fd->pq_srcu);
if (!cq || !pq) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EIO;
}
if (!iter_is_iovec(from) || !dim)
if (!iter_is_iovec(from) || !dim) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -EINVAL;
}
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
srcu_read_unlock(&fd->pq_srcu, idx);
return -ENOSPC;
}
while (dim) {
int ret;
@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
reqs++;
}
srcu_read_unlock(&fd->pq_srcu, idx);
return reqs;
}
@@ -707,6 +718,7 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
cleanup_srcu_struct(&fdata->pq_srcu);
kfree(fdata);
return 0;
}
......
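The rewritten write path follows the canonical SRCU read-side shape: fetch the protected pointer through srcu_dereference() and drop the same srcu_read_lock() index on every exit path. Condensed (using the pq_srcu/pq fields this patch introduces):

    #include <linux/srcu.h>

    static ssize_t pq_read_section(struct hfi1_filedata *fd)
    {
            struct hfi1_user_sdma_pkt_q *pq;
            int idx;

            idx = srcu_read_lock(&fd->pq_srcu);
            pq = srcu_dereference(fd->pq, &fd->pq_srcu);
            if (!pq) {
                    srcu_read_unlock(&fd->pq_srcu, idx);
                    return -EIO;    /* queue already torn down */
            }
            /* ... use pq; the free path cannot release it until unlock ... */
            srcu_read_unlock(&fd->pq_srcu, idx);
            return 0;
    }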
@@ -1444,10 +1444,13 @@ struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
struct srcu_struct pq_srcu;
struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt;
struct hfi1_user_sdma_comp_q *cq;
struct hfi1_user_sdma_pkt_q *pq;
/* update side lock for SRCU */
spinlock_t pq_rcu_lock;
struct hfi1_user_sdma_pkt_q __rcu *pq;
u16 subctxt;
/* for cpu affinity; -1 if none */
int rec_cpu_num;
......
@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
{
int ret = 0;
spin_lock_init(&fd->tid_lock);
spin_lock_init(&fd->invalid_lock);
fd->entry_to_rb = kcalloc(uctxt->expected_count,
sizeof(struct rb_node *),
GFP_KERNEL);
@@ -142,10 +139,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
mutex_lock(&uctxt->exp_mutex);
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
mutex_unlock(&uctxt->exp_mutex);
kfree(fd->invalid_tids);
fd->invalid_tids = NULL;
......
@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq)
return -ENOMEM;
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
goto pq_mmu_fail;
}
fd->pq = pq;
rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
return 0;
@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
pq = fd->pq;
spin_lock(&fd->pq_rcu_lock);
pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
lockdep_is_held(&fd->pq_rcu_lock));
if (pq) {
rcu_assign_pointer(fd->pq, NULL);
spin_unlock(&fd->pq_rcu_lock);
synchronize_srcu(&fd->pq_srcu);
/* at this point there can be no more new requests */
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
iowait_sdma_drain(&pq->busy);
@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
fd->pq = NULL;
} else {
spin_unlock(&fd->pq_rcu_lock);
}
if (fd->cq) {
vfree(fd->cq->comps);
@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
{
int ret = 0, i;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct hfi1_user_sdma_pkt_q *pq = fd->pq;
struct hfi1_user_sdma_pkt_q *pq =
srcu_dereference(fd->pq, &fd->pq_srcu);
struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0;
......
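Teardown is the mirror image, and the ordering is the whole fix: unpublish the pointer under the update-side lock, wait out any readers still inside the SRCU section, and only then free. Sketched with the fields this patch adds:

    #include <linux/slab.h>
    #include <linux/srcu.h>

    static void pq_teardown(struct hfi1_filedata *fd)
    {
            struct hfi1_user_sdma_pkt_q *pq;

            spin_lock(&fd->pq_rcu_lock);            /* update-side lock */
            pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
                                        lockdep_is_held(&fd->pq_rcu_lock));
            if (!pq) {
                    spin_unlock(&fd->pq_rcu_lock);
                    return;
            }
            rcu_assign_pointer(fd->pq, NULL);       /* 1. unpublish */
            spin_unlock(&fd->pq_rcu_lock);
            synchronize_srcu(&fd->pq_srcu);         /* 2. wait for readers */
            kfree(pq);                              /* 3. now safe to free */
    }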
@@ -2319,14 +2319,12 @@ static int deliver_event(struct devx_event_subscription *event_sub,
if (ev_file->omit_data) {
spin_lock_irqsave(&ev_file->lock, flags);
if (!list_empty(&event_sub->event_list)) {
if (!list_empty(&event_sub->event_list) ||
ev_file->is_destroyed) {
spin_unlock_irqrestore(&ev_file->lock, flags);
return 0;
}
/* is_destroyed is ignored here because we don't have any memory
* allocation to clean up for the omit_data case
*/
list_add_tail(&event_sub->event_list, &ev_file->event_list);
spin_unlock_irqrestore(&ev_file->lock, flags);
wake_up_interruptible(&ev_file->poll_wait);
@@ -2473,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
return -ERESTARTSYS;
}
if (list_empty(&ev_queue->event_list) &&
ev_queue->is_destroyed)
return -EIO;
spin_lock_irq(&ev_queue->lock);
if (ev_queue->is_destroyed) {
spin_unlock_irq(&ev_queue->lock);
return -EIO;
}
}
event = list_entry(ev_queue->event_list.next,
@@ -2551,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
return -EOVERFLOW;
}
if (ev_file->is_destroyed) {
spin_unlock_irq(&ev_file->lock);
return -EIO;
}
while (list_empty(&ev_file->event_list)) {
spin_unlock_irq(&ev_file->lock);
@@ -2667,8 +2661,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
spin_lock_irq(&comp_ev_file->ev_queue.lock);
list_for_each_entry_safe(entry, tmp,
&comp_ev_file->ev_queue.event_list, list)
&comp_ev_file->ev_queue.event_list, list) {
list_del(&entry->list);
kvfree(entry);
}
spin_unlock_irq(&comp_ev_file->ev_queue.lock);
return 0;
};
@@ -2680,11 +2676,29 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
container_of(uobj, struct devx_async_event_file,
uobj);
struct devx_event_subscription *event_sub, *event_sub_tmp;
struct devx_async_event_data *entry, *tmp;
struct mlx5_ib_dev *dev = ev_file->dev;
spin_lock_irq(&ev_file->lock);
ev_file->is_destroyed = 1;
/* free the pending events allocation */
if (ev_file->omit_data) {
struct devx_event_subscription *event_sub, *tmp;
list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
event_list)
list_del_init(&event_sub->event_list);
} else {
struct devx_async_event_data *entry, *tmp;
list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
list) {
list_del(&entry->list);
kfree(entry);
}
}
spin_unlock_irq(&ev_file->lock);
wake_up_interruptible(&ev_file->poll_wait);
@@ -2699,15 +2713,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
}
mutex_unlock(&dev->devx_event_table.event_xa_lock);
/* free the pending events allocation */
if (!ev_file->omit_data) {
spin_lock_irq(&ev_file->lock);
list_for_each_entry_safe(entry, tmp,
&ev_file->event_list, list)
kfree(entry); /* read can't come any more */
spin_unlock_irq(&ev_file->lock);
}
put_device(&dev->ib_dev.dev);
return 0;
};
......
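All of the devx changes above converge on one rule: an is_destroyed flag only closes the race if it is tested under the same spinlock that protects the event list; checked outside the lock, a deliver can interleave with destroy and enqueue onto a list that is about to be freed. Distilled:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct ev_file {
            spinlock_t lock;
            bool is_destroyed;
            struct list_head event_list;
    };

    static int deliver(struct ev_file *ev, struct list_head *item)
    {
            unsigned long flags;

            spin_lock_irqsave(&ev->lock, flags);
            if (ev->is_destroyed) {
                    spin_unlock_irqrestore(&ev->lock, flags);
                    return -EIO;    /* destroy already ran; drop the event */
            }
            list_add_tail(item, &ev->event_list);
            spin_unlock_irqrestore(&ev->lock, flags);
            return 0;
    }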
@@ -2283,8 +2283,8 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
{
u16 cmd = entry->rdma_entry.start_pgoff >> 16;
u16 index = entry->rdma_entry.start_pgoff & 0xFFFF;
u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
(index & 0xFF)) << PAGE_SHIFT;
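The widening from u16 to u64 is not cosmetic: C promotes sub-int operands to 32-bit int, so the final << PAGE_SHIFT can push significant bits past bit 31 before the result is widened into the u64 return value. A userspace demonstration of the lost bits (assuming PAGE_SHIFT = 12 and MLX5_IB_MMAP_CMD_SHIFT = 8; uint32_t stands in for the promoted-int arithmetic to keep the demo well-defined):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t cmd = 0x1, index = 0x1234;

            /* 32-bit arithmetic: the shift wraps and the top bits vanish. */
            uint32_t as32 = ((uint32_t)(index >> 8) << 16) |
                            ((uint32_t)cmd << 8) | (index & 0xFFu);
            uint64_t lossy = (uint64_t)(as32 << 12);        /* 0x20134000 */

            /* 64-bit arithmetic from the start keeps every bit. */
            uint64_t cmd64 = cmd, index64 = index;
            uint64_t exact = (((index64 >> 8) << 16) | (cmd64 << 8) |
                              (index64 & 0xFF)) << 12;      /* 0x120134000 */

            printf("lossy=%#llx exact=%#llx\n",
                   (unsigned long long)lossy, (unsigned long long)exact);
            return 0;
    }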
@@ -6545,7 +6545,7 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
doorbell_bar_offset);
bar_size = (1ULL << log_doorbell_bar_size) * 4096;
var_table->stride_size = 1ULL << log_doorbell_stride;
var_table->num_var_hw_entries = bar_size / var_table->stride_size;
var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
mutex_init(&var_table->bitmap_lock);
var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
GFP_KERNEL);
......
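div64_u64() is the 32-bit compile fix called out in the pull message: a plain '/' on two u64 values makes the compiler emit a call to libgcc's __udivdi3, which the kernel does not link against, so 32-bit builds fail at link time. Usage sketch (<linux/math64.h>):

    #include <linux/math64.h>

    /* Compiles to a native divide on 64-bit; on 32-bit it calls the
     * kernel's own 64-by-64 division helper instead of __udivdi3. */
    static u64 num_entries(u64 bar_size, u64 stride_size)
    {
            return div64_u64(bar_size, stride_size);
    }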
@@ -3441,9 +3441,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct mlx5_ib_qp_base *base;
u32 set_id;
if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
return 0;
if (counter)
set_id = counter->id;
else
@@ -6576,6 +6573,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
*/
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
int err = 0;
@@ -6585,6 +6583,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
goto out;
}
if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
err = -EOPNOTSUPP;
goto out;
}
if (mqp->state == IB_QPS_RTS) {
err = __mlx5_ib_qp_set_counter(qp, counter);
if (!err)
......
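Moving the rts2rts_qp_counters_set_id test up a level changes the failure mode from silently succeeding to returning -EOPNOTSUPP, so binding a rdma_counter on older HW is visibly rejected rather than appearing bound while doing nothing. A distilled sketch (do_set_counter is a hypothetical stand-in for the low-level helper):

    /* Reject early with a real error instead of returning 0 from a
     * helper that quietly skipped the unsupported operation. */
    static int set_counter(struct mlx5_ib_dev *dev,
                           struct rdma_counter *counter)
    {
            if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
                    return -EOPNOTSUPP;

            return do_set_counter(dev, counter);    /* hypothetical */
    }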
@@ -61,6 +61,8 @@
#define RVT_RWQ_COUNT_THRESHOLD 16
static void rvt_rc_timeout(struct timer_list *t);
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -452,40 +454,41 @@ int rvt_driver_qp_init(struct rvt_dev_info *rdi)
}
/**
* free_all_qps - check for QPs still in use
* rvt_free_qp_cb - callback function to reset a qp
* @qp: the qp to reset
* @v: a 64-bit value
*
* This function resets the qp and removes it from the
* qp hash table.
*/
static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
{
unsigned int *qp_inuse = (unsigned int *)v;
struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
/* Reset the qp and remove it from the qp hash list */
rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
/* Increment the qp_inuse count */
(*qp_inuse)++;
}
/**
* rvt_free_all_qps - check for QPs still in use
* @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
* Return the number of QPs still in use.
*/
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
unsigned long flags;
struct rvt_qp *qp;
unsigned n, qp_inuse = 0;
spinlock_t *ql; /* work around too long line below */
if (rdi->driver_f.free_all_qps)
qp_inuse = rdi->driver_f.free_all_qps(rdi);
unsigned int qp_inuse = 0;
qp_inuse += rvt_mcast_tree_empty(rdi);
if (!rdi->qp_dev)
return qp_inuse;
ql = &rdi->qp_dev->qpt_lock;
spin_lock_irqsave(ql, flags);
for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
lockdep_is_held(ql));
RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
for (; qp; qp = rcu_dereference_protected(qp->next,
lockdep_is_held(ql)))
qp_inuse++;
}
spin_unlock_irqrestore(ql, flags);
synchronize_rcu();
return qp_inuse;
}
@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
}
/**
* rvt_reset_qp - initialize the QP state to the reset state
* _rvt_reset_qp - initialize the QP state to the reset state
* @qp: the QP to reset
* @type: the QP type
*
* r_lock, s_hlock, and s_lock are required to be held by the caller
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
__must_hold(&qp->s_lock)
__must_hold(&qp->s_hlock)
__must_hold(&qp->r_lock)
@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock);
}
/**
* rvt_reset_qp - initialize the QP state to the reset state
* @rdi: the device info
* @qp: the QP to reset
* @type: the QP type
*
* This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
* before calling _rvt_reset_qp().
*/
static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
enum ib_qp_type type)
{
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
_rvt_reset_qp(rdi, qp, type);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
}
/**
 * rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table
* @qpn: queue pair number to free
@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) {
case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET)
rvt_reset_qp(rdi, qp, ibqp->qp_type);
_rvt_reset_qp(rdi, qp, ibqp->qp_type);
break;
case IB_QPS_RTR:
@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
spin_lock_irq(&qp->r_lock);
spin_lock(&qp->s_hlock);
spin_lock(&qp->s_lock);
rvt_reset_qp(rdi, qp, ibqp->qp_type);
spin_unlock(&qp->s_lock);
spin_unlock(&qp->s_hlock);
spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
......
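rvt_qp_iter() hands each QP to a callback along with a caller-chosen u64 cookie, which the patch uses to smuggle the in-use counter through; the same pattern suits any per-QP bookkeeping. A sketch against the rvt_qp_iter() signature used above:

    #include <rdma/rdmavt_qp.h>

    /* Callback: 'v' carries a pointer packed into the u64 cookie. */
    static void count_rc_qps(struct rvt_qp *qp, u64 v)
    {
            unsigned int *count = (unsigned int *)v;

            if (qp->ibqp.qp_type == IB_QPT_RC)
                    (*count)++;
    }

    static unsigned int rc_qps_in_use(struct rvt_dev_info *rdi)
    {
            unsigned int count = 0;

            rvt_qp_iter(rdi, (u64)&count, count_rc_qps);
            return count;
    }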
@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
rxe_run_task(&qp->req.task, 1);
rxe_run_task(&qp->req.task, 0);
}
}
return COMPST_ERROR_RETRY;
@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
rxe_run_task(&qp->req.task, 1);
rxe_run_task(&qp->req.task, 0);
}
}
@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
rxe_run_task(&qp->req.task, 1);
rxe_run_task(&qp->req.task, 0);
}
}
@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
rxe_run_task(&qp->req.task, 1);
rxe_run_task(&qp->req.task, 0);
}
if (pkt) {
......
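Every hunk in this file flips the second argument of rxe_run_task() from 1 to 0: with 1 the requester is re-queued as another tasklet, and when the completer is itself running in softirq that endless re-raising of tasklets is what starved the CPU into the reported soft lockup; with 0 the work runs synchronously in the current context. Roughly the helper's behavior in this era (simplified sketch, not verbatim source):

    /* Nonzero 'sched' defers the work to a tasklet; zero runs it
     * immediately in the calling context. */
    void rxe_run_task(struct rxe_task *task, int sched)
    {
            if (task->destroyed)
                    return;

            if (sched)
                    tasklet_schedule(&task->tasklet);
            else
                    rxe_do_task((unsigned long)task);
    }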
@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
if (!cep) {
WARN_ON(1);
if (!cep)
goto out;
}
siw_dbg_cep(cep, "state: %d\n", cep->state);
switch (cep->state) {
......