[PATCH] mthca: initialize send and receive queue locks separately
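
With lockdep enabled, spin_lock_init() declares a static lock class key at its call site, so every lock initialized by the same line of code ends up in one lockdep class. Initializing both queue locks inside mthca_wq_init() therefore puts qp->sq.lock and qp->rq.lock in the same class, and any path that takes the two locks nested is reported as recursive locking of a single lock. Below is a minimal sketch of the idea; the names (demo_wq, demo_qp, demo_qp_init) are invented for illustration and are not the driver's structures.

    /*
     * Illustrative sketch only: with lockdep, spin_lock_init() keys the
     * lock's class to the call site where it is initialized.
     */
    #include <linux/spinlock.h>

    struct demo_wq {
            spinlock_t lock;
    };

    struct demo_qp {
            struct demo_wq sq;
            struct demo_wq rq;
    };

    static void demo_qp_init(struct demo_qp *qp)
    {
            /*
             * If a shared helper called spin_lock_init() for both queues,
             * sq.lock and rq.lock would share one lockdep class and taking
             * them nested would look like recursive locking.  Initializing
             * them at two distinct call sites gives each lock its own class.
             */
            spin_lock_init(&qp->sq.lock);
            spin_lock_init(&qp->rq.lock);
    }

This is why the hunks below drop the spin_lock_init() call from mthca_wq_init() and instead call it once per queue in mthca_alloc_qp_common().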
index 07c13be07a4a5da858a975fe6d11059b0da3a35b..490fc783bb0c899709488f7f83fbeaf261b36a0c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -224,7 +224,7 @@ static void *get_send_wqe(struct mthca_qp *qp, int n)
 
 static void mthca_wq_init(struct mthca_wq *wq)
 {
-       spin_lock_init(&wq->lock);
+       /* mthca_alloc_qp_common() initializes the locks */
        wq->next_ind  = 0;
        wq->last_comp = wq->max - 1;
        wq->head      = 0;
@@ -534,7 +534,9 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        struct mthca_qp_context *qp_context;
        u32 sqd_event = 0;
        u8 status;
-       int err;
+       int err = -EINVAL;
+
+       mutex_lock(&qp->mutex);
 
        if (attr_mask & IB_QP_CUR_STATE) {
                cur_state = attr->cur_qp_state;
@@ -553,39 +555,41 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                          "%d->%d with attr 0x%08x\n",
                          qp->transport, cur_state, new_state,
                          attr_mask);
-               return -EINVAL;
+               goto out;
        }
 
        if ((attr_mask & IB_QP_PKEY_INDEX) &&
             attr->pkey_index >= dev->limits.pkey_table_len) {
                mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
                          attr->pkey_index, dev->limits.pkey_table_len-1);
-               return -EINVAL;
+               goto out;
        }
 
        if ((attr_mask & IB_QP_PORT) &&
            (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
                mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
-               return -EINVAL;
+               goto out;
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
            attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
                mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
                          attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
-               return -EINVAL;
+               goto out;
        }
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
            attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
                mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
                          attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
-               return -EINVAL;
+               goto out;
        }
 
        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
-       if (IS_ERR(mailbox))
-               return PTR_ERR(mailbox);
+       if (IS_ERR(mailbox)) {
+               err = PTR_ERR(mailbox);
+               goto out;
+       }
        qp_param = mailbox->buf;
        qp_context = &qp_param->context;
        memset(qp_param, 0, sizeof *qp_param);
@@ -618,7 +622,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
                        mthca_dbg(dev, "path MTU (%u) is invalid\n",
                                  attr->path_mtu);
-                       return -EINVAL;
+                       goto out_mailbox;
                }
                qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
        }
@@ -672,7 +676,7 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        if (attr_mask & IB_QP_AV) {
                if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
                                   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
-                       return -EINVAL;
+                       goto out_mailbox;
 
                qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
        }
@@ -686,18 +690,18 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
                        mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
                                  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
-                       return -EINVAL;
+                       goto out_mailbox;
                }
 
                if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
                        mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
                                attr->alt_port_num);
-                       return -EINVAL;
+                       goto out_mailbox;
                }
 
                if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
                                   attr->alt_ah_attr.port_num))
-                       return -EINVAL;
+                       goto out_mailbox;
 
                qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
                                                              attr->alt_port_num << 24);
@@ -793,12 +797,12 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
        err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
                              mailbox, sqd_event, &status);
        if (err)
-               goto out;
+               goto out_mailbox;
        if (status) {
                mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
                           cur_state, new_state, status);
                err = -EINVAL;
-               goto out;
+               goto out_mailbox;
        }
 
        qp->state = new_state;
@@ -853,8 +857,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
                }
        }
 
-out:
+out_mailbox:
        mthca_free_mailbox(dev, mailbox);
+
+out:
+       mutex_unlock(&qp->mutex);
        return err;
 }
 
@@ -1100,12 +1107,16 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
 
        qp->refcount = 1;
        init_waitqueue_head(&qp->wait);
+       mutex_init(&qp->mutex);
        qp->state        = IB_QPS_RESET;
        qp->atomic_rd_en = 0;
        qp->resp_depth   = 0;
        qp->sq_policy    = send_policy;
        mthca_wq_init(&qp->sq);
        mthca_wq_init(&qp->rq);
+       /* these are initialized separately so lockdep can tell them apart */
+       spin_lock_init(&qp->sq.lock);
+       spin_lock_init(&qp->rq.lock);
 
        ret = mthca_map_memfree(dev, qp);
        if (ret)