spatch away Linux spinlock functions.
author     Dan Cross <crossd@gmail.com>
           Wed, 12 Apr 2017 16:33:05 +0000 (12:33 -0400)
committer  Barret Rhoden <brho@cs.berkeley.edu>
           Mon, 17 Apr 2017 18:07:36 +0000 (14:07 -0400)
Instead of trying to paper over the Linux spinlock calls with preprocessor
macros, spatch them to what they are supposed to be.
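
The rewrite itself was mechanical, via Coccinelle. As a sketch only (the
actual semantic patch for this commit is not reproduced here and may have
differed), SmPL rules of roughly this shape produce the conversions seen
in the diff below:

    @@
    expression E;
    @@
    - spin_lock_init(E)
    + spinlock_init_irqsave(E)

    @@
    expression E;
    @@
    - spin_lock_irq(E)
    + spin_lock_irqsave(E)

    @@
    expression E;
    @@
    - spin_unlock_irq(E)
    + spin_unlock_irqsave(E)

    @@
    expression E, F;
    @@
    - spin_lock_irqsave(E, F)
    + spin_lock_irqsave(E)

    @@
    expression E, F;
    @@
    - spin_unlock_irqrestore(E, F)
    + spin_unlock_irqsave(E)

    @@
    expression E;
    @@
    - spin_lock_bh(E)
    + spin_lock(E)

    @@
    expression E;
    @@
    - spin_unlock_bh(E)
    + spin_unlock(E)

    @@
    declarer name DEFINE_SPINLOCK;
    identifier x;
    @@
    - DEFINE_SPINLOCK(x);
    + spinlock_t x = SPINLOCK_INITIALIZER;

Akaros saves the caller's IRQ state in the spinlock itself (hence an
unlock named spin_unlock_irqsave() taking no flags), which is why the
Linux "flags" argument can simply be dropped and the now-dead
"unsigned long flags;" declarations are deleted along the way. The
spin_lock_nested() sites in qp.c appear to have been converted by hand
(see the TODO comments there), since that call is only a lockdep
annotation.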

Change-Id: I7b9ed4d27e2e595020186dc435d9d41e37262ce9
Signed-off-by: Dan Cross <crossd@gmail.com>
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
kern/drivers/net/mlx4u/cq.c
kern/drivers/net/mlx4u/main.c
kern/drivers/net/mlx4u/qp.c
kern/drivers/net/mlx4u/srq.c
kern/drivers/net/udrvr/compat.c
kern/drivers/net/udrvr/compat.h
kern/drivers/net/udrvr/device.c
kern/drivers/net/udrvr/uverbs_main.c
kern/drivers/net/udrvr/verbs.c

diff --git a/kern/drivers/net/mlx4u/cq.c b/kern/drivers/net/mlx4u/cq.c
index 9ce6bf8..06bd17c 100644
@@ -185,7 +185,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
        entries      = roundup_pow_of_two(entries + 1);
        cq->ibcq.cqe = entries - 1;
        mutex_init(&cq->resize_mutex);
-       spin_lock_init(&cq->lock);
+       spinlock_init_irqsave(&cq->lock);
        cq->resize_buf = NULL;
        cq->resize_umem = NULL;
        INIT_LIST_HEAD(&cq->send_qp_list);
@@ -422,7 +422,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                struct mlx4_ib_cq_buf tmp_buf;
                int tmp_cqe = 0;
 
-               spin_lock_irq(&cq->lock);
+               spin_lock_irqsave(&cq->lock);
                if (cq->resize_buf) {
                        mlx4_ib_cq_resize_copy_cqes(cq);
                        tmp_buf = cq->buf;
@@ -433,7 +433,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                        kfree(cq->resize_buf);
                        cq->resize_buf = NULL;
                }
-               spin_unlock_irq(&cq->lock);
+               spin_unlock_irqsave(&cq->lock);
 
                if (tmp_cqe)
                        mlx4_ib_free_cq_buf(dev, &tmp_buf, tmp_cqe);
@@ -888,7 +888,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        int err = 0;
        struct mlx4_ib_dev *mdev = to_mdev(cq->ibcq.device);
 
-       spin_lock_irqsave(&cq->lock, flags);
+       spin_lock_irqsave(&cq->lock);
        if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                mlx4_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
                goto out;
@@ -903,7 +903,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
        mlx4_cq_set_ci(&cq->mcq);
 
 out:
-       spin_unlock_irqrestore(&cq->lock, flags);
+       spin_unlock_irqsave(&cq->lock);
 
        if (err == 0 || err == -EAGAIN)
                return npolled;
@@ -977,7 +977,7 @@ void __mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 
 void mlx4_ib_cq_clean(struct mlx4_ib_cq *cq, u32 qpn, struct mlx4_ib_srq *srq)
 {
-       spin_lock_irq(&cq->lock);
+       spin_lock_irqsave(&cq->lock);
        __mlx4_ib_cq_clean(cq, qpn, srq);
-       spin_unlock_irq(&cq->lock);
+       spin_unlock_irqsave(&cq->lock);
 }
diff --git a/kern/drivers/net/mlx4u/main.c b/kern/drivers/net/mlx4u/main.c
index a2cb9dc..d7ba859 100644
 #define        iboe_get_mtu(v)                 IB_MTU_1024     /* TODO */
 
 /* STUB START */
-int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev) { return 0; }
+int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
+{
+       spinlock_init_irqsave(&dev->sriov.going_down_lock);
+       return 0;
+}
 int mlx4_ib_mad_init(struct mlx4_ib_dev *dev) { return 0; }
 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev) {}
 void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev) {}
@@ -413,7 +417,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
        props->active_mtu       = IB_MTU_256;
        if (is_bonded)
                rtnl_lock(); /* required to get upper dev */
-       spin_lock_bh(&iboe->lock);
+       spin_lock(&iboe->lock);
 #if 0  /* AKAROS */
        ndev = iboe->netdevs[port - 1];
        if (ndev && is_bonded)
@@ -429,7 +433,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                                        IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state       = state_to_phys_state(props->state);
 out_unlock:
-       spin_unlock_bh(&iboe->lock);
+       spin_unlock(&iboe->lock);
        if (is_bonded)
                rtnl_unlock();
 out:
@@ -586,9 +590,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
        if (mlx4_is_slave(to_mdev(ibdev)->dev))
                return -EOPNOTSUPP;
 
-       spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
+       spin_lock_irqsave(&to_mdev(ibdev)->sm_lock);
        memcpy(ibdev->node_desc, props->node_desc, 64);
-       spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);
+       spin_unlock_irqsave(&to_mdev(ibdev)->sm_lock);
 
        /*
         * If possible, pass node desc to FW, so it can generate
@@ -909,11 +913,11 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
        if (!mqp->port)
                return 0;
 
-       spin_lock_bh(&mdev->iboe.lock);
+       spin_lock(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
-       spin_unlock_bh(&mdev->iboe.lock);
+       spin_unlock(&mdev->iboe.lock);
 
        if (ndev) {
                ret = 1;
@@ -1460,11 +1464,11 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
-               spin_lock_bh(&mdev->iboe.lock);
+               spin_lock(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
-               spin_unlock_bh(&mdev->iboe.lock);
+               spin_unlock(&mdev->iboe.lock);
                if (ndev)
                        dev_put(ndev);
                list_del(&ge->list);
@@ -1767,7 +1771,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                return 0;
 
        iboe = &ibdev->iboe;
-       spin_lock_bh(&iboe->lock);
+       spin_lock(&iboe->lock);
 
        for (port = 1; port <= ibdev->dev->caps.num_ports; ++port)
                if ((netif_is_bond_master(real_dev) &&
@@ -1777,7 +1781,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
                        update_gid_table(ibdev, port, gid,
                                         event == NETDEV_DOWN, 0);
 
-       spin_unlock_bh(&iboe->lock);
+       spin_unlock(&iboe->lock);
        return 0;
 
 }
@@ -1969,7 +1973,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
 
 #if 0  /* AKAROS */
        read_lock(&dev_base_lock);
-       spin_lock_bh(&iboe->lock);
+       spin_lock(&iboe->lock);
 
        for_each_netdev(&init_net, dev) {
                u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1980,7 +1984,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
                }
        }
 
-       spin_unlock_bh(&iboe->lock);
+       spin_unlock(&iboe->lock);
        read_unlock(&dev_base_lock);
 #endif /* AKAROS */
 out:
@@ -2000,7 +2004,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
 
        iboe = &ibdev->iboe;
 
-       spin_lock_bh(&iboe->lock);
+       spin_lock(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                enum ib_port_state      port_state = IB_PORT_NOP;
                struct net_device *old_master = iboe->masters[port - 1];
@@ -2073,7 +2077,7 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev,
                }
        }
 
-       spin_unlock_bh(&iboe->lock);
+       spin_unlock(&iboe->lock);
 
        if (update_qps_port > 0)
                mlx4_ib_update_qps(ibdev, dev, update_qps_port);
@@ -2434,10 +2438,10 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                ib_num_ports++;
 
-       spin_lock_init(&ibdev->sm_lock);
+       spinlock_init_irqsave(&ibdev->sm_lock);
        mutex_init(&ibdev->cap_mask_mutex);
        INIT_LIST_HEAD(&ibdev->qp_list);
-       spin_lock_init(&ibdev->reset_flow_resource_lock);
+       spinlock_init_irqsave(&ibdev->reset_flow_resource_lock);
 
        if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
            ib_num_ports) {
@@ -2752,10 +2756,10 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)
                dm[i]->slave = slave;
                dm[i]->do_init = do_init;
                dm[i]->dev = ibdev;
-               spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags);
+               spin_lock_irqsave(&ibdev->sriov.going_down_lock);
                if (!ibdev->sriov.is_going_down)
                        queue_work(ibdev->sriov.demux[i].ud_wq, &dm[i]->work);
-               spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);
+               spin_unlock_irqsave(&ibdev->sriov.going_down_lock);
        }
 out:
        kfree(dm);
@@ -2776,13 +2780,13 @@ static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
        INIT_LIST_HEAD(&cq_notify_list);
 
        /* Go over qp list reside on that ibdev, sync with create/destroy qp.*/
-       spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);
+       spin_lock_irqsave(&ibdev->reset_flow_resource_lock);
 
        list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
-               spin_lock_irqsave(&mqp->sq.lock, flags_qp);
+               spin_lock_irqsave(&mqp->sq.lock);
                if (mqp->sq.tail != mqp->sq.head) {
                        send_mcq = to_mcq(mqp->ibqp.send_cq);
-                       spin_lock_irqsave(&send_mcq->lock, flags_cq);
+                       spin_lock_irqsave(&send_mcq->lock);
                        if (send_mcq->mcq.comp &&
                            mqp->ibqp.send_cq->comp_handler) {
                                if (!send_mcq->mcq.reset_notify_added) {
@@ -2791,16 +2795,16 @@ static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
                                                      &cq_notify_list);
                                }
                        }
-                       spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
+                       spin_unlock_irqsave(&send_mcq->lock);
                }
-               spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
+               spin_unlock_irqsave(&mqp->sq.lock);
                /* Now, handle the QP's receive queue */
-               spin_lock_irqsave(&mqp->rq.lock, flags_qp);
+               spin_lock_irqsave(&mqp->rq.lock);
                /* no handling is needed for SRQ */
                if (!mqp->ibqp.srq) {
                        if (mqp->rq.tail != mqp->rq.head) {
                                recv_mcq = to_mcq(mqp->ibqp.recv_cq);
-                               spin_lock_irqsave(&recv_mcq->lock, flags_cq);
+                               spin_lock_irqsave(&recv_mcq->lock);
                                if (recv_mcq->mcq.comp &&
                                    mqp->ibqp.recv_cq->comp_handler) {
                                        if (!recv_mcq->mcq.reset_notify_added) {
@@ -2809,17 +2813,16 @@ static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
                                                              &cq_notify_list);
                                        }
                                }
-                               spin_unlock_irqrestore(&recv_mcq->lock,
-                                                      flags_cq);
+                               spin_unlock_irqsave(&recv_mcq->lock);
                        }
                }
-               spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
+               spin_unlock_irqsave(&mqp->rq.lock);
        }
 
        list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
                mcq->comp(mcq);
        }
-       spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
+       spin_unlock_irqsave(&ibdev->reset_flow_resource_lock);
        pr_warn("mlx4_ib_handle_catas_error ended\n");
 }
 
@@ -2833,7 +2836,7 @@ static void handle_bonded_port_state_event(struct work_struct *work)
        struct ib_event ibev;
 
        kfree(ew);
-       spin_lock_bh(&ibdev->iboe.lock);
+       spin_lock(&ibdev->iboe.lock);
        for (i = 0; i < MLX4_MAX_PORTS; ++i) {
                struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
                enum ib_port_state curr_port_state;
@@ -2849,7 +2852,7 @@ static void handle_bonded_port_state_event(struct work_struct *work)
                bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
                        curr_port_state : IB_PORT_ACTIVE;
        }
-       spin_unlock_bh(&ibdev->iboe.lock);
+       spin_unlock(&ibdev->iboe.lock);
 
        ibev.device = &ibdev->ib_dev;
        ibev.element.port_num = 1;
diff --git a/kern/drivers/net/mlx4u/qp.c b/kern/drivers/net/mlx4u/qp.c
index 4a3c160..81e16d0 100644
@@ -696,8 +696,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        qp->mlx4_ib_qp_type = qp_type;
 
        mutex_init(&qp->mutex);
-       spin_lock_init(&qp->sq.lock);
-       spin_lock_init(&qp->rq.lock);
+       spinlock_init_irqsave(&qp->sq.lock);
+       spinlock_init_irqsave(&qp->rq.lock);
        INIT_LIST_HEAD(&qp->gid_list);
        INIT_LIST_HEAD(&qp->steering_rules);
 
@@ -842,7 +842,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        if (!*caller_qp)
                *caller_qp = qp;
 
-       spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+       spin_lock_irqsave(&dev->reset_flow_resource_lock);
        mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),
                         to_mcq(init_attr->recv_cq));
        /* Maintain device to QPs access, needed for further handling
@@ -858,7 +858,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
        list_add_tail(&qp->cq_recv_list, &mcq->recv_qp_list);
        mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),
                           to_mcq(init_attr->recv_cq));
-       spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+       spin_unlock_irqsave(&dev->reset_flow_resource_lock);
        return 0;
 
 err_qpn:
@@ -917,14 +917,18 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
        __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
        if (send_cq == recv_cq) {
-               spin_lock(&send_cq->lock);
+               spin_lock_irqsave(&send_cq->lock);
                __acquire(&recv_cq->lock);
        } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
-               spin_lock(&send_cq->lock);
-               spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+               spin_lock_irqsave(&send_cq->lock);
+               // TODO(dcross): Was nested lock:
+               // spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
+               spin_lock_irqsave(&recv_cq->lock);
        } else {
-               spin_lock(&recv_cq->lock);
-               spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+               spin_lock_irqsave(&recv_cq->lock);
+               // TODO(dcross): Was nested lock:
+               // spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
+               spin_lock_irqsave(&send_cq->lock);
        }
 }
 
@@ -1016,7 +1020,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
 
        get_cqs(qp, &send_cq, &recv_cq);
 
-       spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
+       spin_lock_irqsave(&dev->reset_flow_resource_lock);
        mlx4_ib_lock_cqs(send_cq, recv_cq);
 
        /* del from lists under both locks above to protect reset flow paths */
@@ -1033,7 +1037,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
        mlx4_qp_remove(dev->dev, &qp->mqp);
 
        mlx4_ib_unlock_cqs(send_cq, recv_cq);
-       spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
+       spin_unlock_irqsave(&dev->reset_flow_resource_lock);
 
        mlx4_qp_free(dev->dev, &qp->mqp);
 
diff --git a/kern/drivers/net/mlx4u/srq.c b/kern/drivers/net/mlx4u/srq.c
index dfa930f..438e466 100644
@@ -321,7 +321,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
        int i;
        struct mlx4_ib_dev *mdev = to_mdev(ibsrq->device);
 
-       spin_lock_irqsave(&srq->lock, flags);
+       spin_lock_irqsave(&srq->lock);
        if (mdev->dev->persist->state & MLX4_DEVICE_STATE_INTERNAL_ERROR) {
                err = -EIO;
                *bad_wr = wr;
@@ -374,7 +374,7 @@ int mlx4_ib_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
        }
 out:
 
-       spin_unlock_irqrestore(&srq->lock, flags);
+       spin_unlock_irqsave(&srq->lock);
 
        return err;
 }
diff --git a/kern/drivers/net/udrvr/compat.c b/kern/drivers/net/udrvr/compat.c
index 6bf2aa9..08d786e 100644
@@ -178,7 +178,7 @@ int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask)
        /* We use values[] == NULL as an indicator that slot is free */
        BUG_ON(ptr == NULL);
 
-       spin_lock_irqsave(&idp->lock, f);
+       spin_lock_irqsave(&idp->lock);
 
        for (i = 0; i < MAXITEMS; i++) {
                if (idp->values[i] == NULL) {
diff --git a/kern/drivers/net/udrvr/compat.h b/kern/drivers/net/udrvr/compat.h
index e8a0cd6..4119a25 100644
@@ -51,16 +51,6 @@ typedef atomic_t                     atomic64_t;
 #define        mutex_lock(a)                   qlock(a)
 #define        mutex_unlock(a)                 qunlock(a)
 
-#define        spin_lock_init(E)               spinlock_init_irqsave(E)
-#define        spin_lock_irq(a)                spin_lock_irqsave(a, 0)
-#define        spin_unlock_irq(a)              spin_unlock_irqsave(a)
-#define        spin_lock_irqsave(l, f)         spin_lock_irqsave(l)
-#define        spin_unlock_irqrestore(l, f)    spin_unlock_irqsave(l)
-#define        spin_lock_nested(l, d)          spin_lock(l)
-#define        spin_lock_bh(E)                 spin_lock(E)
-#define        spin_unlock_bh(E)               spin_unlock(E)
-#define        DEFINE_SPINLOCK(x)              spinlock_t x = SPINLOCK_INITIALIZER
-
 extern unsigned long pgprot_noncached(int vmprot);
 extern unsigned long pgprot_writecombine(int vmprot);
 
diff --git a/kern/drivers/net/udrvr/device.c b/kern/drivers/net/udrvr/device.c
index f6208be..409c02a 100644
@@ -221,7 +221,6 @@ EXPORT_SYMBOL(ib_dealloc_device);
 static int add_client_context(struct ib_device *device, struct ib_client *client)
 {
        struct ib_client_data *context;
-       unsigned long flags;
 
        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
@@ -233,9 +232,9 @@ static int add_client_context(struct ib_device *device, struct ib_client *client
        context->client = client;
        context->data   = NULL;
 
-       spin_lock_irqsave(&device->client_data_lock, flags);
+       spin_lock_irqsave(&device->client_data_lock);
        list_add(&context->list, &device->client_data_list);
-       spin_unlock_irqrestore(&device->client_data_lock, flags);
+       spin_unlock_irqsave(&device->client_data_lock);
 
        return 0;
 }
@@ -309,8 +308,8 @@ int ib_register_device(struct ib_device *device,
 
        INIT_LIST_HEAD(&device->event_handler_list);
        INIT_LIST_HEAD(&device->client_data_list);
-       spin_lock_init(&device->event_handler_lock);
-       spin_lock_init(&device->client_data_lock);
+       spinlock_init_irqsave(&device->event_handler_lock);
+       spinlock_init_irqsave(&device->client_data_lock);
 
        ret = read_port_table_lengths(device);
        if (ret) {
@@ -356,7 +355,6 @@ void ib_unregister_device(struct ib_device *device)
 {
        struct ib_client *client;
        struct ib_client_data *context, *tmp;
-       unsigned long flags;
 
        mutex_lock(&device_mutex);
 
@@ -373,10 +371,10 @@ void ib_unregister_device(struct ib_device *device)
 
        ib_device_unregister_sysfs(device);
 
-       spin_lock_irqsave(&device->client_data_lock, flags);
+       spin_lock_irqsave(&device->client_data_lock);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
-       spin_unlock_irqrestore(&device->client_data_lock, flags);
+       spin_unlock_irqsave(&device->client_data_lock);
 
        device->reg_state = IB_DEV_UNREGISTERED;
 }
@@ -424,7 +422,6 @@ void ib_unregister_client(struct ib_client *client)
 {
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
-       unsigned long flags;
 
        mutex_lock(&device_mutex);
 
@@ -432,13 +429,13 @@ void ib_unregister_client(struct ib_client *client)
                if (client->remove)
                        client->remove(device);
 
-               spin_lock_irqsave(&device->client_data_lock, flags);
+               spin_lock_irqsave(&device->client_data_lock);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                list_del(&context->list);
                                kfree(context);
                        }
-               spin_unlock_irqrestore(&device->client_data_lock, flags);
+               spin_unlock_irqsave(&device->client_data_lock);
        }
        list_del(&client->list);
 
@@ -458,15 +455,14 @@ void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
 {
        struct ib_client_data *context;
        void *ret = NULL;
-       unsigned long flags;
 
-       spin_lock_irqsave(&device->client_data_lock, flags);
+       spin_lock_irqsave(&device->client_data_lock);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
-       spin_unlock_irqrestore(&device->client_data_lock, flags);
+       spin_unlock_irqsave(&device->client_data_lock);
 
        return ret;
 }
@@ -485,9 +481,8 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
 {
        struct ib_client_data *context;
-       unsigned long flags;
 
-       spin_lock_irqsave(&device->client_data_lock, flags);
+       spin_lock_irqsave(&device->client_data_lock);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
@@ -498,7 +493,7 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
               device->name, client->name);
 
 out:
-       spin_unlock_irqrestore(&device->client_data_lock, flags);
+       spin_unlock_irqsave(&device->client_data_lock);
 }
 EXPORT_SYMBOL(ib_set_client_data);
 
@@ -513,12 +508,10 @@ EXPORT_SYMBOL(ib_set_client_data);
  */
 int ib_register_event_handler  (struct ib_event_handler *event_handler)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+       spin_lock_irqsave(&event_handler->device->event_handler_lock);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
-       spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&event_handler->device->event_handler_lock);
 
        return 0;
 }
@@ -533,11 +526,9 @@ EXPORT_SYMBOL(ib_register_event_handler);
  */
 int ib_unregister_event_handler(struct ib_event_handler *event_handler)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
+       spin_lock_irqsave(&event_handler->device->event_handler_lock);
        list_del(&event_handler->list);
-       spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&event_handler->device->event_handler_lock);
 
        return 0;
 }
@@ -553,15 +544,14 @@ EXPORT_SYMBOL(ib_unregister_event_handler);
  */
 void ib_dispatch_event(struct ib_event *event)
 {
-       unsigned long flags;
        struct ib_event_handler *handler;
 
-       spin_lock_irqsave(&event->device->event_handler_lock, flags);
+       spin_lock_irqsave(&event->device->event_handler_lock);
 
        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);
 
-       spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&event->device->event_handler_lock);
 }
 EXPORT_SYMBOL(ib_dispatch_event);
 
diff --git a/kern/drivers/net/udrvr/uverbs_main.c b/kern/drivers/net/udrvr/uverbs_main.c
index b61e5a4..04402cf 100644
@@ -92,7 +92,7 @@ enum {
 
 static struct class *uverbs_class;
 
-DEFINE_SPINLOCK(ib_uverbs_idr_lock);
+spinlock_t ib_uverbs_idr_lock = SPINLOCK_INITIALIZER;
 DEFINE_IDR(ib_uverbs_pd_idr);
 DEFINE_IDR(ib_uverbs_mr_idr);
 DEFINE_IDR(ib_uverbs_mw_idr);
@@ -103,7 +103,7 @@ DEFINE_IDR(ib_uverbs_srq_idr);
 DEFINE_IDR(ib_uverbs_xrcd_idr);
 DEFINE_IDR(ib_uverbs_rule_idr);
 
-static DEFINE_SPINLOCK(map_lock);
+static spinlock_t map_lock = SPINLOCK_INITIALIZER;
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
 
 static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
@@ -186,23 +186,23 @@ void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
        struct ib_uverbs_event *evt, *tmp;
 
        if (ev_file) {
-               spin_lock_irq(&ev_file->lock);
+               spin_lock_irqsave(&ev_file->lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
-               spin_unlock_irq(&ev_file->lock);
+               spin_unlock_irqsave(&ev_file->lock);
 
                kref_put(&ev_file->ref, ib_uverbs_release_event_file);
        }
 
 #if 0  /* AKAROS */
-       spin_lock_irq(&file->async_file->lock);
+       spin_lock_irqsave(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
-       spin_unlock_irq(&file->async_file->lock);
+       spin_unlock_irqsave(&file->async_file->lock);
 #endif /* AKAROS */
 }
 
@@ -212,12 +212,12 @@ void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
 #if 0  /* AKAROS */
        struct ib_uverbs_event *evt, *tmp;
 
-       spin_lock_irq(&file->async_file->lock);
+       spin_lock_irqsave(&file->async_file->lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
-       spin_unlock_irq(&file->async_file->lock);
+       spin_unlock_irqsave(&file->async_file->lock);
 #endif /* AKAROS */
 }
 
@@ -366,10 +366,10 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
        int eventsz;
        int ret = 0;
 
-       spin_lock_irq(&file->lock);
+       spin_lock_irqsave(&file->lock);
 
        while (list_empty(&file->event_list)) {
-               spin_unlock_irq(&file->lock);
+               spin_unlock_irqsave(&file->lock);
 
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
@@ -378,7 +378,7 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;
 
-               spin_lock_irq(&file->lock);
+               spin_lock_irqsave(&file->lock);
        }
 
        event = list_entry(file->event_list.next, struct ib_uverbs_event, list);
@@ -399,7 +399,7 @@ static ssize_t ib_uverbs_event_read(struct file *filp, char __user *buf,
                }
        }
 
-       spin_unlock_irq(&file->lock);
+       spin_unlock_irqsave(&file->lock);
 
        if (event) {
                if (copy_to_user(buf, event, eventsz))
@@ -421,10 +421,10 @@ static unsigned int ib_uverbs_event_poll(struct file *filp,
 
        poll_wait(filp, &file->poll_wait, wait);
 
-       spin_lock_irq(&file->lock);
+       spin_lock_irqsave(&file->lock);
        if (!list_empty(&file->event_list))
                pollflags = POLLIN | POLLRDNORM;
-       spin_unlock_irq(&file->lock);
+       spin_unlock_irqsave(&file->lock);
 
        return pollflags;
 }
@@ -441,14 +441,14 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
        struct ib_uverbs_event_file *file = filp->private_data;
        struct ib_uverbs_event *entry, *tmp;
 
-       spin_lock_irq(&file->lock);
+       spin_lock_irqsave(&file->lock);
        file->is_closed = 1;
        list_for_each_entry_safe(entry, tmp, &file->event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
-       spin_unlock_irq(&file->lock);
+       spin_unlock_irqsave(&file->lock);
 
        if (file->is_async) {
                ib_unregister_event_handler(&file->uverbs_file->event_handler);
@@ -475,20 +475,19 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
        struct ib_uverbs_event_file    *file = cq_context;
        struct ib_ucq_object           *uobj;
        struct ib_uverbs_event         *entry;
-       unsigned long                   flags;
 
        if (!file)
                return;
 
-       spin_lock_irqsave(&file->lock, flags);
+       spin_lock_irqsave(&file->lock);
        if (file->is_closed) {
-               spin_unlock_irqrestore(&file->lock, flags);
+               spin_unlock_irqsave(&file->lock);
                return;
        }
 
        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
-               spin_unlock_irqrestore(&file->lock, flags);
+               spin_unlock_irqsave(&file->lock);
                return;
        }
 
@@ -499,7 +498,7 @@ void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
 
        list_add_tail(&entry->list, &file->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
-       spin_unlock_irqrestore(&file->lock, flags);
+       spin_unlock_irqsave(&file->lock);
 
        wake_up_interruptible(&file->poll_wait);
        kill_fasync(&file->async_queue, SIGIO, POLL_IN);
@@ -511,17 +510,16 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
                                    u32 *counter)
 {
        struct ib_uverbs_event *entry;
-       unsigned long flags;
 
-       spin_lock_irqsave(&file->async_file->lock, flags);
+       spin_lock_irqsave(&file->async_file->lock);
        if (file->async_file->is_closed) {
-               spin_unlock_irqrestore(&file->async_file->lock, flags);
+               spin_unlock_irqsave(&file->async_file->lock);
                return;
        }
 
        entry = kmalloc(sizeof *entry, GFP_ATOMIC);
        if (!entry) {
-               spin_unlock_irqrestore(&file->async_file->lock, flags);
+               spin_unlock_irqsave(&file->async_file->lock);
                return;
        }
 
@@ -533,7 +531,7 @@ static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
        list_add_tail(&entry->list, &file->async_file->event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
-       spin_unlock_irqrestore(&file->async_file->lock, flags);
+       spin_unlock_irqsave(&file->async_file->lock);
 
        wake_up_interruptible(&file->async_file->poll_wait);
        kill_fasync(&file->async_file->async_queue, SIGIO, POLL_IN);
@@ -599,7 +597,7 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
                return ERR_PTR(-ENOMEM);
 
        kref_init(&ev_file->ref);
-       spin_lock_init(&ev_file->lock);
+       spinlock_init_irqsave(&ev_file->lock);
        INIT_LIST_HEAD(&ev_file->event_list);
        init_waitqueue_head(&ev_file->poll_wait);
        ev_file->uverbs_file = uverbs_file;
diff --git a/kern/drivers/net/udrvr/verbs.c b/kern/drivers/net/udrvr/verbs.c
index 55b5381..edd820b 100644
@@ -383,13 +383,12 @@ EXPORT_SYMBOL(ib_destroy_srq);
 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
 {
        struct ib_qp *qp = context;
-       unsigned long flags;
 
-       spin_lock_irqsave(&qp->device->event_handler_lock, flags);
+       spin_lock_irqsave(&qp->device->event_handler_lock);
        list_for_each_entry(event->element.qp, &qp->open_list, open_list)
                if (event->element.qp->event_handler)
                        event->element.qp->event_handler(event, event->element.qp->qp_context);
-       spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&qp->device->event_handler_lock);
 }
 
 static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -404,7 +403,6 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
                                  void *qp_context)
 {
        struct ib_qp *qp;
-       unsigned long flags;
 
        qp = kzalloc(sizeof *qp, GFP_KERNEL);
        if (!qp)
@@ -418,9 +416,9 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
        qp->qp_num = real_qp->qp_num;
        qp->qp_type = real_qp->qp_type;
 
-       spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+       spin_lock_irqsave(&real_qp->device->event_handler_lock);
        list_add(&qp->open_list, &real_qp->open_list);
-       spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&real_qp->device->event_handler_lock);
 
        return qp;
 }
@@ -940,15 +938,14 @@ EXPORT_SYMBOL(ib_query_qp);
 int ib_close_qp(struct ib_qp *qp)
 {
        struct ib_qp *real_qp;
-       unsigned long flags;
 
        real_qp = qp->real_qp;
        if (real_qp == qp)
                return -EINVAL;
 
-       spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
+       spin_lock_irqsave(&real_qp->device->event_handler_lock);
        list_del(&qp->open_list);
-       spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);
+       spin_unlock_irqsave(&real_qp->device->event_handler_lock);
 
        atomic_dec(&real_qp->usecnt);
        kfree(qp);