BNX2X: Spatch memory allocations
author     Barret Rhoden <brho@cs.berkeley.edu>
Fri, 6 Feb 2015 17:06:17 +0000 (12:06 -0500)
committer  Barret Rhoden <brho@cs.berkeley.edu>
Mon, 2 Mar 2015 16:59:08 +0000 (11:59 -0500)
GFP_ATOMIC maps to 0, the default kmalloc flag in Akaros; GFP_KERNEL maps to
KMALLOC_WAIT.  The GFP_KERNEL replacement in the macro in main.c had to be
done manually.
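
For illustration, the net effect of the flag and kcalloc rules on a typical
allocation call (a sketch only; 'buf' and 'count' are placeholder names, and
the real conversions are in the diff below):

	/* Linux driver code, before the semantic patch: */
	buf = kcalloc(count, sizeof(*buf), GFP_KERNEL);

	/* Akaros equivalent, after the semantic patch: */
	buf = kzmalloc((count) * (sizeof(*buf)), KMALLOC_WAIT);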

Also, this splits the memory-allocation rules out into their own file,
memory.cocci; funcs.cocci is getting too large.
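
The new rules could be applied to a driver tree with a Coccinelle invocation
along these lines (a sketch; the wrapper script Akaros actually uses is not
part of this commit):

	spatch --sp-file scripts/spatch/linux/memory.cocci --in-place \
	       --dir kern/drivers/net/bnx2x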

kern/drivers/net/bnx2x/bnx2x.h
kern/drivers/net/bnx2x/bnx2x_cmn.c
kern/drivers/net/bnx2x/bnx2x_cmn.h
kern/drivers/net/bnx2x/bnx2x_ethtool.c
kern/drivers/net/bnx2x/bnx2x_main.c
kern/drivers/net/bnx2x/bnx2x_sp.c
kern/drivers/net/bnx2x/bnx2x_sriov.c
kern/drivers/net/bnx2x/bnx2x_vfpf.c
scripts/spatch/linux/memory.cocci [new file with mode: 0644]

diff --git a/kern/drivers/net/bnx2x/bnx2x.h b/kern/drivers/net/bnx2x/bnx2x.h
index 7c1b749..1b540a8 100644
@@ -2187,7 +2187,7 @@ void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, uint8_t func,
                            bool is_pf);
 
 #define BNX2X_ILT_ZALLOC(x, y, size)                                   \
-       x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
+       x = dma_zalloc_coherent(&bp->pdev->dev, size, y, KMALLOC_WAIT)
 
 #define BNX2X_ILT_FREE(x, y, size) \
        do { \
diff --git a/kern/drivers/net/bnx2x/bnx2x_cmn.c b/kern/drivers/net/bnx2x/bnx2x_cmn.c
index 7b08a3f..7d75cda 100644
@@ -534,7 +534,7 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, uint16_t parsing_flags,
 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                              uint16_t index, gfp_t gfp_mask)
 {
-       struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+       struct page *page = get_cont_pages(PAGES_PER_SGE_SHIFT, gfp_mask);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;
@@ -547,7 +547,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGES, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-               __free_pages(page, PAGES_PER_SGE_SHIFT);
+               free_cont_pages(page, PAGES_PER_SGE_SHIFT);
                BNX2X_ERR("Can't map sge\n");
                return -ENOMEM;
        }
@@ -615,7 +615,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
-               err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
+               err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, 0);
                if (unlikely(err)) {
                        bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
                        return err;
@@ -636,7 +636,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                                skb_fill_page_desc(skb, frag_id++,
                                                   old_rx_pg.page, offset, len);
                                if (offset)
-                                       get_page(old_rx_pg.page);
+                                       page_incref(old_rx_pg.page);
                                offset += len;
                        }
                }
@@ -654,7 +654,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
 {
        if (fp->rx_frag_size)
-               put_page(virt_to_head_page(data));
+               page_decref(kva2page(data));
        else
                kfree(data);
 }
@@ -663,8 +663,8 @@ static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
 {
        if (fp->rx_frag_size) {
                /* GFP_KERNEL allocations are used only during initialization */
-               if (unlikely(gfp_mask & __GFP_WAIT))
-                       return (void *)__get_free_page(gfp_mask);
+               if (unlikely(gfp_mask & KMALLOC_WAIT))
+                       return (void *)kpage_alloc_addr();
 
                return netdev_alloc_frag(fp->rx_frag_size);
        }
@@ -750,7 +750,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                goto drop;
 
        /* Try to allocate the new data */
-       new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
+       new_data = bnx2x_frag_alloc(fp, 0);
        /* Unmap skb in the pool anyway, as we are going to change
           pool entry status to BNX2X_TPA_STOP even if new skb allocation
           fails. */
@@ -1018,7 +1018,7 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
                        bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
                } else {
                        if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
-                                                      GFP_ATOMIC) == 0)) {
+                                                      0) == 0)) {
                                dma_unmap_single(&bp->pdev->dev,
                                                 dma_unmap_addr(rx_buf, mapping),
                                                 fp->rx_buf_size,
@@ -1397,7 +1397,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                                        &tpa_info->first_buf;
 
                                first_buf->data =
-                                       bnx2x_frag_alloc(fp, GFP_KERNEL);
+                                       bnx2x_frag_alloc(fp, KMALLOC_WAIT);
                                if (!first_buf->data) {
                                        BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
                                                  j);
@@ -1420,7 +1420,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
                             i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
 
                                if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
-                                                      GFP_KERNEL) < 0) {
+                                                      KMALLOC_WAIT) < 0) {
                                        BNX2X_ERR("was only able to allocate %d rx sges\n",
                                                  i);
                                        BNX2X_ERR("disabling TPA for queue[%d]\n",
@@ -4361,7 +4361,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
         * fp->eth_q_stats.rx_skb_alloc_failed = 0
         */
        for (i = 0; i < rx_ring_size; i++) {
-               if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
+               if (bnx2x_alloc_rx_data(bp, fp, ring_prod, KMALLOC_WAIT) < 0) {
                        failure_cnt++;
                        continue;
                }
@@ -4473,9 +4473,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
                           "allocating tx memory of fp %d cos %d\n",
                           index, cos);
 
-                       txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
-                                                     sizeof(struct sw_tx_bd),
-                                                     GFP_KERNEL);
+                       txdata->tx_buf_ring = kzmalloc((NUM_TX_BD) * (sizeof(struct sw_tx_bd)),
+                                                      KMALLOC_WAIT);
                        if (!txdata->tx_buf_ring)
                                goto alloc_mem_err;
                        txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
@@ -4489,7 +4488,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        if (!skip_rx_queue(bp, index)) {
                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                bnx2x_fp(bp, index, rx_buf_ring) =
-                       kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+                       kzmalloc((NUM_RX_BD) * (sizeof(struct sw_rx_bd)),
+                                KMALLOC_WAIT);
                if (!bnx2x_fp(bp, index, rx_buf_ring))
                        goto alloc_mem_err;
                bnx2x_fp(bp, index, rx_desc_ring) =
@@ -4507,8 +4507,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
 
                /* SGE ring */
                bnx2x_fp(bp, index, rx_page_ring) =
-                       kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
-                               GFP_KERNEL);
+                       kzmalloc((NUM_RX_SGE) * (sizeof(struct sw_rx_page)),
+                                KMALLOC_WAIT);
                if (!bnx2x_fp(bp, index, rx_page_ring))
                        goto alloc_mem_err;
                bnx2x_fp(bp, index, rx_sge_ring) =
@@ -4638,13 +4638,13 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
        bp->fp_array_size = fp_array_size;
        BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
 
-       fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
+       fp = kzmalloc((bp->fp_array_size) * (sizeof(*fp)), KMALLOC_WAIT);
        if (!fp)
                goto alloc_err;
        for (i = 0; i < bp->fp_array_size; i++) {
                fp[i].tpa_info =
-                       kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
-                               sizeof(struct bnx2x_agg_info), GFP_KERNEL);
+                       kzmalloc((ETH_MAX_AGGREGATION_QUEUES_E1H_E2) * (sizeof(struct bnx2x_agg_info)),
+                                KMALLOC_WAIT);
                if (!(fp[i].tpa_info))
                        goto alloc_err;
        }
@@ -4652,14 +4652,14 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
        bp->fp = fp;
 
        /* allocate sp objs */
-       bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
-                             GFP_KERNEL);
+       bp->sp_objs = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_sp_objs)),
+                              KMALLOC_WAIT);
        if (!bp->sp_objs)
                goto alloc_err;
 
        /* allocate fp_stats */
-       bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
-                              GFP_KERNEL);
+       bp->fp_stats = kzmalloc((bp->fp_array_size) * (sizeof(struct bnx2x_fp_stats)),
+                               KMALLOC_WAIT);
        if (!bp->fp_stats)
                goto alloc_err;
 
@@ -4668,19 +4668,19 @@ int bnx2x_alloc_mem_bp(struct bnx2x *bp)
                BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
        BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);
 
-       bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
-                               GFP_KERNEL);
+       bp->bnx2x_txq = kzmalloc((txq_array_size) * (sizeof(struct bnx2x_fp_txdata)),
+                                KMALLOC_WAIT);
        if (!bp->bnx2x_txq)
                goto alloc_err;
 
        /* msix table */
-       tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
+       tbl = kzmalloc((msix_table_size) * (sizeof(*tbl)), KMALLOC_WAIT);
        if (!tbl)
                goto alloc_err;
        bp->msix_table = tbl;
 
        /* ilt */
-       ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+       ilt = kzmalloc(sizeof(*ilt), KMALLOC_WAIT);
        if (!ilt)
                goto alloc_err;
        bp->ilt = ilt;
diff --git a/kern/drivers/net/bnx2x/bnx2x_cmn.h b/kern/drivers/net/bnx2x/bnx2x_cmn.h
index d62a218..a9d2dcd 100644
@@ -46,7 +46,7 @@ extern int bnx2x_num_queues;
 
 #define BNX2X_PCI_ALLOC(y, size)                                       \
 ({                                                                     \
-       void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+       void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, KMALLOC_WAIT); \
        if (x)                                                          \
                DP(NETIF_MSG_HW,                                        \
                   "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n",        \
@@ -55,7 +55,7 @@ extern int bnx2x_num_queues;
 })
 #define BNX2X_PCI_FALLOC(y, size)                                      \
 ({                                                                     \
-       void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+       void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, KMALLOC_WAIT); \
        if (x) {                                                        \
                memset(x, 0xff, size);                                  \
                DP(NETIF_MSG_HW,                                        \
@@ -808,7 +808,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 
        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGES, DMA_FROM_DEVICE);
-       __free_pages(page, PAGES_PER_SGE_SHIFT);
+       free_cont_pages(page, PAGES_PER_SGE_SHIFT);
 
        sw_buf->page = NULL;
        sge->addr_hi = 0;
diff --git a/kern/drivers/net/bnx2x/bnx2x_ethtool.c b/kern/drivers/net/bnx2x/bnx2x_ethtool.c
index 4242cdf..78f6903 100644
@@ -2833,7 +2833,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
        if (BP_NOMCP(bp))
                return 0;
 
-       buf = kmalloc(CRC_BUFF_SIZE, GFP_KERNEL);
+       buf = kmalloc(CRC_BUFF_SIZE, KMALLOC_WAIT);
        if (!buf) {
                DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
                rc = -ENOMEM;
diff --git a/kern/drivers/net/bnx2x/bnx2x_main.c b/kern/drivers/net/bnx2x/bnx2x_main.c
index b7fcd8a..a143c80 100644
@@ -6474,11 +6474,11 @@ void bnx2x_post_irq_nic_init(struct bnx2x *bp, uint32_t load_code)
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
        bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
-                                           &bp->gunzip_mapping, GFP_KERNEL);
+                                           &bp->gunzip_mapping, KMALLOC_WAIT);
        if (bp->gunzip_buf  == NULL)
                goto gunzip_nomem1;
 
-       bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+       bp->strm = kmalloc(sizeof(*bp->strm), KMALLOC_WAIT);
        if (bp->strm  == NULL)
                goto gunzip_nomem2;
 
@@ -8281,8 +8281,8 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
                        goto alloc_mem_err;
                allocated += bp->context[i].size;
        }
-       bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
-                                GFP_KERNEL);
+       bp->ilt->lines = kzmalloc((ILT_MAX_LINES) * (sizeof(struct ilt_line)),
+                                 KMALLOC_WAIT);
        if (!bp->ilt->lines)
                goto alloc_mem_err;
 
@@ -10351,7 +10351,7 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
        up(&bnx2x_prev_sem);
 
        /* Create an entry for this path and add it */
-       tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
+       tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), KMALLOC_WAIT);
        if (!tmp_list) {
                BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
                return -ENOMEM;
@@ -11895,7 +11895,7 @@ static void bnx2x_read_fwinfo(struct bnx2x *bp)
        i += PCI_VPD_LRDT_TAG_SIZE;
 
        if (block_end > BNX2X_VPD_LEN) {
-               vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+               vpd_extended_data = kmalloc(block_end, KMALLOC_WAIT);
                if (vpd_extended_data  == NULL)
                        goto out_not_found;
 
@@ -12233,7 +12233,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
 {
        int mc_count = netdev_mc_count(bp->dev);
        struct bnx2x_mcast_list_elem *mc_mac =
-               kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
+               kzmalloc((mc_count) * (sizeof(*mc_mac)), 0);
        struct netdev_hw_addr *ha;
 
        if (!mc_mac)
@@ -12910,7 +12910,7 @@ static void be16_to_cpu_n(const uint8_t *_source, uint8_t *_target,
 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                            \
 do {                                                                   \
        uint32_t len = be32_to_cpu(fw_hdr->arr.len);                            \
-       bp->arr = kmalloc(len, GFP_KERNEL);                             \
+       bp->arr = kmalloc(len, KMALLOC_WAIT);                           \
        if (!bp->arr)                                                   \
                goto lbl;                                               \
        func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
@@ -14380,7 +14380,7 @@ static int bnx2x_register_cnic(struct ether *dev, struct cnic_ops *ops,
 
        bp->cnic_enabled = true;
 
-       bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       bp->cnic_kwq = kzmalloc(PAGE_SIZE, KMALLOC_WAIT);
        if (!bp->cnic_kwq)
                return -ENOMEM;
 
diff --git a/kern/drivers/net/bnx2x/bnx2x_sp.c b/kern/drivers/net/bnx2x/bnx2x_sp.c
index 347705e..9a11b9a 100644
@@ -242,7 +242,7 @@ static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
        struct bnx2x *bp)
 {
        DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
-       return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+       return kzmalloc(sizeof(struct bnx2x_exeq_elem), 0);
 }
 
 /************************ raw_obj functions ***********************************/
@@ -1546,7 +1546,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
        /* Allocate a new registry element if needed. */
        if (!restore &&
            ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
-               reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+               reg_elem = kzmalloc(sizeof(*reg_elem), 0);
                if (!reg_elem)
                        return -ENOMEM;
 
@@ -2403,7 +2403,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
                macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
 
        /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
-       new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+       new_cmd = kzmalloc(total_sz, 0);
 
        if (!new_cmd)
                return -ENOMEM;
@@ -3382,7 +3382,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
                if (!list_empty(&o->registry.exact_match.macs))
                        return 0;
 
-               elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
+               elem = kzmalloc((len) * (sizeof(*elem)), 0);
                if (!elem) {
                        BNX2X_ERR("Failed to allocate registry memory\n");
                        return -ENOMEM;
diff --git a/kern/drivers/net/bnx2x/bnx2x_sriov.c b/kern/drivers/net/bnx2x/bnx2x_sriov.c
index f739068..52fefb4 100644
@@ -590,8 +590,8 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
        else
                set_bit(RAMROD_COMP_WAIT, &mcast.ramrod_flags);
        if (mc_num) {
-               mc = kzalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
-                            GFP_KERNEL);
+               mc = kzmalloc(mc_num * sizeof(struct bnx2x_mcast_list_elem),
+                             KMALLOC_WAIT);
                if (!mc) {
                        BNX2X_ERR("Cannot Configure mulicasts due to lack of memory\n");
                        return -ENOMEM;
@@ -1272,7 +1272,7 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
        }
 
        /* allocate the vfs database */
-       bp->vfdb = kzalloc(sizeof(*(bp->vfdb)), GFP_KERNEL);
+       bp->vfdb = kzmalloc(sizeof(*(bp->vfdb)), KMALLOC_WAIT);
        if (!bp->vfdb) {
                BNX2X_ERR("failed to allocate vf database\n");
                err = -ENOMEM;
@@ -1299,8 +1299,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
           num_vfs_param, iov->nr_virtfn);
 
        /* allocate the vf array */
-       bp->vfdb->vfs = kzalloc(sizeof(struct bnx2x_virtf) *
-                               BNX2X_NR_VIRTFN(bp), GFP_KERNEL);
+       bp->vfdb->vfs = kzmalloc(sizeof(struct bnx2x_virtf) * BNX2X_NR_VIRTFN(bp),
+                                KMALLOC_WAIT);
        if (!bp->vfdb->vfs) {
                BNX2X_ERR("failed to allocate vf array\n");
                err = -ENOMEM;
@@ -1324,9 +1324,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
        }
 
        /* allocate the queue arrays for all VFs */
-       bp->vfdb->vfqs = kzalloc(
-               BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
-               GFP_KERNEL);
+       bp->vfdb->vfqs = kzmalloc(BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+                                 KMALLOC_WAIT);
 
        if (!bp->vfdb->vfqs) {
                BNX2X_ERR("failed to allocate vf queue array\n");
diff --git a/kern/drivers/net/bnx2x/bnx2x_vfpf.c b/kern/drivers/net/bnx2x/bnx2x_vfpf.c
index 3398f5e..9b1f0cd 100644
@@ -1573,7 +1573,7 @@ static int bnx2x_vf_mbx_macvlan_list(struct bnx2x *bp,
              sizeof(struct bnx2x_vf_mac_vlan_filter) +
              sizeof(struct bnx2x_vf_mac_vlan_filters);
 
-       fl = kzalloc(fsz, GFP_KERNEL);
+       fl = kzmalloc(fsz, KMALLOC_WAIT);
        if (!fl)
                return -ENOMEM;
 
diff --git a/scripts/spatch/linux/memory.cocci b/scripts/spatch/linux/memory.cocci
new file mode 100644
index 0000000..3b65631
--- /dev/null
@@ -0,0 +1,78 @@
+@@
+@@
+-GFP_ATOMIC
++0
+
+@@
+@@
+-GFP_KERNEL
++KMALLOC_WAIT
+
+@@
+@@
+-GFP_WAIT
++KMALLOC_WAIT
+
+@@
+@@
+-__GFP_WAIT
++KMALLOC_WAIT
+
+@@
+expression SZ;
+expression FL;
+@@
+-kzalloc(SZ, FL)
++kzmalloc(SZ, FL)
+
+@@
+expression SZ;
+expression CNT;
+expression FL;
+@@
+-kcalloc(CNT, SZ, FL)
++kzmalloc((CNT) * (SZ), FL)
+
+@@
+expression ADDR;
+expression ORDER;
+@@
+-__free_pages(ADDR, ORDER)
++free_cont_pages(ADDR, ORDER)
+
+@@
+expression FLAGS;
+expression ORDER;
+@@
+-alloc_pages(FLAGS, ORDER)
++get_cont_pages(ORDER, FLAGS)
+
+@@
+expression FLAGS;
+@@
+-__get_free_page(FLAGS)
++kpage_alloc_addr()
+
+@@
+expression PG;
+@@
+-get_page(PG)
++page_incref(PG)
+
+@@
+expression PG;
+@@
+-put_page(PG)
++page_decref(PG)
+
+@@
+expression KVA;
+@@
+-virt_to_head_page(KVA)
++kva2page(KVA)
+
+@@
+expression KVA;
+@@
+-virt_to_bus(KVA)
++PADDR(KVA)