
Merge tag 'ntb-4.6' of git://github.com/jonmason/ntb
author	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Mar 2016 18:37:42 +0000 (11:37 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Mar 2016 18:37:42 +0000 (11:37 -0700)
Pull NTB bug fixes from Jon Mason:
 "NTB bug fixes for tasklet from spinning forever, link errors,
  translation window setup, NULL ptr dereference, and ntb-perf errors.

  Also, a modification to the driver API that makes _addr functions
  optional"

* tag 'ntb-4.6' of git://github.com/jonmason/ntb:
  NTB: Remove _addr functions from ntb_hw_amd
  NTB: Make _addr functions optional in the API
  NTB: Fix incorrect clean up routine in ntb_perf
  NTB: Fix incorrect return check in ntb_perf
  ntb: fix possible NULL dereference
  ntb: add missing setup of translation window
  ntb: stop link work when we do not have memory
  ntb: stop tasklet from spinning forever during shutdown.
  ntb: perf test: fix address space confusion
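
The API change noted above ("Make _addr functions optional") means clients can
no longer assume ntb_peer_db_addr() or ntb_peer_spad_addr() are backed by
hardware. A minimal, hypothetical caller-side sketch (the helper name and the
fallback policy are illustrative, not part of this series) of handling the new
-EINVAL return:

#include <linux/ntb.h>

/* Illustrative only: fall back to the register-access helper when the
 * hardware driver (e.g. ntb_hw_amd below) does not implement peer_spad_addr.
 */
static int example_poke_peer_spad(struct ntb_dev *ntb, int idx, u32 val)
{
        phys_addr_t spad_addr;
        int rc;

        rc = ntb_peer_spad_addr(ntb, idx, &spad_addr);
        if (rc == -EINVAL)
                return ntb_peer_spad_write(ntb, idx, val);
        if (rc)
                return rc;

        /* spad_addr is a real peer MMIO address here; it could instead be
         * handed to a DMA engine or ioremap()ed and written directly. */
        return ntb_peer_spad_write(ntb, idx, val);
}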

drivers/ntb/hw/amd/ntb_hw_amd.c
drivers/ntb/ntb_transport.c
drivers/ntb/test/ntb_perf.c
include/linux/ntb.h

diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 588803a..6ccba0d 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -357,20 +357,6 @@ static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
        return 0;
 }
 
-static int amd_ntb_peer_db_addr(struct ntb_dev *ntb,
-                               phys_addr_t *db_addr,
-                               resource_size_t *db_size)
-{
-       struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
-       if (db_addr)
-               *db_addr = (phys_addr_t)(ndev->peer_mmio + AMD_DBREQ_OFFSET);
-       if (db_size)
-               *db_size = sizeof(u32);
-
-       return 0;
-}
-
 static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
 {
        struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -415,20 +401,6 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb,
        return 0;
 }
 
-static int amd_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
-                                 phys_addr_t *spad_addr)
-{
-       struct amd_ntb_dev *ndev = ntb_ndev(ntb);
-
-       if (idx < 0 || idx >= ndev->spad_count)
-               return -EINVAL;
-
-       if (spad_addr)
-               *spad_addr = (phys_addr_t)(ndev->self_mmio + AMD_SPAD_OFFSET +
-                                          ndev->peer_spad + (idx << 2));
-       return 0;
-}
-
 static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx)
 {
        struct amd_ntb_dev *ndev = ntb_ndev(ntb);
@@ -472,12 +444,10 @@ static const struct ntb_dev_ops amd_ntb_ops = {
        .db_clear               = amd_ntb_db_clear,
        .db_set_mask            = amd_ntb_db_set_mask,
        .db_clear_mask          = amd_ntb_db_clear_mask,
-       .peer_db_addr           = amd_ntb_peer_db_addr,
        .peer_db_set            = amd_ntb_peer_db_set,
        .spad_count             = amd_ntb_spad_count,
        .spad_read              = amd_ntb_spad_read,
        .spad_write             = amd_ntb_spad_write,
-       .peer_spad_addr         = amd_ntb_peer_spad_addr,
        .peer_spad_read         = amd_ntb_peer_spad_read,
        .peer_spad_write        = amd_ntb_peer_spad_write,
 };
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index ec4775f..2ef9d91 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -124,6 +124,7 @@ struct ntb_transport_qp {
 
        bool client_ready;
        bool link_is_up;
+       bool active;
 
        u8 qp_num;      /* Only 64 QP's are allowed.  0-63 */
        u64 qp_bit;
@@ -719,6 +720,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
 static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
 {
        qp->link_is_up = false;
+       qp->active = false;
 
        qp->tx_index = 0;
        qp->rx_index = 0;
@@ -827,7 +829,7 @@ static void ntb_transport_link_work(struct work_struct *work)
        struct pci_dev *pdev = ndev->pdev;
        resource_size_t size;
        u32 val;
-       int rc, i, spad;
+       int rc = 0, i, spad;
 
        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < nt->mw_count; i++) {
@@ -897,6 +899,13 @@ static void ntb_transport_link_work(struct work_struct *work)
 out1:
        for (i = 0; i < nt->mw_count; i++)
                ntb_free_mw(nt, i);
+
+       /* if there's an actual failure, we should just bail */
+       if (rc < 0) {
+               ntb_link_disable(ndev);
+               return;
+       }
+
 out:
        if (ntb_link_is_up(ndev, NULL, NULL) == 1)
                schedule_delayed_work(&nt->link_work,
@@ -926,11 +935,13 @@ static void ntb_qp_link_work(struct work_struct *work)
        if (val & BIT(qp->qp_num)) {
                dev_info(&pdev->dev, "qp %d: Link Up\n", qp->qp_num);
                qp->link_is_up = true;
+               qp->active = true;
 
                if (qp->event_handler)
                        qp->event_handler(qp->cb_data, qp->link_is_up);
 
-               tasklet_schedule(&qp->rxc_db_work);
+               if (qp->active)
+                       tasklet_schedule(&qp->rxc_db_work);
        } else if (nt->link_is_up)
                schedule_delayed_work(&qp->link_work,
                                      msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -1411,7 +1422,8 @@ static void ntb_transport_rxc_db(unsigned long data)
 
        if (i == qp->rx_max_entry) {
                /* there is more work to do */
-               tasklet_schedule(&qp->rxc_db_work);
+               if (qp->active)
+                       tasklet_schedule(&qp->rxc_db_work);
        } else if (ntb_db_read(qp->ndev) & BIT_ULL(qp->qp_num)) {
                /* the doorbell bit is set: clear it */
                ntb_db_clear(qp->ndev, BIT_ULL(qp->qp_num));
@@ -1422,7 +1434,8 @@ static void ntb_transport_rxc_db(unsigned long data)
                 * ntb_process_rxc and clearing the doorbell bit:
                 * there might be some more work to do.
                 */
-               tasklet_schedule(&qp->rxc_db_work);
+               if (qp->active)
+                       tasklet_schedule(&qp->rxc_db_work);
        }
 }
 
@@ -1760,6 +1773,8 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
 
        pdev = qp->ndev->pdev;
 
+       qp->active = false;
+
        if (qp->tx_dma_chan) {
                struct dma_chan *chan = qp->tx_dma_chan;
                /* Putting the dma_chan to NULL will force any new traffic to be
@@ -1793,7 +1808,7 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
        qp_bit = BIT_ULL(qp->qp_num);
 
        ntb_db_set_mask(qp->ndev, qp_bit);
-       tasklet_disable(&qp->rxc_db_work);
+       tasklet_kill(&qp->rxc_db_work);
 
        cancel_delayed_work_sync(&qp->link_work);
 
@@ -1886,7 +1901,8 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
 
        ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
 
-       tasklet_schedule(&qp->rxc_db_work);
+       if (qp->active)
+               tasklet_schedule(&qp->rxc_db_work);
 
        return 0;
 }
@@ -2069,7 +2085,8 @@ static void ntb_transport_doorbell_callback(void *data, int vector)
                qp_num = __ffs(db_bits);
                qp = &nt->qp_vec[qp_num];
 
-               tasklet_schedule(&qp->rxc_db_work);
+               if (qp->active)
+                       tasklet_schedule(&qp->rxc_db_work);
 
                db_bits &= ~BIT_ULL(qp_num);
        }
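
The qp->active flag added above, together with switching tasklet_disable() to
tasklet_kill(), is what lets ntb_transport_free_queue() shut the receive path
down cleanly: once active is cleared no call site re-schedules rxc_db_work,
and tasklet_kill() waits out an instance that is already running. (A tasklet
that is merely disabled while still pending keeps getting re-queued by the
softirq, which is the "tasklet spinning forever" case this pull fixes.)
A simplified sketch of that ordering, reusing the fields shown above and
omitting the DMA-channel and queue teardown:

#include <linux/interrupt.h>
#include <linux/ntb.h>

static void example_qp_rx_shutdown(struct ntb_transport_qp *qp)
{
        /* 1. stop new scheduling: every tasklet_schedule(&qp->rxc_db_work)
         *    call site is now guarded by "if (qp->active)" */
        qp->active = false;

        /* 2. mask this queue's doorbell bit so the vector callback stops
         *    raising more receive work */
        ntb_db_set_mask(qp->ndev, BIT_ULL(qp->qp_num));

        /* 3. wait for a running tasklet to finish and discard a pending
         *    one; this is the tasklet_disable() -> tasklet_kill() change */
        tasklet_kill(&qp->rxc_db_work);
}
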
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index c8a37ba..8dfce9c 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -178,7 +178,7 @@ static void perf_copy_callback(void *data)
        atomic_dec(&pctx->dma_sync);
 }
 
-static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
+static ssize_t perf_copy(struct pthr_ctx *pctx, char __iomem *dst,
                         char *src, size_t size)
 {
        struct perf_ctx *perf = pctx->perf;
@@ -189,7 +189,8 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
        dma_cookie_t cookie;
        size_t src_off, dst_off;
        struct perf_mw *mw = &perf->mw;
-       u64 vbase, dst_vaddr;
+       void __iomem *vbase;
+       void __iomem *dst_vaddr;
        dma_addr_t dst_phys;
        int retries = 0;
 
@@ -204,14 +205,14 @@ static ssize_t perf_copy(struct pthr_ctx *pctx, char *dst,
        }
 
        device = chan->device;
-       src_off = (size_t)src & ~PAGE_MASK;
-       dst_off = (size_t)dst & ~PAGE_MASK;
+       src_off = (uintptr_t)src & ~PAGE_MASK;
+       dst_off = (uintptr_t __force)dst & ~PAGE_MASK;
 
        if (!is_dma_copy_aligned(device, src_off, dst_off, size))
                return -ENODEV;
 
-       vbase = (u64)(u64 *)mw->vbase;
-       dst_vaddr = (u64)(u64 *)dst;
+       vbase = mw->vbase;
+       dst_vaddr = dst;
        dst_phys = mw->phys_addr + (dst_vaddr - vbase);
 
        unmap = dmaengine_get_unmap_data(device->dev, 1, GFP_NOWAIT);
@@ -261,13 +262,13 @@ err_get_unmap:
        return 0;
 }
 
-static int perf_move_data(struct pthr_ctx *pctx, char *dst, char *src,
+static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
                          u64 buf_size, u64 win_size, u64 total)
 {
        int chunks, total_chunks, i;
        int copied_chunks = 0;
        u64 copied = 0, result;
-       char *tmp = dst;
+       char __iomem *tmp = dst;
        u64 perf, diff_us;
        ktime_t kstart, kstop, kdiff;
 
@@ -324,7 +325,7 @@ static int ntb_perf_thread(void *data)
        struct perf_ctx *perf = pctx->perf;
        struct pci_dev *pdev = perf->ntb->pdev;
        struct perf_mw *mw = &perf->mw;
-       char *dst;
+       char __iomem *dst;
        u64 win_size, buf_size, total;
        void *src;
        int rc, node, i;
@@ -364,7 +365,7 @@ static int ntb_perf_thread(void *data)
        if (buf_size > MAX_TEST_SIZE)
                buf_size = MAX_TEST_SIZE;
 
-       dst = (char *)mw->vbase;
+       dst = (char __iomem *)mw->vbase;
 
        atomic_inc(&perf->tsync);
        while (atomic_read(&perf->tsync) != perf->perf_threads)
@@ -424,6 +425,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
 {
        struct perf_mw *mw = &perf->mw;
        size_t xlat_size, buf_size;
+       int rc;
 
        if (!size)
                return -EINVAL;
@@ -447,6 +449,13 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size)
                mw->buf_size = 0;
        }
 
+       rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size);
+       if (rc) {
+               dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n");
+               perf_free_mw(perf);
+               return -EIO;
+       }
+
        return 0;
 }
 
@@ -541,6 +550,8 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
                return 0;
 
        buf = kmalloc(64, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
        out_offset = snprintf(buf, 64, "%d\n", perf->run);
        ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
        kfree(buf);
@@ -548,6 +559,21 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf,
        return ret;
 }
 
+static void threads_cleanup(struct perf_ctx *perf)
+{
+       struct pthr_ctx *pctx;
+       int i;
+
+       perf->run = false;
+       for (i = 0; i < MAX_THREADS; i++) {
+               pctx = &perf->pthr_ctx[i];
+               if (pctx->thread) {
+                       kthread_stop(pctx->thread);
+                       pctx->thread = NULL;
+               }
+       }
+}
+
 static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                 size_t count, loff_t *offp)
 {
@@ -563,17 +589,9 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
        if (atomic_read(&perf->tsync) == 0)
                perf->run = false;
 
-       if (perf->run) {
-               /* lets stop the threads */
-               perf->run = false;
-               for (i = 0; i < MAX_THREADS; i++) {
-                       if (perf->pthr_ctx[i].thread) {
-                               kthread_stop(perf->pthr_ctx[i].thread);
-                               perf->pthr_ctx[i].thread = NULL;
-                       } else
-                               break;
-               }
-       } else {
+       if (perf->run)
+               threads_cleanup(perf);
+       else {
                perf->run = true;
 
                if (perf->perf_threads > MAX_THREADS) {
@@ -604,17 +622,11 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
                                kthread_create_on_node(ntb_perf_thread,
                                                       (void *)pctx,
                                                       node, "ntb_perf %d", i);
-                       if (pctx->thread)
+                       if (IS_ERR(pctx->thread)) {
+                               pctx->thread = NULL;
+                               goto err;
+                       } else
                                wake_up_process(pctx->thread);
-                       else {
-                               perf->run = false;
-                               for (i = 0; i < MAX_THREADS; i++) {
-                                       if (pctx->thread) {
-                                               kthread_stop(pctx->thread);
-                                               pctx->thread = NULL;
-                                       }
-                               }
-                       }
 
                        if (perf->run == false)
                                return -ENXIO;
@@ -623,6 +635,10 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf,
        }
 
        return count;
+
+err:
+       threads_cleanup(perf);
+       return -ENXIO;
 }
 
 static const struct file_operations ntb_perf_debugfs_run = {
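
The "incorrect return check" fix visible above relies on
kthread_create_on_node() reporting failure through an ERR_PTR()-encoded
pointer rather than NULL, so the old "if (pctx->thread)" test could never
catch a failed create. The corrected pattern in isolation (a sketch of the
hunk above, not additional code):

#include <linux/err.h>
#include <linux/kthread.h>

        pctx->thread = kthread_create_on_node(ntb_perf_thread, (void *)pctx,
                                              node, "ntb_perf %d", i);
        if (IS_ERR(pctx->thread)) {
                /* don't leave an ERR_PTR behind: threads_cleanup() only
                 * calls kthread_stop() on non-NULL thread pointers */
                pctx->thread = NULL;
                goto err;       /* err: threads_cleanup(); return -ENXIO */
        }
        wake_up_process(pctx->thread);
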
diff --git a/include/linux/ntb.h b/include/linux/ntb.h
index f798e2a..6f47562 100644
--- a/include/linux/ntb.h
+++ b/include/linux/ntb.h
@@ -284,7 +284,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
                /* ops->db_read_mask                    && */
                ops->db_set_mask                        &&
                ops->db_clear_mask                      &&
-               ops->peer_db_addr                       &&
+               /* ops->peer_db_addr                    && */
                /* ops->peer_db_read                    && */
                ops->peer_db_set                        &&
                /* ops->peer_db_clear                   && */
@@ -295,7 +295,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops)
                ops->spad_count                         &&
                ops->spad_read                          &&
                ops->spad_write                         &&
-               ops->peer_spad_addr                     &&
+               /* ops->peer_spad_addr                  && */
                /* ops->peer_spad_read                  && */
                ops->peer_spad_write                    &&
                1;
@@ -757,6 +757,9 @@ static inline int ntb_peer_db_addr(struct ntb_dev *ntb,
                                   phys_addr_t *db_addr,
                                   resource_size_t *db_size)
 {
+       if (!ntb->ops->peer_db_addr)
+               return -EINVAL;
+
        return ntb->ops->peer_db_addr(ntb, db_addr, db_size);
 }
 
@@ -948,6 +951,9 @@ static inline int ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val)
 static inline int ntb_peer_spad_addr(struct ntb_dev *ntb, int idx,
                                     phys_addr_t *spad_addr)
 {
+       if (!ntb->ops->peer_spad_addr)
+               return -EINVAL;
+
        return ntb->ops->peer_spad_addr(ntb, idx, spad_addr);
 }
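
With those guards in ntb_peer_db_addr() and ntb_peer_spad_addr(), a hardware
driver that has no peer MMIO address to report can simply leave the _addr
hooks out of its ops table, as ntb_hw_amd now does. A provider-side sketch
(hypothetical example_* callbacks; the mandatory ops are elided for brevity):

#include <linux/ntb.h>

static const struct ntb_dev_ops example_ntb_ops = {
        /* ... link/mw/db/spad ops still required by ntb_dev_ops_is_valid() ... */
        .peer_db_set            = example_peer_db_set,
        .peer_spad_read         = example_peer_spad_read,
        .peer_spad_write        = example_peer_spad_write,
        /* .peer_db_addr and .peer_spad_addr intentionally unset:
         * ntb_dev_ops_is_valid() accepts that, and the wrappers above return
         * -EINVAL to callers instead of calling through a NULL pointer. */
};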