ioatdma: disable RAID on non-Atom platforms and reenable unaligned copies
author		Brice Goglin <Brice.Goglin@inria.fr>
		Fri, 2 Aug 2013 19:18:03 +0000 (21:18 +0200)
committer	Dan Williams <djbw@fb.com>
		Fri, 23 Aug 2013 05:57:39 +0000 (22:57 -0700)
Disable RAID on non-Atom platforms and remove related fixups such as the
64-byte alignment restriction on legacy DMA operations (introduced in
commit f26df1a1 as a workaround for silicon errata).

Signed-off-by: Brice Goglin <Brice.Goglin@inria.fr>
Acked-by: Dave Jiang <dave.jiang@intel.com>
Acked-by: Jon Mason <jon.mason@intel.com>
Signed-off-by: Dan Williams <djbw@fb.com>
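
For context, the copy_align/xor_align/pq_align fields dropped below are what
dmaengine clients consult before offloading work; copy_align = 6 meant only
64-byte-aligned legacy copies were accepted. A minimal sketch of that
client-side check, assuming the is_dma_copy_aligned() helper from
include/linux/dmaengine.h of this era (the wrapper function and its name are
illustrative, not part of this patch):

	#include <linux/dmaengine.h>

	/*
	 * Sketch only: how a client such as async_memcpy decides whether a
	 * copy can be offloaded.  is_dma_copy_aligned() masks the offsets and
	 * length against (1 << dev->copy_align) - 1, so with copy_align = 6
	 * any transfer not on a 64-byte boundary fell back to the CPU; with
	 * the restriction removed, unaligned copies reach the engine again.
	 */
	static bool can_offload_copy(struct dma_device *dev,
				     size_t src_off, size_t dst_off, size_t len)
	{
		return is_dma_copy_aligned(dev, src_off, dst_off, len);
	}
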
drivers/dma/ioat/dma_v3.c

index b642e03..c94e0d2 100644
@@ -1775,15 +1775,12 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
        dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
        dma->device_free_chan_resources = ioat2_free_chan_resources;
 
-       if (is_xeon_cb32(pdev))
-               dma->copy_align = 6;
-
        dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
        dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
        device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
 
-       if (is_bwd_noraid(pdev))
+       if (is_xeon_cb32(pdev) || is_bwd_noraid(pdev))
                device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS);
 
        /* dca is incompatible with raid operations */
@@ -1793,7 +1790,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
        if (device->cap & IOAT_CAP_XOR) {
                is_raid_device = true;
                dma->max_xor = 8;
-               dma->xor_align = 6;
 
                dma_cap_set(DMA_XOR, dma->cap_mask);
                dma->device_prep_dma_xor = ioat3_prep_xor;
@@ -1812,13 +1808,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
                if (device->cap & IOAT_CAP_RAID16SS) {
                        dma_set_maxpq(dma, 16, 0);
-                       dma->pq_align = 0;
                } else {
                        dma_set_maxpq(dma, 8, 0);
-                       if (is_xeon_cb32(pdev))
-                               dma->pq_align = 6;
-                       else
-                               dma->pq_align = 0;
                }
 
                if (!(device->cap & IOAT_CAP_XOR)) {
@@ -1829,13 +1820,8 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
 
                        if (device->cap & IOAT_CAP_RAID16SS) {
                                dma->max_xor = 16;
-                               dma->xor_align = 0;
                        } else {
                                dma->max_xor = 8;
-                               if (is_xeon_cb32(pdev))
-                                       dma->xor_align = 6;
-                               else
-                                       dma->xor_align = 0;
                        }
                }
        }
@@ -1844,14 +1830,6 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca)
        device->cleanup_fn = ioat3_cleanup_event;
        device->timer_fn = ioat3_timer_event;
 
-       if (is_xeon_cb32(pdev)) {
-               dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
-               dma->device_prep_dma_xor_val = NULL;
-
-               dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
-               dma->device_prep_dma_pq_val = NULL;
-       }
-
        /* starting with CB3.3 super extended descriptors are supported */
        if (device->cap & IOAT_CAP_RAID16SS) {
                char pool_name[14];