
Merge branch 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg...
author Linus Torvalds <torvalds@linux-foundation.org>
Wed, 30 Dec 2009 21:14:25 +0000 (13:14 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 30 Dec 2009 21:14:25 +0000 (13:14 -0800)
* 'slab/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLAB: Fix lockdep annotation breakage

157 files changed:
Documentation/block/00-INDEX
Documentation/block/as-iosched.txt [deleted file]
Documentation/kvm/api.txt
Documentation/sound/alsa/Procfile.txt
Documentation/vgaarbiter.txt
arch/ia64/kvm/vcpu.h
arch/ia64/kvm/vmm.c
arch/ia64/kvm/vtlb.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kvm/book3s_64_mmu.c
arch/x86/include/asm/kvm.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kvm/lapic.c
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/x86.c
arch/x86/pci/bus_numa.c
block/blk-barrier.c
block/blk-settings.c
block/cfq-iosched.c
drivers/block/DAC960.c
drivers/block/aoe/aoecmd.c
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_proc.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_worker.c
drivers/block/mg_disk.c
drivers/char/hw_random/core.c
drivers/md/md.c
drivers/net/3c507.c
drivers/net/Kconfig
drivers/net/benet/be.h
drivers/net/benet/be_cmds.c
drivers/net/benet/be_cmds.h
drivers/net/benet/be_ethtool.c
drivers/net/bnx2x_main.c
drivers/net/bonding/bond_3ad.c
drivers/net/gianfar.c
drivers/net/ibmlana.c
drivers/net/igb/e1000_82575.c
drivers/net/igb/e1000_phy.c
drivers/net/igb/igb_ethtool.c
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/pcnet32.c
drivers/net/sfc/efx.c
drivers/net/sfc/falcon.c
drivers/net/sfc/falcon_xmac.c
drivers/net/sfc/mcdi_phy.c
drivers/net/sfc/net_driver.h
drivers/net/sfc/nic.c
drivers/net/sfc/qt202x_phy.c
drivers/net/sfc/siena.c
drivers/net/sfc/tenxpress.c
drivers/net/sfc/tx.c
drivers/net/tun.c
drivers/net/ucc_geth.c
drivers/net/via-rhine.c
drivers/net/vxge/vxge-main.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/ath/ath9k/mac.h
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/ath/ath9k/pci.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/b43/dma.c
drivers/net/wireless/b43/dma.h
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/iwlwifi/iwl-3945.h
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-5000-hw.h
drivers/net/wireless/iwlwifi/iwl-5000.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-csr.h
drivers/net/wireless/iwlwifi/iwl-dev.h
drivers/net/wireless/iwlwifi/iwl-eeprom.c
drivers/net/wireless/iwlwifi/iwl-eeprom.h
drivers/net/wireless/iwlwifi/iwl-hcmd.c
drivers/net/wireless/iwlwifi/iwl-rx.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-sta.c
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/iwmc3200wifi/iwm.h
drivers/net/wireless/iwmc3200wifi/netdev.c
drivers/net/wireless/iwmc3200wifi/rx.c
drivers/net/wireless/libertas/mesh.c
drivers/net/wireless/libertas/scan.c
drivers/net/wireless/libertas/wext.c
drivers/net/wireless/libertas_tf/main.c
drivers/net/wireless/orinoco/wext.c
drivers/net/wireless/rt2x00/rt2800.h
drivers/net/wireless/rt2x00/rt2800lib.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt61pci.c
drivers/net/wireless/rtl818x/rtl8180_dev.c
drivers/net/wireless/wl12xx/wl1251_boot.c
drivers/net/wireless/wl12xx/wl1271_cmd.c
drivers/net/wireless/zd1211rw/zd_chip.c
drivers/net/wireless/zd1211rw/zd_chip.h
drivers/net/wireless/zd1211rw/zd_mac.c
drivers/pci/hotplug/shpchp.h
drivers/pci/intel-iommu.c
drivers/pci/intr_remapping.c
drivers/pci/pci-acpi.c
drivers/pci/pci.c
drivers/pci/pci.h
drivers/pci/pcie/aer/Kconfig.debug
drivers/pci/pcie/aer/aer_inject.c
drivers/pci/pcie/aer/aerdrv.c
drivers/pci/pcie/aer/aerdrv_acpi.c
drivers/pci/pcie/aer/aerdrv_core.c
drivers/pci/pcie/aer/aerdrv_errprint.c
drivers/pci/pcie/aspm.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/quirks.c
drivers/pci/search.c
drivers/pcmcia/cardbus.c
drivers/platform/x86/dell-wmi.c
drivers/platform/x86/wmi.c
fs/namei.c
include/linux/blkdev.h
include/linux/ieee80211.h
include/linux/inetdevice.h
include/linux/kfifo.h
include/linux/pci.h
include/linux/sysctl.h
include/net/mac80211.h
include/scsi/libsrp.h
mm/mmap.c
mm/nommu.c
mm/util.c
net/core/pktgen.c
net/ipv4/devinet.c
net/ipv4/fib_frontend.c
net/mac80211/ht.c
net/mac80211/ibss.c
net/mac80211/main.c
net/mac80211/mlme.c
net/mac80211/tx.c
net/mac80211/util.c
net/wireless/mlme.c
net/wireless/scan.c
net/xfrm/xfrm_policy.c
sound/arm/aaci.c
sound/core/pcm_native.c
sound/pci/hda/hda_beep.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_codec.h
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_sigmatel.c
virt/kvm/assigned-dev.c
virt/kvm/kvm_main.c

index 961a051..a406286 100644 (file)
@@ -1,7 +1,5 @@
 00-INDEX
        - This file
-as-iosched.txt
-       - Anticipatory IO scheduler
 barrier.txt
        - I/O Barriers
 biodoc.txt
diff --git a/Documentation/block/as-iosched.txt b/Documentation/block/as-iosched.txt
deleted file mode 100644 (file)
index 738b72b..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-Anticipatory IO scheduler
--------------------------
-Nick Piggin <piggin@cyberone.com.au>    13 Sep 2003
-
-Attention! Database servers, especially those using "TCQ" disks should
-investigate performance with the 'deadline' IO scheduler. Any system with high
-disk performance requirements should do so, in fact.
-
-If you see unusual performance characteristics of your disk systems, or you
-see big performance regressions versus the deadline scheduler, please email
-me. Database users don't bother unless you're willing to test a lot of patches
-from me ;) its a known issue.
-
-Also, users with hardware RAID controllers, doing striping, may find
-highly variable performance results with using the as-iosched. The
-as-iosched anticipatory implementation is based on the notion that a disk
-device has only one physical seeking head.  A striped RAID controller
-actually has a head for each physical device in the logical RAID device.
-
-However, setting the antic_expire (see tunable parameters below) produces
-very similar behavior to the deadline IO scheduler.
-
-Selecting IO schedulers
------------------------
-Refer to Documentation/block/switching-sched.txt for information on
-selecting an io scheduler on a per-device basis.
-
-Anticipatory IO scheduler Policies
-----------------------------------
-The as-iosched implementation implements several layers of policies
-to determine when an IO request is dispatched to the disk controller.
-Here are the policies outlined, in order of application.
-
-1. one-way Elevator algorithm.
-
-The elevator algorithm is similar to that used in deadline scheduler, with
-the addition that it allows limited backward movement of the elevator
-(i.e. seeks backwards).  A seek backwards can occur when choosing between
-two IO requests where one is behind the elevator's current position, and
-the other is in front of the elevator's position. If the seek distance to
-the request in back of the elevator is less than half the seek distance to
-the request in front of the elevator, then the request in back can be chosen.
-Backward seeks are also limited to a maximum of MAXBACK (1024*1024) sectors.
-This favors forward movement of the elevator, while allowing opportunistic
-"short" backward seeks.
-
-2. FIFO expiration times for reads and for writes.
-
-This is again very similar to the deadline IO scheduler.  The expiration
-times for requests on these lists is tunable using the parameters read_expire
-and write_expire discussed below.  When a read or a write expires in this way,
-the IO scheduler will interrupt its current elevator sweep or read anticipation
-to service the expired request.
-
-3. Read and write request batching
-
-A batch is a collection of read requests or a collection of write
-requests.  The as scheduler alternates dispatching read and write batches
-to the driver.  In the case a read batch, the scheduler submits read
-requests to the driver as long as there are read requests to submit, and
-the read batch time limit has not been exceeded (read_batch_expire).
-The read batch time limit begins counting down only when there are
-competing write requests pending.
-
-In the case of a write batch, the scheduler submits write requests to
-the driver as long as there are write requests available, and the
-write batch time limit has not been exceeded (write_batch_expire).
-However, the length of write batches will be gradually shortened
-when read batches frequently exceed their time limit.
-
-When changing between batch types, the scheduler waits for all requests
-from the previous batch to complete before scheduling requests for the
-next batch.
-
-The read and write fifo expiration times described in policy 2 above
-are checked only when in scheduling IO of a batch for the corresponding
-(read/write) type.  So for example, the read FIFO timeout values are
-tested only during read batches.  Likewise, the write FIFO timeout
-values are tested only during write batches.  For this reason,
-it is generally not recommended for the read batch time
-to be longer than the write expiration time, nor for the write batch
-time to exceed the read expiration time (see tunable parameters below).
-
-When the IO scheduler changes from a read to a write batch,
-it begins the elevator from the request that is on the head of the
-write expiration FIFO.  Likewise, when changing from a write batch to
-a read batch, scheduler begins the elevator from the first entry
-on the read expiration FIFO.
-
-4. Read anticipation.
-
-Read anticipation occurs only when scheduling a read batch.
-This implementation of read anticipation allows only one read request
-to be dispatched to the disk controller at a time.  In
-contrast, many write requests may be dispatched to the disk controller
-at a time during a write batch.  It is this characteristic that can make
-the anticipatory scheduler perform anomalously with controllers supporting
-TCQ, or with hardware striped RAID devices. Setting the antic_expire
-queue parameter (see below) to zero disables this behavior, and the 
-anticipatory scheduler behaves essentially like the deadline scheduler.
-
-When read anticipation is enabled (antic_expire is not zero), reads
-are dispatched to the disk controller one at a time.
-At the end of each read request, the IO scheduler examines its next
-candidate read request from its sorted read list.  If that next request
-is from the same process as the request that just completed,
-or if the next request in the queue is "very close" to the
-just completed request, it is dispatched immediately.  Otherwise,
-statistics (average think time, average seek distance) on the process
-that submitted the just completed request are examined.  If it seems
-likely that that process will submit another request soon, and that
-request is likely to be near the just completed request, then the IO
-scheduler will stop dispatching more read requests for up to (antic_expire)
-milliseconds, hoping that process will submit a new request near the one
-that just completed.  If such a request is made, then it is dispatched
-immediately.  If the antic_expire wait time expires, then the IO scheduler
-will dispatch the next read request from the sorted read queue.
-
-To decide whether an anticipatory wait is worthwhile, the scheduler
-maintains statistics for each process that can be used to compute
-mean "think time" (the time between read requests), and mean seek
-distance for that process.  One observation is that these statistics
-are associated with each process, but those statistics are not associated
-with a specific IO device.  So for example, if a process is doing IO
-on several file systems on separate devices, the statistics will be
-a combination of IO behavior from all those devices.
-
-
-Tuning the anticipatory IO scheduler
-------------------------------------
-When using 'as', the anticipatory IO scheduler there are 5 parameters under
-/sys/block/*/queue/iosched/. All are units of milliseconds.
-
-The parameters are:
-* read_expire
-    Controls how long until a read request becomes "expired". It also controls the
-    interval between which expired requests are served, so set to 50, a request
-    might take anywhere < 100ms to be serviced _if_ it is the next on the
-    expired list. Obviously request expiration strategies won't make the disk
-    go faster. The result basically equates to the timeslice a single reader
-    gets in the presence of other IO. 100*((seek time / read_expire) + 1) is
-    very roughly the % streaming read efficiency your disk should get with
-    multiple readers.
-
-* read_batch_expire
-    Controls how much time a batch of reads is given before pending writes are
-    served. A higher value is more efficient. This might be set below read_expire
-    if writes are to be given higher priority than reads, but reads are to be
-    as efficient as possible when there are no writes. Generally though, it
-    should be some multiple of read_expire.
-
-* write_expire, and
-* write_batch_expire are equivalent to the above, for writes.
-
-* antic_expire
-    Controls the maximum amount of time we can anticipate a good read (one
-    with a short seek distance from the most recently completed request) before
-    giving up. Many other factors may cause anticipation to be stopped early,
-    or some processes will not be "anticipated" at all. Should be a bit higher
-    for big seek time devices though not a linear correspondence - most
-    processes have only a few ms thinktime.
-
-In addition to the tunables above there is a read-only file named est_time
-which, when read, will show:
-
-    - The probability of a task exiting without a cooperating task
-      submitting an anticipated IO.
-
-    - The current mean think time.
-
-    - The seek distance used to determine if an incoming IO is better.
-
index e1a1141..2811e45 100644 (file)
@@ -685,7 +685,7 @@ struct kvm_vcpu_events {
                __u8 pad;
        } nmi;
        __u32 sipi_vector;
-       __u32 flags;   /* must be zero */
+       __u32 flags;
 };
 
 4.30 KVM_SET_VCPU_EVENTS
@@ -701,6 +701,14 @@ vcpu.
 
 See KVM_GET_VCPU_EVENTS for the data structure.
 
+Fields that may be modified asynchronously by running VCPUs can be excluded
+from the update. These fields are nmi.pending and sipi_vector. Keep the
+corresponding bits in the flags field cleared to suppress overwriting the
+current in-kernel state. The bits are:
+
+KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
+
 
 5. The kvm_run structure
 
index 719a819..07301de 100644 (file)
@@ -95,7 +95,7 @@ card*/pcm*/xrun_debug
        It takes an integer value, can be changed by writing to this
        file, such as
 
-                # cat 5 > /proc/asound/card0/pcm0p/xrun_debug
+                # echo 5 > /proc/asound/card0/pcm0p/xrun_debug
 
        The value consists of the following bit flags:
          bit 0 = Enable XRUN/jiffies debug messages
index 987f9b0..43a9b06 100644 (file)
@@ -103,7 +103,7 @@ I.2 libpciaccess
 ----------------
 
 To use the vga arbiter char device it was implemented an API inside the
-libpciaccess library. One fieldd was added to struct pci_device (each device
+libpciaccess library. One field was added to struct pci_device (each device
 on the system):
 
     /* the type of resource decoded by the device */
index 360724d..988911b 100644 (file)
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)   do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+       volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)                                          \
        do {                                                            \
                __u32 *ia64_spinlock_ptr = (__u32 *) (x);               \
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)                                \
        do { barrier();                         \
-               ((spinlock_t *)x)->raw_lock.lock = 0; } \
+               ((vmm_spinlock_t *)x)->lock = 0; } \
 while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
        I_TLB = 1,
        D_TLB = 2
index f4b4c89..7a62f75 100644 (file)
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
        return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
        _vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
        _vmm_raw_spin_unlock(lock);
 }
index 20b3852..4332f7e 100644 (file)
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
        u64 i, dirty_pages = 1;
        u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-       spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+       vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
        void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
index e8dfdbd..cadbed6 100644 (file)
@@ -1107,6 +1107,12 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
        list_for_each_entry(dev, &bus->devices, bus_list) {
                struct dev_archdata *sd = &dev->dev.archdata;
 
+               /* Cardbus can call us to add new devices to a bus, so ignore
+                * those who are already fully discovered
+                */
+               if (dev->is_added)
+                       continue;
+
                /* Setup OF node pointer in archdata */
                sd->of_node = pci_device_to_OF_node(dev);
 
@@ -1147,6 +1153,13 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibios_fixup_bus);
 
+void __devinit pci_fixup_cardbus(struct pci_bus *bus)
+{
+       /* Now fixup devices on that bus */
+       pcibios_setup_bus_devices(bus);
+}
+
+
 static int skip_isa_ioresource_align(struct pci_dev *dev)
 {
        if ((ppc_pci_flags & PPC_PCI_CAN_SKIP_ISA_ALIGN) &&
index 5598f88..e4beeb3 100644 (file)
@@ -390,6 +390,26 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 {
        u64 rb = 0, rs = 0;
 
+       /*
+        * According to Book3 2.01 mtsrin is implemented as:
+        *
+        * The SLB entry specified by (RB)32:35 is loaded from register
+        * RS, as follows.
+        *
+        * SLBE Bit     Source                  SLB Field
+        *
+        * 0:31         0x0000_0000             ESID-0:31
+        * 32:35        (RB)32:35               ESID-32:35
+        * 36           0b1                     V
+        * 37:61        0x00_0000|| 0b0         VSID-0:24
+        * 62:88        (RS)37:63               VSID-25:51
+        * 89:91        (RS)33:35               Ks Kp N
+        * 92           (RS)36                  L ((RS)36 must be 0b0)
+        * 93           0b0                     C
+        */
+
+       dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);
+
        /* ESID = srnum */
        rb |= (srnum & 0xf) << 28;
        /* Set the valid bit */
@@ -400,7 +420,7 @@ static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
        /* VSID = VSID */
        rs |= (value & 0xfffffff) << 12;
        /* flags = flags */
-       rs |= ((value >> 27) & 0xf) << 9;
+       rs |= ((value >> 28) & 0x7) << 9;
 
        kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
 }
index 950df43..f46b79f 100644 (file)
@@ -254,6 +254,10 @@ struct kvm_reinject_control {
        __u8 reserved[31];
 };
 
+/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
+#define KVM_VCPUEVENT_VALID_NMI_PENDING        0x00000001
+#define KVM_VCPUEVENT_VALID_SIPI_VECTOR        0x00000002
+
 /* for KVM_GET/SET_VCPU_EVENTS */
 struct kvm_vcpu_events {
        struct {
index 811bfab..bcdb708 100644 (file)
@@ -338,6 +338,18 @@ static inline unsigned long uv_global_gru_mmr_address(int pnode, unsigned long o
        return UV_GLOBAL_GRU_MMR_BASE | offset | (pnode << uv_hub_info->m_val);
 }
 
+static inline void uv_write_global_mmr8(int pnode, unsigned long offset,
+                               unsigned char val)
+{
+       writeb(val, uv_global_mmr64_address(pnode, offset));
+}
+
+static inline unsigned char uv_read_global_mmr8(int pnode,
+                                                unsigned long offset)
+{
+       return readb(uv_global_mmr64_address(pnode, offset));
+}
+
 /*
  * Access hub local MMRs. Faster than using global space but only local MMRs
  * are accessible.
@@ -457,11 +469,17 @@ static inline void uv_set_scir_bits(unsigned char value)
        }
 }
 
+static inline unsigned long uv_scir_offset(int apicid)
+{
+       return SCIR_LOCAL_MMR_BASE | (apicid & 0x3f);
+}
+
 static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
 {
        if (uv_cpu_hub_info(cpu)->scir.state != value) {
+               uv_write_global_mmr8(uv_cpu_to_pnode(cpu),
+                               uv_cpu_hub_info(cpu)->scir.offset, value);
                uv_cpu_hub_info(cpu)->scir.state = value;
-               uv_write_local_mmr8(uv_cpu_hub_info(cpu)->scir.offset, value);
        }
 }
 
index d56b0ef..5f92494 100644 (file)
@@ -629,8 +629,10 @@ void __init uv_system_init(void)
        uv_rtc_init();
 
        for_each_present_cpu(cpu) {
+               int apicid = per_cpu(x86_cpu_to_apicid, cpu);
+
                nid = cpu_to_node(cpu);
-               pnode = uv_apicid_to_pnode(per_cpu(x86_cpu_to_apicid, cpu));
+               pnode = uv_apicid_to_pnode(apicid);
                blade = boot_pnode_to_blade(pnode);
                lcpu = uv_blade_info[blade].nr_possible_cpus;
                uv_blade_info[blade].nr_possible_cpus++;
@@ -651,15 +653,13 @@ void __init uv_system_init(void)
                uv_cpu_hub_info(cpu)->gnode_extra = gnode_extra;
                uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
                uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
-               uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+               uv_cpu_hub_info(cpu)->scir.offset = uv_scir_offset(apicid);
                uv_node_to_blade[nid] = blade;
                uv_cpu_to_blade[cpu] = blade;
                max_pnode = max(pnode, max_pnode);
 
-               printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, "
-                       "lcpu %d, blade %d\n",
-                       cpu, per_cpu(x86_cpu_to_apicid, cpu), pnode, nid,
-                       lcpu, blade);
+               printk(KERN_DEBUG "UV: cpu %d, apicid 0x%x, pnode %d, nid %d, lcpu %d, blade %d\n",
+                       cpu, apicid, pnode, nid, lcpu, blade);
        }
 
        /* Add blade/pnode info for nodes without cpus */
index cd60c0b..3063a0c 100644 (file)
@@ -1150,6 +1150,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
        hrtimer_cancel(&apic->lapic_timer.timer);
        update_divide_count(apic);
        start_apic_timer(apic);
+       apic->irr_pending = true;
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
index a601713..58a0f1e 100644 (file)
@@ -455,8 +455,6 @@ out_unlock:
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
        struct kvm_shadow_walk_iterator iterator;
-       pt_element_t gpte;
-       gpa_t pte_gpa = -1;
        int level;
        u64 *sptep;
        int need_flush = 0;
@@ -470,10 +468,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
                if (level == PT_PAGE_TABLE_LEVEL  ||
                    ((level == PT_DIRECTORY_LEVEL && is_large_pte(*sptep))) ||
                    ((level == PT_PDPE_LEVEL && is_large_pte(*sptep)))) {
-                       struct kvm_mmu_page *sp = page_header(__pa(sptep));
-
-                       pte_gpa = (sp->gfn << PAGE_SHIFT);
-                       pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
                        if (is_shadow_present_pte(*sptep)) {
                                rmap_remove(vcpu->kvm, sptep);
@@ -492,18 +486,6 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
        if (need_flush)
                kvm_flush_remote_tlbs(vcpu->kvm);
        spin_unlock(&vcpu->kvm->mmu_lock);
-
-       if (pte_gpa == -1)
-               return;
-       if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
-                                 sizeof(pt_element_t)))
-               return;
-       if (is_present_gpte(gpte) && (gpte & PT_ACCESSED_MASK)) {
-               if (mmu_topup_memory_caches(vcpu))
-                       return;
-               kvm_mmu_pte_write(vcpu, pte_gpa, (const u8 *)&gpte,
-                                 sizeof(pt_element_t), 0);
-       }
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
index 9d06896..6651dbf 100644 (file)
@@ -1913,7 +1913,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 
        events->sipi_vector = vcpu->arch.sipi_vector;
 
-       events->flags = 0;
+       events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
 
        vcpu_put(vcpu);
 }
@@ -1921,7 +1922,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
 {
-       if (events->flags)
+       if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
                return -EINVAL;
 
        vcpu_load(vcpu);
@@ -1938,10 +1940,12 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                kvm_pic_clear_isr_ack(vcpu->kvm);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
-       vcpu->arch.nmi_pending = events->nmi.pending;
+       if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
+               vcpu->arch.nmi_pending = events->nmi.pending;
        kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-       vcpu->arch.sipi_vector = events->sipi_vector;
+       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
+               vcpu->arch.sipi_vector = events->sipi_vector;
 
        vcpu_put(vcpu);
 
index 145df00..f939d60 100644 (file)
@@ -51,7 +51,7 @@ void x86_pci_root_bus_res_quirks(struct pci_bus *b)
        }
 }
 
-void __init update_res(struct pci_root_info *info, size_t start,
+void __devinit update_res(struct pci_root_info *info, size_t start,
                              size_t end, unsigned long flags, int merge)
 {
        int i;
index 8873b9b..8618d89 100644 (file)
@@ -402,7 +402,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 * our current implementations need.  If we'll ever need
                 * more the interface will need revisiting.
                 */
-               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               page = alloc_page(gfp_mask | __GFP_ZERO);
                if (!page)
                        goto out_free_bio;
                if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
index 6ae118d..d52d4ad 100644 (file)
@@ -505,21 +505,30 @@ static unsigned int lcm(unsigned int a, unsigned int b)
 
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
- * @t: the stacking driver limits (top)
- * @b:  the underlying queue limits (bottom)
+ * @t: the stacking driver limits (top device)
+ * @b:  the underlying queue limits (bottom, component device)
  * @offset:  offset to beginning of data within component device
  *
  * Description:
- *    Merges two queue_limit structs.  Returns 0 if alignment didn't
- *    change.  Returns -1 if adding the bottom device caused
- *    misalignment.
+ *    This function is used by stacking drivers like MD and DM to ensure
+ *    that all component devices have compatible block sizes and
+ *    alignments.  The stacking driver must provide a queue_limits
+ *    struct (top) and then iteratively call the stacking function for
+ *    all component (bottom) devices.  The stacking function will
+ *    attempt to combine the values and ensure proper alignment.
+ *
+ *    Returns 0 if the top and bottom queue_limits are compatible.  The
+ *    top device's block sizes and alignment offsets may be adjusted to
+ *    ensure alignment with the bottom device. If no compatible sizes
+ *    and alignments exist, -1 is returned and the resulting top
+ *    queue_limits will have the misaligned flag set to indicate that
+ *    the alignment_offset is undefined.
  */
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t offset)
 {
-       int ret;
-
-       ret = 0;
+       sector_t alignment;
+       unsigned int top, bottom;
 
        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -537,6 +546,22 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);
 
+       alignment = queue_limit_alignment_offset(b, offset);
+
+       /* Bottom device has different alignment.  Check that it is
+        * compatible with the current top alignment.
+        */
+       if (t->alignment_offset != alignment) {
+
+               top = max(t->physical_block_size, t->io_min)
+                       + t->alignment_offset;
+               bottom = max(b->physical_block_size, b->io_min) + alignment;
+
+               /* Verify that top and bottom intervals line up */
+               if (max(top, bottom) & (min(top, bottom) - 1))
+                       t->misaligned = 1;
+       }
+
        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);
 
@@ -544,54 +569,64 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                                     b->physical_block_size);
 
        t->io_min = max(t->io_min, b->io_min);
+       t->io_opt = lcm(t->io_opt, b->io_opt);
+
        t->no_cluster |= b->no_cluster;
        t->discard_zeroes_data &= b->discard_zeroes_data;
 
-       /* Bottom device offset aligned? */
-       if (offset &&
-           (offset & (b->physical_block_size - 1)) != b->alignment_offset) {
+       /* Physical block size a multiple of the logical block size? */
+       if (t->physical_block_size & (t->logical_block_size - 1)) {
+               t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
-               ret = -1;
        }
 
-       /*
-        * Temporarily disable discard granularity. It's currently buggy
-        * since we default to 0 for discard_granularity, hence this
-        * "failure" will always trigger for non-zero offsets.
-        */
-#if 0
-       if (offset &&
-           (offset & (b->discard_granularity - 1)) != b->discard_alignment) {
-               t->discard_misaligned = 1;
-               ret = -1;
+       /* Minimum I/O a multiple of the physical block size? */
+       if (t->io_min & (t->physical_block_size - 1)) {
+               t->io_min = t->physical_block_size;
+               t->misaligned = 1;
        }
-#endif
-
-       /* If top has no alignment offset, inherit from bottom */
-       if (!t->alignment_offset)
-               t->alignment_offset =
-                       b->alignment_offset & (b->physical_block_size - 1);
 
-       if (!t->discard_alignment)
-               t->discard_alignment =
-                       b->discard_alignment & (b->discard_granularity - 1);
-
-       /* Top device aligned on logical block boundary? */
-       if (t->alignment_offset & (t->logical_block_size - 1)) {
+       /* Optimal I/O a multiple of the physical block size? */
+       if (t->io_opt & (t->physical_block_size - 1)) {
+               t->io_opt = 0;
                t->misaligned = 1;
-               ret = -1;
        }
 
-       /* Find lcm() of optimal I/O size and granularity */
-       t->io_opt = lcm(t->io_opt, b->io_opt);
-       t->discard_granularity = lcm(t->discard_granularity,
-                                    b->discard_granularity);
+       /* Find lowest common alignment_offset */
+       t->alignment_offset = lcm(t->alignment_offset, alignment)
+               & (max(t->physical_block_size, t->io_min) - 1);
 
-       /* Verify that optimal I/O size is a multiple of io_min */
-       if (t->io_min && t->io_opt % t->io_min)
-               ret = -1;
+       /* Verify that new alignment_offset is on a logical block boundary */
+       if (t->alignment_offset & (t->logical_block_size - 1))
+               t->misaligned = 1;
+
+       /* Discard alignment and granularity */
+       if (b->discard_granularity) {
+               unsigned int granularity = b->discard_granularity;
+               offset &= granularity - 1;
+
+               alignment = (granularity + b->discard_alignment - offset)
+                       & (granularity - 1);
+
+               if (t->discard_granularity != 0 &&
+                   t->discard_alignment != alignment) {
+                       top = t->discard_granularity + t->discard_alignment;
+                       bottom = b->discard_granularity + alignment;
+
+                       /* Verify that top and bottom intervals line up */
+                       if (max(top, bottom) & (min(top, bottom) - 1))
+                               t->discard_misaligned = 1;
+               }
+
+               t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
+                                                     b->max_discard_sectors);
+               t->discard_granularity = max(t->discard_granularity,
+                                            b->discard_granularity);
+               t->discard_alignment = lcm(t->discard_alignment, alignment) &
+                       (t->discard_granularity - 1);
+       }
 
-       return ret;
+       return t->misaligned ? -1 : 0;
 }
 EXPORT_SYMBOL(blk_stack_limits);
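
The kernel-doc rewritten above describes the intended calling pattern: a stacking driver (MD/DM) starts from a top queue_limits and iteratively folds in every component (bottom) device. A minimal sketch of that loop follows, using only the blk_stack_limits() signature shown in this hunk; struct component, its fields, and the function name are hypothetical and not code from this commit.

#include <linux/blkdev.h>

/* Hypothetical per-component bookkeeping, for illustration only. */
struct component {
	struct block_device *bdev;
	sector_t offset;	/* offset to beginning of data within the component */
};

/* Sketch: combine the limits of all bottom devices into the top queue. */
static int stack_compute_limits(struct request_queue *q,
				struct component *comp, int nr)
{
	struct queue_limits lim;
	int i, misaligned = 0;

	blk_set_default_limits(&lim);

	for (i = 0; i < nr; i++) {
		struct request_queue *bq = bdev_get_queue(comp[i].bdev);

		/* A -1 return means no compatible alignment exists and the
		 * misaligned flag has been set on the top limits. */
		if (blk_stack_limits(&lim, &bq->limits, comp[i].offset) < 0)
			misaligned = 1;
	}

	q->limits = lim;
	return misaligned ? -1 : 0;
}
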
 
index e2f8046..918c7fd 100644 (file)
@@ -208,8 +208,6 @@ struct cfq_data {
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group root_group;
-       /* Number of active cfq groups on group service tree */
-       int nr_groups;
 
        /*
         * The priority currently being served
@@ -294,8 +292,7 @@ static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
 
 static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
                                            enum wl_prio_t prio,
-                                           enum wl_type_t type,
-                                           struct cfq_data *cfqd)
+                                           enum wl_type_t type)
 {
        if (!cfqg)
                return NULL;
@@ -842,7 +839,6 @@ cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
        __cfq_group_service_tree_add(st, cfqg);
        cfqg->on_st = true;
-       cfqd->nr_groups++;
        st->total_weight += cfqg->weight;
 }
 
@@ -863,7 +859,6 @@ cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
 
        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfqg->on_st = false;
-       cfqd->nr_groups--;
        st->total_weight -= cfqg->weight;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
@@ -1150,7 +1145,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 #endif
 
        service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
-                                               cfqq_type(cfqq), cfqd);
+                                               cfqq_type(cfqq));
        if (cfq_class_idle(cfqq)) {
                rb_key = CFQ_IDLE_DELAY;
                parent = rb_last(&service_tree->rb);
@@ -1513,9 +1508,6 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
        struct cfq_io_context *cic;
        struct cfq_queue *cfqq;
 
-       /* Deny merge if bio and rq don't belong to same cfq group */
-       if ((RQ_CFQQ(rq))->cfqg != cfq_get_cfqg(cfqd, 0))
-               return false;
        /*
         * Disallow merge of a sync bio into an async request.
         */
@@ -1616,7 +1608,7 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 {
        struct cfq_rb_root *service_tree =
                service_tree_for(cfqd->serving_group, cfqd->serving_prio,
-                                       cfqd->serving_type, cfqd);
+                                       cfqd->serving_type);
 
        if (!cfqd->rq_queued)
                return NULL;
@@ -1675,13 +1667,17 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 #define CFQQ_SEEKY(cfqq)       ((cfqq)->seek_mean > CFQQ_SEEK_THR)
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-                              struct request *rq)
+                              struct request *rq, bool for_preempt)
 {
        sector_t sdist = cfqq->seek_mean;
 
        if (!sample_valid(cfqq->seek_samples))
                sdist = CFQQ_SEEK_THR;
 
+       /* if seek_mean is big, using it as close criteria is meaningless */
+       if (sdist > CFQQ_SEEK_THR && !for_preempt)
+               sdist = CFQQ_SEEK_THR;
+
        return cfq_dist_from_last(cfqd, rq) <= sdist;
 }
 
@@ -1709,7 +1705,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
         * will contain the closest sector.
         */
        __cfqq = rb_entry(parent, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
                return __cfqq;
 
        if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1720,7 +1716,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
                return NULL;
 
        __cfqq = rb_entry(node, struct cfq_queue, p_node);
-       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
+       if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
                return __cfqq;
 
        return NULL;
@@ -1963,8 +1959,7 @@ static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
 }
 
 static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
-                               struct cfq_group *cfqg, enum wl_prio_t prio,
-                               bool prio_changed)
+                               struct cfq_group *cfqg, enum wl_prio_t prio)
 {
        struct cfq_queue *queue;
        int i;
@@ -1972,24 +1967,9 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
        unsigned long lowest_key = 0;
        enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
 
-       if (prio_changed) {
-               /*
-                * When priorities switched, we prefer starting
-                * from SYNC_NOIDLE (first choice), or just SYNC
-                * over ASYNC
-                */
-               if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-                       return cur_best;
-               cur_best = SYNC_WORKLOAD;
-               if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
-                       return cur_best;
-
-               return ASYNC_WORKLOAD;
-       }
-
-       for (i = 0; i < 3; ++i) {
-               /* otherwise, select the one with lowest rb_key */
-               queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
+       for (i = 0; i <= SYNC_WORKLOAD; ++i) {
+               /* select the one with lowest rb_key */
+               queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
                if (queue &&
                    (!key_valid || time_before(queue->rb_key, lowest_key))) {
                        lowest_key = queue->rb_key;
@@ -2003,8 +1983,6 @@ static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
 
 static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 {
-       enum wl_prio_t previous_prio = cfqd->serving_prio;
-       bool prio_changed;
        unsigned slice;
        unsigned count;
        struct cfq_rb_root *st;
@@ -2032,24 +2010,19 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
         * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
         * expiration time
         */
-       prio_changed = (cfqd->serving_prio != previous_prio);
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-                               cfqd);
+       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
        count = st->count;
 
        /*
-        * If priority didn't change, check workload expiration,
-        * and that we still have other queues ready
+        * check workload expiration, and that we still have other queues ready
         */
-       if (!prio_changed && count &&
-           !time_after(jiffies, cfqd->workload_expires))
+       if (count && !time_after(jiffies, cfqd->workload_expires))
                return;
 
        /* otherwise select new workload type */
        cfqd->serving_type =
-               cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
-       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
-                               cfqd);
+               cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
+       st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
        count = st->count;
 
        /*
@@ -3143,7 +3116,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
         * if this request is as-good as one we would expect from the
         * current cfqq, let it preempt
         */
-       if (cfq_rq_close(cfqd, cfqq, rq))
+       if (cfq_rq_close(cfqd, cfqq, rq, true))
                return true;
 
        return false;
index eb4fa19..ce1fa92 100644 (file)
@@ -7101,7 +7101,7 @@ static struct DAC960_privdata DAC960_BA_privdata = {
 
 static struct DAC960_privdata DAC960_LP_privdata = {
        .HardwareType =         DAC960_LP_Controller,
-       .FirmwareType   =       DAC960_LP_Controller,
+       .FirmwareType   =       DAC960_V2_Controller,
        .InterruptHandler =     DAC960_LP_InterruptHandler,
        .MemoryWindowSize =     DAC960_LP_RegisterWindowSize,
 };
index 13bb69d..64a223b 100644 (file)
@@ -735,21 +735,6 @@ diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector
        part_stat_unlock();
 }
 
-/*
- * Ensure we don't create aliases in VI caches
- */
-static inline void
-killalias(struct bio *bio)
-{
-       struct bio_vec *bv;
-       int i;
-
-       if (bio_data_dir(bio) == READ)
-               __bio_for_each_segment(bv, bio, i, 0) {
-                       flush_dcache_page(bv->bv_page);
-               }
-}
-
 void
 aoecmd_ata_rsp(struct sk_buff *skb)
 {
@@ -871,7 +856,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
                if (buf->flags & BUFFL_FAIL)
                        bio_endio(buf->bio, -EIO);
                else {
-                       killalias(buf->bio);
+                       bio_flush_dcache_pages(buf->bio);
                        bio_endio(buf->bio, 0);
                }
                mempool_free(buf, d->bufpool);
index 2312d78..c975587 100644 (file)
@@ -1490,7 +1490,7 @@ void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);
 
 /* drbd_proc.c */
 extern struct proc_dir_entry *drbd_proc;
-extern struct file_operations drbd_proc_fops;
+extern const struct file_operations drbd_proc_fops;
 extern const char *drbd_conn_str(enum drbd_conns s);
 extern const char *drbd_role_str(enum drbd_role s);
 
index 157d1e4..9348f33 100644 (file)
@@ -27,7 +27,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/drbd.h>
 #include <asm/uaccess.h>
 #include <asm/types.h>
@@ -151,7 +150,7 @@ wait_queue_head_t drbd_pp_wait;
 
 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
 
-static struct block_device_operations drbd_ops = {
+static const struct block_device_operations drbd_ops = {
        .owner =   THIS_MODULE,
        .open =    drbd_open,
        .release = drbd_release,
@@ -3623,7 +3622,7 @@ _drbd_fault_random(struct fault_random_state *rsp)
 {
        long refresh;
 
-       if (--rsp->count < 0) {
+       if (!rsp->count--) {
                get_random_bytes(&refresh, sizeof(refresh));
                rsp->state += refresh;
                rsp->count = FAULT_RANDOM_REFRESH;
index bdd0b49..df8ad96 100644 (file)
@@ -38,7 +38,7 @@ static int drbd_proc_open(struct inode *inode, struct file *file);
 
 
 struct proc_dir_entry *drbd_proc;
-struct file_operations drbd_proc_fops = {
+const struct file_operations drbd_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = drbd_proc_open,
        .read           = seq_read,
index c548f24..259c135 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/uaccess.h>
 #include <net/sock.h>
 
-#include <linux/version.h>
 #include <linux/drbd.h>
 #include <linux/fs.h>
 #include <linux/file.h>
index ed8796f..b453c2b 100644 (file)
@@ -24,7 +24,6 @@
  */
 
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/drbd.h>
 #include <linux/sched.h>
 #include <linux/smp_lock.h>
@@ -34,7 +33,6 @@
 #include <linux/mm_inline.h>
 #include <linux/slab.h>
 #include <linux/random.h>
-#include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/scatterlist.h>
 
index e0339aa..02b2583 100644 (file)
@@ -860,7 +860,7 @@ static int mg_probe(struct platform_device *plat_dev)
                err = -EINVAL;
                goto probe_err_2;
        }
-       host->dev_base = ioremap(rsc->start , rsc->end + 1);
+       host->dev_base = ioremap(rsc->start, resource_size(rsc));
        if (!host->dev_base) {
                printk(KERN_ERR "%s:%d ioremap fail\n",
                                __func__, __LINE__);
index e989f67..3d9c61e 100644 (file)
@@ -158,10 +158,11 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
                        goto out;
                }
        }
-out_unlock:
-       mutex_unlock(&rng_mutex);
 out:
        return ret ? : err;
+out_unlock:
+       mutex_unlock(&rng_mutex);
+       goto out;
 }
 
 
index f4f5f82..dd3dfe4 100644 (file)
@@ -386,7 +386,9 @@ static void mddev_put(mddev_t *mddev)
        if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
                return;
        if (!mddev->raid_disks && list_empty(&mddev->disks) &&
-           !mddev->hold_active) {
+           mddev->ctime == 0 && !mddev->hold_active) {
+               /* Array is not configured at all, and not held active,
+                * so destroy it */
                list_del(&mddev->all_mddevs);
                if (mddev->gendisk) {
                        /* we did a probe so need to clean up.
@@ -4355,7 +4357,7 @@ static int do_md_run(mddev_t * mddev)
        mddev->barriers_work = 1;
        mddev->ok_start_degraded = start_dirty_degraded;
 
-       if (start_readonly)
+       if (start_readonly && mddev->ro == 0)
                mddev->ro = 2; /* read-only, but switch on first write */
 
        err = mddev->pers->run(mddev);
@@ -4419,33 +4421,6 @@ static int do_md_run(mddev_t * mddev)
 
        set_capacity(disk, mddev->array_sectors);
 
-       /* If there is a partially-recovered drive we need to
-        * start recovery here.  If we leave it to md_check_recovery,
-        * it will remove the drives and not do the right thing
-        */
-       if (mddev->degraded && !mddev->sync_thread) {
-               int spares = 0;
-               list_for_each_entry(rdev, &mddev->disks, same_set)
-                       if (rdev->raid_disk >= 0 &&
-                           !test_bit(In_sync, &rdev->flags) &&
-                           !test_bit(Faulty, &rdev->flags))
-                               /* complete an interrupted recovery */
-                               spares++;
-               if (spares && mddev->pers->sync_request) {
-                       mddev->recovery = 0;
-                       set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-                       mddev->sync_thread = md_register_thread(md_do_sync,
-                                                               mddev,
-                                                               "resync");
-                       if (!mddev->sync_thread) {
-                               printk(KERN_ERR "%s: could not start resync"
-                                      " thread...\n",
-                                      mdname(mddev));
-                               /* leave the spares where they are, it shouldn't hurt */
-                               mddev->recovery = 0;
-                       }
-               }
-       }
        md_wakeup_thread(mddev->thread);
        md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
@@ -5262,6 +5237,10 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
                mddev->minor_version = info->minor_version;
                mddev->patch_version = info->patch_version;
                mddev->persistent = !info->not_persistent;
+               /* ensure mddev_put doesn't delete this now that there
+                * is some minimal configuration.
+                */
+               mddev->ctime         = get_seconds();
                return 0;
        }
        mddev->major_version = MD_MAJOR_VERSION;
@@ -6494,10 +6473,11 @@ void md_do_sync(mddev_t *mddev)
                mddev->curr_resync = 2;
 
        try_again:
-               if (kthread_should_stop()) {
+               if (kthread_should_stop())
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
+               if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
                        goto skip;
-               }
                for_each_mddev(mddev2, tmp) {
                        if (mddev2 == mddev)
                                continue;
index fbc2311..77cf090 100644 (file)
@@ -56,6 +56,7 @@ static const char version[] =
 #include <linux/errno.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/if_ether.h>
 #include <linux/skbuff.h>
 #include <linux/slab.h>
 #include <linux/init.h>
@@ -734,8 +735,7 @@ static void init_82586_mem(struct net_device *dev)
        memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10);
 
        /* Fill in the station address. */
-       memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr,
-                  sizeof(dev->dev_addr));
+       memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN);
 
        /* The Tx-block list is written as needed.  We just set up the values. */
        lp->tx_cmd_link = IDLELOOP + 4;
index e58a653..dd9a09c 100644 (file)
@@ -2346,6 +2346,7 @@ config GELIC_NET
 
 config GELIC_WIRELESS
        bool "PS3 Wireless support"
+       depends on WLAN
        depends on GELIC_NET
        select WIRELESS_EXT
        help
@@ -2358,6 +2359,7 @@ config GELIC_WIRELESS
 config GELIC_WIRELESS_OLD_PSK_INTERFACE
        bool "PS3 Wireless private PSK interface (OBSOLETE)"
        depends on GELIC_WIRELESS
+       select WEXT_PRIV
        help
           This option retains the obsolete private interface to pass
           the PSK from user space programs to the driver.  The PSK
index 9e56014..9fd8e5e 100644 (file)
@@ -275,6 +275,7 @@ struct be_adapter {
        u32 tx_fc;              /* Tx flow control */
        int link_speed;
        u8 port_type;
+       u8 transceiver;
 };
 
 extern const struct ethtool_ops be_ethtool_ops;
index 1b68bd9..102ade1 100644 (file)
@@ -1479,6 +1479,41 @@ err:
        return status;
 }
 
+int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+                       u8 loopback_type, u8 enable)
+{
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_set_lmode *req;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
+                               OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
+
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
+                       OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
+                       sizeof(*req));
+
+       req->src_port = port_num;
+       req->dest_port = port_num;
+       req->loopback_type = loopback_type;
+       req->loopback_state = enable;
+
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
 {
@@ -1501,6 +1536,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
 
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                        OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
+       req->hdr.timeout = 4;
 
        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
index 92b87ef..c002b83 100644 (file)
@@ -155,6 +155,7 @@ struct be_mcc_mailbox {
 
 #define OPCODE_LOWLEVEL_HOST_DDR_DMA                    17
 #define OPCODE_LOWLEVEL_LOOPBACK_TEST                   18
+#define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE              19
 
 struct be_cmd_req_hdr {
        u8 opcode;              /* dword 0 */
@@ -821,6 +822,19 @@ struct be_cmd_resp_loopback_test {
        u32    ticks_compl;
 };
 
+struct be_cmd_req_set_lmode {
+       struct be_cmd_req_hdr hdr;
+       u8 src_port;
+       u8 dest_port;
+       u8 loopback_type;
+       u8 loopback_state;
+};
+
+struct be_cmd_resp_set_lmode {
+       struct be_cmd_resp_hdr resp_hdr;
+       u8 rsvd0[4];
+};
+
 /********************** DDR DMA test *********************/
 struct be_cmd_req_ddrdma_test {
        struct be_cmd_req_hdr hdr;
@@ -912,3 +926,5 @@ extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                                u32 num_pkts, u64 pattern);
 extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                        u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
+                               u8 loopback_type, u8 enable);
index 298b92c..5d001c4 100644 (file)
@@ -118,6 +118,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
 #define BE_MAC_LOOPBACK 0x0
 #define BE_PHY_LOOPBACK 0x1
 #define BE_ONE_PORT_EXT_LOOPBACK 0x2
+#define BE_NO_LOOPBACK 0xff
 
 static void
 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
@@ -339,28 +340,50 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
                status = be_cmd_read_port_type(adapter, adapter->port_num,
                                                &connector);
-               switch (connector) {
-               case 7:
-                       ecmd->port = PORT_FIBRE;
-                       break;
-               default:
-                       ecmd->port = PORT_TP;
-                       break;
+               if (!status) {
+                       switch (connector) {
+                       case 7:
+                               ecmd->port = PORT_FIBRE;
+                               ecmd->transceiver = XCVR_EXTERNAL;
+                               break;
+                       case 0:
+                               ecmd->port = PORT_TP;
+                               ecmd->transceiver = XCVR_EXTERNAL;
+                               break;
+                       default:
+                               ecmd->port = PORT_TP;
+                               ecmd->transceiver = XCVR_INTERNAL;
+                               break;
+                       }
+               } else {
+                       ecmd->port = PORT_AUI;
+                       ecmd->transceiver = XCVR_INTERNAL;
                }
 
                /* Save for future use */
                adapter->link_speed = ecmd->speed;
                adapter->port_type = ecmd->port;
+               adapter->transceiver = ecmd->transceiver;
        } else {
                ecmd->speed = adapter->link_speed;
                ecmd->port = adapter->port_type;
+               ecmd->transceiver = adapter->transceiver;
        }
 
        ecmd->duplex = DUPLEX_FULL;
        ecmd->autoneg = AUTONEG_DISABLE;
-       ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
        ecmd->phy_address = adapter->port_num;
-       ecmd->transceiver = XCVR_INTERNAL;
+       switch (ecmd->port) {
+       case PORT_FIBRE:
+               ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+               break;
+       case PORT_TP:
+               ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
+               break;
+       case PORT_AUI:
+               ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI);
+               break;
+       }
 
        return 0;
 }
@@ -489,6 +512,19 @@ err:
        return ret;
 }
 
+static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
+                               u64 *status)
+{
+       be_cmd_set_loopback(adapter, adapter->port_num,
+                               loopback_type, 1);
+       *status = be_cmd_loopback_test(adapter, adapter->port_num,
+                               loopback_type, 1500,
+                               2, 0xabc);
+       be_cmd_set_loopback(adapter, adapter->port_num,
+                               BE_NO_LOOPBACK, 1);
+       return *status;
+}
+
 static void
 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
 {
@@ -497,23 +533,18 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
        memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
 
        if (test->flags & ETH_TEST_FL_OFFLINE) {
-               data[0] = be_cmd_loopback_test(adapter, adapter->port_num,
-                                               BE_MAC_LOOPBACK, 1500,
-                                               2, 0xabc);
-               if (data[0] != 0)
+               if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
+                                               &data[0]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
-
-               data[1] = be_cmd_loopback_test(adapter, adapter->port_num,
-                                               BE_PHY_LOOPBACK, 1500,
-                                               2, 0xabc);
-               if (data[1] != 0)
+               }
+               if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
+                                               &data[1]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
-
-               data[2] = be_cmd_loopback_test(adapter, adapter->port_num,
-                                               BE_ONE_PORT_EXT_LOOPBACK,
-                                               1500, 2, 0xabc);
-               if (data[2] != 0)
+               }
+               if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK,
+                                               &data[2]) != 0) {
                        test->flags |= ETH_TEST_FL_FAILED;
+               }
 
                data[3] = be_test_ddr_dma(adapter);
                if (data[3] != 0)
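
The be_ethtool.c hunks above factor the repeated "enable a loopback mode, run the test, restore normal operation" sequence into one be_loopback_test() helper, so every self-test leaves the port back in BE_NO_LOOPBACK even when the test fails. A rough standalone sketch of that wrap-run-restore pattern (plain userspace C; all names and return values here are invented for illustration, not the driver's API):

        #include <stdio.h>
        #include <stdint.h>

        enum { MAC_LOOPBACK = 0x0, PHY_LOOPBACK = 0x1, NO_LOOPBACK = 0xff };

        static void set_loopback(int mode)
        {
                printf("loopback mode -> 0x%02x\n", mode);
        }

        static uint64_t run_loopback_test(int mode)
        {
                (void)mode;
                return 0;               /* 0 == pass, non-zero == failure code */
        }

        static uint64_t loopback_test(int mode, uint64_t *status)
        {
                set_loopback(mode);                     /* enter the requested loopback */
                *status = run_loopback_test(mode);      /* run and record the result */
                set_loopback(NO_LOOPBACK);              /* always restore normal mode */
                return *status;
        }

        int main(void)
        {
                uint64_t data[2];
                int failed = 0;

                if (loopback_test(MAC_LOOPBACK, &data[0]) != 0)
                        failed = 1;
                if (loopback_test(PHY_LOOPBACK, &data[1]) != 0)
                        failed = 1;
                printf("self-test %s\n", failed ? "FAILED" : "passed");
                return 0;
        }

The point of the helper is that the restore step can no longer be forgotten when further loopback modes are added to the self-test list.
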
index 77ba135..306c2b8 100644 (file)
@@ -7593,6 +7593,8 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
                        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
                        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
+                       bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
+                                     CNIC_SB_ID(bp));
                }
                mutex_unlock(&bp->cnic_mutex);
 #endif
index 0fb7a49..822f586 100644 (file)
@@ -1580,7 +1580,7 @@ static void ad_agg_selection_logic(struct aggregator *agg)
                // check if any partner replys
                if (best->is_individual) {
                        pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
-                                  best->slave->dev->master->name);
+                                  best->slave ? best->slave->dev->master->name : "NULL");
                }
 
                best->is_active = 1;
index e0620d0..8bd3c9f 100644 (file)
@@ -143,7 +143,6 @@ void gfar_start(struct net_device *dev);
 static void gfar_clear_exact_match(struct net_device *dev);
 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
 
 MODULE_AUTHOR("Freescale Semiconductor, Inc");
 MODULE_DESCRIPTION("Gianfar Ethernet Driver");
@@ -455,7 +454,6 @@ static const struct net_device_ops gfar_netdev_ops = {
        .ndo_set_multicast_list = gfar_set_multi,
        .ndo_tx_timeout = gfar_timeout,
        .ndo_do_ioctl = gfar_ioctl,
-       .ndo_select_queue = gfar_select_queue,
        .ndo_get_stats = gfar_get_stats,
        .ndo_vlan_rx_register = gfar_vlan_rx_register,
        .ndo_set_mac_address = eth_mac_addr,
@@ -506,10 +504,6 @@ static inline int gfar_uses_fcb(struct gfar_private *priv)
        return priv->vlgrp || priv->rx_csum_enable;
 }
 
-u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-       return skb_get_queue_mapping(skb);
-}
 static void free_tx_pointers(struct gfar_private *priv)
 {
        int i = 0;
@@ -2470,10 +2464,11 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
        fcb = (struct rxfcb *)skb->data;
 
        /* Remove the FCB from the skb */
-       skb_set_queue_mapping(skb, fcb->rq);
        /* Remove the padded bytes, if there are any */
-       if (amount_pull)
+       if (amount_pull) {
+               skb_record_rx_queue(skb, fcb->rq);
                skb_pull(skb, amount_pull);
+       }
 
        if (priv->rx_csum_enable)
                gfar_rx_checksum(skb, fcb);
@@ -2554,7 +2549,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
                                /* Remove the FCS from the packet length */
                                skb_put(skb, pkt_len);
                                rx_queue->stats.rx_bytes += pkt_len;
-
+                               skb_record_rx_queue(skb, rx_queue->qindex);
                                gfar_process_frame(dev, skb, amount_pull);
 
                        } else {
index 090a6d3..052c740 100644 (file)
@@ -87,6 +87,7 @@ History:
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/if_ether.h>
 #include <linux/skbuff.h>
 #include <linux/bitops.h>
 
@@ -988,7 +989,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
 
        /* copy out MAC address */
 
-       for (z = 0; z < sizeof(dev->dev_addr); z++)
+       for (z = 0; z < ETH_ALEN; z++)
                dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z);
 
        /* print config */
index e8e9e91..c505b50 100644 (file)
@@ -1096,9 +1096,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
                hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
        } else {
                /* Set PCS register for forced link */
-               reg |= E1000_PCS_LCTL_FSD |        /* Force Speed */
-                      E1000_PCS_LCTL_FORCE_LINK | /* Force Link */
-                      E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */
+               reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
 
                hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
        }
index 5c9d73e..3670a66 100644 (file)
@@ -457,15 +457,6 @@ s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
        phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
 
        ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
-       if (ret_val)
-               goto out;
-
-       /* Set number of link attempts before downshift */
-       ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data);
-       if (ret_val)
-               goto out;
-       phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK;
-       ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data);
 
 out:
        return ret_val;
index ac9d527..f771a6c 100644 (file)
@@ -1795,7 +1795,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
                /* dual port cards only support WoL on port A from now on
                 * unless it was enabled in the eeprom for port B
                 * so exclude FUNC_1 ports from having WoL enabled */
-               if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+               if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
                    !adapter->eeprom_wol) {
                        wol->supported = 0;
                        break;
index 78963a0..933c64f 100644 (file)
@@ -1306,13 +1306,8 @@ void igb_reset(struct igb_adapter *adapter)
        hwm = min(((pba << 10) * 9 / 10),
                        ((pba << 10) - 2 * adapter->max_frame_size));
 
-       if (mac->type < e1000_82576) {
-               fc->high_water = hwm & 0xFFF8;  /* 8-byte granularity */
-               fc->low_water = fc->high_water - 8;
-       } else {
-               fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
-               fc->low_water = fc->high_water - 16;
-       }
+       fc->high_water = hwm & 0xFFF0;  /* 16-byte granularity */
+       fc->low_water = fc->high_water - 16;
        fc->pause_time = 0xFFFF;
        fc->send_xon = 1;
        fc->current_mode = fc->requested_mode;
index e9dd95f..0dbd032 100644 (file)
@@ -2763,7 +2763,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
        err = hw->mac.ops.reset_hw(hw);
        if (err) {
                dev_info(&pdev->dev,
-                        "PF still in reset state, assigning new address\n");
+                        "PF still in reset state, assigning new address."
+                        " Is the PF interface up?\n");
                random_ether_addr(hw->mac.addr);
        } else {
                err = hw->mac.ops.read_mac_addr(hw);
index bd64387..1a2ea62 100644 (file)
@@ -4373,6 +4373,11 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
+       /*
+        * pci_restore_state clears dev->state_saved so call
+        * pci_save_state to restore it.
+        */
+       pci_save_state(pdev);
 
        err = pci_enable_device_mem(pdev);
        if (err) {
index dcc67a3..e154677 100644 (file)
@@ -45,6 +45,7 @@ static const char *const version =
 #include <linux/crc32.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/if_ether.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/moduleparam.h>
@@ -1765,7 +1766,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
 
        /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
        if (!is_valid_ether_addr(dev->perm_addr))
-               memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+               memset(dev->dev_addr, 0, ETH_ALEN);
 
        if (pcnet32_debug & NETIF_MSG_PROBE) {
                printk(" %pM", dev->dev_addr);
index f983e3b..103e8b0 100644 (file)
@@ -741,14 +741,14 @@ static int efx_probe_port(struct efx_nic *efx)
 
        EFX_LOG(efx, "create port\n");
 
+       if (phy_flash_cfg)
+               efx->phy_mode = PHY_MODE_SPECIAL;
+
        /* Connect up MAC/PHY operations table */
        rc = efx->type->probe_port(efx);
        if (rc)
                goto err;
 
-       if (phy_flash_cfg)
-               efx->phy_mode = PHY_MODE_SPECIAL;
-
        /* Sanity check MAC address */
        if (is_valid_ether_addr(efx->mac_address)) {
                memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
index 17afcd2..9d009c4 100644 (file)
@@ -925,6 +925,7 @@ static int falcon_probe_port(struct efx_nic *efx)
 
 static void falcon_remove_port(struct efx_nic *efx)
 {
+       efx->phy_op->remove(efx);
        efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
 
index 3da933f..8ccab2c 100644 (file)
@@ -111,16 +111,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
        efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
 }
 
-/* Get status of XAUI link */
-static bool falcon_xaui_link_ok(struct efx_nic *efx)
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
 {
        efx_oword_t reg;
        bool align_done, link_ok = false;
        int sync_status;
 
-       if (LOOPBACK_INTERNAL(efx))
-               return true;
-
        /* Read link status */
        efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
 
@@ -135,14 +131,24 @@ static bool falcon_xaui_link_ok(struct efx_nic *efx)
        EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
        efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
 
-       /* If the link is up, then check the phy side of the xaui link */
-       if (efx->link_state.up && link_ok)
-               if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS))
-                       link_ok = efx_mdio_phyxgxs_lane_sync(efx);
-
        return link_ok;
 }
 
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+       /*
+        * Check MAC's XGXS link status except when using XGMII loopback
+        * which bypasses the XGXS block.
+        * If possible, check PHY's XGXS link status except when using
+        * MAC loopback.
+        */
+       return (efx->loopback_mode == LOOPBACK_XGMII ||
+               falcon_xgxs_link_ok(efx)) &&
+               (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+                LOOPBACK_INTERNAL(efx) || 
+                efx_mdio_phyxgxs_lane_sync(efx));
+}
+
 void falcon_reconfigure_xmac_core(struct efx_nic *efx)
 {
        unsigned int max_frame_len;
@@ -245,9 +251,9 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
 
 
 /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
 {
-       bool mac_up = falcon_xaui_link_ok(efx);
+       bool mac_up = falcon_xmac_link_ok(efx);
 
        if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
            efx_phy_mode_disabled(efx->phy_mode))
@@ -261,7 +267,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
                falcon_reset_xaui(efx);
                udelay(200);
 
-               mac_up = falcon_xaui_link_ok(efx);
+               mac_up = falcon_xmac_link_ok(efx);
                --tries;
        }
 
@@ -272,7 +278,7 @@ static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries)
 
 static bool falcon_xmac_check_fault(struct efx_nic *efx)
 {
-       return !falcon_check_xaui_link_up(efx, 5);
+       return !falcon_xmac_link_ok_retry(efx, 5);
 }
 
 static int falcon_reconfigure_xmac(struct efx_nic *efx)
@@ -284,7 +290,7 @@ static int falcon_reconfigure_xmac(struct efx_nic *efx)
 
        falcon_reconfigure_mac_wrapper(efx);
 
-       efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5);
+       efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
        falcon_mask_status_intr(efx, true);
 
        return 0;
@@ -357,7 +363,7 @@ void falcon_poll_xmac(struct efx_nic *efx)
                return;
 
        falcon_mask_status_intr(efx, false);
-       efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1);
+       efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
        falcon_mask_status_intr(efx, true);
 }
 
index 0e1bcc5..eb694af 100644 (file)
@@ -304,31 +304,47 @@ static u32 mcdi_to_ethtool_media(u32 media)
 
 static int efx_mcdi_phy_probe(struct efx_nic *efx)
 {
-       struct efx_mcdi_phy_cfg *phy_cfg;
+       struct efx_mcdi_phy_cfg *phy_data;
+       u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+       u32 caps;
        int rc;
 
-       /* TODO: Move phy_data initialisation to
-        * phy_op->probe/remove, rather than init/fini */
-       phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL);
-       if (phy_cfg == NULL) {
-               rc = -ENOMEM;
-               goto fail_alloc;
-       }
-       rc = efx_mcdi_get_phy_cfg(efx, phy_cfg);
+       /* Initialise and populate phy_data */
+       phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+       if (phy_data == NULL)
+               return -ENOMEM;
+
+       rc = efx_mcdi_get_phy_cfg(efx, phy_data);
        if (rc != 0)
                goto fail;
 
-       efx->phy_type = phy_cfg->type;
+       /* Read initial link advertisement */
+       BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+       rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+                         outbuf, sizeof(outbuf), NULL);
+       if (rc)
+               goto fail;
+
+       /* Fill out nic state */
+       efx->phy_data = phy_data;
+       efx->phy_type = phy_data->type;
 
-       efx->mdio_bus = phy_cfg->channel;
-       efx->mdio.prtad = phy_cfg->port;
-       efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
+       efx->mdio_bus = phy_data->channel;
+       efx->mdio.prtad = phy_data->port;
+       efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22);
        efx->mdio.mode_support = 0;
-       if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
+       if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22))
                efx->mdio.mode_support |= MDIO_SUPPORTS_C22;
-       if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
+       if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22))
                efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
 
+       caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
+       if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
+               efx->link_advertising =
+                       mcdi_to_ethtool_cap(phy_data->media, caps);
+       else
+               phy_data->forced_cap = caps;
+
        /* Assert that we can map efx -> mcdi loopback modes */
        BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE);
        BUILD_BUG_ON(LOOPBACK_DATA != MC_CMD_LOOPBACK_DATA);
@@ -365,46 +381,6 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
         * but by convention we don't */
        efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
 
-       kfree(phy_cfg);
-
-       return 0;
-
-fail:
-       kfree(phy_cfg);
-fail_alloc:
-       return rc;
-}
-
-static int efx_mcdi_phy_init(struct efx_nic *efx)
-{
-       struct efx_mcdi_phy_cfg *phy_data;
-       u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
-       u32 caps;
-       int rc;
-
-       phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
-       if (phy_data == NULL)
-               return -ENOMEM;
-
-       rc = efx_mcdi_get_phy_cfg(efx, phy_data);
-       if (rc != 0)
-               goto fail;
-
-       efx->phy_data = phy_data;
-
-       BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-       rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
-                         outbuf, sizeof(outbuf), NULL);
-       if (rc)
-               goto fail;
-
-       caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP);
-       if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN))
-               efx->link_advertising =
-                       mcdi_to_ethtool_cap(phy_data->media, caps);
-       else
-               phy_data->forced_cap = caps;
-
        return 0;
 
 fail:
@@ -504,7 +480,7 @@ static bool efx_mcdi_phy_poll(struct efx_nic *efx)
        return !efx_link_state_equal(&efx->link_state, &old_state);
 }
 
-static void efx_mcdi_phy_fini(struct efx_nic *efx)
+static void efx_mcdi_phy_remove(struct efx_nic *efx)
 {
        struct efx_mcdi_phy_data *phy_data = efx->phy_data;
 
@@ -586,10 +562,11 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
 
 struct efx_phy_operations efx_mcdi_phy_ops = {
        .probe          = efx_mcdi_phy_probe,
-       .init           = efx_mcdi_phy_init,
+       .init           = efx_port_dummy_op_int,
        .reconfigure    = efx_mcdi_phy_reconfigure,
        .poll           = efx_mcdi_phy_poll,
-       .fini           = efx_mcdi_phy_fini,
+       .fini           = efx_port_dummy_op_void,
+       .remove         = efx_mcdi_phy_remove,
        .get_settings   = efx_mcdi_phy_get_settings,
        .set_settings   = efx_mcdi_phy_set_settings,
        .run_tests      = NULL,
index 34c381f..d5aab5b 100644 (file)
@@ -524,6 +524,7 @@ struct efx_phy_operations {
        int (*probe) (struct efx_nic *efx);
        int (*init) (struct efx_nic *efx);
        void (*fini) (struct efx_nic *efx);
+       void (*remove) (struct efx_nic *efx);
        int (*reconfigure) (struct efx_nic *efx);
        bool (*poll) (struct efx_nic *efx);
        void (*get_settings) (struct efx_nic *efx,
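
The sfc changes above split the PHY lifecycle: state that exists for the lifetime of the device moves into probe(), its teardown moves into the new remove() op, and drivers whose init()/fini() hooks become empty plug in the efx_port_dummy_op_* helpers instead. A minimal standalone sketch of that ops-table arrangement (userspace C; the structure and function names below are invented for the example, not the driver's):

        #include <stdio.h>
        #include <stdlib.h>

        struct phy;

        struct phy_ops {
                int  (*probe)(struct phy *p);
                int  (*init)(struct phy *p);
                void (*fini)(struct phy *p);
                void (*remove)(struct phy *p);
        };

        struct phy {
                const struct phy_ops *ops;
                void *data;             /* allocated in probe(), freed in remove() */
        };

        /* No-op hooks for PHYs with nothing to do per start/stop. */
        static int dummy_init(struct phy *p) { (void)p; return 0; }
        static void dummy_fini(struct phy *p) { (void)p; }

        static int example_probe(struct phy *p)
        {
                p->data = calloc(1, 64);        /* lifetime-long private state */
                return p->data ? 0 : -1;
        }

        static void example_remove(struct phy *p)
        {
                free(p->data);
                p->data = NULL;
        }

        static const struct phy_ops example_phy_ops = {
                .probe  = example_probe,
                .init   = dummy_init,
                .fini   = dummy_fini,
                .remove = example_remove,
        };

        int main(void)
        {
                struct phy p = { .ops = &example_phy_ops };

                if (p.ops->probe(&p) == 0) {
                        p.ops->init(&p);
                        p.ops->fini(&p);
                        p.ops->remove(&p);
                }
                printf("probe/init/fini/remove sequence ran\n");
                return 0;
        }
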
index a577be2..db44224 100644 (file)
@@ -1576,6 +1576,8 @@ void efx_nic_init_common(struct efx_nic *efx)
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
        /* Prefetch threshold 2 => fetch when descriptor cache half empty */
        EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+       /* Disable hardware watchdog which can misfire */
+       EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
        /* Squash TX of packets of 16 bytes or less */
        if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
                EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
index 3800fc7..ff8f0a4 100644 (file)
@@ -33,6 +33,9 @@
 #define PCS_FW_HEARTBEAT_REG   0xd7ee
 #define PCS_FW_HEARTB_LBN      0
 #define PCS_FW_HEARTB_WIDTH    8
+#define PCS_FW_PRODUCT_CODE_1  0xd7f0
+#define PCS_FW_VERSION_1       0xd7f3
+#define PCS_FW_BUILD_1         0xd7f6
 #define PCS_UC8051_STATUS_REG  0xd7fd
 #define PCS_UC_STATUS_LBN      0
 #define PCS_UC_STATUS_WIDTH    8
@@ -52,14 +55,24 @@ void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
 
 struct qt202x_phy_data {
        enum efx_phy_mode phy_mode;
+       bool bug17190_in_bad_state;
+       unsigned long bug17190_timer;
+       u32 firmware_ver;
 };
 
 #define QT2022C2_MAX_RESET_TIME 500
 #define QT2022C2_RESET_WAIT 10
 
-static int qt2025c_wait_reset(struct efx_nic *efx)
+#define QT2025C_MAX_HEARTB_TIME (5 * HZ)
+#define QT2025C_HEARTB_WAIT 100
+#define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10)
+#define QT2025C_FWSTART_WAIT 100
+
+#define BUG17190_INTERVAL (2 * HZ)
+
+static int qt2025c_wait_heartbeat(struct efx_nic *efx)
 {
-       unsigned long timeout = jiffies + 10 * HZ;
+       unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME;
        int reg, old_counter = 0;
 
        /* Wait for firmware heartbeat to start */
@@ -74,11 +87,25 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
                        old_counter = counter;
                else if (counter != old_counter)
                        break;
-               if (time_after(jiffies, timeout))
+               if (time_after(jiffies, timeout)) {
+                       /* Some cables have EEPROMs that conflict with the
+                        * PHY's on-board EEPROM so it cannot load firmware */
+                       EFX_ERR(efx, "If an SFP+ direct attach cable is"
+                               " connected, please check that it complies"
+                               " with the SFP+ specification\n");
                        return -ETIMEDOUT;
-               msleep(10);
+               }
+               msleep(QT2025C_HEARTB_WAIT);
        }
 
+       return 0;
+}
+
+static int qt2025c_wait_fw_status_good(struct efx_nic *efx)
+{
+       unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME;
+       int reg;
+
        /* Wait for firmware status to look good */
        for (;;) {
                reg = efx_mdio_read(efx, MDIO_MMD_PCS, PCS_UC8051_STATUS_REG);
@@ -90,7 +117,178 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
                        break;
                if (time_after(jiffies, timeout))
                        return -ETIMEDOUT;
+               msleep(QT2025C_FWSTART_WAIT);
+       }
+
+       return 0;
+}
+
+static void qt2025c_restart_firmware(struct efx_nic *efx)
+{
+       /* Restart microcontroller execution of firmware from RAM */
+       efx_mdio_write(efx, 3, 0xe854, 0x00c0);
+       efx_mdio_write(efx, 3, 0xe854, 0x0040);
+       msleep(50);
+}
+
+static int qt2025c_wait_reset(struct efx_nic *efx)
+{
+       int rc;
+
+       rc = qt2025c_wait_heartbeat(efx);
+       if (rc != 0)
+               return rc;
+
+       rc = qt2025c_wait_fw_status_good(efx);
+       if (rc == -ETIMEDOUT) {
+               /* Bug 17689: occasionally heartbeat starts but firmware status
+                * code never progresses beyond 0x00.  Try again, once, after
+                * restarting execution of the firmware image. */
+               EFX_LOG(efx, "bashing QT2025C microcontroller\n");
+               qt2025c_restart_firmware(efx);
+               rc = qt2025c_wait_heartbeat(efx);
+               if (rc != 0)
+                       return rc;
+               rc = qt2025c_wait_fw_status_good(efx);
+       }
+
+       return rc;
+}
+
+static void qt2025c_firmware_id(struct efx_nic *efx)
+{
+       struct qt202x_phy_data *phy_data = efx->phy_data;
+       u8 firmware_id[9];
+       size_t i;
+
+       for (i = 0; i < sizeof(firmware_id); i++)
+               firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS,
+                                              PCS_FW_PRODUCT_CODE_1 + i);
+       EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n",
+                (firmware_id[0] << 8) | firmware_id[1], firmware_id[2],
+                firmware_id[3] >> 4, firmware_id[3] & 0xf,
+                firmware_id[4], firmware_id[5],
+                firmware_id[6], firmware_id[7], firmware_id[8]);
+       phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) |
+                                ((firmware_id[3] & 0x0f) << 16) |
+                                (firmware_id[4] << 8) | firmware_id[5];
+}
+
+static void qt2025c_bug17190_workaround(struct efx_nic *efx)
+{
+       struct qt202x_phy_data *phy_data = efx->phy_data;
+
+       /* The PHY can get stuck in a state where it reports PHY_XS and PMA/PMD
+        * layers up, but PCS down (no block_lock).  If we notice this state
+        * persisting for a couple of seconds, we switch PMA/PMD loopback
+        * briefly on and then off again, which is normally sufficient to
+        * recover it.
+        */
+       if (efx->link_state.up ||
+           !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) {
+               phy_data->bug17190_in_bad_state = false;
+               return;
+       }
+
+       if (!phy_data->bug17190_in_bad_state) {
+               phy_data->bug17190_in_bad_state = true;
+               phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+               return;
+       }
+
+       if (time_after_eq(jiffies, phy_data->bug17190_timer)) {
+               EFX_LOG(efx, "bashing QT2025C PMA/PMD\n");
+               efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+                                 MDIO_PMA_CTRL1_LOOPBACK, true);
                msleep(100);
+               efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1,
+                                 MDIO_PMA_CTRL1_LOOPBACK, false);
+               phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL;
+       }
+}
+
+static int qt2025c_select_phy_mode(struct efx_nic *efx)
+{
+       struct qt202x_phy_data *phy_data = efx->phy_data;
+       struct falcon_board *board = falcon_board(efx);
+       int reg, rc, i;
+       uint16_t phy_op_mode;
+
+       /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+
+        * Self-Configure mode.  Don't attempt any switching if we encounter
+        * older firmware. */
+       if (phy_data->firmware_ver < 0x02000100)
+               return 0;
+
+       /* In general we will get optimal behaviour in "SFP+ Self-Configure"
+        * mode; however, that powers down most of the PHY when no module is
+        * present, so we must use a different mode (any fixed mode will do)
+        * to be sure that loopbacks will work. */
+       phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 0x0038 : 0x0020;
+
+       /* Only change mode if really necessary */
+       reg = efx_mdio_read(efx, 1, 0xc319);
+       if ((reg & 0x0038) == phy_op_mode)
+               return 0;
+       EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode);
+
+       /* This sequence replicates the register writes configured in the boot
+        * EEPROM (including the differences between board revisions), except
+        * that the operating mode is changed, and the PHY is prevented from
+        * unnecessarily reloading the main firmware image again. */
+       efx_mdio_write(efx, 1, 0xc300, 0x0000);
+       /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9
+        * STOPs onto the firmware/module I2C bus to reset it, varies across
+        * board revisions, as the bus is connected to different GPIO/LED
+        * outputs on the PHY.) */
+       if (board->major == 0 && board->minor < 2) {
+               efx_mdio_write(efx, 1, 0xc303, 0x4498);
+               for (i = 0; i < 9; i++) {
+                       efx_mdio_write(efx, 1, 0xc303, 0x4488);
+                       efx_mdio_write(efx, 1, 0xc303, 0x4480);
+                       efx_mdio_write(efx, 1, 0xc303, 0x4490);
+                       efx_mdio_write(efx, 1, 0xc303, 0x4498);
+               }
+       } else {
+               efx_mdio_write(efx, 1, 0xc303, 0x0920);
+               efx_mdio_write(efx, 1, 0xd008, 0x0004);
+               for (i = 0; i < 9; i++) {
+                       efx_mdio_write(efx, 1, 0xc303, 0x0900);
+                       efx_mdio_write(efx, 1, 0xd008, 0x0005);
+                       efx_mdio_write(efx, 1, 0xc303, 0x0920);
+                       efx_mdio_write(efx, 1, 0xd008, 0x0004);
+               }
+               efx_mdio_write(efx, 1, 0xc303, 0x4900);
+       }
+       efx_mdio_write(efx, 1, 0xc303, 0x4900);
+       efx_mdio_write(efx, 1, 0xc302, 0x0004);
+       efx_mdio_write(efx, 1, 0xc316, 0x0013);
+       efx_mdio_write(efx, 1, 0xc318, 0x0054);
+       efx_mdio_write(efx, 1, 0xc319, phy_op_mode);
+       efx_mdio_write(efx, 1, 0xc31a, 0x0098);
+       efx_mdio_write(efx, 3, 0x0026, 0x0e00);
+       efx_mdio_write(efx, 3, 0x0027, 0x0013);
+       efx_mdio_write(efx, 3, 0x0028, 0xa528);
+       efx_mdio_write(efx, 1, 0xd006, 0x000a);
+       efx_mdio_write(efx, 1, 0xd007, 0x0009);
+       efx_mdio_write(efx, 1, 0xd008, 0x0004);
+       /* This additional write is not present in the boot EEPROM.  It
+        * prevents the PHY's internal boot ROM doing another pointless (and
+        * slow) reload of the firmware image (the microcontroller's code
+        * memory is not affected by the microcontroller reset). */
+       efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+       efx_mdio_write(efx, 1, 0xc300, 0x0002);
+       msleep(20);
+
+       /* Restart microcontroller execution of firmware from RAM */
+       qt2025c_restart_firmware(efx);
+
+       /* Wait for the microcontroller to be ready again */
+       rc = qt2025c_wait_reset(efx);
+       if (rc < 0) {
+               EFX_ERR(efx, "PHY microcontroller reset during mode switch "
+                               "timed out\n");
+               return rc;
        }
 
        return 0;
@@ -137,6 +335,16 @@ static int qt202x_reset_phy(struct efx_nic *efx)
 
 static int qt202x_phy_probe(struct efx_nic *efx)
 {
+       struct qt202x_phy_data *phy_data;
+
+       phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
+       if (!phy_data)
+               return -ENOMEM;
+       efx->phy_data = phy_data;
+       phy_data->phy_mode = efx->phy_mode;
+       phy_data->bug17190_in_bad_state = false;
+       phy_data->bug17190_timer = 0;
+
        efx->mdio.mmds = QT202X_REQUIRED_DEVS;
        efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
        efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
@@ -145,7 +353,6 @@ static int qt202x_phy_probe(struct efx_nic *efx)
 
 static int qt202x_phy_init(struct efx_nic *efx)
 {
-       struct qt202x_phy_data *phy_data;
        u32 devid;
        int rc;
 
@@ -155,17 +362,14 @@ static int qt202x_phy_init(struct efx_nic *efx)
                return rc;
        }
 
-       phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
-       if (!phy_data)
-               return -ENOMEM;
-       efx->phy_data = phy_data;
-
        devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
        EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n",
                 devid, efx_mdio_id_oui(devid), efx_mdio_id_model(devid),
                 efx_mdio_id_rev(devid));
 
-       phy_data->phy_mode = efx->phy_mode;
+       if (efx->phy_type == PHY_TYPE_QT2025C)
+               qt2025c_firmware_id(efx);
+
        return 0;
 }
 
@@ -183,6 +387,9 @@ static bool qt202x_phy_poll(struct efx_nic *efx)
        efx->link_state.fd = true;
        efx->link_state.fc = efx->wanted_fc;
 
+       if (efx->phy_type == PHY_TYPE_QT2025C)
+               qt2025c_bug17190_workaround(efx);
+
        return efx->link_state.up != was_up;
 }
 
@@ -191,6 +398,10 @@ static int qt202x_phy_reconfigure(struct efx_nic *efx)
        struct qt202x_phy_data *phy_data = efx->phy_data;
 
        if (efx->phy_type == PHY_TYPE_QT2025C) {
+               int rc = qt2025c_select_phy_mode(efx);
+               if (rc)
+                       return rc;
+
                /* There are several different register bits which can
                 * disable TX (and save power) on direct-attach cables
                 * or optical transceivers, varying somewhat between
@@ -224,7 +435,7 @@ static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecm
        mdio45_ethtool_gset(&efx->mdio, ecmd);
 }
 
-static void qt202x_phy_fini(struct efx_nic *efx)
+static void qt202x_phy_remove(struct efx_nic *efx)
 {
        /* Free the context block */
        kfree(efx->phy_data);
@@ -236,7 +447,8 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
        .init            = qt202x_phy_init,
        .reconfigure     = qt202x_phy_reconfigure,
        .poll            = qt202x_phy_poll,
-       .fini            = qt202x_phy_fini,
+       .fini            = efx_port_dummy_op_void,
+       .remove          = qt202x_phy_remove,
        .get_settings    = qt202x_phy_get_settings,
        .set_settings    = efx_mdio_set_settings,
 };
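
qt2025c_bug17190_workaround() above is a debounced recovery: the PHY is only "bashed" with a PMA/PMD loopback toggle once the bad PCS state has persisted for a full BUG17190_INTERVAL, and the timer is re-armed after each attempt. A rough userspace sketch of the same persist-then-recover timing (invented names, time() standing in for jiffies):

        #include <stdbool.h>
        #include <stdio.h>
        #include <time.h>

        #define RECOVERY_INTERVAL 2             /* seconds, like BUG17190_INTERVAL */

        struct link_monitor {
                bool in_bad_state;
                time_t deadline;
        };

        /* Called from a periodic poll: only kick the recovery action once the
         * suspect state has persisted for a whole interval, then re-arm. */
        static void poll_link(struct link_monitor *m, bool link_up, bool layers_ok)
        {
                if (link_up || !layers_ok) {
                        m->in_bad_state = false;        /* healthy, or a genuine fault */
                        return;
                }
                if (!m->in_bad_state) {
                        m->in_bad_state = true;         /* start timing the stuck state */
                        m->deadline = time(NULL) + RECOVERY_INTERVAL;
                        return;
                }
                if (time(NULL) >= m->deadline) {
                        printf("stuck for %ds, toggling loopback to recover\n",
                               RECOVERY_INTERVAL);
                        m->deadline = time(NULL) + RECOVERY_INTERVAL;
                }
        }

        int main(void)
        {
                struct link_monitor m = { false, 0 };

                poll_link(&m, false, true);     /* enters the suspect state */
                poll_link(&m, false, true);     /* still inside the interval: no action */
                poll_link(&m, true,  true);     /* link came up: state cleared */
                return 0;
        }
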
index de07a4f..f8c6771 100644 (file)
@@ -133,6 +133,7 @@ static int siena_probe_port(struct efx_nic *efx)
 
 void siena_remove_port(struct efx_nic *efx)
 {
+       efx->phy_op->remove(efx);
        efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
 
index ca11572..3009c29 100644 (file)
@@ -202,10 +202,14 @@ static ssize_t set_phy_short_reach(struct device *dev,
        int rc;
 
        rtnl_lock();
-       efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
-                         MDIO_PMA_10GBT_TXPWR_SHORT,
-                         count != 0 && *buf != '0');
-       rc = efx_reconfigure_port(efx);
+       if (efx->state != STATE_RUNNING) {
+               rc = -EBUSY;
+       } else {
+               efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR,
+                                 MDIO_PMA_10GBT_TXPWR_SHORT,
+                                 count != 0 && *buf != '0');
+               rc = efx_reconfigure_port(efx);
+       }
        rtnl_unlock();
 
        return rc < 0 ? rc : (ssize_t)count;
@@ -298,36 +302,62 @@ static int tenxpress_init(struct efx_nic *efx)
        return 0;
 }
 
-static int sfx7101_phy_probe(struct efx_nic *efx)
+static int tenxpress_phy_probe(struct efx_nic *efx)
 {
-       efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
-       efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-       efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
-       return 0;
-}
+       struct tenxpress_phy_data *phy_data;
+       int rc;
+
+       /* Allocate phy private storage */
+       phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+       if (!phy_data)
+               return -ENOMEM;
+       efx->phy_data = phy_data;
+       phy_data->phy_mode = efx->phy_mode;
+
+       /* Create any special files */
+       if (efx->phy_type == PHY_TYPE_SFT9001B) {
+               rc = device_create_file(&efx->pci_dev->dev,
+                                       &dev_attr_phy_short_reach);
+               if (rc)
+                       goto fail;
+       }
+
+       if (efx->phy_type == PHY_TYPE_SFX7101) {
+               efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+               efx->mdio.mode_support = MDIO_SUPPORTS_C45;
+
+               efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS;
+
+               efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+                                        ADVERTISED_10000baseT_Full);
+       } else {
+               efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
+               efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+
+               efx->loopback_modes = (SFT9001_LOOPBACKS |
+                                      FALCON_XMAC_LOOPBACKS | 
+                                      FALCON_GMAC_LOOPBACKS);
+
+               efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
+                                        ADVERTISED_10000baseT_Full |
+                                        ADVERTISED_1000baseT_Full |
+                                        ADVERTISED_100baseT_Full);
+       }
 
-static int sft9001_phy_probe(struct efx_nic *efx)
-{
-       efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS;
-       efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
-       efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS |
-                              FALCON_GMAC_LOOPBACKS);
        return 0;
+
+fail:
+       kfree(efx->phy_data);
+       efx->phy_data = NULL;
+       return rc;
 }
 
 static int tenxpress_phy_init(struct efx_nic *efx)
 {
-       struct tenxpress_phy_data *phy_data;
-       int rc = 0;
+       int rc;
 
        falcon_board(efx)->type->init_phy(efx);
 
-       phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
-       if (!phy_data)
-               return -ENOMEM;
-       efx->phy_data = phy_data;
-       phy_data->phy_mode = efx->phy_mode;
-
        if (!(efx->phy_mode & PHY_MODE_SPECIAL)) {
                if (efx->phy_type == PHY_TYPE_SFT9001A) {
                        int reg;
@@ -341,44 +371,27 @@ static int tenxpress_phy_init(struct efx_nic *efx)
 
                rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS);
                if (rc < 0)
-                       goto fail;
+                       return rc;
 
                rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
                if (rc < 0)
-                       goto fail;
+                       return rc;
        }
 
        rc = tenxpress_init(efx);
        if (rc < 0)
-               goto fail;
+               return rc;
 
-       /* Initialise advertising flags */
-       efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg |
-                                 ADVERTISED_10000baseT_Full);
-       if (efx->phy_type != PHY_TYPE_SFX7101)
-               efx->link_advertising |= (ADVERTISED_1000baseT_Full |
-                                          ADVERTISED_100baseT_Full);
+       /* Reinitialise flow control settings */
        efx_link_set_wanted_fc(efx, efx->wanted_fc);
        efx_mdio_an_reconfigure(efx);
 
-       if (efx->phy_type == PHY_TYPE_SFT9001B) {
-               rc = device_create_file(&efx->pci_dev->dev,
-                                       &dev_attr_phy_short_reach);
-               if (rc)
-                       goto fail;
-       }
-
        schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
 
        /* Let XGXS and SerDes out of reset */
        falcon_reset_xaui(efx);
 
        return 0;
-
- fail:
-       kfree(efx->phy_data);
-       efx->phy_data = NULL;
-       return rc;
 }
 
 /* Perform a "special software reset" on the PHY. The caller is
@@ -589,25 +602,26 @@ static bool tenxpress_phy_poll(struct efx_nic *efx)
        return !efx_link_state_equal(&efx->link_state, &old_state);
 }
 
-static void tenxpress_phy_fini(struct efx_nic *efx)
+static void sfx7101_phy_fini(struct efx_nic *efx)
 {
        int reg;
 
+       /* Power down the LNPGA */
+       reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+       efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
+
+       /* Waiting here ensures that the board fini, which can turn
+        * off the power to the PHY, won't get run until the LNPGA
+        * powerdown has been given long enough to complete. */
+       schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+}
+
+static void tenxpress_phy_remove(struct efx_nic *efx)
+{
        if (efx->phy_type == PHY_TYPE_SFT9001B)
                device_remove_file(&efx->pci_dev->dev,
                                   &dev_attr_phy_short_reach);
 
-       if (efx->phy_type == PHY_TYPE_SFX7101) {
-               /* Power down the LNPGA */
-               reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
-               efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg);
-
-               /* Waiting here ensures that the board fini, which can turn
-                * off the power to the PHY, won't get run until the LNPGA
-                * powerdown has been given long enough to complete. */
-               schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
-       }
-
        kfree(efx->phy_data);
        efx->phy_data = NULL;
 }
@@ -819,11 +833,12 @@ static void sft9001_set_npage_adv(struct efx_nic *efx, u32 advertising)
 }
 
 struct efx_phy_operations falcon_sfx7101_phy_ops = {
-       .probe            = sfx7101_phy_probe,
+       .probe            = tenxpress_phy_probe,
        .init             = tenxpress_phy_init,
        .reconfigure      = tenxpress_phy_reconfigure,
        .poll             = tenxpress_phy_poll,
-       .fini             = tenxpress_phy_fini,
+       .fini             = sfx7101_phy_fini,
+       .remove           = tenxpress_phy_remove,
        .get_settings     = tenxpress_get_settings,
        .set_settings     = tenxpress_set_settings,
        .set_npage_adv    = sfx7101_set_npage_adv,
@@ -832,11 +847,12 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
 };
 
 struct efx_phy_operations falcon_sft9001_phy_ops = {
-       .probe            = sft9001_phy_probe,
+       .probe            = tenxpress_phy_probe,
        .init             = tenxpress_phy_init,
        .reconfigure      = tenxpress_phy_reconfigure,
        .poll             = tenxpress_phy_poll,
-       .fini             = tenxpress_phy_fini,
+       .fini             = efx_port_dummy_op_void,
+       .remove           = tenxpress_phy_remove,
        .get_settings     = tenxpress_get_settings,
        .set_settings     = tenxpress_set_settings,
        .set_npage_adv    = sft9001_set_npage_adv,
index e669f94..a8b70ef 100644 (file)
@@ -821,8 +821,6 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
                                           EFX_TXQ_MASK];
                efx_tsoh_free(tx_queue, buffer);
                EFX_BUG_ON_PARANOID(buffer->skb);
-               buffer->len = 0;
-               buffer->continuation = true;
                if (buffer->unmap_len) {
                        unmap_addr = (buffer->dma_addr + buffer->len -
                                      buffer->unmap_len);
@@ -836,6 +834,8 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
                                               PCI_DMA_TODEVICE);
                        buffer->unmap_len = 0;
                }
+               buffer->len = 0;
+               buffer->continuation = true;
        }
 }
 
index 01e99f2..2834a01 100644 (file)
@@ -849,13 +849,13 @@ static void tun_sock_write_space(struct sock *sk)
        if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                wake_up_interruptible_sync(sk->sk_sleep);
 
-       tun = container_of(sk, struct tun_sock, sk)->tun;
+       tun = tun_sk(sk)->tun;
        kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
 }
 
 static void tun_sock_destruct(struct sock *sk)
 {
-       free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev);
+       free_netdev(tun_sk(sk)->tun->dev);
 }
 
 static struct proto tun_proto = {
@@ -990,7 +990,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                sk->sk_write_space = tun_sock_write_space;
                sk->sk_sndbuf = INT_MAX;
 
-               container_of(sk, struct tun_sock, sk)->tun = tun;
+               tun_sk(sk)->tun = tun;
 
                security_tun_dev_post_create(sk);
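
The tun.c hunks above replace the open-coded container_of(sk, struct tun_sock, sk)->tun expressions with a tun_sk() accessor. For reference, a self-contained sketch of the container_of idiom behind such an accessor (userspace C; the *_like types are invented for the example, and the macro is a simplified form rather than the kernel's exact definition):

        #include <stdio.h>
        #include <stddef.h>

        /* Recover the address of the enclosing structure from a pointer
         * to one of its members. */
        #define container_of(ptr, type, member) \
                ((type *)((char *)(ptr) - offsetof(type, member)))

        struct tun_like  { int flags; };
        struct sock_like { int refcnt; };

        /* Mirrors the idea of struct tun_sock: a socket embedded in a
         * larger private structure. */
        struct tun_sock_like {
                struct sock_like sk;
                struct tun_like *tun;
        };

        /* One named helper instead of repeating container_of() at every
         * call site. */
        static inline struct tun_sock_like *tun_sk(struct sock_like *sk)
        {
                return container_of(sk, struct tun_sock_like, sk);
        }

        int main(void)
        {
                struct tun_like tun = { .flags = 1 };
                struct tun_sock_like ts = { .sk = { .refcnt = 1 }, .tun = &tun };

                printf("flags via accessor: %d\n", tun_sk(&ts.sk)->tun->flags);
                return 0;
        }
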
 
index afaf088..41ad2f3 100644 (file)
@@ -1563,7 +1563,10 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
 
 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
 {
-       /* Wait for and prevent any further xmits. */
+       /* Prevent any further xmits, plus detach the device. */
+       netif_device_detach(ugeth->ndev);
+
+       /* Wait for any current xmits to finish. */
        netif_tx_disable(ugeth->ndev);
 
        /* Disable the interrupt to avoid NAPI rescheduling. */
@@ -1577,7 +1580,7 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
 {
        napi_enable(&ugeth->napi);
        enable_irq(ugeth->ug_info->uf_info.irq);
-       netif_tx_wake_all_queues(ugeth->ndev);
+       netif_device_attach(ugeth->ndev);
 }
 
 /* Called every time the controller might need to be made
@@ -1648,25 +1651,28 @@ static void adjust_link(struct net_device *dev)
                        ugeth->oldspeed = phydev->speed;
                }
 
-               /*
-                * To change the MAC configuration we need to disable the
-                * controller. To do so, we have to either grab ugeth->lock,
-                * which is a bad idea since 'graceful stop' commands might
-                * take quite a while, or we can quiesce driver's activity.
-                */
-               ugeth_quiesce(ugeth);
-               ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
-
-               out_be32(&ug_regs->maccfg2, tempval);
-               out_be32(&uf_regs->upsmr, upsmr);
-
-               ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
-               ugeth_activate(ugeth);
-
                if (!ugeth->oldlink) {
                        new_state = 1;
                        ugeth->oldlink = 1;
                }
+
+               if (new_state) {
+                       /*
+                        * To change the MAC configuration we need to disable
+                        * the controller. To do so, we have to either grab
+                        * ugeth->lock, which is a bad idea since 'graceful
+                        * stop' commands might take quite a while, or we can
+                        * quiesce driver's activity.
+                        */
+                       ugeth_quiesce(ugeth);
+                       ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
+
+                       out_be32(&ug_regs->maccfg2, tempval);
+                       out_be32(&uf_regs->upsmr, upsmr);
+
+                       ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
+                       ugeth_activate(ugeth);
+               }
        } else if (ugeth->oldlink) {
                        new_state = 1;
                        ugeth->oldlink = 0;
@@ -3273,7 +3279,7 @@ static int ucc_geth_tx(struct net_device *dev, u8 txQ)
                /* Handle the transmitted buffer and release */
                /* the BD to be used with the current frame  */
 
-               if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0))
+               if (bd == ugeth->txBd[txQ]) /* queue empty? */
                        break;
 
                dev->stats.tx_packets++;
index 593e01f..611b804 100644 (file)
@@ -102,6 +102,7 @@ static const int multicast_filter_limit = 32;
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
 #include <linux/bitops.h>
+#include <linux/workqueue.h>
 #include <asm/processor.h>     /* Processor type for cache alignment. */
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -389,6 +390,7 @@ struct rhine_private {
        struct net_device *dev;
        struct napi_struct napi;
        spinlock_t lock;
+       struct work_struct reset_task;
 
        /* Frequently used values: keep some adjacent for cache effect. */
        u32 quirks;
@@ -407,6 +409,7 @@ struct rhine_private {
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
+static void rhine_reset_task(struct work_struct *work);
 static void rhine_tx_timeout(struct net_device *dev);
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev);
@@ -775,6 +778,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
        dev->irq = pdev->irq;
 
        spin_lock_init(&rp->lock);
+       INIT_WORK(&rp->reset_task, rhine_reset_task);
+
        rp->mii_if.dev = dev;
        rp->mii_if.mdio_read = mdio_read;
        rp->mii_if.mdio_write = mdio_write;
@@ -1179,22 +1184,18 @@ static int rhine_open(struct net_device *dev)
        return 0;
 }
 
-static void rhine_tx_timeout(struct net_device *dev)
+static void rhine_reset_task(struct work_struct *work)
 {
-       struct rhine_private *rp = netdev_priv(dev);
-       void __iomem *ioaddr = rp->base;
-
-       printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
-              "%4.4x, resetting...\n",
-              dev->name, ioread16(ioaddr + IntrStatus),
-              mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+       struct rhine_private *rp = container_of(work, struct rhine_private,
+                                               reset_task);
+       struct net_device *dev = rp->dev;
 
        /* protect against concurrent rx interrupts */
        disable_irq(rp->pdev->irq);
 
        napi_disable(&rp->napi);
 
-       spin_lock(&rp->lock);
+       spin_lock_bh(&rp->lock);
 
        /* clear all descriptors */
        free_tbufs(dev);
@@ -1206,7 +1207,7 @@ static void rhine_tx_timeout(struct net_device *dev)
        rhine_chip_reset(dev);
        init_registers(dev);
 
-       spin_unlock(&rp->lock);
+       spin_unlock_bh(&rp->lock);
        enable_irq(rp->pdev->irq);
 
        dev->trans_start = jiffies;
@@ -1214,6 +1215,19 @@ static void rhine_tx_timeout(struct net_device *dev)
        netif_wake_queue(dev);
 }
 
+static void rhine_tx_timeout(struct net_device *dev)
+{
+       struct rhine_private *rp = netdev_priv(dev);
+       void __iomem *ioaddr = rp->base;
+
+       printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
+              "%4.4x, resetting...\n",
+              dev->name, ioread16(ioaddr + IntrStatus),
+              mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+       schedule_work(&rp->reset_task);
+}
+
 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
                                  struct net_device *dev)
 {
@@ -1830,10 +1844,11 @@ static int rhine_close(struct net_device *dev)
        struct rhine_private *rp = netdev_priv(dev);
        void __iomem *ioaddr = rp->base;
 
-       spin_lock_irq(&rp->lock);
-
-       netif_stop_queue(dev);
        napi_disable(&rp->napi);
+       cancel_work_sync(&rp->reset_task);
+       netif_stop_queue(dev);
+
+       spin_lock_irq(&rp->lock);
 
        if (debug > 1)
                printk(KERN_DEBUG "%s: Shutting down ethercard, "
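
The via-rhine change above moves the actual recovery out of rhine_tx_timeout() into a work item: the timeout handler just logs and calls schedule_work(), the heavy teardown and re-init run later in rhine_reset_task() under spin_lock_bh(), and rhine_close() cancels any pending work before shutting down. A very small sketch of that log-and-defer split (plain userspace C with no real workqueue; names invented):

        #include <stdio.h>

        struct reset_work {
                int queued;
                const char *dev;
        };

        static struct reset_work work;

        /* Stand-in for schedule_work(): just mark the job pending. */
        static void schedule_reset(const char *dev)
        {
                work.queued = 1;
                work.dev = dev;
        }

        /* The slow part: runs later, outside the timeout context, where
         * sleeping and taking locks with bottom halves enabled is fine. */
        static void reset_task(const char *dev)
        {
                printf("%s: resetting hardware in process context\n", dev);
        }

        /* The timeout handler itself stays cheap: log and defer. */
        static void tx_timeout(const char *dev)
        {
                printf("%s: transmit timed out, scheduling reset\n", dev);
                schedule_reset(dev);
        }

        int main(void)
        {
                tx_timeout("eth0");
                if (work.queued)
                        reset_task(work.dev);   /* the "worker" runs the deferred job */
                return 0;
        }
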
index f1c4b2a..0fdfd58 100644 (file)
@@ -4087,21 +4087,21 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
                goto _exit0;
        }
 
-       if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 64bit DMA", __func__);
 
                high_dma = 1;
 
                if (pci_set_consistent_dma_mask(pdev,
-                                               0xffffffffffffffffULL)) {
+                                               DMA_BIT_MASK(64))) {
                        vxge_debug_init(VXGE_ERR,
                                "%s : unable to obtain 64bit DMA for "
                                "consistent allocations", __func__);
                        ret = -ENOMEM;
                        goto _exit1;
                }
-       } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
+       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
                vxge_debug_ll_config(VXGE_TRACE,
                        "%s : using 32bit DMA", __func__);
        } else {
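
The vxge hunk above swaps the hand-written 0xffffffffffffffffULL / 0xffffffffUL constants for DMA_BIT_MASK(64) and DMA_BIT_MASK(32). A standalone sketch of what such a mask macro amounts to, assuming the usual definition in which the 64-bit case is special-cased because shifting a 64-bit value by 64 is undefined behaviour:

        #include <stdio.h>

        /* Simplified equivalent of the kernel's DMA_BIT_MASK(): a mask with
         * the low n bits set. */
        #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

        int main(void)
        {
                printf("%016llx\n", (unsigned long long)DMA_BIT_MASK(32)); /* 00000000ffffffff */
                printf("%016llx\n", (unsigned long long)DMA_BIT_MASK(64)); /* ffffffffffffffff */
                return 0;
        }

Using the macro both documents the intended address width and avoids the easy-to-miss typo of writing the wrong number of f's.
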
index a4c086f..e63b7c4 100644 (file)
@@ -1903,17 +1903,6 @@ accept:
                rxs->noise = sc->ah->ah_noise_floor;
                rxs->signal = rxs->noise + rs.rs_rssi;
 
-               /* An rssi of 35 indicates you should be able use
-                * 54 Mbps reliably. A more elaborate scheme can be used
-                * here but it requires a map of SNR/throughput for each
-                * possible mode used */
-               rxs->qual = rs.rs_rssi * 100 / 35;
-
-               /* rssi can be more than 35 though, anything above that
-                * should be considered at 100% */
-               if (rxs->qual > 100)
-                       rxs->qual = 100;
-
                rxs->antenna = rs.rs_antenna;
                rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
                rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
@@ -2381,6 +2370,9 @@ ath5k_init(struct ath5k_softc *sc)
         */
        ath5k_stop_locked(sc);
 
+       /* Set PHY calibration interval */
+       ah->ah_cal_intval = ath5k_calinterval;
+
        /*
         * The basic interface to setting the hardware in a good
         * state is ``reset''.  On return the hardware is known to
@@ -2408,10 +2400,6 @@ ath5k_init(struct ath5k_softc *sc)
 
        /* Set ack to be sent at low bit-rates */
        ath5k_hw_set_ack_bitrate_high(ah, false);
-
-       /* Set PHY calibration inteval */
-       ah->ah_cal_intval = ath5k_calinterval;
-
        ret = 0;
 done:
        mmiowb();
index 71b84d9..efc420c 100644 (file)
@@ -186,7 +186,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
                wait = wait_time;
                while (ath9k_hw_numtxpending(ah, q)) {
                        if ((--wait) == 0) {
-                               ath_print(common, ATH_DBG_QUEUE,
+                               ath_print(common, ATH_DBG_FATAL,
                                          "Failed to stop TX DMA in 100 "
                                          "msec after killing last frame\n");
                                break;
index 0c87771..e185479 100644 (file)
@@ -77,6 +77,9 @@
 #define ATH9K_TXERR_XTXOP          0x08
 #define ATH9K_TXERR_TIMER_EXPIRED  0x10
 #define ATH9K_TX_ACKED            0x20
+#define ATH9K_TXERR_MASK                                               \
+       (ATH9K_TXERR_XRETRY | ATH9K_TXERR_FILT | ATH9K_TXERR_FIFO |     \
+        ATH9K_TXERR_XTXOP | ATH9K_TXERR_TIMER_EXPIRED)
 
 #define ATH9K_TX_BA                0x01
 #define ATH9K_TX_PWRMGMT           0x02
index c487434..996eb90 100644 (file)
@@ -1973,6 +1973,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
        struct ieee80211_hw *hw = sc->hw;
        int r;
 
+       /* Stop ANI */
+       del_timer_sync(&common->ani.timer);
+
        ath9k_hw_set_interrupts(ah, 0);
        ath_drain_all_txq(sc, retry_tx);
        ath_stoprecv(sc);
@@ -2014,6 +2017,9 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
                }
        }
 
+       /* Start ANI */
+       ath_start_ani(common);
+
        return r;
 }
 
@@ -2508,6 +2514,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
                return; /* another wiphy still in use */
        }
 
+       /* Ensure HW is awake when we try to shut it down. */
+       ath9k_ps_wakeup(sc);
+
        if (ah->btcoex_hw.enabled) {
                ath9k_hw_btcoex_disable(ah);
                if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -2528,6 +2537,9 @@ static void ath9k_stop(struct ieee80211_hw *hw)
        /* disable HAL and put h/w to sleep */
        ath9k_hw_disable(ah);
        ath9k_hw_configpcipowersave(ah, 1, 1);
+       ath9k_ps_restore(sc);
+
+       /* Finally, put the chip in FULL SLEEP mode */
        ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
 
        sc->sc_flags |= SC_OP_INVALID;
@@ -2641,8 +2653,10 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
        if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
            (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) {
+               ath9k_ps_wakeup(sc);
                ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
                ath_beacon_return(sc, avp);
+               ath9k_ps_restore(sc);
        }
 
        sc->sc_flags &= ~SC_OP_BEACONS;
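
Several of these hunks bracket hardware access with ath9k_ps_wakeup()/ath9k_ps_restore(). As a rough mental model (a sketch, not the ath9k implementation; names and the missing locking are simplifications), the pair behaves like a reference count: the first waker powers the chip up and it may only sleep again once the last user has called restore, so every register access in between is guaranteed to hit a clocked chip.

/* Illustrative sketch only -- not the ath9k code. */
struct ps_state {
        int wake_refs;  /* callers that currently need the hardware awake */
};

static void ps_wakeup(struct ps_state *ps)
{
        if (ps->wake_refs++ == 0) {
                /* first user: bring the chip out of sleep here */
        }
}

static void ps_restore(struct ps_state *ps)
{
        if (--ps->wake_refs == 0) {
                /* last user done: the chip may go back to sleep */
        }
}

int main(void)
{
        struct ps_state ps = { 0 };

        ps_wakeup(&ps);         /* e.g. around a beacon-queue stop */
        ps_wakeup(&ps);         /* nested user, no extra wakeup */
        ps_restore(&ps);
        ps_restore(&ps);        /* only now may the chip sleep again */
        return 0;
}
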
@@ -3091,15 +3105,21 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
        case IEEE80211_AMPDU_RX_STOP:
                break;
        case IEEE80211_AMPDU_TX_START:
+               ath9k_ps_wakeup(sc);
                ath_tx_aggr_start(sc, sta, tid, ssn);
                ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               ath9k_ps_restore(sc);
                break;
        case IEEE80211_AMPDU_TX_STOP:
+               ath9k_ps_wakeup(sc);
                ath_tx_aggr_stop(sc, sta, tid);
                ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+               ath9k_ps_restore(sc);
                break;
        case IEEE80211_AMPDU_TX_OPERATIONAL:
+               ath9k_ps_wakeup(sc);
                ath_tx_aggr_resume(sc, sta, tid);
+               ath9k_ps_restore(sc);
                break;
        default:
                ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
index 5321f73..f7af5ea 100644 (file)
@@ -96,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
        pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
 }
 
-const static struct ath_bus_ops ath_pci_bus_ops = {
+static const struct ath_bus_ops ath_pci_bus_ops = {
        .read_cachesize = ath_pci_read_cachesize,
        .cleanup = ath_pci_cleanup,
        .eeprom_read = ath_pci_eeprom_read,
index 2a11cc5..fa12b90 100644 (file)
@@ -1108,11 +1108,11 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
        if (npend) {
                int r;
 
-               ath_print(common, ATH_DBG_XMIT,
+               ath_print(common, ATH_DBG_FATAL,
                          "Unable to stop TxDMA. Reset HAL!\n");
 
                spin_lock_bh(&sc->sc_resetlock);
-               r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
+               r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
                if (r)
                        ath_print(common, ATH_DBG_FATAL,
                                  "Unable to reset hardware; reset status %d\n",
@@ -1414,17 +1414,9 @@ static void assign_aggr_tid_seqno(struct sk_buff *skb,
         * For HT capable stations, we save tidno for later use.
         * We also override seqno set by upper layer with the one
         * in tx aggregation state.
-        *
-        * If fragmentation is on, the sequence number is
-        * not overridden, since it has been
-        * incremented by the fragmentation routine.
-        *
-        * FIXME: check if the fragmentation threshold exceeds
-        * IEEE80211 max.
         */
        tid = ATH_AN_2_TID(an, bf->bf_tidno);
-       hdr->seq_ctrl = cpu_to_le16(tid->seq_next <<
-                       IEEE80211_SEQ_SEQ_SHIFT);
+       hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
        bf->bf_seqno = tid->seq_next;
        INCR(tid->seq_next, IEEE80211_SEQ_MAX);
 }
@@ -1636,7 +1628,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
                bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
        }
 
-       if (ieee80211_is_data_qos(fc) && (sc->sc_flags & SC_OP_TXAGGR))
+       if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
+           (sc->sc_flags & SC_OP_TXAGGR))
                assign_aggr_tid_seqno(skb, bf);
 
        bf->bf_mpdu = skb;
@@ -1780,7 +1773,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
        struct ath_wiphy *aphy = hw->priv;
        struct ath_softc *sc = aphy->sc;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       int hdrlen, padsize;
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
+       int padpos, padsize;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ath_tx_control txctl;
 
@@ -1792,7 +1786,6 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
         * BSSes.
         */
        if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
-               struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
                if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
                        sc->tx.seq_no += 0x10;
                hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
@@ -1800,9 +1793,9 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
        }
 
        /* Add the padding after the header if this is not already done */
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       if (hdrlen & 3) {
-               padsize = hdrlen % 4;
+       padpos = ath9k_cmn_padpos(hdr->frame_control);
+       padsize = padpos & 3;
+       if (padsize && skb->len>padpos) {
                if (skb_headroom(skb) < padsize) {
                        ath_print(common, ATH_DBG_XMIT,
                                  "TX CABQ padding failed\n");
@@ -1810,7 +1803,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
                        return;
                }
                skb_push(skb, padsize);
-               memmove(skb->data, skb->data + padsize, hdrlen);
+               memmove(skb->data, skb->data + padsize, padpos);
        }
 
        txctl.txq = sc->beacon.cabq;
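
The padding logic now derives the pad position from the frame type via ath9k_cmn_padpos() rather than the raw header length. Roughly (an illustration, not the exact ath9k helper): for a QoS data frame the 802.11 header is 26 bytes, so padpos = 26 and padsize = padpos & 3 = 2, and two pad bytes are inserted between header and body so the payload starts 4-byte aligned. A standalone sketch of that shuffle, with a plain byte array standing in for the skb:

/* Standalone sketch; 'padpos' stands in for ath9k_cmn_padpos(). */
#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char buf[64];
        unsigned char *data = buf + 8;  /* headroom, as an skb would have */
        size_t padpos = 26;             /* e.g. 24-byte header + 2 for QoS */
        size_t padsize = padpos & 3;    /* 26 & 3 == 2 pad bytes needed */
        size_t len = 40;                /* header + payload */

        memset(buf, 0, sizeof(buf));
        if (padsize) {
                data -= padsize;                        /* skb_push(skb, padsize) */
                memmove(data, data + padsize, padpos);  /* slide header down */
                len += padsize;
        }
        /* header now occupies [0, padpos); payload starts at padpos + padsize,
         * i.e. on a 4-byte boundary (28 here). */
        printf("payload offset %zu, total length %zu\n", padpos + padsize, len);
        return 0;
}

The completion hunk further down performs the inverse (memmove the header up by padsize, then skb_pull), and the new skb->len comparisons against padpos guard against frames too short to contain any padding at all.
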
@@ -1838,7 +1831,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
        struct ieee80211_hw *hw = sc->hw;
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
-       int hdrlen, padsize;
+       struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
+       int padpos, padsize;
 
        ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
 
@@ -1853,14 +1847,14 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
                tx_info->flags |= IEEE80211_TX_STAT_ACK;
        }
 
-       hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-       padsize = hdrlen & 3;
-       if (padsize && hdrlen >= 24) {
+       padpos = ath9k_cmn_padpos(hdr->frame_control);
+       padsize = padpos & 3;
+       if (padsize && skb->len>padpos+padsize) {
                /*
                 * Remove MAC header padding before giving the frame back to
                 * mac80211.
                 */
-               memmove(skb->data + padsize, skb->data, hdrlen);
+               memmove(skb->data + padsize, skb->data, padpos);
                skb_pull(skb, padsize);
        }
 
@@ -2078,7 +2072,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
                                &txq->axq_q, lastbf->list.prev);
 
                txq->axq_depth--;
-               txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_FILT);
+               txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK);
                txq->axq_tx_inprogress = false;
                spin_unlock_bh(&txq->axq_lock);
 
index 027be27..88d1fd0 100644 (file)
@@ -383,160 +383,44 @@ static inline
        }
 }
 
-/* Check if a DMA region fits the device constraints.
- * Returns true, if the region is OK for usage with this device. */
-static inline bool b43_dma_address_ok(struct b43_dmaring *ring,
-                                     dma_addr_t addr, size_t size)
-{
-       switch (ring->type) {
-       case B43_DMA_30BIT:
-               if ((u64)addr + size > (1ULL << 30))
-                       return 0;
-               break;
-       case B43_DMA_32BIT:
-               if ((u64)addr + size > (1ULL << 32))
-                       return 0;
-               break;
-       case B43_DMA_64BIT:
-               /* Currently we can't have addresses beyond
-                * 64bit in the kernel. */
-               break;
-       }
-       return 1;
-}
-
-#define is_4k_aligned(addr)    (((u64)(addr) & 0x0FFFull) == 0)
-#define is_8k_aligned(addr)    (((u64)(addr) & 0x1FFFull) == 0)
-
-static void b43_unmap_and_free_ringmem(struct b43_dmaring *ring, void *base,
-                                      dma_addr_t dmaaddr, size_t size)
-{
-       ssb_dma_unmap_single(ring->dev->dev, dmaaddr, size, DMA_TO_DEVICE);
-       free_pages((unsigned long)base, get_order(size));
-}
-
-static void * __b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                       dma_addr_t *dmaaddr, size_t size,
-                                       gfp_t gfp_flags)
-{
-       void *base;
-
-       base = (void *)__get_free_pages(gfp_flags, get_order(size));
-       if (!base)
-               return NULL;
-       memset(base, 0, size);
-       *dmaaddr = ssb_dma_map_single(ring->dev->dev, base, size,
-                                     DMA_TO_DEVICE);
-       if (ssb_dma_mapping_error(ring->dev->dev, *dmaaddr)) {
-               free_pages((unsigned long)base, get_order(size));
-               return NULL;
-       }
-
-       return base;
-}
-
-static void * b43_get_and_map_ringmem(struct b43_dmaring *ring,
-                                     dma_addr_t *dmaaddr, size_t size)
-{
-       void *base;
-
-       base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                        GFP_KERNEL);
-       if (!base) {
-               b43err(ring->dev->wl, "Failed to allocate or map pages "
-                      "for DMA ringmemory\n");
-               return NULL;
-       }
-       if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-               /* The memory does not fit our device constraints.
-                * Retry with GFP_DMA set to get lower memory. */
-               b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-               base = __b43_get_and_map_ringmem(ring, dmaaddr, size,
-                                                GFP_KERNEL | GFP_DMA);
-               if (!base) {
-                       b43err(ring->dev->wl, "Failed to allocate or map pages "
-                              "in the GFP_DMA region for DMA ringmemory\n");
-                       return NULL;
-               }
-               if (!b43_dma_address_ok(ring, *dmaaddr, size)) {
-                       b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-                       b43err(ring->dev->wl, "Failed to allocate DMA "
-                              "ringmemory that fits device constraints\n");
-                       return NULL;
-               }
-       }
-       /* We expect the memory to be 4k aligned, at least. */
-       if (B43_WARN_ON(!is_4k_aligned(*dmaaddr))) {
-               b43_unmap_and_free_ringmem(ring, base, *dmaaddr, size);
-               return NULL;
-       }
-
-       return base;
-}
-
 static int alloc_ringmemory(struct b43_dmaring *ring)
 {
-       unsigned int required;
-       void *base;
-       dma_addr_t dmaaddr;
-
-       /* There are several requirements to the descriptor ring memory:
-        * - The memory region needs to fit the address constraints for the
-        *   device (same as for frame buffers).
-        * - For 30/32bit DMA devices, the descriptor ring must be 4k aligned.
-        * - For 64bit DMA devices, the descriptor ring must be 8k aligned.
+       gfp_t flags = GFP_KERNEL;
+
+       /* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
+        * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
+        * has shown that 4K is sufficient for the latter as long as the buffer
+        * does not cross an 8K boundary.
+        *
+        * For unknown reasons - possibly a hardware error - the BCM4311 rev
+        * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
+        * which accounts for the GFP_DMA flag below.
+        *
+        * The flags here must match the flags in free_ringmemory below!
         */
-
        if (ring->type == B43_DMA_64BIT)
-               required = ring->nr_slots * sizeof(struct b43_dmadesc64);
-       else
-               required = ring->nr_slots * sizeof(struct b43_dmadesc32);
-       if (B43_WARN_ON(required > 0x1000))
+               flags |= GFP_DMA;
+       ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
+                                                 B43_DMA_RINGMEMSIZE,
+                                                 &(ring->dmabase), flags);
+       if (!ring->descbase) {
+               b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
                return -ENOMEM;
-
-       ring->alloc_descsize = 0x1000;
-       base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-       if (!base)
-               return -ENOMEM;
-       ring->alloc_descbase = base;
-       ring->alloc_dmabase = dmaaddr;
-
-       if ((ring->type != B43_DMA_64BIT) || is_8k_aligned(dmaaddr)) {
-               /* We're on <=32bit DMA, or we already got 8k aligned memory.
-                * That's all we need, so we're fine. */
-               ring->descbase = base;
-               ring->dmabase = dmaaddr;
-               return 0;
-       }
-       b43_unmap_and_free_ringmem(ring, base, dmaaddr, ring->alloc_descsize);
-
-       /* Ok, we failed at the 8k alignment requirement.
-        * Try to force-align the memory region now. */
-       ring->alloc_descsize = 0x2000;
-       base = b43_get_and_map_ringmem(ring, &dmaaddr, ring->alloc_descsize);
-       if (!base)
-               return -ENOMEM;
-       ring->alloc_descbase = base;
-       ring->alloc_dmabase = dmaaddr;
-
-       if (is_8k_aligned(dmaaddr)) {
-               /* We're already 8k aligned. That Ok, too. */
-               ring->descbase = base;
-               ring->dmabase = dmaaddr;
-               return 0;
        }
-       /* Force-align it to 8k */
-       ring->descbase = (void *)((u8 *)base + 0x1000);
-       ring->dmabase = dmaaddr + 0x1000;
-       B43_WARN_ON(!is_8k_aligned(ring->dmabase));
+       memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);
 
        return 0;
 }
 
 static void free_ringmemory(struct b43_dmaring *ring)
 {
-       b43_unmap_and_free_ringmem(ring, ring->alloc_descbase,
-                                  ring->alloc_dmabase, ring->alloc_descsize);
+       gfp_t flags = GFP_KERNEL;
+
+       if (ring->type == B43_DMA_64BIT)
+               flags |= GFP_DMA;
+
+       ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
+                               ring->descbase, ring->dmabase, flags);
 }
 
 /* Reset the RX DMA channel */
@@ -646,14 +530,29 @@ static bool b43_dma_mapping_error(struct b43_dmaring *ring,
        if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
                return 1;
 
-       if (!b43_dma_address_ok(ring, addr, buffersize)) {
-               /* We can't support this address. Unmap it again. */
-               unmap_descbuffer(ring, addr, buffersize, dma_to_device);
-               return 1;
+       switch (ring->type) {
+       case B43_DMA_30BIT:
+               if ((u64)addr + buffersize > (1ULL << 30))
+                       goto address_error;
+               break;
+       case B43_DMA_32BIT:
+               if ((u64)addr + buffersize > (1ULL << 32))
+                       goto address_error;
+               break;
+       case B43_DMA_64BIT:
+               /* Currently we can't have addresses beyond
+                * 64bit in the kernel. */
+               break;
        }
 
        /* The address is OK. */
        return 0;
+
+address_error:
+       /* We can't support this address. Unmap it again. */
+       unmap_descbuffer(ring, addr, buffersize, dma_to_device);
+
+       return 1;
 }
 
 static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
@@ -715,9 +614,6 @@ static int setup_rx_descbuffer(struct b43_dmaring *ring,
        meta->dmaaddr = dmaaddr;
        ring->ops->fill_descriptor(ring, desc, dmaaddr,
                                   ring->rx_buffersize, 0, 0, 0);
-       ssb_dma_sync_single_for_device(ring->dev->dev,
-                                      ring->alloc_dmabase,
-                                      ring->alloc_descsize, DMA_TO_DEVICE);
 
        return 0;
 }
@@ -1354,9 +1250,6 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
        }
        /* Now transfer the whole frame. */
        wmb();
-       ssb_dma_sync_single_for_device(ring->dev->dev,
-                                      ring->alloc_dmabase,
-                                      ring->alloc_descsize, DMA_TO_DEVICE);
        ops->poke_tx(ring, next_slot(ring, slot));
        return 0;
 
index e607b39..f7ab37c 100644 (file)
@@ -157,6 +157,7 @@ struct b43_dmadesc_generic {
 } __attribute__ ((__packed__));
 
 /* Misc DMA constants */
+#define B43_DMA_RINGMEMSIZE            PAGE_SIZE
 #define B43_DMA0_RX_FRAMEOFFSET                30
 
 /* DMA engine tuning knobs */
@@ -246,12 +247,6 @@ struct b43_dmaring {
        /* The QOS priority assigned to this ring. Only used for TX rings.
         * This is the mac80211 "queue" value. */
        u8 queue_prio;
-       /* Pointers and size of the originally allocated and mapped memory
-        * region for the descriptor ring. */
-       void *alloc_descbase;
-       dma_addr_t alloc_dmabase;
-       unsigned int alloc_descsize;
-       /* Pointer to our wireless device. */
        struct b43_wldev *dev;
 #ifdef CONFIG_B43_DEBUG
        /* Maximum number of used slots. */
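
The mapping-error path above folds the old b43_dma_address_ok() logic back in: a mapped buffer is only usable if the whole range addr..addr+size fits inside the engine's 30- or 32-bit window, otherwise it is unmapped and retried from low (GFP_DMA) memory. A self-contained sketch of the range test:

/* Standalone sketch; DMA_30BIT/DMA_32BIT are stand-ins for the b43 ring types. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum dma_type { DMA_30BIT, DMA_32BIT, DMA_64BIT };

static bool dma_range_ok(enum dma_type type, uint64_t addr, uint64_t size)
{
        switch (type) {
        case DMA_30BIT:
                return addr + size <= (1ULL << 30);
        case DMA_32BIT:
                return addr + size <= (1ULL << 32);
        case DMA_64BIT:
        default:
                return true;    /* no reachable address is out of range */
        }
}

int main(void)
{
        /* 32 bytes mapped at 0x3ffffff0 end past the 30-bit window but
         * are fine for a 32-bit engine. */
        printf("%d %d\n", dma_range_ok(DMA_30BIT, 0x3ffffff0ULL, 32),
               dma_range_ok(DMA_32BIT, 0x3ffffff0ULL, 32));    /* prints "0 1" */
        return 0;
}
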
index 7da1dab..234891d 100644 (file)
@@ -681,19 +681,13 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
                snr = rx_stats_sig_avg / rx_stats_noise_diff;
                rx_status.noise = rx_status.signal -
                                        iwl3945_calc_db_from_ratio(snr);
-               rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal,
-                                                        rx_status.noise);
-
-       /* If noise info not available, calculate signal quality indicator (%)
-        *   using just the dBm signal level. */
        } else {
                rx_status.noise = priv->last_rx_noise;
-               rx_status.qual = iwl3945_calc_sig_qual(rx_status.signal, 0);
        }
 
 
-       IWL_DEBUG_STATS(priv, "Rssi %d noise %d qual %d sig_avg %d noise_diff %d\n",
-                       rx_status.signal, rx_status.noise, rx_status.qual,
+       IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
+                       rx_status.signal, rx_status.noise,
                        rx_stats_sig_avg, rx_stats_noise_diff);
 
        header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
@@ -1835,8 +1829,7 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
                rc = -EIO;
        }
 
-       priv->alloc_rxb_page--;
-       free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+       iwl_free_pages(priv, cmd.reply_page);
 
        return rc;
 }
@@ -2836,6 +2829,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
        .use_isr_legacy = true,
        .ht_greenfield_support = false,
        .led_compensation = 64,
+       .broken_powersave = true,
 };
 
 static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2852,6 +2846,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
        .use_isr_legacy = true,
        .ht_greenfield_support = false,
        .led_compensation = 64,
+       .broken_powersave = true,
 };
 
 struct pci_device_id iwl3945_hw_card_ids[] = {
index ecc23ec..531fa12 100644 (file)
@@ -222,7 +222,6 @@ struct iwl3945_ibss_seq {
  *
  *****************************************************************************/
 extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm);
 extern void iwl3945_rx_replenish(void *data);
 extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
 extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
index 386513b..484c5fd 100644 (file)
@@ -1204,7 +1204,7 @@ static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
        iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
 
        /* calculate tx gain adjustment based on power supply voltage */
-       voltage = priv->calib_info->voltage;
+       voltage = le16_to_cpu(priv->calib_info->voltage);
        init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
        voltage_compensation =
            iwl4965_get_voltage_compensation(voltage, init_voltage);
index 4ef6804..bc056e9 100644 (file)
 
 static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
 {
-       u16 *temp_calib = (u16 *)iwl_eeprom_query_addr(priv,
-                                                      EEPROM_5000_TEMPERATURE);
-       /* offset =  temperature -  voltage / coef */
-       s32 offset = (s32)(temp_calib[0] - temp_calib[1] / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
-       return offset;
+       u16 temperature, voltage;
+       __le16 *temp_calib =
+               (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_TEMPERATURE);
+
+       temperature = le16_to_cpu(temp_calib[0]);
+       voltage = le16_to_cpu(temp_calib[1]);
+
+       /* offset = temp - volt / coeff */
+       return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
 }
 
 /* Fixed (non-configurable) rx data from phy */
index e2f8615..33a5866 100644 (file)
@@ -333,14 +333,15 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
 static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
 {
        struct iwl_calib_xtal_freq_cmd cmd;
-       u16 *xtal_calib = (u16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
+       __le16 *xtal_calib =
+               (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
 
        cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
        cmd.hdr.first_group = 0;
        cmd.hdr.groups_num = 1;
        cmd.hdr.data_valid = 1;
-       cmd.cap_pin1 = (u8)xtal_calib[0];
-       cmd.cap_pin2 = (u8)xtal_calib[1];
+       cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
+       cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
        return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
                             (u8 *)&cmd, sizeof(cmd));
 }
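
These hunks, and the OTP/EEPROM read hunks below, all fix the same class of bug: the EEPROM/OTP image is stored in the card's little-endian byte order, so it must be held as __le16 and converted with le16_to_cpu() before use; casting straight to u16 only happens to work on little-endian hosts. A standalone sketch of the difference (plain C, not the kernel helpers):

/* The raw EEPROM bytes are little-endian regardless of the host CPU. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t le16_to_host(const uint8_t *p)
{
        return (uint16_t)(p[0] | (p[1] << 8));  /* what le16_to_cpu() does */
}

int main(void)
{
        const uint8_t eeprom[2] = { 0x34, 0x12 };       /* 0x1234 on the card */
        uint16_t wrong;
        uint16_t right = le16_to_host(eeprom);

        memcpy(&wrong, eeprom, sizeof(wrong));  /* host-order reinterpretation */
        /* On a big-endian host 'wrong' reads back as 0x3412. */
        printf("0x%04x vs 0x%04x\n", wrong, right);
        return 0;
}
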
index fe511cb..b93e491 100644 (file)
@@ -150,7 +150,7 @@ static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
 };
 
 /* mbps, mcs */
-const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
+static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
        {  "1", "BPSK DSSS"},
        {  "2", "QPSK DSSS"},
        {"5.5", "BPSK CCK"},
index b8377ef..1c9866d 100644 (file)
@@ -1842,7 +1842,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
        }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
                size = (size > DEFAULT_DUMP_EVENT_LOG_ENTRIES)
                        ? DEFAULT_DUMP_EVENT_LOG_ENTRIES : size;
 #else
@@ -3173,7 +3173,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
 
        priv->ibss_beacon = NULL;
 
-       spin_lock_init(&priv->lock);
        spin_lock_init(&priv->sta_lock);
        spin_lock_init(&priv->hcmd_lock);
 
@@ -3361,10 +3360,11 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                (unsigned long long) pci_resource_len(pdev, 0));
        IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
 
-       /* this spin lock will be used in apm_ops.init and EEPROM access
+       /* these spin locks will be used in apm_ops.init and EEPROM access
         * we should init now
         */
        spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
        iwl_hw_detect(priv);
        IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
                priv->cfg->name, priv->hw_rev);
index a7bfae0..1ec8cb4 100644 (file)
@@ -77,8 +77,7 @@
  * The MAC (uCode processor, etc.) does not need to be powered up for accessing
  * the CSR registers.
  *
- * NOTE:  Newer devices using one-time-programmable (OTP) memory
- *        require device to be awake in order to read this memory
+ * NOTE:  Device does need to be awake in order to read this memory
  *        via CSR_EEPROM and CSR_OTP registers
  */
 #define CSR_BASE    (0x000)
 /*
  * EEPROM and OTP (one-time-programmable) memory reads
  *
- * NOTE:  For (newer) devices using OTP, device must be awake, initialized via
- *        apm_ops.init() in order to read.  Older devices (3945/4965/5000)
- *        use EEPROM and do not require this.
+ * NOTE:  Device must be awake, initialized via apm_ops.init(),
+ *        in order to read.
  */
 #define CSR_EEPROM_REG          (CSR_BASE+0x02c)
 #define CSR_EEPROM_GP           (CSR_BASE+0x030)
index 2673e9a..165d1f6 100644 (file)
@@ -1168,7 +1168,7 @@ struct iwl_priv {
        u32 last_beacon_time;
        u64 last_tsf;
 
-       /* eeprom */
+       /* eeprom -- this is in the card's little endian byte order */
        u8 *eeprom;
        int    nvm_device_type;
        struct iwl_eeprom_calib_info *calib_info;
@@ -1353,4 +1353,15 @@ static inline int is_channel_ibss(const struct iwl_channel_info *ch)
        return ((ch->flags & EEPROM_CHANNEL_IBSS)) ? 1 : 0;
 }
 
+static inline void __iwl_free_pages(struct iwl_priv *priv, struct page *page)
+{
+       __free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
+
+static inline void iwl_free_pages(struct iwl_priv *priv, unsigned long page)
+{
+       free_pages(page, priv->hw_params.rx_page_order);
+       priv->alloc_rxb_page--;
+}
 #endif                         /* __iwl_dev_h__ */
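
The two new inline helpers exist so the rx-page accounting can never drift from the actual free: the later hunks replace every open-coded free_pages()/__free_pages() plus alloc_rxb_page-- pair with a single iwl_free_pages()/__iwl_free_pages() call. The same idea in a minimal standalone form (made-up names, for illustration only):

/* Bundle the release with its accounting so the two cannot get out of step. */
#include <stdlib.h>

struct pool {
        int outstanding;        /* plays the role of priv->alloc_rxb_page */
};

static void pool_free(struct pool *p, void *page)
{
        free(page);             /* stand-in for free_pages() */
        p->outstanding--;       /* always decremented together with the free */
}
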
index 3946e5c..4a30969 100644 (file)
@@ -370,7 +370,7 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
        return ret;
 }
 
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
 {
        int ret = 0;
        u32 r;
@@ -404,7 +404,7 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
                                CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
                IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
        }
-       *eeprom_data = le16_to_cpu((__force __le16)(r >> 16));
+       *eeprom_data = cpu_to_le16(r >> 16);
        return 0;
 }
 
@@ -413,7 +413,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
  */
 static bool iwl_is_otp_empty(struct iwl_priv *priv)
 {
-       u16 next_link_addr = 0, link_value;
+       u16 next_link_addr = 0;
+       __le16 link_value;
        bool is_empty = false;
 
        /* locate the beginning of OTP link list */
@@ -443,7 +444,8 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
 static int iwl_find_otp_image(struct iwl_priv *priv,
                                        u16 *validblockaddr)
 {
-       u16 next_link_addr = 0, link_value = 0, valid_addr;
+       u16 next_link_addr = 0, valid_addr;
+       __le16 link_value = 0;
        int usedblocks = 0;
 
        /* set addressing mode to absolute to traverse the link list */
@@ -463,7 +465,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
                 * check for more block on the link list
                 */
                valid_addr = next_link_addr;
-               next_link_addr = link_value * sizeof(u16);
+               next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
                IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n",
                               usedblocks, next_link_addr);
                if (iwl_read_otp_word(priv, next_link_addr, &link_value))
@@ -497,7 +499,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
  */
 int iwl_eeprom_init(struct iwl_priv *priv)
 {
-       u16 *e;
+       __le16 *e;
        u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
        int sz;
        int ret;
@@ -516,12 +518,9 @@ int iwl_eeprom_init(struct iwl_priv *priv)
                ret = -ENOMEM;
                goto alloc_err;
        }
-       e = (u16 *)priv->eeprom;
+       e = (__le16 *)priv->eeprom;
 
-       if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
-               /* OTP reads require powered-up chip */
-               priv->cfg->ops->lib->apm_ops.init(priv);
-       }
+       priv->cfg->ops->lib->apm_ops.init(priv);
 
        ret = priv->cfg->ops->lib->eeprom_ops.verify_signature(priv);
        if (ret < 0) {
@@ -562,7 +561,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
                }
                for (addr = validblockaddr; addr < validblockaddr + sz;
                     addr += sizeof(u16)) {
-                       u16 eeprom_data;
+                       __le16 eeprom_data;
 
                        ret = iwl_read_otp_word(priv, addr, &eeprom_data);
                        if (ret)
@@ -570,13 +569,6 @@ int iwl_eeprom_init(struct iwl_priv *priv)
                        e[cache_addr / 2] = eeprom_data;
                        cache_addr += sizeof(u16);
                }
-
-               /*
-                * Now that OTP reads are complete, reset chip to save
-                *   power until we load uCode during "up".
-                */
-               priv->cfg->ops->lib->apm_ops.stop(priv);
-
        } else {
                /* eeprom is an array of 16bit values */
                for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -594,7 +586,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
                                goto done;
                        }
                        r = _iwl_read_direct32(priv, CSR_EEPROM_REG);
-                       e[addr / 2] = le16_to_cpu((__force __le16)(r >> 16));
+                       e[addr / 2] = cpu_to_le16(r >> 16);
                }
        }
        ret = 0;
@@ -603,6 +595,8 @@ done:
 err:
        if (ret)
                iwl_eeprom_free(priv);
+       /* Reset chip to save power until we load uCode during "up". */
+       priv->cfg->ops->lib->apm_ops.stop(priv);
 alloc_err:
        return ret;
 }
@@ -755,7 +749,8 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
        ch_info->ht40_eeprom = *eeprom_ch;
        ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
        ch_info->ht40_flags = eeprom_ch->flags;
-       ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
+       if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+               ch_info->ht40_extension_channel &= ~clear_ht40_extension_channel;
 
        return 0;
 }
index 5cd2b66..0cd9c02 100644 (file)
@@ -137,7 +137,7 @@ struct iwl_eeprom_channel {
  *
  */
 struct iwl_eeprom_enhanced_txpwr {
-       u16 common;
+       __le16 common;
        s8 chain_a_max;
        s8 chain_b_max;
        s8 chain_c_max;
@@ -360,7 +360,7 @@ struct iwl_eeprom_calib_subband_info {
 struct iwl_eeprom_calib_info {
        u8 saturation_power24;  /* half-dBm (e.g. "34" = 17 dBm) */
        u8 saturation_power52;  /* half-dBm */
-       s16 voltage;            /* signed */
+       __le16 voltage;         /* signed */
        struct iwl_eeprom_calib_subband_info
                band_info[EEPROM_TX_POWER_BANDS];
 } __attribute__ ((packed));
index a231659..30e9ea6 100644 (file)
@@ -234,7 +234,7 @@ cancel:
        }
 fail:
        if (cmd->reply_page) {
-               free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
+               iwl_free_pages(priv, cmd->reply_page);
                cmd->reply_page = 0;
        }
 out:
index 6090bc1..6f36b6e 100644 (file)
@@ -345,10 +345,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       __free_pages(rxq->pool[i].page,
-                                    priv->hw_params.rx_page_order);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
-                       priv->alloc_rxb_page--;
                }
        }
 
@@ -416,9 +414,7 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       priv->alloc_rxb_page--;
-                       __free_pages(rxq->pool[i].page,
-                                    priv->hw_params.rx_page_order);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -654,47 +650,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
 }
 EXPORT_SYMBOL(iwl_reply_statistics);
 
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95)   /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- *   about formulas used below. */
-static int iwl_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
-       int sig_qual;
-       int degradation = PERFECT_RSSI - rssi_dbm;
-
-       /* If we get a noise measurement, use signal-to-noise ratio (SNR)
-        * as indicator; formula is (signal dbm - noise dbm).
-        * SNR at or above 40 is a great signal (100%).
-        * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
-        * Weakest usable signal is usually 10 - 15 dB SNR. */
-       if (noise_dbm) {
-               if (rssi_dbm - noise_dbm >= 40)
-                       return 100;
-               else if (rssi_dbm < noise_dbm)
-                       return 0;
-               sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
-       /* Else use just the signal level.
-        * This formula is a least squares fit of data points collected and
-        *   compared with a reference system that had a percentage (%) display
-        *   for signal quality. */
-       } else
-               sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
-                           (15 * RSSI_RANGE + 62 * degradation)) /
-                          (RSSI_RANGE * RSSI_RANGE);
-
-       if (sig_qual > 100)
-               sig_qual = 100;
-       else if (sig_qual < 1)
-               sig_qual = 0;
-
-       return sig_qual;
-}
-
 /* Calc max signal level (dBm) among 3 possible receivers */
 static inline int iwl_calc_rssi(struct iwl_priv *priv,
                                struct iwl_rx_phy_res *rx_resp)
@@ -1105,11 +1060,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
        if (iwl_is_associated(priv) &&
            !test_bit(STATUS_SCANNING, &priv->status)) {
                rx_status.noise = priv->last_rx_noise;
-               rx_status.qual = iwl_calc_sig_qual(rx_status.signal,
-                                                        rx_status.noise);
        } else {
                rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-               rx_status.qual = iwl_calc_sig_qual(rx_status.signal, 0);
        }
 
        /* Reset beacon noise level if not associated. */
@@ -1122,8 +1074,8 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
                iwl_dbg_report_frame(priv, phy_res, len, header, 1);
 #endif
        iwl_dbg_log_rx_data_frame(priv, len, header);
-       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, qual %d, TSF %llu\n",
-               rx_status.signal, rx_status.noise, rx_status.qual,
+       IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
+               rx_status.signal, rx_status.noise,
                (unsigned long long)rx_status.mactime);
 
        /*
index a2b2b83..fa1c89b 100644 (file)
@@ -144,8 +144,7 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
                clear_bit(STATUS_SCAN_HW, &priv->status);
        }
 
-       priv->alloc_rxb_page--;
-       free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+       iwl_free_pages(priv, cmd.reply_page);
 
        return ret;
 }
index cd6a690..cde09a8 100644 (file)
@@ -164,9 +164,7 @@ int iwl_send_add_sta(struct iwl_priv *priv,
                        break;
                }
        }
-
-       priv->alloc_rxb_page--;
-       free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+       iwl_free_pages(priv, cmd.reply_page);
 
        return ret;
 }
@@ -391,9 +389,7 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
                        break;
                }
        }
-
-       priv->alloc_rxb_page--;
-       free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+       iwl_free_pages(priv, cmd.reply_page);
 
        return ret;
 }
index 00da5e1..87ce2bd 100644 (file)
@@ -407,13 +407,14 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
        int txq_id;
 
        /* Tx queues */
-       if (priv->txq)
+       if (priv->txq) {
                for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
                     txq_id++)
                        if (txq_id == IWL_CMD_QUEUE_NUM)
                                iwl_cmd_queue_free(priv);
                        else
                                iwl_tx_queue_free(priv, txq_id);
+       }
        iwl_free_dma_ptr(priv, &priv->kw);
 
        iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
index 2a28a1f..f8e4e4b 100644 (file)
@@ -548,6 +548,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
        txq = &priv->txq[txq_id];
        q = &txq->q;
 
+       if ((iwl_queue_space(q) < q->high_mark))
+               goto drop;
+
        spin_lock_irqsave(&priv->lock, flags);
 
        idx = get_cmd_index(q, q->write_ptr, 0);
@@ -812,7 +815,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
                break;
        }
 
-       free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
+       iwl_free_pages(priv, cmd.reply_page);
 
        return rc;
 }
@@ -1198,9 +1201,7 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       priv->alloc_rxb_page--;
-                       __free_pages(rxq->pool[i].page,
-                                    priv->hw_params.rx_page_order);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
                }
                list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
@@ -1247,10 +1248,8 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
                        pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
                                PAGE_SIZE << priv->hw_params.rx_page_order,
                                PCI_DMA_FROMDEVICE);
-                       __free_pages(rxq->pool[i].page,
-                                    priv->hw_params.rx_page_order);
+                       __iwl_free_pages(priv, rxq->pool[i].page);
                        rxq->pool[i].page = NULL;
-                       priv->alloc_rxb_page--;
                }
        }
 
@@ -1300,47 +1299,6 @@ int iwl3945_calc_db_from_ratio(int sig_ratio)
        return (int)ratio2dB[sig_ratio];
 }
 
-#define PERFECT_RSSI (-20) /* dBm */
-#define WORST_RSSI (-95)   /* dBm */
-#define RSSI_RANGE (PERFECT_RSSI - WORST_RSSI)
-
-/* Calculate an indication of rx signal quality (a percentage, not dBm!).
- * See http://www.ces.clemson.edu/linux/signal_quality.shtml for info
- *   about formulas used below. */
-int iwl3945_calc_sig_qual(int rssi_dbm, int noise_dbm)
-{
-       int sig_qual;
-       int degradation = PERFECT_RSSI - rssi_dbm;
-
-       /* If we get a noise measurement, use signal-to-noise ratio (SNR)
-        * as indicator; formula is (signal dbm - noise dbm).
-        * SNR at or above 40 is a great signal (100%).
-        * Below that, scale to fit SNR of 0 - 40 dB within 0 - 100% indicator.
-        * Weakest usable signal is usually 10 - 15 dB SNR. */
-       if (noise_dbm) {
-               if (rssi_dbm - noise_dbm >= 40)
-                       return 100;
-               else if (rssi_dbm < noise_dbm)
-                       return 0;
-               sig_qual = ((rssi_dbm - noise_dbm) * 5) / 2;
-
-       /* Else use just the signal level.
-        * This formula is a least squares fit of data points collected and
-        *   compared with a reference system that had a percentage (%) display
-        *   for signal quality. */
-       } else
-               sig_qual = (100 * (RSSI_RANGE * RSSI_RANGE) - degradation *
-                           (15 * RSSI_RANGE + 62 * degradation)) /
-                          (RSSI_RANGE * RSSI_RANGE);
-
-       if (sig_qual > 100)
-               sig_qual = 100;
-       else if (sig_qual < 1)
-               sig_qual = 0;
-
-       return sig_qual;
-}
-
 /**
  * iwl3945_rx_handle - Main entry function for receiving responses from uCode
  *
@@ -1688,7 +1646,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
        }
 
 #ifdef CONFIG_IWLWIFI_DEBUG
-       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS))
+       if (!(iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) && !full_log)
                size = (size > DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES)
                        ? DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES : size;
 #else
@@ -3867,7 +3825,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
        priv->retry_rate = 1;
        priv->ibss_beacon = NULL;
 
-       spin_lock_init(&priv->lock);
        spin_lock_init(&priv->sta_lock);
        spin_lock_init(&priv->hcmd_lock);
 
@@ -3936,9 +3893,11 @@ static int iwl3945_setup_mac(struct iwl_priv *priv)
        /* Tell mac80211 our characteristics */
        hw->flags = IEEE80211_HW_SIGNAL_DBM |
                    IEEE80211_HW_NOISE_DBM |
-                   IEEE80211_HW_SPECTRUM_MGMT |
-                   IEEE80211_HW_SUPPORTS_PS |
-                   IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+                   IEEE80211_HW_SPECTRUM_MGMT;
+
+       if (!priv->cfg->broken_powersave)
+               hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+                            IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
 
        hw->wiphy->interface_modes =
                BIT(NL80211_IFTYPE_STATION) |
@@ -4057,10 +4016,11 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
         * PCI Tx retries from interfering with C3 CPU state */
        pci_write_config_byte(pdev, 0x41, 0x00);
 
-       /* this spin lock will be used in apm_ops.init and EEPROM access
+       /* these spin locks will be used in apm_ops.init and EEPROM access
         * we should init now
         */
        spin_lock_init(&priv->reg_lock);
+       spin_lock_init(&priv->lock);
 
        /***********************
         * 4. Read EEPROM
index 5a26bb0..8428111 100644 (file)
@@ -268,7 +268,7 @@ struct iwm_priv {
 
        struct sk_buff_head rx_list;
        struct list_head rx_tickets;
-       struct list_head rx_packets[IWM_RX_ID_HASH];
+       struct list_head rx_packets[IWM_RX_ID_HASH + 1];
        struct workqueue_struct *rx_wq;
        struct work_struct rx_worker;
 
@@ -349,7 +349,7 @@ int iwm_up(struct iwm_priv *iwm);
 int iwm_down(struct iwm_priv *iwm);
 
 /* TX API */
-u16 iwm_tid_to_queue(u16 tid);
+int iwm_tid_to_queue(u16 tid);
 void iwm_tx_credit_inc(struct iwm_priv *iwm, int id, int total_freed_pages);
 void iwm_tx_worker(struct work_struct *work);
 int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
index e4f0f87..c4c0d23 100644 (file)
@@ -76,7 +76,7 @@ static int iwm_stop(struct net_device *ndev)
  */
 static const u16 iwm_1d_to_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
 
-u16 iwm_tid_to_queue(u16 tid)
+int iwm_tid_to_queue(u16 tid)
 {
        if (tid > IWM_UMAC_TID_NR - 2)
                return -EINVAL;
index 1c57c1f..6d6ed74 100644 (file)
@@ -1126,7 +1126,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
 
                if (!stop) {
                        struct iwm_tx_queue *txq;
-                       u16 queue = iwm_tid_to_queue(bit);
+                       int queue = iwm_tid_to_queue(bit);
 
                        if (queue < 0)
                                continue;
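
Changing iwm_tid_to_queue() (and the caller's local) from u16 to int is what makes the -EINVAL error path visible: stored in a u16, -EINVAL wraps to a large positive value and the queue < 0 test can never fire. A standalone demonstration:

/* Demonstration of the signedness bug that the u16 -> int change fixes. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_EINVAL 22       /* numeric value of EINVAL on Linux */

int main(void)
{
        uint16_t q16 = (uint16_t)-EXAMPLE_EINVAL;       /* wraps to 65514 */
        int q = -EXAMPLE_EINVAL;
        int u16_negative = q16 < 0;     /* always 0: 65514 is positive */
        int int_negative = q < 0;       /* 1: the error is detectable */

        printf("%d %d\n", u16_negative, int_negative);  /* prints "0 1" */
        return 0;
}
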
index 2f91c9b..92b7a35 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/netdevice.h>
+#include <linux/if_ether.h>
 #include <linux/if_arp.h>
 #include <linux/kthread.h>
 #include <linux/kfifo.h>
@@ -351,8 +352,7 @@ int lbs_add_mesh(struct lbs_private *priv)
 
        mesh_dev->netdev_ops = &mesh_netdev_ops;
        mesh_dev->ethtool_ops = &lbs_ethtool_ops;
-       memcpy(mesh_dev->dev_addr, priv->dev->dev_addr,
-                       sizeof(priv->dev->dev_addr));
+       memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
 
        SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
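
The mesh hunk replaces sizeof(priv->dev->dev_addr) with ETH_ALEN because sizeof measures the dev_addr member itself (a pointer in struct net_device), not the 6-byte MAC address; the added <linux/if_ether.h> include is what provides ETH_ALEN. The classic pitfall in miniature:

/* Standalone illustration of the sizeof-on-a-pointer pitfall. */
#include <stdio.h>

#define ETH_ALEN 6      /* as defined by <linux/if_ether.h> */

struct fake_netdev {
        unsigned char *dev_addr;        /* a pointer, like struct net_device */
};

int main(void)
{
        struct fake_netdev dev = { 0 };

        /* sizeof(dev.dev_addr) is 8 on a 64-bit build and 4 on 32-bit --
         * either way the wrong number of bytes would be copied. */
        printf("sizeof(dev.dev_addr)=%zu, ETH_ALEN=%d\n",
               sizeof(dev.dev_addr), ETH_ALEN);
        return 0;
}
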
 
index c6a6c04..b0b1c78 100644 (file)
@@ -567,11 +567,8 @@ int lbs_scan_networks(struct lbs_private *priv, int full_scan)
        chan_count = lbs_scan_create_channel_list(priv, chan_list);
 
        netif_stop_queue(priv->dev);
-       netif_carrier_off(priv->dev);
-       if (priv->mesh_dev) {
+       if (priv->mesh_dev)
                netif_stop_queue(priv->mesh_dev);
-               netif_carrier_off(priv->mesh_dev);
-       }
 
        /* Prepare to continue an interrupted scan */
        lbs_deb_scan("chan_count %d, scan_channel %d\n",
@@ -635,16 +632,13 @@ out2:
        priv->scan_channel = 0;
 
 out:
-       if (priv->connect_status == LBS_CONNECTED) {
-               netif_carrier_on(priv->dev);
-               if (!priv->tx_pending_len)
-                       netif_wake_queue(priv->dev);
-       }
-       if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED)) {
-               netif_carrier_on(priv->mesh_dev);
-               if (!priv->tx_pending_len)
-                       netif_wake_queue(priv->mesh_dev);
-       }
+       if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
+               netif_wake_queue(priv->dev);
+
+       if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+           !priv->tx_pending_len)
+               netif_wake_queue(priv->mesh_dev);
+
        kfree(chan_list);
 
        lbs_deb_leave_args(LBS_DEB_SCAN, "ret %d", ret);
index a8eb9e1..4b1aab5 100644 (file)
@@ -2025,10 +2025,8 @@ static int lbs_get_essid(struct net_device *dev, struct iw_request_info *info,
        if (priv->connect_status == LBS_CONNECTED) {
                memcpy(extra, priv->curbssparams.ssid,
                       priv->curbssparams.ssid_len);
-               extra[priv->curbssparams.ssid_len] = '\0';
        } else {
                memset(extra, 0, 32);
-               extra[priv->curbssparams.ssid_len] = '\0';
        }
        /*
         * If none, we may want to get the one that was set
index 019431d..26a1abd 100644 (file)
@@ -495,7 +495,6 @@ int lbtf_rx(struct lbtf_private *priv, struct sk_buff *skb)
        stats.band = IEEE80211_BAND_2GHZ;
        stats.signal = prxpd->snr;
        stats.noise = prxpd->nf;
-       stats.qual = prxpd->snr - prxpd->nf;
        /* Marvell rate index has a hole at value 4 */
        if (prxpd->rx_rate > 4)
                --prxpd->rx_rate;
index 7698fdd..31ca241 100644 (file)
@@ -23,7 +23,7 @@
 #define MAX_RID_LEN 1024
 
 /* Helper routine to record keys
- * Do not call from interrupt context */
+ * It is called under orinoco_lock so it may not sleep */
 static int orinoco_set_key(struct orinoco_private *priv, int index,
                           enum orinoco_alg alg, const u8 *key, int key_len,
                           const u8 *seq, int seq_len)
@@ -32,14 +32,14 @@ static int orinoco_set_key(struct orinoco_private *priv, int index,
        kzfree(priv->keys[index].seq);
 
        if (key_len) {
-               priv->keys[index].key = kzalloc(key_len, GFP_KERNEL);
+               priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC);
                if (!priv->keys[index].key)
                        goto nomem;
        } else
                priv->keys[index].key = NULL;
 
        if (seq_len) {
-               priv->keys[index].seq = kzalloc(seq_len, GFP_KERNEL);
+               priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC);
                if (!priv->keys[index].seq)
                        goto free_key;
        } else
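
Both kzalloc() calls switch to GFP_ATOMIC because, as the updated comment says, orinoco_set_key() is called under orinoco_lock: with a spinlock held the allocator must not sleep, and GFP_KERNEL may. A schematic kernel-style fragment of the rule (the function and lock names are made up, and it assumes <linux/slab.h> and <linux/spinlock.h> in a real build):

/* Hypothetical sketch of allocating while a spinlock is held. */
static int store_key_locked(spinlock_t *lock, u8 **slot, size_t len)
{
        u8 *buf;

        spin_lock_bh(lock);
        buf = kzalloc(len, GFP_ATOMIC); /* GFP_KERNEL could sleep here */
        if (!buf) {
                spin_unlock_bh(lock);
                return -ENOMEM;
        }
        *slot = buf;
        spin_unlock_bh(lock);
        return 0;
}
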
index c5fe867..1a7eae3 100644 (file)
 #define PAIRWISE_KEY_ENTRY(__idx) \
        ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
 #define MAC_IVEIV_ENTRY(__idx) \
-       ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
+       ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
 #define MAC_WCID_ATTR_ENTRY(__idx) \
        ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
 #define SHARED_KEY_ENTRY(__idx) \
index eb1e1d0..27bf887 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/module.h>
 
 #include "rt2x00.h"
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
 #include "rt2x00usb.h"
 #endif
 #include "rt2800lib.h"
@@ -1121,7 +1121,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
 
        if (rt2x00_intf_is_usb(rt2x00dev)) {
                rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#ifdef CONFIG_RT2800USB
+#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
                rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
                                            USB_MODE_RESET, REGISTER_TIMEOUT);
 #endif
@@ -2022,6 +2022,12 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        u16 eeprom;
 
        /*
+        * Disable powersaving as default on PCI devices.
+        */
+       if (rt2x00_intf_is_pci(rt2x00dev))
+               rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       /*
         * Initialize all hw fields.
         */
        rt2x00dev->hw->flags =
@@ -2074,8 +2080,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
            IEEE80211_HT_CAP_SGI_20 |
            IEEE80211_HT_CAP_SGI_40 |
            IEEE80211_HT_CAP_TX_STBC |
-           IEEE80211_HT_CAP_RX_STBC |
-           IEEE80211_HT_CAP_PSMP_SUPPORT;
+           IEEE80211_HT_CAP_RX_STBC;
        spec->ht.ampdu_factor = 3;
        spec->ht.ampdu_density = 4;
        spec->ht.mcs.tx_params =
@@ -2140,8 +2145,8 @@ static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
        rt2800_register_multiread(rt2x00dev, offset,
                                      &iveiv_entry, sizeof(iveiv_entry));
 
-       memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
-       memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
+       memcpy(iv16, &iveiv_entry.iv[0], sizeof(*iv16));
+       memcpy(iv32, &iveiv_entry.iv[4], sizeof(*iv32));
 }
 
 static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
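
The rt2800_get_tkip_seq() fix reverses the copy direction: the old code read the IV/EIV entry from the hardware, then overwrote parts of that local copy with the caller's stale iv16/iv32 and threw it away, so the getter never returned anything; it also used sizeof(iv16) (the pointer) instead of sizeof(*iv16). A standalone sketch of the corrected getter, with an invented struct layout:

/* Sketch only; the layout is made up for illustration. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct iveiv { uint8_t iv[8]; };

static void get_tkip_seq(const struct iveiv *entry, uint16_t *iv16, uint32_t *iv32)
{
        /* Destination first: read out of the (hardware) entry into the
         * caller's variables, using sizeof(*ptr), not sizeof(ptr). */
        memcpy(iv16, &entry->iv[0], sizeof(*iv16));
        memcpy(iv32, &entry->iv[4], sizeof(*iv32));
}

int main(void)
{
        struct iveiv entry = { { 0x34, 0x12, 0, 0, 0x78, 0x56, 0x34, 0x12 } };
        uint16_t iv16;
        uint32_t iv32;

        get_tkip_seq(&entry, &iv16, &iv32);
        printf("iv16=%#x iv32=%#x\n", (unsigned)iv16, (unsigned)iv32);
        return 0;
}
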
index af85d18..ab95346 100644 (file)
@@ -922,6 +922,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
        { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
+       { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
        /* Logitec */
        { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
        { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
index 687e17d..0ca5893 100644 (file)
@@ -2539,6 +2539,11 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
        unsigned int i;
 
        /*
+        * Disable powersaving as default.
+        */
+       rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+       /*
         * Initialize all hw fields.
         */
        rt2x00dev->hw->flags =
index a1a3dd1..8a40a14 100644 (file)
@@ -132,7 +132,6 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
 
                        rx_status.antenna = (flags2 >> 15) & 1;
                        /* TODO: improve signal/rssi reporting */
-                       rx_status.qual = flags2 & 0xFF;
                        rx_status.signal = (flags2 >> 8) & 0x7F;
                        /* XXX: is this correct? */
                        rx_status.rate_idx = (flags >> 20) & 0xF;
index 2e733e7..28a8086 100644 (file)
@@ -256,7 +256,7 @@ int wl1251_boot_run_firmware(struct wl1251 *wl)
                }
        }
 
-       if (loop >= INIT_LOOP) {
+       if (loop > INIT_LOOP) {
                wl1251_error("timeout waiting for the hardware to "
                             "complete initialization");
                return -EIO;
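
The >= to > change corrects an off-by-one in the timeout test: assuming a post-increment wait loop of the form while (loop++ < INIT_LOOP), succeeding on the very last pass leaves loop equal to INIT_LOOP, which the old check misreported as a timeout; only loop == INIT_LOOP + 1 means the loop really ran out. A standalone model of that interaction (the loop shape and INIT_LOOP value are assumptions for illustration):

#include <stdio.h>

#define INIT_LOOP 20000

static int wait_ready(int ready_on_pass)
{
        int loop = 0;

        while (loop++ < INIT_LOOP) {
                if (loop == ready_on_pass)      /* "hardware became ready" */
                        break;
        }
        return loop;
}

int main(void)
{
        int last_ok = wait_ready(INIT_LOOP);    /* success on the final pass */
        int timed_out = wait_ready(0);          /* never becomes ready */

        /* last_ok == INIT_LOOP, timed_out == INIT_LOOP + 1, so only
         * 'loop > INIT_LOOP' distinguishes a real timeout. */
        printf("%d %d\n", last_ok > INIT_LOOP, timed_out > INIT_LOOP);  /* 0 1 */
        return 0;
}
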
index 886a9bc..c3385b3 100644 (file)
@@ -777,7 +777,7 @@ out:
        return ret;
 }
 
-static int wl1271_build_basic_rates(char *rates, u8 band)
+static int wl1271_build_basic_rates(u8 *rates, u8 band)
 {
        u8 index = 0;
 
@@ -804,7 +804,7 @@ static int wl1271_build_basic_rates(char *rates, u8 band)
        return index;
 }
 
-static int wl1271_build_extended_rates(char *rates, u8 band)
+static int wl1271_build_extended_rates(u8 *rates, u8 band)
 {
        u8 index = 0;
 
index dfa1b9b..7ca95c4 100644 (file)
@@ -1325,151 +1325,11 @@ int zd_chip_set_basic_rates(struct zd_chip *chip, u16 cr_rates)
        return r;
 }
 
-static int ofdm_qual_db(u8 status_quality, u8 zd_rate, unsigned int size)
-{
-       static const u16 constants[] = {
-               715, 655, 585, 540, 470, 410, 360, 315,
-               270, 235, 205, 175, 150, 125, 105,  85,
-                65,  50,  40,  25,  15
-       };
-
-       int i;
-       u32 x;
-
-       /* It seems that their quality parameter is somehow per signal
-        * and is now transferred per bit.
-        */
-       switch (zd_rate) {
-       case ZD_OFDM_RATE_6M:
-       case ZD_OFDM_RATE_12M:
-       case ZD_OFDM_RATE_24M:
-               size *= 2;
-               break;
-       case ZD_OFDM_RATE_9M:
-       case ZD_OFDM_RATE_18M:
-       case ZD_OFDM_RATE_36M:
-       case ZD_OFDM_RATE_54M:
-               size *= 4;
-               size /= 3;
-               break;
-       case ZD_OFDM_RATE_48M:
-               size *= 3;
-               size /= 2;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       x = (10000 * status_quality)/size;
-       for (i = 0; i < ARRAY_SIZE(constants); i++) {
-               if (x > constants[i])
-                       break;
-       }
-
-       switch (zd_rate) {
-       case ZD_OFDM_RATE_6M:
-       case ZD_OFDM_RATE_9M:
-               i += 3;
-               break;
-       case ZD_OFDM_RATE_12M:
-       case ZD_OFDM_RATE_18M:
-               i += 5;
-               break;
-       case ZD_OFDM_RATE_24M:
-       case ZD_OFDM_RATE_36M:
-               i += 9;
-               break;
-       case ZD_OFDM_RATE_48M:
-       case ZD_OFDM_RATE_54M:
-               i += 15;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return i;
-}
-
-static int ofdm_qual_percent(u8 status_quality, u8 zd_rate, unsigned int size)
-{
-       int r;
-
-       r = ofdm_qual_db(status_quality, zd_rate, size);
-       ZD_ASSERT(r >= 0);
-       if (r < 0)
-               r = 0;
-
-       r = (r * 100)/29;
-       return r <= 100 ? r : 100;
-}
-
-static unsigned int log10times100(unsigned int x)
-{
-       static const u8 log10[] = {
-                 0,
-                 0,   30,   47,   60,   69,   77,   84,   90,   95,  100,
-               104,  107,  111,  114,  117,  120,  123,  125,  127,  130,
-               132,  134,  136,  138,  139,  141,  143,  144,  146,  147,
-               149,  150,  151,  153,  154,  155,  156,  157,  159,  160,
-               161,  162,  163,  164,  165,  166,  167,  168,  169,  169,
-               170,  171,  172,  173,  174,  174,  175,  176,  177,  177,
-               178,  179,  179,  180,  181,  181,  182,  183,  183,  184,
-               185,  185,  186,  186,  187,  188,  188,  189,  189,  190,
-               190,  191,  191,  192,  192,  193,  193,  194,  194,  195,
-               195,  196,  196,  197,  197,  198,  198,  199,  199,  200,
-               200,  200,  201,  201,  202,  202,  202,  203,  203,  204,
-               204,  204,  205,  205,  206,  206,  206,  207,  207,  207,
-               208,  208,  208,  209,  209,  210,  210,  210,  211,  211,
-               211,  212,  212,  212,  213,  213,  213,  213,  214,  214,
-               214,  215,  215,  215,  216,  216,  216,  217,  217,  217,
-               217,  218,  218,  218,  219,  219,  219,  219,  220,  220,
-               220,  220,  221,  221,  221,  222,  222,  222,  222,  223,
-               223,  223,  223,  224,  224,  224,  224,
-       };
-
-       return x < ARRAY_SIZE(log10) ? log10[x] : 225;
-}
-
-enum {
-       MAX_CCK_EVM_DB = 45,
-};
-
-static int cck_evm_db(u8 status_quality)
-{
-       return (20 * log10times100(status_quality)) / 100;
-}
-
-static int cck_snr_db(u8 status_quality)
-{
-       int r = MAX_CCK_EVM_DB - cck_evm_db(status_quality);
-       ZD_ASSERT(r >= 0);
-       return r;
-}
-
-static int cck_qual_percent(u8 status_quality)
-{
-       int r;
-
-       r = cck_snr_db(status_quality);
-       r = (100*r)/17;
-       return r <= 100 ? r : 100;
-}
-
 static inline u8 zd_rate_from_ofdm_plcp_header(const void *rx_frame)
 {
        return ZD_OFDM | zd_ofdm_plcp_header_rate(rx_frame);
 }
 
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
-                     const struct rx_status *status)
-{
-       return (status->frame_status&ZD_RX_OFDM) ?
-               ofdm_qual_percent(status->signal_quality_ofdm,
-                                 zd_rate_from_ofdm_plcp_header(rx_frame),
-                                 size) :
-               cck_qual_percent(status->signal_quality_cck);
-}
-
 /**
  * zd_rx_rate - report zd-rate
  * @rx_frame - received frame
index 9fd8f35..f8bbf7d 100644 (file)
@@ -929,9 +929,6 @@ static inline int zd_get_beacon_interval(struct zd_chip *chip, u32 *interval)
 
 struct rx_status;
 
-u8 zd_rx_qual_percent(const void *rx_frame, unsigned int size,
-                      const struct rx_status *status);
-
 u8 zd_rx_rate(const void *rx_frame, const struct rx_status *status);
 
 struct zd_mc_hash {
index cf51e8f..8ebf5c3 100644 (file)
@@ -828,9 +828,6 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
        stats.freq = zd_channels[_zd_chip_get_channel(&mac->chip) - 1].center_freq;
        stats.band = IEEE80211_BAND_2GHZ;
        stats.signal = status->signal_strength;
-       stats.qual = zd_rx_qual_percent(buffer,
-                                         length - sizeof(struct rx_status),
-                                         status);
 
        rate = zd_rx_rate(buffer, status);
 
index bd588eb..8e210cd 100644 (file)
@@ -121,7 +121,7 @@ struct controller {
 #define PCI_DEVICE_ID_AMD_GOLAM_7450   0x7450
 #define PCI_DEVICE_ID_AMD_POGO_7458    0x7458
 
-/* AMD PCIX bridge registers */
+/* AMD PCI-X bridge registers */
 #define PCIX_MEM_BASE_LIMIT_OFFSET     0x1C
 #define PCIX_MISCII_OFFSET             0x48
 #define PCIX_MISC_BRIDGE_ERRORS_OFFSET 0x80
index e56f9be..4173125 100644 (file)
@@ -305,7 +305,7 @@ struct device_domain_info {
        int segment;            /* PCI domain */
        u8 bus;                 /* PCI bus number */
        u8 devfn;               /* PCI devfn number */
-       struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */
+       struct pci_dev *dev; /* it's NULL for PCIe-to-PCI bridge */
        struct intel_iommu *iommu; /* IOMMU used by this device */
        struct dmar_domain *domain; /* pointer to domain */
 };
@@ -1604,7 +1604,7 @@ domain_context_mapping(struct dmar_domain *domain, struct pci_dev *pdev,
                        return ret;
                parent = parent->bus->self;
        }
-       if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+       if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                return domain_context_mapping_one(domain,
                                        pci_domain_nr(tmp->subordinate),
                                        tmp->subordinate->number, 0,
@@ -3325,7 +3325,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
                                         parent->devfn);
                        parent = parent->bus->self;
                }
-               if (pci_is_pcie(tmp)) /* this is a PCIE-to-PCI bridge */
+               if (pci_is_pcie(tmp)) /* this is a PCIe-to-PCI bridge */
                        iommu_detach_dev(iommu,
                                tmp->subordinate->number, 0);
                else /* this is a legacy PCI bridge */
index 8b65a48..95b8491 100644 (file)
@@ -528,7 +528,7 @@ int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 
        bridge = pci_find_upstream_pcie_bridge(dev);
        if (bridge) {
-               if (pci_is_pcie(bridge))/* this is a PCIE-to-PCI/PCIX bridge */
+               if (pci_is_pcie(bridge))/* this is a PCIe-to-PCI/PCIX bridge */
                        set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
                                (bridge->bus->number << 8) | dev->bus->number);
                else /* this is a legacy PCI bridge */
index cc617dd..7e28295 100644 (file)
@@ -112,11 +112,7 @@ static bool acpi_pci_can_wakeup(struct pci_dev *dev)
 static void acpi_pci_propagate_wakeup_enable(struct pci_bus *bus, bool enable)
 {
        while (bus->parent) {
-               struct pci_dev *bridge = bus->self;
-               int ret;
-
-               ret = acpi_pm_device_sleep_wake(&bridge->dev, enable);
-               if (!ret || pci_is_pcie(bridge))
+               if (!acpi_pm_device_sleep_wake(&bus->self->dev, enable))
                        return;
                bus = bus->parent;
        }
@@ -131,9 +127,7 @@ static int acpi_pci_sleep_wake(struct pci_dev *dev, bool enable)
        if (acpi_pci_can_wakeup(dev))
                return acpi_pm_device_sleep_wake(&dev->dev, enable);
 
-       if (!pci_is_pcie(dev))
-               acpi_pci_propagate_wakeup_enable(dev->bus, enable);
-
+       acpi_pci_propagate_wakeup_enable(dev->bus, enable);
        return 0;
 }
 
index 0bc27e0..864e703 100644 (file)
@@ -1153,11 +1153,11 @@ pci_disable_device(struct pci_dev *dev)
 
 /**
  * pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
  *
  *
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
  * implementation. Architecture implementations can override this.
  */
 int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1168,7 +1168,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
 
 /**
  * pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
  * @state: Reset state to enter into
  *
  *
@@ -2284,6 +2284,21 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
        return 0;
 }
 
+static int pci_dev_specific_reset(struct pci_dev *dev, int probe)
+{
+       struct pci_dev_reset_methods *i;
+
+       for (i = pci_dev_reset_methods; i->reset; i++) {
+               if ((i->vendor == dev->vendor ||
+                    i->vendor == (u16)PCI_ANY_ID) &&
+                   (i->device == dev->device ||
+                    i->device == (u16)PCI_ANY_ID))
+                       return i->reset(dev, probe);
+       }
+
+       return -ENOTTY;
+}
+
 static int pci_dev_reset(struct pci_dev *dev, int probe)
 {
        int rc;
@@ -2296,6 +2311,10 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
                down(&dev->dev.sem);
        }
 
+       rc = pci_dev_specific_reset(dev, probe);
+       if (rc != -ENOTTY)
+               goto done;
+
        rc = pcie_flr(dev, probe);
        if (rc != -ENOTTY)
                goto done;
@@ -2779,6 +2798,11 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
        return 1;
 }
 
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
+{
+}
+EXPORT_SYMBOL(pci_fixup_cardbus);
+
 static int __init pci_setup(char *str)
 {
        while (str) {
index 33ed8e0..709eaa4 100644 (file)
@@ -313,4 +313,12 @@ static inline int pci_resource_alignment(struct pci_dev *dev,
 
 extern void pci_enable_acs(struct pci_dev *dev);
 
+struct pci_dev_reset_methods {
+       u16 vendor;
+       u16 device;
+       int (*reset)(struct pci_dev *dev, int probe);
+};
+
+extern struct pci_dev_reset_methods pci_dev_reset_methods[];
+
 #endif /* DRIVERS_PCI_H */
index b8c925c..9142949 100644 (file)
@@ -3,14 +3,14 @@
 #
 
 config PCIEAER_INJECT
-       tristate "PCIE AER error injector support"
+       tristate "PCIe AER error injector support"
        depends on PCIEAER
        default n
        help
          This enables PCI Express Root Port Advanced Error Reporting
          (AER) software error injector.
 
-         Debuging PCIE AER code is quite difficult because it is hard
+         Debugging PCIe AER code is quite difficult because it is hard
          to trigger various real hardware errors. Software based
          error injection can fake almost all kinds of errors with the
          help of a user space helper tool aer-inject, which can be
index 7fcd533..797d478 100644 (file)
@@ -1,7 +1,7 @@
 /*
- * PCIE AER software error injection support.
+ * PCIe AER software error injection support.
  *
- * Debuging PCIE AER code is quite difficult because it is hard to
+ * Debugging PCIe AER code is quite difficult because it is hard to
  * trigger various real hardware errors. Software based error
  * injection can fake almost all kinds of errors with the help of a
  * user space helper tool aer-inject, which can be gotten from:
@@ -484,5 +484,5 @@ static void __exit aer_inject_exit(void)
 module_init(aer_inject_init);
 module_exit(aer_inject_exit);
 
-MODULE_DESCRIPTION("PCIE AER software error injector");
+MODULE_DESCRIPTION("PCIe AER software error injector");
 MODULE_LICENSE("GPL");
index 97a3459..21f215f 100644 (file)
@@ -155,7 +155,7 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
        mutex_init(&rpc->rpc_mutex);
        init_waitqueue_head(&rpc->wait_release);
 
-       /* Use PCIE bus function to store rpc into PCIE device */
+       /* Use PCIe bus function to store rpc into PCIe device */
        set_service_data(dev, rpc);
 
        return rpc;
index 8edb2f3..0481408 100644 (file)
@@ -24,7 +24,7 @@
  *
  * @return: Zero on success. Nonzero otherwise.
  *
- * Invoked when PCIE bus loads AER service driver. To avoid conflict with
+ * Invoked when PCIe bus loads AER service driver. To avoid conflict with
  * BIOS AER support requires BIOS to yield AER control to OS native driver.
  **/
 int aer_osc_setup(struct pcie_device *pciedev)
index ae672ca..c843a79 100644 (file)
@@ -587,7 +587,7 @@ static void handle_error_source(struct pcie_device *aerdev,
  * aer_enable_rootport - enable Root Port's interrupts when receiving messages
  * @rpc: pointer to a Root Port data structure
  *
- * Invoked when PCIE bus loads AER service driver.
+ * Invoked when PCIe bus loads AER service driver.
  */
 void aer_enable_rootport(struct aer_rpc *rpc)
 {
@@ -597,7 +597,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
        u32 reg32;
 
        pos = pci_pcie_cap(pdev);
-       /* Clear PCIE Capability's Device Status */
+       /* Clear PCIe Capability's Device Status */
        pci_read_config_word(pdev, pos+PCI_EXP_DEVSTA, &reg16);
        pci_write_config_word(pdev, pos+PCI_EXP_DEVSTA, reg16);
 
@@ -631,7 +631,7 @@ void aer_enable_rootport(struct aer_rpc *rpc)
  * disable_root_aer - disable Root Port's interrupts when receiving messages
  * @rpc: pointer to a Root Port data structure
  *
- * Invoked when PCIE bus unloads AER service driver.
+ * Invoked when PCIe bus unloads AER service driver.
  */
 static void disable_root_aer(struct aer_rpc *rpc)
 {
index 44acde7..9d3e4c8 100644 (file)
@@ -184,7 +184,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
 
        if (info->status == 0) {
                AER_PR(info, dev,
-                       "PCIE Bus Error: severity=%s, type=Unaccessible, "
+                       "PCIe Bus Error: severity=%s, type=Unaccessible, "
                        "id=%04x(Unregistered Agent ID)\n",
                        aer_error_severity_string[info->severity], id);
        } else {
@@ -194,7 +194,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)
                agent = AER_GET_AGENT(info->severity, info->status);
 
                AER_PR(info, dev,
-                       "PCIE Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
+                       "PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n",
                        aer_error_severity_string[info->severity],
                        aer_error_layer[layer], id, aer_agent_string[agent]);
 
index 5a01fc7..be53d98 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * File:       drivers/pci/pcie/aspm.c
- * Enabling PCIE link L0s/L1 state and Clock Power Management
+ * Enabling PCIe link L0s/L1 state and Clock Power Management
  *
  * Copyright (C) 2007 Intel
  * Copyright (C) Zhang Yanmin (yanmin.zhang@intel.com)
@@ -499,7 +499,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
        int pos;
        u32 reg32;
        /*
-        * Some functions in a slot might not all be PCIE functions,
+        * Some functions in a slot might not all be PCIe functions,
         * very strange. Disable ASPM for the whole slot
         */
        list_for_each_entry(child, &pdev->subordinate->devices, bus_list) {
index a49452e..34d6517 100644 (file)
@@ -24,7 +24,7 @@
  */
 #define DRIVER_VERSION "v1.0"
 #define DRIVER_AUTHOR "tom.l.nguyen@intel.com"
-#define DRIVER_DESC "PCIE Port Bus Driver"
+#define DRIVER_DESC "PCIe Port Bus Driver"
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
index 7cfa7c3..8726698 100644 (file)
@@ -2629,13 +2629,68 @@ static int __init pci_apply_final_quirks(void)
        if (!pci_cache_line_size) {
                printk(KERN_DEBUG "PCI: CLS %u bytes, default %u\n",
                       cls << 2, pci_dfl_cache_line_size << 2);
-               pci_cache_line_size = cls;
+               pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
        }
 
        return 0;
 }
 
 fs_initcall_sync(pci_apply_final_quirks);
+
+/*
+ * The following are device-specific reset methods which can be used to
+ * reset a single function if other methods (e.g. FLR, PM D0->D3) are
+ * not available.
+ */
+static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
+{
+       int pos;
+
+       /* only implement PCI_CLASS_SERIAL_USB at present */
+       if (dev->class == PCI_CLASS_SERIAL_USB) {
+               pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
+               if (!pos)
+                       return -ENOTTY;
+
+               if (probe)
+                       return 0;
+
+               pci_write_config_byte(dev, pos + 0x4, 1);
+               msleep(100);
+
+               return 0;
+       } else {
+               return -ENOTTY;
+       }
+}
+
+static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
+{
+       int pos;
+
+       pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+       if (!pos)
+               return -ENOTTY;
+
+       if (probe)
+               return 0;
+
+       pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
+                               PCI_EXP_DEVCTL_BCR_FLR);
+       msleep(100);
+
+       return 0;
+}
+
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF   0x10ed
+
+struct pci_dev_reset_methods pci_dev_reset_methods[] = {
+       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82599_SFP_VF,
+                reset_intel_82599_sfp_virtfn },
+       { PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
+               reset_intel_generic_dev },
+       { 0 }
+};
 #else
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {}
 #endif
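
The table added above is consulted by pci_dev_specific_reset() before pcie_flr() and the other generic methods are attempted; entries match on vendor/device, PCI_ANY_ID acts as a wildcard, and a miss returns -ENOTTY so pci_dev_reset() falls through to the remaining resets. A minimal sketch of what an additional quirk entry could look like (the vendor/device IDs and the helper name are hypothetical, not part of this series):

#include <linux/pci.h>
#include <linux/delay.h>

/* Hypothetical device-specific reset for vendor 0x1234, device 0x5678.
 * Following the convention above, a non-zero probe argument only asks
 * whether the reset is supported; it must not touch the hardware.
 */
static int reset_example_dev(struct pci_dev *dev, int probe)
{
	if (probe)
		return 0;		/* supported, nothing to do yet */

	/* device-specific register pokes would go here */
	msleep(100);			/* let the device settle */
	return 0;
}

/* The entry would be added before the { 0 } terminator:
 *	{ 0x1234, 0x5678, reset_example_dev },
 */
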
index 6dae871..4a471dc 100644 (file)
@@ -15,9 +15,9 @@
 
 DECLARE_RWSEM(pci_bus_sem);
 /*
- * find the upstream PCIE-to-PCI bridge of a PCI device
+ * find the upstream PCIe-to-PCI bridge of a PCI device
  * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIE bridge (that is its parent is a
+ * if the device isn't connected to a PCIe bridge (that is its parent is a
  * legacy PCI bridge and the bridge is directly connected to bus 0), return its
  * parent
  */
@@ -37,7 +37,7 @@ pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
                        tmp = pdev;
                        continue;
                }
-               /* PCI device should connect to a PCIE bridge */
+               /* PCI device should connect to a PCIe bridge */
                if (pdev->pcie_type != PCI_EXP_TYPE_PCI_BRIDGE) {
                        /* Busted hardware? */
                        WARN_ON_ONCE(1);
index cdf50f3..d99f846 100644 (file)
@@ -222,7 +222,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
        unsigned int max, pass;
 
        s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
-/*     pcibios_fixup_bus(bus); */
+       pci_fixup_cardbus(bus); 
 
        max = bus->secondary;
        for (pass = 0; pass < 2; pass++)
index 916ccb2..4c7e702 100644 (file)
@@ -323,6 +323,7 @@ static int __init dell_wmi_input_setup(void)
 static int __init dell_wmi_init(void)
 {
        int err;
+       acpi_status status;
 
        if (wmi_has_guid(DELL_EVENT_GUID)) {
                printk(KERN_WARNING "dell-wmi: No known WMI GUID found\n");
@@ -336,14 +337,14 @@ static int __init dell_wmi_init(void)
        if (err)
                return err;
 
-       err = wmi_install_notify_handler(DELL_EVENT_GUID,
+       status = wmi_install_notify_handler(DELL_EVENT_GUID,
                                         dell_wmi_notify, NULL);
-       if (err) {
+       if (ACPI_FAILURE(status)) {
                input_unregister_device(dell_wmi_input_dev);
                printk(KERN_ERR
                        "dell-wmi: Unable to register notify handler - %d\n",
-                       err);
-               return err;
+                       status);
+               return -ENODEV;
        }
 
        return 0;
index 9f93d6c..cc9ad74 100644 (file)
@@ -492,8 +492,7 @@ wmi_notify_handler handler, void *data)
        if (!guid || !handler)
                return AE_BAD_PARAMETER;
 
-       find_guid(guid, &block);
-       if (!block)
+       if (!find_guid(guid, &block))
                return AE_NOT_EXIST;
 
        if (block->handler)
@@ -521,8 +520,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
        if (!guid)
                return AE_BAD_PARAMETER;
 
-       find_guid(guid, &block);
-       if (!block)
+       if (!find_guid(guid, &block))
                return AE_NOT_EXIST;
 
        if (!block->handler)
index 68921d9..b55440b 100644 (file)
@@ -232,6 +232,7 @@ int generic_permission(struct inode *inode, int mask,
        /*
         * Searching includes executable on directories, else just read.
         */
+       mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
        if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
                if (capable(CAP_DAC_READ_SEARCH))
                        return 0;
index 784a919..9b98173 100644 (file)
@@ -845,7 +845,6 @@ static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
  * blk_rq_err_bytes()          : bytes left till the next error boundary
  * blk_rq_sectors()            : sectors left in the entire request
  * blk_rq_cur_sectors()                : sectors left in the current segment
- * blk_rq_err_sectors()                : sectors left till the next error boundary
  */
 static inline sector_t blk_rq_pos(const struct request *rq)
 {
@@ -874,11 +873,6 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
        return blk_rq_cur_bytes(rq) >> 9;
 }
 
-static inline unsigned int blk_rq_err_sectors(const struct request *rq)
-{
-       return blk_rq_err_bytes(rq) >> 9;
-}
-
 /*
  * Request issue related functions.
  */
@@ -1116,11 +1110,18 @@ static inline int queue_alignment_offset(struct request_queue *q)
        return q->limits.alignment_offset;
 }
 
+static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset)
+{
+       unsigned int granularity = max(lim->physical_block_size, lim->io_min);
+
+       offset &= granularity - 1;
+       return (granularity + lim->alignment_offset - offset) & (granularity - 1);
+}
+
 static inline int queue_sector_alignment_offset(struct request_queue *q,
                                                sector_t sector)
 {
-       return ((sector << 9) - q->limits.alignment_offset)
-               & (q->limits.io_min - 1);
+       return queue_limit_alignment_offset(&q->limits, sector << 9);
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
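
The rewritten helper computes the misalignment against the larger of the physical block size and io_min instead of io_min alone. A worked example as a stand-alone C sketch, with assumed values (4096-byte granularity, 512-byte alignment_offset, a 9216-byte offset; none taken from a real device):

#include <stdio.h>

int main(void)
{
	unsigned int granularity = 4096;	/* max(physical_block_size, io_min) */
	unsigned int alignment_offset = 512;
	unsigned int offset = 9216;		/* byte offset being queried */
	unsigned int mis;

	offset &= granularity - 1;		/* 9216 % 4096 = 1024 */
	mis = (granularity + alignment_offset - offset) & (granularity - 1);

	/* 4096 + 512 - 1024 = 3584: adding 3584 bytes to the original offset
	 * lands on the next boundary that matches the device's alignment_offset.
	 */
	printf("%u\n", mis);			/* prints 3584 */
	return 0;
}
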
index d9724a2..163c840 100644 (file)
@@ -832,7 +832,7 @@ struct ieee80211_ht_cap {
 #define IEEE80211_HT_CAP_DELAY_BA              0x0400
 #define IEEE80211_HT_CAP_MAX_AMSDU             0x0800
 #define IEEE80211_HT_CAP_DSSSCCK40             0x1000
-#define IEEE80211_HT_CAP_PSMP_SUPPORT          0x2000
+#define IEEE80211_HT_CAP_RESERVED              0x2000
 #define IEEE80211_HT_CAP_40MHZ_INTOLERANT      0x4000
 #define IEEE80211_HT_CAP_LSIG_TXOP_PROT                0x8000
 
index 699e85c..b230492 100644 (file)
@@ -81,6 +81,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
 #define IN_DEV_FORWARD(in_dev)         IN_DEV_CONF_GET((in_dev), FORWARDING)
 #define IN_DEV_MFORWARD(in_dev)                IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
 #define IN_DEV_RPFILTER(in_dev)                IN_DEV_MAXCONF((in_dev), RP_FILTER)
+#define IN_DEV_SRC_VMARK(in_dev)       IN_DEV_ORCONF((in_dev), SRC_VMARK)
 #define IN_DEV_SOURCE_ROUTE(in_dev)    IN_DEV_ANDCONF((in_dev), \
                                                       ACCEPT_SOURCE_ROUTE)
 #define IN_DEV_ACCEPT_LOCAL(in_dev)    IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
index 3d44e9c..7c6b32a 100644 (file)
@@ -81,7 +81,7 @@ union { \
 }
 
 /**
- * INIT_KFIFO - Initialize a kfifo declared by DECLARED_KFIFO
+ * INIT_KFIFO - Initialize a kfifo declared by DECLARE_KFIFO
  * @name: name of the declared kfifo datatype
  */
 #define INIT_KFIFO(name) \
index bf1e670..5da0690 100644 (file)
@@ -566,6 +566,9 @@ void pcibios_align_resource(void *, struct resource *, resource_size_t,
                                resource_size_t);
 void pcibios_update_irq(struct pci_dev *, int irq);
 
+/* Weak but can be overridden by arch */
+void pci_fixup_cardbus(struct pci_bus *);
+
 /* Generic PCI functions used internally */
 
 extern struct pci_bus *pci_find_bus(int domain, int busnr);
index 877ba03..bd27fbc 100644 (file)
@@ -482,6 +482,7 @@ enum
        NET_IPV4_CONF_ARP_ACCEPT=21,
        NET_IPV4_CONF_ARP_NOTIFY=22,
        NET_IPV4_CONF_ACCEPT_LOCAL=23,
+       NET_IPV4_CONF_SRC_VMARK=24,
        __NET_IPV4_CONF_MAX
 };
 
index 2aff490..0bf3697 100644 (file)
@@ -547,7 +547,6 @@ enum mac80211_rx_flags {
  *     unspecified depending on the hardware capabilities flags
  *     @IEEE80211_HW_SIGNAL_*
  * @noise: noise when receiving this frame, in dBm.
- * @qual: overall signal quality indication, in percent (0-100).
  * @antenna: antenna used
  * @rate_idx: index of data rate into band's supported rates or MCS index if
  *     HT rates are use (RX_FLAG_HT)
@@ -559,7 +558,6 @@ struct ieee80211_rx_status {
        int freq;
        int signal;
        int noise;
-       int __deprecated qual;
        int antenna;
        int rate_idx;
        int flag;
@@ -1737,6 +1735,12 @@ static inline void ieee80211_rx_ni(struct ieee80211_hw *hw,
        local_bh_enable();
 }
 
+/*
+ * The TX headroom reserved by mac80211 for its own tx_status functions.
+ * This is enough for the radiotap header.
+ */
+#define IEEE80211_TX_STATUS_HEADROOM   13
+
 /**
  * ieee80211_tx_status - transmit status callback
  *
index 07e3add..f4105c9 100644 (file)
@@ -2,6 +2,7 @@
 #define __LIBSRP_H__
 
 #include <linux/list.h>
+#include <linux/kfifo.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
 #include <scsi/srp.h>
index d9c77b2..ee22989 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1043,6 +1043,46 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+               unsigned long, prot, unsigned long, flags,
+               unsigned long, fd, unsigned long, pgoff)
+{
+       struct file *file = NULL;
+       unsigned long retval = -EBADF;
+
+       if (!(flags & MAP_ANONYMOUS)) {
+               if (unlikely(flags & MAP_HUGETLB))
+                       return -EINVAL;
+               file = fget(fd);
+               if (!file)
+                       goto out;
+       } else if (flags & MAP_HUGETLB) {
+               struct user_struct *user = NULL;
+               /*
+                * VM_NORESERVE is used because the reservations will be
+                * taken when vm_ops->mmap() is called
+                * A dummy user value is used because we are not locking
+                * memory so no accounting is necessary
+                */
+               len = ALIGN(len, huge_page_size(&default_hstate));
+               file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
+                                               &user, HUGETLB_ANONHUGE_INODE);
+               if (IS_ERR(file))
+                       return PTR_ERR(file);
+       }
+
+       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+       down_write(&current->mm->mmap_sem);
+       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+       up_write(&current->mm->mmap_sem);
+
+       if (file)
+               fput(file);
+out:
+       return retval;
+}
+
 /*
  * Some shared mappings will want the pages marked read-only
  * to track write events. If so, we'll downgrade vm_page_prot
index 8687973..6f9248f 100644 (file)
@@ -1398,6 +1398,31 @@ error_getting_region:
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
 
+SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
+               unsigned long, prot, unsigned long, flags,
+               unsigned long, fd, unsigned long, pgoff)
+{
+       struct file *file = NULL;
+       unsigned long retval = -EBADF;
+
+       if (!(flags & MAP_ANONYMOUS)) {
+               file = fget(fd);
+               if (!file)
+                       goto out;
+       }
+
+       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+       down_write(&current->mm->mmap_sem);
+       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+       up_write(&current->mm->mmap_sem);
+
+       if (file)
+               fput(file);
+out:
+       return retval;
+}
+
 /*
  * split a vma into two pieces at address 'addr', a new vma is allocated either
  * for the first part or the tail.
index b377ce4..7c35ad9 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,10 +4,6 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
-#include <linux/hugetlb.h>
-#include <linux/syscalls.h>
-#include <linux/mman.h>
-#include <linux/file.h>
 #include <asm/uaccess.h>
 
 #define CREATE_TRACE_POINTS
@@ -272,46 +268,6 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
-SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
-               unsigned long, prot, unsigned long, flags,
-               unsigned long, fd, unsigned long, pgoff)
-{
-       struct file * file = NULL;
-       unsigned long retval = -EBADF;
-
-       if (!(flags & MAP_ANONYMOUS)) {
-               if (unlikely(flags & MAP_HUGETLB))
-                       return -EINVAL;
-               file = fget(fd);
-               if (!file)
-                       goto out;
-       } else if (flags & MAP_HUGETLB) {
-               struct user_struct *user = NULL;
-               /*
-                * VM_NORESERVE is used because the reservations will be
-                * taken when vm_ops->mmap() is called
-                * A dummy user value is used because we are not locking
-                * memory so no accounting is necessary
-                */
-               len = ALIGN(len, huge_page_size(&default_hstate));
-               file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
-                                               &user, HUGETLB_ANONHUGE_INODE);
-               if (IS_ERR(file))
-                       return PTR_ERR(file);
-       }
-
-       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
-
-       down_write(&current->mm->mmap_sem);
-       retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
-       up_write(&current->mm->mmap_sem);
-
-       if (file)
-               fput(file);
-out:
-       return retval;
-}
-
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
index a23b45f..de0c2c7 100644 (file)
@@ -250,8 +250,7 @@ struct pktgen_dev {
        __u64 count;            /* Default No packets to send */
        __u64 sofar;            /* How many pkts we've sent so far */
        __u64 tx_bytes;         /* How many bytes we've transmitted */
-       __u64 errors;           /* Errors when trying to transmit,
-                                  pkts will be re-sent */
+       __u64 errors;           /* Errors when trying to transmit */
 
        /* runtime counters relating to clone_skb */
 
@@ -3465,6 +3464,12 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
                pkt_dev->seq_num++;
                pkt_dev->tx_bytes += pkt_dev->last_pkt_size;
                break;
+       case NET_XMIT_DROP:
+       case NET_XMIT_CN:
+       case NET_XMIT_POLICED:
+               /* skb has been consumed */
+               pkt_dev->errors++;
+               break;
        default: /* Drivers are not supposed to return other values! */
                if (net_ratelimit())
                        pr_info("pktgen: %s xmit error: %d\n",
index 5cdbc10..040c4f0 100644 (file)
@@ -1397,6 +1397,7 @@ static struct devinet_sysctl_table {
                DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
                                        "accept_source_route"),
                DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
+               DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
                DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
                DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
                DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
index 3323168..82dbf71 100644 (file)
@@ -252,6 +252,8 @@ int fib_validate_source(__be32 src, __be32 dst, u8 tos, int oif,
                no_addr = in_dev->ifa_list == NULL;
                rpf = IN_DEV_RPFILTER(in_dev);
                accept_local = IN_DEV_ACCEPT_LOCAL(in_dev);
+               if (mark && !IN_DEV_SRC_VMARK(in_dev))
+                       fl.mark = 0;
        }
        rcu_read_unlock();
 
index 3787455..d7dcee6 100644 (file)
@@ -34,9 +34,28 @@ void ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_supported_band *sband,
 
        ht_cap->ht_supported = true;
 
-       ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) & sband->ht_cap.cap;
-       ht_cap->cap &= ~IEEE80211_HT_CAP_SM_PS;
-       ht_cap->cap |= sband->ht_cap.cap & IEEE80211_HT_CAP_SM_PS;
+       /*
+        * The bits listed in this expression should be
+        * the same for the peer and us; if the station
+        * advertises more, then we can't use those, thus
+        * we mask them out.
+        */
+       ht_cap->cap = le16_to_cpu(ht_cap_ie->cap_info) &
+               (sband->ht_cap.cap |
+                ~(IEEE80211_HT_CAP_LDPC_CODING |
+                  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+                  IEEE80211_HT_CAP_GRN_FLD |
+                  IEEE80211_HT_CAP_SGI_20 |
+                  IEEE80211_HT_CAP_SGI_40 |
+                  IEEE80211_HT_CAP_DSSSCCK40));
+       /*
+        * The STBC bits are asymmetric -- if we don't have
+        * TX then mask out the peer's RX and vice versa.
+        */
+       if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
+               ht_cap->cap &= ~IEEE80211_HT_CAP_RX_STBC;
+       if (!(sband->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC))
+               ht_cap->cap &= ~IEEE80211_HT_CAP_TX_STBC;
 
        ampdu_info = ht_cap_ie->ampdu_params_info;
        ht_cap->ampdu_factor =
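
The masking above separates symmetric capability bits, which are only useful when both sides advertise them, from the asymmetric STBC pair, where the peer's RX STBC only matters if we can transmit STBC and vice versa. A stand-alone sketch with made-up capability values (bit positions follow include/linux/ieee80211.h; the scenario itself is invented for illustration):

#include <stdio.h>

#define HT_CAP_SGI_20	0x0020
#define HT_CAP_SGI_40	0x0040
#define HT_CAP_TX_STBC	0x0080
#define HT_CAP_RX_STBC	0x0300

int main(void)
{
	unsigned int our_cap  = HT_CAP_SGI_20 | HT_CAP_TX_STBC;
	unsigned int peer_cap = HT_CAP_SGI_20 | HT_CAP_SGI_40 | HT_CAP_RX_STBC;
	unsigned int cap;

	/* Symmetric bits (only short GI modelled here): keep a peer bit only
	 * if we advertise it too.
	 */
	cap = peer_cap & (our_cap | ~(HT_CAP_SGI_20 | HT_CAP_SGI_40));

	/* Asymmetric STBC: drop the peer's RX if we lack TX, and vice versa. */
	if (!(our_cap & HT_CAP_TX_STBC))
		cap &= ~HT_CAP_RX_STBC;
	if (!(our_cap & HT_CAP_RX_STBC))
		cap &= ~HT_CAP_TX_STBC;

	printf("0x%04x\n", cap);	/* SGI_40 dropped, RX STBC kept: 0x0320 */
	return 0;
}
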
index 10d1385..1f2db64 100644 (file)
@@ -382,6 +382,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
 struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
                                        u8 *bssid,u8 *addr, u32 supp_rates)
 {
+       struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
        int band = local->hw.conf.channel->band;
@@ -397,6 +398,9 @@ struct sta_info *ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata,
                return NULL;
        }
 
+       if (ifibss->state == IEEE80211_IBSS_MLME_SEARCH)
+               return NULL;
+
        if (compare_ether_addr(bssid, sdata->u.ibss.bssid))
                return NULL;
 
index 8116d1a..0d2d948 100644 (file)
@@ -515,6 +515,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
         * and we need some headroom for passing the frame to monitor
         * interfaces, but never both at the same time.
         */
+       BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
+                       sizeof(struct ieee80211_tx_status_rtap_hdr));
        local->tx_headroom = max_t(unsigned int , local->hw.extra_tx_headroom,
                                   sizeof(struct ieee80211_tx_status_rtap_hdr));
 
index d8d50fb..c79e59f 100644 (file)
@@ -915,6 +915,14 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
        sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
                                IEEE80211_STA_BEACON_POLL);
 
+       /*
+        * Always handle WMM once after association regardless
+        * of the first value the AP uses. Setting -1 here has
+        * that effect because the AP value is an unsigned
+        * 4-bit value.
+        */
+       sdata->u.mgd.wmm_last_param_set = -1;
+
        ieee80211_led_assoc(local, 1);
 
        sdata->vif.bss_conf.assoc = 1;
index 8834cc9..27ceaef 100644 (file)
@@ -1419,6 +1419,10 @@ static bool need_dynamic_ps(struct ieee80211_local *local)
        if (!local->ps_sdata)
                return false;
 
+       /* No point if we're going to suspend */
+       if (local->quiescing)
+               return false;
+
        return true;
 }
 
index 78a6e92..dc76267 100644 (file)
@@ -1039,7 +1039,19 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 
        /* restart hardware */
        if (local->open_count) {
+               /*
+                * Upon resume hardware can sometimes be goofy due to
+                * various platform / driver / bus issues, so restarting
+                * the device may at times not work immediately. Propagate
+                * the error.
+                */
                res = drv_start(local);
+               if (res) {
+                       WARN(local->suspended, "Hardware became unavailable "
+                            "upon resume. This could be a software issue "
+                            "prior to suspend or a hardware issue\n");
+                       return res;
+               }
 
                ieee80211_led_radio(local, true);
        }
index 1001db4..82e6002 100644 (file)
@@ -93,7 +93,18 @@ void cfg80211_send_rx_assoc(struct net_device *dev, const u8 *buf, size_t len)
                        }
                }
 
-               WARN_ON(!bss);
+               /*
+                * We might be coming here because the driver reported
+                * a successful association at the same time as the
+                * user requested a deauth. In that case, we will have
+                * removed the BSS from the auth_bsses list due to the
+                * deauth request when the assoc response makes it. If
+                * the two code paths acquire the lock the other way
+                * around, that's just the standard situation of a
+                * deauth being requested while connected.
+                */
+               if (!bss)
+                       goto out;
        } else if (wdev->conn) {
                cfg80211_sme_failed_assoc(wdev);
                /*
index 12dfa62..0c2cbbe 100644 (file)
@@ -601,7 +601,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        struct cfg80211_registered_device *rdev;
        struct wiphy *wiphy;
        struct iw_scan_req *wreq = NULL;
-       struct cfg80211_scan_request *creq;
+       struct cfg80211_scan_request *creq = NULL;
        int i, err, n_channels = 0;
        enum ieee80211_band band;
 
@@ -694,8 +694,10 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        /* translate "Scan for SSID" request */
        if (wreq) {
                if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
-                       if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
-                               return -EINVAL;
+                       if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
+                               err = -EINVAL;
+                               goto out;
+                       }
                        memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
                        creq->ssids[0].ssid_len = wreq->essid_len;
                }
@@ -707,12 +709,15 @@ int cfg80211_wext_siwscan(struct net_device *dev,
        err = rdev->ops->scan(wiphy, dev, creq);
        if (err) {
                rdev->scan_req = NULL;
-               kfree(creq);
+               /* creq will be freed below */
        } else {
                nl80211_send_scan_start(rdev, dev);
+               /* creq now owned by driver */
+               creq = NULL;
                dev_hold(dev);
        }
  out:
+       kfree(creq);
        cfg80211_unlock_rdev(rdev);
        return err;
 }
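
The scan fix funnels every failure path through the single out: label and only clears creq once the driver has accepted it, so the request is freed exactly once and no longer leaked on the -EINVAL path. A minimal sketch of the same ownership hand-off with placeholder names (alloc_req and driver_start_scan are not cfg80211 API):

#include <stdlib.h>

#define MAX_LEN 32

struct req { unsigned long len; };

static struct req *alloc_req(void) { return calloc(1, sizeof(struct req)); }
static int driver_start_scan(struct req *r) { (void)r; return 0; }

static int start_scan(unsigned long len)
{
	struct req *creq;
	int err;

	creq = alloc_req();
	if (!creq)
		return -12;			/* -ENOMEM */

	if (len > MAX_LEN) {
		err = -22;			/* -EINVAL: still freed at out */
		goto out;
	}
	creq->len = len;

	err = driver_start_scan(creq);
	if (!err)
		creq = NULL;			/* driver owns the request now */
out:
	free(creq);				/* no-op once ownership was handed off */
	return err;
}

int main(void)
{
	return start_scan(64) == -22 ? 0 : 1;	/* oversized request is rejected and freed */
}
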
index cb81ca3..4725a54 100644 (file)
@@ -1445,7 +1445,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
        if (!dev)
                goto free_dst;
 
-       /* Copy neighbout for reachability confirmation */
+       /* Copy neighbour for reachability confirmation */
        dst0->neighbour = neigh_clone(dst->neighbour);
 
        xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);
index c569986..656e474 100644 (file)
@@ -441,6 +441,7 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
                              struct snd_pcm_hw_params *params)
 {
        int err;
+       struct aaci *aaci = substream->private_data;
 
        aaci_pcm_hw_free(substream);
        if (aacirun->pcm_open) {
@@ -560,7 +561,6 @@ static int aaci_pcm_open(struct snd_pcm_substream *substream)
 static int aaci_pcm_playback_hw_params(struct snd_pcm_substream *substream,
                                       struct snd_pcm_hw_params *params)
 {
-       struct aaci *aaci = substream->private_data;
        struct aaci_runtime *aacirun = substream->runtime->private_data;
        unsigned int channels = params_channels(params);
        int ret;
@@ -659,7 +659,6 @@ static struct snd_pcm_ops aaci_playback_ops = {
 static int aaci_pcm_capture_hw_params(struct snd_pcm_substream *substream,
                                      struct snd_pcm_hw_params *params)
 {
-       struct aaci *aaci = substream->private_data;
        struct aaci_runtime *aacirun = substream->runtime->private_data;
        int ret;
 
index 29ab46a..25b0641 100644 (file)
@@ -1918,13 +1918,13 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
 
        err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
                                           hw->rate_min, hw->rate_max);
-        if (err < 0)
-                return err;
+       if (err < 0)
+               return err;
 
        err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
                                           hw->period_bytes_min, hw->period_bytes_max);
-        if (err < 0)
-                return err;
+       if (err < 0)
+               return err;
 
        err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIODS,
                                           hw->periods_min, hw->periods_max);
index 5fe34a8..e4581a4 100644 (file)
@@ -42,7 +42,7 @@ static void snd_hda_generate_beep(struct work_struct *work)
                return;
 
        /* generate tone */
-       snd_hda_codec_write_cache(codec, beep->nid, 0,
+       snd_hda_codec_write(codec, beep->nid, 0,
                        AC_VERB_SET_BEEP_CONTROL, beep->tone);
 }
 
@@ -119,7 +119,7 @@ static void snd_hda_do_detach(struct hda_beep *beep)
        beep->dev = NULL;
        cancel_work_sync(&beep->beep_work);
        /* turn off beep for sure */
-       snd_hda_codec_write_cache(beep->codec, beep->nid, 0,
+       snd_hda_codec_write(beep->codec, beep->nid, 0,
                                  AC_VERB_SET_BEEP_CONTROL, 0);
 }
 
@@ -192,7 +192,7 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable)
                beep->enabled = enable;
                if (!enable) {
                        /* turn off beep */
-                       snd_hda_codec_write_cache(beep->codec, beep->nid, 0,
+                       snd_hda_codec_write(beep->codec, beep->nid, 0,
                                                  AC_VERB_SET_BEEP_CONTROL, 0);
                }
                if (beep->mode == HDA_BEEP_MODE_SWREG) {
@@ -239,8 +239,12 @@ int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
        mutex_init(&beep->mutex);
 
        if (beep->mode == HDA_BEEP_MODE_ON) {
-               beep->enabled = 1;
-               snd_hda_do_register(&beep->register_work);
+               int err = snd_hda_do_attach(beep);
+               if (err < 0) {
+                       kfree(beep);
+                       codec->beep = NULL;
+                       return err;
+               }
        }
 
        return 0;
@@ -253,7 +257,7 @@ void snd_hda_detach_beep_device(struct hda_codec *codec)
        if (beep) {
                cancel_work_sync(&beep->register_work);
                cancel_delayed_work(&beep->unregister_work);
-               if (beep->enabled)
+               if (beep->dev)
                        snd_hda_do_detach(beep);
                codec->beep = NULL;
                kfree(beep);
index 950ee5c..f98b47c 100644 (file)
@@ -1327,11 +1327,13 @@ EXPORT_SYMBOL_HDA(snd_hda_query_pin_caps);
  */
 u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
 {
-       u32 pincap = snd_hda_query_pin_caps(codec, nid);
-
-       if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
-               snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
+       u32 pincap;
 
+       if (!codec->no_trigger_sense) {
+               pincap = snd_hda_query_pin_caps(codec, nid);
+               if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? */
+                       snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0);
+       }
        return snd_hda_codec_read(codec, nid, 0,
                                  AC_VERB_GET_PIN_SENSE, 0);
 }
index 1d541b7..0a770a2 100644 (file)
@@ -817,6 +817,7 @@ struct hda_codec {
        unsigned int pin_amp_workaround:1; /* pin out-amp takes index
                                            * (e.g. Conexant codecs)
                                            */
+       unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        unsigned int power_on :1;       /* current (global) power-state */
        unsigned int power_transition :1; /* power-state in transition */
index ff8ad46..ec9c348 100644 (file)
@@ -356,6 +356,7 @@ struct azx_dev {
                                         */
        unsigned char stream_tag;       /* assigned stream */
        unsigned char index;            /* stream index */
+       int device;                     /* last device number assigned to */
 
        unsigned int opened :1;
        unsigned int running :1;
@@ -1441,10 +1442,13 @@ static int __devinit azx_codec_configure(struct azx *chip)
  */
 
 /* assign a stream for the PCM */
-static inline struct azx_dev *azx_assign_device(struct azx *chip, int stream)
+static inline struct azx_dev *
+azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
 {
        int dev, i, nums;
-       if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+       struct azx_dev *res = NULL;
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
                dev = chip->playback_index_offset;
                nums = chip->playback_streams;
        } else {
@@ -1453,10 +1457,15 @@ static inline struct azx_dev *azx_assign_device(struct azx *chip, int stream)
        }
        for (i = 0; i < nums; i++, dev++)
                if (!chip->azx_dev[dev].opened) {
-                       chip->azx_dev[dev].opened = 1;
-                       return &chip->azx_dev[dev];
+                       res = &chip->azx_dev[dev];
+                       if (res->device == substream->pcm->device)
+                               break;
                }
-       return NULL;
+       if (res) {
+               res->opened = 1;
+               res->device = substream->pcm->device;
+       }
+       return res;
 }
 
 /* release the assigned stream */
@@ -1505,7 +1514,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
        int err;
 
        mutex_lock(&chip->open_mutex);
-       azx_dev = azx_assign_device(chip, substream->stream);
+       azx_dev = azx_assign_device(chip, substream);
        if (azx_dev == NULL) {
                mutex_unlock(&chip->open_mutex);
                return -EBUSY;
index 1a36137..69a941c 100644 (file)
@@ -1186,6 +1186,8 @@ static int patch_ad1986a(struct hda_codec *codec)
         */
        spec->multiout.no_share_stream = 1;
 
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -1371,6 +1373,8 @@ static int patch_ad1983(struct hda_codec *codec)
 
        codec->patch_ops = ad198x_patch_ops;
 
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -1813,6 +1817,9 @@ static int patch_ad1981(struct hda_codec *codec)
                codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
                break;
        }
+
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -3118,6 +3125,8 @@ static int patch_ad1988(struct hda_codec *codec)
 #endif
        spec->vmaster_nid = 0x04;
 
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -3330,6 +3339,8 @@ static int patch_ad1884(struct hda_codec *codec)
 
        codec->patch_ops = ad198x_patch_ops;
 
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -4287,6 +4298,8 @@ static int patch_ad1884a(struct hda_codec *codec)
                break;
        }
 
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
@@ -4623,6 +4636,9 @@ static int patch_ad1882(struct hda_codec *codec)
                spec->mixers[2] = ad1882_6stack_mixers;
                break;
        }
+
+       codec->no_trigger_sense = 1;
+
        return 0;
 }
 
index eeda7be..2291a83 100644 (file)
@@ -4453,14 +4453,7 @@ static inline int get_pin_presence(struct hda_codec *codec, hda_nid_t nid)
 {
        if (!nid)
                return 0;
-       /* NOTE: we can't use snd_hda_jack_detect() here because STAC/IDT
-        * codecs behave wrongly when SET_PIN_SENSE is triggered, although
-        * the pincap gives TRIG_REQ bit.
-        */
-       if (snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_SENSE, 0) &
-           AC_PINSENSE_PRESENCE)
-               return 1;
-       return 0;
+       return snd_hda_jack_detect(codec, nid);
 }
 
 static void stac92xx_line_out_detect(struct hda_codec *codec,
@@ -4962,6 +4955,7 @@ static int patch_stac9200(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        spec->num_pins = ARRAY_SIZE(stac9200_pin_nids);
        spec->pin_nids = stac9200_pin_nids;
@@ -5024,6 +5018,7 @@ static int patch_stac925x(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        spec->num_pins = ARRAY_SIZE(stac925x_pin_nids);
        spec->pin_nids = stac925x_pin_nids;
@@ -5108,6 +5103,7 @@ static int patch_stac92hd73xx(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        codec->slave_dig_outs = stac92hd73xx_slave_dig_outs;
        spec->num_pins = ARRAY_SIZE(stac92hd73xx_pin_nids);
@@ -5255,6 +5251,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        codec->slave_dig_outs = stac92hd83xxx_slave_dig_outs;
        spec->digbeep_nid = 0x21;
@@ -5418,6 +5415,7 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        codec->patch_ops = stac92xx_patch_ops;
        spec->num_pins = STAC92HD71BXX_NUM_PINS;
@@ -5661,6 +5659,7 @@ static int patch_stac922x(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        spec->num_pins = ARRAY_SIZE(stac922x_pin_nids);
        spec->pin_nids = stac922x_pin_nids;
@@ -5764,6 +5763,7 @@ static int patch_stac927x(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        codec->slave_dig_outs = stac927x_slave_dig_outs;
        spec->num_pins = ARRAY_SIZE(stac927x_pin_nids);
@@ -5898,6 +5898,7 @@ static int patch_stac9205(struct hda_codec *codec)
        if (spec == NULL)
                return -ENOMEM;
 
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        spec->num_pins = ARRAY_SIZE(stac9205_pin_nids);
        spec->pin_nids = stac9205_pin_nids;
@@ -6053,6 +6054,7 @@ static int patch_stac9872(struct hda_codec *codec)
        spec  = kzalloc(sizeof(*spec), GFP_KERNEL);
        if (spec == NULL)
                return -ENOMEM;
+       codec->no_trigger_sense = 1;
        codec->spec = spec;
        spec->num_pins = ARRAY_SIZE(stac9872_pin_nids);
        spec->pin_nids = stac9872_pin_nids;
index fd9c097..f73de63 100644 (file)
@@ -508,8 +508,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;
 
-       down_read(&kvm->slots_lock);
        mutex_lock(&kvm->lock);
+       down_read(&kvm->slots_lock);
 
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
@@ -573,8 +573,8 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
        }
 
 out:
-       mutex_unlock(&kvm->lock);
        up_read(&kvm->slots_lock);
+       mutex_unlock(&kvm->lock);
        return r;
 out_list_del:
        list_del(&match->list);
@@ -585,8 +585,8 @@ out_put:
        pci_dev_put(dev);
 out_free:
        kfree(match);
-       mutex_unlock(&kvm->lock);
        up_read(&kvm->slots_lock);
+       mutex_unlock(&kvm->lock);
        return r;
 }
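
Both hunks take kvm->lock before slots_lock, so every path follows the hierarchy documented in the next file (kvm->lock, then kvm->slots_lock, then kvm->irq_lock); mixing the order between call sites is what allows an AB-BA deadlock. A minimal sketch of the ordering rule, using pthread primitives purely as stand-ins for the KVM locks:

#include <pthread.h>

static pthread_mutex_t kvm_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Every caller that needs both locks acquires them in this order and
 * releases them in the reverse order.
 */
void assign_device(void)
{
	pthread_mutex_lock(&kvm_lock);
	pthread_rwlock_rdlock(&slots_lock);

	/* ... look up and attach the assigned device ... */

	pthread_rwlock_unlock(&slots_lock);
	pthread_mutex_unlock(&kvm_lock);
}
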
 
index b5af881..a944be3 100644 (file)
@@ -64,7 +64,7 @@ MODULE_LICENSE("GPL");
 /*
  * Ordering of locks:
  *
- *             kvm->slots_lock --> kvm->lock --> kvm->irq_lock
+ *             kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
 DEFINE_SPINLOCK(kvm_lock);
@@ -406,8 +406,11 @@ static struct kvm *kvm_create_vm(void)
 out:
        return kvm;
 
+#if defined(KVM_COALESCED_MMIO_PAGE_OFFSET) || \
+    (defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER))
 out_err:
        hardware_disable_all();
+#endif
 out_err_nodisable:
        kfree(kvm);
        return ERR_PTR(r);