* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6:
ieee1394: change deprecation status of dv1394
set_irq_msi(irq, desc);
dest_phys_id = cpu_physical_id(first_cpu(cpu_online_map));
- vector = irq;
+ vector = irq_to_vector(irq);
msg.address_hi = 0;
msg.address_lo =
static int ia64_msi_retrigger_irq(unsigned int irq)
{
- unsigned int vector = irq;
+ unsigned int vector = irq_to_vector(irq);
ia64_resend_irq(vector);
return 1;
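Both hunks above replace a bare IRQ number with irq_to_vector(): on ia64 the IRQ and hardware-vector number spaces are distinct, so anything programmed into hardware, here the MSI data register and the resend path, must translate first. A minimal sketch of the pattern; sketch_set_msi_vector is a hypothetical name, and MSI_DATA_VECTOR is assumed from the surrounding ia64 MSI code:

        static void sketch_set_msi_vector(unsigned int irq, struct msi_msg *msg)
        {
                /* map the logical IRQ to the hardware vector; using "irq"
                 * directly only worked while the two happened to be equal */
                unsigned int vector = irq_to_vector(irq);

                /* other msg->data fields omitted in this sketch */
                msg->data = MSI_DATA_VECTOR(vector);
        }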
"features : %s\n"
"cpu number : %lu\n"
"cpu regs : %u\n"
- "cpu MHz : %lu.%06lu\n"
+ "cpu MHz : %lu.%03lu\n"
"itc MHz : %lu.%06lu\n"
"BogoMIPS : %lu.%02lu\n",
cpunum, c->vendor, c->family, c->model,
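The format-string fix matters because of the units: assuming the frequency is kept in kHz, as the three-digit fraction implies, dividing by 1000 leaves a remainder of at most three digits, so the fractional field must be %03lu; a %06lu width zero-pads that remainder into the wrong decimal magnitude. A runnable illustration with a made-up value:

        #include <stdio.h>

        int main(void)
        {
                unsigned long freq_khz = 1400500;       /* 1400.5 MHz expressed in kHz */

                printf("cpu MHz : %lu.%06lu\n", freq_khz / 1000, freq_khz % 1000);
                /* prints "cpu MHz : 1400.000500", i.e. 1400.0005 MHz: wrong */
                printf("cpu MHz : %lu.%03lu\n", freq_khz / 1000, freq_khz % 1000);
                /* prints "cpu MHz : 1400.500": correct */
                return 0;
        }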
* There are errors which still need to be cleaned up by
* hubiio_crb_error_handler
*/
- mod_timer(recovery_timer, HZ * 5);
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
icrbd.ii_icrb0_d_regval =
REMOTE_HUB_L(nasid, IIO_ICRB_D(i));
if (icrbd.d_bteop) {
- mod_timer(recovery_timer, HZ * 5);
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
err_nodepda, smp_processor_id(),
i));
status = BTE_LNSTAT_LOAD(bte);
if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
continue;
- mod_timer(recovery_timer, HZ * 5);
+ mod_timer(recovery_timer, jiffies + (HZ * 5));
BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
smp_processor_id()));
return 1;
}
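All three hunks fix the same bug: mod_timer() takes an absolute expiry time in jiffies, not a relative delay. A bare HZ * 5 means "five seconds after boot", which on any long-running system is already in the past, so the recovery timer would fire immediately instead of five seconds from now. The idiom as a minimal sketch:

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static void arm_recovery(struct timer_list *recovery_timer)
        {
                /* the second argument is absolute: "now" plus the delay */
                mod_timer(recovery_timer, jiffies + 5 * HZ);
        }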
/*
- * If we're mapping for MSI, set the MSI bit in the ATE
+ * If we're mapping for MSI, set the MSI bit in the ATE. If it's a
+ * TIOCP based pci bus, we also need to set the PIO bit in the ATE.
*/
- if (dma_flags & SN_DMA_MSI)
+ if (dma_flags & SN_DMA_MSI) {
ate |= PCI32_ATE_MSI;
+ if (IS_TIOCP_SOFT(pcibus_info))
+ ate |= PCI32_ATE_PIO;
+ }
ate_write(pcibus_info, ate_index, ate_count, ate);
#define PCI_DEVICE_ID_INTEL_82965Q_IG 0x2992
#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
#define PCI_DEVICE_ID_INTEL_82965G_IG 0x29A2
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GM_IG 0x2A02
#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_1_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB)
extern int agp_memory_reserved;
bridge->driver = &intel_845_driver;
name = "965G";
break;
-
+ case PCI_DEVICE_ID_INTEL_82965GM_HB:
+ if (find_i830(PCI_DEVICE_ID_INTEL_82965GM_IG))
+ bridge->driver = &intel_i965_driver;
+ else
+ bridge->driver = &intel_845_driver;
+ name = "965GM";
+ break;
case PCI_DEVICE_ID_INTEL_7505_0:
bridge->driver = &intel_7505_driver;
name = "E7505";
ID(PCI_DEVICE_ID_INTEL_82965G_1_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
ID(PCI_DEVICE_ID_INTEL_82965G_HB),
+ ID(PCI_DEVICE_ID_INTEL_82965GM_HB),
{ }
};
if (size < rsize) {
dbg("report %d is too short, (%d < %d)", report->id, size, rsize);
- return -1;
+ memset(data + size, 0, rsize - size);
}
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
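Rather than rejecting a short report outright, the hunk above zero-pads the missing tail and lets processing continue; this assumes, as the original fix does, that the destination buffer was allocated for the full rsize bytes. The defensive idiom in isolation (pad_to_expected is a hypothetical name):

        #include <string.h>

        /* data must point at a buffer with room for rsize bytes */
        static void pad_to_expected(unsigned char *data, size_t size, size_t rsize)
        {
                if (size < rsize)
                        memset(data + size, 0, rsize - size);   /* zero the missing tail */
        }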
cdrom_saw_media_change (drive);
/*printk("%s: media changed\n",drive->name);*/
return 0;
+ } else if ((sense_key == ILLEGAL_REQUEST) &&
+ (rq->cmd[0] == GPCMD_START_STOP_UNIT)) {
+ /*
+ * Don't print error message for this condition--
+ * SFF8090i indicates that 5/24/00 is the correct
+ * response to a request to close the tray if the
+ * drive doesn't have that capability.
+ * cdrom_log_sense() knows this!
+ */
} else if (!(rq->cmd_flags & REQ_QUIET)) {
/* Otherwise, print an error. */
ide_dump_status(drive, "packet command error", stat);
#endif
/* so that ide_timer_expiry knows what to do */
hwgroup->sleeping = 1;
+ hwgroup->req_gen_timer = hwgroup->req_gen;
mod_timer(&hwgroup->timer, sleep);
/* we purposely leave hwgroup->busy==1
* while sleeping */
spin_lock_irqsave(&ide_lock, flags);
- if ((handler = hwgroup->handler) == NULL) {
+ if (((handler = hwgroup->handler) == NULL) ||
+ (hwgroup->req_gen != hwgroup->req_gen_timer)) {
/*
* Either a marginal timeout occurred
* (got the interrupt just as timer expired),
if ((wait = expiry(drive)) > 0) {
/* reset timer */
hwgroup->timer.expires = jiffies + wait;
+ hwgroup->req_gen_timer = hwgroup->req_gen;
add_timer(&hwgroup->timer);
spin_unlock_irqrestore(&ide_lock, flags);
return;
printk(KERN_ERR "%s: ide_intr: hwgroup->busy was 0 ??\n", drive->name);
}
hwgroup->handler = NULL;
+ hwgroup->req_gen++;
del_timer(&hwgroup->timer);
spin_unlock(&ide_lock);
hwgroup->handler = handler;
hwgroup->expiry = expiry;
hwgroup->timer.expires = jiffies + timeout;
+ hwgroup->req_gen_timer = hwgroup->req_gen;
add_timer(&hwgroup->timer);
}
hwgroup->handler = handler;
hwgroup->expiry = expiry;
hwgroup->timer.expires = jiffies + timeout;
+ hwgroup->req_gen_timer = hwgroup->req_gen;
add_timer(&hwgroup->timer);
hwif->OUTBSYNC(drive, cmd, IDE_COMMAND_REG);
/* Drive takes 400nS to respond, we must avoid the IRQ being
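The req_gen/req_gen_timer pair threaded through these hunks is a generation counter: every time a handler and its timeout are armed, the current generation is snapshotted into req_gen_timer, and every time the IRQ path completes a request it bumps req_gen. When the timer expiry routine runs, a mismatch proves the interrupt already won the race and the timeout is stale. The idiom in isolation, with hypothetical names; the real code serializes both sides under ide_lock:

        struct cmd_state {
                int gen;                /* bumped by the IRQ path on completion */
                int gen_timer;          /* snapshot taken when the timeout was armed */
        };

        static void arm_timeout(struct cmd_state *s)
        {
                s->gen_timer = s->gen;  /* remember which request this timer guards */
                /* ... add_timer() ... */
        }

        static void irq_done(struct cmd_state *s)
        {
                s->gen++;               /* any armed timeout is now stale */
        }

        static int timeout_is_stale(const struct cmd_state *s)
        {
                return s->gen != s->gen_timer;  /* true: the IRQ beat the timer */
        }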
return "tape";
case ide_floppy:
return "floppy";
+ case ide_optical:
+ return "optical";
default:
return "UNKNOWN";
}
skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);
mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
goto partial_error;
}
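The direction flip is correct because these are receive buffers: the HCA writes into the pages and the CPU reads them afterwards, which is DMA_FROM_DEVICE; mapping them DMA_TO_DEVICE tells the DMA API the CPU is the producer, and on non-coherent platforms the CPU can then read stale cache lines. A sketch of the rule of thumb, using the same verbs as the hunk (map and unmap directions must also match); map_example is a hypothetical name:

        #include <rdma/ib_verbs.h>

        static void map_example(struct ib_device *ca, struct page *page,
                                u64 *rx, u64 *tx)
        {
                *rx = ib_dma_map_page(ca, page, 0, PAGE_SIZE,
                                      DMA_FROM_DEVICE); /* device writes: receive  */
                *tx = ib_dma_map_page(ca, page, 0, PAGE_SIZE,
                                      DMA_TO_DEVICE);   /* device reads: transmit */

                ib_dma_unmap_page(ca, *rx, PAGE_SIZE, DMA_FROM_DEVICE);
                ib_dma_unmap_page(ca, *tx, PAGE_SIZE, DMA_TO_DEVICE);
        }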
wait_queue_head_t wait; /* waitq for conn/disconn */
atomic_t post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
- struct work_struct comperror_work; /* conn term sleepable ctx*/
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
-static void iser_comp_error_worker(struct work_struct *work);
static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
init_waitqueue_head(&ib_conn->wait);
atomic_set(&ib_conn->post_recv_buf_count, 0);
atomic_set(&ib_conn->post_send_buf_count, 0);
- INIT_WORK(&ib_conn->comperror_work, iser_comp_error_worker);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
return ret_val;
}
-static void iser_comp_error_worker(struct work_struct *work)
-{
- struct iser_conn *ib_conn =
- container_of(work, struct iser_conn, comperror_work);
-
- /* getting here when the state is UP means that the conn is being *
- * terminated asynchronously from the iSCSI layer's perspective. */
- if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
-
- /* complete the termination process if disconnect event was delivered *
- * note there are no more non completed posts to the QP */
- if (ib_conn->disc_evt_flag) {
- ib_conn->state = ISER_CONN_DOWN;
- wake_up_interruptible(&ib_conn->wait);
- }
-}
-
static void iser_handle_comp_error(struct iser_desc *desc)
{
struct iser_dto *dto = &desc->dto;
}
if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
- atomic_read(&ib_conn->post_send_buf_count) == 0)
- schedule_work(&ib_conn->comperror_work);
+ atomic_read(&ib_conn->post_send_buf_count) == 0) {
+ /* getting here when the state is UP means that the conn is *
+ * being terminated asynchronously from the iSCSI layer's *
+ * perspective. */
+ if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
+ ISER_CONN_TERMINATING))
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+
+ /* complete the termination process if disconnect event was delivered *
+ * note there are no more non completed posts to the QP */
+ if (ib_conn->disc_evt_flag) {
+ ib_conn->state = ISER_CONN_DOWN;
+ wake_up_interruptible(&ib_conn->wait);
+ }
+ }
}
static void iser_cq_tasklet_fn(unsigned long data)
goto out;
}
- spin_lock_bh(&priv->lock);
+ spin_lock(&priv->lock);
if (unlikely(!netif_carrier_ok(dev))) {
err = -ENOLINK;
netif_stop_queue(dev);
out_unlock:
- spin_unlock_bh(&priv->lock);
+ spin_unlock(&priv->lock);
out:
dev_kfree_skb(skb);
priv->pm_config = 0;
/* Interrupts already disabled by sc92031_stop or sc92031_probe */
- spin_lock(&priv->lock);
+ spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
mmiowb();
- spin_unlock(&priv->lock);
+ spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
if (netif_carrier_ok(dev))
/* Disable interrupts, stop Tx and Rx. */
sc92031_disable_interrupts(dev);
- spin_lock(&priv->lock);
+ spin_lock_bh(&priv->lock);
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
mmiowb();
- spin_unlock(&priv->lock);
+ spin_unlock_bh(&priv->lock);
free_irq(pdev->irq, dev);
pci_free_consistent(pdev, TX_BUF_TOT_LEN, priv->tx_bufs,
/* Disable interrupts, stop Tx and Rx. */
sc92031_disable_interrupts(dev);
- spin_lock(&priv->lock);
+ spin_lock_bh(&priv->lock);
_sc92031_disable_tx_rx(dev);
_sc92031_tx_clear(dev);
mmiowb();
- spin_unlock(&priv->lock);
+ spin_unlock_bh(&priv->lock);
out:
pci_set_power_state(pdev, pci_choose_state(pdev, state));
goto out;
/* Interrupts already disabled by sc92031_suspend */
- spin_lock(&priv->lock);
+ spin_lock_bh(&priv->lock);
_sc92031_reset(dev);
mmiowb();
- spin_unlock(&priv->lock);
+ spin_unlock_bh(&priv->lock);
sc92031_enable_interrupts(dev);
netif_device_attach(dev);
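Every sc92031 change above follows one rule: the lock flavor has to match the calling context. hard_start_xmit is entered with bottom halves already disabled, so a plain spin_lock suffices there, while the process-context paths (remove, suspend, resume) must use spin_lock_bh themselves to exclude the driver's softirq users of the same lock. A condensed kernel-context sketch with hypothetical names:

        struct my_priv {
                spinlock_t lock;
                /* ... */
        };

        static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct my_priv *priv = netdev_priv(dev);

                spin_lock(&priv->lock);         /* BHs are already off here */
                /* ... hand the packet to the hardware ... */
                spin_unlock(&priv->lock);
                return 0;
        }

        static int my_suspend(struct pci_dev *pdev, pm_message_t state)
        {
                struct my_priv *priv = netdev_priv(pci_get_drvdata(pdev));

                spin_lock_bh(&priv->lock);      /* process context: shut out BH users */
                /* ... quiesce the hardware ... */
                spin_unlock_bh(&priv->lock);
                return 0;
        }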
#define IS_PCI_BRIDGE_ASIC(asic) (asic == PCIIO_ASIC_TYPE_PIC || \
asic == PCIIO_ASIC_TYPE_TIOCP)
#define IS_PIC_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_PIC)
+#define IS_TIOCP_SOFT(ps) (ps->pbi_bridge_type == PCIBR_BRIDGETYPE_TIOCP)
/*
 * Bridge PMU Address Translation Entry Attributes
*/
#define PCI32_ATE_V (0x1 << 0)
-#define PCI32_ATE_CO (0x1 << 1)
-#define PCI32_ATE_PREC (0x1 << 2)
+#define PCI32_ATE_CO (0x1 << 1) /* PIC ASIC ONLY */
+#define PCI32_ATE_PIO (0x1 << 1) /* TIOCP ASIC ONLY */
#define PCI32_ATE_MSI (0x1 << 2)
#define PCI32_ATE_PREF (0x1 << 3)
#define PCI32_ATE_BAR (0x1 << 4)
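These definitions make the earlier pcibr hunk explicit: bit 1 of a 32-bit ATE is overloaded, meaning coherency (CO) on a PIC ASIC but PIO on a TIOCP ASIC, while bit 2, formerly also named PREC, is the MSI flag. Building an MSI mapping therefore looks like the fragment below (pcibus_info assumed in scope, as in the mapping hunk):

        u64 ate = PCI32_ATE_V | PCI32_ATE_MSI;  /* valid + MSI (bit 2) */

        if (IS_TIOCP_SOFT(pcibus_info))         /* bit 1 means PIO only on TIOCP */
                ate |= PCI32_ATE_PIO;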
int (*expiry)(ide_drive_t *);
/* ide_system_bus_speed */
int pio_clock;
+ int req_gen;
+ int req_gen_timer;
unsigned char cmd_buf[4];
} ide_hwgroup_t;
"has invalid config pointer!\n");
return 0;
}
- clusterip_config_entry_get(cipinfo->config);
} else {
/* Case B: This is a new rule referring to an existing
* clusterip config. */
cipinfo->config = config;
- clusterip_config_entry_get(cipinfo->config);
}
} else {
/* Case C: This is a completely new clusterip config */
if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out;
- if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
+ if (sysctl_tcp_slow_start_after_idle &&
+ (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
tcp_cwnd_application_limited(sk);
}
}
}
if (tb[TCA_TCINDEX_SHIFT-1]) {
- if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(u16))
+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(int))
goto errout;
- cp.shift = *(u16 *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
+ cp.shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
}
err = -EBUSY;
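The tcindex change is a type/endianness fix: the shift attribute is evidently emitted as a full int (that is what the fix parses), so validating against sizeof(u16) and loading through a u16 pointer reads only half of the payload, and on big-endian machines it is the wrong half, always zero for small shifts. A runnable userspace illustration of the failure mode:

        #include <stdio.h>

        int main(void)
        {
                int sent = 3;   /* the sender wrote a 4-byte int */
                unsigned short half;

                /* reading the attribute through the wrong type: */
                half = *(unsigned short *)&sent;
                /* little-endian: half == 3 (works by accident)
                 * big-endian:    half == 0 (the upper 16 bits) */
                printf("as u16: %u, as int: %d\n", half, *(int *)&sent);
                return 0;
        }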