return 0;
}
-static DEFINE_PER_CPU(atomic_t, use_cnt = ATOMIC_INIT(0));
+static DEFINE_PER_CPU(atomic_t, siw_use_cnt);
static struct {
struct cpumask **tx_valid_cpus;
if (!siw_tx_thread[cpu])
continue;
- usage = atomic_read(&per_cpu(use_cnt, cpu));
+ usage = atomic_read(&per_cpu(siw_use_cnt, cpu));
if (usage <= min_use) {
tx_cpu = cpu;
min_use = usage;
out:
if (tx_cpu >= 0)
- atomic_inc(&per_cpu(use_cnt, tx_cpu));
+ atomic_inc(&per_cpu(siw_use_cnt, tx_cpu));
else
pr_warn("siw: no tx cpu found\n");
void siw_put_tx_cpu(int cpu)
{
- atomic_dec(&per_cpu(use_cnt, cpu));
+ atomic_dec(&per_cpu(siw_use_cnt, cpu));
}
static struct ib_qp *siw_get_base_qp(struct ib_device *base_dev, int id)
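
For context, the hunks above all revolve around one pattern: a per-CPU atomic_t counts how many users are bound to each TX CPU, and siw_get_tx_cpu() picks the least-loaded one. The following is a minimal illustrative sketch of that pattern, not the driver's exact code; everything except the siw_use_cnt name (taken from the patch) is made up for illustration.

#include <linux/percpu.h>
#include <linux/atomic.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(atomic_t, siw_use_cnt);

/* Pick the online CPU with the lowest usage count and charge it. */
static int pick_least_used_cpu(void)
{
	int cpu, best = -1, min_use = 0;

	for_each_online_cpu(cpu) {
		int usage = atomic_read(&per_cpu(siw_use_cnt, cpu));

		if (best < 0 || usage < min_use) {
			best = cpu;
			min_use = usage;
		}
	}
	if (best >= 0)
		atomic_inc(&per_cpu(siw_use_cnt, best));
	return best;
}

/* The matching put side simply drops the reference, as in siw_put_tx_cpu(). */
static void put_cpu_ref(int cpu)
{
	atomic_dec(&per_cpu(siw_use_cnt, cpu));
}
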
wait_queue_head_t waiting;
};
-static DEFINE_PER_CPU(struct tx_task_t, tx_task_g);
+static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);
void siw_stop_tx_thread(int nr_cpu)
{
kthread_stop(siw_tx_thread[nr_cpu]);
- wake_up(&per_cpu(tx_task_g, nr_cpu).waiting);
+ wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
}
int siw_run_sq(void *data)
const int nr_cpu = (unsigned int)(long)data;
struct llist_node *active;
struct siw_qp *qp;
- struct tx_task_t *tx_task = &per_cpu(tx_task_g, nr_cpu);
+ struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);
init_llist_head(&tx_task->active);
init_waitqueue_head(&tx_task->waiting);
}
siw_qp_get(qp);
- llist_add(&qp->tx_list, &per_cpu(tx_task_g, qp->tx_cpu).active);
+ llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);
- wake_up(&per_cpu(tx_task_g, qp->tx_cpu).waiting);
+ wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);
return 0;
}
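
The second set of hunks renames the per-CPU TX worker state. Below is a hedged sketch of that producer/consumer pattern: each CPU owns a lock-free llist of queued work plus a wait queue; producers push an entry and wake the per-CPU kthread, which drains the list. Only struct tx_task_t, its fields, and the siw_tx_task_g name come from the patch; the helper names are invented for illustration.

#include <linux/llist.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/percpu.h>

struct tx_task_t {
	struct llist_head active;
	wait_queue_head_t waiting;
};

static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);

/* Producer side: queue a node for a given CPU and wake its worker,
 * mirroring the llist_add()/wake_up() pair in the last hunk. */
static void queue_tx_work(struct llist_node *node, int cpu)
{
	llist_add(node, &per_cpu(siw_tx_task_g, cpu).active);
	wake_up(&per_cpu(siw_tx_task_g, cpu).waiting);
}

/* Consumer side: per-CPU kthread, shaped like siw_run_sq(). */
static int tx_worker(void *data)
{
	const int nr_cpu = (unsigned int)(long)data;
	struct tx_task_t *t = &per_cpu(siw_tx_task_g, nr_cpu);

	init_llist_head(&t->active);
	init_waitqueue_head(&t->waiting);

	while (!kthread_should_stop()) {
		struct llist_node *active;

		wait_event_interruptible(t->waiting,
					 !llist_empty(&t->active) ||
					 kthread_should_stop());
		active = llist_del_all(&t->active);
		/* ... walk 'active' and process each queued entry ... */
	}
	return 0;
}
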