/* Map the M64 here */
if (pdn->m64_single_mode) {
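+ /*
+ * In single PE mode each VF is backed by its own M64 BAR and thus
+ * its own PE, so map the window to the PE# allocated for this VF.
+ */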
- pe_num = pdn->offset + j;
+ pe_num = pdn->pe_num_map[j];
rc = opal_pci_map_pe_mmio_window(phb->opal_id,
pe_num, OPAL_M64_WINDOW_TYPE,
pdn->m64_map[j][i], 0);
struct pnv_phb *phb;
struct pci_dn *pdn;
struct pci_sriov *iov;
- u16 num_vfs;
+ u16 num_vfs, i;
bus = pdev->bus;
hose = pci_bus_to_host(bus);
if (phb->type == PNV_PHB_IODA2) {
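+ /*
+ * Shared mode shifted the VF BARs by the starting PE# at enable
+ * time, so shift them back before tearing things down.
+ */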
if (!pdn->m64_single_mode)
- pnv_pci_vf_resource_shift(pdev, -pdn->offset);
+ pnv_pci_vf_resource_shift(pdev, -*pdn->pe_num_map);
/* Release M64 windows */
pnv_pci_vf_release_m64(pdev, num_vfs);
/* Release PE numbers */
- bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
- pdn->offset = 0;
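+ /*
+ * PEs were allocated one by one in single PE mode, so free them
+ * the same way; shared mode reserved one contiguous region in the
+ * pe_alloc bitmap instead.
+ */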
+ if (pdn->m64_single_mode) {
+ for (i = 0; i < num_vfs; i++) {
+ if (pdn->pe_num_map[i] != IODA_INVALID_PE)
+ pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);
+ }
+ } else {
+ bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
+ }
+
+ /* Release pe_num_map */
+ kfree(pdn->pe_num_map);
}
}
/* Reserve PE for each VF */
for (vf_index = 0; vf_index < num_vfs; vf_index++) {
- pe_num = pdn->offset + vf_index;
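+ /*
+ * PE#s are sparse in single PE mode, so take the per-VF entry;
+ * otherwise the VFs occupy a contiguous range starting at
+ * *pdn->pe_num_map.
+ */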
+ if (pdn->m64_single_mode)
+ pe_num = pdn->pe_num_map[vf_index];
+ else
+ pe_num = *pdn->pe_num_map + vf_index;
pe = &phb->ioda.pe_array[pe_num];
pe->pe_number = pe_num;
struct pnv_phb *phb;
struct pci_dn *pdn;
int ret;
+ u16 i;
bus = pdev->bus;
hose = pci_bus_to_host(bus);
return -EBUSY;
}
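+ /*
+ * pe_num_map is assumed to be declared in struct pci_dn as
+ * "unsigned int *pe_num_map;", holding either one PE# per VF
+ * (single PE mode) or a single starting PE#.
+ */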
+ /*
+ * Allocate pe_num_map: one PE# per VF in single PE mode, or just
+ * the first PE# of a contiguous range in shared mode.
+ */
+ if (pdn->m64_single_mode)
+ pdn->pe_num_map = kmalloc_array(num_vfs, sizeof(*pdn->pe_num_map),
+ GFP_KERNEL);
+ else
+ pdn->pe_num_map = kmalloc(sizeof(*pdn->pe_num_map), GFP_KERNEL);
+
+ if (!pdn->pe_num_map)
+ return -ENOMEM;
+
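+ /*
+ * Pre-mark every entry invalid so the failure path can tell
+ * which PEs were actually allocated before freeing them.
+ */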
+ if (pdn->m64_single_mode)
+ for (i = 0; i < num_vfs; i++)
+ pdn->pe_num_map[i] = IODA_INVALID_PE;
+
/* Calculate available PE for required VFs */
- mutex_lock(&phb->ioda.pe_alloc_mutex);
- pdn->offset = bitmap_find_next_zero_area(
- phb->ioda.pe_alloc, phb->ioda.total_pe,
- 0, num_vfs, 0);
- if (pdn->offset >= phb->ioda.total_pe) {
+ if (pdn->m64_single_mode) {
+ for (i = 0; i < num_vfs; i++) {
+ pdn->pe_num_map[i] = pnv_ioda_alloc_pe(phb);
+ if (pdn->pe_num_map[i] == IODA_INVALID_PE) {
+ ret = -EBUSY;
+ goto m64_failed;
+ }
+ }
+ } else {
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
+ *pdn->pe_num_map = bitmap_find_next_zero_area(
+ phb->ioda.pe_alloc, phb->ioda.total_pe,
+ 0, num_vfs, 0);
+ if (*pdn->pe_num_map >= phb->ioda.total_pe) {
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+ dev_info(&pdev->dev, "Failed to enable %d VFs\n", num_vfs);
+ kfree(pdn->pe_num_map);
+ return -EBUSY;
+ }
+ bitmap_set(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
mutex_unlock(&phb->ioda.pe_alloc_mutex);
- dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
- pdn->offset = 0;
- return -EBUSY;
}
- bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
pdn->num_vfs = num_vfs;
- mutex_unlock(&phb->ioda.pe_alloc_mutex);
/* Assign M64 window accordingly */
ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
* Otherwise, the PE# for the VF will conflict with others.
*/
if (!pdn->m64_single_mode) {
- ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
+ ret = pnv_pci_vf_resource_shift(pdev, *pdn->pe_num_map);
if (ret)
goto m64_failed;
}
return 0;
m64_failed:
- bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
- pdn->offset = 0;
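+ /* Undo the PE allocation done above, whichever scheme was used */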
+ if (pdn->m64_single_mode) {
+ for (i = 0; i < num_vfs; i++) {
+ if (pdn->pe_num_map[i] != IODA_INVALID_PE)
+ pnv_ioda_free_pe(phb, pdn->pe_num_map[i]);
+ }
+ } else {
+ bitmap_clear(phb->ioda.pe_alloc, *pdn->pe_num_map, num_vfs);
+ }
+
+ /* Release pe_num_map */
+ kfree(pdn->pe_num_map);
return ret;
}