2 * Copyright (c) 2008, 2011 Mark Kettenis
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 #include <sys/param.h>
21 #include <sys/ioctl.h>
22 #include <sys/memrange.h>
24 #include <sys/pciio.h>
26 #include <dev/pci/pcireg.h>
27 #include <dev/pci/pcidevs.h>
36 #include "pciaccess.h"
37 #include "pciaccess_private.h"
40 * This should allow for 16 domains, which should cover everything
41 * except perhaps the really big fridge-sized sparc64 server machines
42 * that are unlikely to have any graphics hardware in them.
/*
 * Shared file descriptor used to mmap() PCI memory (BARs, expansion
 * ROMs, legacy I/O range); -1 until opened.  Presumably the aperture
 * driver device — opening code is not visible here; confirm.
 */
47 static int aperturefd = -1;
/*
 * Read a 32-bit configuration-space register of domain:bus:dev:func
 * through the PCIOCREAD ioctl on that domain's /dev/pci descriptor.
 * NOTE(review): fragment — local declarations, io.pi_reg/width setup
 * and the return path are missing from this view.
 */
50 pci_read(int domain, int bus, int dev, int func, uint32_t reg, uint32_t *val)
/* Zero the request, then select the target function. */
55 bzero(&io, sizeof(io));
56 io.pi_sel.pc_bus = bus;
57 io.pi_sel.pc_dev = dev;
58 io.pi_sel.pc_func = func;
62 err = ioctl(pcifd[domain], PCIOCREAD, &io);
/*
 * Write a 32-bit configuration-space register of domain:bus:dev:func
 * through the PCIOCWRITE ioctl; returns the ioctl result directly.
 * NOTE(review): fragment — io.pi_reg/pi_data setup lines are missing
 * from this view.
 */
72 pci_write(int domain, int bus, int dev, int func, uint32_t reg, uint32_t val)
76 bzero(&io, sizeof(io));
77 io.pi_sel.pc_bus = bus;
78 io.pi_sel.pc_dev = dev;
79 io.pi_sel.pc_func = func;
84 return ioctl(pcifd[domain], PCIOCWRITE, &io);
/*
 * Probe a BAR's size mask: PCIOCREADMASK returns the value the register
 * reads back after writing all-ones, used by the probe code to compute
 * region sizes.  NOTE(review): fragment — declarations and return path
 * not visible here.
 */
88 pci_readmask(int domain, int bus, int dev, int func, uint32_t reg,
94 bzero(&io, sizeof(io));
95 io.pi_sel.pc_bus = bus;
96 io.pi_sel.pc_dev = dev;
97 io.pi_sel.pc_func = func;
101 err = ioctl(pcifd[domain], PCIOCREADMASK, &io);
/*
 * Copy the device's expansion ROM into 'buffer'.  Temporarily enables
 * memory decode (PCI_COMMAND_MEM_ENABLE) and ROM decode
 * (PCI_ROM_ENABLE), mmap()s the ROM through aperturefd, memcpy()s it
 * out, then restores the original COMMAND and ROM registers.
 * NOTE(review): fragment — error paths and several setup lines are
 * missing from this view.
 */
115 pci_device_openbsd_read_rom(struct pci_device *device, void *buffer)
117 struct pci_device_private *priv = (struct pci_device_private *)device;
122 int pci_rom, domain, bus, dev, func;
124 domain = device->domain;
125 if (domain < 0 || domain >= ndomains)
132 if (aperturefd == -1)
135 if (priv->base.rom_size == 0) {
/* On x86-style platforms, VGA display devices may fall back to the
 * legacy VGA BIOS window (handling not visible in this fragment). */
136 #if defined(__alpha__) || defined(__amd64__) || defined(__i386__)
137 if ((device->device_class & 0x00ffff00) ==
138 ((PCI_CLASS_DISPLAY << 16) |
139 (PCI_SUBCLASS_DISPLAY_VGA << 8))) {
147 rom_base = priv->rom_base;
148 rom_size = priv->base.rom_size;
/* Save config state, then turn on memory + ROM decode. */
151 pci_read(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, &csr);
152 pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG,
153 csr | PCI_COMMAND_MEM_ENABLE);
154 pci_read(domain, bus, dev, func, PCI_ROM_REG, &rom);
155 pci_write(domain, bus, dev, func, PCI_ROM_REG,
156 rom | PCI_ROM_ENABLE);
/* Map the ROM read-only through the aperture and copy it out. */
159 bios = mmap(NULL, rom_size, PROT_READ, MAP_SHARED,
160 aperturefd, (off_t)rom_base);
161 if (bios == MAP_FAILED)
164 memcpy(buffer, bios, rom_size);
165 munmap(bios, rom_size);
168 /* Restore PCI config space */
169 pci_write(domain, bus, dev, func, PCI_ROM_REG, rom);
170 pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, csr);
/*
 * Return the number of function slots to scan on domain:bus:dev:
 * 8 if the header type declares a multi-function device, else 1.
 * Error returns for a bad domain or failed config read are in lines
 * not visible in this fragment.
 */
176 pci_nfuncs(int domain, int bus, int dev)
180 if (domain < 0 || domain >= ndomains)
183 if (pci_read(domain, bus, dev, 0, PCI_BHLC_REG, &hdr) != 0)
186 return (PCI_HDRTYPE_MULTIFN(hdr) ? 8 : 1);
/*
 * mmap() a PCI memory range through aperturefd.  On i386/amd64, when
 * the caller asked for cacheable or write-combining access, also
 * program an MTRR covering the range via the MEMRANGE_SET ioctl;
 * MTRR failure is only reported to stderr, not fatal.
 */
190 pci_device_openbsd_map_range(struct pci_device *dev,
191 struct pci_device_mapping *map)
193 struct mem_range_desc mr;
194 struct mem_range_op mo;
195 int prot = PROT_READ;
197 if (map->flags & PCI_DEV_MAP_FLAG_WRITABLE)
200 map->memory = mmap(NULL, map->size, prot, MAP_SHARED, aperturefd,
202 if (map->memory == MAP_FAILED)
204 #if defined(__i386__) || defined(__amd64__)
205 /* No need to set an MTRR if it's the default mode. */
206 if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
207 (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
208 mr.mr_base = map->base;
209 mr.mr_len = map->size;
211 if (map->flags & PCI_DEV_MAP_FLAG_CACHABLE)
212 mr.mr_flags |= MDF_WRITEBACK;
213 if (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)
214 mr.mr_flags |= MDF_WRITECOMBINE;
215 strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));
218 mo.mo_arg[0] = MEMRANGE_SET_UPDATE;
220 if (ioctl(aperturefd, MEMRANGE_SET, &mo))
221 (void)fprintf(stderr, "mtrr set failed: %s\n",
/*
 * Undo pci_device_openbsd_map_range: on i386/amd64 remove any MTRR
 * that was installed for a cacheable/write-combining mapping (errors
 * deliberately ignored), then fall through to the generic munmap
 * implementation.
 */
229 pci_device_openbsd_unmap_range(struct pci_device *dev,
230 struct pci_device_mapping *map)
232 #if defined(__i386__) || defined(__amd64__)
233 struct mem_range_desc mr;
234 struct mem_range_op mo;
236 if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
237 (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
238 mr.mr_base = map->base;
239 mr.mr_len = map->size;
240 mr.mr_flags = MDF_UNCACHEABLE;
241 strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));
244 mo.mo_arg[0] = MEMRANGE_SET_REMOVE;
/* Best effort: MTRR removal failure is not propagated. */
246 (void)ioctl(aperturefd, MEMRANGE_SET, &mo);
249 return pci_device_generic_unmap_range(dev, map);
/*
 * Arbitrary-width config-space read.  PCIOCREAD only does aligned
 * 32-bit accesses, so this loops over aligned words, shifts out the
 * unaligned leading bytes, and copies MIN(remaining, bytes-in-word)
 * into the caller's buffer, accumulating *bytes_read.
 * NOTE(review): fragment — loop header and termination lines are not
 * visible here.
 */
253 pci_device_openbsd_read(struct pci_device *dev, void *data,
254 pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_read)
258 io.pi_sel.pc_bus = dev->bus;
259 io.pi_sel.pc_dev = dev->dev;
260 io.pi_sel.pc_func = dev->func;
/* How many of this word's bytes belong to the caller's range. */
264 int toread = MIN(size, 4 - (offset & 0x3));
266 io.pi_reg = (offset & ~0x3);
269 if (ioctl(pcifd[dev->domain], PCIOCREAD, &io) == -1)
/* Fix endianness, then shift the requested bytes down to bit 0. */
272 io.pi_data = htole32(io.pi_data);
273 io.pi_data >>= ((offset & 0x3) * 8);
275 memcpy(data, &io.pi_data, toread);
278 data = (char *)data + toread;
280 *bytes_read += toread;
/*
 * Arbitrary config-space write, restricted to 32-bit-aligned offset
 * and size (rejected otherwise); writes one aligned word per
 * PCIOCWRITE iteration.  NOTE(review): fragment — the loop header,
 * pi_reg setup and *bytes_written accounting are not visible here.
 */
287 pci_device_openbsd_write(struct pci_device *dev, const void *data,
288 pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_written)
292 if ((offset % 4) != 0 || (size % 4) != 0)
295 io.pi_sel.pc_bus = dev->bus;
296 io.pi_sel.pc_dev = dev->dev;
297 io.pi_sel.pc_func = dev->func;
303 memcpy(&io.pi_data, data, 4);
305 if (ioctl(pcifd[dev->domain], PCIOCWRITE, &io) == -1)
309 data = (char *)data + 4;
/*
 * Teardown hook: close every per-domain /dev/pci descriptor that
 * pci_system_openbsd_create() opened.
 */
318 pci_system_openbsd_destroy(void)
322 for (domain = 0; domain < ndomains; domain++)
323 close(pcifd[domain]);
/*
 * Probe hook: fill in the device's private state — header type, the
 * BAR regions (I/O, 32-bit and 64-bit memory, with sizes taken from
 * the PCIOCREADMASK size mask), and the expansion ROM base/size — by
 * reading configuration space.
 * Fix: four "&reg" address-of arguments had been corrupted to the
 * "®" glyph (an "&reg;" HTML-entity mis-conversion), which broke
 * compilation; restored to "&reg".  No other token changed.
 */
328 pci_device_openbsd_probe(struct pci_device *device)
330 struct pci_device_private *priv = (struct pci_device_private *)device;
331 struct pci_mem_region *region;
332 uint64_t reg64, size64;
333 uint32_t bar, reg, size;
334 int domain, bus, dev, func, err;
336 domain = device->domain;
341 err = pci_read(domain, bus, dev, func, PCI_BHLC_REG, &reg);
/* Only standard (type 0) headers carry the six BARs probed below. */
345 priv->header_type = PCI_HDRTYPE_TYPE(reg);
346 if (priv->header_type != 0)
349 region = device->regions;
350 for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END;
351 bar += sizeof(uint32_t), region++) {
352 err = pci_read(domain, bus, dev, func, bar, &reg);
356 /* Probe the size of the region. */
357 err = pci_readmask(domain, bus, dev, func, bar, &size);
361 if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
363 region->base_addr = PCI_MAPREG_IO_ADDR(reg);
364 region->size = PCI_MAPREG_IO_SIZE(size);
366 if (PCI_MAPREG_MEM_PREFETCHABLE(reg))
367 region->is_prefetchable = 1;
368 switch(PCI_MAPREG_MEM_TYPE(reg)) {
369 case PCI_MAPREG_MEM_TYPE_32BIT:
370 case PCI_MAPREG_MEM_TYPE_32BIT_1M:
371 region->base_addr = PCI_MAPREG_MEM_ADDR(reg);
372 region->size = PCI_MAPREG_MEM_SIZE(size);
/* 64-bit BARs consume the next register as the high word. */
374 case PCI_MAPREG_MEM_TYPE_64BIT:
380 bar += sizeof(uint32_t);
382 err = pci_read(domain, bus, dev, func, bar, &reg);
385 reg64 |= (uint64_t)reg << 32;
387 err = pci_readmask(domain, bus, dev, func, bar, &size);
390 size64 |= (uint64_t)size << 32;
392 region->base_addr = PCI_MAPREG_MEM64_ADDR(reg64);
393 region->size = PCI_MAPREG_MEM64_SIZE(size64);
400 /* Probe expansion ROM if present */
401 err = pci_read(domain, bus, dev, func, PCI_ROM_REG, &reg);
/* Size the ROM by writing the all-ones mask (decode kept disabled),
 * reading it back, then restoring the original register value. */
405 err = pci_write(domain, bus, dev, func, PCI_ROM_REG, ~PCI_ROM_ENABLE);
408 pci_read(domain, bus, dev, func, PCI_ROM_REG, &size);
409 pci_write(domain, bus, dev, func, PCI_ROM_REG, reg);
411 if (PCI_ROM_ADDR(reg) != 0) {
412 priv->rom_base = PCI_ROM_ADDR(reg);
413 device->rom_size = PCI_ROM_SIZE(size);
419 #if defined(__i386__) || defined(__amd64__)
420 #include <machine/sysarch.h>
421 #include <machine/pio.h>
/*
 * Grant the caller access to legacy I/O ports.  On i386/amd64 this
 * raises the I/O privilege level via sysarch(); on platforms exposing
 * PCI_MAGIC_IO_RANGE it instead mmap()s the magic I/O window through
 * aperturefd.  NOTE(review): fragment — iopl argument setup and the
 * return statements are not visible here.
 */
424 static struct pci_io_handle *
425 pci_device_openbsd_open_legacy_io(struct pci_io_handle *ret,
426 struct pci_device *dev, pciaddr_t base, pciaddr_t size)
428 #if defined(__i386__)
429 struct i386_iopl_args ia;
432 if (sysarch(I386_IOPL, &ia))
439 #elif defined(__amd64__)
440 struct amd64_iopl_args ia;
443 if (sysarch(AMD64_IOPL, &ia))
450 #elif defined(PCI_MAGIC_IO_RANGE)
451 ret->memory = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
452 aperturefd, PCI_MAGIC_IO_RANGE + base);
453 if (ret->memory == MAP_FAILED)
/*
 * 32-bit legacy I/O read: inl() on x86, otherwise a load through the
 * mmap()ed I/O window from open_legacy_io.
 */
466 pci_device_openbsd_read32(struct pci_io_handle *handle, uint32_t reg)
468 #if defined(__i386__) || defined(__amd64__)
469 return inl(handle->base + reg);
471 return *(uint32_t *)((uintptr_t)handle->memory + reg);
/*
 * 16-bit legacy I/O read: inw() on x86, otherwise a load through the
 * mmap()ed I/O window.
 */
476 pci_device_openbsd_read16(struct pci_io_handle *handle, uint32_t reg)
478 #if defined(__i386__) || defined(__amd64__)
479 return inw(handle->base + reg);
481 return *(uint16_t *)((uintptr_t)handle->memory + reg);
/*
 * 8-bit legacy I/O read: inb() on x86, otherwise a load through the
 * mmap()ed I/O window.
 */
486 pci_device_openbsd_read8(struct pci_io_handle *handle, uint32_t reg)
488 #if defined(__i386__) || defined(__amd64__)
489 return inb(handle->base + reg);
491 return *(uint8_t *)((uintptr_t)handle->memory + reg);
/*
 * 32-bit legacy I/O write: outl() on x86, otherwise a store through
 * the mmap()ed I/O window.
 * Fix: the memory-mapped fallback stored through a uint16_t pointer,
 * silently truncating every 32-bit write to its low half; the store
 * must go through uint32_t * to match the read32 counterpart.
 */
496 pci_device_openbsd_write32(struct pci_io_handle *handle, uint32_t reg,
499 #if defined(__i386__) || defined(__amd64__)
500 outl(handle->base + reg, data);
502 *(uint32_t *)((uintptr_t)handle->memory + reg) = data;
/*
 * 16-bit legacy I/O write: outw() on x86, otherwise a store through
 * the mmap()ed I/O window.
 * Fix: the memory-mapped fallback stored through a uint8_t pointer,
 * truncating every 16-bit write to one byte; the store must go
 * through uint16_t * to match the read16 counterpart.
 */
507 pci_device_openbsd_write16(struct pci_io_handle *handle, uint32_t reg,
510 #if defined(__i386__) || defined(__amd64__)
511 outw(handle->base + reg, data);
513 *(uint16_t *)((uintptr_t)handle->memory + reg) = data;
/*
 * 8-bit legacy I/O write: outb() on x86, otherwise a store through
 * the mmap()ed I/O window.
 * Fix: the memory-mapped fallback stored through a uint32_t pointer,
 * writing four bytes for an 8-bit store and clobbering the three
 * adjacent registers; the store must go through uint8_t * to match
 * the read8 counterpart.
 */
518 pci_device_openbsd_write8(struct pci_io_handle *handle, uint32_t reg,
521 #if defined(__i386__) || defined(__amd64__)
522 outb(handle->base + reg, data);
524 *(uint8_t *)((uintptr_t)handle->memory + reg) = data;
/*
 * Map a legacy (fixed-address) memory range by building a temporary
 * pci_device_mapping and delegating to pci_device_openbsd_map_range;
 * the resulting pointer is handed back through *addr.
 * NOTE(review): fragment — map.base/size assignments and the return
 * are not visible here.
 */
529 pci_device_openbsd_map_legacy(struct pci_device *dev, pciaddr_t base,
530 pciaddr_t size, unsigned map_flags, void **addr)
532 struct pci_device_mapping map;
537 map.flags = map_flags;
539 err = pci_device_openbsd_map_range(dev, &map);
/*
 * Unmap a legacy range: rebuild the pci_device_mapping that
 * map_legacy produced and delegate to pci_device_openbsd_unmap_range.
 */
546 pci_device_openbsd_unmap_legacy(struct pci_device *dev, void *addr,
549 struct pci_device_mapping map;
554 return pci_device_openbsd_unmap_range(dev, &map);
/*
 * Method table wired into pci_sys by pci_system_openbsd_create().
 * Entry order must match struct pci_system_methods exactly; some slots
 * fall between the lines visible in this fragment.
 */
557 static const struct pci_system_methods openbsd_pci_methods = {
558 pci_system_openbsd_destroy,
560 pci_device_openbsd_read_rom,
561 pci_device_openbsd_probe,
562 pci_device_openbsd_map_range,
563 pci_device_openbsd_unmap_range,
564 pci_device_openbsd_read,
565 pci_device_openbsd_write,
566 pci_fill_capabilities_generic,
571 pci_device_openbsd_open_legacy_io,
573 pci_device_openbsd_read32,
574 pci_device_openbsd_read16,
575 pci_device_openbsd_read8,
576 pci_device_openbsd_write32,
577 pci_device_openbsd_write16,
578 pci_device_openbsd_write8,
579 pci_device_openbsd_map_legacy,
580 pci_device_openbsd_unmap_legacy
/*
 * Create the pci_system: open one /dev/pci node per domain, allocate
 * pci_sys, then scan every domain/bus/dev/func twice — first to count
 * devices, then to fill in each pci_device_private (IDs, class,
 * revision, subsystem IDs, default VGA-arbiter resources).
 * Fix: four "&reg" address-of arguments had been corrupted to the
 * "®" glyph (an "&reg;" HTML-entity mis-conversion), which broke
 * compilation; restored to "&reg".  No other token changed.
 */
584 pci_system_openbsd_create(void)
586 struct pci_device_private *device;
587 int domain, bus, dev, func, ndevs, nfuncs;
588 char path[MAXPATHLEN];
/* Open /dev/pciN for each domain; stop at the first missing node. */
594 for (domain = 0; domain < sizeof(pcifd) / sizeof(pcifd[0]); domain++) {
595 snprintf(path, sizeof(path), "/dev/pci%d", domain);
596 pcifd[domain] = open(path, O_RDWR | O_CLOEXEC);
597 if (pcifd[domain] == -1)
605 pci_sys = calloc(1, sizeof(struct pci_system));
606 if (pci_sys == NULL) {
607 for (domain = 0; domain < ndomains; domain++)
608 close(pcifd[domain]);
613 pci_sys->methods = &openbsd_pci_methods;
/* First pass: count the devices that respond with a valid vendor. */
616 for (domain = 0; domain < ndomains; domain++) {
617 for (bus = 0; bus < 256; bus++) {
618 for (dev = 0; dev < 32; dev++) {
619 nfuncs = pci_nfuncs(domain, bus, dev);
620 for (func = 0; func < nfuncs; func++) {
621 if (pci_read(domain, bus, dev, func,
622 PCI_ID_REG, &reg) != 0)
624 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
625 PCI_VENDOR(reg) == 0)
634 pci_sys->num_devices = ndevs;
635 pci_sys->devices = calloc(ndevs, sizeof(struct pci_device_private));
636 if (pci_sys->devices == NULL) {
639 for (domain = 0; domain < ndomains; domain++)
640 close(pcifd[domain]);
/* Second pass: populate one pci_device_private per discovered function. */
645 device = pci_sys->devices;
646 for (domain = 0; domain < ndomains; domain++) {
647 for (bus = 0; bus < 256; bus++) {
648 for (dev = 0; dev < 32; dev++) {
649 nfuncs = pci_nfuncs(domain, bus, dev);
650 for (func = 0; func < nfuncs; func++) {
651 if (pci_read(domain, bus, dev, func,
652 PCI_ID_REG, &reg) != 0)
654 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
655 PCI_VENDOR(reg) == 0)
658 device->base.domain = domain;
659 device->base.bus = bus;
660 device->base.dev = dev;
661 device->base.func = func;
662 device->base.vendor_id = PCI_VENDOR(reg);
663 device->base.device_id = PCI_PRODUCT(reg);
665 if (pci_read(domain, bus, dev, func,
666 PCI_CLASS_REG, &reg) != 0)
669 device->base.device_class =
671 PCI_CLASS(reg) << 16 |
672 PCI_SUBCLASS(reg) << 8;
673 device->base.revision = PCI_REVISION(reg);
675 if (pci_read(domain, bus, dev, func,
676 PCI_SUBVEND_0, &reg) != 0)
679 device->base.subvendor_id = PCI_VENDOR(reg);
680 device->base.subdevice_id = PCI_PRODUCT(reg);
/* Default: every device decodes legacy VGA I/O and memory until the
 * arbiter says otherwise. */
682 device->base.vgaarb_rsrc =
683 VGA_ARB_RSRC_LEGACY_IO |
684 VGA_ARB_RSRC_LEGACY_MEM;
/*
 * Accept an already-open device-memory fd from the caller; presumably
 * stored into aperturefd for later mmap() use — body not visible in
 * this fragment, confirm against the full source.
 */
696 pci_system_openbsd_init_dev_mem(int fd)
/*
 * Initialize VGA arbitration state: ask the kernel (PCIOCGETVGA on
 * domain 0) which device currently owns legacy VGA, record it as
 * pci_sys->vga_target, then count the VGA-class display devices in
 * domain 0 into pci_sys->vga_count.
 */
702 pci_device_vgaarb_init(void)
704 struct pci_device *dev = pci_sys->vga_target;
705 struct pci_device_iterator *iter;
706 struct pci_id_match vga_match = {
707 PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY,
708 (PCI_CLASS_DISPLAY << 16) | (PCI_SUBCLASS_DISPLAY_VGA << 8),
714 pv.pv_sel.pc_bus = 0;
715 pv.pv_sel.pc_dev = 0;
716 pv.pv_sel.pc_func = 0;
717 err = ioctl(pcifd[0], PCIOCGETVGA, &pv);
721 pci_sys->vga_target = pci_device_find_by_slot(0, pv.pv_sel.pc_bus,
722 pv.pv_sel.pc_dev, pv.pv_sel.pc_func);
724 /* Count the number of VGA devices in domain 0. */
725 iter = pci_id_match_iterator_create(&vga_match);
728 pci_sys->vga_count = 0;
729 while ((dev = pci_device_next(iter)) != NULL) {
730 if (dev->domain == 0)
731 pci_sys->vga_count++;
733 pci_iterator_destroy(iter);
/*
 * Tear down VGA arbitration: release any lock held on the current
 * vga_target via PCIOCSETVGA with PCI_VGA_UNLOCK (result ignored).
 */
739 pci_device_vgaarb_fini(void)
741 struct pci_device *dev;
746 dev = pci_sys->vga_target;
750 pv.pv_sel.pc_bus = dev->bus;
751 pv.pv_sel.pc_dev = dev->dev;
752 pv.pv_sel.pc_func = dev->func;
753 pv.pv_lock = PCI_VGA_UNLOCK;
754 ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/*
 * Select which device subsequent vgaarb_lock/unlock calls operate on.
 */
758 pci_device_vgaarb_set_target(struct pci_device *dev)
760 pci_sys->vga_target = dev;
/*
 * Lock legacy VGA routing to the current target via PCIOCSETVGA.
 * Skipped (early return, in lines not visible here) when the target
 * decodes no legacy resources or it is the only VGA device present.
 */
765 pci_device_vgaarb_lock(void)
767 struct pci_device *dev = pci_sys->vga_target;
774 if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
777 if (pci_sys->vga_count == 1)
781 pv.pv_sel.pc_bus = dev->bus;
782 pv.pv_sel.pc_dev = dev->dev;
783 pv.pv_sel.pc_func = dev->func;
784 pv.pv_lock = PCI_VGA_LOCK;
785 return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/*
 * Release the legacy VGA lock on the current target via PCIOCSETVGA;
 * mirrors vgaarb_lock, including the same skip conditions.
 */
789 pci_device_vgaarb_unlock(void)
791 struct pci_device *dev = pci_sys->vga_target;
798 if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
801 if (pci_sys->vga_count == 1)
805 pv.pv_sel.pc_bus = dev->bus;
806 pv.pv_sel.pc_dev = dev->dev;
807 pv.pv_sel.pc_func = dev->func;
808 pv.pv_lock = PCI_VGA_UNLOCK;
809 return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/*
 * Report arbitration state: total VGA device count and, per device,
 * which legacy resources it currently decodes (vgaarb_rsrc).
 */
813 pci_device_vgaarb_get_info(struct pci_device *dev, int *vga_count,
816 *vga_count = pci_sys->vga_count;
819 *rsrc_decodes = dev->vgaarb_rsrc;
/*
 * Record which legacy resources the current vga_target decodes;
 * consulted by vgaarb_lock/unlock to decide whether locking is needed.
 */
825 pci_device_vgaarb_decodes(int rsrc_decodes)
827 struct pci_device *dev = pci_sys->vga_target;
832 dev->vgaarb_rsrc = rsrc_decodes;