2 * Copyright (c) 2008, 2011 Mark Kettenis
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <sys/param.h>
18 #include <sys/ioctl.h>
19 #include <sys/memrange.h>
21 #include <sys/pciio.h>
23 #include <dev/pci/pcireg.h>
24 #include <dev/pci/pcidevs.h>
33 #include "pciaccess.h"
34 #include "pciaccess_private.h"
37 * This should allow for 16 domains, which should cover everything
38 * except perhaps the really big fridge-sized sparc64 server machines
39 * that are unlikely to have any graphics hardware in them.
/* Fd for the aperture device used to mmap PCI memory/ROMs; -1 until opened. */
44 static int aperturefd = -1;
/*
 * Read a 32-bit PCI config-space register on (domain, bus, dev, func)
 * via the PCIOCREAD ioctl on that domain's /dev/pci fd.
 * Returns the ioctl result; the value is delivered through *val.
 */
47 pci_read(int domain, int bus, int dev, int func, uint32_t reg, uint32_t *val)
52 bzero(&io, sizeof(io));
53 io.pi_sel.pc_bus = bus;
54 io.pi_sel.pc_dev = dev;
55 io.pi_sel.pc_func = func;
/* Issue the config read against this domain's PCI device node. */
59 err = ioctl(pcifd[domain], PCIOCREAD, &io);
/*
 * Write a 32-bit PCI config-space register on (domain, bus, dev, func)
 * via the PCIOCWRITE ioctl.  Returns the ioctl result directly.
 */
69 pci_write(int domain, int bus, int dev, int func, uint32_t reg, uint32_t val)
73 bzero(&io, sizeof(io));
74 io.pi_sel.pc_bus = bus;
75 io.pi_sel.pc_dev = dev;
76 io.pi_sel.pc_func = func;
81 return ioctl(pcifd[domain], PCIOCWRITE, &io);
/*
 * Copy the device's expansion ROM into the caller-supplied buffer.
 * Temporarily enables memory decode and the ROM BAR, mmaps the ROM
 * through the aperture fd, copies rom_size bytes out, then restores
 * the saved config-space registers.
 */
89 pci_device_openbsd_read_rom(struct pci_device *device, void *buffer)
91 struct pci_device_private *priv = (struct pci_device_private *)device;
96 int pci_rom, domain, bus, dev, func;
98 domain = device->domain;
/* Guard before indexing pcifd[] by domain. */
99 if (domain < 0 || domain >= ndomains)
106 if (aperturefd == -1)
109 if (priv->base.rom_size == 0) {
110 #if defined(__alpha__) || defined(__amd64__) || defined(__i386__)
/* NOTE(review): VGA device with no probed ROM size — presumably falls
 * back to the legacy BIOS image on these platforms; confirm against
 * the elided lines of the full source. */
111 if ((device->device_class & 0x00ffff00) ==
112 ((PCI_CLASS_DISPLAY << 16) |
113 (PCI_SUBCLASS_DISPLAY_VGA << 8))) {
121 rom_base = priv->rom_base;
122 rom_size = priv->base.rom_size;
/* Save CSR and ROM BAR, then enable memory decode and the ROM. */
125 pci_read(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, &csr);
126 pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG,
127 csr | PCI_COMMAND_MEM_ENABLE);
128 pci_read(domain, bus, dev, func, PCI_ROM_REG, &rom);
129 pci_write(domain, bus, dev, func, PCI_ROM_REG,
130 rom | PCI_ROM_ENABLE);
/* Map the enabled ROM through the aperture device and copy it out. */
133 bios = mmap(NULL, rom_size, PROT_READ, MAP_SHARED,
134 aperturefd, (off_t)rom_base);
135 if (bios == MAP_FAILED)
138 memcpy(buffer, bios, rom_size);
139 munmap(bios, rom_size);
142 /* Restore PCI config space */
143 pci_write(domain, bus, dev, func, PCI_ROM_REG, rom);
144 pci_write(domain, bus, dev, func, PCI_COMMAND_STATUS_REG, csr);
/*
 * Number of functions to probe on (domain, bus, dev): 8 when the
 * header-type register has the multifunction bit set, otherwise 1.
 */
150 pci_nfuncs(int domain, int bus, int dev)
154 if (domain < 0 || domain >= ndomains)
157 if (pci_read(domain, bus, dev, 0, PCI_BHLC_REG, &hdr) != 0)
160 return (PCI_HDRTYPE_MULTIFN(hdr) ? 8 : 1);
/*
 * Map a PCI memory range through the aperture fd.  Read-only unless the
 * caller sets PCI_DEV_MAP_FLAG_WRITABLE.  On i386/amd64, additionally
 * program an MTRR when cacheable or write-combining access is requested.
 */
164 pci_device_openbsd_map_range(struct pci_device *dev,
165 struct pci_device_mapping *map)
167 struct mem_range_desc mr;
168 struct mem_range_op mo;
169 int prot = PROT_READ;
171 if (map->flags & PCI_DEV_MAP_FLAG_WRITABLE)
174 map->memory = mmap(NULL, map->size, prot, MAP_SHARED, aperturefd,
176 if (map->memory == MAP_FAILED)
178 #if defined(__i386__) || defined(__amd64__)
179 /* No need to set an MTRR if it's the default mode. */
180 if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
181 (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
182 mr.mr_base = map->base;
183 mr.mr_len = map->size;
185 if (map->flags & PCI_DEV_MAP_FLAG_CACHABLE)
186 mr.mr_flags |= MDF_WRITEBACK;
187 if (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)
188 mr.mr_flags |= MDF_WRITECOMBINE;
189 strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));
192 mo.mo_arg[0] = MEMRANGE_SET_UPDATE;
/* MTRR failure is non-fatal: the mapping still works, only uncached. */
194 if (ioctl(aperturefd, MEMRANGE_SET, &mo))
195 (void)fprintf(stderr, "mtrr set failed: %s\n",
/*
 * Undo pci_device_openbsd_map_range(): on i386/amd64 remove the MTRR
 * that was installed for cacheable/write-combining mappings, then fall
 * through to the generic munmap-based unmap.
 */
203 pci_device_openbsd_unmap_range(struct pci_device *dev,
204 struct pci_device_mapping *map)
206 #if defined(__i386__) || defined(__amd64__)
207 struct mem_range_desc mr;
208 struct mem_range_op mo;
210 if ((map->flags & PCI_DEV_MAP_FLAG_CACHABLE) ||
211 (map->flags & PCI_DEV_MAP_FLAG_WRITE_COMBINE)) {
212 mr.mr_base = map->base;
213 mr.mr_len = map->size;
214 mr.mr_flags = MDF_UNCACHEABLE;
215 strlcpy(mr.mr_owner, "pciaccess", sizeof(mr.mr_owner));
218 mo.mo_arg[0] = MEMRANGE_SET_REMOVE;
/* Best-effort removal; errors are deliberately ignored. */
220 (void)ioctl(aperturefd, MEMRANGE_SET, &mo);
223 return pci_device_generic_unmap_range(dev, map);
/*
 * Config-space read of arbitrary offset/size: loop in chunks of at most
 * 4 bytes, aligning each PCIOCREAD down to a 32-bit register boundary
 * and shifting the requested bytes into place.  *bytes_read accumulates
 * the number of bytes actually copied out.
 */
227 pci_device_openbsd_read(struct pci_device *dev, void *data,
228 pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_read)
232 io.pi_sel.pc_bus = dev->bus;
233 io.pi_sel.pc_dev = dev->dev;
234 io.pi_sel.pc_func = dev->func;
/* Bytes available in this aligned 32-bit register from `offset` on. */
238 int toread = MIN(size, 4 - (offset & 0x3));
240 io.pi_reg = (offset & ~0x3);
243 if (ioctl(pcifd[dev->domain], PCIOCREAD, &io) == -1)
/* Normalize endianness, then discard bytes below the requested offset. */
246 io.pi_data = htole32(io.pi_data);
247 io.pi_data >>= ((offset & 0x3) * 8);
249 memcpy(data, &io.pi_data, toread);
252 data = (char *)data + toread;
254 *bytes_read += toread;
/*
 * Config-space write.  Only 32-bit-aligned, multiple-of-4 writes are
 * supported; anything else is rejected up front.  Writes proceed one
 * 32-bit register at a time via PCIOCWRITE.
 */
261 pci_device_openbsd_write(struct pci_device *dev, const void *data,
262 pciaddr_t offset, pciaddr_t size, pciaddr_t *bytes_written)
/* Unaligned or partial-word writes are not supported by this backend. */
266 if ((offset % 4) != 0 || (size % 4) != 0)
269 io.pi_sel.pc_bus = dev->bus;
270 io.pi_sel.pc_dev = dev->dev;
271 io.pi_sel.pc_func = dev->func;
277 memcpy(&io.pi_data, data, 4);
279 if (ioctl(pcifd[dev->domain], PCIOCWRITE, &io) == -1)
283 data = (char *)data + 4;
/* Tear down the backend: close every per-domain /dev/pci fd. */
292 pci_system_openbsd_destroy(void)
296 for (domain = 0; domain < ndomains; domain++)
297 close(pcifd[domain]);
/*
 * Probe one device: read its header type, then size each BAR by the
 * classic write-all-ones/read-back/restore sequence, handling I/O,
 * 32-bit memory and 64-bit memory BARs, and finally probe the
 * expansion ROM the same way.
 *
 * NOTE(review): the '®' characters below are mojibake for '&reg'
 * (address-of the local `reg`) — repair the file encoding.
 */
302 pci_device_openbsd_probe(struct pci_device *device)
304 struct pci_device_private *priv = (struct pci_device_private *)device;
305 struct pci_mem_region *region;
306 uint64_t reg64, size64;
307 uint32_t bar, reg, size;
308 int domain, bus, dev, func, err;
310 domain = device->domain;
315 err = pci_read(domain, bus, dev, func, PCI_BHLC_REG, ®);
/* Only header type 0 (normal devices) have the BAR layout probed below. */
319 priv->header_type = PCI_HDRTYPE_TYPE(reg);
320 if (priv->header_type != 0)
323 region = device->regions;
324 for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END;
325 bar += sizeof(uint32_t), region++) {
326 err = pci_read(domain, bus, dev, func, bar, ®);
330 /* Probe the size of the region. */
331 err = pci_write(domain, bus, dev, func, bar, ~0);
334 pci_read(domain, bus, dev, func, bar, &size);
/* Restore the original BAR value after sizing. */
335 pci_write(domain, bus, dev, func, bar, reg);
337 if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) {
339 region->base_addr = PCI_MAPREG_IO_ADDR(reg);
340 region->size = PCI_MAPREG_IO_SIZE(size);
342 if (PCI_MAPREG_MEM_PREFETCHABLE(reg))
343 region->is_prefetchable = 1;
344 switch(PCI_MAPREG_MEM_TYPE(reg)) {
345 case PCI_MAPREG_MEM_TYPE_32BIT:
346 case PCI_MAPREG_MEM_TYPE_32BIT_1M:
347 region->base_addr = PCI_MAPREG_MEM_ADDR(reg);
348 region->size = PCI_MAPREG_MEM_SIZE(size);
/* 64-bit BAR: the next 32-bit register holds the high half. */
350 case PCI_MAPREG_MEM_TYPE_64BIT:
356 bar += sizeof(uint32_t);
358 err = pci_read(domain, bus, dev, func, bar, ®);
361 reg64 |= (uint64_t)reg << 32;
363 err = pci_write(domain, bus, dev, func, bar, ~0);
366 pci_read(domain, bus, dev, func, bar, &size);
367 pci_write(domain, bus, dev, func, bar, reg64 >> 32);
368 size64 |= (uint64_t)size << 32;
370 region->base_addr = PCI_MAPREG_MEM64_ADDR(reg64);
371 region->size = PCI_MAPREG_MEM64_SIZE(size64);
378 /* Probe expansion ROM if present */
383 err = pci_write(domain, bus, dev, func, PCI_ROM_REG, ~PCI_ROM_ENABLE);
386 pci_read(domain, bus, dev, func, PCI_ROM_REG, &size);
387 pci_write(domain, bus, dev, func, PCI_ROM_REG, reg);
389 if (PCI_ROM_ADDR(reg) != 0) {
390 priv->rom_base = PCI_ROM_ADDR(reg);
391 device->rom_size = PCI_ROM_SIZE(size);
397 #if defined(__i386__) || defined(__amd64__)
398 #include <machine/sysarch.h>
399 #include <machine/pio.h>
/*
 * Open legacy (ISA-compatible) I/O space.  On i386/amd64 this raises
 * the I/O privilege level via sysarch() so in*/out* instructions work
 * directly; on platforms that define PCI_MAGIC_IO_RANGE the I/O space
 * is instead mmapped through the aperture fd.
 */
402 static struct pci_io_handle *
403 pci_device_openbsd_open_legacy_io(struct pci_io_handle *ret,
404 struct pci_device *dev, pciaddr_t base, pciaddr_t size)
406 #if defined(__i386__)
407 struct i386_iopl_args ia;
410 if (sysarch(I386_IOPL, &ia))
416 #elif defined(__amd64__)
417 struct amd64_iopl_args ia;
420 if (sysarch(AMD64_IOPL, &ia))
426 #elif defined(PCI_MAGIC_IO_RANGE)
427 ret->memory = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
428 aperturefd, PCI_MAGIC_IO_RANGE + base);
429 if (ret->memory == MAP_FAILED)
/* 32-bit legacy I/O read: port I/O on x86, mmapped load elsewhere. */
441 pci_device_openbsd_read32(struct pci_io_handle *handle, uint32_t reg)
443 #if defined(__i386__) || defined(__amd64__)
444 return inl(handle->base + reg);
446 return *(uint32_t *)((uintptr_t)handle->memory + reg);
/* 16-bit legacy I/O read: port I/O on x86, mmapped load elsewhere. */
451 pci_device_openbsd_read16(struct pci_io_handle *handle, uint32_t reg)
453 #if defined(__i386__) || defined(__amd64__)
454 return inw(handle->base + reg);
456 return *(uint16_t *)((uintptr_t)handle->memory + reg);
/* 8-bit legacy I/O read: port I/O on x86, mmapped load elsewhere. */
461 pci_device_openbsd_read8(struct pci_io_handle *handle, uint32_t reg)
463 #if defined(__i386__) || defined(__amd64__)
464 return inb(handle->base + reg);
466 return *(uint8_t *)((uintptr_t)handle->memory + reg);
471 pci_device_openbsd_write32(struct pci_io_handle *handle, uint32_t reg,
474 #if defined(__i386__) || defined(__amd64__)
475 outl(handle->base + reg, data);
477 *(uint16_t *)((uintptr_t)handle->memory + reg) = data;
482 pci_device_openbsd_write16(struct pci_io_handle *handle, uint32_t reg,
485 #if defined(__i386__) || defined(__amd64__)
486 outw(handle->base + reg, data);
488 *(uint8_t *)((uintptr_t)handle->memory + reg) = data;
493 pci_device_openbsd_write8(struct pci_io_handle *handle, uint32_t reg,
496 #if defined(__i386__) || defined(__amd64__)
497 outb(handle->base + reg, data);
499 *(uint32_t *)((uintptr_t)handle->memory + reg) = data;
/*
 * Map a legacy memory range by delegating to
 * pci_device_openbsd_map_range(); the resulting pointer is returned
 * through *addr.
 */
504 pci_device_openbsd_map_legacy(struct pci_device *dev, pciaddr_t base,
505 pciaddr_t size, unsigned map_flags, void **addr)
507 struct pci_device_mapping map;
512 map.flags = map_flags;
514 err = pci_device_openbsd_map_range(dev, &map);
/* Unmap a legacy mapping by delegating to pci_device_openbsd_unmap_range(). */
521 pci_device_openbsd_unmap_legacy(struct pci_device *dev, void *addr,
524 struct pci_device_mapping map;
529 return pci_device_openbsd_unmap_range(dev, &map);
/*
 * Backend dispatch table installed as pci_sys->methods by
 * pci_system_openbsd_create().  This is a positional initializer, so
 * entry order must match struct pci_system_methods in
 * pciaccess_private.h.
 */
532 static const struct pci_system_methods openbsd_pci_methods = {
533 pci_system_openbsd_destroy,
535 pci_device_openbsd_read_rom,
536 pci_device_openbsd_probe,
537 pci_device_openbsd_map_range,
538 pci_device_openbsd_unmap_range,
539 pci_device_openbsd_read,
540 pci_device_openbsd_write,
541 pci_fill_capabilities_generic,
546 pci_device_openbsd_open_legacy_io,
548 pci_device_openbsd_read32,
549 pci_device_openbsd_read16,
550 pci_device_openbsd_read8,
551 pci_device_openbsd_write32,
552 pci_device_openbsd_write16,
553 pci_device_openbsd_write8,
554 pci_device_openbsd_map_legacy,
555 pci_device_openbsd_unmap_legacy
/*
 * Initialize the OpenBSD backend: open one /dev/pciN node per domain,
 * allocate pci_sys, then walk every domain/bus/device/function twice —
 * first to count devices, then to fill in the device table (IDs, class,
 * revision, subsystem IDs, VGA arbitration resources).
 *
 * NOTE(review): the '®' characters below are mojibake for '&reg'
 * (address-of the local `reg`) — repair the file encoding.
 */
559 pci_system_openbsd_create(void)
561 struct pci_device_private *device;
562 int domain, bus, dev, func, ndevs, nfuncs;
563 char path[MAXPATHLEN];
/* Open each domain's PCI device node; O_CLOEXEC keeps fds out of children. */
569 for (domain = 0; domain < sizeof(pcifd) / sizeof(pcifd[0]); domain++) {
570 snprintf(path, sizeof(path), "/dev/pci%d", domain);
571 pcifd[domain] = open(path, O_RDWR | O_CLOEXEC);
572 if (pcifd[domain] == -1)
580 pci_sys = calloc(1, sizeof(struct pci_system));
581 if (pci_sys == NULL) {
/* Allocation failed: close everything opened so far before bailing. */
582 for (domain = 0; domain < ndomains; domain++)
583 close(pcifd[domain]);
588 pci_sys->methods = &openbsd_pci_methods;
/* First pass: count the devices so the table can be sized. */
591 for (domain = 0; domain < ndomains; domain++) {
592 for (bus = 0; bus < 256; bus++) {
593 for (dev = 0; dev < 32; dev++) {
594 nfuncs = pci_nfuncs(domain, bus, dev);
595 for (func = 0; func < nfuncs; func++) {
596 if (pci_read(domain, bus, dev, func,
597 PCI_ID_REG, ®) != 0)
599 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
600 PCI_VENDOR(reg) == 0)
609 pci_sys->num_devices = ndevs;
610 pci_sys->devices = calloc(ndevs, sizeof(struct pci_device_private));
611 if (pci_sys->devices == NULL) {
614 for (domain = 0; domain < ndomains; domain++)
615 close(pcifd[domain]);
/* Second pass: fill in one pci_device_private per discovered function. */
620 device = pci_sys->devices;
621 for (domain = 0; domain < ndomains; domain++) {
622 for (bus = 0; bus < 256; bus++) {
623 for (dev = 0; dev < 32; dev++) {
624 nfuncs = pci_nfuncs(domain, bus, dev);
625 for (func = 0; func < nfuncs; func++) {
626 if (pci_read(domain, bus, dev, func,
627 PCI_ID_REG, ®) != 0)
629 if (PCI_VENDOR(reg) == PCI_VENDOR_INVALID ||
630 PCI_VENDOR(reg) == 0)
633 device->base.domain = domain;
634 device->base.bus = bus;
635 device->base.dev = dev;
636 device->base.func = func;
637 device->base.vendor_id = PCI_VENDOR(reg);
638 device->base.device_id = PCI_PRODUCT(reg);
640 if (pci_read(domain, bus, dev, func,
641 PCI_CLASS_REG, ®) != 0)
644 device->base.device_class =
646 PCI_CLASS(reg) << 16 |
647 PCI_SUBCLASS(reg) << 8;
648 device->base.revision = PCI_REVISION(reg);
650 if (pci_read(domain, bus, dev, func,
651 PCI_SUBVEND_0, ®) != 0)
654 device->base.subvendor_id = PCI_VENDOR(reg);
655 device->base.subdevice_id = PCI_PRODUCT(reg);
/* Until the arbiter says otherwise, assume full legacy VGA decode. */
657 device->base.vgaarb_rsrc =
658 VGA_ARB_RSRC_LEGACY_IO |
659 VGA_ARB_RSRC_LEGACY_MEM;
/* Hand the backend an already-open aperture fd; presumably stored into
 * aperturefd — body not visible here, confirm against full source. */
671 pci_system_openbsd_init_dev_mem(int fd)
/*
 * Initialize VGA arbitration state: ask the kernel (PCIOCGETVGA on
 * domain 0) which device currently owns VGA, record it as
 * pci_sys->vga_target, then count the VGA-class devices in domain 0.
 */
677 pci_device_vgaarb_init(void)
679 struct pci_device *dev = pci_sys->vga_target;
680 struct pci_device_iterator *iter;
/* Match any vendor/device, but only VGA display-class devices. */
681 struct pci_id_match vga_match = {
682 PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY, PCI_MATCH_ANY,
683 (PCI_CLASS_DISPLAY << 16) | (PCI_SUBCLASS_DISPLAY_VGA << 8),
689 pv.pv_sel.pc_bus = 0;
690 pv.pv_sel.pc_dev = 0;
691 pv.pv_sel.pc_func = 0;
692 err = ioctl(pcifd[0], PCIOCGETVGA, &pv);
696 pci_sys->vga_target = pci_device_find_by_slot(0, pv.pv_sel.pc_bus,
697 pv.pv_sel.pc_dev, pv.pv_sel.pc_func);
699 /* Count the number of VGA devices in domain 0. */
700 iter = pci_id_match_iterator_create(&vga_match);
703 pci_sys->vga_count = 0;
704 while ((dev = pci_device_next(iter)) != NULL) {
705 if (dev->domain == 0)
706 pci_sys->vga_count++;
708 pci_iterator_destroy(iter);
/*
 * Tear down VGA arbitration: release any VGA lock held on the current
 * target device via PCIOCSETVGA with PCI_VGA_UNLOCK.
 */
714 pci_device_vgaarb_fini(void)
716 struct pci_device *dev;
721 dev = pci_sys->vga_target;
725 pv.pv_sel.pc_bus = dev->bus;
726 pv.pv_sel.pc_dev = dev->dev;
727 pv.pv_sel.pc_func = dev->func;
728 pv.pv_lock = PCI_VGA_UNLOCK;
/* Best-effort unlock; the return value is intentionally ignored. */
729 ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/* Select which device subsequent vgaarb lock/unlock calls operate on. */
733 pci_device_vgaarb_set_target(struct pci_device *dev)
735 pci_sys->vga_target = dev;
/*
 * Acquire the VGA arbitration lock for the current target device via
 * PCIOCSETVGA.  Locking is skipped when the device decodes no legacy
 * resources or when it is the only VGA device.
 */
740 pci_device_vgaarb_lock(void)
742 struct pci_device *dev = pci_sys->vga_target;
749 if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
/* NOTE(review): vga_count == 1 is already covered by the condition
 * above — this second check looks redundant; confirm against the
 * elided lines of the full source. */
752 if (pci_sys->vga_count == 1)
756 pv.pv_sel.pc_bus = dev->bus;
757 pv.pv_sel.pc_dev = dev->dev;
758 pv.pv_sel.pc_func = dev->func;
759 pv.pv_lock = PCI_VGA_LOCK;
760 return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/*
 * Release the VGA arbitration lock for the current target device via
 * PCIOCSETVGA, mirroring pci_device_vgaarb_lock()'s early-out rules.
 */
764 pci_device_vgaarb_unlock(void)
766 struct pci_device *dev = pci_sys->vga_target;
773 if (dev->vgaarb_rsrc == 0 || pci_sys->vga_count == 1)
/* NOTE(review): same apparently-redundant vga_count check as in
 * pci_device_vgaarb_lock(); confirm against the full source. */
776 if (pci_sys->vga_count == 1)
780 pv.pv_sel.pc_bus = dev->bus;
781 pv.pv_sel.pc_dev = dev->dev;
782 pv.pv_sel.pc_func = dev->func;
783 pv.pv_lock = PCI_VGA_UNLOCK;
784 return ioctl(pcifd[dev->domain], PCIOCSETVGA, &pv);
/* Report the VGA device count and the target's decoded resources. */
788 pci_device_vgaarb_get_info(struct pci_device *dev, int *vga_count,
791 *vga_count = pci_sys->vga_count;
794 *rsrc_decodes = dev->vgaarb_rsrc;
/* Record which legacy VGA resources the target device decodes. */
800 pci_device_vgaarb_decodes(int rsrc_decodes)
802 struct pci_device *dev = pci_sys->vga_target;
807 dev->vgaarb_rsrc = rsrc_decodes;