// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE] = "none",
	[IDXD_WQT_KERNEL] = "kernel",
	[IDXD_WQT_USER] = "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

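/*
 * All idxd objects (device, groups, wqs, engines) hang off the "dsa" bus
 * and are driven entirely through sysfs. For illustration (the dsa0 name
 * assumes the numbering generated by the setup code below), enabling a
 * configured device is a driver bind:
 *
 *	echo dsa0 > /sys/bus/dsa/drivers/dsa/bind
 *
 * which arrives at idxd_config_bus_probe() through the match below.
 */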
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;

		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));

	return matched;
}

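/*
 * Probe does the real enabling work. For a device: flush the software
 * configuration to the registers, then issue the device enable command.
 * For a wq: validate its software state (device enabled, wq disabled,
 * group attached, name set), program and enable it, map its portal, and
 * finally expose it as a dmaengine channel or a char device per its type.
 */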
static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}

		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			/* dev_lock already dropped above; no unlock here */
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			/*
			 * Disable under dev_lock, and keep the mapping
			 * error as the probe result rather than letting a
			 * successful disable overwrite it.
			 */
			spin_lock_irqsave(&idxd->dev_lock, flags);
			if (idxd_wq_disable(wq) < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;

		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

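/*
 * disable_wq() undoes the wq half of probe in reverse order: tear down
 * the dmaengine/cdev interface first, then unmap the portal and issue
 * the wq disable command under the device lock.
 */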
static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have a claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

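/*
 * Unbinding the device (illustrative path, mirroring the bind example
 * above: echo dsa0 > /sys/bus/dsa/drivers/dsa/unbind) must first release
 * the driver from any wq that is still active before the device itself
 * can be disabled.
 */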
static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		unsigned long flags;
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}

	return 0;

drv_fail:
	/* unwind only the drivers that actually registered */
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

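/*
 * The remainder of this file is the sysfs attribute surface: per-engine,
 * per-group, per-wq, and per-device nodes under /sys/bus/dsa/devices/.
 * Stores consistently require IDXD_FLAG_CONFIGURABLE and, for most
 * attributes, that the affected object still be disabled; shows simply
 * report current state.
 */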
/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = engine->group;

	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;

	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

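/*
 * Tokens are the device's shared internal bandwidth resource (later
 * kernels rename them "read buffers"). The group attributes below split
 * max_tokens into per-group reserved and allowed budgets, and
 * idxd_set_free_tokens() recomputes the unreserved remainder.
 */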
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens + group->tokens_reserved)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;
	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
				      idxd->id, engine->id);
	}

	/* trim the trailing space, but only if anything was emitted */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
				      idxd->id, wq->id);
	}

	/* trim the trailing space, but only if anything was emitted */
	if (rc)
		rc--;
	rc += sprintf(tmp + rc, "\n");

	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */

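/*
 * A typical dedicated kernel wq configuration, for illustration only
 * (attribute names match the definitions below; the dsa0/wq0.0 paths
 * assume default numbering):
 *
 *	echo 16 > /sys/bus/dsa/devices/dsa0/wq0.0/size
 *	echo dedicated > /sys/bus/dsa/devices/dsa0/wq0.0/mode
 *	echo 0 > /sys/bus/dsa/devices/dsa0/wq0.0/group_id
 *	echo kernel > /sys/bus/dsa/devices/dsa0/wq0.0/type
 *	echo dmaengine > /sys/bus/dsa/devices/dsa0/wq0.0/name
 */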
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;

	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
		       wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated")) {
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
		wq->threshold = 0;
	} else {
		return -EINVAL;
	}

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static int total_claimed_wq_size(struct idxd_device *idxd)
{
	int i;
	int wq_size = 0;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq_size += wq->size;
	}

	return wq_size;
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}

	return -EINVAL;
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
		wq->type = IDXD_WQT_NONE;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		return -EINVAL;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */

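/*
 * Device-level attributes are read-only capability and status reports,
 * with one exception: token_limit remains writable while the device is
 * disabled and configurable.
 */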
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
		       test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	/* overwrite the trailing space with the newline */
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

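/*
 * The idxd_setup_*_sysfs() helpers below share one pattern: initialize
 * each conf_dev, device_register() it, and on failure put_device() the
 * failed one before unwinding every sibling already registered.
 */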
static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

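/*
 * Bus types are registered once at module load, before any device is
 * probed. The IDXD_TYPE_MAX loops leave room for device types beyond
 * DSA, even though only the dsa bus exists in this version.
 */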
int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}

	return 0;

bus_err:
	/* unwind only the bus types that actually registered */
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}