// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"

static char *idxd_wq_type_names[] = {
	[IDXD_WQT_NONE]		= "none",
	[IDXD_WQT_KERNEL]	= "kernel",
	[IDXD_WQT_USER]		= "user",
};

static void idxd_conf_device_release(struct device *dev)
{
	dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev));
}

static struct device_type idxd_group_device_type = {
	.name = "group",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_wq_device_type = {
	.name = "wq",
	.release = idxd_conf_device_release,
};

static struct device_type idxd_engine_device_type = {
	.name = "engine",
	.release = idxd_conf_device_release,
};

static struct device_type dsa_device_type = {
	.name = "dsa",
	.release = idxd_conf_device_release,
};

static inline bool is_dsa_dev(struct device *dev)
{
	return dev ? dev->type == &dsa_device_type : false;
}

static inline bool is_idxd_dev(struct device *dev)
{
	return is_dsa_dev(dev);
}

static inline bool is_idxd_wq_dev(struct device *dev)
{
	return dev ? dev->type == &idxd_wq_device_type : false;
}

static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq)
{
	if (wq->type == IDXD_WQT_KERNEL &&
	    strcmp(wq->name, "dmaengine") == 0)
		return true;
	return false;
}

static inline bool is_idxd_wq_cdev(struct idxd_wq *wq)
{
	return wq->type == IDXD_WQT_USER;
}

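/*
 * Configuration model: device, wq, group and engine config nodes all live
 * on the dsa bus, and binding/unbinding the dsa driver to a node is what
 * enables/disables it.  The match/probe/remove callbacks below implement
 * that flow.
 */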
static int idxd_config_bus_match(struct device *dev,
				 struct device_driver *drv)
{
	int matched = 0;

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY)
			return 0;
		matched = 1;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		if (idxd->state < IDXD_DEV_CONF_READY)
			return 0;
		if (wq->state != IDXD_WQ_DISABLED) {
			dev_dbg(dev, "%s not disabled\n", dev_name(dev));
			return 0;
		}
		matched = 1;
	}

	if (matched)
		dev_dbg(dev, "%s matched\n", dev_name(dev));
	return matched;
}

static int idxd_config_bus_probe(struct device *dev)
{
	int rc;
	unsigned long flags;

	dev_dbg(dev, "%s called\n", __func__);

	if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);

		if (idxd->state != IDXD_DEV_CONF_READY) {
			dev_warn(dev, "Device not ready for config\n");
			return -EBUSY;
		}

		if (!try_module_get(THIS_MODULE))
			return -ENXIO;

		spin_lock_irqsave(&idxd->dev_lock, flags);

		/* Perform IDXD configuration and enabling */
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device config failed: %d\n", rc);
			return rc;
		}

		/* start device */
		rc = idxd_device_enable(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			module_put(THIS_MODULE);
			dev_warn(dev, "Device enable failed: %d\n", rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		dev_info(dev, "Device %s enabled\n", dev_name(dev));

		rc = idxd_register_dma_device(idxd);
		if (rc < 0) {
			module_put(THIS_MODULE);
			dev_dbg(dev, "Failed to register dmaengine device\n");
			return rc;
		}
		return 0;
	} else if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);
		struct idxd_device *idxd = wq->idxd;

		mutex_lock(&wq->wq_lock);

		if (idxd->state != IDXD_DEV_ENABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Enabling while device not enabled.\n");
			return -EPERM;
		}

		if (wq->state != IDXD_WQ_DISABLED) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d already enabled.\n", wq->id);
			return -EBUSY;
		}

		if (!wq->group) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ not attached to group.\n");
			return -EINVAL;
		}

		if (strlen(wq->name) == 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ name not set.\n");
			return -EINVAL;
		}

		rc = idxd_wq_alloc_resources(wq);
		if (rc < 0) {
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ resource alloc failed\n");
			return rc;
		}

		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_config(idxd);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "Writing WQ %d config failed: %d\n",
				 wq->id, rc);
			return rc;
		}

		rc = idxd_wq_enable(wq);
		if (rc < 0) {
			spin_unlock_irqrestore(&idxd->dev_lock, flags);
			mutex_unlock(&wq->wq_lock);
			dev_warn(dev, "WQ %d enabling failed: %d\n",
				 wq->id, rc);
			return rc;
		}
		spin_unlock_irqrestore(&idxd->dev_lock, flags);

		rc = idxd_wq_map_portal(wq);
		if (rc < 0) {
			dev_warn(dev, "wq portal mapping failed: %d\n", rc);
			rc = idxd_wq_disable(wq);
			if (rc < 0)
				dev_warn(dev, "IDXD wq disable failed\n");
			mutex_unlock(&wq->wq_lock);
			return rc;
		}

		wq->client_count = 0;
		dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev));

		if (is_idxd_wq_dmaengine(wq)) {
			rc = idxd_register_dma_channel(wq);
			if (rc < 0) {
				dev_dbg(dev, "DMA channel register failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		} else if (is_idxd_wq_cdev(wq)) {
			rc = idxd_wq_add_cdev(wq);
			if (rc < 0) {
				dev_dbg(dev, "Cdev creation failed\n");
				mutex_unlock(&wq->wq_lock);
				return rc;
			}
		}

		mutex_unlock(&wq->wq_lock);
		return 0;
	}

	return -ENODEV;
}

static void disable_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);
	dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev));
	if (wq->state == IDXD_WQ_DISABLED) {
		mutex_unlock(&wq->wq_lock);
		return;
	}

	if (is_idxd_wq_dmaengine(wq))
		idxd_unregister_dma_channel(wq);
	else if (is_idxd_wq_cdev(wq))
		idxd_wq_del_cdev(wq);

	if (idxd_wq_refcount(wq))
		dev_warn(dev, "Clients have claim on wq %d: %d\n",
			 wq->id, idxd_wq_refcount(wq));

	idxd_wq_unmap_portal(wq);

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_disable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	idxd_wq_free_resources(wq);
	wq->client_count = 0;
	mutex_unlock(&wq->wq_lock);

	if (rc < 0)
		dev_warn(dev, "Failed to disable %s: %d\n",
			 dev_name(&wq->conf_dev), rc);
	else
		dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev));
}

static int idxd_config_bus_remove(struct device *dev)
{
	int rc;

	dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev));

	/* disable workqueue here */
	if (is_idxd_wq_dev(dev)) {
		struct idxd_wq *wq = confdev_to_wq(dev);

		disable_wq(wq);
	} else if (is_idxd_dev(dev)) {
		struct idxd_device *idxd = confdev_to_idxd(dev);
		unsigned long flags;
		int i;

		dev_dbg(dev, "%s removing dev %s\n", __func__,
			dev_name(&idxd->conf_dev));
		for (i = 0; i < idxd->max_wqs; i++) {
			struct idxd_wq *wq = &idxd->wqs[i];

			if (wq->state == IDXD_WQ_DISABLED)
				continue;
			dev_warn(dev, "Active wq %d on disable %s.\n", i,
				 dev_name(&idxd->conf_dev));
			device_release_driver(&wq->conf_dev);
		}

		idxd_unregister_dma_device(idxd);
		spin_lock_irqsave(&idxd->dev_lock, flags);
		rc = idxd_device_disable(idxd);
		spin_unlock_irqrestore(&idxd->dev_lock, flags);
		module_put(THIS_MODULE);
		if (rc < 0)
			dev_warn(dev, "Device disable failed\n");
		else
			dev_info(dev, "Device %s disabled\n", dev_name(dev));
	}

	return 0;
}

static void idxd_config_bus_shutdown(struct device *dev)
{
	dev_dbg(dev, "%s called\n", __func__);
}

struct bus_type dsa_bus_type = {
	.name = "dsa",
	.match = idxd_config_bus_match,
	.probe = idxd_config_bus_probe,
	.remove = idxd_config_bus_remove,
	.shutdown = idxd_config_bus_shutdown,
};

static struct bus_type *idxd_bus_types[] = {
	&dsa_bus_type
};

static struct idxd_device_driver dsa_drv = {
	.drv = {
		.name = "dsa",
		.bus = &dsa_bus_type,
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
};

static struct idxd_device_driver *idxd_drvs[] = {
	&dsa_drv
};

struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
{
	return idxd_bus_types[idxd->type];
}

static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
	if (idxd->type == IDXD_TYPE_DSA)
		return &dsa_device_type;
	else
		return NULL;
}

/* IDXD generic driver setup */
int idxd_register_driver(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = driver_register(&idxd_drvs[i]->drv);
		if (rc < 0)
			goto drv_fail;
	}
	return 0;

drv_fail:
	while (--i >= 0)
		driver_unregister(&idxd_drvs[i]->drv);
	return rc;
}

void idxd_unregister_driver(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		driver_unregister(&idxd_drvs[i]->drv);
}

/* IDXD engine attributes */
static ssize_t engine_group_id_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);

	if (engine->group)
		return sprintf(buf, "%d\n", engine->group->id);
	else
		return sprintf(buf, "%d\n", -1);
}

static ssize_t engine_group_id_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct idxd_engine *engine =
		container_of(dev, struct idxd_engine, conf_dev);
	struct idxd_device *idxd = engine->idxd;
	long id;
	int rc;
	struct idxd_group *prevg;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (engine->group) {
			engine->group->num_engines--;
			engine->group = NULL;
		}
		return count;
	}

	prevg = engine->group;
	if (prevg)
		prevg->num_engines--;
	engine->group = &idxd->groups[id];
	engine->group->num_engines++;
	return count;
}

static struct device_attribute dev_attr_engine_group =
		__ATTR(group_id, 0644, engine_group_id_show,
		       engine_group_id_store);

static struct attribute *idxd_engine_attributes[] = {
	&dev_attr_engine_group.attr,
	NULL,
};

static const struct attribute_group idxd_engine_attribute_group = {
	.attrs = idxd_engine_attributes,
};

static const struct attribute_group *idxd_engine_attribute_groups[] = {
	&idxd_engine_attribute_group,
	NULL,
};

/* Group attributes */

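/*
 * Tokens are the device's shared hardware resource credits.
 * tokens_reserved carves credits out of the global pool for a group,
 * tokens_allowed caps how many a group may consume, and use_token_limit
 * opts the group into the device-wide token_limit.  idxd_set_free_tokens()
 * recomputes the unreserved pool whenever a reservation changes.
 */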
static void idxd_set_free_tokens(struct idxd_device *idxd)
{
	int i, tokens;

	for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
		struct idxd_group *g = &idxd->groups[i];

		tokens += g->tokens_reserved;
	}

	idxd->nr_tokens = idxd->max_tokens - tokens;
}

static ssize_t group_tokens_reserved_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_reserved);
}

static ssize_t group_tokens_reserved_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val > idxd->max_tokens)
		return -EINVAL;

	if (val > idxd->nr_tokens)
		return -EINVAL;

	group->tokens_reserved = val;
	idxd_set_free_tokens(idxd);
	return count;
}

static struct device_attribute dev_attr_group_tokens_reserved =
		__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
		       group_tokens_reserved_store);

static ssize_t group_tokens_allowed_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->tokens_allowed);
}

static ssize_t group_tokens_allowed_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 4 * group->num_engines ||
	    val > group->tokens_reserved + idxd->nr_tokens)
		return -EINVAL;

	group->tokens_allowed = val;
	return count;
}

static struct device_attribute dev_attr_group_tokens_allowed =
		__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
		       group_tokens_allowed_store);

static ssize_t group_use_token_limit_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%u\n", group->use_token_limit);
}

static ssize_t group_use_token_limit_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (idxd->token_limit == 0)
		return -EPERM;

	group->use_token_limit = !!val;
	return count;
}

static struct device_attribute dev_attr_group_use_token_limit =
		__ATTR(use_token_limit, 0644, group_use_token_limit_show,
		       group_use_token_limit_store);

static ssize_t group_engines_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		if (!engine->group)
			continue;

		if (engine->group->id == group->id)
			rc += sprintf(tmp + rc, "engine%d.%d ",
					idxd->id, engine->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");
	return rc;
}

static struct device_attribute dev_attr_group_engines =
		__ATTR(engines, 0444, group_engines_show, NULL);

static ssize_t group_work_queues_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	int i, rc = 0;
	char *tmp = buf;
	struct idxd_device *idxd = group->idxd;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		if (!wq->group)
			continue;

		if (wq->group->id == group->id)
			rc += sprintf(tmp + rc, "wq%d.%d ",
					idxd->id, wq->id);
	}

	rc--;
	rc += sprintf(tmp + rc, "\n");
	return rc;
}

static struct device_attribute dev_attr_group_work_queues =
		__ATTR(work_queues, 0444, group_work_queues_show, NULL);

static ssize_t group_traffic_class_a_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_a);
}

static ssize_t group_traffic_class_a_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_a = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_a =
		__ATTR(traffic_class_a, 0644, group_traffic_class_a_show,
		       group_traffic_class_a_store);

static ssize_t group_traffic_class_b_show(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);

	return sprintf(buf, "%d\n", group->tc_b);
}

static ssize_t group_traffic_class_b_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	struct idxd_group *group =
		container_of(dev, struct idxd_group, conf_dev);
	struct idxd_device *idxd = group->idxd;
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (val < 0 || val > 7)
		return -EINVAL;

	group->tc_b = val;
	return count;
}

static struct device_attribute dev_attr_group_traffic_class_b =
		__ATTR(traffic_class_b, 0644, group_traffic_class_b_show,
		       group_traffic_class_b_store);

static struct attribute *idxd_group_attributes[] = {
	&dev_attr_group_work_queues.attr,
	&dev_attr_group_engines.attr,
	&dev_attr_group_use_token_limit.attr,
	&dev_attr_group_tokens_allowed.attr,
	&dev_attr_group_tokens_reserved.attr,
	&dev_attr_group_traffic_class_a.attr,
	&dev_attr_group_traffic_class_b.attr,
	NULL,
};

static const struct attribute_group idxd_group_attribute_group = {
	.attrs = idxd_group_attributes,
};

static const struct attribute_group *idxd_group_attribute_groups[] = {
	&idxd_group_attribute_group,
	NULL,
};

/* IDXD work queue attribs */
static ssize_t wq_clients_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->client_count);
}

static struct device_attribute dev_attr_wq_clients =
		__ATTR(clients, 0444, wq_clients_show, NULL);

static ssize_t wq_state_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->state) {
	case IDXD_WQ_DISABLED:
		return sprintf(buf, "disabled\n");
	case IDXD_WQ_ENABLED:
		return sprintf(buf, "enabled\n");
	}

	return sprintf(buf, "unknown\n");
}

static struct device_attribute dev_attr_wq_state =
		__ATTR(state, 0444, wq_state_show, NULL);

static ssize_t wq_group_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->group)
		return sprintf(buf, "%u\n", wq->group->id);
	else
		return sprintf(buf, "-1\n");
}

static ssize_t wq_group_id_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;
	long id;
	int rc;
	struct idxd_group *prevg, *group;

	rc = kstrtol(buf, 10, &id);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (id > idxd->max_groups - 1 || id < -1)
		return -EINVAL;

	if (id == -1) {
		if (wq->group) {
			wq->group->num_wqs--;
			wq->group = NULL;
		}
		return count;
	}

	group = &idxd->groups[id];
	prevg = wq->group;
	if (prevg)
		prevg->num_wqs--;
	wq->group = group;
	group->num_wqs++;
	return count;
}

static struct device_attribute dev_attr_wq_group_id =
		__ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store);

static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n",
			wq_dedicated(wq) ? "dedicated" : "shared");
}

static ssize_t wq_mode_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	struct idxd_device *idxd = wq->idxd;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (sysfs_streq(buf, "dedicated"))
		set_bit(WQ_FLAG_DEDICATED, &wq->flags);
	else
		return -EINVAL;

	return count;
}

static struct device_attribute dev_attr_wq_mode =
		__ATTR(mode, 0644, wq_mode_show, wq_mode_store);

static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->size);
}

static ssize_t wq_size_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long size;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &size);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (size > idxd->max_wq_size)
		return -EINVAL;

	wq->size = size;
	return count;
}

static struct device_attribute dev_attr_wq_size =
		__ATTR(size, 0644, wq_size_show, wq_size_store);

static ssize_t wq_priority_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%u\n", wq->priority);
}

static ssize_t wq_priority_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	unsigned long prio;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	rc = kstrtoul(buf, 10, &prio);
	if (rc < 0)
		return -EINVAL;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (prio > IDXD_MAX_PRIORITY)
		return -EINVAL;

	wq->priority = prio;
	return count;
}

static struct device_attribute dev_attr_wq_priority =
		__ATTR(priority, 0644, wq_priority_show, wq_priority_store);

static ssize_t wq_type_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	switch (wq->type) {
	case IDXD_WQT_KERNEL:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_KERNEL]);
	case IDXD_WQT_USER:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_USER]);
	case IDXD_WQT_NONE:
	default:
		return sprintf(buf, "%s\n",
			       idxd_wq_type_names[IDXD_WQT_NONE]);
	}
}

static ssize_t wq_type_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
	enum idxd_wq_type old_type;

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	old_type = wq->type;
	if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
		wq->type = IDXD_WQT_KERNEL;
	else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
		wq->type = IDXD_WQT_USER;
	else
		wq->type = IDXD_WQT_NONE;

	/* If we are changing queue type, clear the name */
	if (wq->type != old_type)
		memset(wq->name, 0, WQ_NAME_SIZE + 1);

	return count;
}

static struct device_attribute dev_attr_wq_type =
		__ATTR(type, 0644, wq_type_show, wq_type_store);

static ssize_t wq_name_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%s\n", wq->name);
}

static ssize_t wq_name_store(struct device *dev,
			     struct device_attribute *attr, const char *buf,
			     size_t count)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	if (wq->state != IDXD_WQ_DISABLED)
		return -EPERM;

	if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
		return -EINVAL;

	memset(wq->name, 0, WQ_NAME_SIZE + 1);
	strncpy(wq->name, buf, WQ_NAME_SIZE);
	strreplace(wq->name, '\n', '\0');
	return count;
}

static struct device_attribute dev_attr_wq_name =
		__ATTR(name, 0644, wq_name_show, wq_name_store);

static ssize_t wq_cdev_minor_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);

	return sprintf(buf, "%d\n", wq->idxd_cdev.minor);
}

static struct device_attribute dev_attr_wq_cdev_minor =
		__ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL);

static struct attribute *idxd_wq_attributes[] = {
	&dev_attr_wq_clients.attr,
	&dev_attr_wq_state.attr,
	&dev_attr_wq_group_id.attr,
	&dev_attr_wq_mode.attr,
	&dev_attr_wq_size.attr,
	&dev_attr_wq_priority.attr,
	&dev_attr_wq_type.attr,
	&dev_attr_wq_name.attr,
	&dev_attr_wq_cdev_minor.attr,
	NULL,
};

static const struct attribute_group idxd_wq_attribute_group = {
	.attrs = idxd_wq_attributes,
};

static const struct attribute_group *idxd_wq_attribute_groups[] = {
	&idxd_wq_attribute_group,
	NULL,
};

/* IDXD device attribs */
static ssize_t max_work_queues_size_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wq_size);
}
static DEVICE_ATTR_RO(max_work_queues_size);

static ssize_t max_groups_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_groups);
}
static DEVICE_ATTR_RO(max_groups);

static ssize_t max_work_queues_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_wqs);
}
static DEVICE_ATTR_RO(max_work_queues);

static ssize_t max_engines_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_engines);
}
static DEVICE_ATTR_RO(max_engines);

static ssize_t numa_node_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev));
}
static DEVICE_ATTR_RO(numa_node);

static ssize_t max_batch_size_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_batch_size);
}
static DEVICE_ATTR_RO(max_batch_size);

static ssize_t max_transfer_size_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%llu\n", idxd->max_xfer_bytes);
}
static DEVICE_ATTR_RO(max_transfer_size);

static ssize_t op_cap_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]);
}
static DEVICE_ATTR_RO(op_cap);

static ssize_t gen_cap_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%#llx\n", idxd->hw.gen_cap.bits);
}
static DEVICE_ATTR_RO(gen_cap);

static ssize_t configurable_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n",
			test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags));
}
static DEVICE_ATTR_RO(configurable);

static ssize_t clients_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long flags;
	int count = 0, i;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		count += wq->client_count;
	}
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(clients);

static ssize_t state_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	switch (idxd->state) {
	case IDXD_DEV_DISABLED:
	case IDXD_DEV_CONF_READY:
		return sprintf(buf, "disabled\n");
	case IDXD_DEV_ENABLED:
		return sprintf(buf, "enabled\n");
	case IDXD_DEV_HALTED:
		return sprintf(buf, "halted\n");
	}

	return sprintf(buf, "unknown\n");
}
static DEVICE_ATTR_RO(state);

static ssize_t errors_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	int i, out = 0;
	unsigned long flags;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	for (i = 0; i < 4; i++)
		out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	out--;
	out += sprintf(buf + out, "\n");
	return out;
}
static DEVICE_ATTR_RO(errors);

static ssize_t max_tokens_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->max_tokens);
}
static DEVICE_ATTR_RO(max_tokens);

static ssize_t token_limit_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->token_limit);
}

static ssize_t token_limit_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 10, &val);
	if (rc < 0)
		return -EINVAL;

	if (idxd->state == IDXD_DEV_ENABLED)
		return -EPERM;

	if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
		return -EPERM;

	if (!idxd->hw.group_cap.token_limit)
		return -EPERM;

	if (val > idxd->hw.group_cap.total_tokens)
		return -EINVAL;

	idxd->token_limit = val;
	return count;
}
static DEVICE_ATTR_RW(token_limit);

static ssize_t cdev_major_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct idxd_device *idxd =
		container_of(dev, struct idxd_device, conf_dev);

	return sprintf(buf, "%u\n", idxd->major);
}
static DEVICE_ATTR_RO(cdev_major);

static struct attribute *idxd_device_attributes[] = {
	&dev_attr_max_groups.attr,
	&dev_attr_max_work_queues.attr,
	&dev_attr_max_work_queues_size.attr,
	&dev_attr_max_engines.attr,
	&dev_attr_numa_node.attr,
	&dev_attr_max_batch_size.attr,
	&dev_attr_max_transfer_size.attr,
	&dev_attr_op_cap.attr,
	&dev_attr_gen_cap.attr,
	&dev_attr_configurable.attr,
	&dev_attr_clients.attr,
	&dev_attr_state.attr,
	&dev_attr_errors.attr,
	&dev_attr_max_tokens.attr,
	&dev_attr_token_limit.attr,
	&dev_attr_cdev_major.attr,
	NULL,
};

static const struct attribute_group idxd_device_attribute_group = {
	.attrs = idxd_device_attributes,
};

static const struct attribute_group *idxd_attribute_groups[] = {
	&idxd_device_attribute_group,
	NULL,
};

static int idxd_setup_engine_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		engine->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&engine->conf_dev, "engine%d.%d",
			     idxd->id, engine->id);
		engine->conf_dev.bus = idxd_get_bus_type(idxd);
		engine->conf_dev.groups = idxd_engine_attribute_groups;
		engine->conf_dev.type = &idxd_engine_device_type;
		dev_dbg(dev, "Engine device register: %s\n",
			dev_name(&engine->conf_dev));
		rc = device_register(&engine->conf_dev);
		if (rc < 0) {
			put_device(&engine->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}
	return rc;
}

static int idxd_setup_group_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		group->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&group->conf_dev, "group%d.%d",
			     idxd->id, group->id);
		group->conf_dev.bus = idxd_get_bus_type(idxd);
		group->conf_dev.groups = idxd_group_attribute_groups;
		group->conf_dev.type = &idxd_group_device_type;
		dev_dbg(dev, "Group device register: %s\n",
			dev_name(&group->conf_dev));
		rc = device_register(&group->conf_dev);
		if (rc < 0) {
			put_device(&group->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}
	return rc;
}

static int idxd_setup_wq_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int i, rc;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		wq->conf_dev.parent = &idxd->conf_dev;
		dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id);
		wq->conf_dev.bus = idxd_get_bus_type(idxd);
		wq->conf_dev.groups = idxd_wq_attribute_groups;
		wq->conf_dev.type = &idxd_wq_device_type;
		dev_dbg(dev, "WQ device register: %s\n",
			dev_name(&wq->conf_dev));
		rc = device_register(&wq->conf_dev);
		if (rc < 0) {
			put_device(&wq->conf_dev);
			goto cleanup;
		}
	}

	return 0;

cleanup:
	while (i--) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}
	return rc;
}

static int idxd_setup_device_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;
	char devname[IDXD_NAME_SIZE];

	sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id);
	idxd->conf_dev.parent = dev;
	dev_set_name(&idxd->conf_dev, "%s", devname);
	idxd->conf_dev.bus = idxd_get_bus_type(idxd);
	idxd->conf_dev.groups = idxd_attribute_groups;
	idxd->conf_dev.type = idxd_get_device_type(idxd);

	dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev));
	rc = device_register(&idxd->conf_dev);
	if (rc < 0) {
		put_device(&idxd->conf_dev);
		return rc;
	}

	return 0;
}

int idxd_setup_sysfs(struct idxd_device *idxd)
{
	struct device *dev = &idxd->pdev->dev;
	int rc;

	rc = idxd_setup_device_sysfs(idxd);
	if (rc < 0) {
		dev_dbg(dev, "Device sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_wq_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_group_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Group sysfs registering failed: %d\n", rc);
		return rc;
	}

	rc = idxd_setup_engine_sysfs(idxd);
	if (rc < 0) {
		/* unregister conf dev */
		dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc);
		return rc;
	}

	return 0;
}

void idxd_cleanup_sysfs(struct idxd_device *idxd)
{
	int i;

	for (i = 0; i < idxd->max_wqs; i++) {
		struct idxd_wq *wq = &idxd->wqs[i];

		device_unregister(&wq->conf_dev);
	}

	for (i = 0; i < idxd->max_engines; i++) {
		struct idxd_engine *engine = &idxd->engines[i];

		device_unregister(&engine->conf_dev);
	}

	for (i = 0; i < idxd->max_groups; i++) {
		struct idxd_group *group = &idxd->groups[i];

		device_unregister(&group->conf_dev);
	}

	device_unregister(&idxd->conf_dev);
}

int idxd_register_bus_type(void)
{
	int i, rc;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		rc = bus_register(idxd_bus_types[i]);
		if (rc < 0)
			goto bus_err;
	}
	return 0;

bus_err:
	while (--i >= 0)
		bus_unregister(idxd_bus_types[i]);
	return rc;
}

void idxd_unregister_bus_type(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++)
		bus_unregister(idxd_bus_types[i]);
}