// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"
int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}
struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};
static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}
static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}
static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
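
/*
 * Usage sketch (illustrative only, not part of this driver): a caller that
 * wants to visit both registered and not-yet-registered subchannels passes
 * one callback per class. The callbacks below are hypothetical, shown only
 * to illustrate the expected signatures:
 *
 *	static int eval_known(struct subchannel *sch, void *data)
 *	{
 *		return 0;	// inspect a registered subchannel
 *	}
 *
 *	static int eval_unknown(struct subchannel_id schid, void *data)
 *	{
 *		return 0;	// inspect an unregistered subchannel id
 *	}
 *
 *	rc = for_each_subchannel_staged(eval_known, eval_unknown, NULL);
 */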
static void css_sch_todo(struct work_struct *work);
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}
static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}
static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}
struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * But we don't have such restrictions imposed on the stuff that
	 * is handled by the streaming API.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}
/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}
static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}
static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}
static DEVICE_ATTR_RO(type);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}
static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);
static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};
static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);
static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);
static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);
static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};
int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}
static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);
static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}
static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}
/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);
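
/*
 * Usage sketch (illustrative only): as noted in the kernel-doc above, the
 * subchannel lock must be held by the caller, mirroring the pattern used
 * in css_sch_todo() below:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	spin_unlock_irq(sch->lock);
 */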
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;
static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time on platforms with lots of
		 * known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
static int __unset_validpath(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	/* Here we want to make sure that we consider only those subchannels
	 * which do not have an operational device attached to them. This can
	 * be determined from the PAM and POM values of the pmcw. The OPM
	 * tells us which paths are currently varied off and must therefore
	 * not be considered.
	 */
	if (sch->st == SUBCHANNEL_TYPE_IO &&
	    (sch->opm & pmcw->pam & pmcw->pom))
		idset_sch_del(set, sch->schid);

	return 0;
}
static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);

	return 0;
}
void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find unregistered subchannels. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_NO_PATH:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}
void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}
static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}
static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};
static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};
#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}
static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}
void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}
static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;

	return 0;
}
void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					 chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}
/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}
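
/*
 * Usage sketch (illustrative only, not part of this driver): a consumer
 * needing a small zeroed DMA area shared across the css could do:
 *
 *	void *buf = cio_dma_zalloc(64);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf for channel I/O ...
 *	cio_dma_free(buf, 64);
 */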
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success means we can support more than one subchannel set. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}
static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}
int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open = nonseekable_open,
	.proc_write = cio_settle_write,
	.proc_lseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/
int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}
static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;

	return ret;
}
static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}
static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}
static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}
static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
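
/*
 * Usage sketch (illustrative only, not an existing driver): a minimal css
 * driver registration. The "example" names below are assumptions; the
 * match_flags and type fields are the ones consulted by css_bus_match():
 *
 *	static struct css_device_id example_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver example_driver = {
 *		.drv = {
 *			.name = "example_css_drv",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = example_ids,
 *	};
 *
 *	ret = css_driver_register(&example_driver);
 */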
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);