struct cxl_port *port = to_cxl_port(dev);
struct cxl_ep *ep, *_e;
- cxl_device_lock(dev);
+ device_lock(dev);
list_for_each_entry_safe(ep, _e, &port->endpoints, list)
cxl_ep_release(ep);
- cxl_device_unlock(dev);
+ device_unlock(dev);
ida_free(&cxl_port_ida, port->id);
kfree(port);
}
return 0;
port = to_cxl_port(dev);
- cxl_device_lock(dev);
+ device_lock(dev);
list_for_each_entry(dport, &port->dports, list) {
iter = match;
while (iter) {
}
}
out:
- cxl_device_unlock(dev);
+ device_unlock(dev);
return !!iter;
}
static void cond_cxl_root_lock(struct cxl_port *port)
{
if (is_cxl_root(port))
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
}
static void cond_cxl_root_unlock(struct cxl_port *port)
{
if (is_cxl_root(port))
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
}
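/*
 * Usage sketch (illustrative, not from the patch): callers that may or
 * may not be operating on the root port can bracket root-only updates
 * without an explicit is_cxl_root() check of their own:
 *
 *	cond_cxl_root_lock(port);
 *	update_root_state(port);      (hypothetical callee)
 *	cond_cxl_root_unlock(port);
 */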
static void cxl_dport_remove(void *data)
{
struct cxl_ep *dup;
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
if (port->dead) {
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return -ENXIO;
}
dup = find_ep(port, new->ep);
if (!dup)
list_add_tail(&new->list, &port->endpoints);
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return dup ? -EEXIST : 0;
}
goto out;
parent = &parent_port->dev;
- cxl_device_lock(parent);
+ device_lock(parent);
if (parent->driver && endpoint->uport) {
devm_release_action(parent, cxl_unlink_uport, endpoint);
devm_release_action(parent, unregister_port, endpoint);
}
- cxl_device_unlock(parent);
+ device_unlock(parent);
put_device(parent);
out:
put_device(&endpoint->dev);
}
parent_port = to_cxl_port(port->dev.parent);
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
/*
 * The bottom-up race to delete the port lost to a
 * parent_port ->remove() action, which will have
 * cleaned up all descendants.
*/
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
put_device(&port->dev);
continue;
}
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
ep = find_ep(port, &cxlmd->dev);
dev_dbg(&cxlmd->dev, "disconnect %s from %s\n",
ep ? dev_name(ep->ep) : "", dev_name(&port->dev));
port->dead = true;
list_splice_init(&port->dports, &reap_dports);
}
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
if (!list_empty(&reap_dports)) {
dev_dbg(&cxlmd->dev, "delete %s\n",
delete_switch_port(port, &reap_dports);
}
put_device(&port->dev);
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
}
}
return -EAGAIN;
}
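/*
 * Lock ordering sketch, as implied by the hunk above (illustrative):
 * the parent port's device lock is always taken before the child
 * port's, and dport reaping happens only after the child lock is
 * dropped while the parent lock is still held:
 *
 *	device_lock(&parent_port->dev);
 *	device_lock(&port->dev);
 *	...mark port->dead, splice dports...
 *	device_unlock(&port->dev);
 *	delete_switch_port(port, &reap_dports);
 *	device_unlock(&parent_port->dev);
 */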
- cxl_device_lock(&parent_port->dev);
+ device_lock(&parent_port->dev);
if (!parent_port->dev.driver) {
dev_warn(&cxlmd->dev,
"port %s:%s disabled, failed to enumerate CXL.mem\n",
get_device(&port->dev);
}
out:
- cxl_device_unlock(&parent_port->dev);
+ device_unlock(&parent_port->dev);
if (IS_ERR(port))
rc = PTR_ERR(port);
{
struct cxl_dport *dport;
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
list_for_each_entry(dport, &port->dports, list)
if (dport->dport == dev) {
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return dport;
}
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return NULL;
}
EXPORT_SYMBOL_NS_GPL(cxl_find_dport_by_dev, CXL);
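/*
 * Usage sketch (illustrative; @host_dev is hypothetical): the lookup
 * takes and drops the port's device lock internally, so the returned
 * dport is only stable while the caller otherwise pins @port:
 *
 *	struct cxl_dport *dport;
 *
 *	dport = cxl_find_dport_by_dev(port, host_dev);
 *	if (!dport)
 *		return -ENODEV;
 */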
port = to_cxl_port(cxld->dev.parent);
- cxl_device_lock(&port->dev);
+ device_lock(&port->dev);
rc = cxl_decoder_add_locked(cxld, target_map);
- cxl_device_unlock(&port->dev);
+ device_unlock(&port->dev);
return rc;
}
{
int rc;
- /*
- * Take the CXL nested lock since the driver core only holds
- * @dev->mutex and not @dev->lockdep_mutex.
- */
- cxl_nested_lock(dev);
rc = to_cxl_drv(dev->driver)->probe(dev);
- cxl_nested_unlock(dev);
-
dev_dbg(dev, "probe: %d\n", rc);
return rc;
}
{
struct cxl_driver *cxl_drv = to_cxl_drv(dev->driver);
- cxl_nested_lock(dev);
if (cxl_drv->remove)
cxl_drv->remove(dev);
- cxl_nested_unlock(dev);
}
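/*
 * For context, a minimal sketch of the convention the two wrappers
 * above rely on: the driver core holds the device lock around both
 * callbacks, so with lockdep_mutex gone no CXL-specific locking
 * remains in the probe/remove paths:
 *
 *	device_lock(dev);
 *	dev->bus->probe(dev);      ...reaches cxl_bus_probe()
 *	device_unlock(dev);
 */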
static struct workqueue_struct *cxl_bus_wq;
#define __mock static
#endif
-#ifdef CONFIG_PROVE_CXL_LOCKING
-enum cxl_lock_class {
- CXL_ANON_LOCK,
- CXL_NVDIMM_LOCK,
- CXL_NVDIMM_BRIDGE_LOCK,
- CXL_PORT_LOCK,
- /*
- * Be careful to add new lock classes here, CXL_PORT_LOCK is
- * extended by the port depth, so a maximum CXL port topology
- * depth would need to be defined first.
- */
-};
-
-static inline void cxl_nested_lock(struct device *dev)
-{
- if (is_cxl_port(dev)) {
- struct cxl_port *port = to_cxl_port(dev);
-
- mutex_lock_nested(&dev->lockdep_mutex,
- CXL_PORT_LOCK + port->depth);
- } else if (is_cxl_decoder(dev)) {
- struct cxl_port *port = to_cxl_port(dev->parent);
-
- /*
- * A decoder is the immediate child of a port, so set
- * its lock class equal to other child device siblings.
- */
- mutex_lock_nested(&dev->lockdep_mutex,
- CXL_PORT_LOCK + port->depth + 1);
- } else if (is_cxl_nvdimm_bridge(dev))
- mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_BRIDGE_LOCK);
- else if (is_cxl_nvdimm(dev))
- mutex_lock_nested(&dev->lockdep_mutex, CXL_NVDIMM_LOCK);
- else
- mutex_lock_nested(&dev->lockdep_mutex, CXL_ANON_LOCK);
-}
-
-static inline void cxl_nested_unlock(struct device *dev)
-{
- mutex_unlock(&dev->lockdep_mutex);
-}
-
-static inline void cxl_device_lock(struct device *dev)
-{
- /*
- * For double lock errors the lockup will happen before lockdep
- * warns at cxl_nested_lock(), so assert explicitly.
- */
- lockdep_assert_not_held(&dev->lockdep_mutex);
-
- device_lock(dev);
- cxl_nested_lock(dev);
-}
-
-static inline void cxl_device_unlock(struct device *dev)
-{
- cxl_nested_unlock(dev);
- device_unlock(dev);
-}
-#else
-static inline void cxl_nested_lock(struct device *dev)
-{
-}
-
-static inline void cxl_nested_unlock(struct device *dev)
-{
-}
-
-static inline void cxl_device_lock(struct device *dev)
-{
- device_lock(dev);
-}
-
-static inline void cxl_device_unlock(struct device *dev)
-{
- device_unlock(dev);
-}
-#endif
#endif /* __CXL_H__ */
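/*
 * A minimal sketch (an assumption, not part of this patch) of how
 * equivalent lockdep coverage can be kept once the wrappers above are
 * gone: attach a lock class, subclassed by topology depth, to the
 * regular device mutex at device creation time rather than shadowing
 * it with lockdep_mutex. The key and helper names are illustrative.
 */
static struct lock_class_key cxl_port_key;

static void cxl_port_lockdep_init(struct cxl_port *port)
{
	/* mirrors the removed CXL_PORT_LOCK + port->depth numbering */
	lockdep_set_class_and_subclass(&port->dev.mutex, &cxl_port_key,
				       port->depth);
}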
if (!cxl_nvb)
return -ENXIO;
- cxl_device_lock(&cxl_nvb->dev);
+ device_lock(&cxl_nvb->dev);
if (!cxl_nvb->nvdimm_bus) {
rc = -ENXIO;
goto out;
dev_set_drvdata(dev, nvdimm);
rc = devm_add_action_or_reset(dev, unregister_nvdimm, nvdimm);
out:
- cxl_device_unlock(&cxl_nvb->dev);
+ device_unlock(&cxl_nvb->dev);
put_device(&cxl_nvb->dev);
return rc;
struct nvdimm_bus *victim_bus = NULL;
bool release = false, rescan = false;
- cxl_device_lock(&cxl_nvb->dev);
+ device_lock(&cxl_nvb->dev);
switch (cxl_nvb->state) {
case CXL_NVB_ONLINE:
if (!online_nvdimm_bus(cxl_nvb)) {
default:
break;
}
- cxl_device_unlock(&cxl_nvb->dev);
+ device_unlock(&cxl_nvb->dev);
if (release)
device_release_driver(&cxl_nvb->dev);
return 0;
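/*
 * Flow note, as implied by the hunk above (illustrative): the bridge
 * state is sampled and updated under device_lock(&cxl_nvb->dev), while
 * actions that re-acquire the device lock, such as
 * device_release_driver(), are deferred via the "release" flag until
 * after device_unlock().
 */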
cxl_nvb = to_cxl_nvdimm_bridge(dev);
- cxl_device_lock(dev);
+ device_lock(dev);
cxl_nvb->state = CXL_NVB_NEW;
- cxl_device_unlock(dev);
+ device_unlock(dev);
return 0;
}