// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
sizeof(struct mkhi_fw_ver))
#define MKHI_FWVER_LEN(__num) (sizeof(struct mkhi_msg_hdr) + \
sizeof(struct mkhi_fw_ver_block) * (__num))
-#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
static int mei_fwver(struct mei_cl_device *cldev)
{
char buf[MKHI_FWVER_BUF_LEN];
ret = 0;
bytes_recv = __mei_cl_recv(cldev->cl, buf, sizeof(buf), NULL, 0,
- MKHI_RCV_TIMEOUT);
+ cldev->bus->timeouts.mkhi_recv);
if (bytes_recv < 0 || (size_t)bytes_recv < MKHI_FWVER_LEN(1)) {
/*
* Should be at least one version block,
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
wait_event_timeout(cl->wait,
cl->state == MEI_FILE_DISCONNECT_REPLY ||
cl->state == MEI_FILE_DISCONNECTED,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
rets = cl->status;
}
list_move_tail(&cb->list, &dev->ctrl_rd_list);
- cl->timer_count = MEI_CONNECT_TIMEOUT;
+ cl->timer_count = dev->timeouts.connect;
mei_schedule_stall_timer(dev);
return 0;
}
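Note that dev->timeouts.connect (and client_init below) stay plain second counts rather than jiffies: they are not handed to wait_event_timeout() but loaded into countdown counters that the periodic stall timer decrements. A minimal sketch of that consumption, assuming the stall-timer work runs roughly once per second; example_stall_tick is a hypothetical helper for illustration, not the driver's actual mei_timer():

/*
 * Illustration only: how a seconds-granular value such as
 * dev->timeouts.connect is consumed once it has been loaded into
 * cl->timer_count. The stall timer is assumed to tick about once per
 * second; when the countdown reaches zero the pending operation is
 * treated as timed out and the device is reset.
 */
static void example_stall_tick(struct mei_device *dev, struct mei_cl *cl)
{
        if (cl->timer_count && --cl->timer_count == 0) {
                dev_err(dev->dev, "timer: connect/disconnect timeout\n");
                mei_reset(dev);
        }
}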
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!mei_cl_is_connected(cl)) {
cl->notify_en == request ||
cl->status ||
!mei_cl_is_connected(cl),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->notify_en != request && !cl->status)
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (!cl->dma_mapped && !cl->status)
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
if (cl->dma_mapped && !cl->status)
device = &aux_dev->dev;
- dev = mei_me_dev_init(device, cfg);
+ dev = mei_me_dev_init(device, cfg, adev->slow_firmware);
if (!dev) {
ret = -ENOMEM;
goto err;
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#include <linux/export.h>
mutex_unlock(&dev->device_lock);
ret = wait_event_timeout(dev->wait_hbm_start,
dev->hbm_state != MEI_HBM_STARTING,
- mei_secs_to_jiffies(MEI_HBM_TIMEOUT));
+ dev->timeouts.hbm);
mutex_lock(&dev->device_lock);
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
}
dev->hbm_state = MEI_HBM_STARTING;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
}
dev->hbm_state = MEI_HBM_DR_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
}
dev->hbm_state = MEI_HBM_CAP_SETUP;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
}
return ret;
}
- dev->init_clients_timer = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->init_clients_timer = dev->timeouts.client_init;
mei_schedule_stall_timer(dev);
return 0;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_hw_ready,
dev->recvd_hw_ready,
- mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
+ dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
dev_err(dev->dev, "wait hw ready failed\n");
static int mei_me_pg_legacy_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
dev->pg_event = MEI_PG_EVENT_WAIT;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
static int mei_me_pg_legacy_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
reply:
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
static int mei_me_d0i3_enter_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long d0i3_timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
- unsigned long pgi_timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
int ret;
u32 reg;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_RECEIVED, pgi_timeout);
+ dev->pg_event == MEI_PG_EVENT_RECEIVED,
+ dev->timeouts.pgi);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, d0i3_timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
static int mei_me_d0i3_exit_sync(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
- unsigned long timeout = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
int ret;
u32 reg;
mutex_unlock(&dev->device_lock);
wait_event_timeout(dev->wait_pg,
- dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
+ dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED,
+ dev->timeouts.d0i3);
mutex_lock(&dev->device_lock);
if (dev->pg_event != MEI_PG_EVENT_INTR_RECEIVED) {
*
* @parent: device associated with physical device (pci/platform)
* @cfg: per device generation config
+ * @slow_fw: configure longer timeouts for slow firmware
*
* Return: The mei_device pointer on success, NULL on failure.
*/
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg)
+ const struct mei_cfg *cfg, bool slow_fw)
{
struct mei_device *dev;
struct mei_me_hw *hw;
for (i = 0; i < DMA_DSCR_NUM; i++)
dev->dr_dscr[i].size = cfg->dma_size[i];
- mei_device_init(dev, parent, &mei_me_hw_ops);
+ mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);
hw->cfg = cfg;
dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx);
struct mei_device *mei_me_dev_init(struct device *parent,
- const struct mei_cfg *cfg);
+ const struct mei_cfg *cfg, bool slow_fw);
int mei_me_pg_enter_sync(struct mei_device *dev);
int mei_me_pg_exit_sync(struct mei_device *dev);
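For reference, the two mei_me_dev_init() call sites touched by this patch (restated from the hunks above and below): the PCI probe path (pci-me.c) has no notion of slow firmware and passes false, while the auxiliary GSC probe path (gsc-me.c) forwards the flag carried on its auxiliary device.

/* pci-me.c probe: regular firmware, default timeouts */
dev = mei_me_dev_init(&pdev->dev, cfg, false);

/* gsc-me.c probe: forward the parent's slow-firmware hint */
dev = mei_me_dev_init(device, cfg, adev->slow_firmware);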
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2013-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2013-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
if (!dev)
return NULL;
- mei_device_init(dev, &pdev->dev, &mei_txe_hw_ops);
+ mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);
hw = to_txe_hw(dev);
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#define MEI_CONNECT_TIMEOUT 3 /* HPS: at least 2 seconds */
#define MEI_CL_CONNECT_TIMEOUT 15 /* HPS: Client Connect Timeout */
+#define MEI_CL_CONNECT_TIMEOUT_SLOW 30 /* HPS: Client Connect Timeout, slow FW */
#define MEI_CLIENTS_INIT_TIMEOUT 15 /* HPS: Clients Enumeration Timeout */
#define MEI_PGI_TIMEOUT 1 /* PG Isolation time response 1 sec */
#define MEI_D0I3_TIMEOUT 5 /* D0i3 set/unset max response time */
#define MEI_HBM_TIMEOUT 1 /* 1 second */
+#define MEI_HBM_TIMEOUT_SLOW 5 /* 5 seconds, slow FW */
+
+#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
+#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */
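Worked out in wall-clock terms (taken from the defines above; only these three timeouts gain _SLOW variants in this patch, the remaining timeouts are unaffected by the slow-firmware mode):

/*
 *               default        slow FW
 *  cl_connect    15 s           30 s
 *  hbm            1 s            5 s
 *  mkhi_recv    500 ms          10 s (10000 ms)
 */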
/*
* FW page size for DMA allocations
EXPORT_SYMBOL_GPL(mei_write_is_idle);
/**
- * mei_device_init -- initialize mei_device structure
+ * mei_device_init - initialize mei_device structure
*
* @dev: the mei device
* @device: the device structure
+ * @slow_fw: configure longer timeouts for slow firmware
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
/* setup our list array */
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
dev->dev = device;
+
+ dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
+ dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
+ dev->timeouts.client_init = MEI_CLIENTS_INIT_TIMEOUT;
+ dev->timeouts.pgi = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
+ dev->timeouts.d0i3 = mei_secs_to_jiffies(MEI_D0I3_TIMEOUT);
+ if (slow_fw) {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT_SLOW);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT_SLOW);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT_SLOW);
+ } else {
+ dev->timeouts.cl_connect = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
+ dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
+ dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
+ }
}
EXPORT_SYMBOL_GPL(mei_device_init);
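How the flag reaches mei_device_init() from the two transport layers (restated from this patch): the ME hardware layer forwards its caller's slow_fw argument, while TXE, which has no slow-firmware variant, hardwires false.

/* hw-me.c: pass the caller's flag through */
mei_device_init(dev, parent, slow_fw, &mei_me_hw_ops);

/* hw-txe.c: TXE never needs the extended timeouts */
mei_device_init(dev, &pdev->dev, false, &mei_txe_hw_ops);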
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
cl->state == MEI_FILE_DISCONNECTED ||
cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
cl->state == MEI_FILE_DISCONNECT_REPLY),
- mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
+ dev->timeouts.cl_connect);
mutex_lock(&dev->device_lock);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
#define MEI_MAX_FW_VER_BLOCKS 3
+struct mei_dev_timeouts {
+ unsigned long hw_ready; /* Timeout on ready message, in jiffies */
+ int connect; /* HPS: at least 2 seconds, in seconds */
+ unsigned long cl_connect; /* HPS: Client Connect Timeout, in jiffies */
+ int client_init; /* HPS: Clients Enumeration Timeout, in seconds */
+ unsigned long pgi; /* PG Isolation time response, in jiffies */
+ unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
+ unsigned long hbm; /* HBM operation timeout, in jiffies */
+ unsigned long mkhi_recv; /* receive timeout, in jiffies */
+};
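The jiffies-valued fields are passed straight to the waiters, while connect and client_init remain second-granular countdowns for the stall timer (see the sketch earlier). A brief illustration, mirroring wait and timer sites converted elsewhere in this patch:

/* jiffies-valued timeouts feed wait_event_timeout() directly ... */
wait_event_timeout(cl->wait,
                   cl->state == MEI_FILE_DISCONNECT_REPLY ||
                   cl->state == MEI_FILE_DISCONNECTED,
                   dev->timeouts.cl_connect);

/* ... seconds-valued fields load countdowns for the periodic stall timer */
cl->timer_count = dev->timeouts.connect;
dev->init_clients_timer = dev->timeouts.client_init;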
+
/**
* struct mei_device - MEI private device struct
*
* @allow_fixed_address: allow user space to connect a fixed client
* @override_fixed_address: force allow fixed address behavior
*
+ * @timeouts: actual timeout values
+ *
* @reset_work : work item for the device reset
* @bus_rescan_work : work item for the bus rescan
*
bool allow_fixed_address;
bool override_fixed_address;
+ struct mei_dev_timeouts timeouts;
+
struct work_struct reset_work;
struct work_struct bus_rescan_work;
*/
void mei_device_init(struct mei_device *dev,
struct device *device,
+ bool slow_fw,
const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
int mei_start(struct mei_device *dev);
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
+ * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
}
/* allocates and initializes the mei dev structure */
- dev = mei_me_dev_init(&pdev->dev, cfg);
+ dev = mei_me_dev_init(&pdev->dev, cfg, false);
if (!dev) {
err = -ENOMEM;
goto end;