1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
2 /* Copyright(c) 2014 - 2020 Intel Corporation */
3 #include <linux/mutex.h>
4 #include <linux/list.h>
5 #include <linux/bitops.h>
6 #include <linux/delay.h>
7 #include "adf_accel_devices.h"
9 #include "adf_common_drv.h"
10 #include "adf_dbgfs.h"
11 #include "adf_heartbeat.h"
/*
 * Registry of subservices that hook the device lifecycle events.
 * Mutations (add/remove) are serialized by service_lock; the event
 * dispatch loops below iterate it without taking the lock.
 */
13 static LIST_HEAD(service_table);
14 static DEFINE_MUTEX(service_lock);
/*
 * adf_service_add() - Link @service into the global service_table.
 * Insertion is done under service_lock to serialize with removal.
 * NOTE(review): braces/blank lines appear elided in this excerpt —
 * confirm against the original file.
 */
16 static void adf_service_add(struct service_hndl *service)
18 mutex_lock(&service_lock);
19 list_add(&service->list, &service_table);
20 mutex_unlock(&service_lock);
/*
 * adf_service_register() - Register a subservice for lifecycle events.
 * Clears the per-device init/start status bitmaps before publishing the
 * handle on service_table, so stale state from a prior registration
 * cannot leak through.
 * NOTE(review): the return statement is not visible in this excerpt.
 */
23 int adf_service_register(struct service_hndl *service)
25 memset(service->init_status, 0, sizeof(service->init_status));
26 memset(service->start_status, 0, sizeof(service->start_status));
27 adf_service_add(service);
/*
 * adf_service_remove() - Unlink @service from service_table under
 * service_lock. Counterpart of adf_service_add().
 */
31 static void adf_service_remove(struct service_hndl *service)
33 mutex_lock(&service_lock);
34 list_del(&service->list);
35 mutex_unlock(&service_lock);
/*
 * adf_service_unregister() - Remove a subservice from the registry.
 * Refuses (visible error path) when any device still has the service's
 * init or start bit set, i.e. the service is still active somewhere.
 * NOTE(review): the error-return and final return lines are elided in
 * this excerpt.
 */
38 int adf_service_unregister(struct service_hndl *service)
/* Scan every per-device slot for a still-active init/start bit. */
42 for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
43 if (service->init_status[i] || service->start_status[i]) {
44 pr_err("QAT: Could not remove active service\n");
48 adf_service_remove(service);
/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services, load firmware, allocate IRQs, and dispatch ADF_EVENT_INIT to
 * every registered subservice.
 *
 * NOTE(review): this excerpt has lines elided (braces, gotos, returns);
 * comments below describe only the statements that are visible here.
 *
 * Return: 0 on success, error code otherwise.
 */
61 static int adf_dev_init(struct adf_accel_dev *accel_dev)
63 struct service_hndl *service;
64 struct list_head *list_itr;
65 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
/* Device without an ops table cannot be initialized. */
69 dev_err(&GET_DEV(accel_dev),
70 "Failed to init device - hw_data not set\n");
/* Require a prior configuration step (ADF_STATUS_CONFIGURED). */
74 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
76 dev_err(&GET_DEV(accel_dev), "Device not configured\n");
/* Bring up the event/transport ring (ETR) bookkeeping. */
80 if (adf_init_etr_data(accel_dev)) {
81 dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
/* Optional per-generation hooks: only called when the ops exist. */
85 if (hw_data->init_device && hw_data->init_device(accel_dev)) {
86 dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
90 if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
91 dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
95 if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
96 dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
/* Initialize the acceleration engines, then record the state bit. */
100 if (adf_ae_init(accel_dev)) {
101 dev_err(&GET_DEV(accel_dev),
102 "Failed to initialise Acceleration Engine\n");
105 set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
107 if (adf_ae_fw_load(accel_dev)) {
108 dev_err(&GET_DEV(accel_dev),
109 "Failed to load acceleration FW\n");
112 set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
114 if (hw_data->alloc_irq(accel_dev)) {
115 dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
118 set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
120 hw_data->enable_ints(accel_dev);
121 hw_data->enable_error_correction(accel_dev);
/* Establish PF/VF communications before any VF self-configuration. */
123 ret = hw_data->pfvf_ops.enable_comms(accel_dev);
/* Unconfigured VF path: derive a crypto config from the PF side. */
127 if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
129 if (qat_crypto_vf_dev_config(accel_dev))
133 adf_heartbeat_init(accel_dev);
/*
 * Subservice initialisation is divided into two stages: init and start.
 * This is to facilitate any ordering dependencies between services
 * prior to starting any of the accelerators.
 */
140 list_for_each(list_itr, &service_table) {
141 service = list_entry(list_itr, struct service_hndl, list);
142 if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
143 dev_err(&GET_DEV(accel_dev),
144 "Failed to initialise service %s\n",
/* Mark this device as initialized for the service. */
148 set_bit(accel_dev->accel_id, service->init_status);
/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * NOTE(review): this excerpt has lines elided (braces, gotos, returns);
 * comments below cover only the visible statements.
 *
 * Return: 0 on success, error code otherwise.
 */
164 static int adf_dev_start(struct adf_accel_dev *accel_dev)
166 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
167 struct service_hndl *service;
168 struct list_head *list_itr;
/* STARTING is set for the duration of bring-up, cleared on success. */
171 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
173 if (adf_ae_start(accel_dev)) {
174 dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
177 set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
179 if (hw_data->send_admin_init(accel_dev)) {
180 dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
/* Optional generation hooks, guarded by presence of the op. */
184 if (hw_data->measure_clock) {
185 ret = hw_data->measure_clock(accel_dev);
187 dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
/* Set ssm watch dog timer */
193 if (hw_data->set_ssm_wdtimer)
194 hw_data->set_ssm_wdtimer(accel_dev);
/* Enable Power Management */
197 if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
198 dev_err(&GET_DEV(accel_dev), "Failed to configure Power Management\n");
202 if (hw_data->start_timer) {
203 ret = hw_data->start_timer(accel_dev);
205 dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
210 adf_heartbeat_start(accel_dev);
/* Second stage: dispatch ADF_EVENT_START to all registered services. */
212 list_for_each(list_itr, &service_table) {
213 service = list_entry(list_itr, struct service_hndl, list);
214 if (service->event_hld(accel_dev, ADF_EVENT_START)) {
215 dev_err(&GET_DEV(accel_dev),
216 "Failed to start service %s\n",
220 set_bit(accel_dev->accel_id, service->start_status);
/* Device is up: flip STARTING -> STARTED. */
223 clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
224 set_bit(ADF_STATUS_STARTED, &accel_dev->status);
/*
 * Register crypto algorithms only if crypto instances were configured;
 * on failure the status bits are rolled back to STARTING.
 */
226 if (!list_empty(&accel_dev->crypto_list) &&
227 (qat_algs_register() || qat_asym_algs_register())) {
228 dev_err(&GET_DEV(accel_dev),
229 "Failed to register crypto algs\n");
230 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
231 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
/* Same pattern for compression algorithms. */
235 if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
236 dev_err(&GET_DEV(accel_dev),
237 "Failed to register compression algs\n");
238 set_bit(ADF_STATUS_STARTING, &accel_dev->status);
239 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
/* Expose debugfs entries once the device is fully started. */
243 adf_dbgfs_add(accel_dev);
/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * NOTE(review): this excerpt has lines elided (braces, early returns);
 * comments below cover only the visible statements.
 */
258 static void adf_dev_stop(struct adf_accel_dev *accel_dev)
260 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
261 struct service_hndl *service;
262 struct list_head *list_itr;
/* Nothing to do if the device never reached STARTING/STARTED. */
266 if (!adf_dev_started(accel_dev) &&
267 !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
270 adf_dbgfs_rm(accel_dev);
272 clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
273 clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
/* Unregister algs in the reverse order of adf_dev_start(). */
275 if (!list_empty(&accel_dev->crypto_list)) {
276 qat_algs_unregister();
277 qat_asym_algs_unregister();
280 if (!list_empty(&accel_dev->compression_list))
281 qat_comp_algs_unregister();
/* Dispatch ADF_EVENT_STOP to services started on this device. */
283 list_for_each(list_itr, &service_table) {
284 service = list_entry(list_itr, struct service_hndl, list);
285 if (!test_bit(accel_dev->accel_id, service->start_status))
287 ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
289 clear_bit(accel_dev->accel_id, service->start_status);
/* -EAGAIN: service asked for a grace period before clearing. */
290 } else if (ret == -EAGAIN) {
292 clear_bit(accel_dev->accel_id, service->start_status);
296 if (hw_data->stop_timer)
297 hw_data->stop_timer(accel_dev);
302 if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
303 if (adf_ae_stop(accel_dev))
304 dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
306 clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services. Reverses adf_dev_init() step by step, guarded by the status
 * bits set during init so partial initializations unwind safely.
 *
 * NOTE(review): this excerpt has lines elided (braces, early returns);
 * comments below cover only the visible statements.
 */
317 static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
319 struct adf_hw_device_data *hw_data = accel_dev->hw_device;
320 struct service_hndl *service;
321 struct list_head *list_itr;
/* Without an ops table there is nothing meaningful to tear down. */
324 dev_err(&GET_DEV(accel_dev),
325 "QAT: Failed to shutdown device - hw_data not set\n");
/* Release firmware only if it was actually loaded. */
329 if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
330 adf_ae_fw_release(accel_dev);
331 clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
334 if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
335 if (adf_ae_shutdown(accel_dev))
336 dev_err(&GET_DEV(accel_dev),
337 "Failed to shutdown Accel Engine\n");
339 clear_bit(ADF_STATUS_AE_INITIALISED,
/* Dispatch ADF_EVENT_SHUTDOWN to services initialized on this device. */
343 list_for_each(list_itr, &service_table) {
344 service = list_entry(list_itr, struct service_hndl, list);
345 if (!test_bit(accel_dev->accel_id, service->init_status))
347 if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
348 dev_err(&GET_DEV(accel_dev),
349 "Failed to shutdown service %s\n",
352 clear_bit(accel_dev->accel_id, service->init_status);
355 adf_heartbeat_shutdown(accel_dev);
357 hw_data->disable_iov(accel_dev);
359 if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
360 hw_data->free_irq(accel_dev);
361 clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
/* Delete configuration only if not restarting */
365 if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
366 adf_cfg_del_all(accel_dev);
368 if (hw_data->exit_arb)
369 hw_data->exit_arb(accel_dev);
371 if (hw_data->exit_admin_comms)
372 hw_data->exit_admin_comms(accel_dev);
374 adf_cleanup_etr_data(accel_dev);
375 adf_dev_restore(accel_dev);
/*
 * adf_dev_restarting_notify() - Broadcast ADF_EVENT_RESTARTING to all
 * registered services ahead of a device restart. Errors from individual
 * handlers are logged but, as far as visible here, do not abort the loop.
 * NOTE(review): the return statement is not visible in this excerpt.
 */
378 int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
380 struct service_hndl *service;
381 struct list_head *list_itr;
383 list_for_each(list_itr, &service_table) {
384 service = list_entry(list_itr, struct service_hndl, list);
385 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
386 dev_err(&GET_DEV(accel_dev),
387 "Failed to restart service %s.\n",
/*
 * adf_dev_restarted_notify() - Broadcast ADF_EVENT_RESTARTED to all
 * registered services once a restart has completed. Mirrors
 * adf_dev_restarting_notify() above.
 * NOTE(review): the return statement is not visible in this excerpt.
 */
393 int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
395 struct service_hndl *service;
396 struct list_head *list_itr;
398 list_for_each(list_itr, &service_table) {
399 service = list_entry(list_itr, struct service_hndl, list);
400 if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
401 dev_err(&GET_DEV(accel_dev),
402 "Failed to restart service %s.\n",
/*
 * adf_dev_shutdown_cache_cfg() - Stop and shut down the device while
 * preserving the ADF_SERVICES_ENABLED configuration value: the value is
 * read into a local buffer first, then written back into a freshly added
 * ADF_GENERAL_SEC section after the shutdown wiped the config.
 * NOTE(review): error-handling/return lines are elided in this excerpt.
 */
408 static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
410 char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
413 ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
414 ADF_SERVICES_ENABLED, services);
416 adf_dev_stop(accel_dev);
417 adf_dev_shutdown(accel_dev);
/* Re-create the section and restore the cached services value. */
420 ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
424 ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
425 ADF_SERVICES_ENABLED,
/*
 * adf_dev_down() - Bring the device down under state_lock.
 * @reconfig selects between the config-preserving teardown
 * (adf_dev_shutdown_cache_cfg(), presumably the reconfig path — confirm
 * against the elided branch) and a plain stop + shutdown.
 * NOTE(review): the branch structure and return lines are elided here.
 */
434 int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
441 mutex_lock(&accel_dev->state_lock);
444 ret = adf_dev_shutdown_cache_cfg(accel_dev);
448 adf_dev_stop(accel_dev);
449 adf_dev_shutdown(accel_dev);
452 mutex_unlock(&accel_dev->state_lock);
455 EXPORT_SYMBOL_GPL(adf_dev_down);
/*
 * adf_dev_up() - Bring the device up under state_lock.
 * Early-outs (path elided) when the device is already started; otherwise
 * optionally runs the generation's dev_config hook (when @config is set
 * and the op exists), then adf_dev_init() followed by adf_dev_start().
 * NOTE(review): error handling and return lines are elided in this
 * excerpt; the restart path below treats -EALREADY as success.
 */
457 int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
464 mutex_lock(&accel_dev->state_lock);
466 if (adf_dev_started(accel_dev)) {
467 dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
468 accel_dev->accel_id);
473 if (config && GET_HW_DATA(accel_dev)->dev_config) {
474 ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
479 ret = adf_dev_init(accel_dev);
483 ret = adf_dev_start(accel_dev);
486 mutex_unlock(&accel_dev->state_lock);
489 EXPORT_SYMBOL_GPL(adf_dev_up);
/*
 * adf_dev_restart() - Restart the device: down then up, both without
 * reconfiguration. A device that was already up (-EALREADY from
 * adf_dev_up()) is treated as a successful restart.
 * NOTE(review): some lines (guard checks, returns) are elided here.
 */
491 int adf_dev_restart(struct adf_accel_dev *accel_dev)
498 adf_dev_down(accel_dev, false);
500 ret = adf_dev_up(accel_dev, false);
/* if device is already up return success*/
502 if (ret == -EALREADY)
507 EXPORT_SYMBOL_GPL(adf_dev_restart);