/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#ifndef HABANALABSP_H_
#define HABANALABSP_H_
#include "../include/common/cpucp_if.h"
#include "../include/common/qman_if.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include <uapi/misc/habanalabs.h>

#include <linux/cdev.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/hashtable.h>
#include <linux/debugfs.h>
#include <linux/rwsem.h>
#include <linux/bitfield.h>
#include <linux/genalloc.h>
#include <linux/sched/signal.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/coresight.h>
#include <linux/dma-buf.h>

#define HL_NAME "habanalabs"
/* Use upper bits of mmap offset to store habana driver specific information.
 * bits[63:59] - Encode mmap type
 * bits[45:0]  - mmap offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 *       defines are w.r.t. PAGE_SIZE
 */
#define HL_MMAP_TYPE_SHIFT (59 - PAGE_SHIFT)
#define HL_MMAP_TYPE_MASK (0x1full << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_TS_BUFF (0x10ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_BLOCK (0x4ull << HL_MMAP_TYPE_SHIFT)
#define HL_MMAP_TYPE_CB (0x2ull << HL_MMAP_TYPE_SHIFT)

#define HL_MMAP_OFFSET_VALUE_MASK (0x1FFFFFFFFFFFull >> PAGE_SHIFT)
#define HL_MMAP_OFFSET_VALUE_GET(off) (off & HL_MMAP_OFFSET_VALUE_MASK)
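/*
 * Illustrative helpers, not part of the driver API: extracting the type and
 * offset fields from a vm_pgoff-style value encoded per the scheme above.
 * The hl_example_* names are hypothetical.
 */
static inline u64 hl_example_mmap_type(u64 vm_pgoff)
{
	/* Isolate the mmap-type bits (already scaled to page units) */
	return vm_pgoff & HL_MMAP_TYPE_MASK;
}

static inline u64 hl_example_mmap_offset(u64 vm_pgoff)
{
	/* Strip the type bits, leaving only the page-granular offset */
	return HL_MMAP_OFFSET_VALUE_GET(vm_pgoff);
}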
#define HL_PENDING_RESET_PER_SEC 10
#define HL_PENDING_RESET_MAX_TRIALS 60 /* 10 minutes */
#define HL_PENDING_RESET_LONG_SEC 60

#define HL_HARD_RESET_MAX_TIMEOUT 120
#define HL_PLDM_HARD_RESET_MAX_TIMEOUT (HL_HARD_RESET_MAX_TIMEOUT * 3)

#define HL_DEVICE_TIMEOUT_USEC 1000000 /* 1 s */

#define HL_HEARTBEAT_PER_USEC 5000000 /* 5 s */

#define HL_PLL_LOW_JOB_FREQ_USEC 5000000 /* 5 s */

#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */

#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */

#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */

#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */

#define HL_COMMON_USER_INTERRUPT_ID 0xFFF

#define HL_STATE_DUMP_HIST_LEN 5

/* Default value for device reset trigger, an invalid value */
#define HL_RESET_TRIGGER_DEFAULT 0xFF
#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */

/* Memory */
#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */

/* MMU */
#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */

/**
 * enum hl_mmu_page_table_location - MMU page table location
 * @MMU_DR_PGT: page-table is located on device DRAM.
 * @MMU_HR_PGT: page-table is located on host memory.
 * @MMU_NUM_PGT_LOCATIONS: number of page-table locations currently supported.
 */
enum hl_mmu_page_table_location {
	MMU_DR_PGT = 0, /* device-DRAM-resident MMU PGT */
	MMU_HR_PGT, /* host-resident MMU PGT */
	MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
};

/*
 * HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
 * HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
 */
#define HL_RSVD_SOBS 2
#define HL_RSVD_MONS 1

/*
 * HL_COLLECTIVE_RSVD_MSTR_MONS 'collective' reserved monitors per QMAN stream
 */
#define HL_COLLECTIVE_RSVD_MSTR_MONS 2
#define HL_MAX_SOB_VAL (1 << 15)

#define IS_POWER_OF_2(n) (n != 0 && ((n & (n - 1)) == 0))
#define IS_MAX_PENDING_CS_VALID(n) (IS_POWER_OF_2(n) && (n > 1))
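/*
 * Illustrative sketch, not driver code: max_pending_cs (see
 * asic_fixed_properties below) must satisfy this check so that
 * (seq & (max_pending_cs - 1)) can serve as a ring index into a
 * context's cs_pending array.
 */
static inline bool hl_example_max_pending_cs_ok(u32 max_pending_cs)
{
	return IS_MAX_PENDING_CS_VALID(max_pending_cs);
}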
#define HL_PCI_NUM_BARS 6

#define HL_MAX_DCORES 4

/*
 * Reset flags:
 *
 * - HL_DRV_RESET_HARD
 *       If set do hard reset to all engines. If not set reset just
 *       compute/DMA engines.
 *
 * - HL_DRV_RESET_FROM_RESET_THR
 *       Set if the caller is the hard-reset thread.
 *
 * - HL_DRV_RESET_HEARTBEAT
 *       Set if reset is due to heartbeat.
 *
 * - HL_DRV_RESET_TDR
 *       Set if reset is due to TDR.
 *
 * - HL_DRV_RESET_DEV_RELEASE
 *       Set if reset is due to device release.
 *
 * - HL_DRV_RESET_BYPASS_REQ_TO_FW
 *       F/W will perform the reset. No need to ask it to reset the device.
 *       This is relevant only when running with secured F/W.
 *
 * - HL_DRV_RESET_FW_FATAL_ERR
 *       Set if reset is due to a fatal error from F/W.
 *
 * - HL_DRV_RESET_DELAY
 *       Set if a delay should be added before the reset.
 */

#define HL_DRV_RESET_HARD (1 << 0)
#define HL_DRV_RESET_FROM_RESET_THR (1 << 1)
#define HL_DRV_RESET_HEARTBEAT (1 << 2)
#define HL_DRV_RESET_TDR (1 << 3)
#define HL_DRV_RESET_DEV_RELEASE (1 << 4)
#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
#define HL_DRV_RESET_DELAY (1 << 7)
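/*
 * Illustrative sketch: these flags are OR'ed together by callers. A
 * heartbeat failure, for example, would escalate to a hard reset with the
 * heartbeat cause recorded (hypothetical example macro):
 */
#define HL_EXAMPLE_HEARTBEAT_RESET_FLAGS \
	(HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT)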
#define HL_MAX_SOBS_PER_MONITOR 8

/**
 * struct hl_gen_wait_properties - properties for generating a wait CB
 * @data: command buffer
 * @q_idx: queue id is used to extract fence register address
 * @size: offset in command buffer
 * @sob_base: SOB base to use in this wait CB
 * @sob_val: SOB value to wait for
 * @mon_id: monitor to use in this wait CB
 * @sob_mask: each bit represents a SOB offset from sob_base to be used
 */
struct hl_gen_wait_properties {
	void *data;
	u32 q_idx;
	u32 size;
	u16 sob_base;
	u16 sob_val;
	u16 mon_id;
	u8 sob_mask;
};

/**
 * struct pgt_info - MMU hop page info.
 * @node: hash linked-list node for the pgts shadow hash of pgts.
 * @phys_addr: physical address of the pgt.
 * @shadow_addr: shadow hop in the host.
 * @ctx: pointer to the owner ctx.
 * @num_of_ptes: indicates how many ptes are used in the pgt.
 *
 * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
 * is needed during mapping, a new page is allocated and this structure holds
 * its essential information. During unmapping, if no valid PTEs remained in the
 * page, it is freed with its pgt_info structure.
 */
struct pgt_info {
	struct hlist_node node;
	u64 phys_addr;
	u64 shadow_addr;
	struct hl_ctx *ctx;
	int num_of_ptes;
};

struct hl_device;
struct hl_fpriv;
/**
 * enum hl_pci_match_mode - pci match mode per region
 * @PCI_ADDRESS_MATCH_MODE: address match mode
 * @PCI_BAR_MATCH_MODE: bar match mode
 */
enum hl_pci_match_mode {
	PCI_ADDRESS_MATCH_MODE,
	PCI_BAR_MATCH_MODE
};

/**
 * enum hl_fw_component - F/W components to read version through registers.
 * @FW_COMP_BOOT_FIT: boot fit.
 * @FW_COMP_PREBOOT: preboot.
 * @FW_COMP_LINUX: linux.
 */
enum hl_fw_component {
	FW_COMP_BOOT_FIT,
	FW_COMP_PREBOOT,
	FW_COMP_LINUX,
};
/**
 * enum hl_fw_types - F/W types present in the system
 * @FW_TYPE_NONE: no FW component indication
 * @FW_TYPE_LINUX: Linux image for device CPU
 * @FW_TYPE_BOOT_CPU: Boot image for device CPU
 * @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
 *                       (preboot, ppboot etc...)
 * @FW_TYPE_ALL_TYPES: Mask for all types
 */
enum hl_fw_types {
	FW_TYPE_NONE = 0x0,
	FW_TYPE_LINUX = 0x1,
	FW_TYPE_BOOT_CPU = 0x2,
	FW_TYPE_PREBOOT_CPU = 0x4,
	FW_TYPE_ALL_TYPES =
		(FW_TYPE_LINUX | FW_TYPE_BOOT_CPU | FW_TYPE_PREBOOT_CPU)
};
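/*
 * Illustrative sketch, assuming the fw_comp_loaded field of struct
 * fw_load_mgr (below) carries a bitmask of enum hl_fw_types values:
 */
static inline bool hl_example_linux_fw_loaded(u8 fw_comp_loaded)
{
	return !!(fw_comp_loaded & FW_TYPE_LINUX);
}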
/**
 * enum hl_queue_type - Supported QUEUE types.
 * @QUEUE_TYPE_NA: queue is not available.
 * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the
 *                  host memory.
 * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's
 *                  memories and/or operates the compute engines.
 * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU.
 * @QUEUE_TYPE_HW: queue of DMA and compute engines jobs, for which completion
 *                 notifications are sent by H/W.
 */
enum hl_queue_type {
	QUEUE_TYPE_NA,
	QUEUE_TYPE_EXT,
	QUEUE_TYPE_INT,
	QUEUE_TYPE_CPU,
	QUEUE_TYPE_HW
};

enum hl_cs_type {
	CS_TYPE_DEFAULT,
	CS_TYPE_SIGNAL,
	CS_TYPE_WAIT,
	CS_TYPE_COLLECTIVE_WAIT,
	CS_RESERVE_SIGNALS,
	CS_UNRESERVE_SIGNALS
};

/**
 * struct hl_inbound_pci_region - inbound region descriptor
 * @mode: pci match mode for this region
 * @addr: region target address
 * @size: region size in bytes
 * @offset_in_bar: offset within bar (address match mode)
 * @bar: bar number of this region
 */
struct hl_inbound_pci_region {
	enum hl_pci_match_mode mode;
	u64 addr;
	u64 size;
	u64 offset_in_bar;
	u8 bar;
};

/**
 * struct hl_outbound_pci_region - outbound region descriptor
 * @addr: region target address
 * @size: region size in bytes
 */
struct hl_outbound_pci_region {
	u64 addr;
	u64 size;
};

/**
 * enum queue_cb_alloc_flags - Indicates queue support for CBs that
 * were allocated by the kernel or by the user
 * @CB_ALLOC_KERNEL: support only CBs that were allocated by the kernel
 * @CB_ALLOC_USER: support only CBs that were allocated by the user
 */
enum queue_cb_alloc_flags {
	CB_ALLOC_KERNEL = 0x1,
	CB_ALLOC_USER = 0x2
};

/**
 * struct hl_hw_sob - H/W SOB info.
 * @hdev: habanalabs device structure.
 * @kref: refcount of this SOB. The SOB will reset once the refcount is zero.
 * @sob_id: id of this SOB.
 * @sob_addr: the sob offset from the base address.
 * @q_idx: the H/W queue that uses this SOB.
 * @need_reset: reset indication set when switching to the other sob.
 */
struct hl_hw_sob {
	struct hl_device *hdev;
	struct kref kref;
	u32 sob_id;
	u32 sob_addr;
	u32 q_idx;
	u8 need_reset;
};

enum hl_collective_mode {
	HL_COLLECTIVE_NOT_SUPPORTED = 0x0,
	HL_COLLECTIVE_MASTER = 0x1,
	HL_COLLECTIVE_SLAVE = 0x2
};
/**
 * struct hw_queue_properties - queue information.
 * @type: queue type.
 * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CBs
 *                        that are allocated by the kernel driver and therefore,
 *                        a CB handle can be provided for jobs on this queue.
 *                        Otherwise, a CB address must be provided.
 * @collective_mode: collective mode of current queue
 * @driver_only: true if only the driver is allowed to send a job to this
 *               queue, false otherwise.
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hw_queue_properties {
	enum hl_queue_type type;
	enum queue_cb_alloc_flags cb_alloc_flags;
	enum hl_collective_mode collective_mode;
	u8 driver_only;
	u8 supports_sync_stream;
};

/**
 * enum vm_type - virtual memory mapping request information.
 * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
 * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address.
 */
enum vm_type {
	VM_TYPE_USERPTR = 0x1,
	VM_TYPE_PHYS_PACK = 0x2
};

/**
 * enum mmu_op_flags - mmu operation relevant information.
 * @MMU_OP_USERPTR: operation on user memory (host resident).
 * @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
 * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
 * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation.
 */
enum mmu_op_flags {
	MMU_OP_USERPTR = 0x1,
	MMU_OP_PHYS_PACK = 0x2,
	MMU_OP_CLEAR_MEMCACHE = 0x4,
	MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
};

/**
 * enum hl_device_hw_state - H/W device state. use this to understand whether
 *                           to do reset before hw_init or not
 * @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
 * @HL_DEVICE_HW_STATE_DIRTY: H/W state is dirty. i.e. we started to execute
 *                            hw_init
 */
enum hl_device_hw_state {
	HL_DEVICE_HW_STATE_CLEAN = 0,
	HL_DEVICE_HW_STATE_DIRTY
};

#define HL_MMU_VA_ALIGNMENT_NOT_NEEDED 0
/**
 * struct hl_mmu_properties - ASIC specific MMU address translation properties.
 * @start_addr: virtual start address of the memory region.
 * @end_addr: virtual end address of the memory region.
 * @hop0_shift: shift of hop 0 mask.
 * @hop1_shift: shift of hop 1 mask.
 * @hop2_shift: shift of hop 2 mask.
 * @hop3_shift: shift of hop 3 mask.
 * @hop4_shift: shift of hop 4 mask.
 * @hop5_shift: shift of hop 5 mask.
 * @hop0_mask: mask to get the PTE address in hop 0.
 * @hop1_mask: mask to get the PTE address in hop 1.
 * @hop2_mask: mask to get the PTE address in hop 2.
 * @hop3_mask: mask to get the PTE address in hop 3.
 * @hop4_mask: mask to get the PTE address in hop 4.
 * @hop5_mask: mask to get the PTE address in hop 5.
 * @last_mask: mask to get the bit indicating this is the last hop.
 * @pgt_size: size for page tables.
 * @page_size: default page size used to allocate memory.
 * @num_hops: The amount of hops supported by the translation table.
 * @hop_table_size: HOP table size.
 * @hop0_tables_total_size: total size for all HOP0 tables.
 * @host_resident: Should the MMU page table reside in host memory or in the
 *                 device DRAM.
 */
struct hl_mmu_properties {
	u64 start_addr;
	u64 end_addr;
	u64 hop0_shift;
	u64 hop1_shift;
	u64 hop2_shift;
	u64 hop3_shift;
	u64 hop4_shift;
	u64 hop5_shift;
	u64 hop0_mask;
	u64 hop1_mask;
	u64 hop2_mask;
	u64 hop3_mask;
	u64 hop4_mask;
	u64 hop5_mask;
	u64 last_mask;
	u64 pgt_size;
	u32 page_size;
	u32 num_hops;
	u32 hop_table_size;
	u32 hop0_tables_total_size;
	u8 host_resident;
};

/**
 * struct hl_hints_range - hint addresses reserved va range.
 * @start_addr: start address of the va range.
 * @end_addr: end address of the va range.
 */
struct hl_hints_range {
	u64 start_addr;
	u64 end_addr;
};
/**
 * struct asic_fixed_properties - ASIC specific immutable properties.
 * @hw_queues_props: H/W queues properties.
 * @cpucp_info: received various information from CPU-CP regarding the H/W, e.g.
 *              available sensors description.
 * @uboot_ver: F/W U-boot version.
 * @preboot_ver: F/W Preboot version.
 * @dmmu: DRAM MMU address translation properties.
 * @pmmu: PCI (host) MMU address translation properties.
 * @pmmu_huge: PCI (host) MMU address translation properties for memory
 *             allocated with huge pages.
 * @hints_dram_reserved_va_range: dram hint addresses reserved range.
 * @hints_host_reserved_va_range: host hint addresses reserved range.
 * @hints_host_hpage_reserved_va_range: host huge page hint addresses reserved
 *                                      range.
 * @sram_base_address: SRAM physical start address.
 * @sram_end_address: SRAM physical end address.
 * @sram_user_base_address: SRAM physical start address for user access.
 * @dram_base_address: DRAM physical start address.
 * @dram_end_address: DRAM physical end address.
 * @dram_user_base_address: DRAM physical start address for user access.
 * @dram_size: DRAM total size.
 * @dram_pci_bar_size: size of PCI bar towards DRAM.
 * @max_power_default: max power of the device after reset.
 * @dc_power_default: power consumed by the device in idle mode.
 * @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
 *                                      fault.
 * @pcie_dbi_base_address: Base address of the PCIE_DBI block.
 * @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
 * @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
 * @mmu_dram_default_page_addr: DRAM default page physical address.
 * @cb_va_start_addr: virtual start address of command buffers which are mapped
 *                    to the device's MMU.
 * @cb_va_end_addr: virtual end address of command buffers which are mapped to
 *                  the device's MMU.
 * @dram_hints_align_mask: dram va hint addresses alignment mask which is used
 *                         for hints validity check.
 * @device_dma_offset_for_host_access: the offset to add to host DMA addresses
 *                                     to enable the device to access them.
 * @max_freq_value: current max clk frequency.
 * @clk_pll_index: clock PLL index that specifies which PLL determines the clock
 *                 we display to the user.
 * @mmu_pgt_size: MMU page tables total size.
 * @mmu_pte_size: PTE size in MMU page tables.
 * @mmu_hop_table_size: MMU hop table size.
 * @mmu_hop0_tables_total_size: total size of MMU hop0 tables.
 * @dram_page_size: page size for MMU DRAM allocation.
 * @cfg_size: configuration space size on SRAM.
 * @sram_size: total size of SRAM.
 * @max_asid: maximum number of open contexts (ASIDs).
 * @num_of_events: number of possible internal H/W IRQs.
 * @psoc_pci_pll_nr: PCI PLL NR value.
 * @psoc_pci_pll_nf: PCI PLL NF value.
 * @psoc_pci_pll_od: PCI PLL OD value.
 * @psoc_pci_pll_div_factor: PCI PLL DIV FACTOR 1 value.
 * @psoc_timestamp_frequency: frequency of the psoc timestamp clock.
 * @high_pll: high PLL frequency used by the device.
 * @cb_pool_cb_cnt: number of CBs in the CB pool.
 * @cb_pool_cb_size: size of each CB in the CB pool.
 * @max_pending_cs: maximum number of concurrent pending command submissions.
 * @max_queues: maximum amount of queues in the system.
 * @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
 *                                capabilities reported by FW, bit description
 *                                can be found in CPU_BOOT_DEV_STS0
 * @fw_preboot_cpu_boot_dev_sts1: bitmap representation of preboot cpu
 *                                capabilities reported by FW, bit description
 *                                can be found in CPU_BOOT_DEV_STS1
 * @fw_bootfit_cpu_boot_dev_sts0: bitmap representation of boot cpu security
 *                                status reported by FW, bit description can be
 *                                found in CPU_BOOT_DEV_STS0
 * @fw_bootfit_cpu_boot_dev_sts1: bitmap representation of boot cpu security
 *                                status reported by FW, bit description can be
 *                                found in CPU_BOOT_DEV_STS1
 * @fw_app_cpu_boot_dev_sts0: bitmap representation of application security
 *                            status reported by FW, bit description can be
 *                            found in CPU_BOOT_DEV_STS0
 * @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
 *                            status reported by FW, bit description can be
 *                            found in CPU_BOOT_DEV_STS1
 * @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
 *                                      which the property supports_user_set_page_size is true
 *                                      (i.e. the DRAM supports multiple page sizes), otherwise
 *                                      it shall be equal to dram_page_size.
 * @collective_first_sob: first sync object available for collective use
 * @collective_first_mon: first monitor available for collective use
 * @sync_stream_first_sob: first sync object available for sync stream use
 * @sync_stream_first_mon: first monitor available for sync stream use
 * @first_available_user_sob: first sob available for the user
 * @first_available_user_mon: first monitor available for the user
 * @first_available_user_msix_interrupt: first available msix interrupt
 *                                       reserved for the user
 * @first_available_cq: first available CQ for the user.
 * @user_interrupt_count: number of user interrupts.
 * @server_type: Server type that the ASIC is currently installed in.
 *               The value is according to enum hl_server_type in uapi file.
 * @tpc_enabled_mask: which TPCs are enabled.
 * @completion_queues_count: number of completion queues.
 * @fw_security_enabled: true if security measures are enabled in firmware,
 *                       false otherwise.
 * @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
 *                              BOOT_DEV_STS0
 * @fw_cpu_boot_dev_sts1_valid: status bits are valid and can be fetched from
 *                              BOOT_DEV_STS1
 * @dram_supports_virtual_memory: is there an MMU towards the DRAM
 * @hard_reset_done_by_fw: true if firmware is handling hard reset flow
 * @num_functional_hbms: number of functional HBMs in each DCORE.
 * @hints_range_reservation: device support hint addresses range reservation.
 * @iatu_done_by_fw: true if iATU configuration is being done by FW.
 * @dynamic_fw_load: is dynamic FW load supported.
 * @gic_interrupts_enable: true if FW is not blocking GIC controller,
 *                         false otherwise.
 * @use_get_power_for_reset_history: To support backward compatibility for Goya
 *                                   and Gaudi.
 * @supports_soft_reset: is soft reset supported.
 * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
 *                              initiated by user or TDR. This is only true
 *                              in inference ASICs, as there is no real-world
 *                              use-case of doing soft-reset in training (due
 *                              to the fact that training runs on multiple
 *                              devices).
 * @configurable_stop_on_err: is stop-on-error option configurable via debugfs.
 * @set_max_power_on_device_init: true if need to set max power in F/W on device init.
 * @supports_user_set_page_size: true if user can set the allocation page size.
 */
struct asic_fixed_properties {
	struct hw_queue_properties *hw_queues_props;
	struct cpucp_info cpucp_info;
	char uboot_ver[VERSION_MAX_LEN];
	char preboot_ver[VERSION_MAX_LEN];
	struct hl_mmu_properties dmmu;
	struct hl_mmu_properties pmmu;
	struct hl_mmu_properties pmmu_huge;
	struct hl_hints_range hints_dram_reserved_va_range;
	struct hl_hints_range hints_host_reserved_va_range;
	struct hl_hints_range hints_host_hpage_reserved_va_range;
	u64 sram_base_address;
	u64 sram_end_address;
	u64 sram_user_base_address;
	u64 dram_base_address;
	u64 dram_end_address;
	u64 dram_user_base_address;
	u64 dram_size;
	u64 dram_pci_bar_size;
	u64 max_power_default;
	u64 dc_power_default;
	u64 dram_size_for_default_page_mapping;
	u64 pcie_dbi_base_address;
	u64 pcie_aux_dbi_reg_addr;
	u64 mmu_pgt_addr;
	u64 mmu_dram_default_page_addr;
	u64 cb_va_start_addr;
	u64 cb_va_end_addr;
	u64 dram_hints_align_mask;
	u64 device_dma_offset_for_host_access;
	u64 max_freq_value;
	u32 clk_pll_index;
	u32 mmu_pgt_size;
	u32 mmu_pte_size;
	u32 mmu_hop_table_size;
	u32 mmu_hop0_tables_total_size;
	u32 dram_page_size;
	u32 cfg_size;
	u32 sram_size;
	u32 max_asid;
	u32 num_of_events;
	u32 psoc_pci_pll_nr;
	u32 psoc_pci_pll_nf;
	u32 psoc_pci_pll_od;
	u32 psoc_pci_pll_div_factor;
	u32 psoc_timestamp_frequency;
	u32 high_pll;
	u32 cb_pool_cb_cnt;
	u32 cb_pool_cb_size;
	u32 max_pending_cs;
	u32 max_queues;
	u32 fw_preboot_cpu_boot_dev_sts0;
	u32 fw_preboot_cpu_boot_dev_sts1;
	u32 fw_bootfit_cpu_boot_dev_sts0;
	u32 fw_bootfit_cpu_boot_dev_sts1;
	u32 fw_app_cpu_boot_dev_sts0;
	u32 fw_app_cpu_boot_dev_sts1;
	u32 device_mem_alloc_default_page_size;
	u16 collective_first_sob;
	u16 collective_first_mon;
	u16 sync_stream_first_sob;
	u16 sync_stream_first_mon;
	u16 first_available_user_sob[HL_MAX_DCORES];
	u16 first_available_user_mon[HL_MAX_DCORES];
	u16 first_available_user_msix_interrupt;
	u16 first_available_cq[HL_MAX_DCORES];
	u16 user_interrupt_count;
	u16 server_type;
	u8 tpc_enabled_mask;
	u8 completion_queues_count;
	u8 fw_security_enabled;
	u8 fw_cpu_boot_dev_sts0_valid;
	u8 fw_cpu_boot_dev_sts1_valid;
	u8 dram_supports_virtual_memory;
	u8 hard_reset_done_by_fw;
	u8 num_functional_hbms;
	u8 hints_range_reservation;
	u8 iatu_done_by_fw;
	u8 dynamic_fw_load;
	u8 gic_interrupts_enable;
	u8 use_get_power_for_reset_history;
	u8 supports_soft_reset;
	u8 allow_inference_soft_reset;
	u8 configurable_stop_on_err;
	u8 set_max_power_on_device_init;
	u8 supports_user_set_page_size;
};
/**
 * struct hl_fence - software synchronization primitive
 * @completion: fence is implemented using completion
 * @refcount: refcount for this fence
 * @cs_sequence: sequence of the corresponding command submission
 * @stream_master_qid_map: streams masters QID bitmap to represent all streams
 *                         masters QIDs that multi-CS is waiting on
 * @error: mark this fence with error
 * @timestamp: timestamp upon completion
 * @mcs_handling_done: indicates that corresponding command submission has
 *                     finished mcs handling, this does not mean it was part
 *                     of the mcs.
 */
struct hl_fence {
	struct completion completion;
	struct kref refcount;
	u64 cs_sequence;
	u32 stream_master_qid_map;
	int error;
	ktime_t timestamp;
	u8 mcs_handling_done;
};
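/*
 * Illustrative sketch, not driver code: the canonical blocking wait on an
 * hl_fence, assuming the embedded completion was initialized by the fence
 * creator.
 */
static inline void hl_example_fence_wait(struct hl_fence *fence)
{
	wait_for_completion(&fence->completion);
}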
/**
 * struct hl_cs_compl - command submission completion object.
 * @base_fence: hl fence object.
 * @lock: spinlock to protect fence.
 * @hdev: habanalabs device structure.
 * @hw_sob: the H/W SOB used in this signal/wait CS.
 * @encaps_sig_hdl: encaps signals handler.
 * @cs_seq: command submission sequence number.
 * @type: type of the CS - signal/wait.
 * @sob_val: the SOB value that is used in this signal/wait CS.
 * @sob_group: the SOB group that is used in this collective wait CS.
 * @encaps_signals: indication whether it's a completion object of cs with
 *                  encaps signals or not.
 */
struct hl_cs_compl {
	struct hl_fence base_fence;
	spinlock_t lock;
	struct hl_device *hdev;
	struct hl_hw_sob *hw_sob;
	struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
	u64 cs_seq;
	enum hl_cs_type type;
	u16 sob_val;
	u16 sob_group;
	bool encaps_signals;
};

/*
 * Command Buffers
 */

/**
 * struct hl_cb_mgr - describes a Command Buffer Manager.
 * @cb_lock: protects cb_handles.
 * @cb_handles: an idr to hold all command buffer handles.
 */
struct hl_cb_mgr {
	spinlock_t cb_lock;
	struct idr cb_handles; /* protected by cb_lock */
};

/**
 * struct hl_ts_mgr - describes the timestamp registration memory manager.
 * @ts_lock: protects ts_handles.
 * @ts_handles: an idr to hold all ts buffer handles.
 */
struct hl_ts_mgr {
	spinlock_t ts_lock;
	struct idr ts_handles;
};
/**
 * struct hl_ts_buff - describes a timestamp buffer.
 * @refcount: reference counter for usage of the buffer.
 * @hdev: pointer to device this buffer belongs to.
 * @mmap: true if the buff is currently mapped to user.
 * @kernel_buff_address: Holds the internal buffer's kernel virtual address.
 * @user_buff_address: Holds the user buffer's kernel virtual address.
 * @id: the buffer ID.
 * @mmap_size: Holds the buffer size that was mmaped.
 * @kernel_buff_size: Holds the internal kernel buffer size.
 * @user_buff_size: Holds the user buffer size.
 */
struct hl_ts_buff {
	struct kref refcount;
	struct hl_device *hdev;
	atomic_t mmap;
	void *kernel_buff_address;
	void *user_buff_address;
	u32 id;
	u32 mmap_size;
	u32 kernel_buff_size;
	u32 user_buff_size;
};

/**
 * struct hl_cb - describes a Command Buffer.
 * @refcount: reference counter for usage of the CB.
 * @hdev: pointer to device this CB belongs to.
 * @ctx: pointer to the CB owner's context.
 * @lock: spinlock to protect mmap flows.
 * @debugfs_list: node in debugfs list of command buffers.
 * @pool_list: node in pool list of command buffers.
 * @va_block_list: list of virtual addresses blocks of the CB if it is mapped to
 *                 the device's MMU.
 * @id: the CB's ID.
 * @kernel_address: Holds the CB's kernel virtual address.
 * @bus_address: Holds the CB's DMA address.
 * @mmap_size: Holds the CB's size that was mmaped.
 * @size: holds the CB's size.
 * @cs_cnt: holds number of CS that this CB participates in.
 * @mmap: true if the CB is currently mmaped to user.
 * @is_pool: true if CB was acquired from the pool, false otherwise.
 * @is_internal: internally allocated.
 * @is_mmu_mapped: true if the CB is mapped to the device's MMU.
 */
struct hl_cb {
	struct kref refcount;
	struct hl_device *hdev;
	struct hl_ctx *ctx;
	spinlock_t lock;
	struct list_head debugfs_list;
	struct list_head pool_list;
	struct list_head va_block_list;
	u64 id;
	void *kernel_address;
	dma_addr_t bus_address;
	u32 mmap_size;
	u32 size;
	atomic_t cs_cnt;
	u8 mmap;
	u8 is_pool;
	u8 is_internal;
	u8 is_mmu_mapped;
};

/*
 * QUEUES
 */

struct hl_cs;
struct hl_cs_job;
/* Queue length of external and HW queues */
#define HL_QUEUE_LENGTH 4096
#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE)

#if (HL_MAX_JOBS_PER_CS > HL_QUEUE_LENGTH)
#error "HL_QUEUE_LENGTH must be greater than HL_MAX_JOBS_PER_CS"
#endif
/* HL_CQ_LENGTH is in units of struct hl_cq_entry */
#define HL_CQ_LENGTH HL_QUEUE_LENGTH
#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE)

/* Must be power of 2 */
#define HL_EQ_LENGTH 64
#define HL_EQ_SIZE_IN_BYTES (HL_EQ_LENGTH * HL_EQ_ENTRY_SIZE)
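/*
 * Illustrative compile-time guard, not present in the driver: the "must be
 * power of 2" requirement above could be asserted as follows.
 */
static_assert(IS_POWER_OF_2(HL_EQ_LENGTH), "HL_EQ_LENGTH must be a power of 2");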
/* Host <-> CPU-CP shared memory size */
#define HL_CPU_ACCESSIBLE_MEM_SIZE SZ_2M

/**
 * struct hl_sync_stream_properties -
 *     describes a H/W queue sync stream properties
 * @hw_sob: array of the used H/W SOBs by this H/W queue.
 * @next_sob_val: the next value to use for the currently used SOB.
 * @base_sob_id: the base SOB id of the SOBs used by this queue.
 * @base_mon_id: the base MON id of the MONs used by this queue.
 * @collective_mstr_mon_id: the MON ids of the MONs used by this master queue
 *                          in order to sync with all slave queues.
 * @collective_slave_mon_id: the MON id used by this slave queue in order to
 *                           sync with its master queue.
 * @collective_sob_id: current SOB id used by this collective slave queue
 *                     to signal its collective master queue upon completion.
 * @curr_sob_offset: the id offset to the currently used SOB from the
 *                   HL_RSVD_SOBS that are being used by this queue.
 */
struct hl_sync_stream_properties {
	struct hl_hw_sob hw_sob[HL_RSVD_SOBS];
	u16 next_sob_val;
	u16 base_sob_id;
	u16 base_mon_id;
	u16 collective_mstr_mon_id[HL_COLLECTIVE_RSVD_MSTR_MONS];
	u16 collective_slave_mon_id;
	u16 collective_sob_id;
	u8 curr_sob_offset;
};

/**
 * struct hl_encaps_signals_mgr - describes sync stream encapsulated signals
 *                                handles manager
 * @lock: protects handles.
 * @handles: an idr to hold all encapsulated signals handles.
 */
struct hl_encaps_signals_mgr {
	spinlock_t lock;
	struct idr handles;
};
/**
 * struct hl_hw_queue - describes a H/W transport queue.
 * @shadow_queue: pointer to a shadow queue that holds pointers to jobs.
 * @sync_stream_prop: sync stream queue properties
 * @queue_type: type of queue.
 * @collective_mode: collective mode of current queue
 * @kernel_address: holds the queue's kernel virtual address.
 * @bus_address: holds the queue's DMA address.
 * @pi: holds the queue's pi value.
 * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci).
 * @hw_queue_id: the id of the H/W queue.
 * @cq_id: the id for the corresponding CQ for this H/W queue.
 * @msi_vec: the IRQ number of the H/W queue.
 * @int_queue_len: length of internal queue (number of entries).
 * @valid: is the queue valid (we have array of 32 queues, not all of them
 *         exist).
 * @supports_sync_stream: True if queue supports sync stream
 */
struct hl_hw_queue {
	struct hl_cs_job **shadow_queue;
	struct hl_sync_stream_properties sync_stream_prop;
	enum hl_queue_type queue_type;
	enum hl_collective_mode collective_mode;
	void *kernel_address;
	dma_addr_t bus_address;
	u32 pi;
	atomic_t ci;
	u32 hw_queue_id;
	u32 cq_id;
	u32 msi_vec;
	u16 int_queue_len;
	u8 valid;
	u8 supports_sync_stream;
};

/**
 * struct hl_cq - describes a completion queue
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @cq_idx: completion queue index in array
 * @hw_queue_id: the id of the matching H/W queue
 * @ci: ci inside the queue
 * @pi: pi inside the queue
 * @free_slots_cnt: counter of free slots in queue
 */
struct hl_cq {
	struct hl_device *hdev;
	void *kernel_address;
	dma_addr_t bus_address;
	u32 cq_idx;
	u32 hw_queue_id;
	u32 ci;
	u32 pi;
	atomic_t free_slots_cnt;
};
/**
 * struct hl_user_interrupt - holds user interrupt information
 * @hdev: pointer to the device structure
 * @wait_list_head: head to the list of user threads pending on this interrupt
 * @wait_list_lock: protects wait_list_head
 * @interrupt_id: msix interrupt id
 */
struct hl_user_interrupt {
	struct hl_device *hdev;
	struct list_head wait_list_head;
	spinlock_t wait_list_lock;
	u32 interrupt_id;
};

/**
 * struct timestamp_reg_free_node - holds the timestamp registration free objects node
 * @free_objects_node: node in the list free_obj_jobs
 * @cq_cb: pointer to cq command buffer to be freed
 * @ts_buff: pointer to timestamp buffer to be freed
 */
struct timestamp_reg_free_node {
	struct list_head free_objects_node;
	struct hl_cb *cq_cb;
	struct hl_ts_buff *ts_buff;
};

/* struct timestamp_reg_work_obj - holds the timestamp registration free objects job
 * the job will be to pass over the free_obj_jobs list and put refcount to objects
 * in each node of the list
 * @free_obj: workqueue object to free timestamp registration node objects
 * @hdev: pointer to the device structure
 * @free_obj_head: list of free jobs nodes (node type timestamp_reg_free_node)
 */
struct timestamp_reg_work_obj {
	struct work_struct free_obj;
	struct hl_device *hdev;
	struct list_head *free_obj_head;
};

/* struct timestamp_reg_info - holds the timestamp registration related data.
 * @ts_buff: pointer to the timestamp buffer which includes both user/kernel buffers.
 *           relevant only when doing timestamps records registration.
 * @cq_cb: pointer to CQ counter CB.
 * @timestamp_kernel_addr: timestamp handle address, where to set timestamp
 *                         relevant only when doing timestamps records
 *                         registration.
 * @in_use: indicates if the node is already in use. relevant only when doing
 *          timestamps records registration, since in this case the driver
 *          will have its own buffer which serves as a records pool instead of
 *          allocating records dynamically.
 */
struct timestamp_reg_info {
	struct hl_ts_buff *ts_buff;
	struct hl_cb *cq_cb;
	u64 *timestamp_kernel_addr;
	u8 in_use;
};

/**
 * struct hl_user_pending_interrupt - holds a context to a user thread
 *                                    pending on an interrupt
 * @ts_reg_info: holds the timestamps registration nodes info
 * @wait_list_node: node in the list of user threads pending on an interrupt
 * @fence: hl fence object for interrupt completion
 * @cq_target_value: CQ target value
 * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
 *                  handler for target value comparison
 */
struct hl_user_pending_interrupt {
	struct timestamp_reg_info ts_reg_info;
	struct list_head wait_list_node;
	struct hl_fence fence;
	u64 cq_target_value;
	u64 *cq_kernel_addr;
};

/**
 * struct hl_eq - describes the event queue (single one per device)
 * @hdev: pointer to the device structure
 * @kernel_address: holds the queue's kernel virtual address
 * @bus_address: holds the queue's DMA address
 * @ci: ci inside the queue
 * @prev_eqe_index: the index of the previous event queue entry. The current
 *                  entry's index must be +1 of the previous one.
 * @check_eqe_index: do we need to check the index of the current entry vs. the
 *                   previous one. This is for backward compatibility with older
 *                   firmwares.
 */
struct hl_eq {
	struct hl_device *hdev;
	void *kernel_address;
	dma_addr_t bus_address;
	u32 ci;
	u32 prev_eqe_index;
	bool check_eqe_index;
};

/*
 * ASICs
 */
/**
 * enum hl_asic_type - supported ASIC types.
 * @ASIC_INVALID: Invalid ASIC type.
 * @ASIC_GOYA: Goya device.
 * @ASIC_GAUDI: Gaudi device.
 * @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
 */
enum hl_asic_type {
	ASIC_INVALID,
	ASIC_GOYA,
	ASIC_GAUDI,
	ASIC_GAUDI_SEC
};

struct hl_cs_parser;

/**
 * enum hl_pm_mng_profile - power management profile.
 * @PM_AUTO: internal clock is set by the Linux driver.
 * @PM_MANUAL: internal clock is set by the user.
 * @PM_LAST: last power management type.
 */
enum hl_pm_mng_profile {
	PM_AUTO = 1,
	PM_MANUAL,
	PM_LAST
};

/**
 * enum hl_pll_frequency - PLL frequency.
 * @PLL_HIGH: high frequency.
 * @PLL_LOW: low frequency.
 * @PLL_LAST: last frequency values that were configured by the user.
 */
enum hl_pll_frequency {
	PLL_HIGH = 1,
	PLL_LOW,
	PLL_LAST
};
#define PLL_REF_CLK 50

enum div_select_defs {
	DIV_SEL_REF_CLK = 0,
	DIV_SEL_PLL_CLK = 1,
	DIV_SEL_DIVIDED_REF = 2,
	DIV_SEL_DIVIDED_PLL = 3,
};
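/*
 * Illustrative sketch, not driver code: deriving a clock frequency in MHz
 * from PLL register fields, following the div_select semantics above. The
 * nr/nf/od/div_fctr values are raw register fields whose exact layout is
 * ASIC specific and assumed here.
 */
static inline u32 hl_example_pll_freq_mhz(u32 div_sel, u32 nr, u32 nf,
					  u32 od, u32 div_fctr)
{
	u32 pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));

	switch (div_sel) {
	case DIV_SEL_REF_CLK:
		return PLL_REF_CLK; /* bypass: raw reference clock */
	case DIV_SEL_DIVIDED_REF:
		return PLL_REF_CLK / (div_fctr + 1);
	case DIV_SEL_PLL_CLK:
		return pll_clk;
	case DIV_SEL_DIVIDED_PLL:
	default:
		return pll_clk / (div_fctr + 1);
	}
}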
/**
 * struct pci_mem_region - describe memory region in a PCI bar
 * @region_base: region base address
 * @region_size: region size
 * @bar_size: size of the BAR
 * @offset_in_bar: region offset into the bar
 * @bar_id: bar ID of the region
 * @used: if used 1, otherwise 0
 */
struct pci_mem_region {
	u64 region_base;
	u64 region_size;
	u64 bar_size;
	u64 offset_in_bar;
	u8 bar_id;
	u8 used;
};

/**
 * struct static_fw_load_mgr - static FW load manager
 * @preboot_version_max_off: max offset to preboot version
 * @boot_fit_version_max_off: max offset to boot fit version
 * @kmd_msg_to_cpu_reg: register address for KMD->CPU messages
 * @cpu_cmd_status_to_host_reg: register address for CPU command status response
 * @cpu_boot_status_reg: boot status register
 * @cpu_boot_dev_status0_reg: boot device status register 0
 * @cpu_boot_dev_status1_reg: boot device status register 1
 * @boot_err0_reg: boot error register 0
 * @boot_err1_reg: boot error register 1
 * @preboot_version_offset_reg: SRAM offset to preboot version register
 * @boot_fit_version_offset_reg: SRAM offset to boot fit version register
 * @sram_offset_mask: mask for getting offset into the SRAM
 * @cpu_reset_wait_msec: used when setting WFE via kmd_msg_to_cpu_reg
 */
struct static_fw_load_mgr {
	u64 preboot_version_max_off;
	u64 boot_fit_version_max_off;
	u32 kmd_msg_to_cpu_reg;
	u32 cpu_cmd_status_to_host_reg;
	u32 cpu_boot_status_reg;
	u32 cpu_boot_dev_status0_reg;
	u32 cpu_boot_dev_status1_reg;
	u32 boot_err0_reg;
	u32 boot_err1_reg;
	u32 preboot_version_offset_reg;
	u32 boot_fit_version_offset_reg;
	u32 sram_offset_mask;
	u32 cpu_reset_wait_msec;
};

/**
 * struct fw_response - FW response to LKD command
 * @ram_offset: descriptor offset into the RAM
 * @ram_type: RAM type containing the descriptor (SRAM/DRAM)
 * @status: command status
 */
struct fw_response {
	u32 ram_offset;
	u8 ram_type;
	u8 status;
};

/**
 * struct dynamic_fw_load_mgr - dynamic FW load manager
 * @response: FW to LKD response
 * @comm_desc: the communication descriptor with FW
 * @image_region: region to copy the FW image to
 * @fw_image_size: size of FW image to load
 * @wait_for_bl_timeout: timeout for waiting for boot loader to respond
 * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used
 */
struct dynamic_fw_load_mgr {
	struct fw_response response;
	struct lkd_fw_comms_desc comm_desc;
	struct pci_mem_region *image_region;
	size_t fw_image_size;
	u32 wait_for_bl_timeout;
	bool fw_desc_valid;
};

/**
 * struct fw_image_props - properties of FW image
 * @image_name: name of the image
 * @src_off: offset in src FW to copy from
 * @copy_size: amount of bytes to copy (0 to copy the whole binary)
 */
struct fw_image_props {
	char *image_name;
	u32 src_off;
	u32 copy_size;
};

/**
 * struct fw_load_mgr - manager of the FW loading process
 * @dynamic_loader: specific structure for dynamic load
 * @static_loader: specific structure for static load
 * @boot_fit_img: boot fit image properties
 * @linux_img: linux image properties
 * @cpu_timeout: CPU response timeout in usec
 * @boot_fit_timeout: Boot fit load timeout in usec
 * @skip_bmc: should BMC be skipped
 * @sram_bar_id: SRAM bar ID
 * @dram_bar_id: DRAM bar ID
 * @fw_comp_loaded: bitmask of loaded FW components. set bit meaning loaded
 *                  component. values are set according to enum hl_fw_types.
 */
struct fw_load_mgr {
	union {
		struct dynamic_fw_load_mgr dynamic_loader;
		struct static_fw_load_mgr static_loader;
	};
	struct fw_image_props boot_fit_img;
	struct fw_image_props linux_img;
	u32 cpu_timeout;
	u32 boot_fit_timeout;
	u8 skip_bmc;
	u8 sram_bar_id;
	u8 dram_bar_id;
	u8 fw_comp_loaded;
};
/**
 * struct hl_asic_funcs - ASIC specific functions that can be called from
 *                        common code.
 * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W.
 * @early_fini: tears down what was done in early_init.
 * @late_init: sets up late driver/hw state (post hw_init) - Optional.
 * @late_fini: tears down what was done in late_init (pre hw_fini) - Optional.
 * @sw_init: sets up driver state, does not configure H/W.
 * @sw_fini: tears down driver state, does not configure H/W.
 * @hw_init: sets up the H/W state.
 * @hw_fini: tears down the H/W state.
 * @halt_engines: halt engines, needed for reset sequence. This also disables
 *                interrupts from the device. Should be called before
 *                hw_fini and before CS rollback.
 * @suspend: handles IP specific H/W or SW changes for suspend.
 * @resume: handles IP specific H/W or SW changes for resume.
 * @mmap: maps memory.
 * @ring_doorbell: increment PI on a given QMAN.
 * @pqe_write: Write the PQ entry to the PQ. This is ASIC-specific
 *             function because the PQs are located in different memory areas
 *             per ASIC (SRAM, DRAM, Host memory) and therefore, the method of
 *             writing the PQE must match the destination memory area.
 * @asic_dma_alloc_coherent: Allocate coherent DMA memory by calling
 *                           dma_alloc_coherent(). This is ASIC function because
 *                           its implementation is not trivial when the driver
 *                           is loaded in simulation mode (not upstreamed).
 * @asic_dma_free_coherent: Free coherent DMA memory by calling
 *                          dma_free_coherent(). This is ASIC function because
 *                          its implementation is not trivial when the driver
 *                          is loaded in simulation mode (not upstreamed).
 * @scrub_device_mem: Scrub device memory given an address and size.
 * @get_int_queue_base: get the internal queue base address.
 * @test_queues: run simple test on all queues for sanity check.
 * @asic_dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool.
 *                        size of allocation is HL_DMA_POOL_BLK_SIZE.
 * @asic_dma_pool_free: free small DMA allocation from pool.
 * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
 * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
 * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
 * @cs_parser: parse Command Submission.
 * @asic_dma_map_sg: DMA map scatter-gather list.
 * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
 * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
 * @update_eq_ci: update event queue CI.
 * @context_switch: called upon ASID context switch.
 * @restore_phase_topology: clear all SOBs and MONs.
 * @debugfs_read32: debug interface for reading u32 from DRAM/SRAM/Host memory.
 * @debugfs_write32: debug interface for writing u32 to DRAM/SRAM/Host memory.
 * @debugfs_read64: debug interface for reading u64 from DRAM/SRAM/Host memory.
 * @debugfs_write64: debug interface for writing u64 to DRAM/SRAM/Host memory.
 * @debugfs_read_dma: debug interface for reading up to 2MB from the device's
 *                    internal memory via DMA engine.
 * @add_device_attr: add ASIC specific device attributes.
 * @handle_eqe: handle event queue entry (IRQ) from CPU-CP.
 * @get_events_stat: retrieve event queue entries histogram.
 * @read_pte: read MMU page table entry from DRAM.
 * @write_pte: write MMU page table entry to DRAM.
 * @mmu_invalidate_cache: flush MMU STLB host/DRAM cache, either with soft
 *                        (L1 only) or hard (L0 & L1) flush.
 * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
 *                              ASID-VA-size mask.
 * @send_heartbeat: send is-alive packet to CPU-CP and verify response.
 * @debug_coresight: perform certain actions on Coresight for debugging.
 * @is_device_idle: return true if device is idle, false otherwise.
 * @non_hard_reset_late_init: perform certain actions needed after a reset
 *                            which is not hard-reset.
 * @hw_queues_lock: acquire H/W queues lock.
 * @hw_queues_unlock: release H/W queues lock.
 * @get_pci_id: retrieve PCI ID.
 * @get_eeprom_data: retrieve EEPROM data from F/W.
 * @send_cpu_message: send message to F/W. If the message times out, the
 *                    driver will eventually reset the device. The timeout can
 *                    be determined by the calling function or it can be 0 and
 *                    then the timeout is the default timeout for the specific
 *                    ASIC.
 * @get_hw_state: retrieve the H/W state.
 * @pci_bars_map: Map PCI BARs.
 * @init_iatu: Initialize the iATU unit inside the PCI controller.
 * @rreg: Read a register. Needed for simulator support.
 * @wreg: Write a register. Needed for simulator support.
 * @halt_coresight: stop the ETF and ETR traces.
 * @ctx_init: context dependent initialization.
 * @ctx_fini: context dependent cleanup.
 * @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
 * @load_firmware_to_device: load the firmware to the device's memory.
 * @load_boot_fit_to_device: load boot fit to device's memory.
 * @get_signal_cb_size: Get signal CB size.
 * @get_wait_cb_size: Get wait CB size.
 * @gen_signal_cb: Generate a signal CB.
 * @gen_wait_cb: Generate a wait CB.
 * @reset_sob: Reset a SOB.
 * @reset_sob_group: Reset SOB group.
 * @set_dma_mask_from_fw: set the DMA mask in the driver according to the
 *                        firmware configuration.
 * @get_device_time: Get the device time.
 * @collective_wait_init_cs: Generate collective master/slave packets
 *                           and place them in the relevant cs jobs.
 * @collective_wait_create_jobs: allocate collective wait cs jobs.
 * @scramble_addr: Routine to scramble the address prior to mapping it
 *                 in the MMU.
 * @descramble_addr: Routine to de-scramble the address prior to
 *                   showing it to users.
 * @ack_protection_bits_errors: ack and dump all security violations.
 * @get_hw_block_id: retrieve a HW block id to be used by the user to mmap it.
 *                   also returns the size of the block if caller supplies
 *                   a valid pointer for it.
 * @hw_block_mmap: mmap a HW block with a given id.
 * @enable_events_from_fw: send interrupt to firmware to notify them the
 *                         driver is ready to receive asynchronous events. This
 *                         function should be called during the first init and
 *                         after every hard-reset of the device.
 * @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event.
 * @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
 *                         generic f/w compatible PLL Indexes.
 * @init_firmware_loader: initialize data for FW loader.
 * @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling.
 * @state_dump_init: initialize constants required for state dump.
 * @get_sob_addr: get SOB base address offset.
 * @set_pci_memory_regions: setting properties of PCI memory regions.
 * @get_stream_master_qid_arr: get pointer to stream masters QID array.
 * @is_valid_dram_page_size: return true if page size is supported in device
 *                           memory allocation, otherwise false.
 */
struct hl_asic_funcs {
	int (*early_init)(struct hl_device *hdev);
	int (*early_fini)(struct hl_device *hdev);
	int (*late_init)(struct hl_device *hdev);
	void (*late_fini)(struct hl_device *hdev);
	int (*sw_init)(struct hl_device *hdev);
	int (*sw_fini)(struct hl_device *hdev);
	int (*hw_init)(struct hl_device *hdev);
	void (*hw_fini)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
	void (*halt_engines)(struct hl_device *hdev, bool hard_reset, bool fw_reset);
	int (*suspend)(struct hl_device *hdev);
	int (*resume)(struct hl_device *hdev);
	int (*mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size);
	void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi);
	void (*pqe_write)(struct hl_device *hdev, __le64 *pqe,
			struct hl_bd *bd);
	void* (*asic_dma_alloc_coherent)(struct hl_device *hdev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag);
	void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle);
	int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
	void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
					dma_addr_t *dma_handle, u16 *queue_len);
	int (*test_queues)(struct hl_device *hdev);
	void* (*asic_dma_pool_zalloc)(struct hl_device *hdev, size_t size,
					gfp_t mem_flags, dma_addr_t *dma_handle);
	void (*asic_dma_pool_free)(struct hl_device *hdev, void *vaddr,
					dma_addr_t dma_addr);
	void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev,
					size_t size, dma_addr_t *dma_handle);
	void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
					size_t size, void *vaddr);
	void (*hl_dma_unmap_sg)(struct hl_device *hdev,
					struct scatterlist *sgl, int nents,
					enum dma_data_direction dir);
	int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
	int (*asic_dma_map_sg)(struct hl_device *hdev,
					struct scatterlist *sgl, int nents,
					enum dma_data_direction dir);
	u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
					struct sg_table *sgt);
	void (*add_end_of_cb_packets)(struct hl_device *hdev,
					void *kernel_address, u32 len,
					u64 cq_addr, u32 cq_val, u32 msix_num,
					bool eb);
	void (*update_eq_ci)(struct hl_device *hdev, u32 val);
	int (*context_switch)(struct hl_device *hdev, u32 asid);
	void (*restore_phase_topology)(struct hl_device *hdev);
	int (*debugfs_read32)(struct hl_device *hdev, u64 addr,
				bool user_address, u32 *val);
	int (*debugfs_write32)(struct hl_device *hdev, u64 addr,
				bool user_address, u32 val);
	int (*debugfs_read64)(struct hl_device *hdev, u64 addr,
				bool user_address, u64 *val);
	int (*debugfs_write64)(struct hl_device *hdev, u64 addr,
				bool user_address, u64 val);
	int (*debugfs_read_dma)(struct hl_device *hdev, u64 addr, u32 size,
				void *blob_addr);
	void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp,
				struct attribute_group *dev_vrm_attr_grp);
	void (*handle_eqe)(struct hl_device *hdev,
				struct hl_eq_entry *eq_entry);
	void* (*get_events_stat)(struct hl_device *hdev, bool aggregate,
				u32 *size);
	u64 (*read_pte)(struct hl_device *hdev, u64 addr);
	void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val);
	int (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard,
				u32 flags);
	int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
				u32 flags, u32 asid, u64 va, u64 size);
	int (*send_heartbeat)(struct hl_device *hdev);
	int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
	bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
				u8 mask_len, struct seq_file *s);
	int (*non_hard_reset_late_init)(struct hl_device *hdev);
	void (*hw_queues_lock)(struct hl_device *hdev);
	void (*hw_queues_unlock)(struct hl_device *hdev);
	u32 (*get_pci_id)(struct hl_device *hdev);
	int (*get_eeprom_data)(struct hl_device *hdev, void *data,
				size_t max_size);
	int (*send_cpu_message)(struct hl_device *hdev, u32 *msg,
				u16 len, u32 timeout, u64 *result);
	int (*pci_bars_map)(struct hl_device *hdev);
	int (*init_iatu)(struct hl_device *hdev);
	u32 (*rreg)(struct hl_device *hdev, u32 reg);
	void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
	void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
	int (*ctx_init)(struct hl_ctx *ctx);
	void (*ctx_fini)(struct hl_ctx *ctx);
	u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
	int (*load_firmware_to_device)(struct hl_device *hdev);
	int (*load_boot_fit_to_device)(struct hl_device *hdev);
	u32 (*get_signal_cb_size)(struct hl_device *hdev);
	u32 (*get_wait_cb_size)(struct hl_device *hdev);
	u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
				u32 size, bool eb);
	u32 (*gen_wait_cb)(struct hl_device *hdev,
				struct hl_gen_wait_properties *prop);
	void (*reset_sob)(struct hl_device *hdev, void *data);
	void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
	void (*set_dma_mask_from_fw)(struct hl_device *hdev);
	u64 (*get_device_time)(struct hl_device *hdev);
	int (*collective_wait_init_cs)(struct hl_cs *cs);
	int (*collective_wait_create_jobs)(struct hl_device *hdev,
			struct hl_ctx *ctx, struct hl_cs *cs,
			u32 wait_queue_id, u32 collective_engine_id,
			u32 encaps_signal_offset);
	u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
	u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
	void (*ack_protection_bits_errors)(struct hl_device *hdev);
	int (*get_hw_block_id)(struct hl_device *hdev, u64 block_addr,
				u32 *block_size, u32 *block_id);
	int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
				u32 block_id, u32 block_size);
	void (*enable_events_from_fw)(struct hl_device *hdev);
	void (*get_msi_info)(__le32 *table);
	int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
	void (*init_firmware_loader)(struct hl_device *hdev);
	void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
	void (*state_dump_init)(struct hl_device *hdev);
	u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
	void (*set_pci_memory_regions)(struct hl_device *hdev);
	u32* (*get_stream_master_qid_arr)(void);
	bool (*is_valid_dram_page_size)(u32 page_size);
};
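/*
 * Illustrative sketch, not driver code: common code reaches H/W only
 * through this ops table. The rreg/wreg indirection, for example, is what
 * lets a simulator backend intercept MMIO accesses. The funcs pointer is
 * passed explicitly here because struct hl_device is defined elsewhere.
 */
static inline u32 hl_example_rreg(struct hl_device *hdev,
				  const struct hl_asic_funcs *funcs, u32 reg)
{
	return funcs->rreg(hdev, reg);
}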
#define HL_KERNEL_ASID_ID 0

/**
 * enum hl_va_range_type - virtual address range type.
 * @HL_VA_RANGE_TYPE_HOST: range type of host pages
 * @HL_VA_RANGE_TYPE_HOST_HUGE: range type of host huge pages
 * @HL_VA_RANGE_TYPE_DRAM: range type of dram pages
 */
enum hl_va_range_type {
	HL_VA_RANGE_TYPE_HOST,
	HL_VA_RANGE_TYPE_HOST_HUGE,
	HL_VA_RANGE_TYPE_DRAM,
	HL_VA_RANGE_TYPE_MAX
};

/**
 * struct hl_va_range - virtual addresses range.
 * @lock: protects the virtual addresses list.
 * @list: list of virtual addresses blocks available for mappings.
 * @start_addr: range start address.
 * @end_addr: range end address.
 * @page_size: page size of this va range.
 */
struct hl_va_range {
	struct mutex lock;
	struct list_head list;
	u64 start_addr;
	u64 end_addr;
	u32 page_size;
};
/**
 * struct hl_cs_counters_atomic - command submission counters
 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
 * @parsing_drop_cnt: dropped due to error in packet parsing
 * @queue_full_drop_cnt: dropped due to queue full
 * @device_in_reset_drop_cnt: dropped due to device in reset
 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
 * @validation_drop_cnt: dropped due to error in validation
 */
struct hl_cs_counters_atomic {
	atomic64_t out_of_mem_drop_cnt;
	atomic64_t parsing_drop_cnt;
	atomic64_t queue_full_drop_cnt;
	atomic64_t device_in_reset_drop_cnt;
	atomic64_t max_cs_in_flight_drop_cnt;
	atomic64_t validation_drop_cnt;
};
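/*
 * Illustrative sketch, not driver code: these counters are bumped lock-free
 * from the CS submission paths, e.g. when a submission is rejected while
 * the device is in reset.
 */
static inline void hl_example_count_reset_drop(struct hl_cs_counters_atomic *cntr)
{
	atomic64_inc(&cntr->device_in_reset_drop_cnt);
}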
/**
 * struct hl_dmabuf_priv - a dma-buf private object.
 * @dmabuf: pointer to dma-buf object.
 * @ctx: pointer to the dma-buf owner's context.
 * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported
 *                from a memory allocation handle.
 * @device_address: physical address of the device's memory. Relevant only
 *                  if phys_pg_pack is NULL (dma-buf was exported from address).
 *                  The total size can be taken from the dmabuf object.
 */
struct hl_dmabuf_priv {
	struct dma_buf *dmabuf;
	struct hl_ctx *ctx;
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	uint64_t device_address;
};
/**
 * struct hl_ctx - user/kernel context.
 * @mem_hash: holds mapping from virtual address to virtual memory area
 *            descriptor (hl_vm_phys_pg_list or hl_userptr).
 * @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
 * @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
 * @hdev: pointer to the device structure.
 * @refcount: reference counter for the context. Context is released only when
 *            this hits 0. It is incremented on CS and CS_WAIT.
 * @cs_pending: array of hl fence objects representing pending CS.
 * @va_range: holds available virtual addresses for host and dram mappings.
 * @mem_hash_lock: protects the mem_hash.
 * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
 *            MMU hash or walking the PGT requires taking this lock.
 * @hw_block_list_lock: protects the HW block memory list.
 * @debugfs_list: node in debugfs list of contexts.
 * @hw_block_mem_list: list of HW block virtual mapped addresses.
 * @cs_counters: context command submission counters.
 * @cb_va_pool: device VA pool for command buffers which are mapped to the
 *              device's MMU.
 * @sig_mgr: encaps signals handle manager.
 * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed
 *               to user so user could inquire about CS. It is used as
 *               index to cs_pending array.
 * @dram_default_hops: array that holds all hops addresses needed for default
 *                     page mapping.
 * @cs_lock: spinlock to protect cs_sequence.
 * @dram_phys_mem: amount of used physical DRAM memory by this context.
 * @thread_ctx_switch_token: token to prevent multiple threads of the same
 *                           context from running the context switch phase.
 *                           Only a single thread should run it.
 * @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
 *                                the context switch phase from moving to their
 *                                execution phase before the context switch phase
 *                                has finished.
 * @asid: context's unique address space ID in the device's MMU.
 * @handle: context's opaque handle for user
 */
struct hl_ctx {
	DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
	DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
	struct hl_fpriv *hpriv;
	struct hl_device *hdev;
	struct kref refcount;
	struct hl_fence **cs_pending;
	struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
	struct mutex mem_hash_lock;
	struct mutex mmu_lock;
	struct mutex hw_block_list_lock;
	struct list_head debugfs_list;
	struct list_head hw_block_mem_list;
	struct hl_cs_counters_atomic cs_counters;
	struct gen_pool *cb_va_pool;
	struct hl_encaps_signals_mgr sig_mgr;
	u64 cs_sequence;
	u64 *dram_default_hops;
	spinlock_t cs_lock;
	atomic64_t dram_phys_mem;
	atomic_t thread_ctx_switch_token;
	u32 thread_ctx_switch_wait_token;
	u32 asid;
	u32 handle;
};
/**
 * struct hl_ctx_mgr - for handling multiple contexts.
 * @ctx_lock: protects ctx_handles.
 * @ctx_handles: idr to hold all ctx handles.
 */
struct hl_ctx_mgr {
	struct mutex ctx_lock;
	struct idr ctx_handles;
};


/*
 * COMMAND SUBMISSIONS
 */

/**
 * struct hl_userptr - memory mapping chunk information
 * @vm_type: type of the VM.
 * @job_node: linked-list node for hanging the object on the Job's list.
 * @pages: pointer to struct page array
 * @npages: size of @pages array
 * @sgt: pointer to the scatter-gather table that holds the pages.
 * @dir: for DMA unmapping, the direction must be supplied, so save it.
 * @debugfs_list: node in debugfs list of command submissions.
 * @pid: the pid of the user process owning the memory
 * @addr: user-space virtual address of the start of the memory area.
 * @size: size of the memory area to pin & map.
 * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise.
 */
struct hl_userptr {
	enum vm_type vm_type; /* must be first */
	struct list_head job_node;
	struct page **pages;
	unsigned int npages;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	struct list_head debugfs_list;
	pid_t pid;
	u64 addr;
	u64 size;
	u8 dma_mapped;
};
1635 * struct hl_cs - command submission.
1636 * @jobs_in_queue_cnt: per each queue, maintain counter of submitted jobs.
1637 * @ctx: the context this CS belongs to.
1638 * @job_list: list of the CS's jobs in the various queues.
1639 * @job_lock: spinlock for the CS's jobs list. Needed for free_job.
1640 * @refcount: reference counter for usage of the CS.
1641 * @fence: pointer to the fence object of this CS.
1642 * @signal_fence: pointer to the fence object of the signal CS (used by wait
1644 * @finish_work: workqueue object to run when CS is completed by H/W.
1645 * @work_tdr: delayed work node for TDR.
1646 * @mirror_node : node in device mirror list of command submissions.
1647 * @staged_cs_node: node in the staged cs list.
1648 * @debugfs_list: node in debugfs list of command submissions.
1649 * @encaps_sig_hdl: holds the encaps signals handle.
1650 * @sequence: the sequence number of this CS.
1651 * @staged_sequence: the sequence of the staged submission this CS is part of,
1652 * relevant only if staged_cs is set.
1653 * @timeout_jiffies: cs timeout in jiffies.
1654 * @submission_time_jiffies: submission time of the cs
1656 * @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
1657 * @sob_addr_offset: sob offset from the configuration base address.
1658 * @initial_sob_count: count of completed signals in SOB before current submission of signal or
1659 * cs with encaps signals.
1660 * @submitted: true if CS was submitted to H/W.
1661 * @completed: true if CS was completed by device.
1662 * @timedout: true if CS has timed out.
1663 * @tdr_active: true if TDR was activated for this CS (to prevent
1664 * double TDR activation).
1665 * @aborted: true if CS was aborted due to some device error.
1666 * @timestamp: true if a timestamp must be captured upon completion.
1667 * @staged_last: true if this is the last staged CS and needs completion.
1668 * @staged_first: true if this is the first staged CS and we need to receive
1669 * timeout for this CS.
1670 * @staged_cs: true if this CS is part of a staged submission.
1671 * @skip_reset_on_timeout: true if we shall not reset the device in case
1672 * timeout occurs (debug scenario).
1673 * @encaps_signals: true if this CS has encaps reserved signals.
1676 u16 *jobs_in_queue_cnt;
1678 struct list_head job_list;
1679 spinlock_t job_lock;
1680 struct kref refcount;
1681 struct hl_fence *fence;
1682 struct hl_fence *signal_fence;
1683 struct work_struct finish_work;
1684 struct delayed_work work_tdr;
1685 struct list_head mirror_node;
1686 struct list_head staged_cs_node;
1687 struct list_head debugfs_list;
1688 struct hl_cs_encaps_sig_handle *encaps_sig_hdl;
1690 u64 staged_sequence;
1691 u64 timeout_jiffies;
1692 u64 submission_time_jiffies;
1693 enum hl_cs_type type;
1694 u32 encaps_sig_hdl_id;
1695 u32 sob_addr_offset;
1696 u16 initial_sob_count;
1706 u8 skip_reset_on_timeout;
1711 * struct hl_cs_job - command submission job.
1712 * @cs_node: the node to hang on the CS jobs list.
1713 * @cs: the CS this job belongs to.
1714 * @user_cb: the CB we got from the user.
1715 * @patched_cb: in case of patching, this is internal CB which is submitted on
1716 * the queue instead of the CB we got from the IOCTL.
1717 * @finish_work: workqueue object to run when job is completed.
1718 * @userptr_list: linked-list of userptr mappings that belong to this job and
1719 * wait for completion.
1720 * @debugfs_list: node in debugfs list of command submission jobs.
1721 * @refcount: reference counter for usage of the CS job.
1722 * @queue_type: the type of the H/W queue this job is submitted to.
1723 * @id: the id of this job inside a CS.
1724 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1725 * @user_cb_size: the actual size of the CB we got from the user.
1726 * @job_cb_size: the actual size of the CB that we put on the queue.
1727 * @encaps_sig_wait_offset: encapsulated signals offset, which allows the user
1728 * to wait on part of the reserved signals.
1729 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1730 * handle to a kernel-allocated CB object, false
1731 * otherwise (SRAM/DRAM/host address).
1732 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1733 * info is needed later, when adding the 2xMSG_PROT at the
1734 * end of the JOB, to know which barriers to put in the
1735 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1736 * have streams so the engine can't be busy by another
1740 struct list_head cs_node;
1742 struct hl_cb *user_cb;
1743 struct hl_cb *patched_cb;
1744 struct work_struct finish_work;
1745 struct list_head userptr_list;
1746 struct list_head debugfs_list;
1747 struct kref refcount;
1748 enum hl_queue_type queue_type;
1753 u32 encaps_sig_wait_offset;
1754 u8 is_kernel_allocated_cb;
1755 u8 contains_dma_pkt;
1759 * struct hl_cs_parser - command submission parser properties.
1760 * @user_cb: the CB we got from the user.
1761 * @patched_cb: in case of patching, this is internal CB which is submitted on
1762 * the queue instead of the CB we got from the IOCTL.
1763 * @job_userptr_list: linked-list of userptr mappings that belong to the related
1764 * job and wait for completion.
1765 * @cs_sequence: the sequence number of the related CS.
1766 * @queue_type: the type of the H/W queue this job is submitted to.
1767 * @ctx_id: the ID of the context the related CS belongs to.
1768 * @hw_queue_id: the id of the H/W queue this job is submitted to.
1769 * @user_cb_size: the actual size of the CB we got from the user.
1770 * @patched_cb_size: the size of the CB after parsing.
1771 * @job_id: the id of the related job inside the related CS.
1772 * @is_kernel_allocated_cb: true if the CB handle we got from the user holds a
1773 * handle to a kernel-allocated CB object, false
1774 * otherwise (SRAM/DRAM/host address).
1775 * @contains_dma_pkt: whether the JOB contains at least one DMA packet. This
1776 * info is needed later, when adding the 2xMSG_PROT at the
1777 * end of the JOB, to know which barriers to put in the
1778 * MSG_PROT packets. Relevant only for GAUDI as GOYA doesn't
1779 * have streams so the engine can't be busy by another
1781 * @completion: true if we need completion for this CS.
1783 struct hl_cs_parser {
1784 struct hl_cb *user_cb;
1785 struct hl_cb *patched_cb;
1786 struct list_head *job_userptr_list;
1788 enum hl_queue_type queue_type;
1792 u32 patched_cb_size;
1794 u8 is_kernel_allocated_cb;
1795 u8 contains_dma_pkt;
1804 * struct hl_vm_hash_node - hash element from virtual address to virtual
1805 * memory area descriptor (hl_vm_phys_pg_pack or
1807 * @node: node to hang on the hash table in context object.
1808 * @vaddr: key virtual address.
1809 * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr).
1811 struct hl_vm_hash_node {
1812 struct hlist_node node;
1818 * struct hl_vm_hw_block_list_node - list element from user virtual address to
1820 * @node: node to hang on the list in context object.
1821 * @ctx: the context this node belongs to.
1822 * @vaddr: virtual address of the HW block.
1823 * @size: size of the block.
1824 * @id: HW block id (handle).
1826 struct hl_vm_hw_block_list_node {
1827 struct list_head node;
1829 unsigned long vaddr;
1835 * struct hl_vm_phys_pg_pack - physical page pack.
1836 * @vm_type: describes the type of the virtual area descriptor.
1837 * @pages: the physical page array.
1838 * @npages: num physical pages in the pack.
1839 * @total_size: total size of all the pages in this list.
1840 * @node: used to attach to deletion list that is used when all the allocations are cleared
1841 * at the teardown of the context.
1842 * @mapping_cnt: number of shared mappings.
1843 * @exporting_cnt: number of active dma-buf exports.
1844 * @asid: the context related to this list.
1845 * @page_size: size of each page in the pack.
1846 * @flags: HL_MEM_* flags related to this list.
1847 * @handle: the provided handle related to this list.
1848 * @offset: offset from the first page.
1849 * @contiguous: is contiguous physical memory.
1850 * @created_from_userptr: true if created from a host virtual address (userptr).
1852 struct hl_vm_phys_pg_pack {
1853 enum vm_type vm_type; /* must be first */
1857 struct list_head node;
1858 atomic_t mapping_cnt;
1866 u8 created_from_userptr;
1870 * struct hl_vm_va_block - virtual range block information.
1871 * @node: node to hang on the virtual range list in context object.
1872 * @start: virtual range start address.
1873 * @end: virtual range end address.
1874 * @size: virtual range size.
1876 struct hl_vm_va_block {
1877 struct list_head node;
1884 * struct hl_vm - virtual memory manager for MMU.
1885 * @dram_pg_pool: pool for DRAM physical pages of 2MB.
1886 * @dram_pg_pool_refcount: reference counter for the pool usage.
1887 * @idr_lock: protects phys_pg_pack_handles.
1888 * @phys_pg_pack_handles: idr to hold all device allocations handles.
1889 * @init_done: whether initialization was done. We need this because VM
1890 * initialization might be skipped during device initialization.
1893 struct gen_pool *dram_pg_pool;
1894 struct kref dram_pg_pool_refcount;
1895 spinlock_t idr_lock;
1896 struct idr phys_pg_pack_handles;
1902 * DEBUG, PROFILING STRUCTURE
1906 * struct hl_debug_params - Coresight debug parameters.
1907 * @input: pointer to component specific input parameters.
1908 * @output: pointer to component specific output parameters.
1909 * @output_size: size of output buffer.
1910 * @reg_idx: relevant register ID.
1911 * @op: component operation to execute.
1912 * @enable: true to enable component debugging, false otherwise.
1914 struct hl_debug_params {
1924 * FILE PRIVATE STRUCTURE
1928 * struct hl_fpriv - process information stored in FD private data.
1929 * @hdev: habanalabs device structure.
1930 * @filp: pointer to the given file structure.
1931 * @taskpid: current process ID.
1932 * @ctx: current executing context. TODO: remove for multiple ctx per process
1933 * @ctx_mgr: context manager to handle multiple context for this FD.
1934 * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
1935 * @ts_mem_mgr: timestamp registration manager for alloc/free/map timestamp buffers.
1936 * @debugfs_list: list of relevant ASIC debugfs.
1937 * @dev_node: node in the device list of file private data
1938 * @refcount: number of related contexts.
1939 * @restore_phase_mutex: lock for context switch and restore phase.
1942 struct hl_device *hdev;
1944 struct pid *taskpid;
1946 struct hl_ctx_mgr ctx_mgr;
1947 struct hl_cb_mgr cb_mgr;
1948 struct hl_ts_mgr ts_mem_mgr;
1949 struct list_head debugfs_list;
1950 struct list_head dev_node;
1951 struct kref refcount;
1952 struct mutex restore_phase_mutex;
1961 * struct hl_info_list - debugfs file ops.
1963 * @show: function to output information.
1964 * @write: function to write to the file.
1966 struct hl_info_list {
1968 int (*show)(struct seq_file *s, void *data);
1969 ssize_t (*write)(struct file *file, const char __user *buf,
1970 size_t count, loff_t *f_pos);
1974 * struct hl_debugfs_entry - debugfs dentry wrapper.
1975 * @info_ent: dentry related ops.
1976 * @dev_entry: ASIC specific debugfs manager.
1978 struct hl_debugfs_entry {
1979 const struct hl_info_list *info_ent;
1980 struct hl_dbg_device_entry *dev_entry;
1984 * struct hl_dbg_device_entry - ASIC specific debugfs manager.
1985 * @root: root dentry.
1986 * @hdev: habanalabs device structure.
1987 * @entry_arr: array of available hl_debugfs_entry.
1988 * @file_list: list of available debugfs files.
1989 * @file_mutex: protects file_list.
1990 * @cb_list: list of available CBs.
1991 * @cb_spinlock: protects cb_list.
1992 * @cs_list: list of available CSs.
1993 * @cs_spinlock: protects cs_list.
1994 * @cs_job_list: list of available CB jobs.
1995 * @cs_job_spinlock: protects cs_job_list.
1996 * @userptr_list: list of available userptrs (virtual memory chunk descriptor).
1997 * @userptr_spinlock: protects userptr_list.
1998 * @ctx_mem_hash_list: list of available contexts with MMU mappings.
1999 * @ctx_mem_hash_spinlock: protects ctx_mem_hash_list.
2000 * @blob_desc: descriptor of blob
2001 * @state_dump: data of the system states in case of a bad cs.
2002 * @state_dump_sem: protects state_dump.
2003 * @addr: next address to read/write from/to in read/write32.
2004 * @mmu_addr: next virtual address to translate to physical address in mmu_show.
2005 * @userptr_lookup: the target user ptr to look up for on demand.
2006 * @mmu_asid: ASID to use while translating in mmu_show.
2007 * @state_dump_head: index of the latest state dump
2008 * @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
2009 * @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
2010 * @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
2011 * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
2013 struct hl_dbg_device_entry {
2014 struct dentry *root;
2015 struct hl_device *hdev;
2016 struct hl_debugfs_entry *entry_arr;
2017 struct list_head file_list;
2018 struct mutex file_mutex;
2019 struct list_head cb_list;
2020 spinlock_t cb_spinlock;
2021 struct list_head cs_list;
2022 spinlock_t cs_spinlock;
2023 struct list_head cs_job_list;
2024 spinlock_t cs_job_spinlock;
2025 struct list_head userptr_list;
2026 spinlock_t userptr_spinlock;
2027 struct list_head ctx_mem_hash_list;
2028 spinlock_t ctx_mem_hash_spinlock;
2029 struct debugfs_blob_wrapper blob_desc;
2030 char *state_dump[HL_STATE_DUMP_HIST_LEN];
2031 struct rw_semaphore state_dump_sem;
2036 u32 state_dump_head;
2044 * struct hl_hw_obj_name_entry - single hw object name, member of
2045 * hl_state_dump_specs
2046 * @node: link to the containing hash table
2047 * @name: hw object name
2048 * @id: object identifier
2050 struct hl_hw_obj_name_entry {
2051 struct hlist_node node;
2056 enum hl_state_dump_specs_props {
2057 SP_SYNC_OBJ_BASE_ADDR,
2058 SP_NEXT_SYNC_OBJ_ADDR,
2060 SP_MON_OBJ_WR_ADDR_LOW,
2061 SP_MON_OBJ_WR_ADDR_HIGH,
2063 SP_MON_OBJ_ARM_DATA,
2074 SP_DMA_QUEUES_OFFSET,
2075 SP_NUM_OF_MME_ENGINES,
2077 SP_NUM_OF_DMA_ENGINES,
2078 SP_NUM_OF_TPC_ENGINES,
2079 SP_ENGINE_NUM_OF_QUEUES,
2080 SP_ENGINE_NUM_OF_STREAMS,
2081 SP_ENGINE_NUM_OF_FENCES,
2082 SP_FENCE0_CNT_OFFSET,
2083 SP_FENCE0_RDATA_OFFSET,
2090 enum hl_sync_engine_type {
2097 * struct hl_mon_state_dump - represents a state dump of a single monitor
2099 * @wr_addr_low: address monitor will write to, low bits
2100 * @wr_addr_high: address monitor will write to, high bits
2101 * @wr_data: data monitor will write
2102 * @arm_data: register value containing monitor configuration
2103 * @status: monitor status
2105 struct hl_mon_state_dump {
2115 * struct hl_sync_to_engine_map_entry - sync object id to engine mapping entry
2116 * @engine_type: type of the engine
2117 * @engine_id: id of the engine
2118 * @sync_id: id of the sync object
2120 struct hl_sync_to_engine_map_entry {
2121 struct hlist_node node;
2122 enum hl_sync_engine_type engine_type;
2128 * struct hl_sync_to_engine_map - maps sync object id to associated engine id
2129 * @tb: hash table containing the mapping, each element is of type
2130 * struct hl_sync_to_engine_map_entry
2132 struct hl_sync_to_engine_map {
2133 DECLARE_HASHTABLE(tb, SYNC_TO_ENGINE_HASH_TABLE_BITS);
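/*
 * Illustrative sketch (not part of the driver): filling and probing the map
 * with the generic <linux/hashtable.h> helpers, keyed by sync object id.
 * The ENGINE_TPC value and the entry allocation below are assumptions.
 *
 *	struct hl_sync_to_engine_map_entry *entry;
 *
 *	hash_init(map->tb);
 *
 *	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 *	if (!entry)
 *		return -ENOMEM;
 *	entry->engine_type = ENGINE_TPC;
 *	entry->engine_id = 0;
 *	entry->sync_id = sync_id;
 *	hash_add(map->tb, &entry->node, sync_id);
 *
 *	hash_for_each_possible(map->tb, entry, node, sync_id)
 *		if (entry->sync_id == sync_id)
 *			break;
 */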
2137 * struct hl_state_dump_specs_funcs - virtual functions used by the state dump
2138 * @gen_sync_to_engine_map: generate a hash map from sync obj id to its engine
2139 * @print_single_monitor: format monitor data as string
2140 * @monitor_valid: return true if given monitor dump is valid
2141 * @print_fences_single_engine: format fences data as string
2143 struct hl_state_dump_specs_funcs {
2144 int (*gen_sync_to_engine_map)(struct hl_device *hdev,
2145 struct hl_sync_to_engine_map *map);
2146 int (*print_single_monitor)(char **buf, size_t *size, size_t *offset,
2147 struct hl_device *hdev,
2148 struct hl_mon_state_dump *mon);
2149 int (*monitor_valid)(struct hl_mon_state_dump *mon);
2150 int (*print_fences_single_engine)(struct hl_device *hdev,
2152 u64 status_base_offset,
2153 enum hl_sync_engine_type engine_type,
2154 u32 engine_id, char **buf,
2155 size_t *size, size_t *offset);
2159 * struct hl_state_dump_specs - defines ASIC known hw objects names
2160 * @so_id_to_str_tb: sync objects names index table
2161 * @monitor_id_to_str_tb: monitors names index table
2162 * @funcs: virtual functions used for state dump
2163 * @sync_namager_names: readable names for sync manager if available (ex: N_E)
2164 * @props: pointer to a per asic const props array required for state dump
2166 struct hl_state_dump_specs {
2167 DECLARE_HASHTABLE(so_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2168 DECLARE_HASHTABLE(monitor_id_to_str_tb, OBJ_NAMES_HASH_TABLE_BITS);
2169 struct hl_state_dump_specs_funcs funcs;
2170 const char * const *sync_namager_names;
2179 #define HL_STR_MAX 32
2181 #define HL_DEV_STS_MAX (HL_DEVICE_STATUS_LAST + 1)
2183 /* Theoretical limit only. A single host can only contain up to 4 or 8 PCIe
2184 * x16 cards. In extreme cases, there are hosts that can accommodate 16 cards.
2186 #define HL_MAX_MINORS 256
2189 * Registers read & write functions.
2192 u32 hl_rreg(struct hl_device *hdev, u32 reg);
2193 void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
2195 #define RREG32(reg) hdev->asic_funcs->rreg(hdev, (reg))
2196 #define WREG32(reg, v) hdev->asic_funcs->wreg(hdev, (reg), (v))
2197 #define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \
2198 hdev->asic_funcs->rreg(hdev, (reg)))
2200 #define WREG32_P(reg, val, mask) \
2202 u32 tmp_ = RREG32(reg); \
2204 tmp_ |= ((val) & ~(mask)); \
2205 WREG32(reg, tmp_); \
2207 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
2208 #define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
2210 #define RMWREG32(reg, val, mask) \
2212 u32 tmp_ = RREG32(reg); \
2214 tmp_ |= ((val) << __ffs(mask)); \
2215 WREG32(reg, tmp_); \
2218 #define RREG32_MASK(reg, mask) ((RREG32(reg) & mask) >> __ffs(mask))
2220 #define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT
2221 #define REG_FIELD_MASK(reg, field) reg##_##field##_MASK
2222 #define WREG32_FIELD(reg, offset, field, val) \
2223 WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & \
2224 ~REG_FIELD_MASK(reg, field)) | \
2225 (val) << REG_FIELD_SHIFT(reg, field))
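/*
 * Illustrative sketch (not part of the driver): a read-modify-write of a
 * single register field. mmFOO_CFG and its MODE field are hypothetical
 * placeholders for the ASIC-generated defines.
 *
 *	RMWREG32(mmFOO_CFG, 3, mmFOO_CFG_MODE_MASK);
 *
 * reads mmFOO_CFG, clears the MODE field, shifts 3 into the field position
 * via __ffs() of the mask and writes the result back, while
 *
 *	WREG32_FIELD(FOO_CFG, 0, MODE, 3);
 *
 * builds the same pattern from the FOO_CFG_MODE_SHIFT/MASK defines.
 */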
2227 /* Timeout should be longer when working with the simulator, but cap the
2228 * increased timeout to some maximum
2230 #define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
2232 ktime_t __timeout; \
2234 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2236 __timeout = ktime_add_us(ktime_get(),\
2237 min((u64)(timeout_us * 10), \
2238 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2239 might_sleep_if(sleep_us); \
2241 (val) = RREG32(addr); \
2244 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2245 (val) = RREG32(addr); \
2249 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2251 (cond) ? 0 : -ETIMEDOUT; \
2255 * address in this macro always points to a memory location in the
2256 * host's (server's) memory. That location is updated asynchronously
2257 * either by the direct access of the device or by another core.
2259 * To work both in LE and BE architectures, we need to distinguish between the
2260 * two states (device or another core updates the memory location). Therefore,
2261 * if mem_written_by_device is true, the host memory being polled will be
2262 * updated directly by the device. If false, the host memory being polled will
2263 * be updated by host CPU. Required so host knows whether or not the memory
2264 * might need to be byte-swapped before returning value to caller.
2266 #define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us, \
2267 mem_written_by_device) \
2269 ktime_t __timeout; \
2271 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2273 __timeout = ktime_add_us(ktime_get(),\
2274 min((u64)(timeout_us * 10), \
2275 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2276 might_sleep_if(sleep_us); \
2278 /* Verify we read updates done by other cores or by device */ \
2280 (val) = *((u32 *)(addr)); \
2281 if (mem_written_by_device) \
2282 (val) = le32_to_cpu(*(__le32 *) &(val)); \
2285 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2286 (val) = *((u32 *)(addr)); \
2287 if (mem_written_by_device) \
2288 (val) = le32_to_cpu(*(__le32 *) &(val)); \
2292 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2294 (cond) ? 0 : -ETIMEDOUT; \
2297 #define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
2300 ktime_t __timeout; \
2302 __timeout = ktime_add_us(ktime_get(), timeout_us); \
2304 __timeout = ktime_add_us(ktime_get(),\
2305 min((u64)(timeout_us * 10), \
2306 (u64) HL_SIM_MAX_TIMEOUT_US)); \
2307 might_sleep_if(sleep_us); \
2309 (val) = readl(addr); \
2312 if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
2313 (val) = readl(addr); \
2317 usleep_range((sleep_us >> 2) + 1, sleep_us); \
2319 (cond) ? 0 : -ETIMEDOUT; \
2322 struct hwmon_chip_info;
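/*
 * Illustrative sketch (not part of the driver): typical use of the polling
 * macros above. mmFOO_STATUS, FOO_STATUS_RDY, fence_ptr and FENCE_DONE are
 * hypothetical placeholders.
 *
 *	u32 status, tmp;
 *	int rc;
 *
 *	(wait up to 1s, polling every 100us, for a ready bit)
 *	rc = hl_poll_timeout(hdev, mmFOO_STATUS, status,
 *				status & FOO_STATUS_RDY, 100, 1000000);
 *
 *	(poll a host-memory fence word written directly by the device,
 *	 letting the macro handle the little-endian conversion)
 *	rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == FENCE_DONE),
 *				1000, HL_DEVICE_TIMEOUT_USEC, true);
 *	if (rc == -ETIMEDOUT)
 *		dev_err(hdev->dev, "fence was not signaled in time\n");
 */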
2325 * struct hl_device_reset_work - reset workqueue task wrapper.
2326 * @wq: work queue for device reset procedure.
2327 * @reset_work: reset work to be done.
2328 * @hdev: habanalabs device structure.
2329 * @flags: reset flags.
2331 struct hl_device_reset_work {
2332 struct workqueue_struct *wq;
2333 struct delayed_work reset_work;
2334 struct hl_device *hdev;
2339 * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
2341 * @virt_addr: the virtual address of the hop.
2342 * @phys_addr: the physical address of the hop (used by the device-mmu).
2343 * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
2345 struct hr_mmu_hop_addrs {
2352 * struct hl_mmu_hr_priv - used for holding per-device mmu host-resident
2353 * page-table internal information.
2354 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2355 * @mmu_shadow_hop0: shadow array of hop0 tables.
2357 struct hl_mmu_hr_priv {
2358 struct gen_pool *mmu_pgt_pool;
2359 struct hr_mmu_hop_addrs *mmu_shadow_hop0;
2363 * struct hl_mmu_dr_priv - used for holding per-device mmu device-resident
2364 * page-table internal information.
2365 * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
2366 * @mmu_shadow_hop0: shadow array of hop0 tables.
2368 struct hl_mmu_dr_priv {
2369 struct gen_pool *mmu_pgt_pool;
2370 void *mmu_shadow_hop0;
2374 * struct hl_mmu_priv - used for holding per-device mmu internal information.
2375 * @dr: information on the device-resident MMU, when exists.
2376 * @hr: information on the host-resident MMU, when exists.
2378 struct hl_mmu_priv {
2379 struct hl_mmu_dr_priv dr;
2380 struct hl_mmu_hr_priv hr;
2384 * struct hl_mmu_per_hop_info - A structure describing one TLB HOP and its entry
2385 * that was created in order to translate a virtual address to a
2387 * @hop_addr: The address of the hop.
2388 * @hop_pte_addr: The address of the hop entry.
2389 * @hop_pte_val: The value in the hop entry.
2391 struct hl_mmu_per_hop_info {
2398 * struct hl_mmu_hop_info - A structure describing the TLB hops and their
2399 * hop-entries that were created in order to translate a virtual address to a
2401 * @scrambled_vaddr: The value of the virtual address after scrambling. This
2402 * address replaces the original virtual-address when mapped
2403 * in the MMU tables.
2404 * @unscrambled_paddr: The un-scrambled physical address.
2405 * @hop_info: Array holding the per-hop information used for the translation.
2406 * @used_hops: The number of hops used for the translation.
2407 * @range_type: virtual address range type.
2409 struct hl_mmu_hop_info {
2410 u64 scrambled_vaddr;
2411 u64 unscrambled_paddr;
2412 struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
2414 enum hl_va_range_type range_type;
2418 * struct hl_mmu_funcs - Device related MMU functions.
2419 * @init: initialize the MMU module.
2420 * @fini: release the MMU module.
2421 * @ctx_init: Initialize a context for using the MMU module.
2422 * @ctx_fini: disable a ctx from using the mmu module.
2423 * @map: maps a virtual address to physical address for a context.
2424 * @unmap: unmap a virtual address of a context.
2425 * @flush: flush all writes from all cores to reach device MMU.
2426 * @swap_out: marks all mapping of the given context as swapped out.
2427 * @swap_in: marks all mapping of the given context as swapped in.
2428 * @get_tlb_info: returns the list of hops and hop-entries used that were
2429 * created in order to translate the given virtual address to a
2432 struct hl_mmu_funcs {
2433 int (*init)(struct hl_device *hdev);
2434 void (*fini)(struct hl_device *hdev);
2435 int (*ctx_init)(struct hl_ctx *ctx);
2436 void (*ctx_fini)(struct hl_ctx *ctx);
2437 int (*map)(struct hl_ctx *ctx,
2438 u64 virt_addr, u64 phys_addr, u32 page_size,
2440 int (*unmap)(struct hl_ctx *ctx,
2441 u64 virt_addr, bool is_dram_addr);
2442 void (*flush)(struct hl_ctx *ctx);
2443 void (*swap_out)(struct hl_ctx *ctx);
2444 void (*swap_in)(struct hl_ctx *ctx);
2445 int (*get_tlb_info)(struct hl_ctx *ctx,
2446 u64 virt_addr, struct hl_mmu_hop_info *hops);
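/*
 * Illustrative sketch (not part of the driver): the MMU core dispatches
 * through the per-location function table, e.g. for a device-resident
 * page-table. The surrounding locking and error handling are omitted and
 * the arguments are placeholders.
 *
 *	struct hl_device *hdev = ctx->hdev;
 *	int rc;
 *
 *	rc = hdev->mmu_func[MMU_DR_PGT].map(ctx, virt_addr, phys_addr,
 *				page_size, is_dram_addr);
 *	if (!rc)
 *		hdev->mmu_func[MMU_DR_PGT].flush(ctx);
 */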
2450 * number of user contexts allowed to call wait_for_multi_cs ioctl in
2453 #define MULTI_CS_MAX_USER_CTX 2
2456 * struct multi_cs_completion - multi CS wait completion.
2457 * @completion: completion of any of the CS in the list
2458 * @lock: spinlock for the completion structure
2459 * @timestamp: timestamp for the multi-CS completion
2460 * @stream_master_qid_map: bitmap of all stream masters on which the multi-CS
2462 * @used: 1 if in use, otherwise 0
2464 struct multi_cs_completion {
2465 struct completion completion;
2468 u32 stream_master_qid_map;
2473 * struct multi_cs_data - internal data for multi CS call
2474 * @ctx: pointer to the context structure
2475 * @fence_arr: array of fences of all CSs
2476 * @seq_arr: array of CS sequence numbers
2477 * @timeout_jiffies: timeout in jiffies for waiting for CS to complete
2478 * @timestamp: timestamp of first completed CS
2479 * @wait_status: wait for CS status
2480 * @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
2481 * @arr_len: fence_arr and seq_arr array length
2482 * @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
2483 * @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
2485 struct multi_cs_data {
2487 struct hl_fence **fence_arr;
2489 s64 timeout_jiffies;
2492 u32 completion_bitmap;
2499 * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp
2500 * @start: timestamp taken when 'start' event is received in driver
2501 * @end: timestamp taken when 'end' event is received in driver
2503 struct hl_clk_throttle_timestamp {
2509 * struct hl_clk_throttle - keeps current/last clock throttling timestamps
2510 * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER,
2511 * index 1 refers to THERMAL
2512 * @lock: protects this structure as it can be accessed from both event queue
2513 * context and info_ioctl context
2514 * @current_reason: bitmask represents the current clk throttling reasons
2515 * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load
2517 struct hl_clk_throttle {
2518 struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX];
2521 u32 aggregated_reason;
2525 * struct last_error_session_info - info about last session in which CS timeout or
2526 * razwi error occurred.
2527 * @open_dev_timestamp: device open timestamp.
2528 * @cs_timeout_timestamp: CS timeout timestamp.
2529 * @razwi_timestamp: razwi timestamp.
2530 * @cs_write_disable: if set, writing to CS parameters in the structure is disabled so the
2531 * first (root cause) CS timeout will not be overwritten.
2532 * @razwi_write_disable: if set, writing to razwi parameters in the structure is disabled so the
2533 * first (root cause) razwi will not be overwritten.
2534 * @cs_timeout_seq: CS timeout sequence number.
2535 * @razwi_addr: address that caused razwi.
2536 * @razwi_engine_id_1: engine id of the razwi initiator; if it was initiated by an engine that
2537 * does not have an engine id, it will be set to U16_MAX.
2538 * @razwi_engine_id_2: second engine id of razwi initiator. A razwi may have 2 possible
2539 * initiating engines, one of which caused it. In that case, this will contain the
2540 * second possible engine id, otherwise it will be set to U16_MAX.
2541 * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id.
2542 * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
2544 struct last_error_session_info {
2545 ktime_t open_dev_timestamp;
2546 ktime_t cs_timeout_timestamp;
2547 ktime_t razwi_timestamp;
2548 atomic_t cs_write_disable;
2549 atomic_t razwi_write_disable;
2552 u16 razwi_engine_id_1;
2553 u16 razwi_engine_id_2;
2554 u8 razwi_non_engine_initiator;
2559 * struct hl_reset_info - holds current device reset information.
2560 * @lock: lock to protect critical reset flows.
2561 * @soft_reset_cnt: number of soft reset since the driver was loaded.
2562 * @hard_reset_cnt: number of hard reset since the driver was loaded.
2563 * @hard_reset_schedule_flags: hard reset is scheduled to after current soft reset,
2564 * here we hold the hard reset flags.
2565 * @in_reset: is device in reset flow.
2566 * @is_in_soft_reset: Device is currently in soft reset process.
2567 * @needs_reset: true if reset_on_lockup is false and device should be reset
2569 * @hard_reset_pending: is there a hard reset work pending.
2570 * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
2571 * triggered, and cleared after it is shared with preboot.
2572 * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
2573 * with a new value on next reset
2574 * @reset_trigger_repeated: set if device reset is triggered more than once with
2576 * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
2579 struct hl_reset_info {
2583 u32 hard_reset_schedule_flags;
2585 u8 is_in_soft_reset;
2587 u8 hard_reset_pending;
2589 u8 curr_reset_cause;
2590 u8 prev_reset_trigger;
2591 u8 reset_trigger_repeated;
2593 u8 skip_reset_on_timeout;
2597 * struct hl_device - habanalabs device structure.
2598 * @pdev: pointer to PCI device, can be NULL in case of simulator device.
2599 * @pcie_bar_phys: array of available PCIe bars physical addresses.
2600 * (required only for PCI address match mode)
2601 * @pcie_bar: array of available PCIe bars virtual addresses.
2602 * @rmmio: configuration area address on SRAM.
2603 * @cdev: related char device.
2604 * @cdev_ctrl: char device for control operations only (INFO IOCTL)
2605 * @dev: related kernel basic device structure.
2606 * @dev_ctrl: related kernel device structure for the control device
2607 * @work_heartbeat: delayed work for CPU-CP is-alive check.
2608 * @device_reset_work: delayed work which performs hard reset
2609 * @asic_name: ASIC specific name.
2610 * @asic_type: ASIC specific type.
2611 * @completion_queue: array of hl_cq.
2612 * @user_interrupt: array of hl_user_interrupt. Upon the corresponding user
2613 * interrupt, driver will monitor the list of fences
2614 * registered to this interrupt.
2615 * @common_user_interrupt: common user interrupt for all user interrupts.
2616 * Upon any user interrupt, driver will monitor the
2617 * list of fences registered to this common structure.
2618 * @cq_wq: work queues of completion queues for executing work in process
2620 * @eq_wq: work queue of event queue for executing work in process context.
2621 * @ts_free_obj_wq: work queue for timestamp registration objects release.
2622 * @kernel_ctx: Kernel driver context structure.
2623 * @kernel_queues: array of hl_hw_queue.
2624 * @cs_mirror_list: CS mirror list for TDR.
2625 * @cs_mirror_lock: protects cs_mirror_list.
2626 * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
2627 * @event_queue: event queue for IRQ from CPU-CP.
2628 * @dma_pool: DMA pool for small allocations.
2629 * @cpu_accessible_dma_mem: Host <-> CPU-CP shared memory CPU address.
2630 * @cpu_accessible_dma_address: Host <-> CPU-CP shared memory DMA address.
2631 * @cpu_accessible_dma_pool: Host <-> CPU-CP shared memory pool.
2632 * @asid_bitmap: holds used/available ASIDs.
2633 * @asid_mutex: protects asid_bitmap.
2634 * @send_cpu_message_lock: enforces only one message in Host <-> CPU-CP queue.
2635 * @debug_lock: protects critical section of setting debug mode for device
2636 * @asic_prop: ASIC specific immutable properties.
2637 * @asic_funcs: ASIC specific functions.
2638 * @asic_specific: ASIC specific information to use only from ASIC files.
2639 * @vm: virtual memory manager for MMU.
2640 * @hwmon_dev: H/W monitor device.
2641 * @hl_chip_info: ASIC's sensors information.
2642 * @device_status_description: device status description.
2643 * @hl_debugfs: device's debugfs manager.
2644 * @cb_pool: list of preallocated CBs.
2645 * @cb_pool_lock: protects the CB pool.
2646 * @internal_cb_pool_virt_addr: internal command buffer pool virtual address.
2647 * @internal_cb_pool_dma_addr: internal command buffer pool dma address.
2648 * @internal_cb_pool: internal command buffer memory pool.
2649 * @internal_cb_va_base: internal cb pool mmu virtual address base
2650 * @fpriv_list: list of file private data structures. Each structure is created
2651 * when a user opens the device
2652 * @fpriv_ctrl_list: list of file private data structures. Each structure is created
2653 * when a user opens the control device
2654 * @fpriv_list_lock: protects the fpriv_list
2655 * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
2656 * @aggregated_cs_counters: aggregated cs counters among all contexts
2657 * @mmu_priv: device-specific MMU data.
2658 * @mmu_func: device-related MMU functions.
2659 * @fw_loader: FW loader manager.
2660 * @pci_mem_region: array of memory regions in the PCI
2661 * @state_dump_specs: constants and dictionaries needed to dump system state.
2662 * @multi_cs_completion: array of multi-CS completion.
2663 * @clk_throttling: holds information about current/previous clock throttling events
2664 * @reset_info: holds current device reset information.
2665 * @last_error: holds information about last session in which CS timeout or razwi error occurred.
2666 * @stream_master_qid_arr: pointer to array with QIDs of master streams.
2667 * @dram_used_mem: current DRAM memory consumption.
2668 * @timeout_jiffies: device CS timeout value.
2669 * @max_power: the max power of the device, as configured by the sysadmin. This
2670 * value is saved so in case of hard-reset, the driver will restore
2671 * this value and update the F/W after the re-initialization
2672 * @boot_error_status_mask: contains a mask of the device boot error status.
2673 * Each bit represents a different error, according to
2674 * the defines in hl_boot_if.h. If the bit is cleared,
2675 * the error will be ignored by the driver during
2676 * device initialization. Mainly used to debug and
2677 * work around firmware bugs
2678 * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
2679 * @last_successful_open_ktime: timestamp (ktime) of the last successful device open.
2680 * @last_successful_open_jif: timestamp (jiffies) of the last successful
2682 * @last_open_session_duration_jif: duration (jiffies) of the last device open
2684 * @open_counter: number of successful device open operations.
2685 * @fw_poll_interval_usec: FW status poll interval in usec.
2686 * @card_type: Various ASICs have several card types. This indicates the card
2687 * type of the current device.
2688 * @major: habanalabs kernel driver major.
2689 * @high_pll: high PLL profile frequency.
2690 * @id: device minor.
2691 * @id_control: minor of the control device
2692 * @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
2694 * @disabled: is device disabled.
2695 * @late_init_done: true if the late init stage was done during initialization.
2696 * @hwmon_initialized: true if the H/W monitor sensors were initialized.
2697 * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
2698 * @reset_on_lockup: true if a reset should be done in case of stuck CS, false
2700 * @dram_default_page_mapping: is DRAM default page mapping enabled.
2701 * @memory_scrub: true to perform device memory scrub in various locations,
2702 * such as context-switch, context close, page free, etc.
2703 * @pmmu_huge_range: is a different virtual address range used for PMMU with
2705 * @init_done: is the initialization of the device done.
2706 * @device_cpu_disabled: is the device CPU disabled (due to timeouts)
2707 * @dma_mask: the dma mask that was set for this device
2708 * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
2709 * can be used. This indication is needed because in some ASICs we need to do
2710 * specific operations to enable that infrastructure.
2711 * @cdev_sysfs_created: were char devices and sysfs nodes created.
2712 * @stop_on_err: true if engines should stop on error.
2713 * @supports_sync_stream: is sync stream supported.
2714 * @sync_stream_queue_idx: helper index for sync stream queues initialization.
2715 * @collective_mon_idx: helper index for collective initialization
2716 * @supports_coresight: is CoreSight supported.
2717 * @supports_cb_mapping: is mapping a CB to the device's MMU supported.
2718 * @process_kill_trial_cnt: number of trials reset thread tried killing
2720 * @device_fini_pending: true if device_fini was called and might be
2721 * waiting for the reset thread to finish
2722 * @supports_staged_submission: true if staged submissions are supported
2723 * @device_cpu_is_halted: Flag to indicate whether the device CPU was already
2724 * halted. We can't halt it again because the COMMS
2725 * protocol will throw an error. Relevant only for
2726 * cases where Linux was not loaded to device CPU
2727 * @supports_wait_for_multi_cs: true if wait for multi CS is supported
2728 * @is_compute_ctx_active: Whether there is an active compute context executing.
2731 struct pci_dev *pdev;
2732 u64 pcie_bar_phys[HL_PCI_NUM_BARS];
2733 void __iomem *pcie_bar[HL_PCI_NUM_BARS];
2734 void __iomem *rmmio;
2736 struct cdev cdev_ctrl;
2738 struct device *dev_ctrl;
2739 struct delayed_work work_heartbeat;
2740 struct hl_device_reset_work device_reset_work;
2741 char asic_name[HL_STR_MAX];
2742 char status[HL_DEV_STS_MAX][HL_STR_MAX];
2743 enum hl_asic_type asic_type;
2744 struct hl_cq *completion_queue;
2745 struct hl_user_interrupt *user_interrupt;
2746 struct hl_user_interrupt common_user_interrupt;
2747 struct workqueue_struct **cq_wq;
2748 struct workqueue_struct *eq_wq;
2749 struct workqueue_struct *ts_free_obj_wq;
2750 struct hl_ctx *kernel_ctx;
2751 struct hl_hw_queue *kernel_queues;
2752 struct list_head cs_mirror_list;
2753 spinlock_t cs_mirror_lock;
2754 struct hl_cb_mgr kernel_cb_mgr;
2755 struct hl_eq event_queue;
2756 struct dma_pool *dma_pool;
2757 void *cpu_accessible_dma_mem;
2758 dma_addr_t cpu_accessible_dma_address;
2759 struct gen_pool *cpu_accessible_dma_pool;
2760 unsigned long *asid_bitmap;
2761 struct mutex asid_mutex;
2762 struct mutex send_cpu_message_lock;
2763 struct mutex debug_lock;
2764 struct asic_fixed_properties asic_prop;
2765 const struct hl_asic_funcs *asic_funcs;
2766 void *asic_specific;
2768 struct device *hwmon_dev;
2769 struct hwmon_chip_info *hl_chip_info;
2771 struct hl_dbg_device_entry hl_debugfs;
2773 struct list_head cb_pool;
2774 spinlock_t cb_pool_lock;
2776 void *internal_cb_pool_virt_addr;
2777 dma_addr_t internal_cb_pool_dma_addr;
2778 struct gen_pool *internal_cb_pool;
2779 u64 internal_cb_va_base;
2781 struct list_head fpriv_list;
2782 struct list_head fpriv_ctrl_list;
2783 struct mutex fpriv_list_lock;
2784 struct mutex fpriv_ctrl_list_lock;
2786 struct hl_cs_counters_atomic aggregated_cs_counters;
2788 struct hl_mmu_priv mmu_priv;
2789 struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
2791 struct fw_load_mgr fw_loader;
2793 struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
2795 struct hl_state_dump_specs state_dump_specs;
2797 struct multi_cs_completion multi_cs_completion[
2798 MULTI_CS_MAX_USER_CTX];
2799 struct hl_clk_throttle clk_throttling;
2800 struct last_error_session_info last_error;
2802 struct hl_reset_info reset_info;
2804 u32 *stream_master_qid_arr;
2805 atomic64_t dram_used_mem;
2806 u64 timeout_jiffies;
2808 u64 boot_error_status_mask;
2809 u64 dram_pci_bar_start;
2810 u64 last_successful_open_jif;
2811 u64 last_open_session_duration_jif;
2813 u64 fw_poll_interval_usec;
2814 ktime_t last_successful_open_ktime;
2815 enum cpucp_card_types card_type;
2820 u16 cpu_pci_msb_addr;
2823 u8 hwmon_initialized;
2826 u8 dram_default_page_mapping;
2830 u8 device_cpu_disabled;
2833 u8 cdev_sysfs_created;
2835 u8 supports_sync_stream;
2836 u8 sync_stream_queue_idx;
2837 u8 collective_mon_idx;
2838 u8 supports_coresight;
2839 u8 supports_cb_mapping;
2840 u8 process_kill_trial_cnt;
2841 u8 device_fini_pending;
2842 u8 supports_staged_submission;
2843 u8 device_cpu_is_halted;
2844 u8 supports_wait_for_multi_cs;
2845 u8 stream_master_qid_arr_size;
2846 u8 is_compute_ctx_active;
2848 /* Parameters for bring-up */
2852 u8 mmu_huge_page_opt;
2854 u8 cpu_queues_enable;
2857 u8 sram_scrambler_enable;
2858 u8 dram_scrambler_enable;
2859 u8 hard_reset_on_fw_events;
2862 u8 reset_on_preboot_fail;
2863 u8 reset_upon_device_release;
2864 u8 reset_if_device_not_idle;
2869 * struct hl_cs_encaps_sig_handle - encapsulated signals handle structure
2870 * @refcount: refcount used to protect removing this id when several
2871 * wait cs are used to wait on the reserved encaps signals.
2872 * @hdev: pointer to habanalabs device structure.
2873 * @hw_sob: pointer to H/W SOB used in the reservation.
2874 * @ctx: pointer to the user's context data structure
2875 * @cs_seq: staged cs sequence which contains encapsulated signals
2876 * @id: idr handler id to be used to fetch the handler info
2877 * @q_idx: stream queue index
2878 * @pre_sob_val: current SOB value before reservation
2879 * @count: signals number
2881 struct hl_cs_encaps_sig_handle {
2882 struct kref refcount;
2883 struct hl_device *hdev;
2884 struct hl_hw_sob *hw_sob;
2898 * typedef hl_ioctl_t - typedef for ioctl function in the driver
2899 * @hpriv: pointer to the FD's private data, which contains state of
2901 * @data: pointer to the input/output arguments structure of the IOCTL
2903 * Return: 0 for success, negative value for error
2905 typedef int hl_ioctl_t(struct hl_fpriv *hpriv, void *data);
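/*
 * Illustrative sketch (not part of the driver): a handler matching
 * hl_ioctl_t. hl_foo_args and the handler itself are hypothetical.
 *
 *	static int hl_foo_ioctl(struct hl_fpriv *hpriv, void *data)
 *	{
 *		struct hl_foo_args *args = data;
 *
 *		if (!hl_device_operational(hpriv->hdev, NULL))
 *			return -EBUSY;
 *
 *		args->result = 0;
 *		return 0;
 *	}
 */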
2908 * struct hl_ioctl_desc - describes an IOCTL entry of the driver.
2909 * @cmd: the IOCTL code as created by the kernel macros.
2910 * @func: pointer to the driver's function that should be called for this IOCTL.
2912 struct hl_ioctl_desc {
2919 * Kernel module functions that can be accessed by entire module
2923 * hl_get_sg_info() - get number of pages and the DMA address from SG list.
2925 * @dma_addr: pointer to DMA address to return.
2927 * Calculate the number of consecutive pages described by the SG list. Take the
2928 * offset of the address in the first page, add to it the length and round it up
2929 * to the number of needed pages.
2931 static inline u32 hl_get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr)
2933 *dma_addr = sg_dma_address(sg);
2935 return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) +
2936 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
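/*
 * Illustrative sketch (not part of the driver): with 4 KiB pages, an SG
 * entry of length 0x1800 whose DMA address has an in-page offset of 0xE00
 * yields (0xE00 + 0x1800 + 0xFFF) >> 12 = 3 pages. A caller typically walks
 * the table and programs that many consecutive device pages per entry:
 *
 *	struct scatterlist *sg;
 *	dma_addr_t dma_addr;
 *	u32 npages;
 *	int i;
 *
 *	for_each_sg(sgt->sgl, sg, sgt->nents, i)
 *		npages = hl_get_sg_info(sg, &dma_addr);
 */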
2940 * hl_mem_area_inside_range() - Checks whether address+size are inside a range.
2941 * @address: The start address of the area we want to validate.
2942 * @size: The size in bytes of the area we want to validate.
2943 * @range_start_address: The start address of the valid range.
2944 * @range_end_address: The end address of the valid range.
2946 * Return: true if the area is inside the valid range, false otherwise.
2948 static inline bool hl_mem_area_inside_range(u64 address, u64 size,
2949 u64 range_start_address, u64 range_end_address)
2951 u64 end_address = address + size;
2953 if ((address >= range_start_address) &&
2954 (end_address <= range_end_address) &&
2955 (end_address > address))
2962 * hl_mem_area_crosses_range() - Checks whether address+size crossing a range.
2963 * @address: The start address of the area we want to validate.
2964 * @size: The size in bytes of the area we want to validate.
2965 * @range_start_address: The start address of the valid range.
2966 * @range_end_address: The end address of the valid range.
2968 * Return: true if the area overlaps part or all of the valid range,
2971 static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
2972 u64 range_start_address, u64 range_end_address)
2974 u64 end_address = address + size - 1;
2976 return ((address <= range_end_address) && (range_start_address <= end_address));
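/*
 * Illustrative sketch (not part of the driver): containment and overlap
 * answer different questions when validating a user area. The range
 * variables below are placeholders.
 *
 *	The whole area must fit inside the allowed range:
 *
 *	if (!hl_mem_area_inside_range(addr, size, range_start, range_end))
 *		return -EINVAL;
 *
 *	The area must not touch a reserved range at all:
 *
 *	if (hl_mem_area_crosses_range(addr, size, rsvd_start, rsvd_end))
 *		return -EINVAL;
 */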
2979 int hl_device_open(struct inode *inode, struct file *filp);
2980 int hl_device_open_ctrl(struct inode *inode, struct file *filp);
2981 bool hl_device_operational(struct hl_device *hdev,
2982 enum hl_device_status *status);
2983 enum hl_device_status hl_device_status(struct hl_device *hdev);
2984 int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
2985 int hl_hw_queues_create(struct hl_device *hdev);
2986 void hl_hw_queues_destroy(struct hl_device *hdev);
2987 int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
2988 u32 cb_size, u64 cb_ptr);
2989 void hl_hw_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
2990 u32 ctl, u32 len, u64 ptr);
2991 int hl_hw_queue_schedule_cs(struct hl_cs *cs);
2992 u32 hl_hw_queue_add_ptr(u32 ptr, u16 val);
2993 void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id);
2994 void hl_hw_queue_update_ci(struct hl_cs *cs);
2995 void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset);
2997 #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1)
2998 #define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1))
3000 int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
3001 void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
3002 int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
3003 void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
3004 void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
3005 void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
3006 irqreturn_t hl_irq_handler_cq(int irq, void *arg);
3007 irqreturn_t hl_irq_handler_eq(int irq, void *arg);
3008 irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
3009 irqreturn_t hl_irq_handler_default(int irq, void *arg);
3010 u32 hl_cq_inc_ptr(u32 ptr);
3012 int hl_asid_init(struct hl_device *hdev);
3013 void hl_asid_fini(struct hl_device *hdev);
3014 unsigned long hl_asid_alloc(struct hl_device *hdev);
3015 void hl_asid_free(struct hl_device *hdev, unsigned long asid);
3017 int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv);
3018 void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx);
3019 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
3020 void hl_ctx_do_release(struct kref *ref);
3021 void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
3022 int hl_ctx_put(struct hl_ctx *ctx);
3023 struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
3024 struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
3025 int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
3026 struct hl_fence **fence, u32 arr_len);
3027 void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr);
3028 void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr);
3030 int hl_device_init(struct hl_device *hdev, struct class *hclass);
3031 void hl_device_fini(struct hl_device *hdev);
3032 int hl_device_suspend(struct hl_device *hdev);
3033 int hl_device_resume(struct hl_device *hdev);
3034 int hl_device_reset(struct hl_device *hdev, u32 flags);
3035 void hl_hpriv_get(struct hl_fpriv *hpriv);
3036 int hl_hpriv_put(struct hl_fpriv *hpriv);
3037 int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
3039 int hl_build_hwmon_channel_info(struct hl_device *hdev,
3040 struct cpucp_sensor *sensors_arr);
3042 int hl_sysfs_init(struct hl_device *hdev);
3043 void hl_sysfs_fini(struct hl_device *hdev);
3045 int hl_hwmon_init(struct hl_device *hdev);
3046 void hl_hwmon_fini(struct hl_device *hdev);
3048 int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
3049 struct hl_ctx *ctx, u32 cb_size, bool internal_cb,
3050 bool map_cb, u64 *handle);
3051 int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle);
3052 int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
3053 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
3054 struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr,
3056 void hl_cb_put(struct hl_cb *cb);
3057 void hl_cb_mgr_init(struct hl_cb_mgr *mgr);
3058 void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr);
3059 struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size,
3061 int hl_cb_pool_init(struct hl_device *hdev);
3062 int hl_cb_pool_fini(struct hl_device *hdev);
3063 int hl_cb_va_pool_init(struct hl_ctx *ctx);
3064 void hl_cb_va_pool_fini(struct hl_ctx *ctx);
3066 void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush);
3067 struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
3068 enum hl_queue_type queue_type, bool is_kernel_allocated_cb);
3069 void hl_sob_reset_error(struct kref *ref);
3070 int hl_gen_sob_mask(u16 sob_base, u8 sob_mask, u8 *mask);
3071 void hl_fence_put(struct hl_fence *fence);
3072 void hl_fences_put(struct hl_fence **fence, int len);
3073 void hl_fence_get(struct hl_fence *fence);
3074 void cs_get(struct hl_cs *cs);
3075 bool cs_needs_completion(struct hl_cs *cs);
3076 bool cs_needs_timeout(struct hl_cs *cs);
3077 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs);
3078 struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq);
3079 void hl_multi_cs_completion_init(struct hl_device *hdev);
3081 void goya_set_asic_funcs(struct hl_device *hdev);
3082 void gaudi_set_asic_funcs(struct hl_device *hdev);
3084 int hl_vm_ctx_init(struct hl_ctx *ctx);
3085 void hl_vm_ctx_fini(struct hl_ctx *ctx);
3087 int hl_vm_init(struct hl_device *hdev);
3088 void hl_vm_fini(struct hl_device *hdev);
3090 void hl_hw_block_mem_init(struct hl_ctx *ctx);
3091 void hl_hw_block_mem_fini(struct hl_ctx *ctx);
3093 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
3094 enum hl_va_range_type type, u32 size, u32 alignment);
3095 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
3096 u64 start_addr, u64 size);
3097 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
3098 struct hl_userptr *userptr);
3099 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr);
3100 void hl_userptr_delete_list(struct hl_device *hdev,
3101 struct list_head *userptr_list);
3102 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size,
3103 struct list_head *userptr_list,
3104 struct hl_userptr **userptr);
3106 int hl_mmu_init(struct hl_device *hdev);
3107 void hl_mmu_fini(struct hl_device *hdev);
3108 int hl_mmu_ctx_init(struct hl_ctx *ctx);
3109 void hl_mmu_ctx_fini(struct hl_ctx *ctx);
3110 int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
3111 u32 page_size, bool flush_pte);
3112 int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
3114 int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
3115 u64 phys_addr, u32 size);
3116 int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
3117 int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
3118 int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
3119 u32 flags, u32 asid, u64 va, u64 size);
3120 u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
3121 u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
3122 u8 hop_idx, u64 hop_addr, u64 virt_addr);
3123 void hl_mmu_swap_out(struct hl_ctx *ctx);
3124 void hl_mmu_swap_in(struct hl_ctx *ctx);
3125 int hl_mmu_if_set_funcs(struct hl_device *hdev);
3126 void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
3127 int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
3128 int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
3129 struct hl_mmu_hop_info *hops);
3130 u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr);
3131 u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr);
3132 bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
3134 int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
3135 void __iomem *dst, u32 src_offset, u32 size);
3136 int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
3137 int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
3138 u16 len, u32 timeout, u64 *result);
3139 int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
3140 int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
3141 size_t irq_arr_size);
3142 int hl_fw_test_cpu_queue(struct hl_device *hdev);
3143 void *hl_fw_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size,
3144 dma_addr_t *dma_handle);
3145 void hl_fw_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
3147 int hl_fw_send_heartbeat(struct hl_device *hdev);
3148 int hl_fw_cpucp_info_get(struct hl_device *hdev,
3149 u32 sts_boot_dev_sts0_reg,
3150 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3152 int hl_fw_cpucp_handshake(struct hl_device *hdev,
3153 u32 sts_boot_dev_sts0_reg,
3154 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3156 int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size);
3157 int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev,
3158 struct hl_info_pci_counters *counters);
3159 int hl_fw_cpucp_total_energy_get(struct hl_device *hdev,
3161 int get_used_pll_index(struct hl_device *hdev, u32 input_pll_index,
3162 enum pll_index *pll_index);
3163 int hl_fw_cpucp_pll_info_get(struct hl_device *hdev, u32 pll_index,
3165 int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
3166 void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
3167 void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
3168 int hl_fw_init_cpu(struct hl_device *hdev);
3169 int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
3170 u32 sts_boot_dev_sts0_reg,
3171 u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
3172 u32 boot_err1_reg, u32 timeout);
3173 int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
3174 struct fw_load_mgr *fw_loader,
3175 enum comms_cmd cmd, unsigned int size,
3176 bool wait_ok, u32 timeout);
3177 int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
3178 struct cpucp_hbm_row_info *info);
3179 int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
3180 int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
3181 int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
3183 int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
3184 int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data);
3185 int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
3186 struct hl_inbound_pci_region *pci_region);
3187 int hl_pci_set_outbound_region(struct hl_device *hdev,
3188 struct hl_outbound_pci_region *pci_region);
3189 enum pci_region hl_get_pci_memory_region(struct hl_device *hdev, u64 addr);
3190 int hl_pci_init(struct hl_device *hdev);
3191 void hl_pci_fini(struct hl_device *hdev);
3193 long hl_fw_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr);
3194 void hl_fw_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq);
3195 int hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3196 int hl_set_temperature(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3197 int hl_get_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3198 int hl_get_current(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3199 int hl_get_fan_speed(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3200 int hl_get_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3201 void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3202 long hl_fw_get_max_power(struct hl_device *hdev);
3203 void hl_fw_set_max_power(struct hl_device *hdev);
3204 int hl_set_voltage(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3205 int hl_set_current(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3206 int hl_set_power(struct hl_device *hdev, int sensor_index, u32 attr, long value);
3207 int hl_get_power(struct hl_device *hdev, int sensor_index, u32 attr, long *value);
3208 int hl_fw_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
3209 void hl_fw_set_pll_profile(struct hl_device *hdev);
3210 void hl_sysfs_add_dev_clk_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp);
3211 void hl_sysfs_add_dev_vrm_attr(struct hl_device *hdev, struct attribute_group *dev_vrm_attr_grp);
3213 void hw_sob_get(struct hl_hw_sob *hw_sob);
3214 void hw_sob_put(struct hl_hw_sob *hw_sob);
3215 void hl_encaps_handle_do_release(struct kref *ref);
3216 void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
3217 struct hl_cs *cs, struct hl_cs_job *job,
3218 struct hl_cs_compl *cs_cmpl);
3219 void hl_release_pending_user_interrupts(struct hl_device *hdev);
3220 int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
3221 struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);

int hl_state_dump(struct hl_device *hdev);
const char *hl_state_dump_get_sync_name(struct hl_device *hdev, u32 sync_id);
const char *hl_state_dump_get_monitor_name(struct hl_device *hdev,
		struct hl_mon_state_dump *mon);
void hl_state_dump_free_sync_to_engine_map(struct hl_sync_to_engine_map *map);
__printf(4, 5) int hl_snprintf_resize(char **buf, size_t *size, size_t *offset,
		const char *format, ...);
char *hl_format_as_binary(char *buf, size_t buf_len, u32 n);
const char *hl_sync_engine_to_string(enum hl_sync_engine_type engine_type);
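
/*
 * Illustrative sketch (not part of the API): hl_snprintf_resize() appends
 * formatted text to a dynamically grown buffer, which is how the
 * state-dump code builds its output before handing the buffer over to
 * debugfs. It is assumed to return 0 on success; the resulting buffer is
 * owned by the caller until passed on.
 *
 *	char *buf = NULL;
 *	size_t size = 0, offset = 0;
 *
 *	if (!hl_snprintf_resize(&buf, &size, &offset, "sync id: %u\n", id))
 *		hl_debugfs_set_state_dump(hdev, buf, offset);
 */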

void hl_ts_mgr_init(struct hl_ts_mgr *mgr);
void hl_ts_mgr_fini(struct hl_device *hdev, struct hl_ts_mgr *mgr);
int hl_ts_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
struct hl_ts_buff *hl_ts_get(struct hl_device *hdev, struct hl_ts_mgr *mgr, u32 handle);
void hl_ts_put(struct hl_ts_buff *buff);
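
/*
 * Illustrative sketch (not part of the API): hl_ts_get()/hl_ts_put()
 * follow the usual lookup-with-reference pattern, so every successful
 * lookup must be balanced with a put. The ts_mem_mgr field name is an
 * assumption about the owning hl_fpriv object.
 *
 *	struct hl_ts_buff *buff;
 *
 *	buff = hl_ts_get(hdev, &hpriv->ts_mem_mgr, handle);
 *	if (!buff)
 *		return -EINVAL;
 *	... access the timestamp records ...
 *	hl_ts_put(buff);
 */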

#ifdef CONFIG_DEBUG_FS

void hl_debugfs_init(void);
void hl_debugfs_fini(void);
void hl_debugfs_add_device(struct hl_device *hdev);
void hl_debugfs_remove_device(struct hl_device *hdev);
void hl_debugfs_add_file(struct hl_fpriv *hpriv);
void hl_debugfs_remove_file(struct hl_fpriv *hpriv);
void hl_debugfs_add_cb(struct hl_cb *cb);
void hl_debugfs_remove_cb(struct hl_cb *cb);
void hl_debugfs_add_cs(struct hl_cs *cs);
void hl_debugfs_remove_cs(struct hl_cs *cs);
void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job);
void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr);
void hl_debugfs_remove_userptr(struct hl_device *hdev,
		struct hl_userptr *userptr);
void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx);
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
		unsigned long length);

#else

static inline void __init hl_debugfs_init(void)
{
}

static inline void hl_debugfs_fini(void)
{
}

static inline void hl_debugfs_add_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_remove_device(struct hl_device *hdev)
{
}

static inline void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
}

static inline void hl_debugfs_add_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_remove_cb(struct hl_cb *cb)
{
}

static inline void hl_debugfs_add_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_remove_cs(struct hl_cs *cs)
{
}

static inline void hl_debugfs_add_job(struct hl_device *hdev,
		struct hl_cs_job *job)
{
}

static inline void hl_debugfs_remove_job(struct hl_device *hdev,
		struct hl_cs_job *job)
{
}

static inline void hl_debugfs_add_userptr(struct hl_device *hdev,
		struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_remove_userptr(struct hl_device *hdev,
		struct hl_userptr *userptr)
{
}

static inline void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev,
		struct hl_ctx *ctx)
{
}

static inline void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev,
		struct hl_ctx *ctx)
{
}

static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
		char *data, unsigned long length)
{
}

#endif

/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data);
int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);

#endif /* HABANALABSP_H_ */