/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2020 Stefan Roese <sr@denx.de>
 */

#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <mach/cvmx-address.h>
#define CVMX_MAX_CORES		48
/* Maximum # of bits to define core in node */
#define CVMX_NODE_NO_SHIFT	7
#define CVMX_NODE_BITS		2	/* Number of bits to define a node */
#define CVMX_MAX_NODES		(1 << CVMX_NODE_BITS)
#define CVMX_NODE_MASK		(CVMX_MAX_NODES - 1)
#define CVMX_NODE_IO_SHIFT	36
#define CVMX_NODE_MEM_SHIFT	40
#define CVMX_NODE_IO_MASK	((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)

#define CVMX_MIPS_MAX_CORE_BITS	10	/* Maximum # of bits to define cores */
#define CVMX_MIPS_MAX_CORES	(1 << CVMX_MIPS_MAX_CORE_BITS)

#define MAX_CORE_TADS		8

#define CASTPTR(type, v)	((type *)(long)(v))
#define CAST64(v)		((long long)(long)(v))

#define CVMX_CIU3_NMI		0x0001010000000160ULL
#define CVMX_CIU3_ISCX_W1C(x)	(0x0001010090000000ull + ((x) & 1048575) * 8)

#define CVMX_MIO_BOOT_LOC_CFGX(x)	(0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE		GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN		BIT_ULL(31)

#define CVMX_MIO_BOOT_LOC_ADR	0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR	GENMASK_ULL(7, 3)

#define CVMX_MIO_BOOT_LOC_DAT	0x0001180000000098ULL

#define CVMX_MIO_FUS_DAT2	0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO		BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL		BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO	BIT_ULL(34)

#define CVMX_MIO_FUS_RCMD	0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR	GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND	BIT_ULL(12)
#define MIO_FUS_RCMD_DAT	GENMASK_ULL(23, 16)
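/*
 * Illustrative sketch (not from the original sources): the GENMASK_ULL()/
 * BIT_ULL() field definitions above can be used with the linux/bitfield.h
 * helpers, e.g. to extract the fuse read data byte:
 *
 *	u64 rcmd = csr_rd(CVMX_MIO_FUS_RCMD);
 *	u8 fuse_data = FIELD_GET(MIO_FUS_RCMD_DAT, rcmd);
 */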
#define CVMX_RNM_CTL_STATUS	0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL	BIT_ULL(9)

/* IOBDMA/LMTDMA IO addresses */
#define CVMX_LMTDMA_ORDERED_IO_ADDR	0xffffffffffffa400ull
#define CVMX_IOBDMA_ORDERED_IO_ADDR	0xffffffffffffa200ull

/* turn the variable name into a string */
#define CVMX_TMP_STR(x)		CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x)	#x
#define VASTR(...)		#__VA_ARGS__

#define CVMX_PKO_LMTLINE	2ull
#define CVMX_SCRATCH_BASE	(-32768l)	/* 0xffffffffffff8000 */

#define COP0_CVMMEMCTL		$11,7	/* Cavium memory control */
#define CVMX_RDHWR(result, regstr) \
	asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_RDHWRNV(result, regstr) \
	asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
	asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))
#define CVMX_MF_COP0(val, cop0) \
	asm("dmfc0 %[rt]," VASTR(cop0) : [rt] "=d" (val))
#define CVMX_MT_COP0(val, cop0) \
	asm("dmtc0 %[rt]," VASTR(cop0) : : [rt] "d" (val))

#define CVMX_MF_CVM_MEM_CTL(val)	CVMX_MF_COP0(val, COP0_CVMMEMCTL)
#define CVMX_MT_CVM_MEM_CTL(val)	CVMX_MT_COP0(val, COP0_CVMMEMCTL)

#define CVMX_SYNC	asm volatile("sync\n" : : : "memory")
#define CVMX_SYNCW	asm volatile("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCS	asm volatile("syncs\n" : : : "memory")
#define CVMX_SYNCWS	asm volatile("syncws\n" : : : "memory")

#define CVMX_CACHE_LINE_SIZE	128			   // In bytes
#define CVMX_CACHE_LINE_MASK	(CVMX_CACHE_LINE_SIZE - 1) // In bytes
#define CVMX_CACHE_LINE_ALIGNED	__aligned(CVMX_CACHE_LINE_SIZE)

#define CVMX_SYNCIOBDMA		asm volatile("synciobdma" : : : "memory")

#define CVMX_MF_CHORD(dest)	CVMX_RDHWR(dest, 30)

#define CVMX_PREFETCH0(address)	  CVMX_PREFETCH(address, 0)
#define CVMX_PREFETCH128(address) CVMX_PREFETCH(address, 128)

/** a normal prefetch */
#define CVMX_PREFETCH(address, offset)	CVMX_PREFETCH_PREF0(address, offset)

/** normal prefetches that use the pref instruction */
#define CVMX_PREFETCH_PREFX(X, address, offset) \
	asm volatile ("pref %[type], %[off](%[rbase])" : : [rbase] "d" (address), [off] "I" (offset), [type] "n" (X))
#define CVMX_PREFETCH_PREF0(address, offset) \
	CVMX_PREFETCH_PREFX(0, address, offset)
/*
 * The macros cvmx_likely and cvmx_unlikely use the
 * __builtin_expect GCC operation to control branch
 * probabilities for a conditional. For example, an "if"
 * statement in the code that will almost always be
 * executed should be written as "if (cvmx_likely(...))".
 * If the "else" section of an if statement is more
 * probable, use "if (cvmx_unlikely(...))".
 */
#define cvmx_likely(x)	 __builtin_expect(!!(x), 1)
#define cvmx_unlikely(x) __builtin_expect(!!(x), 0)
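/*
 * Illustrative usage sketch (hypothetical buffer check), biasing the
 * compiler towards the common, successful path:
 *
 *	if (cvmx_unlikely(!buf))
 *		return -1;
 */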
#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, to_us)	\
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd(address);		\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})
#define CVMX_WAIT_FOR_FIELD64_NODE(node, address, type, field, op, value, to_us) \
	({								\
		int result;						\
		do {							\
			u64 done = get_timer(0);			\
			type c;						\
			while (1) {					\
				c.u64 = csr_rd_node(node, address);	\
				if ((c.s.field)op(value)) {		\
					result = 0;			\
					break;				\
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1;			\
					break;				\
				} else					\
					udelay(100);			\
			}						\
		} while (0);						\
		result;							\
	})
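/*
 * Illustrative usage sketch (union/field names are only examples): poll a
 * CSR until a status field reads 1, giving up after 10ms:
 *
 *	if (CVMX_WAIT_FOR_FIELD64(CVMX_RNM_CTL_STATUS,
 *				   union cvmx_rnm_ctl_status, eer_val,
 *				   ==, 1, 10000))
 *		debug("timeout waiting for RNM EER_VAL\n");
 */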
/* ToDo: Currently only node = 0 supported */
#define cvmx_get_node_num()	0

static inline u64 csr_rd_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}

static inline u32 csr_rd32_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread32(base);
}

static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}

static inline u32 csr_rd32(u64 addr)
{
	return csr_rd32_node(0, addr);
}

static inline void csr_wr_node(int node, u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite64(val, base);
}

static inline void csr_wr32_node(int node, u64 addr, u32 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite32(val, base);
}

static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}

static inline void csr_wr32(u64 addr, u32 val)
{
	csr_wr32_node(0, addr, val);
}
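/*
 * Illustrative sketch: a read-modify-write of a boot-bus location config
 * register using the accessors above:
 *
 *	u64 cfg = csr_rd(CVMX_MIO_BOOT_LOC_CFGX(0));
 *	cfg |= MIO_BOOT_LOC_CFG_EN;
 *	csr_wr(CVMX_MIO_BOOT_LOC_CFGX(0), cfg);
 */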
/*
 * We need to use volatile accesses here; otherwise the IO accessor
 * functions might swap the bytes
 */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return *(volatile u64 *)addr;
}

static inline s64 cvmx_read64_int64(u64 addr)
{
	return *(volatile s64 *)addr;
}

static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}

static inline void cvmx_write64_int64(u64 addr, s64 val)
{
	*(volatile s64 *)addr = val;
}

static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}

static inline s32 cvmx_read64_int32(u64 addr)
{
	return *(volatile s32 *)addr;
}

static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}

static inline void cvmx_write64_int32(u64 addr, s32 val)
{
	*(volatile s32 *)addr = val;
}

static inline void cvmx_write64_int16(u64 addr, s16 val)
{
	*(volatile s16 *)addr = val;
}

static inline void cvmx_write64_uint16(u64 addr, u16 val)
{
	*(volatile u16 *)addr = val;
}

static inline void cvmx_write64_int8(u64 addr, int8_t val)
{
	*(volatile int8_t *)addr = val;
}

static inline void cvmx_write64_uint8(u64 addr, u8 val)
{
	*(volatile u8 *)addr = val;
}

static inline s16 cvmx_read64_int16(u64 addr)
{
	return *(volatile s16 *)addr;
}

static inline u16 cvmx_read64_uint16(u64 addr)
{
	return *(volatile u16 *)addr;
}

static inline int8_t cvmx_read64_int8(u64 addr)
{
	return *(volatile int8_t *)addr;
}

static inline u8 cvmx_read64_uint8(u64 addr)
{
	return *(volatile u8 *)addr;
}

static inline void cvmx_send_single(u64 data)
{
	cvmx_write64_uint64(CVMX_IOBDMA_ORDERED_IO_ADDR, data);
}
/**
 * Perform a 64-bit write to an IO address
 *
 * @param io_addr	I/O address to write to
 * @param val		64-bit value to write
 */
static inline void cvmx_write_io(u64 io_addr, u64 val)
{
	cvmx_write64_uint64(io_addr, val);
}

/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @param major_did 5 bit major did
 * @param sub_did   3 bit sub did
 *
 * Return: I/O base address
 */
static inline u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
}
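/*
 * Worked example (illustrative DID values): major DID 12, sub DID 0 yields
 * (1ull << 48) | (12ull << 43) | (0ull << 40) = 0x0001600000000000.
 */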
/**
 * Builds a bit mask given the required size in bits.
 *
 * @param bits	Number of bits in the mask
 *
 * Return: The mask
 */
static inline u64 cvmx_build_mask(u64 bits)
{
	/* Shifting by 64 is undefined in C, so handle the full-width case */
	if (bits == 64)
		return -1;

	return ~((~0x0ull) << bits);
}

/**
 * Extract bits out of a number
 *
 * @param input	Number to extract from
 * @param lsb	Starting bit, least significant (0-63)
 * @param width	Width in bits (1-64)
 *
 * Return: Extracted number
 */
static inline u64 cvmx_bit_extract(u64 input, int lsb, int width)
{
	u64 result = input >> lsb;

	result &= cvmx_build_mask(width);

	return result;
}
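/*
 * Worked example: cvmx_build_mask(8) == 0xff, and
 * cvmx_bit_extract(0x12345678, 8, 16) == (0x12345678 >> 8) & 0xffff == 0x3456.
 */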
/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value) places "value" into bits [39:24]:
 *
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 *
 * @param high_bit Highest bit value can occupy (inclusive) 0-63
 * @param low_bit  Lowest bit value can occupy inclusive 0-high_bit
 * @param value    Value to use
 *
 * Return: Value masked and shifted
 */
static inline u64 cvmx_build_bits(u64 high_bit, u64 low_bit, u64 value)
{
	return ((value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit);
}
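/*
 * Worked example: cvmx_build_bits(39, 24, 0xab) == (0xab & 0xffff) << 24
 * == 0x00000000ab000000.
 */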
static inline u64 cvmx_mask_to_localaddr(u64 addr)
{
	return (addr & 0xffffffffff);
}

static inline u64 cvmx_addr_on_node(u64 node, u64 addr)
{
	return (node << 40) | cvmx_mask_to_localaddr(addr);
}

static inline void *cvmx_phys_to_ptr(u64 addr)
{
	return (void *)CKSEG0ADDR(addr);
}

static inline u64 cvmx_ptr_to_phys(void *ptr)
{
	return virt_to_phys(ptr);
}
/**
 * Number of the Core on which the program is currently running.
 *
 * Return: core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;

	CVMX_RDHWRNV(core_num, 0);

	return core_num;
}

/**
 * Node-local number of the core on which the program is currently running.
 *
 * Return: core number on local node
 */
static inline unsigned int cvmx_get_local_core_num(void)
{
	unsigned int core_num, core_mask;

	CVMX_RDHWRNV(core_num, 0);
	/* note that MAX_CORES may not be power of 2 */
	core_mask = (1 << CVMX_NODE_NO_SHIFT) - 1;

	return core_num & core_mask;
}

/**
 * Given a CSR address return the node number of that address
 *
 * @param addr	Address to extract node number from
 *
 * @return node number
 */
static inline u8 cvmx_csr_addr_to_node(u64 addr)
{
	return (addr >> CVMX_NODE_IO_SHIFT) & CVMX_NODE_MASK;
}

/**
 * Strip the node address bits from a CSR address
 *
 * @param addr	CSR address to strip the node bits from
 *
 * @return CSR address with the node bits set to zero
 */
static inline u64 cvmx_csr_addr_strip_node(u64 addr)
{
	return addr & ~((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT);
}
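/*
 * Illustrative sketch: the node number lives in bits [37:36] of a CSR
 * address. For addr = CVMX_MIO_FUS_DAT2 | (1ull << CVMX_NODE_IO_SHIFT),
 * cvmx_csr_addr_to_node(addr) returns 1 and cvmx_csr_addr_strip_node(addr)
 * returns CVMX_MIO_FUS_DAT2 again.
 */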
/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val	32 bit value to count set bits in
 *
 * Return: Number of bits set
 */
static inline u32 cvmx_pop(u32 val)
{
	u32 pop;

	CVMX_POP(pop, val);

	return pop;
}

#define cvmx_read_csr_node(node, addr)		csr_rd(addr)
#define cvmx_write_csr_node(node, addr, val)	csr_wr(addr, val)

#define cvmx_printf  printf
#define cvmx_vprintf vprintf

/* Use common debug macros */
#define cvmx_warn    debug
#define cvmx_warn_if debug_cond
/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location,
 * and returns previous value.
 *
 * Memory access ordering is enforced before/after the atomic operation,
 * so no additional 'sync' instructions are required.
 *
 * @param ptr	address in memory to add incr to
 * @param incr	amount to increment memory location by (signed)
 *
 * @return Value of memory location before increment
 */
static inline int32_t cvmx_atomic_fetch_and_add32(int32_t * ptr, int32_t incr)
{
	int32_t val;

	/* The GCC builtin enforces full (seq_cst) ordering around the add */
	val = __atomic_fetch_add(ptr, incr, __ATOMIC_SEQ_CST);

	return val;
}

/**
 * Atomically adds a signed value to a 32 bit (aligned) memory location.
 *
 * This version does not perform 'sync' operations to enforce memory
 * operations. This should only be used when there are no memory operation
 * ordering constraints. (This should NOT be used for reference counting -
 * use the standard version instead.)
 *
 * @param ptr	address in memory to add incr to
 * @param incr	amount to increment memory location by (signed)
 */
static inline void cvmx_atomic_add32_nosync(int32_t * ptr, int32_t incr)
{
	*ptr += incr;
}
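/*
 * Illustrative usage sketch (hypothetical shared counter): take a unique
 * sequence number visible to all cores:
 *
 *	static int32_t seq;
 *	int32_t mine = cvmx_atomic_fetch_and_add32(&seq, 1);
 */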
#endif /* __CVMX_REGS_H__ */