/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 1995 Waldorf GmbH
 * Copyright (C) 1994 - 2000, 06 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
 *	Author: Maciej W. Rozycki <macro@mips.com>
 */
#ifndef _ASM_IO_H
#define _ASM_IO_H
#define ARCH_HAS_IOREMAP_WC
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm-generic/iomap.h>
#include <asm/page.h>
#include <asm/pgtable-bits.h>
#include <asm/processor.h>
#include <asm/string.h>

#include <ioremap.h>
#include <mangle-port.h>
/*
 * Raw operations are never swapped in software.  OTOH values that raw
 * operations are working on may or may not have been swapped by the bus
 * hardware.  An example use would be for flash memory that's used for
 * execute in place.
 */
# define __raw_ioswabb(a, x)	(x)
# define __raw_ioswabw(a, x)	(x)
# define __raw_ioswabl(a, x)	(x)
# define __raw_ioswabq(a, x)	(x)
# define ____raw_ioswabq(a, x)	(x)
/* ioswab[bwlq], __mem_ioswab[bwlq] are defined in mangle-port.h */

#define IO_SPACE_LIMIT 0xffff
/*
 * On MIPS I/O ports are memory mapped, so we access them using normal
 * load/store instructions.  mips_io_port_base is the virtual address to
 * which all ports are being mapped.  For the sake of efficiency some code
 * assumes that this is an address that can be loaded with a single lui
 * instruction, so the lower 16 bits must be zero.  Should be true on
 * any sane architecture; generic code does not use this assumption.
 */
extern const unsigned long mips_io_port_base;
/*
 * Gcc will generate code to load the value of mips_io_port_base after each
 * function call, which may be fairly wasteful in some cases.  So we don't
 * play quite by the book.  We tell gcc mips_io_port_base is a long variable
 * which solves the code generation issue.  Now we need to violate the
 * aliasing rules a little to make initialization possible and finally we
 * will need the barrier() to fight side effects of the aliasing chat.
 * This trickery will eventually collapse under gcc's optimizer.  Oh well.
 */
static inline void set_io_port_base(unsigned long base)
{
	* (unsigned long *) &mips_io_port_base = base;
	barrier();
}
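/*
 * Usage sketch (illustrative only, not part of this header; the
 * physical address below is made up): platform setup code points the
 * port base at an uncached window before any port I/O is performed,
 * e.g.
 *
 *	set_io_port_base((unsigned long)CKSEG1ADDR(0x1fd00000));
 *
 * Per the comment above, the chosen base should have its low 16 bits
 * clear so it can be loaded with a single lui.
 */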
/*
 * Provide the necessary definitions for generic iomap.  We make use of
 * mips_io_port_base for iomap(), but we don't reserve any low addresses
 * for use with I/O ports.
 */
#define HAVE_ARCH_PIO_SIZE
#define PIO_OFFSET	mips_io_port_base
#define PIO_MASK	IO_SPACE_LIMIT
#define PIO_RESERVED	0x0UL
/*
 * virt_to_phys - map virtual addresses to physical
 * @address: address to remap
 *
 * The returned physical address is the physical (CPU) mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses directly mapped or allocated via kmalloc.
 *
 * This function does not give bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline unsigned long virt_to_phys(volatile const void *address)
{
	return __pa(address);
}
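/*
 * Example (sketch; as noted above, a real driver should normally go
 * through the DMA API rather than this): obtaining the physical
 * address of a kmalloc'ed buffer, e.g. for a diagnostic printout:
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	unsigned long phys = virt_to_phys(buf);
 */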
/*
 * phys_to_virt - map physical address to virtual
 * @address: address to remap
 *
 * The returned virtual address is a current CPU mapping for
 * the memory address given.  It is only valid to use this function on
 * addresses that have a kernel mapping.
 *
 * This function does not handle bus mappings for DMA transfers.  In
 * almost all conceivable cases a device driver should not be using
 * this function.
 */
static inline void *phys_to_virt(unsigned long address)
{
	return (void *)(address + PAGE_OFFSET - PHYS_OFFSET);
}
/*
 * ISA I/O bus memory addresses are 1:1 with the physical address.
 */
static inline unsigned long isa_virt_to_bus(volatile void *address)
{
	return virt_to_phys(address);
}

static inline void *isa_bus_to_virt(unsigned long address)
{
	return phys_to_virt(address);
}

#define isa_page_to_bus page_to_phys
/*
 * However PCI ones are not necessarily 1:1 and therefore these interfaces
 * are forbidden in portable PCI drivers.
 *
 * Allow them for x86 for legacy drivers, though.
 */
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
 * Change "struct page" to physical address.
 */
#define page_to_phys(page)	((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)

extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
	unsigned long flags)
{
	void __iomem *addr = plat_ioremap(offset, size, flags);

	if (addr)
		return addr;

#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))

	if (cpu_has_64bit_addresses) {
		u64 base = UNCAC_BASE;

		/*
		 * R10000 supports a 2 bit uncached attribute therefore
		 * UNCAC_BASE may not equal IO_BASE.
		 */
		if (flags == _CACHE_UNCACHED)
			base = (u64) IO_BASE;
		return (void __iomem *) (unsigned long) (base + offset);
	} else if (__builtin_constant_p(offset) &&
		   __builtin_constant_p(size) && __builtin_constant_p(flags)) {
		phys_addr_t phys_addr, last_addr;

		phys_addr = fixup_bigphys_addr(offset, size);

		/* Don't allow wraparound or zero size. */
		last_addr = phys_addr + size - 1;
		if (!size || last_addr < phys_addr)
			return NULL;

		/*
		 * Map uncached objects in the low 512MB of address
		 * space using KSEG1.
		 */
		if (__IS_LOW512(phys_addr) && __IS_LOW512(last_addr) &&
		    flags == _CACHE_UNCACHED)
			return (void __iomem *)
				(unsigned long)CKSEG1ADDR(phys_addr);
	}

	return __ioremap(offset, size, flags);

#undef __IS_LOW512
}
/*
 * ioremap - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 */
#define ioremap(offset, size)						\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
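/*
 * Typical use (illustrative sketch; the device address, size and
 * register offsets are hypothetical): map device registers in a
 * driver's probe path, access them through the mmio helpers, and
 * unmap them when done:
 *
 *	void __iomem *regs = ioremap(0x1f000000, 0x1000);
 *	u32 status;
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */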
/*
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus.  Note that there are other caches and buffers on many
 * busses.  In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 */
#define ioremap_nocache(offset, size)					\
	__ioremap_mode((offset), (size), _CACHE_UNCACHED)
#define ioremap_uc ioremap_nocache
/*
 * ioremap_cachable - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_cachable performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked cachable by
 * the CPU.  Also enables full write-combining.  Useful for some
 * memory-like regions on I/O busses.
 */
#define ioremap_cachable(offset, size)					\
	__ioremap_mode((offset), (size), _page_cachable_default)
#define ioremap_cache ioremap_cachable
/*
 * ioremap_wc - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_wc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers.  The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * but accelerated by means of the write-combining feature.  It is
 * specifically useful for PCIe prefetchable windows, where write
 * combining may vastly improve communication performance.  If it is
 * determined at boot time that the CPU CCA doesn't support UCA, this
 * falls back to _CACHE_UNCACHED (see cpu_probe()).
 */
#define ioremap_wc(offset, size)					\
	__ioremap_mode((offset), (size), boot_cpu_data.writecombine)
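/*
 * Sketch (hypothetical addresses and sizes): a frame buffer sitting in
 * a PCIe prefetchable window is a natural candidate for
 * write-combining, while the control registers of the same device stay
 * strictly uncached:
 *
 *	void __iomem *fb   = ioremap_wc(0x40000000, 0x1000000);
 *	void __iomem *regs = ioremap_nocache(0x1f000000, 0x1000);
 */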
static inline void iounmap(const volatile void __iomem *addr)
{
	if (plat_iounmap(addr))
		return;

#define __IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == CKSEG1)

	if (cpu_has_64bit_addresses ||
	    (__builtin_constant_p(addr) && __IS_KSEG1(addr)))
		return;

	__iounmap(addr);

#undef __IS_KSEG1
}
#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
#define war_io_reorder_wmb()		wmb()
#else
#define war_io_reorder_wmb()		barrier()
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq)			\
									\
static inline void pfx##write##bwlq(type val,				\
				    volatile void __iomem *mem)		\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
	__val = pfx##ioswab##bwlq(__mem, val);				\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		*__mem = __val;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
		type __tmp;						\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __writeq""\n\t"	\
			"dsll32 %L0, %L0, 0"			"\n\t"	\
			"dsrl32 %L0, %L0, 0"			"\n\t"	\
			"dsll32 %M0, %M0, 0"			"\n\t"	\
			"or	%L0, %L0, %M0"			"\n\t"	\
			"sd	%L0, %2"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__tmp)					\
			: "0" (__val), "m" (*__mem));			\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else								\
		BUG();							\
}									\
									\
static inline type pfx##read##bwlq(const volatile void __iomem *mem)	\
{									\
	volatile type *__mem;						\
	type __val;							\
									\
	__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem));	\
									\
	if (sizeof(type) != sizeof(u64) || sizeof(u64) == sizeof(long)) \
		__val = *__mem;						\
	else if (cpu_has_64bits) {					\
		unsigned long __flags;					\
									\
		if (irq)						\
			local_irq_save(__flags);			\
		__asm__ __volatile__(					\
			".set	arch=r4000"	"\t\t# __readq" "\n\t"	\
			"ld	%L0, %1"			"\n\t"	\
			"dsra32 %M0, %L0, 0"			"\n\t"	\
			"sll	%L0, %L0, 0"			"\n\t"	\
			".set	mips0"				"\n"	\
			: "=r" (__val)					\
			: "m" (*__mem));				\
		if (irq)						\
			local_irq_restore(__flags);			\
	} else {							\
		__val = 0;						\
		BUG();							\
	}								\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__mem, __val);				\
}
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	war_io_reorder_wmb();						\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
	__val = pfx##ioswab##bwlq(__addr, val);				\
									\
	/* Really, we want this to be atomic */				\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
	*__addr = __val;						\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
	__val = *__addr;						\
									\
	/* prevent prefetching of coherent DMA data prematurely */	\
	rmb();								\
	return pfx##ioswab##bwlq(__addr, __val);			\
}
#define __BUILD_MEMORY_PFX(bus, bwlq, type)				\
									\
	__BUILD_MEMORY_SINGLE(bus, bwlq, type, 1)

#define BUILDIO_MEM(bwlq, type)						\
									\
	__BUILD_MEMORY_PFX(__raw_, bwlq, type)				\
	__BUILD_MEMORY_PFX(, bwlq, type)				\
	__BUILD_MEMORY_PFX(__mem_, bwlq, type)

BUILDIO_MEM(b, u8)
BUILDIO_MEM(w, u16)
BUILDIO_MEM(l, u32)
BUILDIO_MEM(q, u64)
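/*
 * The expansions above provide the usual mmio accessor families; for
 * instance BUILDIO_MEM(l, u32) generates (among others):
 *
 *	static inline u32 readl(const volatile void __iomem *mem);
 *	static inline void writel(u32 val, volatile void __iomem *mem);
 *	static inline u32 __raw_readl(const volatile void __iomem *mem);
 *	static inline void __raw_writel(u32 val, volatile void __iomem *mem);
 *
 * The __raw_ variants skip the ioswab byte-swapping that the plain
 * variants apply.
 */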
#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type,)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)				\
	__BUILD_IOPORT_PFX(__mem_, bwlq, type)
BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
#ifdef CONFIG_64BIT
BUILDIO_IOPORT(q, u64)
#endif
#define __BUILDIO(bwlq, type)						\
									\
__BUILD_MEMORY_SINGLE(____raw_, bwlq, type, 0)

__BUILDIO(q, u64)
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl
#define readq_relaxed			readq

#define writeb_relaxed			writeb
#define writew_relaxed			writew
#define writel_relaxed			writel
#define writeq_relaxed			writeq
#define readb_be(addr)							\
	__raw_readb((__force unsigned *)(addr))
#define readw_be(addr)							\
	be16_to_cpu(__raw_readw((__force unsigned *)(addr)))
#define readl_be(addr)							\
	be32_to_cpu(__raw_readl((__force unsigned *)(addr)))
#define readq_be(addr)							\
	be64_to_cpu(__raw_readq((__force unsigned *)(addr)))

#define writeb_be(val, addr)						\
	__raw_writeb((val), (__force unsigned *)(addr))
#define writew_be(val, addr)						\
	__raw_writew(cpu_to_be16((val)), (__force unsigned *)(addr))
#define writel_be(val, addr)						\
	__raw_writel(cpu_to_be32((val)), (__force unsigned *)(addr))
#define writeq_be(val, addr)						\
	__raw_writeq(cpu_to_be64((val)), (__force unsigned *)(addr))
/*
 * Some code tests for these symbols
 */
#define readq readq
#define writeq writeq
#define __BUILD_MEMORY_STRING(bwlq, type)				\
static inline void writes##bwlq(volatile void __iomem *mem,		\
				const void *addr, unsigned int count)	\
{									\
	const volatile type *__addr = addr;				\
	while (count--) {						\
		__mem_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
static inline void reads##bwlq(volatile void __iomem *mem, void *addr,	\
			       unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
	while (count--) {						\
		*__addr = __mem_read##bwlq(mem);			\
		__addr++;						\
	}								\
}
#define __BUILD_IOPORT_STRING(bwlq, type)				\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
	while (count--) {						\
		__mem_out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
	while (count--) {						\
		*__addr = __mem_in##bwlq(port);				\
		__addr++;						\
	}								\
}
#define BUILDSTRING(bwlq, type)						\
									\
	__BUILD_MEMORY_STRING(bwlq, type)				\
	__BUILD_IOPORT_STRING(bwlq, type)

BUILDSTRING(b, u8)
BUILDSTRING(w, u16)
BUILDSTRING(l, u32)
#ifdef CONFIG_64BIT
BUILDSTRING(q, u64)
#endif
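/*
 * Usage sketch (the port number is hypothetical): the string forms
 * move a whole buffer to or from a single I/O location, which is the
 * usual way to fill or drain a device FIFO:
 *
 *	u16 buf[64];
 *
 *	insw(0x1f0, buf, ARRAY_SIZE(buf));	read 64 halfwords
 *	outsw(0x1f0, buf, ARRAY_SIZE(buf));	write them back
 */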
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#define mmiowb() wmb()
#else
/* Depends on MIPS II instruction set */
#define mmiowb() asm volatile ("sync" ::: "memory")
#endif
static inline void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	memset((void __force *) addr, val, count);
}

static inline void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	memcpy(dst, (void __force *) src, count);
}

static inline void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	memcpy((void __force *) dst, src, count);
}
/*
 * The caches on some architectures aren't dma-coherent and have need to
 * handle this in software.  There are three types of operations that
 * can be applied to dma buffers.
 *
 *  - dma_cache_wback_inv(start, size) makes caches coherent by
 *    writing the content of the caches back to memory, if necessary.
 *    The function also invalidates the affected part of the caches as
 *    necessary before DMA transfers from outside to memory.
 *  - dma_cache_wback(start, size) makes caches coherent by
 *    writing the content of the caches back to memory, if necessary,
 *    before DMA transfers from memory to a device.
 *  - dma_cache_inv(start, size) invalidates the affected parts of the
 *    caches.  Dirty lines of the caches may be written back or simply
 *    be discarded.  This operation is necessary before dma operations
 *    to the memory.
 *
 * This API used to be exported; it now is for arch code internal use only.
 */
#ifdef CONFIG_DMA_NONCOHERENT

extern void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
extern void (*_dma_cache_wback)(unsigned long start, unsigned long size);
extern void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#define dma_cache_wback_inv(start, size)	_dma_cache_wback_inv(start, size)
#define dma_cache_wback(start, size)		_dma_cache_wback(start, size)
#define dma_cache_inv(start, size)		_dma_cache_inv(start, size)
#else /* Sane hardware */

#define dma_cache_wback_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_wback(start,size)	\
	do { (void) (start); (void) (size); } while (0)
#define dma_cache_inv(start,size)	\
	do { (void) (start); (void) (size); } while (0)

#endif /* CONFIG_DMA_NONCOHERENT */
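/*
 * Sketch of how arch code uses these (buf and len are hypothetical):
 * before a device writes into a buffer the stale cache lines covering
 * it must be invalidated, and before a device reads from it any dirty
 * lines must be written back:
 *
 *	dma_cache_inv((unsigned long)buf, len);		device -> memory
 *	dma_cache_wback((unsigned long)buf, len);	memory -> device
 *
 * On DMA-coherent hardware both expand to nothing.
 */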
/*
 * Read a 32-bit register that requires a 64-bit read cycle on the bus.
 * Avoid interrupt mucking, just adjust the address for 4-byte access.
 * Assume the addresses are 8-byte aligned.
 */
#ifdef __MIPSEB__
#define __CSR_32_ADJUST 4
#else
#define __CSR_32_ADJUST 0
#endif

#define csr_out32(v, a) (*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST) = (v))
#define csr_in32(a)	(*(volatile u32 *)((unsigned long)(a) + __CSR_32_ADJUST))
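/*
 * Example (hypothetical register address): a 32-bit CSR in an 8-byte
 * register slot occupies the upper word on a big-endian bus, so
 * __CSR_32_ADJUST shifts the access up by 4 bytes there and leaves it
 * in place on little-endian:
 *
 *	u32 val = csr_in32(0xbf000010);
 *
 *	csr_out32(val | 0x1, 0xbf000010);
 */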
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer.
 */
#define xlate_dev_kmem_ptr(p)	p
void __ioread64_copy(void *to, const void __iomem *from, size_t count);

#endif /* _ASM_IO_H */