Clean up ROUNDUP; use ALIGN() wherever appropriate.
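
For reference, ALIGN() from <linux/kernel.h> performs the same power-of-two
round-up as the removed driver-local ROUNDUP() macro, with the added benefit
that the alignment is cast to typeof(x) and the arguments are fully
parenthesized. A minimal user-space sketch of the equivalence (the local
macro copies, the main() harness and the 4k/0x1234 test values are
illustrative only, not part of this patch):

	#include <stdio.h>

	/*
	 * Local stand-ins mirroring the kernel's ALIGN()/__ALIGN_MASK()
	 * from <linux/kernel.h> and the removed driver-local ROUNDUP();
	 * both assume the alignment is a power of two.
	 */
	#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))
	#define ALIGN(x, a)		__ALIGN_MASK(x, (typeof(x))(a) - 1)
	#define ROUNDUP(x, y)		((x + ((y)-1)) & ~((y)-1))

	int main(void)
	{
		unsigned long iovp_size = 4096;	/* illustrative IOVP_SIZE */
		unsigned long len = 0x1234;

		/* Both round 0x1234 up to 0x2000 for a 4k alignment. */
		printf("ROUNDUP: %#lx\n", ROUNDUP(len, iovp_size));
		printf("ALIGN:   %#lx\n", ALIGN(len, iovp_size));
		return 0;
	}

All callers converted here pass a simple sum as the first argument, so the
missing parentheses around x in the old ROUNDUP() were only a latent hazard;
switching to the shared ALIGN() macro removes the duplication either way.
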
Signed-off-by: Milind Arun Choudhary <milindchoudhary@gmail.com>
Acked-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
*/
#include <linux/types.h>
+#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
-#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
/*
** Don't worry about the 150% average search length on a miss.
size_t saved_byte_cnt;
/* round up to nearest page size */
- saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);
+ saved_byte_cnt = byte_cnt = ALIGN(byte_cnt, IOVP_SIZE);
while(byte_cnt > 0) {
/* invalidate one page at a time */
offset = ((unsigned long) addr) & ~IOVP_MASK;
/* round up to nearest IOVP_SIZE */
- size = ROUNDUP(size + offset, IOVP_SIZE);
+ size = ALIGN(size + offset, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef CCIO_MAP_STATS
iova ^= offset; /* clear offset bits */
size += offset;
- size = ROUNDUP(size, IOVP_SIZE);
+ size = ALIGN(size, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);
** exceed DMA_CHUNK_SIZE if we coalesce the
** next entry.
*/
- if(unlikely(ROUNDUP(dma_len + dma_offset + startsg->length,
+ if(unlikely(ALIGN(dma_len + dma_offset + startsg->length,
IOVP_SIZE) > DMA_CHUNK_SIZE))
break;
** Allocate space for DMA stream.
*/
sg_dma_len(contig_sg) = dma_len;
- dma_len = ROUNDUP(dma_len + dma_offset, IOVP_SIZE);
+ dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
sg_dma_address(contig_sg) =
PIDE_FLAG
| (iommu_alloc_range(ioc, dma_len) << IOVP_SHIFT)
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif
-#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
-
/************************************
** SBA register read and write support
** SBA HW features in the unmap path.
*/
unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
- uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
+ uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
unsigned long mask;
if (bitshiftcnt >= BITS_PER_LONG) {
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
- size = ROUNDUP(size, IOVP_SIZE);
+ size = ALIGN(size, IOVP_SIZE);
spin_lock_irqsave(&ioc->res_lock, flags);