 	while(nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
-				     min(nr_pages, ARRAY_SIZE(pages)), pages);
+				     min_t(unsigned long,
+				     nr_pages, ARRAY_SIZE(pages)), pages);
 		if (ret == 0) {
 			nr_pages -= 1;
 			index += 1;
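For context (not part of the patch): the kernel's min() macro type-checks its arguments, and nr_pages in these loops is an unsigned long while ARRAY_SIZE(pages) is a sizeof-based expression of type size_t. On typical 32-bit builds size_t is unsigned int, so the mismatch triggers a "comparison of distinct pointer types" style warning; min_t() avoids it by casting both operands to an explicitly named type. A minimal userspace sketch of the two macros (simplified from include/linux/kernel.h; the surrounding main() and its values are illustrative only):

#include <stdio.h>

/* Simplified copies of the kernel macros, for illustration. */
#define min(x, y) ({				\
	typeof(x) _min1 = (x);			\
	typeof(y) _min2 = (y);			\
	(void) (&_min1 == &_min2);		\
	_min1 < _min2 ? _min1 : _min2; })

#define min_t(type, x, y) ({			\
	type __min1 = (type)(x);		\
	type __min2 = (type)(y);		\
	__min1 < __min2 ? __min1 : __min2; })

int main(void)
{
	unsigned long nr_pages = 100;
	unsigned int array_size = 16;	/* what size_t looks like on 32-bit */

	/* min(nr_pages, array_size) would warn here: &_min1 and &_min2
	 * have distinct pointer types.  min_t() casts both sides first. */
	unsigned long n = min_t(unsigned long, nr_pages, array_size);

	printf("%lu\n", n);
	return 0;
}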
 	while(nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
-				     min(nr_pages, ARRAY_SIZE(pages)), pages);
+				     min_t(unsigned long, nr_pages,
+				     ARRAY_SIZE(pages)), pages);
 		for (i = 0; i < ret; i++) {
 			if (pages[i] != locked_page)
 				unlock_page(pages[i]);
 	nrpages = end_index - index + 1;
 	while(nrpages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
-				     min(nrpages, ARRAY_SIZE(pages)), pages);
+				     min_t(unsigned long,
+				     nrpages, ARRAY_SIZE(pages)), pages);
 		if (ret == 0) {
 			ret = -EAGAIN;
 			goto done;
 	while(nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
-				     min(nr_pages, ARRAY_SIZE(pages)), pages);
+				     min_t(unsigned long,
+				     nr_pages, ARRAY_SIZE(pages)), pages);
 		for (i = 0; i < ret; i++) {
 			if (pages[i] == locked_page) {
 				page_cache_release(pages[i]);
 	int contig = 0;
 	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
 	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
-	size_t page_size = min(size, PAGE_CACHE_SIZE);
+	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);
 	if (bio_ret && *bio_ret) {
 		bio = *bio_ret;
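Also not part of the patch: min_t() needs an explicit type per call site (size_t here, unsigned long in the page-loop hunks), and because the cast is silent the chosen type has to be wide enough to represent both operands. A hypothetical sketch of the failure mode when it is not (values made up):

#include <stdio.h>

#define min_t(type, x, y) ({			\
	type __min1 = (type)(x);		\
	type __min2 = (type)(y);		\
	__min1 < __min2 ? __min1 : __min2; })

int main(void)
{
	unsigned long long len = 0x100000000ULL;	/* 4 GiB */
	unsigned long page_size = 4096;

	/* Wide enough: prints 4096 everywhere. */
	printf("%llu\n", min_t(unsigned long long, len, page_size));

	/* Too narrow on 32-bit: len truncates to 0, so the "minimum"
	 * silently becomes 0 instead of 4096. */
	printf("%lu\n", min_t(unsigned long, len, page_size));
	return 0;
}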
 		int i = 0;
 		while(compressed_size > 0) {
 			cpage = compressed_pages[i];
-			cur_size = min(compressed_size,
+			cur_size = min_t(unsigned long, compressed_size,
 				       PAGE_CACHE_SIZE);
 			kaddr = kmap(cpage);
 	read_extent_buffer(leaf, tmp, ptr, inline_size);
-	max_size = min(PAGE_CACHE_SIZE, max_size);
+	max_size = min_t(unsigned long, PAGE_CACHE_SIZE, max_size);
 	ret = btrfs_zlib_decompress(tmp, page, extent_offset,
 				    inline_size, max_size);
 	if (ret) {
 	data_in = kmap(pages_in[page_in_index]);
 	workspace->inf_strm.next_in = data_in;
-	workspace->inf_strm.avail_in = min(srclen, PAGE_CACHE_SIZE);
+	workspace->inf_strm.avail_in = min_t(size_t, srclen, PAGE_CACHE_SIZE);
 	workspace->inf_strm.total_in = 0;
 	workspace->inf_strm.total_out = 0;
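The same mismatch drives this last hunk: srclen is a size_t, while PAGE_CACHE_SIZE is an alias for PAGE_SIZE, typically an unsigned long constant, so capping the per-page input needs min_t(size_t, ...). A rough userspace sketch of that page-at-a-time feeding pattern (assumed names and constants, not the btrfs zlib code):

#include <string.h>
#include <zlib.h>

#define PAGE_CACHE_SIZE 4096UL		/* stand-in for the kernel constant */

#define min_t(type, x, y) ({			\
	type __min1 = (type)(x);		\
	type __min2 = (type)(y);		\
	__min1 < __min2 ? __min1 : __min2; })

/* Inflate src into dst, handing zlib at most one page of input per
 * iteration and advancing by however much it actually consumed. */
static int inflate_paged(unsigned char *dst, size_t dstlen,
			 unsigned char *src, size_t srclen)
{
	z_stream strm;
	int ret = Z_OK;

	memset(&strm, 0, sizeof(strm));
	if (inflateInit(&strm) != Z_OK)
		return -1;

	strm.next_out = dst;
	strm.avail_out = (uInt)dstlen;

	while (srclen > 0 && strm.avail_out > 0 && ret != Z_STREAM_END) {
		size_t chunk = min_t(size_t, srclen, PAGE_CACHE_SIZE);

		strm.next_in = src;
		strm.avail_in = (uInt)chunk;

		ret = inflate(&strm, Z_NO_FLUSH);
		if (ret != Z_OK && ret != Z_STREAM_END) {
			inflateEnd(&strm);
			return -1;
		}

		src += chunk - strm.avail_in;
		srclen -= chunk - strm.avail_in;
	}

	inflateEnd(&strm);
	return ret == Z_STREAM_END ? 0 : -1;
}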