         unsigned long map_len; \
         unsigned long offset = (unsigned long)s + \
                         offsetof(type, member); \
-        err = map_extent_buffer(eb, offset, \
+        if (eb->map_token && offset >= eb->map_start && \
+            offset + sizeof(((type *)0)->member) <= eb->map_start + \
+            eb->map_len) { \
+                kaddr = eb->kaddr; \
+                map_start = eb->map_start; \
+                err = 0; \
+        } else { \
+                err = map_extent_buffer(eb, offset, \
                         sizeof(((type *)0)->member), \
                         &map_token, &kaddr, \
                         &map_start, &map_len, KM_USER1); \
+        } \
         if (!err) { \
                 __le##bits *tmp = (__le##bits *)(kaddr + offset - \
                         map_start); \
         int unmap_on_exit = (eb->map_token == NULL); \
         unsigned long offset = (unsigned long)s + \
                         offsetof(type, member); \
-        err = map_extent_buffer(eb, offset, \
+        if (eb->map_token && offset >= eb->map_start && \
+            offset + sizeof(((type *)0)->member) <= eb->map_start + \
+            eb->map_len) { \
+                kaddr = eb->kaddr; \
+                map_start = eb->map_start; \
+                err = 0; \
+        } else { \
+                err = map_extent_buffer(eb, offset, \
                         sizeof(((type *)0)->member), \
                         &map_token, &kaddr, \
                         &map_start, &map_len, KM_USER1); \
+        } \
         if (!err) { \
                 __le##bits *tmp = (__le##bits *)(kaddr + offset - \
                         map_start); \
         unsigned long map_len; \
         unsigned long offset = offsetof(type, member); \
         int unmap_on_exit = (eb->map_token == NULL); \
-        err = map_extent_buffer(eb, offset, \
+        if (eb->map_token && offset >= eb->map_start && \
+            offset + sizeof(((type *)0)->member) <= eb->map_start + \
+            eb->map_len) { \
+                kaddr = eb->kaddr; \
+                map_start = eb->map_start; \
+                err = 0; \
+        } else { \
+                err = map_extent_buffer(eb, offset, \
                         sizeof(((type *)0)->member), \
                         &map_token, &kaddr, \
                         &map_start, &map_len, KM_USER1); \
+        } \
         if (!err) { \
                 __le##bits *tmp = (__le##bits *)(kaddr + offset - \
                         map_start); \
         unsigned long map_len; \
         unsigned long offset = offsetof(type, member); \
         int unmap_on_exit = (eb->map_token == NULL); \
-        err = map_extent_buffer(eb, offset, \
+        if (eb->map_token && offset >= eb->map_start && \
+            offset + sizeof(((type *)0)->member) <= eb->map_start + \
+            eb->map_len) { \
+                kaddr = eb->kaddr; \
+                map_start = eb->map_start; \
+                err = 0; \
+        } else { \
+                err = map_extent_buffer(eb, offset, \
                         sizeof(((type *)0)->member), \
                         &map_token, &kaddr, \
                         &map_start, &map_len, KM_USER1); \
+        } \
         if (!err) { \
                 __le##bits *tmp = (__le##bits *)(kaddr + offset - \
                         map_start); \
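
Each of the four macro bodies above gets the same change: before calling map_extent_buffer(), check whether the extent_buffer already carries a cached mapping (eb->map_token) whose window [eb->map_start, eb->map_start + eb->map_len) covers the member being accessed, and if it does, reuse eb->kaddr and eb->map_start instead of mapping again. The fragment below is a minimal user-space sketch of just that range check; struct cached_map and try_cached_map() are names invented for the example, not identifiers from the patch.

#include <stdio.h>

/* Illustrative stand-in for the cached-mapping fields of an extent_buffer. */
struct cached_map {
        char *kaddr;              /* address of the mapped window                 */
        unsigned long map_start;  /* buffer offset where the mapped window begins */
        unsigned long map_len;    /* length of the mapped window                  */
        int mapped;               /* stand-in for eb->map_token != NULL           */
};

/*
 * Return a pointer into the cached window when [offset, offset + size) is
 * fully covered by it, mirroring the fast path added above; return NULL when
 * the caller would have to fall back to map_extent_buffer().
 */
static char *try_cached_map(struct cached_map *m, unsigned long offset,
                            unsigned long size)
{
        if (m->mapped && offset >= m->map_start &&
            offset + size <= m->map_start + m->map_len)
                return m->kaddr + offset - m->map_start;
        return NULL;
}

int main(void)
{
        static char window[4096];
        struct cached_map m = { window, 8192, 4096, 1 };

        /* Member inside the mapped window: fast path, no remap needed. */
        printf("hit:  %p\n", (void *)try_cached_map(&m, 8200, 8));
        /* Member straddling the end of the window: caller must remap. */
        printf("miss: %p\n", (void *)try_cached_map(&m, 12290, 8));
        return 0;
}

The check is deliberately conservative: a member that crosses the end of the cached window always takes the map_extent_buffer() path, so the cache can only save work, never change the result.
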
         struct extent_buffer *eb;
         struct page *p;
         struct address_space *mapping = tree->mapping;
+        int uptodate = 1;
         eb = __alloc_extent_buffer(mask);
         if (!eb || IS_ERR(eb))
         atomic_set(&eb->refs, 1);
         for (i = 0; i < num_pages; i++, index++) {
-                p = find_get_page(mapping, index);
+                p = find_lock_page(mapping, index);
                 if (!p) {
                         /* make sure the free only frees the pages we've
                          * grabbed a reference on
                 set_page_extent_mapped(p);
                 if (i == 0)
                         eb->first_page = p;
+                if (!PageUptodate(p))
+                        uptodate = 0;
+                unlock_page(p);
         }
+        if (uptodate)
+                eb->flags |= EXTENT_UPTODATE;
         return eb;
 fail:
         free_extent_buffer(eb);
         if (eb->flags & EXTENT_UPTODATE)
                 return 0;
-        if (test_range_bit(tree, eb->start, eb->start + eb->len - 1,
+        if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
                         EXTENT_UPTODATE, 1)) {
                 return 0;
         }
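
The two hunks above work together. While the buffer's pages are looked up, find_lock_page() replaces find_get_page() so each page is held locked while it is inspected and wired into the buffer, any page that is not up to date clears a running uptodate flag, and only a fully up-to-date buffer gets EXTENT_UPTODATE set on eb->flags. The later check can then rely on that cached flag alone; the per-range test_range_bit() lookup is switched off with if (0 && ...) rather than removed. Below is a small user-space sketch of the same aggregate-then-cache pattern; struct buffer, buffer_fill() and buffer_uptodate() are invented names for illustration.

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 4

/* Invented model: one flag per backing page plus one cached flag for the buffer. */
struct buffer {
        bool page_uptodate[NPAGES];
        bool uptodate;                /* plays the role of EXTENT_UPTODATE */
};

/* Walk the pages once and fold their state into the single cached flag. */
static void buffer_fill(struct buffer *b)
{
        bool uptodate = true;
        int i;

        for (i = 0; i < NPAGES; i++) {
                /* ...lock the page, attach it to the buffer... */
                if (!b->page_uptodate[i])
                        uptodate = false;
                /* ...unlock the page... */
        }
        b->uptodate = uptodate;
}

/* Later readers trust the cached flag instead of re-testing every page. */
static bool buffer_uptodate(const struct buffer *b)
{
        return b->uptodate;
}

int main(void)
{
        struct buffer b = { .page_uptodate = { true, true, false, true } };

        buffer_fill(&b);
        printf("uptodate: %d\n", buffer_uptodate(&b));        /* 0: page 2 is stale */

        b.page_uptodate[2] = true;
        buffer_fill(&b);
        printf("uptodate: %d\n", buffer_uptodate(&b));        /* 1: every page ready */
        return 0;
}
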
         char *dst = (char *)dstv;
         size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
         unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
+        unsigned long num_pages = num_extent_pages(eb->start, eb->len);
         WARN_ON(start > eb->len);
         WARN_ON(start + len > eb->start + eb->len);
         while(len > 0) {
                 page = extent_buffer_page(eb, i);
+                if (!PageUptodate(page)) {
+                        printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
+                        WARN_ON(1);
+                }
                 WARN_ON(!PageUptodate(page));
                 cur = min(len, (PAGE_CACHE_SIZE - offset));
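
The last hunk adds a diagnostic to the copy-out path: the number of backing pages is computed once, and before each page is read the code prints the page index, the loop position, the page count and the buffer length if the page is unexpectedly not up to date, then warns. The page-index arithmetic the loop depends on is the part worth spelling out: an extent buffer may start partway into its first page (start_offset above), so buffer-relative byte start lives in backing page (start_offset + start) >> PAGE_CACHE_SHIFT, at offset (start_offset + start) & (PAGE_CACHE_SIZE - 1) inside that page. The sketch below walks through that arithmetic with an assumed 4 KiB page size; page_index_of() and page_offset_of() are invented helper names.

#include <stdio.h>

/* Assumed page geometry for the sketch; the kernel code uses PAGE_CACHE_SIZE/SHIFT. */
#define SKETCH_PAGE_SIZE  4096UL
#define SKETCH_PAGE_SHIFT 12

/* Offset of the buffer's first byte within its first backing page. */
static unsigned long first_page_offset(unsigned long long eb_start)
{
        return (unsigned long)(eb_start & (SKETCH_PAGE_SIZE - 1));
}

/* Which backing page holds buffer-relative byte 'start'. */
static unsigned long page_index_of(unsigned long long eb_start, unsigned long start)
{
        return (first_page_offset(eb_start) + start) >> SKETCH_PAGE_SHIFT;
}

/* Where that byte sits within the page. */
static unsigned long page_offset_of(unsigned long long eb_start, unsigned long start)
{
        return (first_page_offset(eb_start) + start) & (SKETCH_PAGE_SIZE - 1);
}

int main(void)
{
        /* A buffer that starts 1 KiB into a page: byte 3500 already spills into page 1. */
        unsigned long long eb_start = 10ULL * 4096 + 1024;

        printf("byte 0:    page %lu, offset %lu\n",
               page_index_of(eb_start, 0), page_offset_of(eb_start, 0));
        printf("byte 3500: page %lu, offset %lu\n",
               page_index_of(eb_start, 3500), page_offset_of(eb_start, 3500));
        return 0;
}
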