diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index baf0bf2acbd442e6115bc11d01c4c5313a9f29d2..a263810803c16db36bb1f784fd111441c76d0b1b 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -577,8 +577,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
 
 			unlock_page(page);
 			ret = bl_mark_sectors_init(be->be_inval, isect,
-						       PAGE_CACHE_SECTORS,
-						       NULL);
+						       PAGE_CACHE_SECTORS);
 			if (unlikely(ret)) {
 				dprintk("%s bl_mark_sectors_init fail %d\n",
 					__func__, ret);
@@ -627,8 +626,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
 		}
 		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
 			ret = bl_mark_sectors_init(be->be_inval, isect,
-						       PAGE_CACHE_SECTORS,
-						       NULL);
+						       PAGE_CACHE_SECTORS);
 			if (unlikely(ret)) {
 				dprintk("%s bl_mark_sectors_init fail %d\n",
 					__func__, ret);
diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
index 42acf7ef59926d61ce0b4a12e208585d928a9aad..60728acc7b99275ab8c74bd5ea6bd6703fef65cf 100644
--- a/fs/nfs/blocklayout/blocklayout.h
+++ b/fs/nfs/blocklayout/blocklayout.h
@@ -186,8 +186,7 @@ struct pnfs_block_extent *
 bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
 		struct pnfs_block_extent **cow_read);
 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
-			     sector_t offset, sector_t length,
-			     sector_t **pages);
+			     sector_t offset, sector_t length);
 void bl_put_extent(struct pnfs_block_extent *be);
 struct pnfs_block_extent *bl_alloc_extent(void);
 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect);
diff --git a/fs/nfs/blocklayout/extents.c b/fs/nfs/blocklayout/extents.c
index 369ef53e89e15053d770b16b5db2d10700b2c3db..d0f52ed22428c232bb32c7e65ba6c047930ab09d 100644
--- a/fs/nfs/blocklayout/extents.c
+++ b/fs/nfs/blocklayout/extents.c
@@ -174,33 +174,6 @@ static int _preload_range(struct pnfs_inval_markings *marks,
 	return status;
 }
 
-static void set_needs_init(sector_t *array, sector_t offset)
-{
-	sector_t *p = array;
-
-	dprintk("%s enter\n", __func__);
-	if (!p)
-		return;
-	while (*p < offset)
-		p++;
-	if (*p == offset)
-		return;
-	else if (*p == ~0) {
-		*p++ = offset;
-		*p = ~0;
-		return;
-	} else {
-		sector_t *save = p;
-		dprintk("%s Adding %llu\n", __func__, (u64)offset);
-		while (*p != ~0)
-			p++;
-		p++;
-		memmove(save + 1, save, (char *)p - (char *)save);
-		*save = offset;
-		return;
-	}
-}
-
 /* We are relying on page lock to serialize this */
 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
 {
@@ -256,28 +229,15 @@ static int is_range_written(struct pnfs_inval_markings *marks,
 
 /* Marks sectors in [offest, offset_length) as having been initialized.
  * All lengths are step-aligned, where step is min(pagesize, blocksize).
- * Notes where partial block is initialized, and helps prepare it for
- * complete initialization later.
+ * Currently assumes offset is page-aligned
  */
-/* Currently assumes offset is page-aligned */
 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
-			     sector_t offset, sector_t length,
-			     sector_t **pages)
+			     sector_t offset, sector_t length)
 {
-	sector_t s, start, end;
-	sector_t *array = NULL; /* Pages to mark */
+	sector_t start, end;
 
 	dprintk("%s(offset=%llu,len=%llu) enter\n",
 		__func__, (u64)offset, (u64)length);
-	s = max((sector_t) 3,
-		2 * (marks->im_block_size / (PAGE_CACHE_SECTORS)));
-	dprintk("%s set max=%llu\n", __func__, (u64)s);
-	if (pages) {
-		array = kmalloc(s * sizeof(sector_t), GFP_NOFS);
-		if (!array)
-			goto outerr;
-		array[0] = ~0;
-	}
 
 	start = normalize(offset, marks->im_block_size);
 	end = normalize_up(offset + length, marks->im_block_size);
@@ -285,41 +245,15 @@ int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
 		goto outerr;
 
 	spin_lock(&marks->im_lock);
-
-	for (s = normalize_up(start, PAGE_CACHE_SECTORS);
-	     s < offset; s += PAGE_CACHE_SECTORS) {
-		dprintk("%s pre-area pages\n", __func__);
-		/* Portion of used block is not initialized */
-		if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
-			set_needs_init(array, s);
-	}
 	if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
 		goto out_unlock;
-	for (s = normalize_up(offset + length, PAGE_CACHE_SECTORS);
-	     s < end; s += PAGE_CACHE_SECTORS) {
-		dprintk("%s post-area pages\n", __func__);
-		if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
-			set_needs_init(array, s);
-	}
-
 	spin_unlock(&marks->im_lock);
-	if (pages) {
-		if (array[0] == ~0) {
-			kfree(array);
-			*pages = NULL;
-		} else
-			*pages = array;
-	}
 
 	return 0;
 
- out_unlock:
+out_unlock:
 	spin_unlock(&marks->im_lock);
- outerr:
-	if (pages) {
-		kfree(array);
-		*pages = NULL;
-	}
+outerr:
 	return -ENOMEM;
 }
 
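For reference, this is how bl_mark_sectors_init() in fs/nfs/blocklayout/extents.c reads once the hunks above are applied, assembled from the diff context. One line falls between the two extents.c hunks and is not shown in the patch; it is assumed here to be the existing _preload_range() check and is flagged as such. With set_needs_init() and the pages array gone, the function just preloads the normalized [start, end) range, takes im_lock, tags the range EXTENT_INITIALIZED, and unwinds through out_unlock/outerr on failure:

int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
			     sector_t offset, sector_t length)
{
	sector_t start, end;

	dprintk("%s(offset=%llu,len=%llu) enter\n",
		__func__, (u64)offset, (u64)length);

	start = normalize(offset, marks->im_block_size);
	end = normalize_up(offset + length, marks->im_block_size);
	/* assumed: the one context line not shown between the two hunks */
	if (_preload_range(marks, start, end - start))
		goto outerr;

	spin_lock(&marks->im_lock);
	if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
		goto out_unlock;
	spin_unlock(&marks->im_lock);

	return 0;

out_unlock:
	spin_unlock(&marks->im_lock);
outerr:
	return -ENOMEM;
}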