/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref, *cur;

	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}
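
/*
 * Example (illustrative only): most drivers can leave sort_pagereflist
 * unset and take the cheap unsorted path above. A driver whose flush
 * code depends on pagerefs arriving in offset order would opt in via
 * its struct fb_deferred_io initializer:
 *
 *	.sort_pagereflist = true,
 */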

static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for page_mkclean() */

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	schedule_delayed_work(&info->deferred_work, 0);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
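
/*
 * Example (illustrative only): with fb_deferred_io_fsync installed as
 * the fbdev fsync handler, userspace can force pending deferred I/O to
 * run immediately by calling fsync(2) on the framebuffer device:
 *
 *	int fd = open("/dev/fb0", O_RDWR);
 *	...
 *	fsync(fd);
 */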

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
		fbdefio->first_io(info);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vmf->vma->vm_start;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *cur = pageref->page;
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
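
/*
 * Example (illustrative only): the deferred_io callback invoked above
 * receives the list of touched pagerefs. A minimal driver-side sketch,
 * with example_flush_page() standing in for a hypothetical helper that
 * copies one page's worth of framebuffer to the device:
 *
 *	static void example_deferred_io(struct fb_info *info,
 *					struct list_head *pagereflist)
 *	{
 *		struct fb_deferred_io_pageref *pageref;
 *
 *		list_for_each_entry(pageref, pagereflist, list)
 *			example_flush_page(info, pageref->offset);
 *	}
 */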

int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs, i;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	for (i = 0; i < npagerefs; ++i)
		INIT_LIST_HEAD(&pagerefs[i].list);
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
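
/*
 * Example (illustrative only): a driver enables deferred I/O by pointing
 * info->fbdefio at its parameters before registration; the names and the
 * 50 ms delay below are hypothetical:
 *
 *	static struct fb_deferred_io example_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= example_deferred_io,
 *	};
 *
 *	info->fbdefio = &example_defio;
 *	fb_deferred_io_init(info);
 *	...
 *	register_framebuffer(info);
 */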

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);