Diffstat (limited to 'drivers/video/fbdev/core/fb_defio.c')
 drivers/video/fbdev/core/fb_defio.c | 209
 1 file changed, 146 insertions(+), 63 deletions(-)
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 842c66b3e33d..c730253ab85c 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -36,6 +36,60 @@ static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs
return page;
}
+static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
+ unsigned long offset,
+ struct page *page)
+{
+ struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct list_head *pos = &fbdefio->pagereflist;
+ unsigned long pgoff = offset >> PAGE_SHIFT;
+ struct fb_deferred_io_pageref *pageref, *cur;
+
+ if (WARN_ON_ONCE(pgoff >= info->npagerefs))
+ return NULL; /* incorrect allocation size */
+
+ /* 1:1 mapping between pageref and page offset */
+ pageref = &info->pagerefs[pgoff];
+
+ /*
+ * This check is to catch the case where a new process could start
+ * writing to the same page through a new PTE. This new access
+ * can cause a call to .page_mkwrite even if the original process'
+ * PTE is marked writable.
+ */
+ if (!list_empty(&pageref->list))
+ goto pageref_already_added;
+
+ pageref->page = page;
+ pageref->offset = pgoff << PAGE_SHIFT;
+
+ if (unlikely(fbdefio->sort_pagereflist)) {
+ /*
+ * We loop through the list of pagerefs before adding in
+ * order to keep the pagerefs sorted. This has significant
+ * overhead of O(n^2) with n being the number of written
+ * pages. If possible, drivers should try to work with
+ * unsorted page lists instead.
+ */
+ list_for_each_entry(cur, &fbdefio->pagereflist, list) {
+ if (cur->offset > pageref->offset)
+ break;
+ }
+ pos = &cur->list;
+ }
+
+ list_add_tail(&pageref->list, pos);
+
+pageref_already_added:
+ return pageref;
+}
+
+static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
+ struct fb_info *info)
+{
+ list_del_init(&pageref->list);
+}
+
/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
@@ -59,8 +113,7 @@ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
printk(KERN_ERR "no mapping available\n");
BUG_ON(!page->mapping);
- INIT_LIST_HEAD(&page->lru);
- page->index = vmf->pgoff;
+ page->index = vmf->pgoff; /* for page_mkclean() */
vmf->page = page;
return 0;
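
For reference, a minimal sketch of the pageref bookkeeping these helpers rely on, assuming the declarations this series adds to <linux/fb.h> (the exact layout there may differ):

	struct fb_deferred_io_pageref {
		struct page *page;
		unsigned long offset;
		/* private: entry on fbdefio->pagereflist while the page is dirty */
		struct list_head list;
	};

Each page of display memory has exactly one pageref, stored in the info->pagerefs array and indexed by page offset. That 1:1 mapping is what lets fb_deferred_io_pageref_get() resolve a fault with simple array arithmetic (offset >> PAGE_SHIFT) instead of a list walk.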
@@ -90,29 +143,30 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
-/* vm_ops->page_mkwrite handler */
-static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+/*
+ * Adds a page to the dirty list. Call this from struct
+ * vm_operations_struct.page_mkwrite.
+ */
+static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+ struct page *page)
{
- struct page *page = vmf->page;
- struct fb_info *info = vmf->vma->vm_private_data;
struct fb_deferred_io *fbdefio = info->fbdefio;
- struct list_head *pos = &fbdefio->pagelist;
-
- /* this is a callback we get when userspace first tries to
- write to the page. we schedule a workqueue. that workqueue
- will eventually mkclean the touched pages and execute the
- deferred framebuffer IO. then if userspace touches a page
- again, we repeat the same scheme */
-
- file_update_time(vmf->vma->vm_file);
+ struct fb_deferred_io_pageref *pageref;
+ vm_fault_t ret;
/* protect against the workqueue changing the page list */
mutex_lock(&fbdefio->lock);
/* first write in this cycle, notify the driver */
- if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
+ if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
fbdefio->first_io(info);
+ pageref = fb_deferred_io_pageref_get(info, offset, page);
+ if (WARN_ON_ONCE(!pageref)) {
+ ret = VM_FAULT_OOM;
+ goto err_mutex_unlock;
+ }
+
/*
* We want the page to remain locked from ->page_mkwrite until
* the PTE is marked dirty to avoid page_mkclean() being called
@@ -121,47 +175,49 @@ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
* Do this by locking the page here and informing the caller
* about it with VM_FAULT_LOCKED.
*/
- lock_page(page);
-
- /*
- * This check is to catch the case where a new process could start
- * writing to the same page through a new PTE. This new access
- * can cause a call to .page_mkwrite even if the original process'
- * PTE is marked writable.
- *
- * TODO: The lru field is owned by the page cache; hence the name.
- * We dequeue in fb_deferred_io_work() after flushing the
- * page's content into video memory. Instead of lru, fbdefio
- * should have it's own field.
- */
- if (!list_empty(&page->lru))
- goto page_already_added;
-
- if (unlikely(fbdefio->sort_pagelist)) {
- /*
- * We loop through the pagelist before adding in order to
- * keep the pagelist sorted. This has significant overhead
- * of O(n^2) with n being the number of written pages. If
- * possible, drivers should try to work with unsorted page
- * lists instead.
- */
- struct page *cur;
+ lock_page(pageref->page);
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
- if (cur->index > page->index)
- break;
- }
- pos = &cur->lru;
- }
-
- list_add_tail(&page->lru, pos);
-
-page_already_added:
mutex_unlock(&fbdefio->lock);
/* come back after delay to process the deferred IO */
schedule_delayed_work(&info->deferred_work, fbdefio->delay);
return VM_FAULT_LOCKED;
+
+err_mutex_unlock:
+ mutex_unlock(&fbdefio->lock);
+ return ret;
+}
+
+/*
+ * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+ * @info: The fbdev info structure
+ * @vmf: The VM fault
+ *
+ * This is a callback we get when userspace first tries to
+ * write to the page. We schedule a workqueue. That workqueue
+ * will eventually mkclean the touched pages and execute the
+ * deferred framebuffer IO. Then if userspace touches a page
+ * again, we repeat the same scheme.
+ *
+ * Returns:
+ * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+ */
+static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
+{
+ unsigned long offset = vmf->address - vmf->vma->vm_start;
+ struct page *page = vmf->page;
+
+ file_update_time(vmf->vma->vm_file);
+
+ return fb_deferred_io_track_page(info, offset, page);
+}
+
+/* vm_ops->page_mkwrite handler */
+static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+{
+ struct fb_info *info = vmf->vma->vm_private_data;
+
+ return fb_deferred_io_page_mkwrite(info, vmf);
}
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
@@ -182,44 +238,70 @@ int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
vma->vm_private_data = info;
return 0;
}
+EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
- struct fb_info *info = container_of(work, struct fb_info,
- deferred_work.work);
- struct list_head *node, *next;
- struct page *cur;
+ struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+ struct fb_deferred_io_pageref *pageref, *next;
struct fb_deferred_io *fbdefio = info->fbdefio;
/* here we mkclean the pages, then do all deferred IO */
mutex_lock(&fbdefio->lock);
- list_for_each_entry(cur, &fbdefio->pagelist, lru) {
+ list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+ struct page *cur = pageref->page;
lock_page(cur);
page_mkclean(cur);
unlock_page(cur);
}
- /* driver's callback with pagelist */
- fbdefio->deferred_io(info, &fbdefio->pagelist);
+ /* driver's callback with pagereflist */
+ fbdefio->deferred_io(info, &fbdefio->pagereflist);
/* clear the list */
- list_for_each_safe(node, next, &fbdefio->pagelist) {
- list_del_init(node);
- }
+ list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+ fb_deferred_io_pageref_put(pageref, info);
+
mutex_unlock(&fbdefio->lock);
}
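
With this change, the list handed to the driver's deferred_io callback holds pagerefs instead of raw pages, so drivers must iterate it via the pageref's list member. A minimal sketch of a converted callback; the mydrv_* names are hypothetical:

	static void mydrv_deferred_io(struct fb_info *info, struct list_head *pagereflist)
	{
		struct fb_deferred_io_pageref *pageref;

		list_for_each_entry(pageref, pagereflist, list) {
			/* pageref->offset is the page-aligned byte offset into smem */
			mydrv_flush_range(info, pageref->offset, PAGE_SIZE); /* hypothetical helper */
		}
	}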
-void fb_deferred_io_init(struct fb_info *info)
+int fb_deferred_io_init(struct fb_info *info)
{
struct fb_deferred_io *fbdefio = info->fbdefio;
+ struct fb_deferred_io_pageref *pagerefs;
+ unsigned long npagerefs, i;
+ int ret;
BUG_ON(!fbdefio);
+
+ if (WARN_ON(!info->fix.smem_len))
+ return -EINVAL;
+
mutex_init(&fbdefio->lock);
INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
- INIT_LIST_HEAD(&fbdefio->pagelist);
+ INIT_LIST_HEAD(&fbdefio->pagereflist);
if (fbdefio->delay == 0) /* set a default of 1 s */
fbdefio->delay = HZ;
+
+ npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);
+
+ /* alloc a page ref for each page of the display memory */
+ pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
+ if (!pagerefs) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ for (i = 0; i < npagerefs; ++i)
+ INIT_LIST_HEAD(&pagerefs[i].list);
+ info->npagerefs = npagerefs;
+ info->pagerefs = pagerefs;
+
+ return 0;
+
+err:
+ mutex_destroy(&fbdefio->lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
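
Because fb_deferred_io_init() can now fail (zero smem_len, or the kvcalloc() of the pageref array), callers have to check its return value. A sketch of the probe-side setup under the same hypothetical mydrv_* names:

	static struct fb_deferred_io mydrv_defio = {
		.delay		= HZ / 20,	/* flush dirty pages every 50 ms */
		.deferred_io	= mydrv_deferred_io,
	};

	/* in probe, after info->fix.smem_len has been set */
	info->fbdefio = &mydrv_defio;
	ret = fb_deferred_io_init(info);
	if (ret)
		goto err_framebuffer_release;	/* hypothetical unwind label */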
@@ -246,6 +328,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
page->mapping = NULL;
}
+ kvfree(info->pagerefs);
mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
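
On the teardown side, fb_deferred_io_cleanup() now also frees the pageref array, so it still must run after unregistering the framebuffer but before the shadow memory is released. A sketch, again with hypothetical mydrv_* naming:

	static int mydrv_remove(struct platform_device *pdev)
	{
		struct fb_info *info = platform_get_drvdata(pdev);

		unregister_framebuffer(info);
		fb_deferred_io_cleanup(info);	/* cancels the delayed work, kvfree()s info->pagerefs */
		vfree(info->screen_buffer);	/* vmalloc'ed shadow buffer backing smem */
		framebuffer_release(info);

		return 0;
	}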