author     Jiri Kosina <jkosina@suse.cz>  2014-02-20 14:54:28 +0100
committer  Jiri Kosina <jkosina@suse.cz>  2014-02-20 14:54:28 +0100
commit     d4263348f796f29546f90802177865dd4379dd0a (patch)
tree       adcbdaebae584eee2f32fab95e826e8e49eef385 /drivers/staging/android
parent     be873ac782f5ff5ee6675f83929f4fe6737eead2 (diff)
parent     6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff)
Merge branch 'master' into for-next
Diffstat (limited to 'drivers/staging/android')
26 files changed, 4869 insertions(+), 56 deletions(-)
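For context, the ION allocator this merge brings in is driven from userspace through ioctls on /dev/ion. The following is a minimal illustrative sketch only, assuming the uapi definitions this commit adds under drivers/staging/android/uapi/ion.h (the include path and error handling are abbreviated); it is not part of the commit:

	/* Allocate an ION buffer and export it as a dma-buf fd. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include "uapi/ion.h"	/* hypothetical include path for this sketch */

	int main(void)
	{
		struct ion_allocation_data alloc = {
			.len = 4096,
			.align = 4096,
			.heap_id_mask = ~0u,	/* try any heap */
			.flags = 0,
		};
		struct ion_fd_data share;
		int ion_fd = open("/dev/ion", O_RDWR);

		if (ion_fd < 0)
			return 1;
		/* kernel fills in alloc.handle on success */
		if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
			return 1;
		share.handle = alloc.handle;
		/* export the buffer as a dma-buf fd shareable with drivers */
		if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
			return 1;
		printf("dma-buf fd: %d\n", share.fd);
		return 0;
	}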
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 22cf17dcb7da..b6b869261f32 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -98,6 +98,8 @@ config SW_SYNC_USER
 	  *WARNING* improper use of this can result in deadlocking kernel
 	  drivers from userspace.
 
+source "drivers/staging/android/ion/Kconfig"
+
 endif # if ANDROID
 
 endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c136299e05af..0a01e1914905 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,5 +1,7 @@
 ccflags-y += -I$(src)			# needed for trace events
 
+obj-y					+= ion/
+
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)		+= logger.o
diff --git a/drivers/staging/android/alarm-dev.c b/drivers/staging/android/alarm-dev.c
index 647694f43dcf..2fc7cdd4c4e3 100644
--- a/drivers/staging/android/alarm-dev.c
+++ b/drivers/staging/android/alarm-dev.c
@@ -68,11 +68,10 @@ static struct devalarm alarms[ANDROID_ALARM_TYPE_COUNT];
  */
 static int is_wakeup(enum android_alarm_type type)
 {
-	return (type == ANDROID_ALARM_RTC_WAKEUP ||
-		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP);
+	return type == ANDROID_ALARM_RTC_WAKEUP ||
+		type == ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP;
 }
 
-
 static void devalarm_start(struct devalarm *alrm, ktime_t exp)
 {
 	if (is_wakeup(alrm->type))
@@ -111,7 +110,6 @@ static void alarm_clear(enum android_alarm_type alarm_type)
 	}
 	alarm_enabled &= ~alarm_type_mask;
 	spin_unlock_irqrestore(&alarm_slock, flags);
-
 }
 
 static void alarm_set(enum android_alarm_type alarm_type,
@@ -280,6 +278,7 @@ static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
 	return 0;
 }
+
 #ifdef CONFIG_COMPAT
 static long alarm_compat_ioctl(struct file *file, unsigned int cmd,
 			       unsigned long arg)
@@ -371,7 +370,6 @@ static void devalarm_triggered(struct devalarm *alarm)
 	spin_unlock_irqrestore(&alarm_slock, flags);
 }
 
-
 static enum hrtimer_restart devalarm_hrthandler(struct hrtimer *hrt)
 {
 	struct devalarm *devalrm = container_of(hrt, struct devalarm, u.hrt);
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
 
 	/* If size is not set, or set to 0, always return EOF. */
 	if (asma->size == 0)
-		goto out;
+		goto out_unlock;
 
 	if (!asma->file) {
 		ret = -EBADF;
-		goto out;
+		goto out_unlock;
 	}
 
-	ret = asma->file->f_op->read(asma->file, buf, len, pos);
-	if (ret < 0)
-		goto out;
+	mutex_unlock(&ashmem_mutex);
 
-	/** Update backing file pos, since f_ops->read() doesn't */
-	asma->file->f_pos = *pos;
+	/*
+	 * asma and asma->file are used outside the lock here.  We assume
+	 * once asma->file is set it will never be changed, and will not
+	 * be destroyed until all references to the file are dropped and
+	 * ashmem_release is called.
+	 */
+	ret = asma->file->f_op->read(asma->file, buf, len, pos);
+	if (ret >= 0) {
+		/** Update backing file pos, since f_ops->read() doesn't */
+		asma->file->f_pos = *pos;
+	}
+	return ret;
 
-out:
+out_unlock:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
 }
@@ -498,6 +506,7 @@ out:
 
 static int set_name(struct ashmem_area *asma, void __user *name)
 {
+	int len;
 	int ret = 0;
 	char local_name[ASHMEM_NAME_LEN];
 
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
 	 * variable that does not need protection and later copy the local
 	 * variable to the structure member with lock held.
 	 */
-	if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
-		return -EFAULT;
-
+	len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+	if (len < 0)
+		return len;
+	if (len == ASHMEM_NAME_LEN)
+		local_name[ASHMEM_NAME_LEN - 1] = '\0';
 	mutex_lock(&ashmem_mutex);
 	/* cannot change an existing mapping's name */
-	if (unlikely(asma->file)) {
+	if (unlikely(asma->file))
 		ret = -EINVAL;
-		goto out;
-	}
-	memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
-		local_name, ASHMEM_NAME_LEN);
-	asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
-	mutex_unlock(&ashmem_mutex);
+	else
+		strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+	mutex_unlock(&ashmem_mutex);
 
 	return ret;
 }
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
new file mode 100644
index 000000000000..0f8fec1f84e5
--- /dev/null
+++ b/drivers/staging/android/ion/Kconfig
@@ -0,0 +1,35 @@
+menuconfig ION
+	bool "Ion Memory Manager"
+	depends on HAVE_MEMBLOCK
+	select GENERIC_ALLOCATOR
+	select DMA_SHARED_BUFFER
+	---help---
+	  Chose this option to enable the ION Memory Manager,
+	  used by Android to efficiently allocate buffers
+	  from userspace that can be shared between drivers.
+	  If you're not using Android its probably safe to
+	  say N here.
+
+config ION_TEST
+	tristate "Ion Test Device"
+	depends on ION
+	help
+	  Choose this option to create a device that can be used to test the
+	  kernel and device side ION functions.
+
+config ION_DUMMY
+	bool "Dummy Ion driver"
+	depends on ION
+	help
+	  Provides a dummy ION driver that registers the
+	  /dev/ion device and some basic heaps. This can
+	  be used for testing the ION infrastructure if
+	  one doesn't have access to hardware drivers that
+	  use ION.
+
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
+	help
+	  Choose this option if you wish to use ion on an nVidia Tegra.
+
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
new file mode 100644
index 000000000000..b56fd2bf2b4f
--- /dev/null
+++ b/drivers/staging/android/ion/Makefile
@@ -0,0 +1,10 @@
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 000000000000..ee3a7380e53b
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+	compat_size_t len;
+	compat_size_t align;
+	compat_uint_t heap_id_mask;
+	compat_uint_t flags;
+	compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+	compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \
+				      struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \
+				      struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \
+				      struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data32->len);
+	err |= put_user(s, &data->len);
+	err |= get_user(s, &data32->align);
+	err |= put_user(s, &data->align);
+	err |= get_user(u, &data32->heap_id_mask);
+	err |= put_user(u, &data->heap_id_mask);
+	err |= get_user(u, &data32->flags);
+	err |= put_user(u, &data->flags);
+	err |= get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_get_ion_handle_data(
+			struct compat_ion_handle_data __user *data32,
+			struct ion_handle_data __user *data)
+{
+	compat_int_t i;
+	int err;
+
+	err = get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_put_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data->len);
+	err |= put_user(s, &data32->len);
+	err |= get_user(s, &data->align);
+	err |= put_user(s, &data32->align);
+	err |= get_user(u, &data->heap_id_mask);
+	err |= put_user(u, &data32->heap_id_mask);
+	err |= get_user(u, &data->flags);
+	err |= put_user(u, &data32->flags);
+	err |= get_user(i, &data->handle);
+	err |= put_user(i, &data32->handle);
+
+	return err;
+}
+
+static int compat_get_ion_custom_data(
+			struct compat_ion_custom_data __user *data32,
+			struct ion_custom_data __user *data)
+{
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+	int err;
+
+	err = get_user(cmd, &data32->cmd);
+	err |= put_user(cmd, &data->cmd);
+	err |= get_user(arg, &data32->arg);
+	err |= put_user(arg, &data->arg);
+
+	return err;
+};
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	if (!filp->f_op || !filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_ION_IOC_ALLOC:
+	{
+		struct compat_ion_allocation_data __user *data32;
+		struct ion_allocation_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_allocation_data(data32, data);
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+							(unsigned long)data);
+		err = compat_put_ion_allocation_data(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_ION_IOC_FREE:
+	{
+		struct compat_ion_handle_data __user *data32;
+		struct ion_handle_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_handle_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+							(unsigned long)data);
+	}
+	case COMPAT_ION_IOC_CUSTOM: {
+		struct compat_ion_custom_data __user *data32;
+		struct ion_custom_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_ion_custom_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+							(unsigned long)data);
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	case ION_IOC_IMPORT:
+	case ION_IOC_SYNC:
+		return filp->f_op->unlocked_ioctl(filp, cmd,
+						(unsigned long)compat_ptr(arg));
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 000000000000..c2ad5893dfda
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,30 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
new file mode 100644
index 000000000000..574066ff73f8
--- /dev/null
+++ b/drivers/staging/android/ion/ion.c
@@ -0,0 +1,1549 @@
+/*
+ *
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ * @heaps:		list of all the heaps in the system
+ * @user_clients:	list of all the clients created from userspace
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	long (*custom_ioctl) (struct ion_client *client, unsigned int cmd,
+			      unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @idr:		an idr space for allocating handle ids
+ * @lock:		lock protecting the tree of handles
+ * @name:		used for debugging
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both handles tree
+ * as well as the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct idr idr;
+	struct mutex lock;
+	const char *name;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @id:			client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	int id;
+};
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+	return (buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+	return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+	return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+	return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+				     struct ion_device *dev,
+				     unsigned long len,
+				     unsigned long align,
+				     unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, ret;
+
+	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	buffer->flags = flags;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+	if (ret) {
+		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+			goto err2;
+
+		ion_heap_freelist_drain(heap, 0);
+		ret = heap->ops->allocate(heap, buffer, len, align,
+					  flags);
+		if (ret)
+			goto err2;
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+
+	table = heap->ops->map_dma(heap, buffer);
+	if (WARN_ONCE(table == NULL,
+			"heap->ops->map_dma should return ERR_PTR on error"))
+		table = ERR_PTR(-EINVAL);
+	if (IS_ERR(table)) {
+		heap->ops->free(buffer);
+		kfree(buffer);
+		return ERR_PTR(PTR_ERR(table));
+	}
+	buffer->sg_table = table;
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+		struct scatterlist *sg;
+		int i, j, k = 0;
+
+		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+		if (!buffer->pages) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
+
+			for (j = 0; j < sg->length / PAGE_SIZE; j++)
+				buffer->pages[k++] = page++;
+		}
+
+		if (ret)
+			goto err;
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+	INIT_LIST_HEAD(&buffer->vmas);
+	mutex_init(&buffer->lock);
+	/* this will set up dma addresses for the sglist -- it is not
+	   technically correct as per the dma api -- a specific
+	   device isn't really taking ownership here.  However, in practice on
+	   our systems the only dma_address space is physical addresses.
+	   Additionally, we can't afford the overhead of invalidating every
+	   allocation via dma_map_sg. The implicit contract here is that
+	   memory comming from the heaps is ready for dma, ie if it has a
+	   cached mapping that mapping has been invalidated */
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
+		sg_dma_address(sg) = sg_phys(sg);
+	mutex_lock(&dev->buffer_lock);
+	ion_buffer_add(dev, buffer);
+	mutex_unlock(&dev->buffer_lock);
+	return buffer;
+
+err:
+	heap->ops->unmap_dma(heap, buffer);
+	heap->ops->free(buffer);
+err1:
+	if (buffer->pages)
+		vfree(buffer->pages);
+err2:
+	kfree(buffer);
+	return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+	if (WARN_ON(buffer->kmap_cnt > 0))
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+	buffer->heap->ops->free(buffer);
+	if (buffer->pages)
+		vfree(buffer->pages);
+	kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_heap *heap = buffer->heap;
+	struct ion_device *dev = buffer->dev;
+
+	mutex_lock(&dev->buffer_lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->buffer_lock);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_freelist_add(heap, buffer);
+	else
+		ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+	mutex_lock(&buffer->lock);
+	buffer->handle_count++;
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+	/*
+	 * when a buffer is removed from a handle, if it is not in
+	 * any other handles, copy the taskcomm and the pid of the
+	 * process it's being removed from into the buffer.  At this
+	 * point there will be no way to track what processes this buffer is
+	 * being used by, it only exists as a dma_buf file descriptor.
+	 * The taskcomm and pid can provide a debug hint as to where this fd
+	 * is in the system
+	 */
+	mutex_lock(&buffer->lock);
+	buffer->handle_count--;
+	BUG_ON(buffer->handle_count < 0);
+	if (!buffer->handle_count) {
+		struct task_struct *task;
+
+		task = current->group_leader;
+		get_task_comm(buffer->task_comm, task);
+		buffer->pid = task_pid_nr(task);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+				     struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	RB_CLEAR_NODE(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	ion_buffer_add_to_handle(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
+	idr_remove(&client->idr, handle->id);
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &client->handles);
+
+	ion_buffer_remove_from_handle(buffer);
+	ion_buffer_put(buffer);
+
+	kfree(handle);
+}
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+static int ion_handle_put(struct ion_handle *handle)
+{
+	struct ion_client *client = handle->client;
+	int ret;
+
+	mutex_lock(&client->lock);
+	ret = kref_put(&handle->ref, ion_handle_destroy);
+	mutex_unlock(&client->lock);
+
+	return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+		if (buffer < entry->buffer)
+			n = n->rb_left;
+		else if (buffer > entry->buffer)
+			n = n->rb_right;
+		else
+			return entry;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+						int id)
+{
+	struct ion_handle *handle;
+
+	mutex_lock(&client->lock);
+	handle = idr_find(&client->idr, id);
+	if (handle)
+		ion_handle_get(handle);
+	mutex_unlock(&client->lock);
+
+	return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	WARN_ON(!mutex_is_locked(&client->lock));
+	return (idr_find(&client->idr, handle->id) == handle);
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	int id;
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	handle->id = id;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle->buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (handle->buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: buffer already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+
+	return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap;
+	int ret;
+
+	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+		 len, align, heap_id_mask, flags);
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client, and matches the
+	 * request of the caller allocate from it.  Repeat until allocate has
+	 * succeeded or all heaps have been tried
+	 */
+	len = PAGE_ALIGN(len);
+
+	if (!len)
+		return ERR_PTR(-EINVAL);
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & heap_id_mask))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR(buffer))
+			break;
+	}
+	up_read(&dev->lock);
+
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer))
+		return ERR_PTR(PTR_ERR(buffer));
+
+	handle = ion_handle_create(client, buffer);
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference, drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	if (IS_ERR(handle))
+		return handle;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	bool valid_handle;
+
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		mutex_unlock(&client->lock);
+		return;
+	}
+	mutex_unlock(&client->lock);
+	ion_handle_put(handle);
+}
+EXPORT_SYMBOL(ion_free);
+
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("%s: ion_phys is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (WARN_ONCE(vaddr == NULL,
+			"heap->ops->map_kernel should return ERR_PTR on error"))
+		return ERR_PTR(-EINVAL);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	size_t sizes[ION_NUM_HEAP_IDS] = {0};
+	const char *names[ION_NUM_HEAP_IDS] = {NULL};
+	int i;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		unsigned int id = handle->buffer->heap->id;
+
+		if (!names[id])
+			names[id] = handle->buffer->heap->name;
+		sizes[id] += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+
+	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+		if (!names[i])
+			continue;
+		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+	}
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	char debug_name[64];
+	pid_t pid;
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	/* don't bother to store task struct for kernel threads,
+	   they can't be killed anyway */
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+	if (!client) {
+		if (task)
+			put_task_struct(current->group_leader);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	idr_init(&client->idr);
+	mutex_init(&client->lock);
+	client->name = name;
+	client->task = task;
+	client->pid = pid;
+
+	down_write(&dev->lock);
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
+
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
+
+	snprintf(debug_name, 64, "%u", client->pid);
+	client->debug_root = debugfs_create_file(debug_name, 0664,
+						 dev->debug_root, client,
+						 &debug_client_fops);
+	up_write(&dev->lock);
+
+	return client;
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		ion_handle_destroy(&handle->ref);
+	}
+
+	idr_destroy(&client->idr);
+
+	down_write(&dev->lock);
+	if (client->task)
+		put_task_struct(client->task);
+	rb_erase(&client->node, &dev->clients);
+	debugfs_remove_recursive(client->debug_root);
+	up_write(&dev->lock);
+
+	kfree(client);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
+
+	mutex_lock(&buffer->lock);
+	for (i = 0; i < pages; i++) {
+		struct page *page = buffer->pages[i];
+
+		if (ion_buffer_page_is_dirty(page))
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+							PAGE_SIZE, dir);
+
+		ion_buffer_page_clean(buffer->pages + i);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
+			       NULL);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	unsigned long pfn;
+	int ret;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
+	mutex_unlock(&buffer->lock);
+	if (ret)
+		return VM_FAULT_ERROR;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	int ret = 0;
+
+	if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping "
+		       "to userspace\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+							VM_DONTDUMP;
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		ion_vm_open(vma);
+		return 0;
+	}
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	mutex_lock(&buffer->lock);
+	/* now map it to userspace */
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
+
+	if (ret)
+		pr_err("%s: failure mapping buffer to userspace\n",
+		       __func__);
+
+	return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+	return;
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
+					size_t len,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+	return 0;
+}
+
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
+				       size_t len,
+				       enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.mmap = ion_mmap,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.kmap_atomic = ion_dma_buf_kmap,
+	.kunmap_atomic = ion_dma_buf_kunmap,
+	.kmap = ion_dma_buf_kmap,
+	.kunmap = ion_dma_buf_kunmap,
+};
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct dma_buf *dmabuf;
+	bool valid_handle;
+
+	mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	ion_buffer_get(buffer);
+	mutex_unlock(&client->lock);
+
+	dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR);
+	if (IS_ERR(dmabuf)) {
+		ion_buffer_put(buffer);
+		return dmabuf;
+	}
+
+	return dmabuf;
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+	struct dma_buf *dmabuf;
+	int fd;
+
+	dmabuf = ion_share_dma_buf(client, handle);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+	if (fd < 0)
+		dma_buf_put(dmabuf);
+
+	return fd;
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+	int ret;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return ERR_PTR(PTR_ERR(dmabuf));
+	/* if this memory came from ion */
+
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not import dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = dmabuf->priv;
+
+	mutex_lock(&client->lock);
+	/* if a handle exists for this buffer just take a reference to it */
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR(handle)) {
+		ion_handle_get(handle);
+		mutex_unlock(&client->lock);
+		goto end;
+	}
+	mutex_unlock(&client->lock);
+
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR(handle))
+		goto end;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+end:
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+static int ion_sync_for_device(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	/* if this memory came from ion */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not sync dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
+	buffer = dmabuf->priv;
+
+	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+	dma_buf_put(dmabuf);
+	return 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_SYNC:
+	case ION_IOC_FREE:
+	case ION_IOC_CUSTOM:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
+	struct ion_device *dev = client->dev;
+	struct ion_handle *cleanup_handle = NULL;
+	int ret = 0;
+	unsigned int dir;
+
+	union {
+		struct ion_fd_data fd;
+		struct ion_allocation_data allocation;
+		struct ion_handle_data handle;
+		struct ion_custom_data custom;
+	} data;
+
+	dir = ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_alloc(client, data.allocation.len,
+						data.allocation.align,
+						data.allocation.heap_id_mask,
+						data.allocation.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		data.allocation.handle = handle->id;
+
+		cleanup_handle = handle;
+		break;
+	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		ion_free(client, handle);
+		ion_handle_put(handle);
+		break;
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		data.fd.fd = ion_share_dma_buf_fd(client, handle);
+		ion_handle_put(handle);
+		if (data.fd.fd < 0)
+			ret = data.fd.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_handle *handle;
+		handle = ion_import_dma_buf(client, data.fd.fd);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		else
+			data.handle.handle = handle->id;
+		break;
+	}
+	case ION_IOC_SYNC:
+	{
+		ret = ion_sync_for_device(client, data.fd.fd);
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, data.custom.cmd,
+					data.custom.arg);
+		break;
+	}
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+			if (cleanup_handle)
+				ion_free(client, cleanup_handle);
+			return -EFAULT;
+		}
+	}
+	return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_destroy(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	client = ion_client_create(dev, "user");
+	if (IS_ERR(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
+static const struct file_operations ion_fops = {
+	.owner          = THIS_MODULE,
+	.open           = ion_open,
+	.release        = ion_release,
+	.unlocked_ioctl = ion_ioctl,
+	.compat_ioctl   = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   unsigned int id)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->id == id)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+	size_t total_size = 0;
+	size_t total_orphaned_size = 0;
+
+	seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size");
+	seq_printf(s, "----------------------------------------------------\n");
+
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->id);
+		if (!size)
+			continue;
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16.s %16u %16zu\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16.s %16u %16zu\n", client->name,
+				   client->pid, size);
+		}
+	}
+	seq_printf(s, "----------------------------------------------------\n");
+	seq_printf(s, "orphaned allocations (info is from last known client):"
+		   "\n");
+	mutex_lock(&dev->buffer_lock);
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+						     node);
+		if (buffer->heap->id != heap->id)
+			continue;
+		total_size += buffer->size;
+		if (!buffer->handle_count) {
+			seq_printf(s, "%16.s %16u %16zu %d %d\n",
+				   buffer->task_comm, buffer->pid,
+				   buffer->size, buffer->kmap_cnt,
+				   atomic_read(&buffer->ref.refcount));
+			total_orphaned_size += buffer->size;
+		}
+	}
+	mutex_unlock(&dev->buffer_lock);
+	seq_printf(s, "----------------------------------------------------\n");
+	seq_printf(s, "%16.s %16zu\n", "total orphaned",
+		   total_orphaned_size);
+	seq_printf(s, "%16.s %16zu\n", "total ", total_size);
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		seq_printf(s, "%16.s %16zu\n", "deferred free",
+				heap->free_list_size);
+	seq_printf(s, "----------------------------------------------------\n");
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+#ifdef DEBUG_HEAP_SHRINKER
+static int debug_shrink_set(void *data, u64 val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
+
+	if (!val)
+		return 0;
+
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	sc.nr_to_scan = objs;
+
+	heap->shrinker.shrink(&heap->shrinker, &sc);
+	return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = -1;
+	sc.nr_to_scan = 0;
+
+	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
+	*val = objs;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+			debug_shrink_set, "%llu\n");
+#endif
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_init_deferred_free(heap);
+
+	heap->dev = dev;
+	down_write(&dev->lock);
+	/* use negative heap->id to reverse the priority -- when traversing
+	   the list later attempt higher id numbers first */
+	plist_node_init(&heap->node, -heap->id);
+	plist_add(&heap->node, &dev->heaps);
+	debugfs_create_file(heap->name, 0664, dev->debug_root, heap,
+			    &debug_heap_fops);
+#ifdef DEBUG_HEAP_SHRINKER
+	if (heap->shrinker.shrink) {
+		char debug_name[64];
+
+		snprintf(debug_name, 64, "%s_shrink", heap->name);
+		debugfs_create_file(debug_name, 0644, dev->debug_root, heap,
+				    &debug_shrink_fops);
+	}
+#endif
+	up_write(&dev->lock);
+}
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
+{
+	struct ion_device *idev;
+	int ret;
+
+	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+	if (!idev)
+		return ERR_PTR(-ENOMEM);
+
+	idev->dev.minor = MISC_DYNAMIC_MINOR;
+	idev->dev.name = "ion";
+	idev->dev.fops = &ion_fops;
+	idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		return ERR_PTR(ret);
+	}
+
+	idev->debug_root = debugfs_create_dir("ion", NULL);
+	if (!idev->debug_root)
+		pr_err("ion: failed to create debug files.\n");
+
+	idev->custom_ioctl = custom_ioctl;
+	idev->buffers = RB_ROOT;
+	mutex_init(&idev->buffer_lock);
+	init_rwsem(&idev->lock);
+	plist_head_init(&idev->heaps);
+	idev->clients = RB_ROOT;
+	return idev;
+}
+
+void ion_device_destroy(struct ion_device *dev)
+{
+	misc_deregister(&dev->dev);
+	/* XXX need to free the heaps and clients ? */
+	kfree(dev);
+}
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++) {
+		if (data->heaps[i].size == 0)
+			continue;
+
+		if (data->heaps[i].base == 0) {
+			phys_addr_t paddr;
+			paddr = memblock_alloc_base(data->heaps[i].size,
+						    data->heaps[i].align,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+			if (!paddr) {
+				pr_err("%s: error allocating memblock for "
+				       "heap %d\n",
+					__func__, i);
+				continue;
+			}
+			data->heaps[i].base = paddr;
+		} else {
+			int ret = memblock_reserve(data->heaps[i].base,
+					       data->heaps[i].size);
+			if (ret)
+				pr_err("memblock reserve of %zx@%lx failed\n",
+				       data->heaps[i].size,
+				       data->heaps[i].base);
+		}
+		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
+			data->heaps[i].name,
+			data->heaps[i].base,
+			data->heaps[i].size);
+	}
+}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
new file mode 100644
index 000000000000..dcd2a0cdb192
--- /dev/null
+++ b/drivers/staging/android/ion/ion.h
@@ -0,0 +1,204 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "../uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/* This should be removed some day when phys_addr_t's are fully
+   plumbed in the kernel, and all instances of ion_phys_addr_t should
+   be converted to phys_addr_t.  For the time being many kernel interfaces
+   do not accept phys_addr_t's that would have to */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:	type of the heap from ion_heap_type enum
+ * @id:		unique identifier for heap.  When allocating higher numbers
+ *		will be allocated from first.  At allocation these are passed
+ *		as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS.
+ * @name:	used for debug purposes
+ * @base:	base address of heap in physical memory if applicable
+ * @size:	size of the heap in bytes if applicable
+ * @align:	required alignment in physical memory if applicable
+ * @priv:	private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+	enum ion_heap_type type;
+	unsigned int id;
+	const char *name;
+	ion_phys_addr_t base;
+	size_t size;
+	ion_phys_addr_t align;
+	void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:		number of structures in the array
+ * @heaps:	array of platform_heap structions
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+	int nr;
+	struct ion_platform_heap *heaps;
+};
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:	platform data specifying starting physical address and
+ *		size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specfic sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
+/**
+ * ion_client_create() -  allocate a client and returns it
+ * @dev:		the global ion device
+ * @heap_type_mask:	mask of heaps this client can allocate from
+ * @name:		used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name);
+
+/**
+ * ion_client_destroy() -  free's a client and all it's handles
+ * @client:	the client
+ *
+ * Free the provided client and all it's resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:		the client
+ * @len:		size of the allocation
+ * @align:		requested allocation alignment, lots of hardware blocks
+ *			have alignment requirements of some kind
+ * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set
+ *			heaps will be tried in order from highest to lowest
+ *			id
+ * @flags:		heap flags, the low 16 bits are consumed by ion, the
+ *			high 16 bits are passed on to the respective heap and
+ *			can be heap custom
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  It't output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_map_dma - return an sg_table describing a handle
+ * @client:	the client
+ * @handle:	the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Map the given handle into the kernel and return a kernel address that
+ * can be used to access this address.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:	the client
+ * @handle:	the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:	the client
+ * @handle:	the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - given an dma-buf fd from the ion exporter get handle
+ * @client:	the client
+ * @fd:		the dma-buf fd
+ *
+ * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf,
+ * import that fd and return a handle representing it.  If a dma-buf from
+ * another exporter is passed in this function will return ERR_PTR(-EINVAL)
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd);
+
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
new file mode 100644
index 000000000000..3cb05b9b0e93
--- /dev/null
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -0,0 +1,194 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#include <linux/spinlock.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_carveout_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; +}; + +ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + unsigned long offset = gen_pool_alloc(carveout_heap->pool, size); + + if (!offset) + return ION_CARVEOUT_ALLOCATE_FAIL; + + return offset; +} + +void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, + unsigned long size) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + if (addr == ION_CARVEOUT_ALLOCATE_FAIL) + return; + gen_pool_free(carveout_heap->pool, addr, size); +} + +static int ion_carveout_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + *addr = paddr; + *len = buffer->size; + return 0; +} + +static int ion_carveout_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct sg_table *table; + ion_phys_addr_t paddr; + int ret; + + if (align > PAGE_SIZE) + return -EINVAL; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto err_free; + + paddr = ion_carveout_allocate(heap, size, align); + if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { + ret = -ENOMEM; + goto err_free_table; + } + + sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0); + buffer->priv_virt = table; + + return 0; + +err_free_table: + sg_free_table(table); +err_free: + kfree(table); + return ret; +} + +static void ion_carveout_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + ion_carveout_free(heap, paddr, buffer->size); + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_carveout_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops carveout_heap_ops = { + .allocate = ion_carveout_heap_allocate, + .free = ion_carveout_heap_free, + .phys = ion_carveout_heap_phys, + .map_dma = ion_carveout_heap_map_dma, + .unmap_dma = ion_carveout_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap 
*ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_carveout_heap *carveout_heap;
+	struct page *page;
+	size_t size;
+	int ret;
+
+	page = pfn_to_page(PFN_DOWN(heap_data->base));
+	size = heap_data->size;
+
+	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+	if (ret)
+		return ERR_PTR(ret);
+
+	carveout_heap = kzalloc(sizeof(struct ion_carveout_heap), GFP_KERNEL);
+	if (!carveout_heap)
+		return ERR_PTR(-ENOMEM);
+
+	/* min_alloc_order of 12 gives a pool granularity of 2^12 = 4KB */
+	carveout_heap->pool = gen_pool_create(12, -1);
+	if (!carveout_heap->pool) {
+		kfree(carveout_heap);
+		return ERR_PTR(-ENOMEM);
+	}
+	carveout_heap->base = heap_data->base;
+	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+		     -1);
+	carveout_heap->heap.ops = &carveout_heap_ops;
+	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+	return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct ion_carveout_heap, heap);
+
+	gen_pool_destroy(carveout_heap->pool);
+	kfree(carveout_heap);
+}
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
new file mode 100644
index 000000000000..d40f5f831808
--- /dev/null
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_chunk_heap { + struct ion_heap heap; + struct gen_pool *pool; + ion_phys_addr_t base; + unsigned long chunk_size; + unsigned long size; + unsigned long allocated; +}; + +static int ion_chunk_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table; + struct scatterlist *sg; + int ret, i; + unsigned long num_chunks; + unsigned long allocated_size; + + if (align > chunk_heap->chunk_size) + return -EINVAL; + + allocated_size = ALIGN(size, chunk_heap->chunk_size); + num_chunks = allocated_size / chunk_heap->chunk_size; + + if (allocated_size > chunk_heap->size - chunk_heap->allocated) + return -ENOMEM; + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + ret = sg_alloc_table(table, num_chunks, GFP_KERNEL); + if (ret) { + kfree(table); + return ret; + } + + sg = table->sgl; + for (i = 0; i < num_chunks; i++) { + unsigned long paddr = gen_pool_alloc(chunk_heap->pool, + chunk_heap->chunk_size); + if (!paddr) + goto err; + sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)), + chunk_heap->chunk_size, 0); + sg = sg_next(sg); + } + + buffer->priv_virt = table; + chunk_heap->allocated += allocated_size; + return 0; +err: + sg = table->sgl; + for (i -= 1; i >= 0; i--) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + sg = sg_next(sg); + } + sg_free_table(table); + kfree(table); + return -ENOMEM; +} + +static void ion_chunk_heap_free(struct ion_buffer *buffer) +{ + struct ion_heap *heap = buffer->heap; + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + struct sg_table *table = buffer->priv_virt; + struct scatterlist *sg; + int i; + unsigned long allocated_size; + + allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size); + + ion_heap_buffer_zero(buffer); + + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + + for_each_sg(table->sgl, sg, table->nents, i) { + gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), + sg->length); + } + chunk_heap->allocated -= allocated_size; + sg_free_table(table); + kfree(table); +} + +static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_chunk_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return; +} + +static struct ion_heap_ops chunk_heap_ops = { + .allocate = ion_chunk_heap_allocate, + .free = ion_chunk_heap_free, + .map_dma = ion_chunk_heap_map_dma, + .unmap_dma = ion_chunk_heap_unmap_dma, + .map_user = ion_heap_map_user, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, +}; + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_chunk_heap *chunk_heap; + int ret; + struct page *page; + size_t size; + + page = pfn_to_page(PFN_DOWN(heap_data->base)); + size = heap_data->size; + + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); + if (ret) + return 
ERR_PTR(ret);
+
+	chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return ERR_PTR(-ENOMEM);
+
+	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+					   PAGE_SHIFT, -1);
+	if (!chunk_heap->pool) {
+		ret = -ENOMEM;
+		goto error_gen_pool_create;
+	}
+	chunk_heap->base = heap_data->base;
+	chunk_heap->size = heap_data->size;
+	chunk_heap->allocated = 0;
+
+	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+	chunk_heap->heap.ops = &chunk_heap_ops;
+	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+	pr_info("%s: base %lu size %zu align %lu\n", __func__, chunk_heap->base,
+		heap_data->size, heap_data->align);
+
+	return &chunk_heap->heap;
+
+error_gen_pool_create:
+	kfree(chunk_heap);
+	return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_chunk_heap *chunk_heap =
+	     container_of(heap, struct ion_chunk_heap, heap);
+
+	gen_pool_destroy(chunk_heap->pool);
+	kfree(chunk_heap);
+}
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
new file mode 100644
index 000000000000..f0f98897e4b9
--- /dev/null
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -0,0 +1,218 @@
+/*
+ * drivers/staging/android/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+	struct ion_heap heap;
+	struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
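+ *
+ * Since a buffer from dma_alloc_coherent() is physically contiguous,
+ * a single scatterlist entry covering the whole allocation is
+ * sufficient here.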
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = virt_to_page(cpu_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
+
+/* ION CMA heap operations */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %lu\n", len);
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		return -EINVAL;
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info) {
+		dev_err(dev, "Can't allocate buffer info\n");
+		return ION_CMA_ALLOCATE_FAILED;
+	}
+
+	info->cpu_addr = dma_alloc_coherent(dev, len, &info->handle,
+					    GFP_HIGHUSER | __GFP_ZERO);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Failed to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table) {
+		dev_err(dev, "Failed to allocate sg table\n");
+		goto free_mem;
+	}
+
+	if (ion_cma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
+				len))
+		goto free_table;
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+free_table:
+	kfree(info->table);
+free_mem:
+	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address 0x%pa\n", buffer,
+		&info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
+{
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+				 buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	/* kernel memory mapping has been done at allocation time */
+	return info->cpu_addr;
+}
+
+static void
ion_cma_unmap_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_heap *cma_heap;
+
+	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+	if (!cma_heap)
+		return ERR_PTR(-ENOMEM);
+
+	cma_heap->heap.ops = &ion_cma_ops;
+	/*
+	 * Get the device from the private heap data; it is used later to
+	 * make the link with the reserved CMA memory.
+	 */
+	cma_heap->dev = data->priv;
+	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+	return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+	kfree(cma_heap);
+}
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
new file mode 100644
index 000000000000..01cdc8aee898
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -0,0 +1,158 @@
+/*
+ * drivers/staging/android/ion/ion_dummy_driver.c
+ *
+ * Copyright (C) 2013 Linaro, Inc
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/init.h> +#include <linux/bootmem.h> +#include <linux/memblock.h> +#include <linux/sizes.h> +#include <linux/io.h> +#include "ion.h" +#include "ion_priv.h" + +struct ion_device *idev; +struct ion_heap **heaps; + +void *carveout_ptr; +void *chunk_ptr; + +struct ion_platform_heap dummy_heaps[] = { + { + .id = ION_HEAP_TYPE_SYSTEM, + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + }, + { + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "system contig", + }, + { + .id = ION_HEAP_TYPE_CARVEOUT, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .size = SZ_4M, + }, + { + .id = ION_HEAP_TYPE_CHUNK, + .type = ION_HEAP_TYPE_CHUNK, + .name = "chunk", + .size = SZ_4M, + .align = SZ_16K, + .priv = (void *)(SZ_16K), + }, +}; + +struct ion_platform_data dummy_ion_pdata = { + .nr = ARRAY_SIZE(dummy_heaps), + .heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ + int i, err; + + idev = ion_device_create(NULL); + heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr, + GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + + /* Allocate a dummy carveout heap */ + carveout_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, + GFP_KERNEL); + if (carveout_ptr) + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = + virt_to_phys(carveout_ptr); + else + pr_err("ion_dummy: Could not allocate carveout\n"); + + /* Allocate a dummy chunk heap */ + chunk_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CHUNK].size, + GFP_KERNEL); + if (chunk_ptr) + dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); + else + pr_err("ion_dummy: Could not allocate chunk\n"); + + for (i = 0; i < dummy_ion_pdata.nr; i++) { + struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + + if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && + !heap_data->base) + continue; + + if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) + continue; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + return 0; +err: + for (i = 0; i < dummy_ion_pdata.nr; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + return err; +} +device_initcall(ion_dummy_init); + +static void __exit ion_dummy_exit(void) +{ + int i; + + ion_device_destroy(idev); + + for (i = 0; i < dummy_ion_pdata.nr; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + + return; +} +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c new file mode 100644 index 000000000000..37e64d51394c --- /dev/null +++ b/drivers/staging/android/ion/ion_heap.c @@ -0,0 +1,318 @@ +/* + * drivers/staging/android/ion/ion_heap.c + * + * Copyright (C) 2011 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/err.h> +#include <linux/freezer.h> +#include <linux/kthread.h> +#include <linux/mm.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/scatterlist.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +void *ion_heap_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct scatterlist *sg; + int i, j; + void *vaddr; + pgprot_t pgprot; + struct sg_table *table = buffer->sg_table; + int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; + struct page **pages = vmalloc(sizeof(struct page *) * npages); + struct page **tmp = pages; + + if (!pages) + return NULL; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + for_each_sg(table->sgl, sg, table->nents, i) { + int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE; + struct page *page = sg_page(sg); + BUG_ON(i >= npages); + for (j = 0; j < npages_this_entry; j++) + *(tmp++) = page++; + } + vaddr = vmap(pages, npages, VM_MAP, pgprot); + vfree(pages); + + if (vaddr == NULL) + return ERR_PTR(-ENOMEM); + + return vaddr; +} + +void ion_heap_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + vunmap(buffer->vaddr); +} + +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct sg_table *table = buffer->sg_table; + unsigned long addr = vma->vm_start; + unsigned long offset = vma->vm_pgoff * PAGE_SIZE; + struct scatterlist *sg; + int i; + int ret; + + for_each_sg(table->sgl, sg, table->nents, i) { + struct page *page = sg_page(sg); + unsigned long remainder = vma->vm_end - addr; + unsigned long len = sg->length; + + if (offset >= sg->length) { + offset -= sg->length; + continue; + } else if (offset) { + page += offset / PAGE_SIZE; + len = sg->length - offset; + offset = 0; + } + len = min(len, remainder); + ret = remap_pfn_range(vma, addr, page_to_pfn(page), len, + vma->vm_page_prot); + if (ret) + return ret; + addr += len; + if (addr >= vma->vm_end) + return 0; + } + return 0; +} + +static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot) +{ + void *addr = vm_map_ram(pages, num, -1, pgprot); + if (!addr) + return -ENOMEM; + memset(addr, 0, PAGE_SIZE * num); + vm_unmap_ram(addr, num); + + return 0; +} + +static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents, + pgprot_t pgprot) +{ + int p = 0; + int ret = 0; + struct sg_page_iter piter; + struct page *pages[32]; + + for_each_sg_page(sgl, &piter, nents, 0) { + pages[p++] = sg_page_iter_page(&piter); + if (p == ARRAY_SIZE(pages)) { + ret = ion_heap_clear_pages(pages, p, pgprot); + if (ret) + return ret; + p = 0; + } + } + if (p) + ret = ion_heap_clear_pages(pages, p, pgprot); + + return ret; +} + +int ion_heap_buffer_zero(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->sg_table; + pgprot_t pgprot; + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; + else + pgprot = pgprot_writecombine(PAGE_KERNEL); + + return ion_heap_sglist_zero(table->sgl, 
table->nents, pgprot); +} + +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot) +{ + struct scatterlist sg; + + sg_init_table(&sg, 1); + sg_set_page(&sg, page, size, 0); + return ion_heap_sglist_zero(&sg, 1, pgprot); +} + +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer) +{ + spin_lock(&heap->free_lock); + list_add(&buffer->list, &heap->free_list); + heap->free_list_size += buffer->size; + spin_unlock(&heap->free_lock); + wake_up(&heap->waitqueue); +} + +size_t ion_heap_freelist_size(struct ion_heap *heap) +{ + size_t size; + + spin_lock(&heap->free_lock); + size = heap->free_list_size; + spin_unlock(&heap->free_lock); + + return size; +} + +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size) +{ + struct ion_buffer *buffer; + size_t total_drained = 0; + + if (ion_heap_freelist_size(heap) == 0) + return 0; + + spin_lock(&heap->free_lock); + if (size == 0) + size = heap->free_list_size; + + while (!list_empty(&heap->free_list)) { + if (total_drained >= size) + break; + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + total_drained += buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + spin_lock(&heap->free_lock); + } + spin_unlock(&heap->free_lock); + + return total_drained; +} + +static int ion_heap_deferred_free(void *data) +{ + struct ion_heap *heap = data; + + while (true) { + struct ion_buffer *buffer; + + wait_event_freezable(heap->waitqueue, + ion_heap_freelist_size(heap) > 0); + + spin_lock(&heap->free_lock); + if (list_empty(&heap->free_list)) { + spin_unlock(&heap->free_lock); + continue; + } + buffer = list_first_entry(&heap->free_list, struct ion_buffer, + list); + list_del(&buffer->list); + heap->free_list_size -= buffer->size; + spin_unlock(&heap->free_lock); + ion_buffer_destroy(buffer); + } + + return 0; +} + +int ion_heap_init_deferred_free(struct ion_heap *heap) +{ + struct sched_param param = { .sched_priority = 0 }; + + INIT_LIST_HEAD(&heap->free_list); + heap->free_list_size = 0; + spin_lock_init(&heap->free_lock); + init_waitqueue_head(&heap->waitqueue); + heap->task = kthread_run(ion_heap_deferred_free, heap, + "%s", heap->name); + if (IS_ERR(heap->task)) { + pr_err("%s: creating thread for deferred free failed\n", + __func__); + return PTR_RET(heap->task); + } + sched_setscheduler(heap->task, SCHED_IDLE, ¶m); + return 0; +} + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CHUNK: + heap = ion_chunk_heap_create(heap_data); + break; + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case 
ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CHUNK: + ion_chunk_heap_destroy(heap); + break; + case ION_HEAP_TYPE_DMA: + ion_cma_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c new file mode 100644 index 000000000000..fa693c23681a --- /dev/null +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -0,0 +1,195 @@ +/* + * drivers/staging/android/ion/ion_mem_pool.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/debugfs.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/fs.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> +#include "ion_priv.h" + +struct ion_page_pool_item { + struct page *page; + struct list_head list; +}; + +static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) +{ + struct page *page = alloc_pages(pool->gfp_mask, pool->order); + + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, + DMA_BIDIRECTIONAL); + return page; +} + +static void ion_page_pool_free_pages(struct ion_page_pool *pool, + struct page *page) +{ + __free_pages(page, pool->order); +} + +static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page) +{ + struct ion_page_pool_item *item; + + item = kmalloc(sizeof(struct ion_page_pool_item), GFP_KERNEL); + if (!item) + return -ENOMEM; + + mutex_lock(&pool->mutex); + item->page = page; + if (PageHighMem(page)) { + list_add_tail(&item->list, &pool->high_items); + pool->high_count++; + } else { + list_add_tail(&item->list, &pool->low_items); + pool->low_count++; + } + mutex_unlock(&pool->mutex); + return 0; +} + +static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high) +{ + struct ion_page_pool_item *item; + struct page *page; + + if (high) { + BUG_ON(!pool->high_count); + item = list_first_entry(&pool->high_items, + struct ion_page_pool_item, list); + pool->high_count--; + } else { + BUG_ON(!pool->low_count); + item = list_first_entry(&pool->low_items, + struct ion_page_pool_item, list); + pool->low_count--; + } + + list_del(&item->list); + page = item->page; + kfree(item); + return page; +} + +void *ion_page_pool_alloc(struct ion_page_pool *pool) +{ + struct page *page = NULL; + + BUG_ON(!pool); + + mutex_lock(&pool->mutex); + if (pool->high_count) + page = ion_page_pool_remove(pool, true); + else if (pool->low_count) + page = ion_page_pool_remove(pool, false); + mutex_unlock(&pool->mutex); + + if (!page) + page = ion_page_pool_alloc_pages(pool); + + return page; +} + +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page) +{ + int ret; + + ret = ion_page_pool_add(pool, page); + if (ret) + ion_page_pool_free_pages(pool, page); +} + +static int ion_page_pool_total(struct 
ion_page_pool *pool, bool high) +{ + int total = 0; + + total += high ? (pool->high_count + pool->low_count) * + (1 << pool->order) : + pool->low_count * (1 << pool->order); + return total; +} + +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan) +{ + int nr_freed = 0; + int i; + bool high; + + high = !!(gfp_mask & __GFP_HIGHMEM); + + if (nr_to_scan == 0) + return ion_page_pool_total(pool, high); + + for (i = 0; i < nr_to_scan; i++) { + struct page *page; + + mutex_lock(&pool->mutex); + if (pool->low_count) { + page = ion_page_pool_remove(pool, false); + } else if (high && pool->high_count) { + page = ion_page_pool_remove(pool, true); + } else { + mutex_unlock(&pool->mutex); + break; + } + mutex_unlock(&pool->mutex); + ion_page_pool_free_pages(pool, page); + nr_freed += (1 << pool->order); + } + + return nr_freed; +} + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) +{ + struct ion_page_pool *pool = kmalloc(sizeof(struct ion_page_pool), + GFP_KERNEL); + if (!pool) + return NULL; + pool->high_count = 0; + pool->low_count = 0; + INIT_LIST_HEAD(&pool->low_items); + INIT_LIST_HEAD(&pool->high_items); + pool->gfp_mask = gfp_mask; + pool->order = order; + mutex_init(&pool->mutex); + plist_node_init(&pool->list, order); + + return pool; +} + +void ion_page_pool_destroy(struct ion_page_pool *pool) +{ + kfree(pool); +} + +static int __init ion_page_pool_init(void) +{ + return 0; +} + +static void __exit ion_page_pool_exit(void) +{ +} + +module_init(ion_page_pool_init); +module_exit(ion_page_pool_exit); diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 000000000000..fc2e4fccf69d --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,361 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+#include "ion.h"
+
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @pages:		flat array of pages in the buffer -- used by fault
+ *			handler and only valid for buffers that are faulted in
+ * @vmas:		list of vma's mapping this buffer
+ * @handle_count:	count of handles referencing this buffer
+ * @task_comm:		taskcomm of last client to reference this buffer in a
+ *			handle, used for debugging
+ * @pid:		pid of last client to reference this buffer in a
+ *			handle, used for debugging
+ */
+struct ion_buffer {
+	struct kref ref;
+	union {
+		struct rb_node node;
+		struct list_head list;
+	};
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	size_t size;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct sg_table *sg_table;
+	struct page **pages;
+	struct list_head vmas;
+	/* used to track orphaned buffers */
+	int handle_count;
+	char task_comm[TASK_COMM_LEN];
+	pid_t pid;
+};
+
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @phys:		get physical address of a buffer (only define on
+ *			physically contiguous heaps)
+ * @map_dma:		map the memory for dma to a scatterlist
+ * @unmap_dma:		unmap the memory for dma
+ * @map_kernel:		map memory to the kernel
+ * @unmap_kernel:	unmap memory to the kernel
+ * @map_user:		map memory to userspace
+ *
+ * allocate, phys, and map_user return 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on error.
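+ *
+ * As a rough sketch (the example_* names are hypothetical, not part of
+ * this patch), a simple physically contiguous heap can implement only
+ * the core ops and reuse the generic mapping helpers declared below:
+ *
+ *	static struct ion_heap_ops example_heap_ops = {
+ *		.allocate	= example_heap_allocate,
+ *		.free		= example_heap_free,
+ *		.phys		= example_heap_phys,
+ *		.map_dma	= example_heap_map_dma,
+ *		.unmap_dma	= example_heap_unmap_dma,
+ *		.map_kernel	= ion_heap_map_kernel,
+ *		.unmap_kernel	= ion_heap_unmap_kernel,
+ *		.map_user	= ion_heap_map_user,
+ *	};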
+ */
+struct ion_heap_ops {
+	int (*allocate)(struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long align, unsigned long flags);
+	void (*free)(struct ion_buffer *buffer);
+	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+		    ion_phys_addr_t *addr, size_t *len);
+	struct sg_table *(*map_dma)(struct ion_heap *heap,
+				    struct ion_buffer *buffer);
+	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
+	void *(*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		plist node to put the heap on the device's
+ *			prioritized list of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @flags:		flags
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating. These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @shrinker:		a shrinker for the heap, if the heap caches system
+ *			memory, it must define a shrinker to return it on low
+ *			memory conditions, this includes system memory cached
+ *			in the deferred free lists for heaps that support it
+ * @free_list:		free list head if deferred free is used
+ * @free_list_size:	size of the deferred free list in bytes
+ * @free_lock:		protects the free list
+ * @waitqueue:		queue to wait on from deferred free thread
+ * @task:		task struct of deferred free thread
+ * @debug_show:		called when heap debug file is read to add any
+ *			heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made. In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct plist_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	unsigned long flags;
+	unsigned int id;
+	const char *name;
+	struct shrinker shrinker;
+	struct list_head free_list;
+	size_t free_list_size;
+	spinlock_t free_lock;
+	wait_queue_head_t waitqueue;
+	struct task_struct *task;
+	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:		buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:		buffer
+ *
+ * Indicates whether userspace mappings of this buffer will be faulted
+ * in; this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device on success or an ERR_PTR on failure
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+/**
+ * ion_device_destroy - frees a device and its resources
+ * @dev:		the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+		      struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:		the heap
+ * @buffer:		the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist
+ * immediately. Returns the total amount freed. The total freed may be
+ * higher depending on the size of the items in the list, or lower if
+ * there is insufficient total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:		the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
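+ *
+ * A registration sketch mirroring the dummy driver in this series
+ * (heap_data and idev are assumed to come from the surrounding board
+ * code):
+ *
+ *	heap = ion_heap_create(heap_data);
+ *	if (IS_ERR_OR_NULL(heap))
+ *		return PTR_ERR(heap);
+ *	ion_device_add_heap(idev, heap);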
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+
+/**
+ * The carveout heap returns physical addresses; since 0 may be a valid
+ * physical address, this is used to indicate allocation failed
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct, in particular the
+ *			counts and item lists
+ * @gfp_mask:		gfp_mask to use for allocations
+ * @order:		order of pages in the pool
+ * @list:		plist node for list of pools
+ *
+ * Allows you to keep a pool of pre allocated pages to use from your heap.
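+ *
+ * A rough usage sketch from a heap's allocation path (pool here is
+ * assumed to have been created with ion_page_pool_create() at heap
+ * init time, and returned with ion_page_pool_free() on the free path):
+ *
+ *	page = ion_page_pool_alloc(pool);
+ *	if (!page)
+ *		return -ENOMEM;
+ *	...
+ *	ion_page_pool_free(pool, page);
+ *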
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems.
+ */
+struct ion_page_pool {
+	int high_count;
+	int low_count;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *);
+void *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:		the pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			 int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *			       device
+ * @dev:		the device the pages will be used with
+ * @page:		the first page to be flushed
+ * @size:		size in bytes of region to be flushed
+ * @dir:		direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+			       size_t size, enum dma_data_direction dir);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
new file mode 100644
index 000000000000..9849f3963e75
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -0,0 +1,492 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + */ + +#include <asm/page.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include "ion.h" +#include "ion_priv.h" + +static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN | + __GFP_NORETRY) & ~__GFP_WAIT; +static gfp_t low_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN); +static const unsigned int orders[] = {8, 4, 0}; +static const int num_orders = ARRAY_SIZE(orders); +static int order_to_index(unsigned int order) +{ + int i; + for (i = 0; i < num_orders; i++) + if (order == orders[i]) + return i; + BUG(); + return -1; +} + +static unsigned int order_to_size(int order) +{ + return PAGE_SIZE << order; +} + +struct ion_system_heap { + struct ion_heap heap; + struct ion_page_pool **pools; +}; + +struct page_info { + struct page *page; + unsigned int order; + struct list_head list; +}; + +static struct page *alloc_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long order) +{ + bool cached = ion_buffer_cached(buffer); + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + struct page *page; + + if (!cached) { + page = ion_page_pool_alloc(pool); + } else { + gfp_t gfp_flags = low_order_gfp_flags; + + if (order > 4) + gfp_flags = high_order_gfp_flags; + page = alloc_pages(gfp_flags, order); + if (!page) + return NULL; + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); + } + if (!page) + return NULL; + + return page; +} + +static void free_buffer_page(struct ion_system_heap *heap, + struct ion_buffer *buffer, struct page *page, + unsigned int order) +{ + bool cached = ion_buffer_cached(buffer); + + if (!cached) { + struct ion_page_pool *pool = heap->pools[order_to_index(order)]; + ion_page_pool_free(pool, page); + } else { + __free_pages(page, order); + } +} + + +static struct page_info *alloc_largest_available(struct ion_system_heap *heap, + struct ion_buffer *buffer, + unsigned long size, + unsigned int max_order) +{ + struct page *page; + struct page_info *info; + int i; + + info = kmalloc(sizeof(struct page_info), GFP_KERNEL); + if (!info) + return NULL; + + for (i = 0; i < num_orders; i++) { + if (size < order_to_size(orders[i])) + continue; + if (max_order < orders[i]) + continue; + + page = alloc_buffer_page(heap, buffer, orders[i]); + if (!page) + continue; + + info->page = page; + info->order = orders[i]; + INIT_LIST_HEAD(&info->list); + return info; + } + kfree(info); + + return NULL; +} + +static int ion_system_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long size, unsigned long align, + unsigned long flags) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + struct sg_table *table; + struct scatterlist *sg; + int ret; + struct list_head pages; + struct page_info *info, *tmp_info; + int i = 0; + unsigned long size_remaining = PAGE_ALIGN(size); + unsigned int max_order = orders[0]; + + if (align > PAGE_SIZE) + return -EINVAL; + + if (size / PAGE_SIZE > totalram_pages / 2) + return -ENOMEM; + + INIT_LIST_HEAD(&pages); + while (size_remaining > 0) { + info = alloc_largest_available(sys_heap, buffer, size_remaining, + max_order); + if (!info) + goto err; + list_add_tail(&info->list, &pages); + size_remaining -= (1 << info->order) * PAGE_SIZE; + max_order = info->order; + i++; + } + table = kzalloc(sizeof(struct 
sg_table), GFP_KERNEL);
+	if (!table)
+		goto err;
+
+	ret = sg_alloc_table(table, i, GFP_KERNEL);
+	if (ret)
+		goto err1;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		struct page *page = info->page;
+		sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg = sg_next(sg);
+		list_del(&info->list);
+		kfree(info);
+	}
+
+	buffer->priv_virt = table;
+	return 0;
+err1:
+	kfree(table);
+err:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
+	return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	struct sg_table *table = buffer->sg_table;
+	bool cached = ion_buffer_cached(buffer);
+	struct scatterlist *sg;
+	LIST_HEAD(pages);
+	int i;
+
+	/* uncached pages come from the page pools, zero them before returning
+	   for security purposes (other allocations are zeroed at alloc time) */
+	if (!cached)
+		ion_heap_buffer_zero(buffer);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg->length));
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+						struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_system_heap_unmap_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops system_heap_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+};
+
+static unsigned long ion_system_heap_shrink_count(struct shrinker *shrinker,
+						  struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int nr_total = 0;
+	int i;
+
+	/* total number of items is whatever the page pools are holding
+	   plus whatever's in the freelist */
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool = sys_heap->pools[i];
+
+		nr_total += ion_page_pool_shrink(pool, sc->gfp_mask, 0);
+	}
+	nr_total += ion_heap_freelist_size(heap) / PAGE_SIZE;
+	return nr_total;
+}
+
+static unsigned long ion_system_heap_shrink_scan(struct shrinker *shrinker,
+						 struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int nr_freed = 0;
+	int i;
+
+	if (sc->nr_to_scan == 0)
+		goto end;
+
+	/* shrink the free list first, no point in zeroing the memory if
+	   we're just going to reclaim it */
+	nr_freed += ion_heap_freelist_drain(heap, sc->nr_to_scan * PAGE_SIZE) /
+		PAGE_SIZE;
+
+	if (nr_freed >= sc->nr_to_scan)
+		goto end;
+
+	for (i = 0; i < num_orders; i++) {
+		struct ion_page_pool *pool = sys_heap->pools[i];
+
+		nr_freed += ion_page_pool_shrink(pool, sc->gfp_mask,
+						 sc->nr_to_scan);
+		if (nr_freed >= sc->nr_to_scan)
+			break;
+	}
+
+end:
+	return nr_freed;
+}
+
+static int ion_system_heap_debug_show(struct ion_heap *heap,
+				      struct seq_file *s, void *unused)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+
+	for
(i = 0; i < num_orders; i++) { + struct ion_page_pool *pool = sys_heap->pools[i]; + seq_printf(s, "%d order %u highmem pages in pool = %lu total\n", + pool->high_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->high_count); + seq_printf(s, "%d order %u lowmem pages in pool = %lu total\n", + pool->low_count, pool->order, + (1 << pool->order) * PAGE_SIZE * pool->low_count); + } + return 0; +} + +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) +{ + struct ion_system_heap *heap; + int i; + + heap = kzalloc(sizeof(struct ion_system_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->heap.ops = &system_heap_ops; + heap->heap.type = ION_HEAP_TYPE_SYSTEM; + heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; + heap->pools = kzalloc(sizeof(struct ion_page_pool *) * num_orders, + GFP_KERNEL); + if (!heap->pools) + goto err_alloc_pools; + for (i = 0; i < num_orders; i++) { + struct ion_page_pool *pool; + gfp_t gfp_flags = low_order_gfp_flags; + + if (orders[i] > 4) + gfp_flags = high_order_gfp_flags; + pool = ion_page_pool_create(gfp_flags, orders[i]); + if (!pool) + goto err_create_pool; + heap->pools[i] = pool; + } + + heap->heap.shrinker.scan_objects = ion_system_heap_shrink_scan; + heap->heap.shrinker.count_objects = ion_system_heap_shrink_count; + heap->heap.shrinker.seeks = DEFAULT_SEEKS; + heap->heap.shrinker.batch = 0; + register_shrinker(&heap->heap.shrinker); + heap->heap.debug_show = ion_system_heap_debug_show; + return &heap->heap; +err_create_pool: + for (i = 0; i < num_orders; i++) + if (heap->pools[i]) + ion_page_pool_destroy(heap->pools[i]); + kfree(heap->pools); +err_alloc_pools: + kfree(heap); + return ERR_PTR(-ENOMEM); +} + +void ion_system_heap_destroy(struct ion_heap *heap) +{ + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; + + for (i = 0; i < num_orders; i++) + ion_page_pool_destroy(sys_heap->pools[i]); + kfree(sys_heap->pools); + kfree(sys_heap); +} + +static int ion_system_contig_heap_allocate(struct ion_heap *heap, + struct ion_buffer *buffer, + unsigned long len, + unsigned long align, + unsigned long flags) +{ + int order = get_order(len); + struct page *page; + struct sg_table *table; + unsigned long i; + int ret; + + if (align > (PAGE_SIZE << order)) + return -EINVAL; + + page = alloc_pages(low_order_gfp_flags, order); + if (!page) + return -ENOMEM; + + split_page(page, order); + + len = PAGE_ALIGN(len); + for (i = len >> PAGE_SHIFT; i < (1 << order); i++) + __free_page(page + i); + + table = kzalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto out; + } + + ret = sg_alloc_table(table, 1, GFP_KERNEL); + if (ret) + goto out; + + sg_set_page(table->sgl, page, len, 0); + + buffer->priv_virt = table; + + ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + + return 0; + +out: + for (i = 0; i < len >> PAGE_SHIFT; i++) + __free_page(page + i); + kfree(table); + return ret; +} + +static void ion_system_contig_heap_free(struct ion_buffer *buffer) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT; + unsigned long i; + + for (i = 0; i < pages; i++) + __free_page(page + i); + sg_free_table(table); + kfree(table); +} + +static int ion_system_contig_heap_phys(struct ion_heap *heap, + struct ion_buffer *buffer, + ion_phys_addr_t *addr, size_t *len) +{ + struct sg_table *table = buffer->priv_virt; + struct page *page = sg_page(table->sgl); + 
*addr = page_to_phys(page); + *len = buffer->size; + return 0; +} + +static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + return buffer->priv_virt; +} + +static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap, + struct ion_buffer *buffer) +{ +} + +static struct ion_heap_ops kmalloc_ops = { + .allocate = ion_system_contig_heap_allocate, + .free = ion_system_contig_heap_free, + .phys = ion_system_contig_heap_phys, + .map_dma = ion_system_contig_heap_map_dma, + .unmap_dma = ion_system_contig_heap_unmap_dma, + .map_kernel = ion_heap_map_kernel, + .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_heap_map_user, +}; + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) +{ + struct ion_heap *heap; + + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); + if (!heap) + return ERR_PTR(-ENOMEM); + heap->ops = &kmalloc_ops; + heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; + return heap; +} + +void ion_system_contig_heap_destroy(struct ion_heap *heap) +{ + kfree(heap); +} + diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 000000000000..654acb5c8eba --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,282 @@ +/* + * + * Copyright (C) 2013 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/fs.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { + struct miscdevice misc; +}; + +struct ion_test_data { + struct dma_buf *dma_buf; + struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, + void __user *ptr, size_t offset, size_t size, bool write) +{ + int ret = 0; + struct dma_buf_attachment *attach; + struct sg_table *table; + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + enum dma_data_direction dir = write ? 
DMA_FROM_DEVICE : DMA_TO_DEVICE; + struct sg_page_iter sg_iter; + unsigned long offset_page; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) + return PTR_ERR(attach); + + table = dma_buf_map_attachment(attach, dir); + if (IS_ERR(table)) { + dma_buf_detach(dma_buf, attach); + return PTR_ERR(table); + } + + offset_page = offset >> PAGE_SHIFT; + offset %= PAGE_SIZE; + + for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { + struct page *page = sg_page_iter_page(&sg_iter); + void *vaddr = vmap(&page, 1, VM_MAP, pgprot); + size_t to_copy = PAGE_SIZE - offset; + + to_copy = min(to_copy, size); + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + if (write) + ret = copy_from_user(vaddr + offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + offset, to_copy); + + vunmap(vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + size -= to_copy; + if (!size) + break; + ptr += to_copy; + offset = 0; + } + +err: + dma_buf_unmap_attachment(attach, table, dir); + dma_buf_detach(dma_buf, attach); + return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, + size_t offset, size_t size, bool write) +{ + int ret; + unsigned long page_offset = offset >> PAGE_SHIFT; + size_t copy_offset = offset % PAGE_SIZE; + size_t copy_size = size; + enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (offset > dma_buf->size || size > dma_buf->size - offset) + return -EINVAL; + + ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir); + if (ret) + return ret; + + while (copy_size > 0) { + size_t to_copy; + void *vaddr = dma_buf_kmap(dma_buf, page_offset); + + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + + if (write) + ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + + dma_buf_kunmap(dma_buf, page_offset, vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + + copy_size -= to_copy; + ptr += to_copy; + page_offset++; + copy_offset = 0; + } +err: + dma_buf_end_cpu_access(dma_buf, offset, size, dir); + return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ion_test_data *test_data = filp->private_data; + int ret = 0; + + union { + struct ion_test_rw_data test_rw; + } data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_TEST_SET_FD: + { + struct dma_buf *dma_buf = NULL; + int fd = arg; + + if (fd >= 0) { + dma_buf = dma_buf_get(fd); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + } + if (test_data->dma_buf) + dma_buf_put(test_data->dma_buf); + test_data->dma_buf = dma_buf; + break; + } + case ION_IOC_TEST_DMA_MAPPING: + { + ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + case ION_IOC_TEST_KERNEL_MAPPING: + { + ret = ion_handle_test_kernel(test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, data.test_rw.size, + data.test_rw.write); + break; + } + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + } + return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ + struct ion_test_data *data; + struct miscdevice *miscdev = file->private_data; + +
data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = miscdev->parent; + + file->private_data = data; + + return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ + struct ion_test_data *data = file->private_data; + + kfree(data); + + return 0; +} + +static const struct file_operations ion_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ion_test_ioctl, + .compat_ioctl = ion_test_ioctl, + .open = ion_test_open, + .release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ + int ret; + struct ion_test_device *testdev; + + testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), + GFP_KERNEL); + if (!testdev) + return -ENOMEM; + + testdev->misc.minor = MISC_DYNAMIC_MINOR; + testdev->misc.name = "ion-test"; + testdev->misc.fops = &ion_test_fops; + testdev->misc.parent = &pdev->dev; + ret = misc_register(&testdev->misc); + if (ret) { + pr_err("failed to register misc device.\n"); + return ret; + } + + platform_set_drvdata(pdev, testdev); + + return 0; +} + +static struct platform_driver ion_test_platform_driver = { + .driver = { + .name = "ion-test", + }, +}; + +static int __init ion_test_init(void) +{ + platform_device_register_simple("ion-test", -1, NULL, 0); + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ + platform_driver_unregister(&ion_test_platform_driver); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 000000000000..11cd003fb08f --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-y += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 000000000000..3474c65f87fa --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,84 @@ +/* + * drivers/staging/android/ion/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + idev = ion_device_create(NULL); + if (IS_ERR_OR_NULL(idev)) { + kfree(heaps); + return PTR_ERR(idev); + } + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; +err: + for (i = 0; i < num_heaps; i++) { + if (heaps[i]) + ion_heap_destroy(heaps[i]); + } + kfree(heaps); + return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = tegra_ion_probe, + .remove = tegra_ion_remove, + .driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h index 585040be5f18..5aaf71d6974b 100644 --- a/drivers/staging/android/sw_sync.h +++ b/drivers/staging/android/sw_sync.h @@ -35,10 +35,27 @@ struct sw_sync_pt { u32 value; }; +#if IS_ENABLED(CONFIG_SW_SYNC) struct sw_sync_timeline *sw_sync_timeline_create(const char *name); void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc); struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value); +#else +static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name) +{ + return NULL; +} + +static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc) +{ +} + +static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, + u32 value) +{ + return NULL; +} +#endif /* IS_ENABLED(CONFIG_SW_SYNC) */ #endif /* __KERNEL__ */ diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index 38e5d3b5ed9b..3d05f662110b 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c @@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref) container_of(kref, struct sync_timeline, kref); unsigned long flags; - if (obj->ops->release_obj) - obj->ops->release_obj(obj); - spin_lock_irqsave(&sync_timeline_list_lock, flags); list_del(&obj->sync_timeline_list); spin_unlock_irqrestore(&sync_timeline_list_lock, flags); + if (obj->ops->release_obj) + obj->ops->release_obj(obj); + kfree(obj); } void sync_timeline_destroy(struct sync_timeline *obj) { obj->destroyed = true; + smp_wmb(); + /* - * If this is not the last reference, signal any children - * that their parent is going away. + * signal any children that their parent is going away.
*/ + sync_timeline_signal(obj); - if (!kref_put(&obj->kref, sync_timeline_free)) - sync_timeline_signal(obj); + kref_put(&obj->kref, sync_timeline_free); } EXPORT_SYMBOL(sync_timeline_destroy); diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h index 38ea986dc70f..62e2255b1c1e 100644 --- a/drivers/staging/android/sync.h +++ b/drivers/staging/android/sync.h @@ -28,7 +28,7 @@ struct sync_fence; /** * struct sync_timeline_ops - sync object implementation ops - * @driver_name: name of the implentation + * @driver_name: name of the implementation * @dup: duplicate a sync_pt * @has_signaled: returns: * 1 if pt has signaled @@ -37,12 +37,12 @@ struct sync_fence; * @compare: returns: * 1 if b will signal before a * 0 if a and b will signal at the same time - * -1 if a will signabl before b + * -1 if a will signal before b * @free_pt: called before sync_pt is freed * @release_obj: called before sync_timeline is freed * @print_obj: deprecated * @print_pt: deprecated - * @fill_driver_data: write implmentation specific driver data to data. + * @fill_driver_data: write implementation specific driver data to data. * should return an error if there is not enough room * as specified by size. This information is returned * to userspace by SYNC_IOC_FENCE_INFO. @@ -88,9 +88,9 @@ struct sync_timeline_ops { /** * struct sync_timeline - sync object * @kref: reference count on fence. - * @ops: ops that define the implementaiton of the sync_timeline + * @ops: ops that define the implementation of the sync_timeline * @name: name of the sync_timeline. Useful for debugging - * @destoryed: set when sync_timeline is destroyed + * @destroyed: set when sync_timeline is destroyed * @child_list_head: list of children sync_pts for this sync_timeline * @child_list_lock: lock protecting @child_list_head, destroyed, and * sync_pt.status @@ -119,12 +119,12 @@ struct sync_timeline { * @parent: sync_timeline to which this sync_pt belongs * @child_list: membership in sync_timeline.child_list_head * @active_list: membership in sync_timeline.active_list_head - * @signaled_list: membership in temorary signaled_list on stack + * @signaled_list: membership in temporary signaled_list on stack * @fence: sync_fence to which the sync_pt belongs * @pt_list: membership in sync_fence.pt_list_head * @status: 1: signaled, 0:active, <0: error * @timestamp: time which sync_pt status transitioned from active to - * singaled or error. + * signaled or error. */ struct sync_pt { struct sync_timeline *parent; @@ -145,9 +145,9 @@ struct sync_pt { /** * struct sync_fence - sync fence * @file: file representing this fence - * @kref: referenace count on fence. + * @kref: reference count on fence. * @name: name of sync_fence. Useful for debugging - * @pt_list_head: list of sync_pts in ths fence. immutable once fence + * @pt_list_head: list of sync_pts in the fence. immutable once fence * is created * @waiter_list_head: list of asynchronous waiters on this fence * @waiter_list_lock: lock protecting @waiter_list_head and @status @@ -201,23 +201,23 @@ static inline void sync_fence_waiter_init(struct sync_fence_waiter *waiter, /** * sync_timeline_create() - creates a sync object - * @ops: specifies the implemention ops for the object + * @ops: specifies the implementation ops for the object * @size: size to allocate for this obj * @name: sync_timeline name * - * Creates a new sync_timeline which will use the implemetation specified by - * @ops. 
@size bytes will be allocated allowing for implemntation specific - data to be kept after the generic sync_timeline stuct. + * Creates a new sync_timeline which will use the implementation specified by + * @ops. @size bytes will be allocated allowing for implementation specific + * data to be kept after the generic sync_timeline struct. */ struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops, int size, const char *name); /** - * sync_timeline_destory() - destorys a sync object + * sync_timeline_destroy() - destroys a sync object * @obj: sync_timeline to destroy * - * A sync implemntation should call this when the @obj is going away - * (i.e. module unload.) @obj won't actually be freed until all its childern + * A sync implementation should call this when the @obj is going away + * (i.e. module unload.) @obj won't actually be freed until all its children + * sync_pts are freed. */ void sync_timeline_destroy(struct sync_timeline *obj); @@ -226,7 +226,7 @@ void sync_timeline_destroy(struct sync_timeline *obj); * sync_timeline_signal() - signal a status change on a sync_timeline * @obj: sync_timeline to signal * - * A sync implemntation should call this any time one of it's sync_pts + * A sync implementation should call this any time one of its sync_pts * has signaled or has an error condition. */ void sync_timeline_signal(struct sync_timeline *obj); @@ -236,8 +236,8 @@ void sync_timeline_signal(struct sync_timeline *obj); * @parent: sync_pt's parent sync_timeline * @size: size to allocate for this pt * - * Creates a new sync_pt as a chiled of @parent. @size bytes will be - * allocated allowing for implemntation specific data to be kept after + * Creates a new sync_pt as a child of @parent. @size bytes will be + * allocated allowing for implementation specific data to be kept after * the generic sync_timeline struct. */ struct sync_pt *sync_pt_create(struct sync_timeline *parent, int size); @@ -287,7 +287,7 @@ struct sync_fence *sync_fence_merge(const char *name, struct sync_fence *sync_fence_fdget(int fd); /** - * sync_fence_put() - puts a refernnce of a sync fence + * sync_fence_put() - puts a reference of a sync fence * @fence: fence to put * * Puts a reference on @fence. If this is the last reference, the fence and @@ -297,7 +297,7 @@ void sync_fence_put(struct sync_fence *fence); /** * sync_fence_install() - installs a fence into a file descriptor - * @fence: fence to instal + * @fence: fence to install * @fd: file descriptor in which to install the fence * * Installs @fence into @fd. @fd should be acquired through get_unused_fd(). @@ -359,10 +359,10 @@ struct sync_merge_data { * struct sync_pt_info - detailed sync_pt information * @len: length of sync_pt_info including any driver_data * @obj_name: name of parent sync_timeline - * @driver_name: name of driver implmenting the parent + * @driver_name: name of driver implementing the parent * @status: status of the sync_pt 0:active 1:signaled <0:error * @timestamp_ns: timestamp of status change in nanoseconds - * @driver_data: any driver dependant data + * @driver_data: any driver dependent data */ struct sync_pt_info { __u32 len; @@ -377,7 +377,7 @@ struct sync_pt_info { /** * struct sync_fence_info_data - data returned from fence info ioctl * @len: ioctl caller writes the size of the buffer it is passing in. - * ioctl returns length of sync_fence_data reutnred to userspace + * ioctl returns length of sync_fence_data returned to userspace * including pt_info. * @name: name of fence * @status: status of fence.
1: signaled 0:active <0:error @@ -418,7 +418,7 @@ struct sync_fence_info_data { * pt_info. * * pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence. - * To itterate over the sync_pt_infos, use the sync_pt_info.len field. + * To iterate over the sync_pt_infos, use the sync_pt_info.len field. */ #define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2,\ struct sync_fence_info_data) diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h new file mode 100644 index 000000000000..f09e7c154d69 --- /dev/null +++ b/drivers/staging/android/uapi/ion.h @@ -0,0 +1,196 @@ +/* + * drivers/staging/android/uapi/ion.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_H +#define _UAPI_LINUX_ION_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +typedef int ion_user_handle_t; + +/** + * enum ion_heap_type - list of all possible types of heaps + * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc + * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc + * @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved + * carveout heap, allocations are physically + * contiguous + * @ION_HEAP_TYPE_CHUNK: memory allocated from a chunk heap + * @ION_HEAP_TYPE_DMA: memory allocated via DMA API + * @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask + * is used to identify the heaps, so only 32 + * total heap types are supported + */ +enum ion_heap_type { + ION_HEAP_TYPE_SYSTEM, + ION_HEAP_TYPE_SYSTEM_CONTIG, + ION_HEAP_TYPE_CARVEOUT, + ION_HEAP_TYPE_CHUNK, + ION_HEAP_TYPE_DMA, + ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always + are at the end of this enum */ + ION_NUM_HEAPS = 16, +}; + +#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM) +#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG) +#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT) +#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA) + +#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8) + +/** + * allocation flags - the lower 16 bits are used by core ion, the upper 16 + * bits are reserved for use by the heaps themselves.
+ */ +#define ION_FLAG_CACHED 1 /* mappings of this buffer should be + cached, ion will do cache + maintenance when the buffer is + mapped for dma */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will be created + at mmap time, if this is set + caches must be managed manually */ + +/** + * DOC: Ion Userspace API + * + * create a client by opening /dev/ion + * most operations handled via following ioctls + * + */ + +/** + * struct ion_allocation_data - metadata passed from userspace for allocations + * @len: size of the allocation + * @align: required alignment of the allocation + * @heap_id_mask: mask of heap ids to allocate from + * @flags: flags passed to heap + * @handle: populated with a cookie to use to refer to this + * allocation + * + * Provided by userspace as an argument to the ioctl + */ +struct ion_allocation_data { + size_t len; + size_t align; + unsigned int heap_id_mask; + unsigned int flags; + ion_user_handle_t handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle: a handle + * @fd: a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { + ion_user_handle_t handle; + int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle: a handle + */ +struct ion_handle_data { + ion_user_handle_t handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd: the custom ioctl function to call + * @arg: additional data to pass to the custom ioctl, typically a user + * pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_ALLOC - allocate memory + * + * Takes an ion_allocation_data struct and returns it with the handle field + * populated with the opaque handle for the allocation. + */ +#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ + struct ion_allocation_data) + +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be passed to another process. The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT.
+ */ +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * field set to the corresponding opaque handle. + */ +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory + * + * Deprecated in favor of using the dma_buf APIs correctly (syncing + * will happen automatically when the buffer is mapped to a device). + * If necessary, it should be used after touching a cached buffer from the + * cpu; this will make the buffer in memory coherent. + */ +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl + * + * Takes the argument of the architecture specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + +#endif /* _UAPI_LINUX_ION_H */ diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h new file mode 100644 index 000000000000..ffef06f63133 --- /dev/null +++ b/drivers/staging/android/uapi/ion_test.h @@ -0,0 +1,70 @@ +/* + * drivers/staging/android/uapi/ion_test.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_TEST_H +#define _UAPI_LINUX_ION_TEST_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct ion_test_rw_data - metadata passed to the kernel to read or write + * a handle + * @ptr: a pointer to an area at least as large as size + * @offset: offset into the ion buffer to start reading or writing + * @size: size to read or write + * @write: 1 to write, 0 to read + */ +struct ion_test_rw_data { + __u64 ptr; + __u64 offset; + __u64 size; + int write; + int __padding; +}; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver + * + * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will + * release the first fd. + */ +#define ION_IOC_TEST_SET_FD \ + _IO(ION_IOC_MAGIC, 0xf0) + +/** + * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA + * + * Reads or writes the memory from a handle using an uncached mapping. Can be + * used by unit tests to emulate a DMA engine as closely as possible. Only + * expected to be used for debugging and testing, may not always be available. + */ +#define ION_IOC_TEST_DMA_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data) + +/** + * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle + * + * Reads or writes the memory from a handle using a kernel mapping. Can be + * used by unit tests to test heap map_kernel functions. Only expected to be + * used for debugging and testing, may not always be available. + */ +#define ION_IOC_TEST_KERNEL_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) + +#endif /* _UAPI_LINUX_ION_TEST_H */
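For orientation, the DOC blocks above describe the complete userspace flow this series enables: open /dev/ion, allocate with ION_IOC_ALLOC, export the buffer as a dma-buf fd with ION_IOC_SHARE, mmap that fd, and optionally hand the same fd to the ion-test misc device for readback through a kernel mapping. The program below is an illustrative sketch of that flow, not part of the patch itself: it assumes the two uapi headers added here are on the include path, that /dev/ion and /dev/ion-test exist on the target, and it trims most error handling.

/* Hedged example: allocate, share, mmap, and read back an ion buffer. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include "ion.h"	/* uapi/ion.h added by this patch */
#include "ion_test.h"	/* uapi/ion_test.h added by this patch */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 4096,
		.align = 4096,
		.heap_id_mask = ION_HEAP_SYSTEM_MASK,
		.flags = ION_FLAG_CACHED,
	};
	struct ion_fd_data share;
	struct ion_handle_data free_data;
	struct ion_test_rw_data rw;
	char scratch[4096];
	void *map;
	int ion_fd, test_fd;

	ion_fd = open("/dev/ion", O_RDWR);
	if (ion_fd < 0 || ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return 1;

	/* Export the allocation as a dma-buf fd and map it. */
	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		return 1;
	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   share.fd, 0);
	if (map == MAP_FAILED)
		return 1;
	memset(map, 0xa5, 4096);

	/* Read the buffer back through the test driver's kernel mapping. */
	test_fd = open("/dev/ion-test", O_RDWR);
	if (test_fd >= 0 &&
	    ioctl(test_fd, ION_IOC_TEST_SET_FD, (long)share.fd) == 0) {
		rw.ptr = (unsigned long)scratch;
		rw.offset = 0;
		rw.size = sizeof(scratch);
		rw.write = 0;	/* 0 = read from the buffer into ptr */
		if (ioctl(test_fd, ION_IOC_TEST_KERNEL_MAPPING, &rw) == 0)
			printf("first byte: 0x%02x\n",
			       (unsigned char)scratch[0]);
		ioctl(test_fd, ION_IOC_TEST_SET_FD, -1L); /* drop the dma-buf */
		close(test_fd);
	}

	munmap(map, 4096);
	close(share.fd);
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
	close(ion_fd);
	return 0;
}

Note that because the buffer is allocated with ION_FLAG_CACHED and without ION_FLAG_CACHED_NEEDS_SYNC, cache maintenance is the kernel's job when the buffer is mapped for dma; a real client that picked NEEDS_SYNC would have to issue ION_IOC_SYNC (or use the dma-buf cpu-access calls) itself.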