---
 drivers/staging/media/sunxi/Kconfig           |    2 +
 drivers/staging/media/sunxi/Makefile          |    2 +
 drivers/staging/media/sunxi/cedar/Kconfig     |    3 +
 drivers/staging/media/sunxi/cedar/Makefile    |    3 +
 drivers/staging/media/sunxi/cedar/ion/Kconfig |    9 +
 .../staging/media/sunxi/cedar/ion/Makefile    |    8 +
 .../media/sunxi/cedar/ion/compat_ion.c        |  195 ++
 .../media/sunxi/cedar/ion/compat_ion.h        |   29 +
 .../staging/media/sunxi/cedar/ion/ion-ioctl.c |  190 ++
 drivers/staging/media/sunxi/cedar/ion/ion.c   | 1510 +++++++++++++++
 drivers/staging/media/sunxi/cedar/ion/ion.h   |  176 ++
 .../media/sunxi/cedar/ion/ion_carveout_heap.c |  168 ++
 .../media/sunxi/cedar/ion/ion_chunk_heap.c    |  181 ++
 .../media/sunxi/cedar/ion/ion_cma_heap.c      |  173 ++
 .../staging/media/sunxi/cedar/ion/ion_heap.c  |  384 ++++
 .../staging/media/sunxi/cedar/ion/ion_of.c    |  185 ++
 .../staging/media/sunxi/cedar/ion/ion_of.h    |   37 +
 .../media/sunxi/cedar/ion/ion_page_pool.c     |  181 ++
 .../staging/media/sunxi/cedar/ion/ion_priv.h  |  473 +++++
 .../media/sunxi/cedar/ion/ion_system_heap.c   |  460 +++++
 .../media/sunxi/cedar/ion/sunxi/Makefile      |    1 +
 .../media/sunxi/cedar/ion/sunxi/cache-v7.S    |  145 ++
 .../media/sunxi/cedar/ion/sunxi/cache.S       |    5 +
 .../media/sunxi/cedar/ion/sunxi/cache.h       |    6 +
 .../media/sunxi/cedar/ion/sunxi/sunxi_ion.c   |  189 ++
 .../media/sunxi/cedar/ion/sunxi/sunxi_ion.h   |   33 +
 .../sunxi/cedar/ion/sunxi/sunxi_ion_priv.h    |   30 +
 .../staging/media/sunxi/cedar/ion/uapi/ion.h  |  237 +++
 drivers/staging/media/sunxi/cedar/ve/Kconfig  |    8 +
 drivers/staging/media/sunxi/cedar/ve/Makefile |    1 +
 .../staging/media/sunxi/cedar/ve/cedar_ve.c   | 1637 +++++++++++++++++
 .../staging/media/sunxi/cedar/ve/cedar_ve.h   |   85 +
 .../media/sunxi/cedar/ve/cedar_ve_priv.h      |  183 ++
 .../media/sunxi/cedar/ve/ve_mem_list.h        |  120 ++
 34 files changed, 7049 insertions(+)
 create mode 100644 drivers/staging/media/sunxi/cedar/Kconfig
 create mode 100644 drivers/staging/media/sunxi/cedar/Makefile
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/Kconfig
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/Makefile
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/compat_ion.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/compat_ion.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion-ioctl.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_carveout_heap.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_chunk_heap.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_cma_heap.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_heap.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_of.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_of.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_page_pool.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_priv.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/ion_system_heap.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/Makefile
 create mode 100755 drivers/staging/media/sunxi/cedar/ion/sunxi/cache-v7.S
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/cache.S
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/cache.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion_priv.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ion/uapi/ion.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/Kconfig
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/Makefile
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/cedar_ve.c
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/cedar_ve.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/cedar_ve_priv.h
 create mode 100644 drivers/staging/media/sunxi/cedar/ve/ve_mem_list.h

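A minimal userspace sketch of the calling sequence this series expects against
the /dev/ion node: allocate a buffer, turn the handle into a dma-buf fd, mmap
it, then tear everything down. It assumes the legacy handle-based UAPI carried
in uapi/ion.h from this series, and it hard-codes a heap id equal to
ION_HEAP_TYPE_DMA purely for illustration; real clients should discover heap
ids via ION_IOC_HEAP_QUERY (or the sunxi_ion platform setup) instead.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "uapi/ion.h"			/* the header added by this patch */

int main(void)
{
	struct ion_allocation_data alloc = {
		.len = 1 << 20,				/* 1 MiB */
		.align = 4096,
		/* Assumed heap id -- use ION_IOC_HEAP_QUERY on real hardware. */
		.heap_id_mask = 1 << ION_HEAP_TYPE_DMA,
		.flags = 0,
	};
	struct ion_fd_data map;
	struct ion_handle_data free_data;
	void *p;
	int fd;

	fd = open("/dev/ion", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ion");
		return 1;
	}
	if (ioctl(fd, ION_IOC_ALLOC, &alloc) < 0) {
		perror("ION_IOC_ALLOC");
		return 1;
	}

	/* Export the handle as a dma-buf fd; ION_IOC_SHARE takes the same
	 * path as ION_IOC_MAP in the ioctl handler below. */
	map.handle = alloc.handle;
	if (ioctl(fd, ION_IOC_MAP, &map) < 0) {
		perror("ION_IOC_MAP");
		return 1;
	}

	p = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED, map.fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(p, 0, alloc.len);		/* touch the buffer */

	munmap(p, alloc.len);
	close(map.fd);
	free_data.handle = alloc.handle;
	ioctl(fd, ION_IOC_FREE, &free_data);
	close(fd);
	return 0;
}

In-kernel users (the cedar_ve side) go through the client API declared in
ion.h instead; again only a sketch, with error handling omitted, the same
heap-id assumption as above, and idev standing for the ion_device created at
probe time:

	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, "cedar-ve");
	handle = ion_alloc(client, 1 << 20, PAGE_SIZE,
			   1 << ION_HEAP_TYPE_DMA, 0);
	vaddr = ion_map_kernel(client, handle);
	/* ... use the buffer ... */
	ion_unmap_kernel(client, handle);
	ion_free(client, handle);
	ion_client_destroy(client);
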
diff --git a/drivers/staging/media/sunxi/Kconfig b/drivers/staging/media/sunxi/Kconfig
index 4549a1357..a0cfdd100 100644
--- a/drivers/staging/media/sunxi/Kconfig
+++ b/drivers/staging/media/sunxi/Kconfig
@@ -12,5 +12,7 @@ config VIDEO_SUNXI
 if VIDEO_SUNXI
 
 source "drivers/staging/media/sunxi/cedrus/Kconfig"
+source "drivers/staging/media/sunxi/cedar/Kconfig"
 
 endif
+
diff --git a/drivers/staging/media/sunxi/Makefile b/drivers/staging/media/sunxi/Makefile
index b87140b0e..62a1eb748 100644
--- a/drivers/staging/media/sunxi/Makefile
+++ b/drivers/staging/media/sunxi/Makefile
@@ -1,2 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_VIDEO_SUNXI_CEDRUS)	+= cedrus/
+obj-y += cedar/
+
diff --git a/drivers/staging/media/sunxi/cedar/Kconfig b/drivers/staging/media/sunxi/cedar/Kconfig
new file mode 100644
index 000000000..2f9539ba9
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/Kconfig
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+source "drivers/staging/media/sunxi/cedar/ve/Kconfig"
+source "drivers/staging/media/sunxi/cedar/ion/Kconfig"
diff --git a/drivers/staging/media/sunxi/cedar/Makefile b/drivers/staging/media/sunxi/cedar/Makefile
new file mode 100644
index 000000000..8563fca8b
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_VE)	+= ve/
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION)	+= ion/
diff --git a/drivers/staging/media/sunxi/cedar/ion/Kconfig b/drivers/staging/media/sunxi/cedar/ion/Kconfig
new file mode 100644
index 000000000..0499f2233
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/Kconfig
@@ -0,0 +1,9 @@
+config VIDEO_SUNXI_CEDAR_ION
+	tristate "Allwinner CedarX Ion Driver"
+	depends on ARCH_SUNXI && OF_ADDRESS && HAS_DMA && MMU
+	select GENERIC_ALLOCATOR
+	select DMA_SHARED_BUFFER
+	select CMA
+	select DMA_CMA
+	help
+	  Ion memory allocator compatible with Allwinner's libcdc userspace.
diff --git a/drivers/staging/media/sunxi/cedar/ion/Makefile b/drivers/staging/media/sunxi/cedar/ion/Makefile
new file mode 100644
index 000000000..5f398ee44
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) +=	ion.o ion-ioctl.o ion_heap.o \
+			ion_page_pool.o ion_system_heap.o \
+			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
+			ion_of.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += compat_ion.o
+endif
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += sunxi/
diff --git a/drivers/staging/media/sunxi/cedar/ion/compat_ion.c b/drivers/staging/media/sunxi/cedar/ion/compat_ion.c
new file mode 100644
index 000000000..9a978d217
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+	compat_size_t len;
+	compat_size_t align;
+	compat_uint_t heap_id_mask;
+	compat_uint_t flags;
+	compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+	compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \
+				      struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \
+				      struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \
+				      struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data32->len);
+	err |= put_user(s, &data->len);
+	err |= get_user(s, &data32->align);
+	err |= put_user(s, &data->align);
+	err |= get_user(u, &data32->heap_id_mask);
+	err |= put_user(u, &data->heap_id_mask);
+	err |= get_user(u, &data32->flags);
+	err |= put_user(u, &data->flags);
+	err |= get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_get_ion_handle_data(
+			struct compat_ion_handle_data __user *data32,
+			struct ion_handle_data __user *data)
+{
+	compat_int_t i;
+	int err;
+
+	err = get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_put_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data->len);
+	err |= put_user(s, &data32->len);
+	err |= get_user(s, &data->align);
+	err |= put_user(s, &data32->align);
+	err |= get_user(u, &data->heap_id_mask);
+	err |= put_user(u, &data32->heap_id_mask);
+	err |= get_user(u, &data->flags);
+	err |= put_user(u, &data32->flags);
+	err |= get_user(i, &data->handle);
+	err |= put_user(i, &data32->handle);
+
+	return err;
+}
+
+static int compat_get_ion_custom_data(
+			struct compat_ion_custom_data __user *data32,
+			struct ion_custom_data __user *data)
+{
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+	int err;
+
+	err = get_user(cmd, &data32->cmd);
+	err |= put_user(cmd, &data->cmd);
+	err |= get_user(arg, &data32->arg);
+	err |= put_user(arg, &data->arg);
+
+	return err;
+};
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	if (!filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_ION_IOC_ALLOC:
+	{
+		struct compat_ion_allocation_data __user *data32;
+		struct ion_allocation_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_allocation_data(data32, data);
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+							(unsigned long)data);
+		err = compat_put_ion_allocation_data(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_ION_IOC_FREE:
+	{
+		struct compat_ion_handle_data __user *data32;
+		struct ion_handle_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_handle_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+							(unsigned long)data);
+	}
+	case COMPAT_ION_IOC_CUSTOM: {
+		struct compat_ion_custom_data __user *data32;
+		struct ion_custom_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_custom_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+							(unsigned long)data);
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	case ION_IOC_IMPORT:
+	case ION_IOC_SYNC:
+		return filp->f_op->unlocked_ioctl(filp, cmd,
+						(unsigned long)compat_ptr(arg));
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/compat_ion.h b/drivers/staging/media/sunxi/cedar/ion/compat_ion.h
new file mode 100644
index 000000000..9da8f9176
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/compat_ion.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion-ioctl.c b/drivers/staging/media/sunxi/cedar/ion/ion-ioctl.c
new file mode 100644
index 000000000..1d832e07b
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion-ioctl.c
@@ -0,0 +1,190 @@
+/*
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+union ion_ioctl_arg {
+	struct ion_fd_data fd;
+	struct ion_allocation_data allocation;
+	struct ion_handle_data handle;
+	struct ion_custom_data custom;
+	struct ion_heap_query query;
+};
+
+static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case ION_IOC_HEAP_QUERY:
+		ret = arg->query.reserved0 != 0;
+		ret |= arg->query.reserved1 != 0;
+		ret |= arg->query.reserved2 != 0;
+		break;
+	default:
+		break;
+	}
+
+	return ret ? -EINVAL : 0;
+}
+
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_SYNC:
+	case ION_IOC_FREE:
+	case ION_IOC_CUSTOM:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
+	struct ion_device *dev = client->dev;
+	struct ion_handle *cleanup_handle = NULL;
+	int ret = 0;
+	unsigned int dir;
+	union ion_ioctl_arg data;
+
+	dir = ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	/*
+	 * The copy_from_user() is unconditional here for both read and write
+	 * directions so that the argument can be validated. If the ioctl has
+	 * no write direction, the buffer is cleared afterwards.
+	 */
+	if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+		return -EFAULT;
+
+	ret = validate_ioctl_arg(cmd, &data);
+	if (ret) {
+		pr_warn_once("%s: ioctl validate failed\n", __func__);
+		return ret;
+	}
+
+	if (!(dir & _IOC_WRITE))
+		memset(&data, 0, sizeof(data));
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_alloc(client, data.allocation.len,
+						data.allocation.align,
+						data.allocation.heap_id_mask,
+						data.allocation.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		data.allocation.handle = handle->id;
+
+		cleanup_handle = handle;
+		break;
+	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle *handle;
+
+		mutex_lock(&client->lock);
+		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+		if (IS_ERR(handle)) {
+			mutex_unlock(&client->lock);
+			return PTR_ERR(handle);
+		}
+		ion_free_nolock(client, handle);
+		ion_handle_put_nolock(handle);
+		mutex_unlock(&client->lock);
+		break;
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	{
+		struct ion_handle *handle;
+
+		mutex_lock(&client->lock);
+		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+		if (IS_ERR(handle)) {
+			mutex_unlock(&client->lock);
+			return PTR_ERR(handle);
+		}
+		data.fd.fd = ion_share_dma_buf_fd_nolock(client, handle);
+		ion_handle_put_nolock(handle);
+		mutex_unlock(&client->lock);
+		if (data.fd.fd < 0)
+			ret = data.fd.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_import_dma_buf_fd(client, data.fd.fd);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		else
+			data.handle.handle = handle->id;
+		break;
+	}
+	case ION_IOC_SYNC:
+	{
+		ret = ion_sync_for_device(client, data.fd.fd);
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, data.custom.cmd,
+						data.custom.arg);
+		break;
+	}
+	case ION_IOC_HEAP_QUERY:
+		ret = ion_query_heaps(client, &data.query);
+		break;
+
+	case 5: /* Allwinner's libcdc issues a bare cmd 5; treat as custom */
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, cmd, arg);
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+			if (cleanup_handle)
+				ion_free(client, cleanup_handle);
+			return -EFAULT;
+		}
+	}
+	return ret;
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion.c b/drivers/staging/media/sunxi/cedar/ion/ion.c
new file mode 100644
index 000000000..faa2e0c9e
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion.c
@@ -0,0 +1,1510 @@
+/*
+ *
+ * drivers/staging/android/ion/ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/miscdevice.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+#include <linux/sched/task.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
+
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+	return (buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
+
+bool ion_buffer_cached(struct ion_buffer *buffer)
+{
+	return !!(buffer->flags & ION_FLAG_CACHED);
+}
+
+static inline struct page *ion_buffer_page(struct page *page)
+{
+	return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+	return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
+/* this function should only be called while dev->lock is held */
+static void ion_buffer_add(struct ion_device *dev,
+			   struct ion_buffer *buffer)
+{
+	struct rb_node **p = &dev->buffers.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_buffer *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_buffer, node);
+
+		if (buffer < entry) {
+			p = &(*p)->rb_left;
+		} else if (buffer > entry) {
+			p = &(*p)->rb_right;
+		} else {
+			pr_err("%s: buffer already found.", __func__);
+			BUG();
+		}
+	}
+
+	rb_link_node(&buffer->node, parent, p);
+	rb_insert_color(&buffer->node, &dev->buffers);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+					    struct ion_device *dev,
+					    unsigned long len,
+					    unsigned long align,
+					    unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int i, ret;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	buffer->flags = flags;
+	kref_init(&buffer->ref);
+
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
+
+	if (ret) {
+		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+			goto err2;
+
+		ion_heap_freelist_drain(heap, 0);
+		ret = heap->ops->allocate(heap, buffer, len, align,
+					  flags);
+		if (ret)
+			goto err2;
+	}
+
+	if (buffer->sg_table == NULL) {
+		WARN_ONCE(1, "This heap needs to set the sgtable");
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	table = buffer->sg_table;
+	buffer->dev = dev;
+	buffer->size = len;
+
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+		struct scatterlist *sg;
+		int i, j, k = 0;
+
+		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+		if (!buffer->pages) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
+
+			for (j = 0; j < sg->length / PAGE_SIZE; j++)
+				buffer->pages[k++] = page++;
+		}
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
+	INIT_LIST_HEAD(&buffer->vmas);
+	mutex_init(&buffer->lock);
+	/*
+	 * this will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the dma api -- a specific
+	 * device isn't really taking ownership here.  However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg. The implicit contract here is that
+	 * memory coming from the heaps is ready for dma, i.e. if it has a
+	 * cached mapping, that mapping has been invalidated
+	 */
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		sg_dma_address(sg) = sg_phys(sg);
+		sg_dma_len(sg) = sg->length;
+	}
+	mutex_lock(&dev->buffer_lock);
+	ion_buffer_add(dev, buffer);
+	mutex_unlock(&dev->buffer_lock);
+	return buffer;
+
+err1:
+	heap->ops->free(buffer);
+err2:
+	kfree(buffer);
+	return ERR_PTR(ret);
+}
+
+void ion_buffer_destroy(struct ion_buffer *buffer)
+{
+	if (buffer->kmap_cnt > 0) {
+		pr_warn_once("%s: buffer still mapped in the kernel\n",
+			     __func__);
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+	}
+	buffer->heap->ops->free(buffer);
+	vfree(buffer->pages);
+	kfree(buffer);
+}
+
+static void _ion_buffer_destroy(struct kref *kref)
+{
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
+	struct ion_heap *heap = buffer->heap;
+	struct ion_device *dev = buffer->dev;
+
+	mutex_lock(&dev->buffer_lock);
+	rb_erase(&buffer->node, &dev->buffers);
+	mutex_unlock(&dev->buffer_lock);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_freelist_add(heap, buffer);
+	else
+		ion_buffer_destroy(buffer);
+}
+
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+	mutex_lock(&buffer->lock);
+	buffer->handle_count++;
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+	/*
+	 * when a buffer is removed from a handle, if it is not in
+	 * any other handles, copy the taskcomm and the pid of the
+	 * process it's being removed from into the buffer.  At this
+	 * point there will be no way to track what processes this buffer is
+	 * being used by; it only exists as a dma_buf file descriptor.
+	 * The taskcomm and pid can provide a debug hint as to where this fd
+	 * is in the system
+	 */
+	mutex_lock(&buffer->lock);
+	buffer->handle_count--;
+	BUG_ON(buffer->handle_count < 0);
+	if (!buffer->handle_count) {
+		struct task_struct *task;
+
+		task = current->group_leader;
+		get_task_comm(buffer->task_comm, task);
+		buffer->pid = task_pid_nr(task);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	RB_CLEAR_NODE(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	ion_buffer_add_to_handle(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
+	idr_remove(&client->idr, handle->id);
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &client->handles);
+
+	ion_buffer_remove_from_handle(buffer);
+	ion_buffer_put(buffer);
+
+	kfree(handle);
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+/* Must hold the client lock */
+static struct ion_handle *ion_handle_get_check_overflow(
+					struct ion_handle *handle)
+{
+	if (atomic_read(&handle->ref.refcount.refs) + 1 == 0)
+		return ERR_PTR(-EOVERFLOW);
+	ion_handle_get(handle);
+	return handle;
+}
+
+int ion_handle_put_nolock(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+int ion_handle_put(struct ion_handle *handle)
+{
+	struct ion_client *client = handle->client;
+	int ret;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_put_nolock(handle);
+	mutex_unlock(&client->lock);
+
+	return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
+		if (buffer < entry->buffer)
+			n = n->rb_left;
+		else if (buffer > entry->buffer)
+			n = n->rb_right;
+		else
+			return entry;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+					       int id)
+{
+	struct ion_handle *handle;
+
+	handle = idr_find(&client->idr, id);
+	if (handle)
+		return ion_handle_get_check_overflow(handle);
+
+	return ERR_PTR(-EINVAL);
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	WARN_ON(!mutex_is_locked(&client->lock));
+	return idr_find(&client->idr, handle->id) == handle;
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	int id;
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	handle->id = id;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle->buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (handle->buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: buffer already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+
+	return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap;
+	int ret;
+
+	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+		 len, align, heap_id_mask, flags);
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches the
+	 * request of the caller, allocate from it.  Repeat until an allocation
+	 * has succeeded or all heaps have been tried.
+	 */
+	len = PAGE_ALIGN(len);
+
+	if (!len)
+		return ERR_PTR(-EINVAL);
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & heap_id_mask))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR(buffer))
+			break;
+	}
+	up_read(&dev->lock);
+
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	handle = ion_handle_create(client, buffer);
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference; drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	if (IS_ERR(handle))
+		return handle;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free_nolock(struct ion_client *client,
+		     struct ion_handle *handle)
+{
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	ion_handle_put_nolock(handle);
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	ion_free_nolock(client, handle);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_free);
+
+static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
+	if (WARN_ONCE(vaddr == NULL,
+		      "heap->ops->map_kernel should return ERR_PTR on error"))
+		return ERR_PTR(-EINVAL);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
+static void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+static void ion_handle_kmap_put(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+
+	if (!handle->kmap_cnt) {
+		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
+		return;
+	}
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
+
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
+}
+EXPORT_SYMBOL(ion_map_kernel);
+
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_unmap_kernel);
+
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+	struct rb_node *node;
+	struct ion_client *tmp;
+	struct ion_device *dev;
+
+	node = ion_root_client->rb_node;
+	dev = container_of(ion_root_client, struct ion_device, clients);
+
+	down_read(&dev->lock);
+	while (node) {
+		tmp = rb_entry(node, struct ion_client, node);
+		if (client < tmp) {
+			node = node->rb_left;
+		} else if (client > tmp) {
+			node = node->rb_right;
+		} else {
+			up_read(&dev->lock);
+			return 1;
+		}
+	}
+
+	up_read(&dev->lock);
+	return 0;
+}
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	size_t sizes[ION_NUM_HEAP_IDS] = {0};
+	const char *names[ION_NUM_HEAP_IDS] = {NULL};
+	int i;
+
+	mutex_lock(&debugfs_mutex);
+	if (!is_client_alive(client)) {
+		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
+			   client);
+		mutex_unlock(&debugfs_mutex);
+		return 0;
+	}
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		unsigned int id = handle->buffer->heap->id;
+
+		if (!names[id])
+			names[id] = handle->buffer->heap->name;
+		sizes[id] += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&debugfs_mutex);
+
+	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+		if (!names[i])
+			continue;
+		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+	}
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int ion_get_client_serial(const struct rb_root *root,
+				 const unsigned char *name)
+{
+	int serial = -1;
+	struct rb_node *node;
+
+	for (node = rb_first(root); node; node = rb_next(node)) {
+		struct ion_client *client = rb_entry(node, struct ion_client,
+						     node);
+
+		if (strcmp(client->name, name))
+			continue;
+		serial = max(serial, client->display_serial);
+	}
+	return serial + 1;
+}
+
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	pid_t pid;
+
+	if (!name) {
+		pr_err("%s: Name cannot be null\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	/*
+	 * don't bother to store task struct for kernel threads,
+	 * they can't be killed anyway
+	 */
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto err_put_task_struct;
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	idr_init(&client->idr);
+	mutex_init(&client->lock);
+	client->task = task;
+	client->pid = pid;
+	client->name = kstrdup(name, GFP_KERNEL);
+	if (!client->name)
+		goto err_free_client;
+
+	down_write(&dev->lock);
+	client->display_serial = ion_get_client_serial(&dev->clients, name);
+	client->display_name = kasprintf(
+		GFP_KERNEL, "%s-%d", name, client->display_serial);
+	if (!client->display_name) {
+		up_write(&dev->lock);
+		goto err_free_client_name;
+	}
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
+
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
+
+	client->debug_root = debugfs_create_file(client->display_name, 0664,
+						 dev->clients_debug_root,
+						 client, &debug_client_fops);
+	if (!client->debug_root) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->clients_debug_root, buf, 256);
+		pr_err("Failed to create client debugfs at %s/%s\n",
+		       path, client->display_name);
+	}
+
+	up_write(&dev->lock);
+
+	return client;
+
+err_free_client_name:
+	kfree(client->name);
+err_free_client:
+	kfree(client);
+err_put_task_struct:
+	if (task)
+		put_task_struct(current->group_leader);
+	return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(ion_client_create);
+
+void ion_client_destroy(struct ion_client *client)
+{
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&debugfs_mutex);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		ion_handle_destroy(&handle->ref);
+	}
+
+	idr_destroy(&client->idr);
+
+	down_write(&dev->lock);
+	if (client->task)
+		put_task_struct(client->task);
+	rb_erase(&client->node, &dev->clients);
+	debugfs_remove_recursive(client->debug_root);
+	up_write(&dev->lock);
+
+	kfree(client->display_name);
+	kfree(client->name);
+	kfree(client);
+	mutex_unlock(&debugfs_mutex);
+}
+EXPORT_SYMBOL(ion_client_destroy);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+}
+
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+			       size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
+
+	mutex_lock(&buffer->lock);
+	for (i = 0; i < pages; i++) {
+		struct page *page = buffer->pages[i];
+
+		if (ion_buffer_page_is_dirty(page))
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+						  PAGE_SIZE, dir);
+
+		ion_buffer_page_clean(buffer->pages + i);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static vm_fault_t ion_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct ion_buffer *buffer = vma->vm_private_data;
+	unsigned long pfn;
+	int ret;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+	ret = vmf_insert_pfn(vma, (unsigned long)vmf->address, pfn);
+	mutex_unlock(&buffer->lock);
+	if (ret)
+		return VM_FAULT_ERROR;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static const struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
+};
+
+static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	int ret = 0;
+
+	if (!buffer->heap->ops->map_user) {
+		pr_err("%s: this heap does not define a method for mapping to userspace\n",
+		       __func__);
+		return -EINVAL;
+	}
+
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+							VM_DONTDUMP;
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		ion_vm_open(vma);
+		return 0;
+	}
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	mutex_lock(&buffer->lock);
+	/* now map it to userspace */
+	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+	mutex_unlock(&buffer->lock);
+
+	if (ret)
+		pr_err("%s: failure mapping buffer to userspace\n",
+		       __func__);
+
+	return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	ion_buffer_put(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	return buffer->vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+			       void *ptr)
+{
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+	return PTR_ERR_OR_ZERO(vaddr);
+}
+
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static struct dma_buf_ops dma_buf_ops = {
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.mmap = ion_mmap,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.map = ion_dma_buf_kmap,
+	.unmap = ion_dma_buf_kunmap,
+};
+
+static struct dma_buf *__ion_share_dma_buf(struct ion_client *client,
+					   struct ion_handle *handle,
+					   bool lock_client)
+{
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct ion_buffer *buffer;
+	struct dma_buf *dmabuf;
+	bool valid_handle;
+
+	if (lock_client)
+		mutex_lock(&client->lock);
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
+		WARN(1, "%s: invalid handle passed to share.\n", __func__);
+		if (lock_client)
+			mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	ion_buffer_get(buffer);
+	if (lock_client)
+		mutex_unlock(&client->lock);
+
+	exp_info.ops = &dma_buf_ops;
+	exp_info.size = buffer->size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf)) {
+		ion_buffer_put(buffer);
+		return dmabuf;
+	}
+
+	return dmabuf;
+}
+
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+				  struct ion_handle *handle)
+{
+	return __ion_share_dma_buf(client, handle, true);
+}
+EXPORT_SYMBOL(ion_share_dma_buf);
+
+static int __ion_share_dma_buf_fd(struct ion_client *client,
+				  struct ion_handle *handle, bool lock_client)
+{
+	struct dma_buf *dmabuf;
+	int fd;
+
+	dmabuf = __ion_share_dma_buf(client, handle, lock_client);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
+	if (fd < 0)
+		dma_buf_put(dmabuf);
+
+	return fd;
+}
+
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
+{
+	return __ion_share_dma_buf_fd(client, handle, true);
+}
+EXPORT_SYMBOL(ion_share_dma_buf_fd);
+
+int ion_share_dma_buf_fd_nolock(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	return __ion_share_dma_buf_fd(client, handle, false);
+}
+
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+				      struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer;
+	struct ion_handle *handle;
+	int ret;
+
+	/* if this memory came from ion */
+
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not import dmabuf from another exporter\n",
+		       __func__);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = dmabuf->priv;
+
+	mutex_lock(&client->lock);
+	/* if a handle exists for this buffer just take a reference to it */
+	handle = ion_handle_lookup(client, buffer);
+	if (!IS_ERR(handle)) {
+		handle = ion_handle_get_check_overflow(handle);
+		mutex_unlock(&client->lock);
+		goto end;
+	}
+
+	handle = ion_handle_create(client, buffer);
+	if (IS_ERR(handle)) {
+		mutex_unlock(&client->lock);
+		goto end;
+	}
+
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+end:
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf);
+
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_handle *handle;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return ERR_CAST(dmabuf);
+
+	handle = ion_import_dma_buf(client, dmabuf);
+	dma_buf_put(dmabuf);
+	return handle;
+}
+EXPORT_SYMBOL(ion_import_dma_buf_fd);
+
+int ion_sync_for_device(struct ion_client *client, int fd)
+{
+	struct dma_buf *dmabuf;
+	struct ion_buffer *buffer;
+
+	dmabuf = dma_buf_get(fd);
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	/* if this memory came from ion */
+	if (dmabuf->ops != &dma_buf_ops) {
+		pr_err("%s: can not sync dmabuf from another exporter\n",
+		       __func__);
+		dma_buf_put(dmabuf);
+		return -EINVAL;
+	}
+	buffer = dmabuf->priv;
+
+	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
+	dma_buf_put(dmabuf);
+	return 0;
+}
+
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+{
+	struct ion_device *dev = client->dev;
+	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
+	int ret = -EINVAL, cnt = 0, max_cnt;
+	struct ion_heap *heap;
+	struct ion_heap_data hdata;
+
+	memset(&hdata, 0, sizeof(hdata));
+
+	down_read(&dev->lock);
+	if (!buffer) {
+		query->cnt = dev->heap_cnt;
+		ret = 0;
+		goto out;
+	}
+
+	if (query->cnt <= 0)
+		goto out;
+
+	max_cnt = query->cnt;
+
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
+		hdata.name[sizeof(hdata.name) - 1] = '\0';
+		hdata.type = heap->type;
+		hdata.heap_id = heap->id;
+
+		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
+			ret = -EFAULT;
+			goto out;
+		}
+
+		cnt++;
+		if (cnt >= max_cnt)
+			break;
+	}
+
+	query->cnt = cnt;
+out:
+	up_read(&dev->lock);
+	return ret;
+}
+
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_destroy(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+	char debug_name[64];
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+	client = ion_client_create(dev, debug_name);
+	if (IS_ERR(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
+static const struct file_operations ion_fops = {
+	.owner          = THIS_MODULE,
+	.open           = ion_open,
+	.release        = ion_release,
+	.unlocked_ioctl = ion_ioctl,
+	.compat_ioctl   = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   unsigned int id)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->id == id)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+	size_t total_size = 0;
+	size_t total_orphaned_size = 0;
+
+	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
+	seq_puts(s, "----------------------------------------------------\n");
+
+	mutex_lock(&debugfs_mutex);
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->id);
+
+		if (!size)
+			continue;
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16s %16u %16zu\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16s %16u %16zu\n", client->name,
+				   client->pid, size);
+		}
+	}
+	mutex_unlock(&debugfs_mutex);
+
+	seq_puts(s, "----------------------------------------------------\n");
+	seq_puts(s, "orphaned allocations (info is from last known client):\n");
+	mutex_lock(&dev->buffer_lock);
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+						     node);
+		if (buffer->heap->id != heap->id)
+			continue;
+		total_size += buffer->size;
+		if (!buffer->handle_count) {
+			seq_printf(s, "%16s %16u %16zu %d %d\n",
+				   buffer->task_comm, buffer->pid,
+				   buffer->size, buffer->kmap_cnt,
+				   atomic_read(&buffer->ref.refcount.refs));
+			total_orphaned_size += buffer->size;
+		}
+	}
+	mutex_unlock(&dev->buffer_lock);
+	seq_puts(s, "----------------------------------------------------\n");
+	seq_printf(s, "%16s %16zu\n", "total orphaned",
+		   total_orphaned_size);
+	seq_printf(s, "%16s %16zu\n", "total ", total_size);
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		seq_printf(s, "%16s %16zu\n", "deferred free",
+			   heap->free_list_size);
+	seq_puts(s, "----------------------------------------------------\n");
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int debug_shrink_set(void *data, u64 val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = GFP_HIGHUSER;
+	sc.nr_to_scan = val;
+
+	if (!val) {
+		objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
+		sc.nr_to_scan = objs;
+	}
+
+	heap->shrinker.scan_objects(&heap->shrinker, &sc);
+	return 0;
+}
+
+static int debug_shrink_get(void *data, u64 *val)
+{
+	struct ion_heap *heap = data;
+	struct shrink_control sc;
+	int objs;
+
+	sc.gfp_mask = GFP_HIGHUSER;
+	sc.nr_to_scan = 0;
+
+	objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
+	*val = objs;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
+			debug_shrink_set, "%llu\n");
+
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
+{
+	struct dentry *debug_file;
+
+	if (!heap->ops->allocate || !heap->ops->free)
+		pr_err("%s: can not add heap with invalid ops struct.\n",
+		       __func__);
+
+	spin_lock_init(&heap->free_lock);
+	heap->free_list_size = 0;
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_init_deferred_free(heap);
+
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+		ion_heap_init_shrinker(heap);
+
+	heap->dev = dev;
+	down_write(&dev->lock);
+	/*
+	 * use negative heap->id to reverse the priority -- when traversing
+	 * the list later, higher id numbers are attempted first
+	 */
+	plist_node_init(&heap->node, -heap->id);
+	plist_add(&heap->node, &dev->heaps);
+	debug_file = debugfs_create_file(heap->name, 0664,
+					 dev->heaps_debug_root, heap,
+					 &debug_heap_fops);
+
+	if (!debug_file) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->heaps_debug_root, buf, 256);
+		pr_err("Failed to create heap debugfs at %s/%s\n",
+		       path, heap->name);
+	}
+
+	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
+		char debug_name[64];
+
+		snprintf(debug_name, 64, "%s_shrink", heap->name);
+		debug_file = debugfs_create_file(
+			debug_name, 0644, dev->heaps_debug_root, heap,
+			&debug_shrink_fops);
+		if (!debug_file) {
+			char buf[256], *path;
+
+			path = dentry_path(dev->heaps_debug_root, buf, 256);
+			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
+			       path, debug_name);
+		}
+	}
+
+	dev->heap_cnt++;
+	up_write(&dev->lock);
+}
+EXPORT_SYMBOL(ion_device_add_heap);
+
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
+{
+	struct ion_device *idev;
+	int ret;
+
+	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
+	if (!idev)
+		return ERR_PTR(-ENOMEM);
+
+	idev->dev.minor = MISC_DYNAMIC_MINOR;
+	idev->dev.name = "ion";
+	idev->dev.fops = &ion_fops;
+	idev->dev.parent = NULL;
+	ret = misc_register(&idev->dev);
+	if (ret) {
+		pr_err("ion: failed to register misc device.\n");
+		kfree(idev);
+		return ERR_PTR(ret);
+	}
+
+	idev->debug_root = debugfs_create_dir("ion", NULL);
+	if (!idev->debug_root) {
+		pr_err("ion: failed to create debugfs root directory.\n");
+		goto debugfs_done;
+	}
+	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+	if (!idev->heaps_debug_root) {
+		pr_err("ion: failed to create debugfs heaps directory.\n");
+		goto debugfs_done;
+	}
+	idev->clients_debug_root = debugfs_create_dir("clients",
+						idev->debug_root);
+	if (!idev->clients_debug_root)
+		pr_err("ion: failed to create debugfs clients directory.\n");
+
+debugfs_done:
+
+	idev->custom_ioctl = custom_ioctl;
+	idev->buffers = RB_ROOT;
+	mutex_init(&idev->buffer_lock);
+	init_rwsem(&idev->lock);
+	plist_head_init(&idev->heaps);
+	idev->clients = RB_ROOT;
+	ion_root_client = &idev->clients;
+	mutex_init(&debugfs_mutex);
+	return idev;
+}
+EXPORT_SYMBOL(ion_device_create);
+
+void ion_device_destroy(struct ion_device *dev)
+{
+	misc_deregister(&dev->dev);
+	debugfs_remove_recursive(dev->debug_root);
+	/* XXX need to free the heaps and clients ? */
+	kfree(dev);
+}
+EXPORT_SYMBOL(ion_device_destroy);
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion.h b/drivers/staging/media/sunxi/cedar/ion/ion.h
new file mode 100644
index 000000000..2e01075d1
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion.h
@@ -0,0 +1,176 @@
+/*
+ * drivers/staging/android/ion/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ION_H
+#define _LINUX_ION_H
+
+#include <linux/types.h>
+
+#include "uapi/ion.h"
+
+struct ion_handle;
+struct ion_device;
+struct ion_heap;
+struct ion_mapper;
+struct ion_client;
+struct ion_buffer;
+
+/*
+ * This should be removed some day when phys_addr_t's are fully
+ * plumbed in the kernel, and all instances of ion_phys_addr_t should
+ * be converted to phys_addr_t.  For the time being many kernel interfaces
+ * do not accept phys_addr_t's and the value would have to be cast back
+ * to unsigned long anyway.
+ */
+#define ion_phys_addr_t unsigned long
+
+/**
+ * struct ion_platform_heap - defines a heap in the given platform
+ * @type:	type of the heap from ion_heap_type enum
+ * @id:		unique identifier for the heap.  When allocating, heaps with
+ *		higher id numbers are tried first.  At allocation these are
+ *		passed as a bit mask and therefore cannot exceed
+ *		ION_NUM_HEAP_IDS.
+ * @name:	used for debug purposes
+ * @base:	base address of heap in physical memory if applicable
+ * @size:	size of the heap in bytes if applicable
+ * @align:	required alignment in physical memory if applicable
+ * @priv:	private info passed from the board file
+ *
+ * Provided by the board file.
+ */
+struct ion_platform_heap {
+	enum ion_heap_type type;
+	unsigned int id;
+	const char *name;
+	ion_phys_addr_t base;
+	size_t size;
+	ion_phys_addr_t align;
+	void *priv;
+};
+
+/**
+ * struct ion_platform_data - array of platform heaps passed from board file
+ * @nr:		number of structures in the array
+ * @heaps:	array of platform_heap structures
+ *
+ * Provided by the board file in the form of platform data to a platform device.
+ */
+struct ion_platform_data {
+	int nr;
+	struct ion_platform_heap *heaps;
+};
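+
+/*
+ * Illustrative sketch (not part of the original header): a board file or
+ * platform glue could describe its heaps roughly as below.  The ids, names,
+ * addresses and sizes are made-up placeholders; note that a chunk heap
+ * receives its chunk size through @priv, and a CMA heap receives the
+ * struct device owning the reserved region through @priv.
+ *
+ *	static struct ion_platform_heap example_heaps[] = {
+ *		{
+ *			.type = ION_HEAP_TYPE_SYSTEM,
+ *			.id   = 0,
+ *			.name = "system",
+ *		},
+ *		{
+ *			.type = ION_HEAP_TYPE_CARVEOUT,
+ *			.id   = 1,
+ *			.name = "carveout",
+ *			.base = 0x43000000,
+ *			.size = 16 * 1024 * 1024,
+ *		},
+ *		{
+ *			.type = ION_HEAP_TYPE_CHUNK,
+ *			.id   = 2,
+ *			.name = "chunk",
+ *			.base = 0x44000000,
+ *			.size = 16 * 1024 * 1024,
+ *			.priv = (void *)(64 * 1024),
+ *		},
+ *	};
+ *
+ *	static struct ion_platform_data example_data = {
+ *		.nr    = ARRAY_SIZE(example_heaps),
+ *		.heaps = example_heaps,
+ *	};
+ */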
+
+/**
+ * ion_client_create() - allocates a client and returns it
+ * @dev:		the global ion device
+ * @name:		used for debugging
+ */
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name);
+
+/**
+ * ion_client_destroy() - frees a client and all its handles
+ * @client:	the client
+ *
+ * Free the provided client and all its resources including
+ * any handles it is holding.
+ */
+void ion_client_destroy(struct ion_client *client);
+
+/**
+ * ion_alloc - allocate ion memory
+ * @client:		the client
+ * @len:		size of the allocation
+ * @align:		requested allocation alignment, lots of hardware blocks
+ *			have alignment requirements of some kind
+ * @heap_id_mask:	mask of heaps to allocate from, if multiple bits are set
+ *			heaps will be tried in order from highest to lowest
+ *			id
+ * @flags:		heap flags, the low 16 bits are consumed by ion, the
+ *			high 16 bits are passed on to the respective heap and
+ *			may be heap-specific
+ *
+ * Allocate memory in one of the heaps provided in heap mask and return
+ * an opaque handle to it.
+ */
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags);
+
+/**
+ * ion_free - free a handle
+ * @client:	the client
+ * @handle:	the handle to free
+ *
+ * Free the provided handle.
+ */
+void ion_free(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_map_kernel - create mapping for the given handle
+ * @client:	the client
+ * @handle:	handle to map
+ *
+ * Map the given handle into the kernel and return a kernel virtual address
+ * that can be used to access the buffer.
+ */
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_unmap_kernel() - destroy a kernel mapping for a handle
+ * @client:	the client
+ * @handle:	handle to unmap
+ */
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
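+
+/*
+ * Illustrative kernel-side usage sketch (not part of the original header),
+ * assuming a struct ion_device *idev has been created elsewhere and that a
+ * heap with id 0 exists; IS_ERR() checks are omitted for brevity:
+ *
+ *	struct ion_client *client;
+ *	struct ion_handle *handle;
+ *	void *vaddr;
+ *
+ *	client = ion_client_create(idev, "example");
+ *	handle = ion_alloc(client, 4096, PAGE_SIZE, 1 << 0, 0);
+ *	vaddr  = ion_map_kernel(client, handle);
+ *	... access the buffer through vaddr ...
+ *	ion_unmap_kernel(client, handle);
+ *	ion_free(client, handle);
+ *	ion_client_destroy(client);
+ *
+ * The heap_id_mask argument (here 1 << 0) selects heaps by id, not by type.
+ */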
+
+/**
+ * ion_share_dma_buf() - share buffer as dma-buf
+ * @client:	the client
+ * @handle:	the handle
+ */
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle);
+
+/**
+ * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd
+ * @client:	the client
+ * @handle:	the handle
+ */
+int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle);
+
+/**
+ * ion_import_dma_buf() - get ion_handle from dma-buf
+ * @client:	the client
+ * @dmabuf:	the dma-buf
+ *
+ * Get the ion_buffer associated with the dma-buf and return the ion_handle.
+ * If no ion_handle exists for this buffer, return a newly created ion_handle.
+ * If a dma-buf from another exporter is passed, return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf(struct ion_client *client,
+				      struct dma_buf *dmabuf);
+
+/**
+ * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle
+ * @client:	the client
+ * @fd:		the dma-buf fd
+ *
+ * Given a dma-buf fd that was allocated through ion via ion_share_dma_buf_fd,
+ * import that fd and return a handle representing it. If a dma-buf from
+ * another exporter is passed in, this function will return ERR_PTR(-EINVAL).
+ */
+struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
+
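+/*
+ * Illustrative sharing sketch (not part of the original header): exporting
+ * a buffer as a dma-buf fd and importing such an fd back into a handle.
+ * Error handling is omitted:
+ *
+ *	int fd = ion_share_dma_buf_fd(client, handle);
+ *	...
+ *	struct ion_handle *imported = ion_import_dma_buf_fd(client, fd);
+ */
+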
+#endif /* _LINUX_ION_H */
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_carveout_heap.c b/drivers/staging/media/sunxi/cedar/ion/ion_carveout_heap.c
new file mode 100644
index 000000000..a8ea97391
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_carveout_heap.c
@@ -0,0 +1,168 @@
+/*
+ * drivers/staging/android/ion/ion_carveout_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CARVEOUT_ALLOCATE_FAIL	-1
+
+struct ion_carveout_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+};
+
+static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+					     unsigned long size,
+					     unsigned long align)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+	unsigned long offset = gen_pool_alloc(carveout_heap->pool, size);
+
+	if (!offset)
+		return ION_CARVEOUT_ALLOCATE_FAIL;
+
+	return offset;
+}
+
+static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+			      unsigned long size)
+{
+	struct ion_carveout_heap *carveout_heap =
+		container_of(heap, struct ion_carveout_heap, heap);
+
+	if (addr == ION_CARVEOUT_ALLOCATE_FAIL)
+		return;
+	gen_pool_free(carveout_heap->pool, addr, size);
+}
+
+static int ion_carveout_heap_allocate(struct ion_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
+{
+	struct sg_table *table;
+	ion_phys_addr_t paddr;
+	int ret;
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err_free;
+
+	paddr = ion_carveout_allocate(heap, size, align);
+	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) {
+		ret = -ENOMEM;
+		goto err_free_table;
+	}
+
+	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
+	buffer->sg_table = table;
+
+	return 0;
+
+err_free_table:
+	sg_free_table(table);
+err_free:
+	kfree(table);
+	return ret;
+}
+
+static void ion_carveout_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct sg_table *table = buffer->sg_table;
+	struct page *page = sg_page(table->sgl);
+	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+	ion_heap_buffer_zero(buffer);
+
+	if (ion_buffer_cached(buffer))
+		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+				       DMA_BIDIRECTIONAL);
+
+	ion_carveout_free(heap, paddr, buffer->size);
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct ion_heap_ops carveout_heap_ops = {
+	.allocate = ion_carveout_heap_allocate,
+	.free = ion_carveout_heap_free,
+	.map_user = ion_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_carveout_heap *carveout_heap;
+	int ret;
+
+	struct page *page;
+	size_t size;
+
+	page = pfn_to_page(PFN_DOWN(heap_data->base));
+	size = heap_data->size;
+
+	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+	if (ret)
+		return ERR_PTR(ret);
+
+	carveout_heap = kzalloc(sizeof(*carveout_heap), GFP_KERNEL);
+	if (!carveout_heap)
+		return ERR_PTR(-ENOMEM);
+
+	carveout_heap->pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!carveout_heap->pool) {
+		kfree(carveout_heap);
+		return ERR_PTR(-ENOMEM);
+	}
+	carveout_heap->base = heap_data->base;
+	gen_pool_add(carveout_heap->pool, carveout_heap->base, heap_data->size,
+		     -1);
+	carveout_heap->heap.ops = &carveout_heap_ops;
+	carveout_heap->heap.type = ION_HEAP_TYPE_CARVEOUT;
+	carveout_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+	return &carveout_heap->heap;
+}
+
+void ion_carveout_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_carveout_heap *carveout_heap =
+	     container_of(heap, struct  ion_carveout_heap, heap);
+
+	gen_pool_destroy(carveout_heap->pool);
+	kfree(carveout_heap);
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_chunk_heap.c b/drivers/staging/media/sunxi/cedar/ion/ion_chunk_heap.c
new file mode 100644
index 000000000..70495dc64
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_chunk_heap.c
@@ -0,0 +1,181 @@
+/*
+ * drivers/staging/android/ion/ion_chunk_heap.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_chunk_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+	unsigned long chunk_size;
+	unsigned long size;
+	unsigned long allocated;
+};
+
+static int ion_chunk_heap_allocate(struct ion_heap *heap,
+				   struct ion_buffer *buffer,
+				   unsigned long size, unsigned long align,
+				   unsigned long flags)
+{
+	struct ion_chunk_heap *chunk_heap =
+		container_of(heap, struct ion_chunk_heap, heap);
+	struct sg_table *table;
+	struct scatterlist *sg;
+	int ret, i;
+	unsigned long num_chunks;
+	unsigned long allocated_size;
+
+	if (align > chunk_heap->chunk_size)
+		return -EINVAL;
+
+	allocated_size = ALIGN(size, chunk_heap->chunk_size);
+	num_chunks = allocated_size / chunk_heap->chunk_size;
+
+	if (allocated_size > chunk_heap->size - chunk_heap->allocated)
+		return -ENOMEM;
+
+	table = kmalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return ret;
+	}
+
+	sg = table->sgl;
+	for (i = 0; i < num_chunks; i++) {
+		unsigned long paddr = gen_pool_alloc(chunk_heap->pool,
+						     chunk_heap->chunk_size);
+		if (!paddr)
+			goto err;
+		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
+			    chunk_heap->chunk_size, 0);
+		sg = sg_next(sg);
+	}
+
+	buffer->sg_table = table;
+	chunk_heap->allocated += allocated_size;
+	return 0;
+err:
+	sg = table->sgl;
+	for (i -= 1; i >= 0; i--) {
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+			      sg->length);
+		sg = sg_next(sg);
+	}
+	sg_free_table(table);
+	kfree(table);
+	return -ENOMEM;
+}
+
+static void ion_chunk_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_chunk_heap *chunk_heap =
+		container_of(heap, struct ion_chunk_heap, heap);
+	struct sg_table *table = buffer->sg_table;
+	struct scatterlist *sg;
+	int i;
+	unsigned long allocated_size;
+
+	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
+
+	ion_heap_buffer_zero(buffer);
+
+	if (ion_buffer_cached(buffer))
+		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+				       DMA_BIDIRECTIONAL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
+			      sg->length);
+	}
+	chunk_heap->allocated -= allocated_size;
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct ion_heap_ops chunk_heap_ops = {
+	.allocate = ion_chunk_heap_allocate,
+	.free = ion_chunk_heap_free,
+	.map_user = ion_heap_map_user,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+};
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_chunk_heap *chunk_heap;
+	int ret;
+	struct page *page;
+	size_t size;
+
+	page = pfn_to_page(PFN_DOWN(heap_data->base));
+	size = heap_data->size;
+
+	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+
+	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
+	if (ret)
+		return ERR_PTR(ret);
+
+	chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
+	if (!chunk_heap)
+		return ERR_PTR(-ENOMEM);
+
+	chunk_heap->chunk_size = (unsigned long)heap_data->priv;
+	chunk_heap->pool = gen_pool_create(get_order(chunk_heap->chunk_size) +
+					   PAGE_SHIFT, -1);
+	if (!chunk_heap->pool) {
+		ret = -ENOMEM;
+		goto error_gen_pool_create;
+	}
+	chunk_heap->base = heap_data->base;
+	chunk_heap->size = heap_data->size;
+	chunk_heap->allocated = 0;
+
+	gen_pool_add(chunk_heap->pool, chunk_heap->base, heap_data->size, -1);
+	chunk_heap->heap.ops = &chunk_heap_ops;
+	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
+	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+	pr_debug("%s: base %lu size %zu align %lu\n", __func__,
+		 chunk_heap->base, heap_data->size, heap_data->align);
+
+	return &chunk_heap->heap;
+
+error_gen_pool_create:
+	kfree(chunk_heap);
+	return ERR_PTR(ret);
+}
+
+void ion_chunk_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_chunk_heap *chunk_heap =
+	     container_of(heap, struct  ion_chunk_heap, heap);
+
+	gen_pool_destroy(chunk_heap->pool);
+	kfree(chunk_heap);
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_cma_heap.c b/drivers/staging/media/sunxi/cedar/ion/ion_cma_heap.c
new file mode 100644
index 000000000..f2f126f4f
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_cma_heap.c
@@ -0,0 +1,173 @@
+/*
+ * drivers/staging/android/ion/ion_cma_heap.c
+ *
+ * Copyright (C) Linaro 2012
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+
+#include "ion.h"
+#include "ion_priv.h"
+
+#define ION_CMA_ALLOCATE_FAILED -1
+
+struct ion_cma_heap {
+	struct ion_heap heap;
+	struct device *dev;
+};
+
+#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+
+/* ION CMA heap operations functions */
+static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
+			    unsigned long len, unsigned long align,
+			    unsigned long flags)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info;
+
+	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
+
+	// if (buffer->flags & ION_FLAG_CACHED){
+	// 	return -EINVAL;
+	// }
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
+	if (!info)
+		return ION_CMA_ALLOCATE_FAILED;
+
+	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
+						GFP_HIGHUSER | __GFP_ZERO);
+
+	if (!info->cpu_addr) {
+		dev_err(dev, "Fail to allocate buffer\n");
+		goto err;
+	}
+
+	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!info->table)
+		goto free_mem;
+
+	if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
+			    len))
+		goto free_table;
+	/* keep this for memory release */
+	buffer->priv_virt = info;
+	buffer->sg_table = info->table;
+	dev_dbg(dev, "Allocate buffer %p\n", buffer);
+	return 0;
+
+free_table:
+	kfree(info->table);
+free_mem:
+	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
+err:
+	kfree(info);
+	return ION_CMA_ALLOCATE_FAILED;
+}
+
+static void ion_cma_free(struct ion_buffer *buffer)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Release buffer %p\n", buffer);
+	/* release memory */
+	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
+	/* release sg table */
+	sg_free_table(info->table);
+	kfree(info->table);
+	kfree(info);
+}
+
+static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
+	struct device *dev = cma_heap->dev;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	if (buffer->flags & ION_FLAG_CACHED){
+		return remap_pfn_range(vma, vma->vm_start,
+		      __phys_to_pfn((u32)info->handle) + vma->vm_pgoff,
+		      vma->vm_end - vma->vm_start,
+		      vma->vm_page_prot);
+	}
+
+	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
+				 buffer->size);
+}
+
+static void *ion_cma_map_kernel(struct ion_heap *heap,
+				struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+	/* kernel memory mapping has been done at allocation time */
+	return info->cpu_addr;
+}
+
+static void ion_cma_unmap_kernel(struct ion_heap *heap,
+				 struct ion_buffer *buffer)
+{
+}
+
+static struct ion_heap_ops ion_cma_ops = {
+	.allocate = ion_cma_allocate,
+	.free = ion_cma_free,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+};
+
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_cma_heap *cma_heap;
+
+	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+
+	if (!cma_heap)
+		return ERR_PTR(-ENOMEM);
+
+	cma_heap->heap.ops = &ion_cma_ops;
+	/*
+	 * get device from private heaps data, later it will be
+	 * used to make the link with reserved CMA memory
+	 */
+	cma_heap->dev = data->priv;
+	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
+	return &cma_heap->heap;
+}
+
+void ion_cma_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+
+	kfree(cma_heap);
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_heap.c b/drivers/staging/media/sunxi/cedar/ion/ion_heap.c
new file mode 100644
index 000000000..8e3bd3a61
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_heap.c
@@ -0,0 +1,384 @@
+/*
+ * drivers/staging/android/ion/ion_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
+#include <uapi/linux/sched/types.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+			  struct ion_buffer *buffer)
+{
+	struct scatterlist *sg;
+	int i, j;
+	void *vaddr;
+	pgprot_t pgprot;
+	struct sg_table *table = buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page **pages = vmalloc(sizeof(struct page *) * npages);
+	struct page **tmp = pages;
+
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+
+		BUG_ON(i >= npages);
+		for (j = 0; j < npages_this_entry; j++)
+			*(tmp++) = page++;
+	}
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	vfree(pages);
+
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+			   struct ion_buffer *buffer)
+{
+	vunmap(buffer->vaddr);
+}
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma)
+{
+	struct sg_table *table = buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	int i;
+	int ret;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long remainder = vma->vm_end - addr;
+		unsigned long len = sg->length;
+
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			continue;
+		} else if (offset) {
+			page += offset / PAGE_SIZE;
+			len = sg->length - offset;
+			offset = 0;
+		}
+		len = min(len, remainder);
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+				      vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += len;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+	return 0;
+}
+
+static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+	void *addr = vm_map_ram(pages, num, -1, pgprot);
+
+	if (!addr)
+		return -ENOMEM;
+	memset(addr, 0, PAGE_SIZE * num);
+	vm_unmap_ram(addr, num);
+
+	return 0;
+}
+
+static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+				pgprot_t pgprot)
+{
+	int p = 0;
+	int ret = 0;
+	struct sg_page_iter piter;
+	struct page *pages[32];
+
+	for_each_sg_page(sgl, &piter, nents, 0) {
+		pages[p++] = sg_page_iter_page(&piter);
+		if (p == ARRAY_SIZE(pages)) {
+			ret = ion_heap_clear_pages(pages, p, pgprot);
+			if (ret)
+				return ret;
+			p = 0;
+		}
+	}
+	if (p)
+		ret = ion_heap_clear_pages(pages, p, pgprot);
+
+	return ret;
+}
+
+int ion_heap_buffer_zero(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->sg_table;
+	pgprot_t pgprot;
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	return ion_heap_sglist_zero(table->sgl, table->nents, pgprot);
+}
+
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	return ion_heap_sglist_zero(&sg, 1, pgprot);
+}
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	spin_lock(&heap->free_lock);
+	list_add(&buffer->list, &heap->free_list);
+	heap->free_list_size += buffer->size;
+	spin_unlock(&heap->free_lock);
+	wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+	size_t size;
+
+	spin_lock(&heap->free_lock);
+	size = heap->free_list_size;
+	spin_unlock(&heap->free_lock);
+
+	return size;
+}
+
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+				       bool skip_pools)
+{
+	struct ion_buffer *buffer;
+	size_t total_drained = 0;
+
+	if (ion_heap_freelist_size(heap) == 0)
+		return 0;
+
+	spin_lock(&heap->free_lock);
+	if (size == 0)
+		size = heap->free_list_size;
+
+	while (!list_empty(&heap->free_list)) {
+		if (total_drained >= size)
+			break;
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		if (skip_pools)
+			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+		total_drained += buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_destroy(buffer);
+		spin_lock(&heap->free_lock);
+	}
+	spin_unlock(&heap->free_lock);
+
+	return total_drained;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, true);
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+	struct ion_heap *heap = data;
+
+	while (true) {
+		struct ion_buffer *buffer;
+
+		wait_event_freezable(heap->waitqueue,
+				     ion_heap_freelist_size(heap) > 0);
+
+		spin_lock(&heap->free_lock);
+		if (list_empty(&heap->free_list)) {
+			spin_unlock(&heap->free_lock);
+			continue;
+		}
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_destroy(buffer);
+	}
+
+	return 0;
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	INIT_LIST_HEAD(&heap->free_list);
+	init_waitqueue_head(&heap->waitqueue);
+	heap->task = kthread_run(ion_heap_deferred_free, heap,
+				 "%s", heap->name);
+	if (IS_ERR(heap->task)) {
+		pr_err("%s: creating thread for deferred free failed\n",
+		       __func__);
+		return PTR_ERR_OR_ZERO(heap->task);
+	}
+	sched_setscheduler(heap->task, SCHED_IDLE, &param);
+	return 0;
+}
+
+static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
+					   struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	int total = 0;
+
+	total = ion_heap_freelist_size(heap) / PAGE_SIZE;
+	if (heap->ops->shrink)
+		total += heap->ops->shrink(heap, sc->gfp_mask, 0);
+	return total;
+}
+
+static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
+					  struct shrink_control *sc)
+{
+	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
+					     shrinker);
+	int freed = 0;
+	int to_scan = sc->nr_to_scan;
+
+	if (to_scan == 0)
+		return 0;
+
+	/*
+	 * shrink the free list first, no point in zeroing the memory if we're
+	 * just going to reclaim it. Also, skip any possible page pooling.
+	 */
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		freed = ion_heap_freelist_shrink(heap, to_scan * PAGE_SIZE) /
+				PAGE_SIZE;
+
+	to_scan -= freed;
+	if (to_scan <= 0)
+		return freed;
+
+	if (heap->ops->shrink)
+		freed += heap->ops->shrink(heap, sc->gfp_mask, to_scan);
+	return freed;
+}
+
+void ion_heap_init_shrinker(struct ion_heap *heap)
+{
+	heap->shrinker.count_objects = ion_heap_shrink_count;
+	heap->shrinker.scan_objects = ion_heap_shrink_scan;
+	heap->shrinker.seeks = DEFAULT_SEEKS;
+	heap->shrinker.batch = 0;
+	register_shrinker(&heap->shrinker);
+}
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch (heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		heap = ion_system_contig_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		heap = ion_system_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		heap = ion_carveout_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_CHUNK:
+		heap = ion_chunk_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_DMA:
+		heap = ion_cma_heap_create(heap_data);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap_data->type);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+		       __func__, heap_data->name, heap_data->type,
+		       heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	return heap;
+}
+EXPORT_SYMBOL(ion_heap_create);
+
+void ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_CONTIG:
+		ion_system_contig_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_SYSTEM:
+		ion_system_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CARVEOUT:
+		ion_carveout_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_CHUNK:
+		ion_chunk_heap_destroy(heap);
+		break;
+	case ION_HEAP_TYPE_DMA:
+		ion_cma_heap_destroy(heap);
+		break;
+	default:
+		pr_err("%s: Invalid heap type %d\n", __func__,
+		       heap->type);
+	}
+}
+EXPORT_SYMBOL(ion_heap_destroy);
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_of.c b/drivers/staging/media/sunxi/cedar/ion/ion_of.c
new file mode 100644
index 000000000..46b2bb99b
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_of.c
@@ -0,0 +1,185 @@
+/*
+ * Based on work from:
+ *   Andrew Andrianov <andrew@ncrmnt.org>
+ *   Google
+ *   The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/cma.h>
+#include <linux/dma-contiguous.h>
+#include <linux/io.h>
+#include <linux/of_reserved_mem.h>
+#include "ion.h"
+#include "ion_priv.h"
+#include "ion_of.h"
+
+static int ion_parse_dt_heap_common(struct device_node *heap_node,
+				    struct ion_platform_heap *heap,
+				    struct ion_of_heap *compatible)
+{
+	int i;
+
+	for (i = 0; compatible[i].name; i++) {
+		if (of_device_is_compatible(heap_node, compatible[i].compat))
+			break;
+	}
+
+	if (!compatible[i].name)
+		return -ENODEV;
+
+	heap->id = compatible[i].heap_id;
+	heap->type = compatible[i].type;
+	heap->name = compatible[i].name;
+	heap->align = compatible[i].align;
+
+	/* Some kind of callback function pointer? */
+
+	pr_info("%s: id %d type %d name %s align %lx\n", __func__,
+		heap->id, heap->type, heap->name, heap->align);
+	return 0;
+}
+
+static int ion_setup_heap_common(struct platform_device *parent,
+				 struct device_node *heap_node,
+				 struct ion_platform_heap *heap)
+{
+	int ret = 0;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_CARVEOUT:
+	case ION_HEAP_TYPE_CHUNK:
+		if (heap->base && heap->size)
+			return 0;
+
+		ret = of_reserved_mem_device_init(heap->priv);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+				       struct ion_of_heap *compatible)
+{
+	int num_heaps, ret;
+	const struct device_node *dt_node = pdev->dev.of_node;
+	struct device_node *node;
+	struct ion_platform_heap *heaps;
+	struct ion_platform_data *data;
+	int i = 0;
+
+	num_heaps = of_get_available_child_count(dt_node);
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	heaps = devm_kcalloc(&pdev->dev, num_heaps,
+			     sizeof(struct ion_platform_heap),
+			     GFP_KERNEL);
+	if (!heaps)
+		return ERR_PTR(-ENOMEM);
+
+	data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data),
+			    GFP_KERNEL);
+	if (!data)
+		return ERR_PTR(-ENOMEM);
+
+	for_each_available_child_of_node(dt_node, node) {
+		struct platform_device *heap_pdev;
+
+		ret = ion_parse_dt_heap_common(node, &heaps[i], compatible);
+		if (ret)
+			return ERR_PTR(ret);
+
+		heap_pdev = of_platform_device_create(node, heaps[i].name,
+						      &pdev->dev);
+		if (!heap_pdev)
+			return ERR_PTR(-ENOMEM);
+		heap_pdev->dev.platform_data = &heaps[i];
+
+		heaps[i].priv = &heap_pdev->dev;
+
+		ret = ion_setup_heap_common(pdev, node, &heaps[i]);
+		if (ret)
+			goto out_err;
+		i++;
+	}
+
+	data->heaps = heaps;
+	data->nr = num_heaps;
+	return data;
+
+out_err:
+	for ( ; i >= 0; i--)
+		if (heaps[i].priv)
+			of_device_unregister(to_platform_device(heaps[i].priv));
+
+	return ERR_PTR(ret);
+}
+
+void ion_destroy_platform_data(struct ion_platform_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++)
+		if (data->heaps[i].priv)
+			of_device_unregister(to_platform_device(
+				data->heaps[i].priv));
+}
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct ion_platform_heap *heap = pdev->dev.platform_data;
+
+	heap->base = rmem->base;
+	heap->size = rmem->size;
+	pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__,
+		 heap->name, &rmem->base, &rmem->size, dev);
+	return 0;
+}
+
+static void rmem_ion_device_release(struct reserved_mem *rmem,
+				    struct device *dev)
+{
+	return;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+	.device_init	= rmem_ion_device_init,
+	.device_release	= rmem_ion_device_release,
+};
+
+static int __init rmem_ion_setup(struct reserved_mem *rmem)
+{
+	phys_addr_t size = rmem->size;
+
+	size = size / 1024;
+
+	pr_info("Ion memory setup at %pa size %pa KiB\n",
+		&rmem->base, &size);
+	rmem->ops = &rmem_dma_ops;
+	return 0;
+}
+
+RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup);
+#endif
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_of.h b/drivers/staging/media/sunxi/cedar/ion/ion_of.h
new file mode 100644
index 000000000..8241a1770
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_of.h
@@ -0,0 +1,37 @@
+/*
+ * Based on work from:
+ *   Andrew Andrianov <andrew@ncrmnt.org>
+ *   Google
+ *   The Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ION_OF_H
+#define _ION_OF_H
+
+struct ion_of_heap {
+	const char *compat;
+	int heap_id;
+	int type;
+	const char *name;
+	int align;
+};
+
+#define PLATFORM_HEAP(_compat, _id, _type, _name) \
+{ \
+	.compat = _compat, \
+	.heap_id = _id, \
+	.type = _type, \
+	.name = _name, \
+	.align = PAGE_SIZE, \
+}
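+
+/*
+ * Illustrative sketch (not part of the original header): a platform driver
+ * would typically hand a table like the one below to ion_parse_dt(); the
+ * compatible strings, ids and names are placeholders.  The table must be
+ * terminated by an empty entry, since ion_parse_dt_heap_common() stops at
+ * the first entry with a NULL name.
+ *
+ *	static struct ion_of_heap example_heaps[] = {
+ *		PLATFORM_HEAP("example,sys-user", 0,
+ *			      ION_HEAP_TYPE_SYSTEM, "sys_user"),
+ *		PLATFORM_HEAP("example,cma", 1,
+ *			      ION_HEAP_TYPE_DMA, "cma"),
+ *		{}
+ *	};
+ *
+ *	pdata = ion_parse_dt(pdev, example_heaps);
+ */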
+
+struct ion_platform_data *ion_parse_dt(struct platform_device *pdev,
+					struct ion_of_heap *compatible);
+
+void ion_destroy_platform_data(struct ion_platform_data *data);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_page_pool.c b/drivers/staging/media/sunxi/cedar/ion/ion_page_pool.c
new file mode 100644
index 000000000..aea89c1ec
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_page_pool.c
@@ -0,0 +1,181 @@
+/*
+ * drivers/staging/android/ion/ion_mem_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include "ion_priv.h"
+
+static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+{
+	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+
+	if (!page)
+		return NULL;
+	if (!pool->cached)
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
+					  DMA_BIDIRECTIONAL);
+	return page;
+}
+
+static void ion_page_pool_free_pages(struct ion_page_pool *pool,
+				     struct page *page)
+{
+	__free_pages(page, pool->order);
+}
+
+static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
+{
+	mutex_lock(&pool->mutex);
+	if (PageHighMem(page)) {
+		list_add_tail(&page->lru, &pool->high_items);
+		pool->high_count++;
+	} else {
+		list_add_tail(&page->lru, &pool->low_items);
+		pool->low_count++;
+	}
+	mutex_unlock(&pool->mutex);
+	return 0;
+}
+
+static struct page *ion_page_pool_remove(struct ion_page_pool *pool, bool high)
+{
+	struct page *page;
+
+	if (high) {
+		BUG_ON(!pool->high_count);
+		page = list_first_entry(&pool->high_items, struct page, lru);
+		pool->high_count--;
+	} else {
+		BUG_ON(!pool->low_count);
+		page = list_first_entry(&pool->low_items, struct page, lru);
+		pool->low_count--;
+	}
+
+	list_del(&page->lru);
+	return page;
+}
+
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	BUG_ON(!pool);
+
+	mutex_lock(&pool->mutex);
+	if (pool->high_count)
+		page = ion_page_pool_remove(pool, true);
+	else if (pool->low_count)
+		page = ion_page_pool_remove(pool, false);
+	mutex_unlock(&pool->mutex);
+
+	if (!page)
+		page = ion_page_pool_alloc_pages(pool);
+
+	return page;
+}
+
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
+{
+	int ret;
+
+	BUG_ON(pool->order != compound_order(page));
+
+	ret = ion_page_pool_add(pool, page);
+	if (ret)
+		ion_page_pool_free_pages(pool, page);
+}
+
+static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+{
+	int count = pool->low_count;
+
+	if (high)
+		count += pool->high_count;
+
+	return count << pool->order;
+}
+
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			 int nr_to_scan)
+{
+	int freed = 0;
+	bool high;
+
+	if (current_is_kswapd())
+		high = true;
+	else
+		high = !!(gfp_mask & __GFP_HIGHMEM);
+
+	if (nr_to_scan == 0)
+		return ion_page_pool_total(pool, high);
+
+	while (freed < nr_to_scan) {
+		struct page *page;
+
+		mutex_lock(&pool->mutex);
+		if (pool->low_count) {
+			page = ion_page_pool_remove(pool, false);
+		} else if (high && pool->high_count) {
+			page = ion_page_pool_remove(pool, true);
+		} else {
+			mutex_unlock(&pool->mutex);
+			break;
+		}
+		mutex_unlock(&pool->mutex);
+		ion_page_pool_free_pages(pool, page);
+		freed += (1 << pool->order);
+	}
+
+	return freed;
+}
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+					   bool cached)
+{
+	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
+	if (!pool)
+		return NULL;
+	pool->high_count = 0;
+	pool->low_count = 0;
+	INIT_LIST_HEAD(&pool->low_items);
+	INIT_LIST_HEAD(&pool->high_items);
+	pool->gfp_mask = gfp_mask | __GFP_COMP;
+	pool->order = order;
+	mutex_init(&pool->mutex);
+	plist_node_init(&pool->list, order);
+	if (cached)
+		pool->cached = true;
+
+	return pool;
+}
+
+void ion_page_pool_destroy(struct ion_page_pool *pool)
+{
+	kfree(pool);
+}
+
+static int __init ion_page_pool_init(void)
+{
+	return 0;
+}
+device_initcall(ion_page_pool_init);
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_priv.h b/drivers/staging/media/sunxi/cedar/ion/ion_priv.h
new file mode 100644
index 000000000..760e41885
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_priv.h
@@ -0,0 +1,473 @@
+/*
+ * drivers/staging/android/ion/ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ION_PRIV_H
+#define _ION_PRIV_H
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+
+#include "ion.h"
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @ref:		reference count
+ * @node:		node in the ion_device buffers tree
+ * @dev:		back pointer to the ion_device
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @private_flags:	internal buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @dmap_cnt:		number of times the buffer is mapped for dma
+ * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @pages:		flat array of pages in the buffer -- used by fault
+ *			handler and only valid for buffers that are faulted in
+ * @vmas:		list of vma's mapping this buffer
+ * @handle_count:	count of handles referencing this buffer
+ * @task_comm:		taskcomm of last client to reference this buffer in a
+ *			handle, used for debugging
+ * @pid:		pid of last client to reference this buffer in a
+ *			handle, used for debugging
+*/
+struct ion_buffer {
+	struct kref ref;
+	union {
+		struct rb_node node;
+		struct list_head list;
+	};
+	struct ion_device *dev;
+	struct ion_heap *heap;
+	unsigned long flags;
+	unsigned long private_flags;
+	size_t size;
+	void *priv_virt;
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	int dmap_cnt;
+	struct sg_table *sg_table;
+	struct page **pages;
+	struct list_head vmas;
+	/* used to track orphaned buffers */
+	int handle_count;
+	char task_comm[TASK_COMM_LEN];
+	pid_t pid;
+};
+void ion_buffer_destroy(struct ion_buffer *buffer);
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ * @heaps:		list of all the heaps in the system
+ * @user_clients:	list of all the clients created from userspace
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+			     unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+	struct dentry *heaps_debug_root;
+	struct dentry *clients_debug_root;
+	int heap_cnt;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @idr:		an idr space for allocating handle ids
+ * @lock:		lock protecting the tree of handles
+ * @name:		used for debugging
+ * @display_name:	used for debugging (unique version of @name)
+ * @display_serial:	used for debugging (to make display_name unique)
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the handles tree
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct idr idr;
+	struct mutex lock;
+	const char *name;
+	char *display_name;
+	int display_serial;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @id:			client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	int id;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @map_kernel:		map memory into the kernel
+ * @unmap_kernel:	unmap memory from the kernel
+ * @map_user:		map memory to userspace
+ * @shrink:		reclaim heap-private caches, returning pages freed
+ *
+ * allocate and map_user return 0 on success, -errno on error.
+ * map_kernel returns a pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+	int (*allocate)(struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long align, unsigned long flags);
+	void (*free)(struct ion_buffer *buffer);
+	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
+	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
+			struct vm_area_struct *vma);
+	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE (1 << 0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0)
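+
+/*
+ * Illustrative sketch (not part of the original header) of how a heap's
+ * free path is expected to honour this flag; the page-pool fallback is an
+ * assumption about that heap's design (ion_system_heap.c follows this
+ * pattern):
+ *
+ *	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+ *		__free_pages(page, order);
+ *	else
+ *		ion_page_pool_free(pool, page);
+ */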
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @dev:		back pointer to the ion_device
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @flags:		flags
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @shrinker:		a shrinker for the heap
+ * @free_list:		free list head if deferred free is used
+ * @free_list_size:	size of the deferred free list in bytes
+ * @free_lock:		protects the free list
+ * @waitqueue:		queue to wait on from deferred free thread
+ * @task:		task struct of deferred free thread
+ * @debug_show:		called when heap debug file is read to add any
+ *			heap specific debug info to output
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct plist_node node;
+	struct ion_device *dev;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	unsigned long flags;
+	unsigned int id;
+	const char *name;
+	struct shrinker shrinker;
+	struct list_head free_list;
+	size_t free_list_size;
+	spinlock_t free_lock;
+	wait_queue_head_t waitqueue;
+	struct task_struct *task;
+
+	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+};
+
+/**
+ * ion_buffer_cached - this ion buffer is cached
+ * @buffer:		buffer
+ *
+ * indicates whether this ion buffer is cached
+ */
+bool ion_buffer_cached(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_fault_user_mappings - fault in user mappings of this buffer
+ * @buffer:		buffer
+ *
+ * indicates whether userspace mappings of this buffer will be faulted
+ * in, this can affect how buffers are allocated from the heap.
+ */
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer);
+
+/**
+ * ion_device_create - allocates and returns an ion device
+ * @custom_ioctl:	arch specific ioctl function if applicable
+ *
+ * returns a valid device or an ERR_PTR()-encoded error
+ */
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg));
+
+/**
+ * ion_device_destroy - frees the device and its resources
+ * @dev:		the device
+ */
+void ion_device_destroy(struct ion_device *dev);
+
+/**
+ * ion_device_add_heap - adds a heap to the ion device
+ * @dev:		the device
+ * @heap:		the heap to add
+ */
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
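+
+/*
+ * Illustrative registration sketch (not part of the original header): a
+ * platform driver typically creates the device once and then instantiates
+ * and adds each heap described by its platform data; error handling is
+ * omitted:
+ *
+ *	idev = ion_device_create(NULL);
+ *	for (i = 0; i < pdata->nr; i++) {
+ *		heap = ion_heap_create(&pdata->heaps[i]);
+ *		if (!IS_ERR_OR_NULL(heap))
+ *			ion_device_add_heap(idev, heap);
+ *	}
+ */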
+
+/**
+ * some helpers for common operations on buffers using the sg_table
+ * and vaddr fields
+ */
+void *ion_heap_map_kernel(struct ion_heap *, struct ion_buffer *);
+void ion_heap_unmap_kernel(struct ion_heap *, struct ion_buffer *);
+int ion_heap_map_user(struct ion_heap *, struct ion_buffer *,
+			struct vm_area_struct *);
+int ion_heap_buffer_zero(struct ion_buffer *buffer);
+int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:		the heap
+ * @buffer:		the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+					size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:		the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+
+/**
+ * functions for creating and destroying the built in ion heaps.
+ * architectures can add their own custom architecture specific
+ * heaps as appropriate.
+ */
+
+struct ion_heap *ion_heap_create(struct ion_platform_heap *);
+void ion_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *);
+void ion_system_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *);
+void ion_system_contig_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
+void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
+void ion_chunk_heap_destroy(struct ion_heap *);
+struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
+void ion_cma_heap_destroy(struct ion_heap *);
+
+/**
+ * functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap.  Keeping
+ * a pool of memory that is ready for dma, i.e. any cached mappings have
+ * been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct and especially the count
+ *			item list
+ * @gfp_mask:		gfp_mask to use from alloc
+ * @order:		order of pages in the pool
+ * @list:		plist node for list of pools
+ * @cached:		whether this is a pool of cached pages
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for dma, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems
+ */
+struct ion_page_pool {
+	int high_count;
+	int low_count;
+	bool cached;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
+					   bool cached);
+void ion_page_pool_destroy(struct ion_page_pool *);
+struct page *ion_page_pool_alloc(struct ion_page_pool *);
+void ion_page_pool_free(struct ion_page_pool *, struct page *);
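+
+/*
+ * Illustrative usage sketch (not part of the original header); the gfp
+ * mask, order and shrink target are placeholders.  Note that
+ * ion_page_pool_destroy() only frees the pool bookkeeping, so pooled
+ * pages should be reclaimed with ion_page_pool_shrink() first:
+ *
+ *	pool = ion_page_pool_create(GFP_HIGHUSER | __GFP_ZERO, 4, false);
+ *	page = ion_page_pool_alloc(pool);
+ *	...
+ *	ion_page_pool_free(pool, page);
+ *	ion_page_pool_shrink(pool, GFP_KERNEL, 1 << 4);
+ *	ion_page_pool_destroy(pool);
+ */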
+
+/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:		the pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			  int nr_to_scan);
+
+/**
+ * ion_pages_sync_for_device - cache flush pages for use with the specified
+ *                             device
+ * @dev:		the device the pages will be used with
+ * @page:		the first page to be flushed
+ * @size:		size in bytes of region to be flushed
+ * @dir:		direction of dma transfer
+ */
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+		size_t size, enum dma_data_direction dir);
+
+long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+int ion_sync_for_device(struct ion_client *client, int fd);
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+						int id);
+
+void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
+
+int ion_handle_put_nolock(struct ion_handle *handle);
+
+int ion_handle_put(struct ion_handle *handle);
+
+int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
+
+int ion_share_dma_buf_fd_nolock(struct ion_client *client,
+				struct ion_handle *handle);
+
+#endif /* _ION_PRIV_H */
diff --git a/drivers/staging/media/sunxi/cedar/ion/ion_system_heap.c b/drivers/staging/media/sunxi/cedar/ion/ion_system_heap.c
new file mode 100644
index 000000000..2bbbb61e6
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/ion_system_heap.c
@@ -0,0 +1,460 @@
+/*
+ * drivers/staging/android/ion/ion_system_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+				     __GFP_NORETRY) & ~__GFP_RECLAIM;
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
+static const unsigned int orders[] = {8, 4, 0};
+
+static int order_to_index(unsigned int order)
+{
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++)
+		if (order == orders[i])
+			return i;
+	BUG();
+	return -1;
+}
+
+static inline unsigned int order_to_size(int order)
+{
+	return PAGE_SIZE << order;
+}
+
+struct ion_system_heap {
+	struct ion_heap heap;
+	struct ion_page_pool *uncached_pools[NUM_ORDERS];
+	struct ion_page_pool *cached_pools[NUM_ORDERS];
+};
+
+/*
+ * Pages from the page pool are always zeroed.  Cached buffers still need a
+ * cache clean before use; uncached buffers have been non-cacheable since
+ * they were allocated, so no maintenance is needed for them.
+ */
+static struct page *alloc_buffer_page(struct ion_system_heap *heap,
+				      struct ion_buffer *buffer,
+				      unsigned long order)
+{
+	bool cached = ion_buffer_cached(buffer);
+	struct ion_page_pool *pool;
+	struct page *page;
+
+	if (!cached)
+		pool = heap->uncached_pools[order_to_index(order)];
+	else
+		pool = heap->cached_pools[order_to_index(order)];
+
+	page = ion_page_pool_alloc(pool);
+
+	if (cached)
+		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+					  DMA_BIDIRECTIONAL);
+	return page;
+}
+
+static void free_buffer_page(struct ion_system_heap *heap,
+			     struct ion_buffer *buffer, struct page *page)
+{
+	struct ion_page_pool *pool;
+	unsigned int order = compound_order(page);
+	bool cached = ion_buffer_cached(buffer);
+
+	/* go to system */
+	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
+		__free_pages(page, order);
+		return;
+	}
+
+	if (!cached)
+		pool = heap->uncached_pools[order_to_index(order)];
+	else
+		pool = heap->cached_pools[order_to_index(order)];
+
+	ion_page_pool_free(pool, page);
+}
+
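+/*
+ * Walk orders[] from largest to smallest and return the first page whose
+ * order both fits in the remaining size and does not exceed max_order, so
+ * large allocations are backed by high-order pages whenever the pools can
+ * provide them.
+ */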
+static struct page *alloc_largest_available(struct ion_system_heap *heap,
+					    struct ion_buffer *buffer,
+					    unsigned long size,
+					    unsigned int max_order)
+{
+	struct page *page;
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		if (size < order_to_size(orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+
+		page = alloc_buffer_page(heap, buffer, orders[i]);
+		if (!page)
+			continue;
+
+		return page;
+	}
+
+	return NULL;
+}
+
+static int ion_system_heap_allocate(struct ion_heap *heap,
+				    struct ion_buffer *buffer,
+				    unsigned long size, unsigned long align,
+				    unsigned long flags)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	struct sg_table *table;
+	struct scatterlist *sg;
+	struct list_head pages;
+	struct page *page, *tmp_page;
+	int i = 0;
+	unsigned long size_remaining = PAGE_ALIGN(size);
+	unsigned int max_order = orders[0];
+
+	if (align > PAGE_SIZE)
+		return -EINVAL;
+
+	if ((size / PAGE_SIZE) > (totalram_pages() / 2))
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&pages);
+	while (size_remaining > 0) {
+		page = alloc_largest_available(sys_heap, buffer, size_remaining,
+					       max_order);
+		if (!page)
+			goto free_pages;
+		list_add_tail(&page->lru, &pages);
+		size_remaining -= PAGE_SIZE << compound_order(page);
+		max_order = compound_order(page);
+		i++;
+	}
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		goto free_pages;
+
+	if (sg_alloc_table(table, i, GFP_KERNEL))
+		goto free_table;
+
+	sg = table->sgl;
+	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
+		sg = sg_next(sg);
+		list_del(&page->lru);
+	}
+
+	buffer->sg_table = table;
+	return 0;
+
+free_table:
+	kfree(table);
+free_pages:
+	list_for_each_entry_safe(page, tmp_page, &pages, lru)
+		free_buffer_page(sys_heap, buffer, page);
+	return -ENOMEM;
+}
+
+static void ion_system_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_system_heap *sys_heap = container_of(buffer->heap,
+							struct ion_system_heap,
+							heap);
+	struct sg_table *table = buffer->sg_table;
+	struct scatterlist *sg;
+	int i;
+
+	/* zero the buffer before returning it to the page pool */
+	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
+		ion_heap_buffer_zero(buffer);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg));
+	sg_free_table(table);
+	kfree(table);
+}
+
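+/*
+ * Shrinker callback: with nr_to_scan == 0 it only reports how many pages
+ * could be reclaimed; otherwise it drains the uncached and cached pools,
+ * order by order, until nr_to_scan pages have been freed.
+ */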
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+				  int nr_to_scan)
+{
+	struct ion_page_pool *uncached_pool;
+	struct ion_page_pool *cached_pool;
+	struct ion_system_heap *sys_heap;
+	int nr_total = 0;
+	int i, nr_freed;
+	int only_scan = 0;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+
+	if (!nr_to_scan)
+		only_scan = 1;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		uncached_pool = sys_heap->uncached_pools[i];
+		cached_pool = sys_heap->cached_pools[i];
+
+		if (only_scan) {
+			nr_total += ion_page_pool_shrink(uncached_pool,
+							 gfp_mask,
+							 nr_to_scan);
+
+			nr_total += ion_page_pool_shrink(cached_pool,
+							 gfp_mask,
+							 nr_to_scan);
+		} else {
+			nr_freed = ion_page_pool_shrink(uncached_pool,
+							gfp_mask,
+							nr_to_scan);
+			nr_to_scan -= nr_freed;
+			nr_total += nr_freed;
+			if (nr_to_scan <= 0)
+				break;
+			nr_freed = ion_page_pool_shrink(cached_pool,
+							gfp_mask,
+							nr_to_scan);
+			nr_to_scan -= nr_freed;
+			nr_total += nr_freed;
+			if (nr_to_scan <= 0)
+				break;
+		}
+	}
+	return nr_total;
+}
+
+static struct ion_heap_ops system_heap_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+	.shrink = ion_system_heap_shrink,
+};
+
+static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
+				      void *unused)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+	struct ion_page_pool *pool;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		pool = sys_heap->uncached_pools[i];
+
+		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
+			   pool->high_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->high_count);
+		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
+			   pool->low_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->low_count);
+	}
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		pool = sys_heap->cached_pools[i];
+
+		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
+			   pool->high_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->high_count);
+		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
+			   pool->low_count, pool->order,
+			   (PAGE_SIZE << pool->order) * pool->low_count);
+	}
+	return 0;
+}
+
+static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
+{
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++)
+		if (pools[i])
+			ion_page_pool_destroy(pools[i]);
+}
+
+static int ion_system_heap_create_pools(struct ion_page_pool **pools,
+					bool cached)
+{
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		struct ion_page_pool *pool;
+		gfp_t gfp_flags = low_order_gfp_flags;
+
+		if (orders[i] > 4)
+			gfp_flags = high_order_gfp_flags;
+
+		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+		if (!pool)
+			goto err_create_pool;
+		pools[i] = pool;
+	}
+	return 0;
+
+err_create_pool:
+	ion_system_heap_destroy_pools(pools);
+	return -ENOMEM;
+}
+
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_system_heap *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->heap.ops = &system_heap_ops;
+	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
+	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
+
+	if (ion_system_heap_create_pools(heap->uncached_pools, false))
+		goto free_heap;
+
+	if (ion_system_heap_create_pools(heap->cached_pools, true))
+		goto destroy_uncached_pools;
+
+	heap->heap.debug_show = ion_system_heap_debug_show;
+	return &heap->heap;
+
+destroy_uncached_pools:
+	ion_system_heap_destroy_pools(heap->uncached_pools);
+
+free_heap:
+	kfree(heap);
+	return ERR_PTR(-ENOMEM);
+}
+
+void ion_system_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_system_heap *sys_heap = container_of(heap,
+							struct ion_system_heap,
+							heap);
+	int i;
+
+	for (i = 0; i < NUM_ORDERS; i++) {
+		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
+		ion_page_pool_destroy(sys_heap->cached_pools[i]);
+	}
+	kfree(sys_heap);
+}
+
+static int ion_system_contig_heap_allocate(struct ion_heap *heap,
+					   struct ion_buffer *buffer,
+					   unsigned long len,
+					   unsigned long align,
+					   unsigned long flags)
+{
+	int order = get_order(len);
+	struct page *page;
+	struct sg_table *table;
+	unsigned long i;
+	int ret;
+
+	if (align > (PAGE_SIZE << order))
+		return -EINVAL;
+
+	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
+	if (!page)
+		return -ENOMEM;
+
+	split_page(page, order);
+
+	len = PAGE_ALIGN(len);
+	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
+		__free_page(page + i);
+
+	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto free_pages;
+	}
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto free_table;
+
+	sg_set_page(table->sgl, page, len, 0);
+
+	buffer->sg_table = table;
+
+	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+
+	return 0;
+
+free_table:
+	kfree(table);
+free_pages:
+	for (i = 0; i < len >> PAGE_SHIFT; i++)
+		__free_page(page + i);
+
+	return ret;
+}
+
+static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+{
+	struct sg_table *table = buffer->sg_table;
+	struct page *page = sg_page(table->sgl);
+	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
+	unsigned long i;
+
+	for (i = 0; i < pages; i++)
+		__free_page(page + i);
+	sg_free_table(table);
+	kfree(table);
+}
+
+static struct ion_heap_ops kmalloc_ops = {
+	.allocate = ion_system_contig_heap_allocate,
+	.free = ion_system_contig_heap_free,
+	.map_kernel = ion_heap_map_kernel,
+	.unmap_kernel = ion_heap_unmap_kernel,
+	.map_user = ion_heap_map_user,
+};
+
+struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->ops = &kmalloc_ops;
+	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
+	return heap;
+}
+
+void ion_system_contig_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
+}
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/Makefile b/drivers/staging/media/sunxi/cedar/ion/sunxi/Makefile
new file mode 100644
index 000000000..177c51bed
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_ION) += sunxi_ion.o cache.o
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/cache-v7.S b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache-v7.S
new file mode 100755
index 000000000..4aaf7918d
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache-v7.S
@@ -0,0 +1,145 @@
+
+#ifndef __ASSEMBLY__
+#define __ASSEMBLY__
+#endif
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+	.macro	dcache_line_size, reg, tmp
+	mrc	p15, 1, \tmp, c0, c0, 0		@ read CCSIDR
+	and	\tmp, \tmp, #7			@ cache line size encoding
+	mov	\reg, #16			@ size offset
+	mov	\reg, \reg, lsl \tmp		@ actual cache line size
+	.endm
+
+	.text
+
+/*
+ * C prototype:
+ * int flush_clean_user_range(long start, long end);
+ *
+ * Clean and invalidate a user virtual address range, line by line.
+ */
+ENTRY(flush_clean_user_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r0, r0, r3
+1:
+	USER(	mcr	p15, 0, r0, c7, c14, 1	)	@ clean and invalidate D line to the point of coherency
+	add	r0, r0, r2
+2:
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	dsb
+	mov	pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * is not mapped, just try the next page.
+ */
+9001:
+    mov	r0, r0, lsr #12
+    mov	r0, r0, lsl #12
+    add	r0, r0, #4096
+    b	2b
+ENDPROC(flush_clean_user_range)
+
+/*
+ *	flush_dcache_all()
+ *
+ *	Clean and invalidate the whole D-cache by set/way.
+ *
+ *	All working registers are saved and restored, so none are clobbered
+ *	for the caller.
+ */
+ENTRY(flush_dcache_all)
+	stmfd	sp!, {r0 - r12, lr}
+	dmb					@ ensure ordering with previous memory accesses
+	mrc	p15, 1, r0, c0, c0, 1		@ read clidr
+	ands	r3, r0, #0x7000000		@ extract loc from clidr
+	mov	r3, r3, lsr #23			@ left align loc bit field
+	beq	finished			@ if loc is 0, then no need to clean
+	mov	r10, #0				@ start clean at cache level 0
+loop1:
+	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
+	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
+	and	r1, r1, #7			@ mask of the bits for current cache only
+	cmp	r1, #2				@ see what cache we have at this level
+	blt	skip				@ skip if no cache, or just i-cache
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	isb					@ isb to sync the new cssr&csidr
+	mrc	p15, 1, r1, c0, c0, 0		@ read the new csidr
+	and	r2, r1, #7			@ extract the length of the cache lines
+	add	r2, r2, #4			@ add 4 (line length offset)
+	ldr	r4, =0x3ff
+	ands	r4, r4, r1, lsr #3		@ find maximum number of the way size
+	clz	r5, r4				@ find bit position of way size increment
+	ldr	r7, =0x7fff
+	ands	r7, r7, r1, lsr #13		@ extract max number of the index size
+loop2:
+	mov	r9, r4				@ create working copy of max way size
+loop3:
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
+	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
+	subs	r9, r9, #1			@ decrement the way
+	bge	loop3
+	subs	r7, r7, #1			@ decrement the index
+	bge	loop2
+skip:
+	add	r10, r10, #2			@ increment cache number
+	cmp	r3, r10
+	bgt	loop1
+finished:
+	mov	r10, #0				@ switch back to cache level 0
+	mcr	p15, 2, r10, c0, c0, 0		@ select current cache level in cssr
+	dsb
+	isb
+
+	ldmfd	sp!, {r0 - r12, lr}
+	mov	pc, lr
+ENDPROC(flush_dcache_all)
+
+/*
+ * int flush_user_range(long start, long end);
+ *
+ * Invalidate a user virtual address range.  The partial lines at each end
+ * are cleaned and invalidated so that neighbouring data is not lost; the
+ * lines in between are invalidated only (no write back).
+ */
+ENTRY(flush_user_range)
+	dcache_line_size r2, r3
+	sub	r3, r2, #1
+
+	tst	r0, r3
+	bic	r0, r0, r3
+	USER(mcrne p15, 0, r0, c7, c14, 1)	@ clean & invalidate D / U line
+
+	tst	r1, r3
+	bic	r1, r1, r3
+	USER(mcrne p15, 0, r1, c7, c14, 1)	@ clean & invalidate D / U line
+
+1:
+	USER(mcr p15, 0, r0, c7, c6, 1)		@ invalidate D / U line
+	add	r0, r0, r2
+2:
+	cmp	r0, r1
+	blo	1b
+	mov	r0, #0
+	dsb
+	mov	pc, lr
+
+	/*
+	 * Fault handling for the cache operation above. If the virtual address in r0
+	 * is not mapped, just try the next page.
+	 */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ENDPROC(flush_user_range)
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.S b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.S
new file mode 100644
index 000000000..8ce87750d
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.S
@@ -0,0 +1,5 @@
+#if __LINUX_ARM_ARCH__ == 7
+#include "cache-v7.S"
+#else
+#warning "[sunxi-cedar] No BSP asm cache flush support, use dmac flush"
+#endif
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.h b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.h
new file mode 100644
index 000000000..2eb9c65e0
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/cache.h
@@ -0,0 +1,6 @@
+#ifndef _CACHE_H
+#define _CACHE_H
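+
+/*
+ * ARMv7-only cache maintenance helpers implemented in cache-v7.S:
+ * flush_clean_user_range() cleans and invalidates a user VA range,
+ * flush_user_range() invalidates it (cleaning only the partial lines at
+ * each end), and flush_dcache_all() cleans and invalidates the entire
+ * D-cache by set/way.
+ */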
+int flush_clean_user_range(long start, long end);
+int flush_user_range(long start, long end);
+void flush_dcache_all(void);
+#endif
\ No newline at end of file
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.c b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.c
new file mode 100644
index 000000000..1ab4fdf34
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.c
@@ -0,0 +1,189 @@
+/*
+ * Allwinner SUNXI ION Driver
+ *
+ * Copyright (c) 2017 Allwinnertech.
+ *
+ * Author: fanqinghua <fanqinghua@allwinnertech.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "Ion: " fmt
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include "../ion_priv.h"
+#include "../ion.h"
+#include "../ion_of.h"
+#include "sunxi_ion_priv.h"
+
+struct sunxi_ion_dev {
+	struct ion_heap **heaps;
+	struct ion_device *idev;
+	struct ion_platform_data *data;
+};
+struct device *g_ion_dev;
+struct ion_device *idev;
+/* export for IMG GPU(sgx544) */
+EXPORT_SYMBOL(idev);
+
+static struct ion_of_heap sunxi_heaps[] = {
+	PLATFORM_HEAP("allwinner,sys_user", 0, ION_HEAP_TYPE_SYSTEM,
+		      "sys_user"),
+	PLATFORM_HEAP("allwinner,sys_contig", 1, ION_HEAP_TYPE_SYSTEM_CONTIG,
+		      "sys_contig"),
+	PLATFORM_HEAP("allwinner,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
+		      "cma"),
+	PLATFORM_HEAP("allwinner,secure", ION_HEAP_TYPE_SECURE,
+		      ION_HEAP_TYPE_SECURE, "secure"),
+	{}
+};
+
+struct device *get_ion_dev(void)
+{
+	return g_ion_dev;
+}
+
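+/*
+ * Custom ioctl backend registered via ion_device_create() below; userspace
+ * reaches it through ION_IOC_CUSTOM with one of the ION_IOC_SUNXI_* sub
+ * commands defined in sunxi_ion_priv.h.
+ */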
+long sunxi_ion_ioctl(struct ion_client *client, unsigned int cmd,
+		     unsigned long arg)
+{
+	long ret = 0;
+	switch (cmd) {
+	case ION_IOC_SUNXI_FLUSH_RANGE: {
+		sunxi_cache_range range;
+		if (copy_from_user(&range, (void __user *)arg,
+				   sizeof(sunxi_cache_range))) {
+			ret = -EFAULT;
+			goto end;
+		}
+
+#if __LINUX_ARM_ARCH__ == 7
+		if (flush_clean_user_range(range.start, range.end)) {
+			ret = -EINVAL;
+			goto end;
+		}
+#else
+		dmac_flush_range((void*)range.start, (void*)range.end);
+#endif
+		break;
+	}
+#if __LINUX_ARM_ARCH__ == 7
+	case ION_IOC_SUNXI_FLUSH_ALL: {
+		flush_dcache_all();
+		break;
+	}
+#endif
+	case ION_IOC_SUNXI_PHYS_ADDR: {
+		sunxi_phys_data data;
+		struct ion_handle *handle;
+		if (copy_from_user(&data, (void __user *)arg,
+				   sizeof(sunxi_phys_data)))
+			return -EFAULT;
+		handle =
+			ion_handle_get_by_id_nolock(client, data.handle.handle);
+		/* FIXME Hardcoded CMA struct pointer */
+		data.phys_addr =
+			((struct ion_cma_buffer_info *)(handle->buffer
+								->priv_virt))
+				->handle;
+		data.size = handle->buffer->size;
+		if (copy_to_user((void __user *)arg, &data,
+				 sizeof(sunxi_phys_data)))
+			return -EFAULT;
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+end:
+	return ret;
+}
+
+static int sunxi_ion_probe(struct platform_device *pdev)
+{
+	struct sunxi_ion_dev *ipdev;
+	int i;
+
+	ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
+	if (!ipdev)
+		return -ENOMEM;
+
+	g_ion_dev = &pdev->dev;
+	platform_set_drvdata(pdev, ipdev);
+
+	ipdev->idev = ion_device_create(sunxi_ion_ioctl);
+	if (IS_ERR(ipdev->idev))
+		return PTR_ERR(ipdev->idev);
+
+	idev = ipdev->idev;
+
+	ipdev->data = ion_parse_dt(pdev, sunxi_heaps);
+	if (IS_ERR(ipdev->data)) {
+		pr_err("%s: ion_parse_dt error!\n", __func__);
+		return PTR_ERR(ipdev->data);
+	}
+
+	ipdev->heaps = devm_kzalloc(&pdev->dev,
+				    sizeof(*ipdev->heaps) * ipdev->data->nr,
+				    GFP_KERNEL);
+	if (!ipdev->heaps) {
+		ion_destroy_platform_data(ipdev->data);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ipdev->data->nr; i++) {
+		ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
+		if (!ipdev->heaps[i]) {
+			ion_destroy_platform_data(ipdev->data);
+			return -ENOMEM;
+		} else if (ipdev->heaps[i] == ERR_PTR(-EINVAL)) {
+			return 0;
+		}
+		ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
+	}
+	return 0;
+}
+
+static int sunxi_ion_remove(struct platform_device *pdev)
+{
+	struct sunxi_ion_dev *ipdev;
+	int i;
+
+	ipdev = platform_get_drvdata(pdev);
+
+	for (i = 0; i < ipdev->data->nr; i++)
+		ion_heap_destroy(ipdev->heaps[i]);
+
+	ion_destroy_platform_data(ipdev->data);
+	ion_device_destroy(ipdev->idev);
+
+	return 0;
+}
+
+static const struct of_device_id sunxi_ion_match_table[] = {
+	{ .compatible = "allwinner,sunxi-ion" },
+	{},
+};
+
+static struct platform_driver sunxi_ion_driver = {
+	.probe = sunxi_ion_probe,
+	.remove = sunxi_ion_remove,
+	.driver = {
+		.name = "ion-sunxi",
+		.of_match_table = sunxi_ion_match_table,
+	},
+};
+
+static int __init sunxi_ion_init(void)
+{
+	return platform_driver_register(&sunxi_ion_driver);
+}
+subsys_initcall(sunxi_ion_init);
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.h b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.h
new file mode 100644
index 000000000..b084b84fc
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion.h
@@ -0,0 +1,33 @@
+/*
+ * drivers/staging/android/ion/sunxi/ion_sunxi.h
+ *
+ * Copyright(c) 2015-2020 Allwinnertech Co., Ltd.
+ *      http://www.allwinnertech.com
+ *
+ * Author: Wim Hwang <huangwei@allwinnertech.com>
+ *
+ * sunxi ion header file
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _LINUX_ION_SUNXI_H
+#define _LINUX_ION_SUNXI_H
+
+/**
+ * sunxi_ion_client_create() - allocate an ion client and return it
+ * @name:	used for debugging
+ */
+struct ion_client *sunxi_ion_client_create(const char *name);
+
+void sunxi_ion_probe_drm_info(u32 *drm_phy_addr, u32 *drm_tee_addr);
+
+int  optee_probe_drm_configure(
+		unsigned long *drm_base,
+		size_t *drm_size,
+		unsigned long  *tee_base);
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion_priv.h b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion_priv.h
new file mode 100644
index 000000000..c5fcfc013
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/sunxi/sunxi_ion_priv.h
@@ -0,0 +1,30 @@
+#ifndef _SUNXI_ION_PRIV_H
+#define _SUNXI_ION_PRIV_H
+
+#include "cache.h"
+
+#define ION_IOC_SUNXI_FLUSH_RANGE           5
+#define ION_IOC_SUNXI_FLUSH_ALL             6
+#define ION_IOC_SUNXI_PHYS_ADDR             7
+#define ION_IOC_SUNXI_DMA_COPY              8
+#define ION_IOC_SUNXI_DUMP                  9
+#define ION_IOC_SUNXI_POOL_FREE             10
+
+typedef struct {
+	long	start;
+	long	end;
+} sunxi_cache_range;
+
+typedef struct {
+	struct ion_handle_data handle;
+	unsigned int phys_addr;
+	unsigned int size;
+} sunxi_phys_data;
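+
+/*
+ * Illustrative userspace call for one of the sub-commands above, issued
+ * through the generic ION_IOC_CUSTOM ioctl (assumes an open /dev/ion fd
+ * and a buffer mapped at va; error handling omitted):
+ *
+ *	sunxi_cache_range range = {
+ *		.start = (long)va,
+ *		.end = (long)va + len,
+ *	};
+ *	struct ion_custom_data custom = {
+ *		.cmd = ION_IOC_SUNXI_FLUSH_RANGE,
+ *		.arg = (unsigned long)&range,
+ *	};
+ *	ioctl(ion_fd, ION_IOC_CUSTOM, &custom);
+ */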
+
+struct ion_cma_buffer_info {
+	void *cpu_addr;
+	dma_addr_t handle;
+	struct sg_table *table;
+};
+
+#endif
\ No newline at end of file
diff --git a/drivers/staging/media/sunxi/cedar/ion/uapi/ion.h b/drivers/staging/media/sunxi/cedar/ion/uapi/ion.h
new file mode 100644
index 000000000..786b06db0
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ion/uapi/ion.h
@@ -0,0 +1,237 @@
+/*
+ * drivers/staging/android/uapi/ion.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_ION_H
+#define _UAPI_LINUX_ION_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+typedef int ion_user_handle_t;
+
+/**
+ * enum ion_heap_type - list of all possible types of heaps
+ * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
+ * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
+ * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
+ *				 carveout heap, allocations are physically
+ *				 contiguous
+ * @ION_HEAP_TYPE_CHUNK:	 memory allocated from a chunk heap
+ * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
+ * @ION_HEAP_TYPE_SECURE:	 Allwinner secure memory heap
+ * @ION_HEAP_TYPE_CUSTOM:	 first device specific heap type
+ *
+ * A bit mask is used to identify the heaps, so only 32 total heap ids are
+ * supported.
+ */
+enum ion_heap_type {
+	ION_HEAP_TYPE_SYSTEM,
+	ION_HEAP_TYPE_SYSTEM_CONTIG,
+	ION_HEAP_TYPE_CARVEOUT,
+	ION_HEAP_TYPE_CHUNK,
+	ION_HEAP_TYPE_DMA,
+	ION_HEAP_TYPE_SECURE, /* allwinner add */
+	ION_HEAP_TYPE_CUSTOM, /*
+			       * must be last so device specific heaps always
+			       * are at the end of this enum
+			       */
+};
+
+#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
+
+/**
+ * allocation flags - the lower 16 bits are used by core ion, the upper 16
+ * bits are reserved for use by the heaps themselves.
+ */
+
+/*
+ * mappings of this buffer should be cached, ion will do cache maintenance
+ * when the buffer is mapped for dma
+ */
+#define ION_FLAG_CACHED 1
+
+/*
+ * mappings of this buffer will be created at mmap time, if this is set
+ * caches must be managed manually
+ */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2
+
+/**
+ * DOC: Ion Userspace API
+ *
+ * Create a client by opening /dev/ion; most operations are handled via the
+ * following ioctls.
+ *
+ */
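+
+/*
+ * Minimal illustrative allocation sequence (userspace, error handling
+ * omitted; heap_id is assumed to come from ION_IOC_HEAP_QUERY):
+ *
+ *	int ion_fd = open("/dev/ion", O_RDWR);
+ *	struct ion_allocation_data alloc = {
+ *		.len = 4096,
+ *		.heap_id_mask = 1 << heap_id,
+ *		.flags = ION_FLAG_CACHED,
+ *	};
+ *	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);
+ *
+ *	struct ion_fd_data map = { .handle = alloc.handle };
+ *	ioctl(ion_fd, ION_IOC_MAP, &map);
+ *	void *va = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *			map.fd, 0);
+ *
+ *	struct ion_handle_data free_data = { .handle = alloc.handle };
+ *	ioctl(ion_fd, ION_IOC_FREE, &free_data);
+ */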
+
+/**
+ * struct ion_allocation_data - metadata passed from userspace for allocations
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @heap_id_mask:	mask of heap ids to allocate from
+ * @flags:		flags passed to heap
+ * @handle:		pointer that will be populated with a cookie to use to
+ *			refer to this allocation
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct ion_allocation_data {
+	size_t len;
+	size_t align;
+	unsigned int heap_id_mask;
+	unsigned int flags;
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
+ * @handle:	a handle
+ * @fd:		a file descriptor representing that handle
+ *
+ * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
+ * the handle returned from ion alloc, and the kernel returns the file
+ * descriptor to share or map in the fd field.  For ION_IOC_IMPORT, userspace
+ * provides the file descriptor and the kernel returns the handle.
+ */
+struct ion_fd_data {
+	ion_user_handle_t handle;
+	int fd;
+};
+
+/**
+ * struct ion_handle_data - a handle passed to/from the kernel
+ * @handle:	a handle
+ */
+struct ion_handle_data {
+	ion_user_handle_t handle;
+};
+
+/**
+ * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
+ * @cmd:	the custom ioctl function to call
+ * @arg:	additional data to pass to the custom ioctl, typically a user
+ *		pointer to a predefined structure
+ *
+ * This works just like the regular cmd and arg fields of an ioctl.
+ */
+struct ion_custom_data {
+	unsigned int cmd;
+	unsigned long arg;
+};
+
+#define MAX_HEAP_NAME			32
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name - first 32 characters of the heap name
+ * @type - heap type
+ * @heap_id - heap id for the heap
+ */
+struct ion_heap_data {
+	char name[MAX_HEAP_NAME];
+	__u32 type;
+	__u32 heap_id;
+	__u32 reserved0;
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt - total number of heaps to be copied
+ * @heaps - buffer to copy heap data
+ */
+struct ion_heap_query {
+	__u32 cnt; /* Total number of heaps to be copied */
+	__u32 reserved0; /* align to 64bits */
+	__u64 heaps; /* buffer to be populated */
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+#define ION_IOC_MAGIC		'I'
+
+/**
+ * DOC: ION_IOC_ALLOC - allocate memory
+ *
+ * Takes an ion_allocation_data struct and returns it with the handle field
+ * populated with the opaque handle for the allocation.
+ */
+#define ION_IOC_ALLOC		_IOWR(ION_IOC_MAGIC, 0, \
+				      struct ion_allocation_data)
+
+/**
+ * DOC: ION_IOC_FREE - free memory
+ *
+ * Takes an ion_handle_data struct and frees the handle.
+ */
+#define ION_IOC_FREE		_IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+
+/**
+ * DOC: ION_IOC_MAP - get a file descriptor to mmap
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be used as an argument to mmap.
+ */
+#define ION_IOC_MAP		_IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
+ *
+ * Takes an ion_fd_data struct with the handle field populated with a valid
+ * opaque handle.  Returns the struct with the fd field set to a file
+ * descriptor open in the current address space.  This file descriptor
+ * can then be passed to another process.  The corresponding opaque handle can
+ * be retrieved via ION_IOC_IMPORT.
+ */
+#define ION_IOC_SHARE		_IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_IMPORT - imports a shared file descriptor
+ *
+ * Takes an ion_fd_data struct with the fd field populated with a valid file
+ * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
+ * field set to the corresponding opaque handle.
+ */
+#define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory
+ *
+ * Deprecated in favor of using the dma_buf api's correctly (syncing
+ * will happen automatically when the buffer is mapped to a device).
+ * If necessary it should be used after touching a cached buffer from the
+ * cpu; this will make the buffer in memory coherent.
+ */
+#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
+
+/**
+ * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
+ *
+ * Takes the argument of the architecture specific ioctl to call and
+ * passes appropriate userdata for that ioctl
+ */
+#define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 8, \
+					struct ion_heap_query)
+
+#endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/media/sunxi/cedar/ve/Kconfig b/drivers/staging/media/sunxi/cedar/ve/Kconfig
new file mode 100644
index 000000000..075cf8dda
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/Kconfig
@@ -0,0 +1,8 @@
+config VIDEO_SUNXI_CEDAR_VE
+	tristate "Allwinner CedarX VideoEngine Driver"
+	select DMA_SHARED_BUFFER
+	help
+	  This is the driver for the sunxi video engine, used for decoding
+	  h264/mpeg4/mpeg2/vc1/rmvb.
+	  To compile this driver as a module, choose M here: the
+	  module will be called cedar_ve.
diff --git a/drivers/staging/media/sunxi/cedar/ve/Makefile b/drivers/staging/media/sunxi/cedar/ve/Makefile
new file mode 100644
index 000000000..26d7afe18
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VIDEO_SUNXI_CEDAR_VE) += cedar_ve.o
\ No newline at end of file
diff --git a/drivers/staging/media/sunxi/cedar/ve/cedar_ve.c b/drivers/staging/media/sunxi/cedar/ve/cedar_ve.c
new file mode 100644
index 000000000..629ea87ef
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/cedar_ve.c
@@ -0,0 +1,1637 @@
+/*
+ * drivers/staging/media/sunxi/cedar/ve/cedar_ve.c
+ * (C) Copyright 2010-2016
+ * Allwinner Technology Co., Ltd. <www.allwinnertech.com>
+ * fangning<fangning@allwinnertech.com>
+ *
+ * Allwinner CedarX video engine (VE) driver
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/preempt.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/rmap.h>
+#include <linux/wait.h>
+#include <linux/semaphore.h>
+#include <linux/poll.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/delay.h>
+#include <linux/scatterlist.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/mm.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+#include <linux/debugfs.h>
+#include <linux/sched/signal.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include "cedar_ve_priv.h"
+#include "cedar_ve.h"
+#include <linux/dma-mapping.h>
+#include <linux/dma-buf.h>
+#include <linux/soc/sunxi/sunxi_sram.h>
+#include <linux/reset.h>
+
+#define DRV_VERSION "0.01alpha"
+
+struct dentry *ve_debugfs_root;
+struct ve_debugfs_buffer ve_debug_proc_info;
+
+int g_dev_major = CEDARDEV_MAJOR;
+int g_dev_minor = CEDARDEV_MINOR;
+
+/* S_IRUGO means the module parameters can be read but not written */
+module_param(g_dev_major, int, S_IRUGO);
+module_param(g_dev_minor, int, S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_ve);
+
+static struct cedar_dev *cedar_devp;
+
+static int clk_status;
+static LIST_HEAD(run_task_list);
+static LIST_HEAD(del_task_list);
+static spinlock_t cedar_spin_lock;
+
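+/*
+ * The video engine has a single interrupt line shared by the decoder, the
+ * AVC/JPEG encoders and the ISP.  The handler inspects the engine-select
+ * register to find out which sub-engine signalled, masks that engine's
+ * interrupt enable bits and wakes any waiter on wait_ve.
+ */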
+static irqreturn_t VideoEngineInterupt(int irq, void *data)
+{
+	unsigned long ve_int_status_reg;
+	unsigned long ve_int_ctrl_reg;
+	unsigned int status;
+	volatile int val;
+	int modual_sel;
+	unsigned int interrupt_enable;
+	struct cedar_dev *dev = data;
+
+	modual_sel = readl(cedar_devp->regs_macc + 0);
+	if (dev->capabilities & CEDARV_ISP_OLD) {
+		if (modual_sel & 0xa) {
+			if ((modual_sel & 0xb) == 0xb) {
+				/*jpg enc*/
+				ve_int_status_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xb00 + 0x1c);
+				ve_int_ctrl_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xb00 + 0x14);
+				interrupt_enable =
+					readl((void *)ve_int_ctrl_reg) & (0x7);
+				status = readl((void *)ve_int_status_reg);
+				status &= 0xf;
+			} else {
+				/*isp*/
+				ve_int_status_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xa00 + 0x10);
+				ve_int_ctrl_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xa00 + 0x08);
+				interrupt_enable =
+					readl((void *)ve_int_ctrl_reg) & (0x1);
+				status = readl((void *)ve_int_status_reg);
+				status &= 0x1;
+			}
+
+			if (status && interrupt_enable) {
+				/*disable interrupt*/
+				if ((modual_sel & 0xb) == 0xb) {
+					ve_int_ctrl_reg =
+						(unsigned long)(cedar_devp
+									->regs_macc +
+								0xb00 + 0x14);
+					val = readl((void *)ve_int_ctrl_reg);
+					writel(val & (~0x7),
+					       (void *)ve_int_ctrl_reg);
+				} else {
+					ve_int_ctrl_reg =
+						(unsigned long)(cedar_devp
+									->regs_macc +
+								0xa00 + 0x08);
+					val = readl((void *)ve_int_ctrl_reg);
+					writel(val & (~0x1),
+					       (void *)ve_int_ctrl_reg);
+				}
+
+				cedar_devp->en_irq_value =
+					1; /*hx modify 2011-8-1 16:08:47*/
+				cedar_devp->en_irq_flag = 1;
+				/*any interrupt will wake up wait queue*/
+				wake_up(&wait_ve); /*ioctl*/
+			}
+		}
+	} else {
+		if (modual_sel & (3 << 6)) {
+			if (modual_sel & (1 << 7)) {
+				/*avc enc*/
+				ve_int_status_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xb00 + 0x1c);
+				ve_int_ctrl_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xb00 + 0x14);
+				interrupt_enable =
+					readl((void *)ve_int_ctrl_reg) & (0x7);
+				status = readl((void *)ve_int_status_reg);
+				status &= 0xf;
+			} else {
+				/*isp*/
+				ve_int_status_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xa00 + 0x10);
+				ve_int_ctrl_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xa00 + 0x08);
+				interrupt_enable =
+					readl((void *)ve_int_ctrl_reg) & (0x1);
+				status = readl((void *)ve_int_status_reg);
+				status &= 0x1;
+			}
+
+			/*modify by fangning 2013-05-22*/
+			if (status && interrupt_enable) {
+				/*disable interrupt*/
+				/*avc enc*/
+				if (modual_sel & (1 << 7)) {
+					ve_int_ctrl_reg =
+						(unsigned long)(cedar_devp
+									->regs_macc +
+								0xb00 + 0x14);
+					val = readl((void *)ve_int_ctrl_reg);
+					writel(val & (~0x7),
+					       (void *)ve_int_ctrl_reg);
+				} else {
+					/*isp*/
+					ve_int_ctrl_reg =
+						(unsigned long)(cedar_devp
+									->regs_macc +
+								0xa00 + 0x08);
+					val = readl((void *)ve_int_ctrl_reg);
+					writel(val & (~0x1),
+					       (void *)ve_int_ctrl_reg);
+				}
+				/*hx modify 2011-8-1 16:08:47*/
+				cedar_devp->en_irq_value = 1;
+				cedar_devp->en_irq_flag = 1;
+				/*any interrupt will wake up wait queue*/
+				wake_up(&wait_ve);
+			}
+		}
+		if (dev->capabilities & CEDARV_ISP_NEW) {
+			if (modual_sel & (0x20)) {
+				ve_int_status_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xe00 + 0x1c);
+				ve_int_ctrl_reg =
+					(unsigned long)(cedar_devp->regs_macc +
+							0xe00 + 0x14);
+				interrupt_enable =
+					readl((void *)ve_int_ctrl_reg) & (0x38);
+
+				status = readl((void *)ve_int_status_reg);
+
+				if ((status & 0x7) && interrupt_enable) {
+					/*disable interrupt*/
+					val = readl((void *)ve_int_ctrl_reg);
+					writel(val & (~0x38),
+					       (void *)ve_int_ctrl_reg);
+
+					cedar_devp->jpeg_irq_value = 1;
+					cedar_devp->jpeg_irq_flag = 1;
+
+					/*any interrupt will wake up wait queue*/
+					wake_up(&wait_ve);
+				}
+			}
+		}
+	}
+
+	modual_sel &= 0xf;
+	if (modual_sel <= 4) {
+		/* determine which video format is selected */
+		switch (modual_sel) {
+		case 0: /*mpeg124*/
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x100 +
+						0x1c);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x100 +
+						0x14);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0x7c);
+			break;
+		case 1: /*h264*/
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x200 +
+						0x28);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x200 +
+						0x20);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0xf);
+			break;
+		case 2: /*vc1*/
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x300 +
+						0x2c);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x300 +
+						0x24);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0xf);
+			break;
+		case 3: /*rv*/
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x400 +
+						0x1c);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x400 +
+						0x14);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0xf);
+			break;
+
+		case 4: /*hevc*/
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x500 +
+						0x38);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x500 +
+						0x30);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0xf);
+			break;
+
+		default:
+			ve_int_status_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x100 +
+						0x1c);
+			ve_int_ctrl_reg =
+				(unsigned long)(cedar_devp->regs_macc + 0x100 +
+						0x14);
+			interrupt_enable =
+				readl((void *)ve_int_ctrl_reg) & (0xf);
+			dev_warn(cedar_devp->platform_dev,
+				 "ve mode 0x%x not defined!\n",
+				 modual_sel);
+			break;
+		}
+
+		status = readl((void *)ve_int_status_reg);
+
+		/*modify by fangning 2013-05-22*/
+		if ((status & 0xf) && interrupt_enable) {
+			/*disable interrupt*/
+			if (modual_sel == 0) {
+				val = readl((void *)ve_int_ctrl_reg);
+				writel(val & (~0x7c), (void *)ve_int_ctrl_reg);
+			} else {
+				val = readl((void *)ve_int_ctrl_reg);
+				writel(val & (~0xf), (void *)ve_int_ctrl_reg);
+			}
+
+			cedar_devp->de_irq_value = 1;
+			cedar_devp->de_irq_flag = 1;
+			/*any interrupt will wake up wait queue*/
+			wake_up(&wait_ve);
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
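+/*
+ * enable_cedar_hw_clk()/disable_cedar_hw_clk() gate the module clock and
+ * reset line behind the clk_status flag under cedar_spin_lock; disabling
+ * also drops whatever is left on the device's iommu buffer list.
+ */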
+int enable_cedar_hw_clk(void)
+{
+	unsigned long flags;
+	int res = -EFAULT;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	if (clk_status == 1)
+		goto out;
+
+	clk_status = 1;
+
+	reset_control_deassert(cedar_devp->rstc);
+	if (clk_enable(cedar_devp->mod_clk)) {
+		dev_warn(cedar_devp->platform_dev,
+			 "enable cedar_devp->mod_clk failed;\n");
+		goto out;
+	} else {
+		res = 0;
+	}
+
+	AW_MEM_INIT_LIST_HEAD(&cedar_devp->list);
+	dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__, __LINE__);
+
+out:
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+	return res;
+}
+
+int disable_cedar_hw_clk(void)
+{
+	unsigned long flags;
+	struct aw_mem_list_head *pos, *q;
+	int res = -EFAULT;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	if (clk_status == 0) {
+		res = 0;
+		goto out;
+	}
+	clk_status = 0;
+
+	if ((NULL == cedar_devp->mod_clk) || (IS_ERR(cedar_devp->mod_clk)))
+		dev_warn(cedar_devp->platform_dev,
+			 "cedar_devp->mod_clk is invalid\n");
+	else {
+		clk_disable(cedar_devp->mod_clk);
+		reset_control_assert(cedar_devp->rstc);
+		res = 0;
+	}
+
+	aw_mem_list_for_each_safe(pos, q, &cedar_devp->list)
+	{
+		struct cedarv_iommu_buffer *tmp;
+
+		tmp = aw_mem_list_entry(pos, struct cedarv_iommu_buffer,
+					i_list);
+		aw_mem_list_del(pos);
+		kfree(tmp);
+	}
+	dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__, __LINE__);
+
+out:
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+	return res;
+}
+
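+/*
+ * Queue a new task on run_task_list in priority order: it is inserted in
+ * front of the first entry that is neither running nor the head task and
+ * has a lower priority, then the engine timer is kicked immediately.
+ */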
+void cedardev_insert_task(struct cedarv_engine_task *new_task)
+{
+	struct cedarv_engine_task *task_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	if (list_empty(&run_task_list))
+		new_task->is_first_task = 1;
+
+	list_for_each_entry (task_entry, &run_task_list, list) {
+		if ((task_entry->is_first_task == 0) &&
+		    (task_entry->running == 0) &&
+		    (task_entry->t.task_prio < new_task->t.task_prio)) {
+			break;
+		}
+	}
+
+	list_add(&new_task->list, task_entry->list.prev);
+
+	dev_dbg(cedar_devp->platform_dev, "%s,%d, TASK_ID:", __func__,
+		__LINE__);
+	list_for_each_entry (task_entry, &run_task_list, list) {
+		dev_dbg(cedar_devp->platform_dev, "%d!", task_entry->t.ID);
+	}
+	dev_dbg(cedar_devp->platform_dev, "\n");
+
+	mod_timer(&cedar_devp->cedar_engine_timer, jiffies + 0);
+
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+}
+
+int cedardev_del_task(int task_id)
+{
+	struct cedarv_engine_task *task_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	list_for_each_entry (task_entry, &run_task_list, list) {
+		if (task_entry->t.ID == task_id &&
+		    task_entry->status != TASK_RELEASE) {
+			task_entry->status = TASK_RELEASE;
+
+			spin_unlock_irqrestore(&cedar_spin_lock, flags);
+			mod_timer(&cedar_devp->cedar_engine_timer, jiffies + 0);
+			return 0;
+		}
+	}
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+
+	return -1;
+}
+
+int cedardev_check_delay(int check_prio)
+{
+	struct cedarv_engine_task *task_entry;
+	int timeout_total = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+	list_for_each_entry (task_entry, &run_task_list, list) {
+		if ((task_entry->t.task_prio >= check_prio) ||
+		    (task_entry->running == 1) ||
+		    (task_entry->is_first_task == 1))
+			timeout_total = timeout_total + task_entry->t.frametime;
+	}
+
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+	dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n", __func__, __LINE__,
+		timeout_total);
+	return timeout_total;
+}
+
+static void cedar_engine_for_timer_rel(struct timer_list *arg)
+{
+	unsigned long flags;
+	int ret = 0;
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	if (list_empty(&run_task_list)) {
+		ret = disable_cedar_hw_clk();
+		if (ret < 0) {
+			dev_warn(cedar_devp->platform_dev,
+				 "clk disable error!\n");
+		}
+	} else {
+		dev_warn(cedar_devp->platform_dev,
+			 "clk disable timer expired but tasks are still queued\n");
+		mod_timer(&cedar_devp->cedar_engine_timer,
+			  jiffies + msecs_to_jiffies(TIMER_CIRCLE));
+	}
+
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+}
+
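+/*
+ * Periodic engine timer: tasks that were released or ran past their
+ * timeout are moved to del_task_list and their owners notified with
+ * SIG_CEDAR; if the run list is not empty, the task at its head is marked
+ * running and signalled to start.
+ */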
+static void cedar_engine_for_events(struct timer_list *arg)
+{
+	struct cedarv_engine_task *task_entry, *task_entry_tmp;
+	struct kernel_siginfo info;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cedar_spin_lock, flags);
+
+	list_for_each_entry_safe (task_entry, task_entry_tmp, &run_task_list,
+				  list) {
+		mod_timer(&cedar_devp->cedar_engine_timer_rel,
+			  jiffies + msecs_to_jiffies(CLK_REL_TIME));
+		if (task_entry->status == TASK_RELEASE ||
+		    time_after(jiffies, task_entry->t.timeout)) {
+			if (task_entry->status == TASK_INIT)
+				task_entry->status = TASK_TIMEOUT;
+			list_move(&task_entry->list, &del_task_list);
+		}
+	}
+
+	list_for_each_entry_safe (task_entry, task_entry_tmp, &del_task_list,
+				  list) {
+		info.si_signo = SIG_CEDAR;
+		info.si_code = task_entry->t.ID;
+		if (task_entry->status == TASK_TIMEOUT) {
+			info.si_errno = TASK_TIMEOUT;
+			send_sig_info(SIG_CEDAR, &info,
+				      task_entry->task_handle);
+		} else if (task_entry->status == TASK_RELEASE) {
+			info.si_errno = TASK_RELEASE;
+			send_sig_info(SIG_CEDAR, &info,
+				      task_entry->task_handle);
+		}
+		list_del(&task_entry->list);
+		kfree(task_entry);
+	}
+
+	if (!list_empty(&run_task_list)) {
+		task_entry = list_entry(run_task_list.next,
+					struct cedarv_engine_task, list);
+		if (task_entry->running == 0) {
+			task_entry->running = 1;
+			info.si_signo = SIG_CEDAR;
+			info.si_code = task_entry->t.ID;
+			info.si_errno = TASK_INIT;
+			send_sig_info(SIG_CEDAR, &info,
+				      task_entry->task_handle);
+		}
+
+		mod_timer(&cedar_devp->cedar_engine_timer,
+			  jiffies + msecs_to_jiffies(TIMER_CIRCLE));
+	}
+
+	spin_unlock_irqrestore(&cedar_spin_lock, flags);
+}
+
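+/*
+ * Main ioctl dispatcher for the VE character device: engine request and
+ * release, interrupt waits, clock and frequency control, the VE lock
+ * ioctls, the debugfs proc-info ioctls and IOMMU address lookup for
+ * imported dma-bufs.
+ */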
+static long compat_cedardev_ioctl(struct file *filp, unsigned int cmd,
+				  unsigned long arg)
+{
+	long ret = 0;
+	int ve_timeout = 0;
+	/*struct cedar_dev *devp;*/
+	unsigned long flags;
+	struct ve_info *info;
+
+	info = filp->private_data;
+
+	switch (cmd) {
+	case IOCTL_ENGINE_REQ:
+		if (down_interruptible(&cedar_devp->sem))
+			return -ERESTARTSYS;
+		cedar_devp->ref_count++;
+		if (1 == cedar_devp->ref_count) {
+			cedar_devp->last_min_freq = 0;
+			enable_cedar_hw_clk();
+		}
+		up(&cedar_devp->sem);
+		break;
+	case IOCTL_ENGINE_REL:
+		if (down_interruptible(&cedar_devp->sem))
+			return -ERESTARTSYS;
+		cedar_devp->ref_count--;
+		if (0 == cedar_devp->ref_count) {
+			ret = disable_cedar_hw_clk();
+			if (ret < 0) {
+				dev_warn(cedar_devp->platform_dev,
+					 "IOCTL_ENGINE_REL "
+					 "clk disable error!\n");
+				up(&cedar_devp->sem);
+				return -EFAULT;
+			}
+		}
+		up(&cedar_devp->sem);
+		return ret;
+	case IOCTL_ENGINE_CHECK_DELAY: {
+		struct cedarv_engine_task_info task_info;
+
+		if (copy_from_user(&task_info, (void __user *)arg,
+				   sizeof(struct cedarv_engine_task_info))) {
+			dev_warn(cedar_devp->platform_dev,
+				 "%d "
+				 "copy_from_user fail\n",
+				 IOCTL_ENGINE_CHECK_DELAY);
+			return -EFAULT;
+		}
+		task_info.total_time =
+			cedardev_check_delay(task_info.task_prio);
+		dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n", __func__,
+			__LINE__, task_info.total_time);
+		task_info.frametime = 0;
+		spin_lock_irqsave(&cedar_spin_lock, flags);
+		if (!list_empty(&run_task_list)) {
+			struct cedarv_engine_task *task_entry;
+			dev_dbg(cedar_devp->platform_dev, "%s,%d\n", __func__,
+				__LINE__);
+			task_entry =
+				list_entry(run_task_list.next,
+					   struct cedarv_engine_task, list);
+			if (task_entry->running == 1)
+				task_info.frametime = task_entry->t.frametime;
+			dev_dbg(cedar_devp->platform_dev, "%s,%d,%d\n",
+				__func__, __LINE__, task_info.frametime);
+		}
+		spin_unlock_irqrestore(&cedar_spin_lock, flags);
+
+		if (copy_to_user((void __user *)arg, &task_info,
+				 sizeof(struct cedarv_engine_task_info))) {
+			dev_warn(cedar_devp->platform_dev,
+				 "%d "
+				 "copy_to_user fail\n",
+				 IOCTL_ENGINE_CHECK_DELAY);
+			return -EFAULT;
+		}
+	} break;
+	case IOCTL_WAIT_VE_DE:
+		ve_timeout = (int)arg;
+		cedar_devp->de_irq_value = 0;
+
+		spin_lock_irqsave(&cedar_spin_lock, flags);
+		if (cedar_devp->de_irq_flag)
+			cedar_devp->de_irq_value = 1;
+		spin_unlock_irqrestore(&cedar_spin_lock, flags);
+		wait_event_timeout(wait_ve, cedar_devp->de_irq_flag,
+				   ve_timeout * HZ);
+		cedar_devp->de_irq_flag = 0;
+
+		return cedar_devp->de_irq_value;
+
+	case IOCTL_WAIT_VE_EN:
+
+		ve_timeout = (int)arg;
+		cedar_devp->en_irq_value = 0;
+
+		spin_lock_irqsave(&cedar_spin_lock, flags);
+		if (cedar_devp->en_irq_flag)
+			cedar_devp->en_irq_value = 1;
+		spin_unlock_irqrestore(&cedar_spin_lock, flags);
+
+		wait_event_timeout(wait_ve, cedar_devp->en_irq_flag,
+				   ve_timeout * HZ);
+		cedar_devp->en_irq_flag = 0;
+
+		return cedar_devp->en_irq_value;
+
+	case IOCTL_WAIT_JPEG_DEC:
+		ve_timeout = (int)arg;
+		cedar_devp->jpeg_irq_value = 0;
+
+		spin_lock_irqsave(&cedar_spin_lock, flags);
+		if (cedar_devp->jpeg_irq_flag)
+			cedar_devp->jpeg_irq_value = 1;
+		spin_unlock_irqrestore(&cedar_spin_lock, flags);
+
+		wait_event_timeout(wait_ve, cedar_devp->jpeg_irq_flag,
+				   ve_timeout * HZ);
+		cedar_devp->jpeg_irq_flag = 0;
+		return cedar_devp->jpeg_irq_value;
+
+	case IOCTL_ENABLE_VE:
+		if (clk_prepare_enable(cedar_devp->mod_clk)) {
+			dev_warn(cedar_devp->platform_dev,
+				 "IOCTL_ENABLE_VE "
+				 "enable cedar_devp->mod_clk failed!\n");
+		}
+		break;
+
+	case IOCTL_DISABLE_VE:
+		if ((NULL == cedar_devp->mod_clk) ||
+		    IS_ERR(cedar_devp->mod_clk)) {
+			dev_warn(cedar_devp->platform_dev,
+				 "IOCTL_DISABLE_VE "
+				 "cedar_devp->mod_clk is invalid\n");
+			return -EFAULT;
+		} else {
+			clk_disable_unprepare(cedar_devp->mod_clk);
+		}
+		break;
+
+	case IOCTL_RESET_VE:
+		reset_control_assert(cedar_devp->rstc);
+		reset_control_deassert(cedar_devp->rstc);
+		break;
+
+	case IOCTL_SET_DRAM_HIGH_CHANNAL: {
+		dev_err(cedar_devp->platform_dev,
+			"IOCTL_SET_DRAM_HIGH_CHANNAL NOT IMPL\n");
+		break;
+	}
+
+	case IOCTL_SET_VE_FREQ: {
+		int arg_rate = (int)arg;
+
+		if (0 == cedar_devp->last_min_freq) {
+			cedar_devp->last_min_freq = arg_rate;
+		} else {
+			if (arg_rate > cedar_devp->last_min_freq) {
+				arg_rate = cedar_devp->last_min_freq;
+			} else {
+				cedar_devp->last_min_freq = arg_rate;
+			}
+		}
+		if (arg_rate >= VE_CLK_LOW_WATER &&
+		    arg_rate <= VE_CLK_HIGH_WATER &&
+		    clk_get_rate(cedar_devp->mod_clk) / 1000000 != arg_rate) {
+			clk_get_rate(cedar_devp->ahb_clk);
+			if (clk_set_rate(cedar_devp->mod_clk,
+					 arg_rate * 1000000)) {
+				dev_warn(cedar_devp->platform_dev,
+					 "set ve clock failed\n");
+			}
+		}
+		ret = clk_get_rate(cedar_devp->mod_clk);
+		break;
+	}
+	case IOCTL_GETVALUE_AVS2:
+	case IOCTL_ADJUST_AVS2:
+	case IOCTL_ADJUST_AVS2_ABS:
+	case IOCTL_CONFIG_AVS2:
+	case IOCTL_RESET_AVS2:
+	case IOCTL_PAUSE_AVS2:
+	case IOCTL_START_AVS2:
+		dev_warn(cedar_devp->platform_dev,
+			 "this ioctl is not supported yet\n");
+		break;
+
+	case IOCTL_GET_ENV_INFO: {
+		struct cedarv_env_infomation_compat env_info;
+
+		env_info.phymem_start = 0;
+		env_info.phymem_total_size = 0;
+		env_info.address_macc = 0;
+		if (copy_to_user((void __user *)arg, &env_info,
+				 sizeof(struct cedarv_env_infomation_compat)))
+			return -EFAULT;
+	} break;
+	case IOCTL_GET_IC_VER: {
+		return 0;
+	}
+	case IOCTL_SET_REFCOUNT:
+		cedar_devp->ref_count = (int)arg;
+		break;
+	case IOCTL_SET_VOL: {
+		break;
+	}
+	case IOCTL_GET_LOCK: {
+		int lock_ctl_ret = 0;
+		u32 lock_type = arg;
+		struct ve_info *vi = filp->private_data;
+
+		if (lock_type == VE_LOCK_VDEC)
+			mutex_lock(&cedar_devp->lock_vdec);
+		else if (lock_type == VE_LOCK_VENC)
+			mutex_lock(&cedar_devp->lock_venc);
+		else if (lock_type == VE_LOCK_JDEC)
+			mutex_lock(&cedar_devp->lock_jdec);
+		else if (lock_type == VE_LOCK_00_REG)
+			mutex_lock(&cedar_devp->lock_00_reg);
+		else if (lock_type == VE_LOCK_04_REG)
+			mutex_lock(&cedar_devp->lock_04_reg);
+		else
+			dev_err(cedar_devp->platform_dev,
+				"invalid lock type '%d'", lock_type);
+
+		if ((vi->lock_flags & lock_type) != 0)
+			dev_err(cedar_devp->platform_dev,
+				"when get lock, this should be 0!!!");
+
+		mutex_lock(&vi->lock_flag_io);
+		vi->lock_flags |= lock_type;
+		mutex_unlock(&vi->lock_flag_io);
+
+		return lock_ctl_ret;
+	}
+	case IOCTL_SET_PROC_INFO: {
+		struct VE_PROC_INFO ve_info;
+		unsigned char channel_id = 0;
+
+		mutex_lock(&ve_debug_proc_info.lock_proc);
+		if (copy_from_user(&ve_info, (void __user *)arg,
+				   sizeof(struct VE_PROC_INFO))) {
+			dev_warn(cedar_devp->platform_dev,
+				 "IOCTL_SET_PROC_INFO copy_from_user fail\n");
+			mutex_unlock(&ve_debug_proc_info.lock_proc);
+			return -EFAULT;
+		}
+
+		channel_id = ve_info.channel_id;
+		if (channel_id >= VE_DEBUGFS_MAX_CHANNEL) {
+			dev_warn(
+				cedar_devp->platform_dev,
+				"set channel[%d] is bigger than max channel[%d]\n",
+				channel_id, VE_DEBUGFS_MAX_CHANNEL);
+			mutex_unlock(&ve_debug_proc_info.lock_proc);
+			return -EFAULT;
+		}
+
+		ve_debug_proc_info.cur_channel_id = ve_info.channel_id;
+		ve_debug_proc_info.proc_len[channel_id] = ve_info.proc_info_len;
+		ve_debug_proc_info.proc_buf[channel_id] =
+			ve_debug_proc_info.data +
+			channel_id * VE_DEBUGFS_BUF_SIZE;
+		break;
+	}
+	case IOCTL_COPY_PROC_INFO: {
+		unsigned char channel_id;
+
+		channel_id = ve_debug_proc_info.cur_channel_id;
+		if (copy_from_user(ve_debug_proc_info.proc_buf[channel_id],
+				   (void __user *)arg,
+				   ve_debug_proc_info.proc_len[channel_id])) {
+			dev_err(cedar_devp->platform_dev,
+				"IOCTL_COPY_PROC_INFO copy_from_user fail\n");
+			mutex_unlock(&ve_debug_proc_info.lock_proc);
+			return -EFAULT;
+		}
+		mutex_unlock(&ve_debug_proc_info.lock_proc);
+		break;
+	}
+	case IOCTL_STOP_PROC_INFO: {
+		unsigned char channel_id;
+
+		channel_id = arg;
+		ve_debug_proc_info.proc_buf[channel_id] = NULL;
+
+		break;
+	}
+	case IOCTL_RELEASE_LOCK: {
+		int lock_ctl_ret = 0;
+		do {
+			u32 lock_type = arg;
+			struct ve_info *vi = filp->private_data;
+
+			if (!(vi->lock_flags & lock_type)) {
+				dev_err(cedar_devp->platform_dev,
+					"Not lock? flags: '%x/%x'.",
+					vi->lock_flags, lock_type);
+				lock_ctl_ret = -1;
+				break; /* break 'do...while' */
+			}
+
+			mutex_lock(&vi->lock_flag_io);
+			vi->lock_flags &= (~lock_type);
+			mutex_unlock(&vi->lock_flag_io);
+
+			if (lock_type == VE_LOCK_VDEC)
+				mutex_unlock(&cedar_devp->lock_vdec);
+			else if (lock_type == VE_LOCK_VENC)
+				mutex_unlock(&cedar_devp->lock_venc);
+			else if (lock_type == VE_LOCK_JDEC)
+				mutex_unlock(&cedar_devp->lock_jdec);
+			else if (lock_type == VE_LOCK_00_REG)
+				mutex_unlock(&cedar_devp->lock_00_reg);
+			else if (lock_type == VE_LOCK_04_REG)
+				mutex_unlock(&cedar_devp->lock_04_reg);
+			else
+				dev_err(cedar_devp->platform_dev,
+					"invalid lock type '%d'", lock_type);
+		} while (0);
+		return lock_ctl_ret;
+	}
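+	/*
+	 * IOCTL_GET_IOMMU_ADDR imports a dma-buf by fd, maps it for the VE
+	 * and returns the 32-bit DMA address to userspace. The mapping is
+	 * kept on cedar_devp->list, keyed by (fd, tgid), until the matching
+	 * IOCTL_FREE_IOMMU_ADDR.
+	 */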
+	case IOCTL_GET_IOMMU_ADDR: {
+		int ret, i;
+		struct sg_table *sgt, *sgt_bak;
+		struct scatterlist *sgl, *sgl_bak;
+		struct user_iommu_param sUserIommuParam;
+		struct cedarv_iommu_buffer *pVeIommuBuf = NULL;
+
+		pVeIommuBuf = kzalloc(sizeof(*pVeIommuBuf), GFP_KERNEL);
+		if (pVeIommuBuf == NULL) {
+			dev_err(cedar_devp->platform_dev,
+				"IOCTL_GET_IOMMU_ADDR kmalloc cedarv_iommu_buffer error\n");
+			return -ENOMEM;
+		}
+		if (copy_from_user(&sUserIommuParam, (void __user *)arg,
+				   sizeof(struct user_iommu_param))) {
+			dev_err(cedar_devp->platform_dev,
+				"IOCTL_GET_IOMMU_ADDR copy_from_user error");
+			kfree(pVeIommuBuf);
+			return -EFAULT;
+		}
+
+		pVeIommuBuf->fd = sUserIommuParam.fd;
+		pVeIommuBuf->dma_buf = dma_buf_get(pVeIommuBuf->fd);
+		if (IS_ERR(pVeIommuBuf->dma_buf)) {
+			dev_err(cedar_devp->platform_dev,
+				"ve get dma_buf error");
+			kfree(pVeIommuBuf);
+			return -EFAULT;
+		}
+
+		pVeIommuBuf->attachment = dma_buf_attach(
+			pVeIommuBuf->dma_buf, cedar_devp->platform_dev);
+		if (IS_ERR(pVeIommuBuf->attachment)) {
+			dev_err(cedar_devp->platform_dev,
+				"ve get dma_buf_attachment error");
+			pVeIommuBuf->attachment = NULL;
+			goto RELEASE_DMA_BUF;
+		}
+
+		sgt = dma_buf_map_attachment(pVeIommuBuf->attachment,
+					     DMA_BIDIRECTIONAL);
+		if (IS_ERR(sgt)) {
+			dev_err(cedar_devp->platform_dev,
+				"ve dma_buf_map_attachment error\n");
+			goto RELEASE_DMA_BUF;
+		}
+
+		sgt_bak = kzalloc(sizeof(*sgt_bak), GFP_KERNEL);
+		if (sgt_bak == NULL) {
+			dev_err(cedar_devp->platform_dev, "alloc sgt fail\n");
+			goto RELEASE_DMA_BUF;
+		}
+
+		ret = sg_alloc_table(sgt_bak, sgt->nents, GFP_KERNEL);
+		if (ret != 0) {
+			dev_err(cedar_devp->platform_dev, "alloc sgt fail\n");
+			kfree(sgt_bak);
+			goto RELEASE_DMA_BUF;
+		}
+
+		sgl_bak = sgt_bak->sgl;
+		for_each_sg (sgt->sgl, sgl, sgt->nents, i) {
+			sg_set_page(sgl_bak, sg_page(sgl), sgl->length,
+				    sgl->offset);
+			sgl_bak = sg_next(sgl_bak);
+		}
+
+		pVeIommuBuf->sgt = sgt_bak;
+
+		ret = dma_map_sg(cedar_devp->platform_dev,
+				 pVeIommuBuf->sgt->sgl, pVeIommuBuf->sgt->nents,
+				 DMA_BIDIRECTIONAL);
+		if (ret != 1) {
+			dev_err(cedar_devp->platform_dev,
+				"ve dma_map_sg error\n");
+			goto RELEASE_DMA_BUF;
+		}
+
+		pVeIommuBuf->iommu_addr = sg_dma_address(pVeIommuBuf->sgt->sgl);
+		sUserIommuParam.iommu_addr =
+			(unsigned int)(pVeIommuBuf->iommu_addr & 0xffffffff);
+
+		if (copy_to_user((void __user *)arg, &sUserIommuParam,
+				 sizeof(struct user_iommu_param))) {
+			dev_err(cedar_devp->platform_dev,
+				"ve get iommu copy_to_user error\n");
+			goto RELEASE_DMA_BUF;
+		}
+
+		pVeIommuBuf->p_id = current->tgid;
+		dev_dbg(cedar_devp->platform_dev,
+			"fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p, nents:%d, pid:%d\n",
+			pVeIommuBuf->fd, pVeIommuBuf->iommu_addr,
+			pVeIommuBuf->dma_buf, pVeIommuBuf->attachment,
+			pVeIommuBuf->sgt, pVeIommuBuf->sgt->nents,
+			pVeIommuBuf->p_id);
+
+		mutex_lock(&cedar_devp->lock_mem);
+		aw_mem_list_add_tail(&pVeIommuBuf->i_list, &cedar_devp->list);
+		mutex_unlock(&cedar_devp->lock_mem);
+		break;
+
+	RELEASE_DMA_BUF:
+		if (pVeIommuBuf->sgt) {
+			dma_unmap_sg(cedar_devp->platform_dev,
+				     pVeIommuBuf->sgt->sgl,
+				     pVeIommuBuf->sgt->nents,
+				     DMA_BIDIRECTIONAL);
+			dma_buf_unmap_attachment(pVeIommuBuf->attachment,
+						 pVeIommuBuf->sgt,
+						 DMA_BIDIRECTIONAL);
+			sg_free_table(pVeIommuBuf->sgt);
+			kfree(pVeIommuBuf->sgt);
+		}
+		if (pVeIommuBuf->attachment)
+			dma_buf_detach(pVeIommuBuf->dma_buf,
+				       pVeIommuBuf->attachment);
+		dma_buf_put(pVeIommuBuf->dma_buf);
+		kfree(pVeIommuBuf);
+		return -EFAULT;
+	}
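+	/*
+	 * IOCTL_FREE_IOMMU_ADDR looks the buffer up by (fd, tgid), unmaps
+	 * and detaches the dma-buf, and removes it from cedar_devp->list.
+	 */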
+	case IOCTL_FREE_IOMMU_ADDR: {
+		struct user_iommu_param sUserIommuParam;
+		struct cedarv_iommu_buffer *pVeIommuBuf;
+
+		if (copy_from_user(&sUserIommuParam, (void __user *)arg,
+				   sizeof(struct user_iommu_param))) {
+			dev_err(cedar_devp->platform_dev,
+				"IOCTL_FREE_IOMMU_ADDR copy_from_user error");
+			return -EFAULT;
+		}
+		aw_mem_list_for_each_entry(pVeIommuBuf, &cedar_devp->list,
+					   i_list)
+		{
+			if (pVeIommuBuf->fd == sUserIommuParam.fd &&
+			    pVeIommuBuf->p_id == current->tgid) {
+				dev_dbg(cedar_devp->platform_dev,
+					"free: fd:%d, iommu_addr:%lx, dma_buf:%p, dma_buf_attach:%p, sg_table:%p nets:%d, pid:%d\n",
+					pVeIommuBuf->fd,
+					pVeIommuBuf->iommu_addr,
+					pVeIommuBuf->dma_buf,
+					pVeIommuBuf->attachment,
+					pVeIommuBuf->sgt,
+					pVeIommuBuf->sgt->nents,
+					pVeIommuBuf->p_id);
+
+				if (pVeIommuBuf->dma_buf) {
+					if (pVeIommuBuf->sgt) {
+						dma_unmap_sg(cedar_devp->platform_dev,
+							     pVeIommuBuf->sgt->sgl,
+							     pVeIommuBuf->sgt->nents,
+							     DMA_BIDIRECTIONAL);
+						dma_buf_unmap_attachment(pVeIommuBuf->attachment,
+									 pVeIommuBuf->sgt,
+									 DMA_BIDIRECTIONAL);
+						sg_free_table(pVeIommuBuf->sgt);
+						kfree(pVeIommuBuf->sgt);
+					}
+
+					dma_buf_detach(pVeIommuBuf->dma_buf,
+						       pVeIommuBuf->attachment);
+					dma_buf_put(pVeIommuBuf->dma_buf);
+				}
+
+				mutex_lock(&cedar_devp->lock_mem);
+				aw_mem_list_del(&pVeIommuBuf->i_list);
+				kfree(pVeIommuBuf);
+				mutex_unlock(&cedar_devp->lock_mem);
+				break;
+			}
+		}
+		break;
+	}
+	default:
+		return -1;
+	}
+	return ret;
+}
+
+static int cedardev_open(struct inode *inode, struct file *filp)
+{
+	struct ve_info *info;
+
+	info = kmalloc(sizeof(struct ve_info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->set_vol_flag = 0;
+
+	filp->private_data = info;
+	if (down_interruptible(&cedar_devp->sem)) {
+		kfree(info);
+		return -ERESTARTSYS;
+	}
+
+	/* init other resource here */
+	if (0 == cedar_devp->ref_count) {
+		cedar_devp->de_irq_flag = 0;
+		cedar_devp->en_irq_flag = 0;
+		cedar_devp->jpeg_irq_flag = 0;
+	}
+
+	up(&cedar_devp->sem);
+	nonseekable_open(inode, filp);
+
+	mutex_init(&info->lock_flag_io);
+	info->lock_flags = 0;
+
+	return 0;
+}
+
+static int cedardev_release(struct inode *inode, struct file *filp)
+{
+	struct ve_info *info;
+
+	info = filp->private_data;
+
+	mutex_lock(&info->lock_flag_io);
+	/* lock status */
+	if (info->lock_flags) {
+		dev_warn(cedar_devp->platform_dev, "release lost-lock...");
+		if (info->lock_flags & VE_LOCK_VDEC)
+			mutex_unlock(&cedar_devp->lock_vdec);
+
+		if (info->lock_flags & VE_LOCK_VENC)
+			mutex_unlock(&cedar_devp->lock_venc);
+
+		if (info->lock_flags & VE_LOCK_JDEC)
+			mutex_unlock(&cedar_devp->lock_jdec);
+
+		if (info->lock_flags & VE_LOCK_00_REG)
+			mutex_unlock(&cedar_devp->lock_00_reg);
+
+		if (info->lock_flags & VE_LOCK_04_REG)
+			mutex_unlock(&cedar_devp->lock_04_reg);
+
+		info->lock_flags = 0;
+	}
+
+	mutex_unlock(&info->lock_flag_io);
+	mutex_destroy(&info->lock_flag_io);
+
+	if (down_interruptible(&cedar_devp->sem)) {
+		return -ERESTARTSYS;
+	}
+
+	/* release other resource here */
+	if (0 == cedar_devp->ref_count) {
+		cedar_devp->de_irq_flag = 1;
+		cedar_devp->en_irq_flag = 1;
+		cedar_devp->jpeg_irq_flag = 1;
+	}
+	up(&cedar_devp->sem);
+
+	kfree(info);
+	return 0;
+}
+
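+/* The register mapping needs no special handling on VMA open/close. */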
+static void cedardev_vma_open(struct vm_area_struct *vma)
+{
+}
+
+static void cedardev_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct cedardev_remap_vm_ops = {
+	.open = cedardev_vma_open,
+	.close = cedardev_vma_close,
+};
+
+#ifdef CONFIG_PM
+static int snd_sw_cedar_suspend(struct platform_device *pdev,
+				pm_message_t state)
+{
+	int ret = 0;
+
+	dev_info(&pdev->dev, "standby suspend\n");
+	ret = disable_cedar_hw_clk();
+
+	if (ret < 0) {
+		dev_warn(cedar_devp->platform_dev,
+			 "failed to disable cedar clocks\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int snd_sw_cedar_resume(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	dev_info(&pdev->dev, "standby resume\n");
+
+	if (cedar_devp->ref_count == 0) {
+		return 0;
+	}
+
+	ret = enable_cedar_hw_clk();
+	if (ret < 0) {
+		dev_warn(cedar_devp->platform_dev,
+			 "failed to enable cedar clocks\n");
+		return -EFAULT;
+	}
+	return 0;
+}
+#endif
+
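+/*
+ * Map the VE (MACC) register window at cedar_devp->phy_addr into userspace
+ * as non-cached I/O memory.
+ */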
+static int cedardev_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long temp_pfn;
+
+	if (vma->vm_end - vma->vm_start == 0) {
+		dev_warn(cedar_devp->platform_dev,
+			 "vma->vm_end is equal to vma->vm_start: %lx\n",
+			 vma->vm_start);
+		return 0;
+	}
+	if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) {
+		dev_err(cedar_devp->platform_dev,
+			"vma->vm_pgoff (%lx) is larger than the largest page number\n",
+			vma->vm_pgoff);
+		return -EINVAL;
+	}
+
+	temp_pfn = cedar_devp->phy_addr >> PAGE_SHIFT;
+
+	/* Set reserved and I/O flag for the area. */
+	vma->vm_flags |= /*VM_RESERVED | */ VM_IO;
+	/* Select uncached access. */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	if (io_remap_pfn_range(vma, vma->vm_start, temp_pfn,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot)) {
+		return -EAGAIN;
+	}
+
+	vma->vm_ops = &cedardev_remap_vm_ops;
+	cedardev_vma_open(vma);
+
+	return 0;
+}
+
+static const struct file_operations cedardev_fops = {
+	.owner = THIS_MODULE,
+	.mmap = cedardev_mmap,
+	.open = cedardev_open,
+	.release = cedardev_release,
+	.llseek = no_llseek,
+	.unlocked_ioctl = compat_cedardev_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_cedardev_ioctl,
+#endif
+};
+
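+/*
+ * Reading the debugfs file (mpp/ve) returns a snapshot of all per-channel
+ * proc buffers submitted via IOCTL_SET_PROC_INFO / IOCTL_COPY_PROC_INFO.
+ */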
+static int ve_debugfs_open(struct inode *inode, struct file *file)
+{
+	int i = 0;
+	char *pData;
+	struct ve_debugfs_proc *pVeProc;
+
+	pVeProc = kmalloc(sizeof(struct ve_debugfs_proc), GFP_KERNEL);
+	if (pVeProc == NULL) {
+		dev_err(cedar_devp->platform_dev, "kmalloc pVeProc fail\n");
+		return -ENOMEM;
+	}
+	pVeProc->len = 0;
+	memset(pVeProc->data, 0, VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL);
+
+	pData = pVeProc->data;
+	mutex_lock(&ve_debug_proc_info.lock_proc);
+	for (i = 0; i < VE_DEBUGFS_MAX_CHANNEL; i++) {
+		if (ve_debug_proc_info.proc_buf[i] != NULL) {
+			memcpy(pData, ve_debug_proc_info.proc_buf[i],
+			       ve_debug_proc_info.proc_len[i]);
+			pData += ve_debug_proc_info.proc_len[i];
+			pVeProc->len += ve_debug_proc_info.proc_len[i];
+		}
+	}
+	mutex_unlock(&ve_debug_proc_info.lock_proc);
+
+	file->private_data = pVeProc;
+	return 0;
+}
+
+static ssize_t ve_debugfs_read(struct file *file, char __user *user_buf,
+			       size_t nbytes, loff_t *ppos)
+{
+	struct ve_debugfs_proc *pVeProc = file->private_data;
+
+	if (pVeProc->len == 0) {
+		dev_dbg(cedar_devp->platform_dev,
+			"no codec is currently running\n");
+		return 0;
+	}
+
+	return simple_read_from_buffer(user_buf, nbytes, ppos, pVeProc->data,
+				       pVeProc->len);
+}
+
+static int ve_debugfs_release(struct inode *inode, struct file *file)
+{
+	struct ve_debugfs_proc *pVeProc = file->private_data;
+
+	kfree(pVeProc);
+	pVeProc = NULL;
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static const struct file_operations ve_debugfs_fops = {
+	.owner = THIS_MODULE,
+	.open = ve_debugfs_open,
+	.llseek = no_llseek,
+	.read = ve_debugfs_read,
+	.release = ve_debugfs_release,
+};
+
+int sunxi_ve_debug_register_driver(void)
+{
+	struct dentry *dent;
+
+	ve_debugfs_root = debugfs_create_dir("mpp", NULL);
+
+	if (ve_debugfs_root == NULL) {
+		dev_err(cedar_devp->platform_dev,
+			"failed to create debugfs directory 'mpp'\n");
+		return -ENOENT;
+	}
+
+	dent = debugfs_create_file("ve", 0444, ve_debugfs_root, NULL,
+				   &ve_debugfs_fops);
+	if (IS_ERR_OR_NULL(dent)) {
+		dev_err(cedar_devp->platform_dev,
+			"Unable to create debugfs status file.\n");
+		debugfs_remove_recursive(ve_debugfs_root);
+		ve_debugfs_root = NULL;
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+void sunxi_ve_debug_unregister_driver(void)
+{
+	if (ve_debugfs_root == NULL)
+		return;
+	debugfs_remove_recursive(ve_debugfs_root);
+	ve_debugfs_root = NULL;
+}
+
+static int cedardev_init(struct platform_device *pdev)
+{
+	int ret = 0;
+	int i = 0;
+	int devno;
+	struct device_node *node;
+	dev_t dev;
+	const struct cedar_variant *variant;
+	struct resource *res;
+
+	node = pdev->dev.of_node;
+	dev = 0;
+	dev_info(&pdev->dev, "sunxi cedar version " DRV_VERSION "\n");
+	dev_dbg(&pdev->dev, "install start\n");
+
+	variant = of_device_get_match_data(&pdev->dev);
+	if (!variant)
+		return -EINVAL;
+
+	spin_lock_init(&cedar_spin_lock);
+	cedar_devp = kzalloc(sizeof(*cedar_devp), GFP_KERNEL);
+	if (cedar_devp == NULL) {
+		dev_warn(&pdev->dev, "malloc mem for cedar device err\n");
+		return -ENOMEM;
+	}
+
+	cedar_devp->platform_dev = &pdev->dev;
+	cedar_devp->capabilities = variant->capabilities;
+
+	ret = sunxi_sram_claim(cedar_devp->platform_dev);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev, "Failed to claim SRAM\n");
+		goto err_mem;
+	}
+
+	cedar_devp->ahb_clk = devm_clk_get(cedar_devp->platform_dev, "ahb");
+	if (IS_ERR(cedar_devp->ahb_clk)) {
+		dev_err(cedar_devp->platform_dev, "Failed to get AHB clock\n");
+		ret = PTR_ERR(cedar_devp->ahb_clk);
+		goto err_sram;
+	}
+
+	cedar_devp->mod_clk = devm_clk_get(cedar_devp->platform_dev, "mod");
+	if (IS_ERR(cedar_devp->mod_clk)) {
+		dev_err(cedar_devp->platform_dev, "Failed to get MOD clock\n");
+		ret = PTR_ERR(cedar_devp->mod_clk);
+		goto err_sram;
+	}
+
+	cedar_devp->ram_clk = devm_clk_get(cedar_devp->platform_dev, "ram");
+	if (IS_ERR(cedar_devp->ram_clk)) {
+		dev_err(cedar_devp->platform_dev, "Failed to get RAM clock\n");
+		ret = PTR_ERR(cedar_devp->ram_clk);
+		goto err_sram;
+	}
+
+	cedar_devp->rstc =
+		devm_reset_control_get(cedar_devp->platform_dev, NULL);
+	if (IS_ERR(cedar_devp->rstc)) {
+		dev_err(cedar_devp->platform_dev,
+			"Failed to get reset control\n");
+		ret = PTR_ERR(cedar_devp->rstc);
+		goto err_sram;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	cedar_devp->regs_macc =
+		devm_ioremap_resource(cedar_devp->platform_dev, res);
+	if (IS_ERR(cedar_devp->regs_macc)) {
+		dev_err(cedar_devp->platform_dev, "Failed to map registers\n");
+		ret = PTR_ERR(cedar_devp->regs_macc);
+		goto err_sram;
+	}
+	cedar_devp->phy_addr = res->start;
+
+	ret = clk_set_rate(cedar_devp->mod_clk, variant->mod_rate);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev, "Failed to set clock rate\n");
+		goto err_sram;
+	}
+
+	ret = clk_prepare_enable(cedar_devp->ahb_clk);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev,
+			"Failed to enable AHB clock\n");
+		goto err_sram;
+	}
+
+	ret = clk_prepare_enable(cedar_devp->mod_clk);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev,
+			"Failed to enable MOD clock\n");
+		goto err_ahb_clk;
+	}
+
+	ret = clk_prepare_enable(cedar_devp->ram_clk);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev,
+			"Failed to enable RAM clock\n");
+		goto err_mod_clk;
+	}
+
+	ret = reset_control_reset(cedar_devp->rstc);
+	if (ret) {
+		dev_err(cedar_devp->platform_dev, "Failed to apply reset\n");
+		goto err_ram_clk;
+	}
+
+	cedar_devp->irq = irq_of_parse_and_map(node, 0);
+	dev_info(cedar_devp->platform_dev, "cedar-ve: got irq %d\n",
+		 cedar_devp->irq);
+	if (cedar_devp->irq <= 0) {
+		dev_err(cedar_devp->platform_dev, "Can't parse IRQ");
+		ret = -EINVAL;
+		goto err_ram_clk;
+	}
+
+	sema_init(&cedar_devp->sem, 1);
+	init_waitqueue_head(&cedar_devp->wq);
+
+	ret = request_irq(cedar_devp->irq, VideoEngineInterupt, 0, "cedar_dev",
+			  cedar_devp);
+	if (ret < 0) {
+		dev_err(cedar_devp->platform_dev, "request irq err\n");
+		goto err_ram_clk;
+	}
+
+	/*register or alloc the device number.*/
+	if (g_dev_major) {
+		dev = MKDEV(g_dev_major, g_dev_minor);
+		ret = register_chrdev_region(dev, 1, "cedar_dev");
+	} else {
+		ret = alloc_chrdev_region(&dev, g_dev_minor, 1, "cedar_dev");
+		g_dev_major = MAJOR(dev);
+		g_dev_minor = MINOR(dev);
+	}
+	if (ret < 0) {
+		dev_err(cedar_devp->platform_dev,
+			"cedar_dev: can't get major %d\n", g_dev_major);
+		return ret;
+	}
+
+	/* Create char device */
+	devno = MKDEV(g_dev_major, g_dev_minor);
+	cdev_init(&cedar_devp->cdev, &cedardev_fops);
+	cedar_devp->cdev.owner = THIS_MODULE;
+	/* cedar_devp->cdev.ops = &cedardev_fops; */
+	ret = cdev_add(&cedar_devp->cdev, devno, 1);
+	if (ret)
+		dev_warn(cedar_devp->platform_dev, "cdev_add failed: %d\n",
+			 ret);
+	cedar_devp->class = class_create(THIS_MODULE, "cedar_dev");
+	cedar_devp->dev = device_create(cedar_devp->class, NULL, devno, NULL,
+					"cedar_dev");
+
+	timer_setup(&cedar_devp->cedar_engine_timer, cedar_engine_for_events,
+		    0);
+	timer_setup(&cedar_devp->cedar_engine_timer_rel,
+		    cedar_engine_for_timer_rel, 0);
+
+	mutex_init(&cedar_devp->lock_vdec);
+	mutex_init(&cedar_devp->lock_venc);
+	mutex_init(&cedar_devp->lock_jdec);
+	mutex_init(&cedar_devp->lock_00_reg);
+	mutex_init(&cedar_devp->lock_04_reg);
+	mutex_init(&cedar_devp->lock_mem);
+
+	ret = sunxi_ve_debug_register_driver();
+	if (ret) {
+		dev_err(cedar_devp->platform_dev,
+			"sunxi ve debug register driver failed!\n");
+		return ret;
+	}
+
+	memset(&ve_debug_proc_info, 0, sizeof(struct ve_debugfs_buffer));
+	for (i = 0; i < VE_DEBUGFS_MAX_CHANNEL; i++) {
+		ve_debug_proc_info.proc_buf[i] = NULL;
+	}
+	ve_debug_proc_info.data = kmalloc(
+		VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL, GFP_KERNEL);
+	if (!ve_debug_proc_info.data) {
+		dev_err(cedar_devp->platform_dev,
+			"kmalloc proc buffer failed!\n");
+		return -ENOMEM;
+	}
+	mutex_init(&ve_debug_proc_info.lock_proc);
+	dev_dbg(cedar_devp->platform_dev,
+		"ve_debug_proc_info:%p, data:%p, lock:%p\n",
+		&ve_debug_proc_info, ve_debug_proc_info.data,
+		&ve_debug_proc_info.lock_proc);
+
+	dev_dbg(cedar_devp->platform_dev, "install end\n");
+	return 0;
+
+err_ram_clk:
+	clk_disable_unprepare(cedar_devp->ram_clk);
+err_mod_clk:
+	clk_disable_unprepare(cedar_devp->mod_clk);
+err_ahb_clk:
+	clk_disable_unprepare(cedar_devp->ahb_clk);
+err_sram:
+	sunxi_sram_release(cedar_devp->platform_dev);
+err_mem:
+	kfree(cedar_devp);
+	return ret;
+}
+
+static void cedardev_exit(void)
+{
+	dev_t dev;
+	dev = MKDEV(g_dev_major, g_dev_minor);
+
+	free_irq(cedar_devp->irq, cedar_devp);
+
+	/* Destroy char device */
+	if (cedar_devp) {
+		cdev_del(&cedar_devp->cdev);
+		device_destroy(cedar_devp->class, dev);
+		class_destroy(cedar_devp->class);
+	}
+
+	reset_control_assert(cedar_devp->rstc);
+	clk_disable_unprepare(cedar_devp->ram_clk);
+	clk_disable_unprepare(cedar_devp->mod_clk);
+	clk_disable_unprepare(cedar_devp->ahb_clk);
+	sunxi_sram_release(cedar_devp->platform_dev);
+
+	unregister_chrdev_region(dev, 1);
+	if (cedar_devp) {
+		kfree(cedar_devp);
+	}
+
+	sunxi_ve_debug_unregister_driver();
+	kfree(ve_debug_proc_info.data);
+}
+
+static int sunxi_cedar_remove(struct platform_device *pdev)
+{
+	cedardev_exit();
+	return 0;
+}
+
+static int sunxi_cedar_probe(struct platform_device *pdev)
+{
+	return cedardev_init(pdev);
+}
+
+static struct cedar_variant sun8i_v3_quirk = {
+	.capabilities = CEDARV_ISP_NEW,
+	.mod_rate = 402000000,
+};
+
+static struct cedar_variant sun8i_h3_quirk = {
+	.capabilities = CEDARV_ISP_OLD,
+	.mod_rate = 402000000,
+};
+
+static struct cedar_variant suniv_f1c100s_quirk = {
+	.capabilities = CEDARV_ISP_OLD,
+	.mod_rate = 300000000,
+};
+
+static const struct of_device_id sunxi_cedar_ve_match[] = {
+	{ .compatible = "allwinner,sun8i-v3-cedar", .data = &sun8i_v3_quirk },
+	{ .compatible = "allwinner,sun8i-h3-cedar", .data = &sun8i_h3_quirk },
+	{ .compatible = "allwinner,suniv-f1c100s-cedar", .data = &suniv_f1c100s_quirk },
+	{}
+};
+MODULE_DEVICE_TABLE(of, sunxi_cedar_ve_match);
+
+static struct platform_driver sunxi_cedar_driver = {
+	.probe = sunxi_cedar_probe,
+	.remove = sunxi_cedar_remove,
+#if defined(CONFIG_PM)
+	.suspend = snd_sw_cedar_suspend,
+	.resume = snd_sw_cedar_resume,
+#endif
+	.driver = {
+		.name = "sunxi-cedar",
+		.owner = THIS_MODULE,
+		.of_match_table = sunxi_cedar_ve_match,
+	},
+};
+
+static int __init sunxi_cedar_init(void)
+{
+	return platform_driver_register(&sunxi_cedar_driver);
+}
+
+static void __exit sunxi_cedar_exit(void)
+{
+	platform_driver_unregister(&sunxi_cedar_driver);
+}
+
+module_init(sunxi_cedar_init);
+module_exit(sunxi_cedar_exit);
+
+MODULE_AUTHOR("Soft-Allwinner");
+MODULE_DESCRIPTION("User mode CEDAR device interface");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:cedarx-sunxi");
diff --git a/drivers/staging/media/sunxi/cedar/ve/cedar_ve.h b/drivers/staging/media/sunxi/cedar/ve/cedar_ve.h
new file mode 100644
index 000000000..4c69c3b53
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/cedar_ve.h
@@ -0,0 +1,85 @@
+/*
+ *    Filename: cedar_ve.h
+ *     Version: 0.01alpha
+ * Description: Video engine driver API. Do not modify it in user space.
+ *     License: GPLv2
+ *
+ *		Author  : xyliu <xyliu@allwinnertech.com>
+ *		Date    : 2016/04/13
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+/* Notice: this is the video engine driver API. Do not modify it in user space. */
+#ifndef _CEDAR_VE_H_
+#define _CEDAR_VE_H_
+
+enum IOCTL_CMD
+{
+	IOCTL_UNKOWN = 0x100,
+	IOCTL_GET_ENV_INFO,
+	IOCTL_WAIT_VE_DE,
+	IOCTL_WAIT_VE_EN,
+	IOCTL_RESET_VE,
+	IOCTL_ENABLE_VE,
+	IOCTL_DISABLE_VE,
+	IOCTL_SET_VE_FREQ,
+
+	IOCTL_CONFIG_AVS2 = 0x200,
+	IOCTL_GETVALUE_AVS2,
+	IOCTL_PAUSE_AVS2,
+	IOCTL_START_AVS2,
+	IOCTL_RESET_AVS2,
+	IOCTL_ADJUST_AVS2,
+	IOCTL_ENGINE_REQ,
+	IOCTL_ENGINE_REL,
+	IOCTL_ENGINE_CHECK_DELAY,
+	IOCTL_GET_IC_VER,
+	IOCTL_ADJUST_AVS2_ABS,
+	IOCTL_FLUSH_CACHE,
+	IOCTL_SET_REFCOUNT,
+	IOCTL_FLUSH_CACHE_ALL,
+	IOCTL_TEST_VERSION,
+
+	IOCTL_GET_LOCK = 0x310,
+	IOCTL_RELEASE_LOCK,
+
+	IOCTL_SET_VOL = 0x400,
+
+	IOCTL_WAIT_JPEG_DEC = 0x500,
+	/* get the VE ref_count, used by IPC to delete the semaphore */
+	IOCTL_GET_REFCOUNT,
+
+	/*for iommu*/
+	IOCTL_GET_IOMMU_ADDR,
+	IOCTL_FREE_IOMMU_ADDR,
+
+	/*for debug*/
+	IOCTL_SET_PROC_INFO,
+	IOCTL_STOP_PROC_INFO,
+	IOCTL_COPY_PROC_INFO,
+
+	IOCTL_SET_DRAM_HIGH_CHANNAL = 0x600,
+};
+
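+/*
+ * Lock types passed as the argument to IOCTL_GET_LOCK / IOCTL_RELEASE_LOCK.
+ * They are bit flags; the flags currently held by an open file are tracked
+ * in its ve_info.lock_flags.
+ */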
+#define VE_LOCK_VDEC 0x01
+#define VE_LOCK_VENC 0x02
+#define VE_LOCK_JDEC 0x04
+#define VE_LOCK_00_REG 0x08
+#define VE_LOCK_04_REG 0x10
+#define VE_LOCK_ERR 0x80
+
+struct cedarv_env_infomation
+{
+	unsigned int phymem_start;
+	int phymem_total_size;
+	unsigned long address_macc;
+};
+
+#endif
diff --git a/drivers/staging/media/sunxi/cedar/ve/cedar_ve_priv.h b/drivers/staging/media/sunxi/cedar/ve/cedar_ve_priv.h
new file mode 100644
index 000000000..ca347d416
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/cedar_ve_priv.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _CEDAR_VE_PRIV_H_
+#define _CEDAR_VE_PRIV_H_
+#include "ve_mem_list.h"
+
+#ifndef CEDARDEV_MAJOR
+#define CEDARDEV_MAJOR (150)
+#endif
+#ifndef CEDARDEV_MINOR
+#define CEDARDEV_MINOR (0)
+#endif
+
+#define VE_CLK_HIGH_WATER (900)
+#define VE_CLK_LOW_WATER (100)
+
+#define PRINTK_IOMMU_ADDR 0
+
+#define VE_DEBUGFS_MAX_CHANNEL 16
+#define VE_DEBUGFS_BUF_SIZE 1024
+
+#define CEDAR_RUN_LIST_NONULL -1
+#define CEDAR_NONBLOCK_TASK 0
+#define CEDAR_BLOCK_TASK 1
+#define CLK_REL_TIME 10000
+#define TIMER_CIRCLE 50
+#define TASK_INIT 0x00
+#define TASK_TIMEOUT 0x55
+#define TASK_RELEASE 0xaa
+#define SIG_CEDAR 35
+
+struct ve_debugfs_proc
+{
+	unsigned int len;
+	char data[VE_DEBUGFS_BUF_SIZE * VE_DEBUGFS_MAX_CHANNEL];
+};
+
+struct ve_debugfs_buffer
+{
+	unsigned char cur_channel_id;
+	unsigned int proc_len[VE_DEBUGFS_MAX_CHANNEL];
+	char *proc_buf[VE_DEBUGFS_MAX_CHANNEL];
+	char *data;
+	struct mutex lock_proc;
+};
+
+struct __cedarv_task
+{
+	int task_prio;
+	int ID;
+	unsigned long timeout;
+	unsigned int frametime;
+	unsigned int block_mode;
+};
+
+struct cedarv_engine_task
+{
+	struct __cedarv_task t;
+	struct list_head list;
+	struct task_struct *task_handle;
+	unsigned int status;
+	unsigned int running;
+	unsigned int is_first_task;
+};
+
+struct cedarv_engine_task_info
+{
+	int task_prio;
+	unsigned int frametime;
+	unsigned int total_time;
+};
+
+struct cedarv_regop
+{
+	unsigned long addr;
+	unsigned int value;
+};
+
+struct cedarv_env_infomation_compat
+{
+	unsigned int phymem_start;
+	int phymem_total_size;
+	uint32_t address_macc;
+};
+
+struct __cedarv_task_compat
+{
+	int task_prio;
+	int ID;
+	uint32_t timeout;
+	unsigned int frametime;
+	unsigned int block_mode;
+};
+
+struct cedarv_regop_compat
+{
+	uint32_t addr;
+	unsigned int value;
+};
+
+struct VE_PROC_INFO
+{
+	unsigned char channel_id;
+	unsigned int proc_info_len;
+};
+
+struct cedar_dev
+{
+	struct cdev cdev;		/* char device struct */
+	struct device *dev;		/* ptr to class device struct */
+	struct device *platform_dev;	/* ptr to the platform device */
+	struct class *class;		/* class for auto create device node */
+
+	struct semaphore sem; /* mutual exclusion semaphore */
+
+	wait_queue_head_t wq; /* wait queue for poll ops */
+
+	struct timer_list cedar_engine_timer;
+	struct timer_list cedar_engine_timer_rel;
+
+	uint32_t irq;		   /* cedar video engine irq number */
+	uint32_t de_irq_flag;  /* flag of video decoder engine irq generated */
+	uint32_t de_irq_value; /* value of video decoder engine irq */
+	uint32_t en_irq_flag;  /* flag of video encoder engine irq generated */
+	uint32_t en_irq_value; /* value of video encoder engine irq */
+	uint32_t irq_has_enable;
+	uint32_t ref_count;
+	int last_min_freq;
+
+	uint32_t jpeg_irq_flag;	 /* flag of video jpeg dec irq generated */
+	uint32_t jpeg_irq_value; /* value of video jpeg dec  irq */
+
+	struct mutex lock_vdec;
+	struct mutex lock_jdec;
+	struct mutex lock_venc;
+	struct mutex lock_00_reg;
+	struct mutex lock_04_reg;
+	struct aw_mem_list_head list; /* buffer list */
+	struct mutex lock_mem;
+
+	struct clk *ahb_clk;
+	struct clk *mod_clk;
+	struct clk *ram_clk;
+	struct reset_control *rstc;
+	int capabilities;
+	phys_addr_t phy_addr;
+
+	void __iomem *regs_macc;
+};
+
+struct ve_info
+{ /* each object will bind a new file handler */
+	unsigned int set_vol_flag;
+	struct mutex lock_flag_io;
+	uint32_t lock_flags; /* if flags is 0, means unlock status */
+};
+
+struct user_iommu_param
+{
+	int fd;
+	unsigned int iommu_addr;
+};
+
+struct cedarv_iommu_buffer
+{
+	struct aw_mem_list_head i_list;
+	int fd;
+	unsigned long iommu_addr;
+	struct dma_buf *dma_buf;
+	struct dma_buf_attachment *attachment;
+	struct sg_table *sgt;
+	int p_id;
+};
+
+struct cedar_variant
+{
+	int capabilities;
+	unsigned long mod_rate;
+};
+
+#define CEDARV_ISP_OLD (1 << 0)
+#define CEDARV_ISP_NEW (1 << 1)
+
+#endif
\ No newline at end of file
diff --git a/drivers/staging/media/sunxi/cedar/ve/ve_mem_list.h b/drivers/staging/media/sunxi/cedar/ve/ve_mem_list.h
new file mode 100644
index 000000000..1b1d478ce
--- /dev/null
+++ b/drivers/staging/media/sunxi/cedar/ve/ve_mem_list.h
@@ -0,0 +1,120 @@
+/*
+ *	  Filename: ve_mem_list.h
+ *	   Version: 0.01alpha
+ * Description: Video engine driver memory list management.
+ *	   License: GPLv2
+ *
+ *		Author	: yangcaoyuan <yangcaoyuan@allwinnertech.com>
+ *		Date	: 2017/04/04
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License version 2 as
+ *	published by the Free Software Foundation.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program;
+ *
+ */
+#ifndef _VE_MEM__LIST_H
+#define _VE_MEM__LIST_H
+
+#define ion_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+
+#define aw_container_of(aw_ptr, type, member) ({ \
+	const typeof(((type *)0)->member) *__mptr = (aw_ptr); \
+	(type *)((char *)__mptr - ion_offsetof(type, member)); })
+
+static inline void aw_prefetch(const void *x) {(void)x; }
+static inline void aw_prefetchw(const void *x) {(void)x; }
+
+#define AW_LIST_LOCATION1  ((void *) 0x00100100)
+#define AW_LIST_LOCATION2  ((void *) 0x00200200)
+
+struct aw_mem_list_head {
+	struct aw_mem_list_head *aw_next, *aw_prev;
+};
+
+#define AW_MEM_LIST_HEAD_INIT(aw_name) { &(aw_name), &(aw_name) }
+
+#define VE_LIST_HEAD(aw_name) \
+	struct aw_mem_list_head aw_name = AW_MEM_LIST_HEAD_INIT(aw_name)
+
+#define AW_MEM_INIT_LIST_HEAD(aw_ptr) do { \
+	(aw_ptr)->aw_next = (aw_ptr); \
+	(aw_ptr)->aw_prev = (aw_ptr); \
+} while (0)
+
+/*
+ * Insert a new entry between two known consecutive entries.
+ *
+ * This is only for internal list manipulation where we know
+ * the aw_prev/aw_next entries already!
+ */
+static inline void __aw_list_add(struct aw_mem_list_head *newList,
+				 struct aw_mem_list_head *aw_prev,
+				 struct aw_mem_list_head *aw_next)
+{
+	aw_next->aw_prev = newList;
+	newList->aw_next = aw_next;
+	newList->aw_prev = aw_prev;
+	aw_prev->aw_next = newList;
+}
+
+/**
+ * aw_mem_list_add - add a new entry
+ * @newList: new entry to be added
+ * @head: list head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
+static inline void aw_mem_list_add(struct aw_mem_list_head *newList,
+				   struct aw_mem_list_head *head)
+{
+	__aw_list_add(newList, head, head->aw_next);
+}
+
+/**
+ * aw_mem_list_add_tail - add a new entry
+ * @newList: new entry to be added
+ * @head: list head to add it before
+ *
+ * Insert a new entry before the specified head.
+ * This is useful for implementing queues.
+ */
+static inline void aw_mem_list_add_tail(struct aw_mem_list_head *newList,
+					struct aw_mem_list_head *head)
+{
+	__aw_list_add(newList, head->aw_prev, head);
+}
+
+static inline void __aw_mem_list_del(struct aw_mem_list_head *aw_prev,
+				     struct aw_mem_list_head *aw_next)
+{
+	aw_next->aw_prev = aw_prev;
+	aw_prev->aw_next = aw_next;
+}
+
+static inline void aw_mem_list_del(struct aw_mem_list_head *entry)
+{
+	__aw_mem_list_del(entry->aw_prev, entry->aw_next);
+	entry->aw_next = AW_LIST_LOCATION1;
+	entry->aw_prev = AW_LIST_LOCATION2;
+}
+
+#define aw_mem_list_entry(aw_ptr, type, member) aw_container_of(aw_ptr, type, member)
+
+#define aw_mem_list_for_each_safe(aw_pos, aw_n, aw_head) \
+	for (aw_pos = (aw_head)->aw_next, aw_n = aw_pos->aw_next; \
+	     aw_pos != (aw_head); \
+	     aw_pos = aw_n, aw_n = aw_pos->aw_next)
+
+#define aw_mem_list_for_each_entry(aw_pos, aw_head, member) \
+	for (aw_pos = aw_mem_list_entry((aw_head)->aw_next, typeof(*aw_pos), member); \
+	     aw_prefetch(aw_pos->member.aw_next), &aw_pos->member != (aw_head); \
+	     aw_pos = aw_mem_list_entry(aw_pos->member.aw_next, typeof(*aw_pos), member))
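+
+/*
+ * Example (mirrors the lookup in cedar_ve.c, where struct
+ * cedarv_iommu_buffer embeds a struct aw_mem_list_head named i_list):
+ *
+ *	struct cedarv_iommu_buffer *buf;
+ *
+ *	aw_mem_list_for_each_entry(buf, &cedar_devp->list, i_list) {
+ *		if (buf->fd == fd && buf->p_id == current->tgid)
+ *			break;
+ *	}
+ */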
+
+#endif
-- 
2.17.1

