| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-02-12 23:11:33 -0800 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-02-12 23:11:33 -0800 |
| commit | 6781f657257a44acac89cccf49251389daacc370 (patch) | |
| tree | b7d98ce2a16a89a8d5e228567e7e8fde12037969 | |
| parent | 8996802048908af004dbed7fd99ce6a4af6ae0ea (diff) | |
| download | ltsi-kernel-6781f657257a44acac89cccf49251389daacc370.tar.gz | |
android patches
Imported from 3.2->3.3-rc3 in Linus's tree.
41 files changed, 13002 insertions, 0 deletions
diff --git a/patches.android/android-0001-Revert-Staging-android-delete-android-drivers.patch b/patches.android/android-0001-Revert-Staging-android-delete-android-drivers.patch new file mode 100644 index 0000000000000..e278f5d6b3c06 --- /dev/null +++ b/patches.android/android-0001-Revert-Staging-android-delete-android-drivers.patch @@ -0,0 +1,5904 @@ +From 355b0502f6efea0ff9492753888772c96972d2a3 Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman <gregkh@suse.de> +Date: Wed, 30 Nov 2011 20:18:14 +0900 +Subject: Revert "Staging: android: delete android drivers" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 355b0502f6efea0ff9492753888772c96972d2a3 + +This reverts commit b0a0ccfad85b3657fe999805df65f5cfe634ab8a. + +Turns out I was wrong, we want these in the tree. + +Note, I've disabled the drivers from the build at the moment, so other +patches can be applied to fix some build issues due to internal api +changes since the code was removed from the tree. + +Cc: Arve Hjønnevåg <arve@android.com> +Cc: Brian Swetland <swetland@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +new file mode 100644 +index 0000000..eb67563 +--- /dev/null ++++ b/drivers/staging/android/Kconfig +@@ -0,0 +1,96 @@ ++menu "Android" ++ ++config ANDROID ++ bool "Android Drivers" ++ depends on BROKEN ++ default N ++ ---help--- ++ Enable support for various drivers needed on the Android platform ++ ++if ANDROID ++ ++config ANDROID_BINDER_IPC ++ bool "Android Binder IPC Driver" ++ default n ++ ++config ANDROID_LOGGER ++ tristate "Android log driver" ++ default n ++ ++config ANDROID_RAM_CONSOLE ++ bool "Android RAM buffer console" ++ default n ++ ++config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE ++ bool "Enable verbose console messages on Android RAM console" ++ default y ++ depends on ANDROID_RAM_CONSOLE ++ ++menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ bool "Android RAM Console Enable error correction" ++ default n ++ depends on ANDROID_RAM_CONSOLE ++ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT ++ select REED_SOLOMON ++ select REED_SOLOMON_ENC8 ++ select REED_SOLOMON_DEC8 ++ ++if ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ ++config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE ++ int "Android RAM Console Data data size" ++ default 128 ++ help ++ Must be a power of 2. 
++ ++config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE ++ int "Android RAM Console ECC size" ++ default 16 ++ ++config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE ++ int "Android RAM Console Symbol size" ++ default 8 ++ ++config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL ++ hex "Android RAM Console Polynomial" ++ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4) ++ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5) ++ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6) ++ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7) ++ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8) ++ ++endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ ++config ANDROID_RAM_CONSOLE_EARLY_INIT ++ bool "Start Android RAM console early" ++ default n ++ depends on ANDROID_RAM_CONSOLE ++ ++config ANDROID_RAM_CONSOLE_EARLY_ADDR ++ hex "Android RAM console virtual address" ++ default 0 ++ depends on ANDROID_RAM_CONSOLE_EARLY_INIT ++ ++config ANDROID_RAM_CONSOLE_EARLY_SIZE ++ hex "Android RAM console buffer size" ++ default 0 ++ depends on ANDROID_RAM_CONSOLE_EARLY_INIT ++ ++config ANDROID_TIMED_OUTPUT ++ bool "Timed output class driver" ++ default y ++ ++config ANDROID_TIMED_GPIO ++ tristate "Android timed gpio driver" ++ depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT ++ default n ++ ++config ANDROID_LOW_MEMORY_KILLER ++ bool "Android Low Memory Killer" ++ default N ++ ---help--- ++ Register processes to be killed when memory is low ++ ++endif # if ANDROID ++ ++endmenu +diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile +new file mode 100644 +index 0000000..8e057e6 +--- /dev/null ++++ b/drivers/staging/android/Makefile +@@ -0,0 +1,6 @@ ++obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o ++obj-$(CONFIG_ANDROID_LOGGER) += logger.o ++obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o ++obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o ++obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o ++obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o +diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO +new file mode 100644 +index 0000000..e59c5be +--- /dev/null ++++ b/drivers/staging/android/TODO +@@ -0,0 +1,10 @@ ++TODO: ++ - checkpatch.pl cleanups ++ - sparse fixes ++ - rename files to be not so "generic" ++ - make sure things build as modules properly ++ - add proper arch dependancies as needed ++ - audit userspace interfaces to make sure they are sane ++ ++Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc: ++Brian Swetland <swetland@google.com> +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +new file mode 100644 +index 0000000..99010d4 +--- /dev/null ++++ b/drivers/staging/android/binder.c +@@ -0,0 +1,3767 @@ ++/* binder.c ++ * ++ * Android IPC Subsystem ++ * ++ * Copyright (C) 2007-2008 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include <asm/cacheflush.h> ++#include <linux/fdtable.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/list.h> ++#include <linux/miscdevice.h> ++#include <linux/mm.h> ++#include <linux/module.h> ++#include <linux/mutex.h> ++#include <linux/nsproxy.h> ++#include <linux/poll.h> ++#include <linux/proc_fs.h> ++#include <linux/rbtree.h> ++#include <linux/sched.h> ++#include <linux/uaccess.h> ++#include <linux/vmalloc.h> ++ ++#include "binder.h" ++ ++static DEFINE_MUTEX(binder_lock); ++static DEFINE_MUTEX(binder_deferred_lock); ++ ++static HLIST_HEAD(binder_procs); ++static HLIST_HEAD(binder_deferred_list); ++static HLIST_HEAD(binder_dead_nodes); ++ ++static struct proc_dir_entry *binder_proc_dir_entry_root; ++static struct proc_dir_entry *binder_proc_dir_entry_proc; ++static struct binder_node *binder_context_mgr_node; ++static uid_t binder_context_mgr_uid = -1; ++static int binder_last_id; ++ ++static int binder_read_proc_proc(char *page, char **start, off_t off, ++ int count, int *eof, void *data); ++ ++/* This is only defined in include/asm-arm/sizes.h */ ++#ifndef SZ_1K ++#define SZ_1K 0x400 ++#endif ++ ++#ifndef SZ_4M ++#define SZ_4M 0x400000 ++#endif ++ ++#define FORBIDDEN_MMAP_FLAGS (VM_WRITE) ++ ++#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64) ++ ++enum { ++ BINDER_DEBUG_USER_ERROR = 1U << 0, ++ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, ++ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, ++ BINDER_DEBUG_OPEN_CLOSE = 1U << 3, ++ BINDER_DEBUG_DEAD_BINDER = 1U << 4, ++ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, ++ BINDER_DEBUG_READ_WRITE = 1U << 6, ++ BINDER_DEBUG_USER_REFS = 1U << 7, ++ BINDER_DEBUG_THREADS = 1U << 8, ++ BINDER_DEBUG_TRANSACTION = 1U << 9, ++ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, ++ BINDER_DEBUG_FREE_BUFFER = 1U << 11, ++ BINDER_DEBUG_INTERNAL_REFS = 1U << 12, ++ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, ++ BINDER_DEBUG_PRIORITY_CAP = 1U << 14, ++ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, ++}; ++static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | ++ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; ++module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); ++ ++static int binder_debug_no_lock; ++module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); ++ ++static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); ++static int binder_stop_on_user_error; ++ ++static int binder_set_stop_on_user_error(const char *val, ++ struct kernel_param *kp) ++{ ++ int ret; ++ ret = param_set_int(val, kp); ++ if (binder_stop_on_user_error < 2) ++ wake_up(&binder_user_error_wait); ++ return ret; ++} ++module_param_call(stop_on_user_error, binder_set_stop_on_user_error, ++ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO); ++ ++#define binder_debug(mask, x...) \ ++ do { \ ++ if (binder_debug_mask & mask) \ ++ printk(KERN_INFO x); \ ++ } while (0) ++ ++#define binder_user_error(x...) 
\ ++ do { \ ++ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \ ++ printk(KERN_INFO x); \ ++ if (binder_stop_on_user_error) \ ++ binder_stop_on_user_error = 2; \ ++ } while (0) ++ ++enum binder_stat_types { ++ BINDER_STAT_PROC, ++ BINDER_STAT_THREAD, ++ BINDER_STAT_NODE, ++ BINDER_STAT_REF, ++ BINDER_STAT_DEATH, ++ BINDER_STAT_TRANSACTION, ++ BINDER_STAT_TRANSACTION_COMPLETE, ++ BINDER_STAT_COUNT ++}; ++ ++struct binder_stats { ++ int br[_IOC_NR(BR_FAILED_REPLY) + 1]; ++ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1]; ++ int obj_created[BINDER_STAT_COUNT]; ++ int obj_deleted[BINDER_STAT_COUNT]; ++}; ++ ++static struct binder_stats binder_stats; ++ ++static inline void binder_stats_deleted(enum binder_stat_types type) ++{ ++ binder_stats.obj_deleted[type]++; ++} ++ ++static inline void binder_stats_created(enum binder_stat_types type) ++{ ++ binder_stats.obj_created[type]++; ++} ++ ++struct binder_transaction_log_entry { ++ int debug_id; ++ int call_type; ++ int from_proc; ++ int from_thread; ++ int target_handle; ++ int to_proc; ++ int to_thread; ++ int to_node; ++ int data_size; ++ int offsets_size; ++}; ++struct binder_transaction_log { ++ int next; ++ int full; ++ struct binder_transaction_log_entry entry[32]; ++}; ++static struct binder_transaction_log binder_transaction_log; ++static struct binder_transaction_log binder_transaction_log_failed; ++ ++static struct binder_transaction_log_entry *binder_transaction_log_add( ++ struct binder_transaction_log *log) ++{ ++ struct binder_transaction_log_entry *e; ++ e = &log->entry[log->next]; ++ memset(e, 0, sizeof(*e)); ++ log->next++; ++ if (log->next == ARRAY_SIZE(log->entry)) { ++ log->next = 0; ++ log->full = 1; ++ } ++ return e; ++} ++ ++struct binder_work { ++ struct list_head entry; ++ enum { ++ BINDER_WORK_TRANSACTION = 1, ++ BINDER_WORK_TRANSACTION_COMPLETE, ++ BINDER_WORK_NODE, ++ BINDER_WORK_DEAD_BINDER, ++ BINDER_WORK_DEAD_BINDER_AND_CLEAR, ++ BINDER_WORK_CLEAR_DEATH_NOTIFICATION, ++ } type; ++}; ++ ++struct binder_node { ++ int debug_id; ++ struct binder_work work; ++ union { ++ struct rb_node rb_node; ++ struct hlist_node dead_node; ++ }; ++ struct binder_proc *proc; ++ struct hlist_head refs; ++ int internal_strong_refs; ++ int local_weak_refs; ++ int local_strong_refs; ++ void __user *ptr; ++ void __user *cookie; ++ unsigned has_strong_ref:1; ++ unsigned pending_strong_ref:1; ++ unsigned has_weak_ref:1; ++ unsigned pending_weak_ref:1; ++ unsigned has_async_transaction:1; ++ unsigned accept_fds:1; ++ unsigned min_priority:8; ++ struct list_head async_todo; ++}; ++ ++struct binder_ref_death { ++ struct binder_work work; ++ void __user *cookie; ++}; ++ ++struct binder_ref { ++ /* Lookups needed: */ ++ /* node + proc => ref (transaction) */ ++ /* desc + proc => ref (transaction, inc/dec ref) */ ++ /* node => refs + procs (proc exit) */ ++ int debug_id; ++ struct rb_node rb_node_desc; ++ struct rb_node rb_node_node; ++ struct hlist_node node_entry; ++ struct binder_proc *proc; ++ struct binder_node *node; ++ uint32_t desc; ++ int strong; ++ int weak; ++ struct binder_ref_death *death; ++}; ++ ++struct binder_buffer { ++ struct list_head entry; /* free and allocated entries by addesss */ ++ struct rb_node rb_node; /* free entry by size or allocated entry */ ++ /* by address */ ++ unsigned free:1; ++ unsigned allow_user_free:1; ++ unsigned async_transaction:1; ++ unsigned debug_id:29; ++ ++ struct binder_transaction *transaction; ++ ++ struct binder_node *target_node; ++ size_t data_size; ++ size_t offsets_size; ++ uint8_t data[0]; ++}; 
++ ++enum binder_deferred_state { ++ BINDER_DEFERRED_PUT_FILES = 0x01, ++ BINDER_DEFERRED_FLUSH = 0x02, ++ BINDER_DEFERRED_RELEASE = 0x04, ++}; ++ ++struct binder_proc { ++ struct hlist_node proc_node; ++ struct rb_root threads; ++ struct rb_root nodes; ++ struct rb_root refs_by_desc; ++ struct rb_root refs_by_node; ++ int pid; ++ struct vm_area_struct *vma; ++ struct task_struct *tsk; ++ struct files_struct *files; ++ struct hlist_node deferred_work_node; ++ int deferred_work; ++ void *buffer; ++ ptrdiff_t user_buffer_offset; ++ ++ struct list_head buffers; ++ struct rb_root free_buffers; ++ struct rb_root allocated_buffers; ++ size_t free_async_space; ++ ++ struct page **pages; ++ size_t buffer_size; ++ uint32_t buffer_free; ++ struct list_head todo; ++ wait_queue_head_t wait; ++ struct binder_stats stats; ++ struct list_head delivered_death; ++ int max_threads; ++ int requested_threads; ++ int requested_threads_started; ++ int ready_threads; ++ long default_priority; ++}; ++ ++enum { ++ BINDER_LOOPER_STATE_REGISTERED = 0x01, ++ BINDER_LOOPER_STATE_ENTERED = 0x02, ++ BINDER_LOOPER_STATE_EXITED = 0x04, ++ BINDER_LOOPER_STATE_INVALID = 0x08, ++ BINDER_LOOPER_STATE_WAITING = 0x10, ++ BINDER_LOOPER_STATE_NEED_RETURN = 0x20 ++}; ++ ++struct binder_thread { ++ struct binder_proc *proc; ++ struct rb_node rb_node; ++ int pid; ++ int looper; ++ struct binder_transaction *transaction_stack; ++ struct list_head todo; ++ uint32_t return_error; /* Write failed, return error code in read buf */ ++ uint32_t return_error2; /* Write failed, return error code in read */ ++ /* buffer. Used when sending a reply to a dead process that */ ++ /* we are also waiting on */ ++ wait_queue_head_t wait; ++ struct binder_stats stats; ++}; ++ ++struct binder_transaction { ++ int debug_id; ++ struct binder_work work; ++ struct binder_thread *from; ++ struct binder_transaction *from_parent; ++ struct binder_proc *to_proc; ++ struct binder_thread *to_thread; ++ struct binder_transaction *to_parent; ++ unsigned need_reply:1; ++ /* unsigned is_dead:1; */ /* not used at the moment */ ++ ++ struct binder_buffer *buffer; ++ unsigned int code; ++ unsigned int flags; ++ long priority; ++ long saved_priority; ++ uid_t sender_euid; ++}; ++ ++static void ++binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); ++ ++/* ++ * copied from get_unused_fd_flags ++ */ ++int task_get_unused_fd_flags(struct binder_proc *proc, int flags) ++{ ++ struct files_struct *files = proc->files; ++ int fd, error; ++ struct fdtable *fdt; ++ unsigned long rlim_cur; ++ unsigned long irqs; ++ ++ if (files == NULL) ++ return -ESRCH; ++ ++ error = -EMFILE; ++ spin_lock(&files->file_lock); ++ ++repeat: ++ fdt = files_fdtable(files); ++ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, ++ files->next_fd); ++ ++ /* ++ * N.B. For clone tasks sharing a files structure, this test ++ * will limit the total number of files that can be opened. ++ */ ++ rlim_cur = 0; ++ if (lock_task_sighand(proc->tsk, &irqs)) { ++ rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur; ++ unlock_task_sighand(proc->tsk, &irqs); ++ } ++ if (fd >= rlim_cur) ++ goto out; ++ ++ /* Do we need to expand the fd array or fd set? */ ++ error = expand_files(files, fd); ++ if (error < 0) ++ goto out; ++ ++ if (error) { ++ /* ++ * If we needed to expand the fs array we ++ * might have blocked - try again. 
++ */ ++ error = -EMFILE; ++ goto repeat; ++ } ++ ++ FD_SET(fd, fdt->open_fds); ++ if (flags & O_CLOEXEC) ++ FD_SET(fd, fdt->close_on_exec); ++ else ++ FD_CLR(fd, fdt->close_on_exec); ++ files->next_fd = fd + 1; ++#if 1 ++ /* Sanity check */ ++ if (fdt->fd[fd] != NULL) { ++ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd); ++ fdt->fd[fd] = NULL; ++ } ++#endif ++ error = fd; ++ ++out: ++ spin_unlock(&files->file_lock); ++ return error; ++} ++ ++/* ++ * copied from fd_install ++ */ ++static void task_fd_install( ++ struct binder_proc *proc, unsigned int fd, struct file *file) ++{ ++ struct files_struct *files = proc->files; ++ struct fdtable *fdt; ++ ++ if (files == NULL) ++ return; ++ ++ spin_lock(&files->file_lock); ++ fdt = files_fdtable(files); ++ BUG_ON(fdt->fd[fd] != NULL); ++ rcu_assign_pointer(fdt->fd[fd], file); ++ spin_unlock(&files->file_lock); ++} ++ ++/* ++ * copied from __put_unused_fd in open.c ++ */ ++static void __put_unused_fd(struct files_struct *files, unsigned int fd) ++{ ++ struct fdtable *fdt = files_fdtable(files); ++ __FD_CLR(fd, fdt->open_fds); ++ if (fd < files->next_fd) ++ files->next_fd = fd; ++} ++ ++/* ++ * copied from sys_close ++ */ ++static long task_close_fd(struct binder_proc *proc, unsigned int fd) ++{ ++ struct file *filp; ++ struct files_struct *files = proc->files; ++ struct fdtable *fdt; ++ int retval; ++ ++ if (files == NULL) ++ return -ESRCH; ++ ++ spin_lock(&files->file_lock); ++ fdt = files_fdtable(files); ++ if (fd >= fdt->max_fds) ++ goto out_unlock; ++ filp = fdt->fd[fd]; ++ if (!filp) ++ goto out_unlock; ++ rcu_assign_pointer(fdt->fd[fd], NULL); ++ FD_CLR(fd, fdt->close_on_exec); ++ __put_unused_fd(files, fd); ++ spin_unlock(&files->file_lock); ++ retval = filp_close(filp, files); ++ ++ /* can't restart close syscall because file table entry was cleared */ ++ if (unlikely(retval == -ERESTARTSYS || ++ retval == -ERESTARTNOINTR || ++ retval == -ERESTARTNOHAND || ++ retval == -ERESTART_RESTARTBLOCK)) ++ retval = -EINTR; ++ ++ return retval; ++ ++out_unlock: ++ spin_unlock(&files->file_lock); ++ return -EBADF; ++} ++ ++static void binder_set_nice(long nice) ++{ ++ long min_nice; ++ if (can_nice(current, nice)) { ++ set_user_nice(current, nice); ++ return; ++ } ++ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur; ++ binder_debug(BINDER_DEBUG_PRIORITY_CAP, ++ "binder: %d: nice value %ld not allowed use " ++ "%ld instead\n", current->pid, nice, min_nice); ++ set_user_nice(current, min_nice); ++ if (min_nice < 20) ++ return; ++ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid); ++} ++ ++static size_t binder_buffer_size(struct binder_proc *proc, ++ struct binder_buffer *buffer) ++{ ++ if (list_is_last(&buffer->entry, &proc->buffers)) ++ return proc->buffer + proc->buffer_size - (void *)buffer->data; ++ else ++ return (size_t)list_entry(buffer->entry.next, ++ struct binder_buffer, entry) - (size_t)buffer->data; ++} ++ ++static void binder_insert_free_buffer(struct binder_proc *proc, ++ struct binder_buffer *new_buffer) ++{ ++ struct rb_node **p = &proc->free_buffers.rb_node; ++ struct rb_node *parent = NULL; ++ struct binder_buffer *buffer; ++ size_t buffer_size; ++ size_t new_buffer_size; ++ ++ BUG_ON(!new_buffer->free); ++ ++ new_buffer_size = binder_buffer_size(proc, new_buffer); ++ ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: add free buffer, size %zd, " ++ "at %p\n", proc->pid, new_buffer_size, new_buffer); ++ ++ while (*p) { ++ parent = *p; ++ buffer = rb_entry(parent, struct binder_buffer, 
rb_node); ++ BUG_ON(!buffer->free); ++ ++ buffer_size = binder_buffer_size(proc, buffer); ++ ++ if (new_buffer_size < buffer_size) ++ p = &parent->rb_left; ++ else ++ p = &parent->rb_right; ++ } ++ rb_link_node(&new_buffer->rb_node, parent, p); ++ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers); ++} ++ ++static void binder_insert_allocated_buffer(struct binder_proc *proc, ++ struct binder_buffer *new_buffer) ++{ ++ struct rb_node **p = &proc->allocated_buffers.rb_node; ++ struct rb_node *parent = NULL; ++ struct binder_buffer *buffer; ++ ++ BUG_ON(new_buffer->free); ++ ++ while (*p) { ++ parent = *p; ++ buffer = rb_entry(parent, struct binder_buffer, rb_node); ++ BUG_ON(buffer->free); ++ ++ if (new_buffer < buffer) ++ p = &parent->rb_left; ++ else if (new_buffer > buffer) ++ p = &parent->rb_right; ++ else ++ BUG(); ++ } ++ rb_link_node(&new_buffer->rb_node, parent, p); ++ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers); ++} ++ ++static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc, ++ void __user *user_ptr) ++{ ++ struct rb_node *n = proc->allocated_buffers.rb_node; ++ struct binder_buffer *buffer; ++ struct binder_buffer *kern_ptr; ++ ++ kern_ptr = user_ptr - proc->user_buffer_offset ++ - offsetof(struct binder_buffer, data); ++ ++ while (n) { ++ buffer = rb_entry(n, struct binder_buffer, rb_node); ++ BUG_ON(buffer->free); ++ ++ if (kern_ptr < buffer) ++ n = n->rb_left; ++ else if (kern_ptr > buffer) ++ n = n->rb_right; ++ else ++ return buffer; ++ } ++ return NULL; ++} ++ ++static int binder_update_page_range(struct binder_proc *proc, int allocate, ++ void *start, void *end, ++ struct vm_area_struct *vma) ++{ ++ void *page_addr; ++ unsigned long user_page_addr; ++ struct vm_struct tmp_area; ++ struct page **page; ++ struct mm_struct *mm; ++ ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: %s pages %p-%p\n", proc->pid, ++ allocate ? "allocate" : "free", start, end); ++ ++ if (end <= start) ++ return 0; ++ ++ if (vma) ++ mm = NULL; ++ else ++ mm = get_task_mm(proc->tsk); ++ ++ if (mm) { ++ down_write(&mm->mmap_sem); ++ vma = proc->vma; ++ } ++ ++ if (allocate == 0) ++ goto free_range; ++ ++ if (vma == NULL) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to " ++ "map pages in userspace, no vma\n", proc->pid); ++ goto err_no_vma; ++ } ++ ++ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { ++ int ret; ++ struct page **page_array_ptr; ++ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; ++ ++ BUG_ON(*page); ++ *page = alloc_page(GFP_KERNEL | __GFP_ZERO); ++ if (*page == NULL) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ++ "for page at %p\n", proc->pid, page_addr); ++ goto err_alloc_page_failed; ++ } ++ tmp_area.addr = page_addr; ++ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? 
*/; ++ page_array_ptr = page; ++ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); ++ if (ret) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ++ "to map page at %p in kernel\n", ++ proc->pid, page_addr); ++ goto err_map_kernel_failed; ++ } ++ user_page_addr = ++ (uintptr_t)page_addr + proc->user_buffer_offset; ++ ret = vm_insert_page(vma, user_page_addr, page[0]); ++ if (ret) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf failed " ++ "to map page at %lx in userspace\n", ++ proc->pid, user_page_addr); ++ goto err_vm_insert_page_failed; ++ } ++ /* vm_insert_page does not seem to increment the refcount */ ++ } ++ if (mm) { ++ up_write(&mm->mmap_sem); ++ mmput(mm); ++ } ++ return 0; ++ ++free_range: ++ for (page_addr = end - PAGE_SIZE; page_addr >= start; ++ page_addr -= PAGE_SIZE) { ++ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE]; ++ if (vma) ++ zap_page_range(vma, (uintptr_t)page_addr + ++ proc->user_buffer_offset, PAGE_SIZE, NULL); ++err_vm_insert_page_failed: ++ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE); ++err_map_kernel_failed: ++ __free_page(*page); ++ *page = NULL; ++err_alloc_page_failed: ++ ; ++ } ++err_no_vma: ++ if (mm) { ++ up_write(&mm->mmap_sem); ++ mmput(mm); ++ } ++ return -ENOMEM; ++} ++ ++static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, ++ size_t data_size, ++ size_t offsets_size, int is_async) ++{ ++ struct rb_node *n = proc->free_buffers.rb_node; ++ struct binder_buffer *buffer; ++ size_t buffer_size; ++ struct rb_node *best_fit = NULL; ++ void *has_page_addr; ++ void *end_page_addr; ++ size_t size; ++ ++ if (proc->vma == NULL) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n", ++ proc->pid); ++ return NULL; ++ } ++ ++ size = ALIGN(data_size, sizeof(void *)) + ++ ALIGN(offsets_size, sizeof(void *)); ++ ++ if (size < data_size || size < offsets_size) { ++ binder_user_error("binder: %d: got transaction with invalid " ++ "size %zd-%zd\n", proc->pid, data_size, offsets_size); ++ return NULL; ++ } ++ ++ if (is_async && ++ proc->free_async_space < size + sizeof(struct binder_buffer)) { ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: binder_alloc_buf size %zd" ++ "failed, no async space left\n", proc->pid, size); ++ return NULL; ++ } ++ ++ while (n) { ++ buffer = rb_entry(n, struct binder_buffer, rb_node); ++ BUG_ON(!buffer->free); ++ buffer_size = binder_buffer_size(proc, buffer); ++ ++ if (size < buffer_size) { ++ best_fit = n; ++ n = n->rb_left; ++ } else if (size > buffer_size) ++ n = n->rb_right; ++ else { ++ best_fit = n; ++ break; ++ } ++ } ++ if (best_fit == NULL) { ++ printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, " ++ "no address space\n", proc->pid, size); ++ return NULL; ++ } ++ if (n == NULL) { ++ buffer = rb_entry(best_fit, struct binder_buffer, rb_node); ++ buffer_size = binder_buffer_size(proc, buffer); ++ } ++ ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: binder_alloc_buf size %zd got buff" ++ "er %p size %zd\n", proc->pid, size, buffer, buffer_size); ++ ++ has_page_addr = ++ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK); ++ if (n == NULL) { ++ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size) ++ buffer_size = size; /* no room for other buffers */ ++ else ++ buffer_size = size + sizeof(struct binder_buffer); ++ } ++ end_page_addr = ++ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size); ++ if (end_page_addr > has_page_addr) ++ end_page_addr = has_page_addr; ++ if (binder_update_page_range(proc, 1, ++ 
(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL)) ++ return NULL; ++ ++ rb_erase(best_fit, &proc->free_buffers); ++ buffer->free = 0; ++ binder_insert_allocated_buffer(proc, buffer); ++ if (buffer_size != size) { ++ struct binder_buffer *new_buffer = (void *)buffer->data + size; ++ list_add(&new_buffer->entry, &buffer->entry); ++ new_buffer->free = 1; ++ binder_insert_free_buffer(proc, new_buffer); ++ } ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: binder_alloc_buf size %zd got " ++ "%p\n", proc->pid, size, buffer); ++ buffer->data_size = data_size; ++ buffer->offsets_size = offsets_size; ++ buffer->async_transaction = is_async; ++ if (is_async) { ++ proc->free_async_space -= size + sizeof(struct binder_buffer); ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, ++ "binder: %d: binder_alloc_buf size %zd " ++ "async free %zd\n", proc->pid, size, ++ proc->free_async_space); ++ } ++ ++ return buffer; ++} ++ ++static void *buffer_start_page(struct binder_buffer *buffer) ++{ ++ return (void *)((uintptr_t)buffer & PAGE_MASK); ++} ++ ++static void *buffer_end_page(struct binder_buffer *buffer) ++{ ++ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK); ++} ++ ++static void binder_delete_free_buffer(struct binder_proc *proc, ++ struct binder_buffer *buffer) ++{ ++ struct binder_buffer *prev, *next = NULL; ++ int free_page_end = 1; ++ int free_page_start = 1; ++ ++ BUG_ON(proc->buffers.next == &buffer->entry); ++ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry); ++ BUG_ON(!prev->free); ++ if (buffer_end_page(prev) == buffer_start_page(buffer)) { ++ free_page_start = 0; ++ if (buffer_end_page(prev) == buffer_end_page(buffer)) ++ free_page_end = 0; ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: merge free, buffer %p " ++ "share page with %p\n", proc->pid, buffer, prev); ++ } ++ ++ if (!list_is_last(&buffer->entry, &proc->buffers)) { ++ next = list_entry(buffer->entry.next, ++ struct binder_buffer, entry); ++ if (buffer_start_page(next) == buffer_end_page(buffer)) { ++ free_page_end = 0; ++ if (buffer_start_page(next) == ++ buffer_start_page(buffer)) ++ free_page_start = 0; ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: merge free, buffer" ++ " %p share page with %p\n", proc->pid, ++ buffer, prev); ++ } ++ } ++ list_del(&buffer->entry); ++ if (free_page_start || free_page_end) { ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: merge free, buffer %p do " ++ "not share page%s%s with with %p or %p\n", ++ proc->pid, buffer, free_page_start ? "" : " end", ++ free_page_end ? "" : " start", prev, next); ++ binder_update_page_range(proc, 0, free_page_start ? ++ buffer_start_page(buffer) : buffer_end_page(buffer), ++ (free_page_end ? 
buffer_end_page(buffer) : ++ buffer_start_page(buffer)) + PAGE_SIZE, NULL); ++ } ++} ++ ++static void binder_free_buf(struct binder_proc *proc, ++ struct binder_buffer *buffer) ++{ ++ size_t size, buffer_size; ++ ++ buffer_size = binder_buffer_size(proc, buffer); ++ ++ size = ALIGN(buffer->data_size, sizeof(void *)) + ++ ALIGN(buffer->offsets_size, sizeof(void *)); ++ ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder: %d: binder_free_buf %p size %zd buffer" ++ "_size %zd\n", proc->pid, buffer, size, buffer_size); ++ ++ BUG_ON(buffer->free); ++ BUG_ON(size > buffer_size); ++ BUG_ON(buffer->transaction != NULL); ++ BUG_ON((void *)buffer < proc->buffer); ++ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size); ++ ++ if (buffer->async_transaction) { ++ proc->free_async_space += size + sizeof(struct binder_buffer); ++ ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, ++ "binder: %d: binder_free_buf size %zd " ++ "async free %zd\n", proc->pid, size, ++ proc->free_async_space); ++ } ++ ++ binder_update_page_range(proc, 0, ++ (void *)PAGE_ALIGN((uintptr_t)buffer->data), ++ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK), ++ NULL); ++ rb_erase(&buffer->rb_node, &proc->allocated_buffers); ++ buffer->free = 1; ++ if (!list_is_last(&buffer->entry, &proc->buffers)) { ++ struct binder_buffer *next = list_entry(buffer->entry.next, ++ struct binder_buffer, entry); ++ if (next->free) { ++ rb_erase(&next->rb_node, &proc->free_buffers); ++ binder_delete_free_buffer(proc, next); ++ } ++ } ++ if (proc->buffers.next != &buffer->entry) { ++ struct binder_buffer *prev = list_entry(buffer->entry.prev, ++ struct binder_buffer, entry); ++ if (prev->free) { ++ binder_delete_free_buffer(proc, buffer); ++ rb_erase(&prev->rb_node, &proc->free_buffers); ++ buffer = prev; ++ } ++ } ++ binder_insert_free_buffer(proc, buffer); ++} ++ ++static struct binder_node *binder_get_node(struct binder_proc *proc, ++ void __user *ptr) ++{ ++ struct rb_node *n = proc->nodes.rb_node; ++ struct binder_node *node; ++ ++ while (n) { ++ node = rb_entry(n, struct binder_node, rb_node); ++ ++ if (ptr < node->ptr) ++ n = n->rb_left; ++ else if (ptr > node->ptr) ++ n = n->rb_right; ++ else ++ return node; ++ } ++ return NULL; ++} ++ ++static struct binder_node *binder_new_node(struct binder_proc *proc, ++ void __user *ptr, ++ void __user *cookie) ++{ ++ struct rb_node **p = &proc->nodes.rb_node; ++ struct rb_node *parent = NULL; ++ struct binder_node *node; ++ ++ while (*p) { ++ parent = *p; ++ node = rb_entry(parent, struct binder_node, rb_node); ++ ++ if (ptr < node->ptr) ++ p = &(*p)->rb_left; ++ else if (ptr > node->ptr) ++ p = &(*p)->rb_right; ++ else ++ return NULL; ++ } ++ ++ node = kzalloc(sizeof(*node), GFP_KERNEL); ++ if (node == NULL) ++ return NULL; ++ binder_stats_created(BINDER_STAT_NODE); ++ rb_link_node(&node->rb_node, parent, p); ++ rb_insert_color(&node->rb_node, &proc->nodes); ++ node->debug_id = ++binder_last_id; ++ node->proc = proc; ++ node->ptr = ptr; ++ node->cookie = cookie; ++ node->work.type = BINDER_WORK_NODE; ++ INIT_LIST_HEAD(&node->work.entry); ++ INIT_LIST_HEAD(&node->async_todo); ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d:%d node %d u%p c%p created\n", ++ proc->pid, current->pid, node->debug_id, ++ node->ptr, node->cookie); ++ return node; ++} ++ ++static int binder_inc_node(struct binder_node *node, int strong, int internal, ++ struct list_head *target_list) ++{ ++ if (strong) { ++ if (internal) { ++ if (target_list == NULL && ++ node->internal_strong_refs == 0 && ++ !(node 
== binder_context_mgr_node && ++ node->has_strong_ref)) { ++ printk(KERN_ERR "binder: invalid inc strong " ++ "node for %d\n", node->debug_id); ++ return -EINVAL; ++ } ++ node->internal_strong_refs++; ++ } else ++ node->local_strong_refs++; ++ if (!node->has_strong_ref && target_list) { ++ list_del_init(&node->work.entry); ++ list_add_tail(&node->work.entry, target_list); ++ } ++ } else { ++ if (!internal) ++ node->local_weak_refs++; ++ if (!node->has_weak_ref && list_empty(&node->work.entry)) { ++ if (target_list == NULL) { ++ printk(KERN_ERR "binder: invalid inc weak node " ++ "for %d\n", node->debug_id); ++ return -EINVAL; ++ } ++ list_add_tail(&node->work.entry, target_list); ++ } ++ } ++ return 0; ++} ++ ++static int binder_dec_node(struct binder_node *node, int strong, int internal) ++{ ++ if (strong) { ++ if (internal) ++ node->internal_strong_refs--; ++ else ++ node->local_strong_refs--; ++ if (node->local_strong_refs || node->internal_strong_refs) ++ return 0; ++ } else { ++ if (!internal) ++ node->local_weak_refs--; ++ if (node->local_weak_refs || !hlist_empty(&node->refs)) ++ return 0; ++ } ++ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) { ++ if (list_empty(&node->work.entry)) { ++ list_add_tail(&node->work.entry, &node->proc->todo); ++ wake_up_interruptible(&node->proc->wait); ++ } ++ } else { ++ if (hlist_empty(&node->refs) && !node->local_strong_refs && ++ !node->local_weak_refs) { ++ list_del_init(&node->work.entry); ++ if (node->proc) { ++ rb_erase(&node->rb_node, &node->proc->nodes); ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: refless node %d deleted\n", ++ node->debug_id); ++ } else { ++ hlist_del(&node->dead_node); ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: dead node %d deleted\n", ++ node->debug_id); ++ } ++ kfree(node); ++ binder_stats_deleted(BINDER_STAT_NODE); ++ } ++ } ++ ++ return 0; ++} ++ ++ ++static struct binder_ref *binder_get_ref(struct binder_proc *proc, ++ uint32_t desc) ++{ ++ struct rb_node *n = proc->refs_by_desc.rb_node; ++ struct binder_ref *ref; ++ ++ while (n) { ++ ref = rb_entry(n, struct binder_ref, rb_node_desc); ++ ++ if (desc < ref->desc) ++ n = n->rb_left; ++ else if (desc > ref->desc) ++ n = n->rb_right; ++ else ++ return ref; ++ } ++ return NULL; ++} ++ ++static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc, ++ struct binder_node *node) ++{ ++ struct rb_node *n; ++ struct rb_node **p = &proc->refs_by_node.rb_node; ++ struct rb_node *parent = NULL; ++ struct binder_ref *ref, *new_ref; ++ ++ while (*p) { ++ parent = *p; ++ ref = rb_entry(parent, struct binder_ref, rb_node_node); ++ ++ if (node < ref->node) ++ p = &(*p)->rb_left; ++ else if (node > ref->node) ++ p = &(*p)->rb_right; ++ else ++ return ref; ++ } ++ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); ++ if (new_ref == NULL) ++ return NULL; ++ binder_stats_created(BINDER_STAT_REF); ++ new_ref->debug_id = ++binder_last_id; ++ new_ref->proc = proc; ++ new_ref->node = node; ++ rb_link_node(&new_ref->rb_node_node, parent, p); ++ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); ++ ++ new_ref->desc = (node == binder_context_mgr_node) ? 
0 : 1; ++ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ++ ref = rb_entry(n, struct binder_ref, rb_node_desc); ++ if (ref->desc > new_ref->desc) ++ break; ++ new_ref->desc = ref->desc + 1; ++ } ++ ++ p = &proc->refs_by_desc.rb_node; ++ while (*p) { ++ parent = *p; ++ ref = rb_entry(parent, struct binder_ref, rb_node_desc); ++ ++ if (new_ref->desc < ref->desc) ++ p = &(*p)->rb_left; ++ else if (new_ref->desc > ref->desc) ++ p = &(*p)->rb_right; ++ else ++ BUG(); ++ } ++ rb_link_node(&new_ref->rb_node_desc, parent, p); ++ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); ++ if (node) { ++ hlist_add_head(&new_ref->node_entry, &node->refs); ++ ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d new ref %d desc %d for " ++ "node %d\n", proc->pid, new_ref->debug_id, ++ new_ref->desc, node->debug_id); ++ } else { ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d new ref %d desc %d for " ++ "dead node\n", proc->pid, new_ref->debug_id, ++ new_ref->desc); ++ } ++ return new_ref; ++} ++ ++static void binder_delete_ref(struct binder_ref *ref) ++{ ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d delete ref %d desc %d for " ++ "node %d\n", ref->proc->pid, ref->debug_id, ++ ref->desc, ref->node->debug_id); ++ ++ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); ++ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); ++ if (ref->strong) ++ binder_dec_node(ref->node, 1, 1); ++ hlist_del(&ref->node_entry); ++ binder_dec_node(ref->node, 0, 1); ++ if (ref->death) { ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder: %d delete ref %d desc %d " ++ "has death notification\n", ref->proc->pid, ++ ref->debug_id, ref->desc); ++ list_del(&ref->death->work.entry); ++ kfree(ref->death); ++ binder_stats_deleted(BINDER_STAT_DEATH); ++ } ++ kfree(ref); ++ binder_stats_deleted(BINDER_STAT_REF); ++} ++ ++static int binder_inc_ref(struct binder_ref *ref, int strong, ++ struct list_head *target_list) ++{ ++ int ret; ++ if (strong) { ++ if (ref->strong == 0) { ++ ret = binder_inc_node(ref->node, 1, 1, target_list); ++ if (ret) ++ return ret; ++ } ++ ref->strong++; ++ } else { ++ if (ref->weak == 0) { ++ ret = binder_inc_node(ref->node, 0, 1, target_list); ++ if (ret) ++ return ret; ++ } ++ ref->weak++; ++ } ++ return 0; ++} ++ ++ ++static int binder_dec_ref(struct binder_ref *ref, int strong) ++{ ++ if (strong) { ++ if (ref->strong == 0) { ++ binder_user_error("binder: %d invalid dec strong, " ++ "ref %d desc %d s %d w %d\n", ++ ref->proc->pid, ref->debug_id, ++ ref->desc, ref->strong, ref->weak); ++ return -EINVAL; ++ } ++ ref->strong--; ++ if (ref->strong == 0) { ++ int ret; ++ ret = binder_dec_node(ref->node, strong, 1); ++ if (ret) ++ return ret; ++ } ++ } else { ++ if (ref->weak == 0) { ++ binder_user_error("binder: %d invalid dec weak, " ++ "ref %d desc %d s %d w %d\n", ++ ref->proc->pid, ref->debug_id, ++ ref->desc, ref->strong, ref->weak); ++ return -EINVAL; ++ } ++ ref->weak--; ++ } ++ if (ref->strong == 0 && ref->weak == 0) ++ binder_delete_ref(ref); ++ return 0; ++} ++ ++static void binder_pop_transaction(struct binder_thread *target_thread, ++ struct binder_transaction *t) ++{ ++ if (target_thread) { ++ BUG_ON(target_thread->transaction_stack != t); ++ BUG_ON(target_thread->transaction_stack->from != target_thread); ++ target_thread->transaction_stack = ++ target_thread->transaction_stack->from_parent; ++ t->from = NULL; ++ } ++ t->need_reply = 0; ++ if (t->buffer) ++ t->buffer->transaction = NULL; ++ kfree(t); ++ 
binder_stats_deleted(BINDER_STAT_TRANSACTION); ++} ++ ++static void binder_send_failed_reply(struct binder_transaction *t, ++ uint32_t error_code) ++{ ++ struct binder_thread *target_thread; ++ BUG_ON(t->flags & TF_ONE_WAY); ++ while (1) { ++ target_thread = t->from; ++ if (target_thread) { ++ if (target_thread->return_error != BR_OK && ++ target_thread->return_error2 == BR_OK) { ++ target_thread->return_error2 = ++ target_thread->return_error; ++ target_thread->return_error = BR_OK; ++ } ++ if (target_thread->return_error == BR_OK) { ++ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, ++ "binder: send failed reply for " ++ "transaction %d to %d:%d\n", ++ t->debug_id, target_thread->proc->pid, ++ target_thread->pid); ++ ++ binder_pop_transaction(target_thread, t); ++ target_thread->return_error = error_code; ++ wake_up_interruptible(&target_thread->wait); ++ } else { ++ printk(KERN_ERR "binder: reply failed, target " ++ "thread, %d:%d, has error code %d " ++ "already\n", target_thread->proc->pid, ++ target_thread->pid, ++ target_thread->return_error); ++ } ++ return; ++ } else { ++ struct binder_transaction *next = t->from_parent; ++ ++ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, ++ "binder: send failed reply " ++ "for transaction %d, target dead\n", ++ t->debug_id); ++ ++ binder_pop_transaction(target_thread, t); ++ if (next == NULL) { ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder: reply failed," ++ " no target thread at root\n"); ++ return; ++ } ++ t = next; ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder: reply failed, no target " ++ "thread -- retry %d\n", t->debug_id); ++ } ++ } ++} ++ ++static void binder_transaction_buffer_release(struct binder_proc *proc, ++ struct binder_buffer *buffer, ++ size_t *failed_at) ++{ ++ size_t *offp, *off_end; ++ int debug_id = buffer->debug_id; ++ ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", ++ proc->pid, buffer->debug_id, ++ buffer->data_size, buffer->offsets_size, failed_at); ++ ++ if (buffer->target_node) ++ binder_dec_node(buffer->target_node, 1, 0); ++ ++ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); ++ if (failed_at) ++ off_end = failed_at; ++ else ++ off_end = (void *)offp + buffer->offsets_size; ++ for (; offp < off_end; offp++) { ++ struct flat_binder_object *fp; ++ if (*offp > buffer->data_size - sizeof(*fp) || ++ buffer->data_size < sizeof(*fp) || ++ !IS_ALIGNED(*offp, sizeof(void *))) { ++ printk(KERN_ERR "binder: transaction release %d bad" ++ "offset %zd, size %zd\n", debug_id, ++ *offp, buffer->data_size); ++ continue; ++ } ++ fp = (struct flat_binder_object *)(buffer->data + *offp); ++ switch (fp->type) { ++ case BINDER_TYPE_BINDER: ++ case BINDER_TYPE_WEAK_BINDER: { ++ struct binder_node *node = binder_get_node(proc, fp->binder); ++ if (node == NULL) { ++ printk(KERN_ERR "binder: transaction release %d" ++ " bad node %p\n", debug_id, fp->binder); ++ break; ++ } ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " node %d u%p\n", ++ node->debug_id, node->ptr); ++ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); ++ } break; ++ case BINDER_TYPE_HANDLE: ++ case BINDER_TYPE_WEAK_HANDLE: { ++ struct binder_ref *ref = binder_get_ref(proc, fp->handle); ++ if (ref == NULL) { ++ printk(KERN_ERR "binder: transaction release %d" ++ " bad handle %ld\n", debug_id, ++ fp->handle); ++ break; ++ } ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " ref %d desc %d (node %d)\n", ++ ref->debug_id, ref->desc, ref->node->debug_id); ++ binder_dec_ref(ref, 
fp->type == BINDER_TYPE_HANDLE); ++ } break; ++ ++ case BINDER_TYPE_FD: ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " fd %ld\n", fp->handle); ++ if (failed_at) ++ task_close_fd(proc, fp->handle); ++ break; ++ ++ default: ++ printk(KERN_ERR "binder: transaction release %d bad " ++ "object type %lx\n", debug_id, fp->type); ++ break; ++ } ++ } ++} ++ ++static void binder_transaction(struct binder_proc *proc, ++ struct binder_thread *thread, ++ struct binder_transaction_data *tr, int reply) ++{ ++ struct binder_transaction *t; ++ struct binder_work *tcomplete; ++ size_t *offp, *off_end; ++ struct binder_proc *target_proc; ++ struct binder_thread *target_thread = NULL; ++ struct binder_node *target_node = NULL; ++ struct list_head *target_list; ++ wait_queue_head_t *target_wait; ++ struct binder_transaction *in_reply_to = NULL; ++ struct binder_transaction_log_entry *e; ++ uint32_t return_error; ++ ++ e = binder_transaction_log_add(&binder_transaction_log); ++ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); ++ e->from_proc = proc->pid; ++ e->from_thread = thread->pid; ++ e->target_handle = tr->target.handle; ++ e->data_size = tr->data_size; ++ e->offsets_size = tr->offsets_size; ++ ++ if (reply) { ++ in_reply_to = thread->transaction_stack; ++ if (in_reply_to == NULL) { ++ binder_user_error("binder: %d:%d got reply transaction " ++ "with no transaction stack\n", ++ proc->pid, thread->pid); ++ return_error = BR_FAILED_REPLY; ++ goto err_empty_call_stack; ++ } ++ binder_set_nice(in_reply_to->saved_priority); ++ if (in_reply_to->to_thread != thread) { ++ binder_user_error("binder: %d:%d got reply transaction " ++ "with bad transaction stack," ++ " transaction %d has target %d:%d\n", ++ proc->pid, thread->pid, in_reply_to->debug_id, ++ in_reply_to->to_proc ? ++ in_reply_to->to_proc->pid : 0, ++ in_reply_to->to_thread ? ++ in_reply_to->to_thread->pid : 0); ++ return_error = BR_FAILED_REPLY; ++ in_reply_to = NULL; ++ goto err_bad_call_stack; ++ } ++ thread->transaction_stack = in_reply_to->to_parent; ++ target_thread = in_reply_to->from; ++ if (target_thread == NULL) { ++ return_error = BR_DEAD_REPLY; ++ goto err_dead_binder; ++ } ++ if (target_thread->transaction_stack != in_reply_to) { ++ binder_user_error("binder: %d:%d got reply transaction " ++ "with bad target transaction stack %d, " ++ "expected %d\n", ++ proc->pid, thread->pid, ++ target_thread->transaction_stack ? 
++ target_thread->transaction_stack->debug_id : 0, ++ in_reply_to->debug_id); ++ return_error = BR_FAILED_REPLY; ++ in_reply_to = NULL; ++ target_thread = NULL; ++ goto err_dead_binder; ++ } ++ target_proc = target_thread->proc; ++ } else { ++ if (tr->target.handle) { ++ struct binder_ref *ref; ++ ref = binder_get_ref(proc, tr->target.handle); ++ if (ref == NULL) { ++ binder_user_error("binder: %d:%d got " ++ "transaction to invalid handle\n", ++ proc->pid, thread->pid); ++ return_error = BR_FAILED_REPLY; ++ goto err_invalid_target_handle; ++ } ++ target_node = ref->node; ++ } else { ++ target_node = binder_context_mgr_node; ++ if (target_node == NULL) { ++ return_error = BR_DEAD_REPLY; ++ goto err_no_context_mgr_node; ++ } ++ } ++ e->to_node = target_node->debug_id; ++ target_proc = target_node->proc; ++ if (target_proc == NULL) { ++ return_error = BR_DEAD_REPLY; ++ goto err_dead_binder; ++ } ++ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { ++ struct binder_transaction *tmp; ++ tmp = thread->transaction_stack; ++ if (tmp->to_thread != thread) { ++ binder_user_error("binder: %d:%d got new " ++ "transaction with bad transaction stack" ++ ", transaction %d has target %d:%d\n", ++ proc->pid, thread->pid, tmp->debug_id, ++ tmp->to_proc ? tmp->to_proc->pid : 0, ++ tmp->to_thread ? ++ tmp->to_thread->pid : 0); ++ return_error = BR_FAILED_REPLY; ++ goto err_bad_call_stack; ++ } ++ while (tmp) { ++ if (tmp->from && tmp->from->proc == target_proc) ++ target_thread = tmp->from; ++ tmp = tmp->from_parent; ++ } ++ } ++ } ++ if (target_thread) { ++ e->to_thread = target_thread->pid; ++ target_list = &target_thread->todo; ++ target_wait = &target_thread->wait; ++ } else { ++ target_list = &target_proc->todo; ++ target_wait = &target_proc->wait; ++ } ++ e->to_proc = target_proc->pid; ++ ++ /* TODO: reuse incoming transaction for reply */ ++ t = kzalloc(sizeof(*t), GFP_KERNEL); ++ if (t == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_alloc_t_failed; ++ } ++ binder_stats_created(BINDER_STAT_TRANSACTION); ++ ++ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); ++ if (tcomplete == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_alloc_tcomplete_failed; ++ } ++ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); ++ ++ t->debug_id = ++binder_last_id; ++ e->debug_id = t->debug_id; ++ ++ if (reply) ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ "binder: %d:%d BC_REPLY %d -> %d:%d, " ++ "data %p-%p size %zd-%zd\n", ++ proc->pid, thread->pid, t->debug_id, ++ target_proc->pid, target_thread->pid, ++ tr->data.ptr.buffer, tr->data.ptr.offsets, ++ tr->data_size, tr->offsets_size); ++ else ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ "binder: %d:%d BC_TRANSACTION %d -> " ++ "%d - node %d, data %p-%p size %zd-%zd\n", ++ proc->pid, thread->pid, t->debug_id, ++ target_proc->pid, target_node->debug_id, ++ tr->data.ptr.buffer, tr->data.ptr.offsets, ++ tr->data_size, tr->offsets_size); ++ ++ if (!reply && !(tr->flags & TF_ONE_WAY)) ++ t->from = thread; ++ else ++ t->from = NULL; ++ t->sender_euid = proc->tsk->cred->euid; ++ t->to_proc = target_proc; ++ t->to_thread = target_thread; ++ t->code = tr->code; ++ t->flags = tr->flags; ++ t->priority = task_nice(current); ++ t->buffer = binder_alloc_buf(target_proc, tr->data_size, ++ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY)); ++ if (t->buffer == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_binder_alloc_buf_failed; ++ } ++ t->buffer->allow_user_free = 0; ++ t->buffer->debug_id = t->debug_id; ++ t->buffer->transaction = t; ++ 
t->buffer->target_node = target_node; ++ if (target_node) ++ binder_inc_node(target_node, 1, 0, NULL); ++ ++ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *))); ++ ++ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) { ++ binder_user_error("binder: %d:%d got transaction with invalid " ++ "data ptr\n", proc->pid, thread->pid); ++ return_error = BR_FAILED_REPLY; ++ goto err_copy_data_failed; ++ } ++ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) { ++ binder_user_error("binder: %d:%d got transaction with invalid " ++ "offsets ptr\n", proc->pid, thread->pid); ++ return_error = BR_FAILED_REPLY; ++ goto err_copy_data_failed; ++ } ++ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) { ++ binder_user_error("binder: %d:%d got transaction with " ++ "invalid offsets size, %zd\n", ++ proc->pid, thread->pid, tr->offsets_size); ++ return_error = BR_FAILED_REPLY; ++ goto err_bad_offset; ++ } ++ off_end = (void *)offp + tr->offsets_size; ++ for (; offp < off_end; offp++) { ++ struct flat_binder_object *fp; ++ if (*offp > t->buffer->data_size - sizeof(*fp) || ++ t->buffer->data_size < sizeof(*fp) || ++ !IS_ALIGNED(*offp, sizeof(void *))) { ++ binder_user_error("binder: %d:%d got transaction with " ++ "invalid offset, %zd\n", ++ proc->pid, thread->pid, *offp); ++ return_error = BR_FAILED_REPLY; ++ goto err_bad_offset; ++ } ++ fp = (struct flat_binder_object *)(t->buffer->data + *offp); ++ switch (fp->type) { ++ case BINDER_TYPE_BINDER: ++ case BINDER_TYPE_WEAK_BINDER: { ++ struct binder_ref *ref; ++ struct binder_node *node = binder_get_node(proc, fp->binder); ++ if (node == NULL) { ++ node = binder_new_node(proc, fp->binder, fp->cookie); ++ if (node == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_binder_new_node_failed; ++ } ++ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; ++ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); ++ } ++ if (fp->cookie != node->cookie) { ++ binder_user_error("binder: %d:%d sending u%p " ++ "node %d, cookie mismatch %p != %p\n", ++ proc->pid, thread->pid, ++ fp->binder, node->debug_id, ++ fp->cookie, node->cookie); ++ goto err_binder_get_ref_for_node_failed; ++ } ++ ref = binder_get_ref_for_node(target_proc, node); ++ if (ref == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_binder_get_ref_for_node_failed; ++ } ++ if (fp->type == BINDER_TYPE_BINDER) ++ fp->type = BINDER_TYPE_HANDLE; ++ else ++ fp->type = BINDER_TYPE_WEAK_HANDLE; ++ fp->handle = ref->desc; ++ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE, ++ &thread->todo); ++ ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " node %d u%p -> ref %d desc %d\n", ++ node->debug_id, node->ptr, ref->debug_id, ++ ref->desc); ++ } break; ++ case BINDER_TYPE_HANDLE: ++ case BINDER_TYPE_WEAK_HANDLE: { ++ struct binder_ref *ref = binder_get_ref(proc, fp->handle); ++ if (ref == NULL) { ++ binder_user_error("binder: %d:%d got " ++ "transaction with invalid " ++ "handle, %ld\n", proc->pid, ++ thread->pid, fp->handle); ++ return_error = BR_FAILED_REPLY; ++ goto err_binder_get_ref_failed; ++ } ++ if (ref->node->proc == target_proc) { ++ if (fp->type == BINDER_TYPE_HANDLE) ++ fp->type = BINDER_TYPE_BINDER; ++ else ++ fp->type = BINDER_TYPE_WEAK_BINDER; ++ fp->binder = ref->node->ptr; ++ fp->cookie = ref->node->cookie; ++ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " ref %d desc %d -> node %d u%p\n", ++ ref->debug_id, ref->desc, ref->node->debug_id, ++ 
ref->node->ptr); ++ } else { ++ struct binder_ref *new_ref; ++ new_ref = binder_get_ref_for_node(target_proc, ref->node); ++ if (new_ref == NULL) { ++ return_error = BR_FAILED_REPLY; ++ goto err_binder_get_ref_for_node_failed; ++ } ++ fp->handle = new_ref->desc; ++ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " ref %d desc %d -> ref %d desc %d (node %d)\n", ++ ref->debug_id, ref->desc, new_ref->debug_id, ++ new_ref->desc, ref->node->debug_id); ++ } ++ } break; ++ ++ case BINDER_TYPE_FD: { ++ int target_fd; ++ struct file *file; ++ ++ if (reply) { ++ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { ++ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", ++ proc->pid, thread->pid, fp->handle); ++ return_error = BR_FAILED_REPLY; ++ goto err_fd_not_allowed; ++ } ++ } else if (!target_node->accept_fds) { ++ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", ++ proc->pid, thread->pid, fp->handle); ++ return_error = BR_FAILED_REPLY; ++ goto err_fd_not_allowed; ++ } ++ ++ file = fget(fp->handle); ++ if (file == NULL) { ++ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", ++ proc->pid, thread->pid, fp->handle); ++ return_error = BR_FAILED_REPLY; ++ goto err_fget_failed; ++ } ++ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); ++ if (target_fd < 0) { ++ fput(file); ++ return_error = BR_FAILED_REPLY; ++ goto err_get_unused_fd_failed; ++ } ++ task_fd_install(target_proc, target_fd, file); ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ " fd %ld -> %d\n", fp->handle, target_fd); ++ /* TODO: fput? */ ++ fp->handle = target_fd; ++ } break; ++ ++ default: ++ binder_user_error("binder: %d:%d got transactio" ++ "n with invalid object type, %lx\n", ++ proc->pid, thread->pid, fp->type); ++ return_error = BR_FAILED_REPLY; ++ goto err_bad_object_type; ++ } ++ } ++ if (reply) { ++ BUG_ON(t->buffer->async_transaction != 0); ++ binder_pop_transaction(target_thread, in_reply_to); ++ } else if (!(t->flags & TF_ONE_WAY)) { ++ BUG_ON(t->buffer->async_transaction != 0); ++ t->need_reply = 1; ++ t->from_parent = thread->transaction_stack; ++ thread->transaction_stack = t; ++ } else { ++ BUG_ON(target_node == NULL); ++ BUG_ON(t->buffer->async_transaction != 1); ++ if (target_node->has_async_transaction) { ++ target_list = &target_node->async_todo; ++ target_wait = NULL; ++ } else ++ target_node->has_async_transaction = 1; ++ } ++ t->work.type = BINDER_WORK_TRANSACTION; ++ list_add_tail(&t->work.entry, target_list); ++ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; ++ list_add_tail(&tcomplete->entry, &thread->todo); ++ if (target_wait) ++ wake_up_interruptible(target_wait); ++ return; ++ ++err_get_unused_fd_failed: ++err_fget_failed: ++err_fd_not_allowed: ++err_binder_get_ref_for_node_failed: ++err_binder_get_ref_failed: ++err_binder_new_node_failed: ++err_bad_object_type: ++err_bad_offset: ++err_copy_data_failed: ++ binder_transaction_buffer_release(target_proc, t->buffer, offp); ++ t->buffer->transaction = NULL; ++ binder_free_buf(target_proc, t->buffer); ++err_binder_alloc_buf_failed: ++ kfree(tcomplete); ++ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); ++err_alloc_tcomplete_failed: ++ kfree(t); ++ binder_stats_deleted(BINDER_STAT_TRANSACTION); ++err_alloc_t_failed: ++err_bad_call_stack: ++err_empty_call_stack: ++err_dead_binder: ++err_invalid_target_handle: ++err_no_context_mgr_node: ++ 
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, ++ "binder: %d:%d transaction failed %d, size %zd-%zd\n", ++ proc->pid, thread->pid, return_error, ++ tr->data_size, tr->offsets_size); ++ ++ { ++ struct binder_transaction_log_entry *fe; ++ fe = binder_transaction_log_add(&binder_transaction_log_failed); ++ *fe = *e; ++ } ++ ++ BUG_ON(thread->return_error != BR_OK); ++ if (in_reply_to) { ++ thread->return_error = BR_TRANSACTION_COMPLETE; ++ binder_send_failed_reply(in_reply_to, return_error); ++ } else ++ thread->return_error = return_error; ++} ++ ++int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, ++ void __user *buffer, int size, signed long *consumed) ++{ ++ uint32_t cmd; ++ void __user *ptr = buffer + *consumed; ++ void __user *end = buffer + size; ++ ++ while (ptr < end && thread->return_error == BR_OK) { ++ if (get_user(cmd, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { ++ binder_stats.bc[_IOC_NR(cmd)]++; ++ proc->stats.bc[_IOC_NR(cmd)]++; ++ thread->stats.bc[_IOC_NR(cmd)]++; ++ } ++ switch (cmd) { ++ case BC_INCREFS: ++ case BC_ACQUIRE: ++ case BC_RELEASE: ++ case BC_DECREFS: { ++ uint32_t target; ++ struct binder_ref *ref; ++ const char *debug_string; ++ ++ if (get_user(target, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (target == 0 && binder_context_mgr_node && ++ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) { ++ ref = binder_get_ref_for_node(proc, ++ binder_context_mgr_node); ++ if (ref->desc != target) { ++ binder_user_error("binder: %d:" ++ "%d tried to acquire " ++ "reference to desc 0, " ++ "got %d instead\n", ++ proc->pid, thread->pid, ++ ref->desc); ++ } ++ } else ++ ref = binder_get_ref(proc, target); ++ if (ref == NULL) { ++ binder_user_error("binder: %d:%d refcou" ++ "nt change on invalid ref %d\n", ++ proc->pid, thread->pid, target); ++ break; ++ } ++ switch (cmd) { ++ case BC_INCREFS: ++ debug_string = "IncRefs"; ++ binder_inc_ref(ref, 0, NULL); ++ break; ++ case BC_ACQUIRE: ++ debug_string = "Acquire"; ++ binder_inc_ref(ref, 1, NULL); ++ break; ++ case BC_RELEASE: ++ debug_string = "Release"; ++ binder_dec_ref(ref, 1); ++ break; ++ case BC_DECREFS: ++ default: ++ debug_string = "DecRefs"; ++ binder_dec_ref(ref, 0); ++ break; ++ } ++ binder_debug(BINDER_DEBUG_USER_REFS, ++ "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", ++ proc->pid, thread->pid, debug_string, ref->debug_id, ++ ref->desc, ref->strong, ref->weak, ref->node->debug_id); ++ break; ++ } ++ case BC_INCREFS_DONE: ++ case BC_ACQUIRE_DONE: { ++ void __user *node_ptr; ++ void *cookie; ++ struct binder_node *node; ++ ++ if (get_user(node_ptr, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ if (get_user(cookie, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ node = binder_get_node(proc, node_ptr); ++ if (node == NULL) { ++ binder_user_error("binder: %d:%d " ++ "%s u%p no match\n", ++ proc->pid, thread->pid, ++ cmd == BC_INCREFS_DONE ? ++ "BC_INCREFS_DONE" : ++ "BC_ACQUIRE_DONE", ++ node_ptr); ++ break; ++ } ++ if (cookie != node->cookie) { ++ binder_user_error("binder: %d:%d %s u%p node %d" ++ " cookie mismatch %p != %p\n", ++ proc->pid, thread->pid, ++ cmd == BC_INCREFS_DONE ? 
++ "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", ++ node_ptr, node->debug_id, ++ cookie, node->cookie); ++ break; ++ } ++ if (cmd == BC_ACQUIRE_DONE) { ++ if (node->pending_strong_ref == 0) { ++ binder_user_error("binder: %d:%d " ++ "BC_ACQUIRE_DONE node %d has " ++ "no pending acquire request\n", ++ proc->pid, thread->pid, ++ node->debug_id); ++ break; ++ } ++ node->pending_strong_ref = 0; ++ } else { ++ if (node->pending_weak_ref == 0) { ++ binder_user_error("binder: %d:%d " ++ "BC_INCREFS_DONE node %d has " ++ "no pending increfs request\n", ++ proc->pid, thread->pid, ++ node->debug_id); ++ break; ++ } ++ node->pending_weak_ref = 0; ++ } ++ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0); ++ binder_debug(BINDER_DEBUG_USER_REFS, ++ "binder: %d:%d %s node %d ls %d lw %d\n", ++ proc->pid, thread->pid, ++ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", ++ node->debug_id, node->local_strong_refs, node->local_weak_refs); ++ break; ++ } ++ case BC_ATTEMPT_ACQUIRE: ++ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n"); ++ return -EINVAL; ++ case BC_ACQUIRE_RESULT: ++ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n"); ++ return -EINVAL; ++ ++ case BC_FREE_BUFFER: { ++ void __user *data_ptr; ++ struct binder_buffer *buffer; ++ ++ if (get_user(data_ptr, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ ++ buffer = binder_buffer_lookup(proc, data_ptr); ++ if (buffer == NULL) { ++ binder_user_error("binder: %d:%d " ++ "BC_FREE_BUFFER u%p no match\n", ++ proc->pid, thread->pid, data_ptr); ++ break; ++ } ++ if (!buffer->allow_user_free) { ++ binder_user_error("binder: %d:%d " ++ "BC_FREE_BUFFER u%p matched " ++ "unreturned buffer\n", ++ proc->pid, thread->pid, data_ptr); ++ break; ++ } ++ binder_debug(BINDER_DEBUG_FREE_BUFFER, ++ "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", ++ proc->pid, thread->pid, data_ptr, buffer->debug_id, ++ buffer->transaction ? 
"active" : "finished"); ++ ++ if (buffer->transaction) { ++ buffer->transaction->buffer = NULL; ++ buffer->transaction = NULL; ++ } ++ if (buffer->async_transaction && buffer->target_node) { ++ BUG_ON(!buffer->target_node->has_async_transaction); ++ if (list_empty(&buffer->target_node->async_todo)) ++ buffer->target_node->has_async_transaction = 0; ++ else ++ list_move_tail(buffer->target_node->async_todo.next, &thread->todo); ++ } ++ binder_transaction_buffer_release(proc, buffer, NULL); ++ binder_free_buf(proc, buffer); ++ break; ++ } ++ ++ case BC_TRANSACTION: ++ case BC_REPLY: { ++ struct binder_transaction_data tr; ++ ++ if (copy_from_user(&tr, ptr, sizeof(tr))) ++ return -EFAULT; ++ ptr += sizeof(tr); ++ binder_transaction(proc, thread, &tr, cmd == BC_REPLY); ++ break; ++ } ++ ++ case BC_REGISTER_LOOPER: ++ binder_debug(BINDER_DEBUG_THREADS, ++ "binder: %d:%d BC_REGISTER_LOOPER\n", ++ proc->pid, thread->pid); ++ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { ++ thread->looper |= BINDER_LOOPER_STATE_INVALID; ++ binder_user_error("binder: %d:%d ERROR:" ++ " BC_REGISTER_LOOPER called " ++ "after BC_ENTER_LOOPER\n", ++ proc->pid, thread->pid); ++ } else if (proc->requested_threads == 0) { ++ thread->looper |= BINDER_LOOPER_STATE_INVALID; ++ binder_user_error("binder: %d:%d ERROR:" ++ " BC_REGISTER_LOOPER called " ++ "without request\n", ++ proc->pid, thread->pid); ++ } else { ++ proc->requested_threads--; ++ proc->requested_threads_started++; ++ } ++ thread->looper |= BINDER_LOOPER_STATE_REGISTERED; ++ break; ++ case BC_ENTER_LOOPER: ++ binder_debug(BINDER_DEBUG_THREADS, ++ "binder: %d:%d BC_ENTER_LOOPER\n", ++ proc->pid, thread->pid); ++ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { ++ thread->looper |= BINDER_LOOPER_STATE_INVALID; ++ binder_user_error("binder: %d:%d ERROR:" ++ " BC_ENTER_LOOPER called after " ++ "BC_REGISTER_LOOPER\n", ++ proc->pid, thread->pid); ++ } ++ thread->looper |= BINDER_LOOPER_STATE_ENTERED; ++ break; ++ case BC_EXIT_LOOPER: ++ binder_debug(BINDER_DEBUG_THREADS, ++ "binder: %d:%d BC_EXIT_LOOPER\n", ++ proc->pid, thread->pid); ++ thread->looper |= BINDER_LOOPER_STATE_EXITED; ++ break; ++ ++ case BC_REQUEST_DEATH_NOTIFICATION: ++ case BC_CLEAR_DEATH_NOTIFICATION: { ++ uint32_t target; ++ void __user *cookie; ++ struct binder_ref *ref; ++ struct binder_ref_death *death; ++ ++ if (get_user(target, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (get_user(cookie, (void __user * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ ref = binder_get_ref(proc, target); ++ if (ref == NULL) { ++ binder_user_error("binder: %d:%d %s " ++ "invalid ref %d\n", ++ proc->pid, thread->pid, ++ cmd == BC_REQUEST_DEATH_NOTIFICATION ? ++ "BC_REQUEST_DEATH_NOTIFICATION" : ++ "BC_CLEAR_DEATH_NOTIFICATION", ++ target); ++ break; ++ } ++ ++ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, ++ "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", ++ proc->pid, thread->pid, ++ cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
++ "BC_REQUEST_DEATH_NOTIFICATION" : ++ "BC_CLEAR_DEATH_NOTIFICATION", ++ cookie, ref->debug_id, ref->desc, ++ ref->strong, ref->weak, ref->node->debug_id); ++ ++ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { ++ if (ref->death) { ++ binder_user_error("binder: %d:%" ++ "d BC_REQUEST_DEATH_NOTI" ++ "FICATION death notific" ++ "ation already set\n", ++ proc->pid, thread->pid); ++ break; ++ } ++ death = kzalloc(sizeof(*death), GFP_KERNEL); ++ if (death == NULL) { ++ thread->return_error = BR_ERROR; ++ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, ++ "binder: %d:%d " ++ "BC_REQUEST_DEATH_NOTIFICATION failed\n", ++ proc->pid, thread->pid); ++ break; ++ } ++ binder_stats_created(BINDER_STAT_DEATH); ++ INIT_LIST_HEAD(&death->work.entry); ++ death->cookie = cookie; ++ ref->death = death; ++ if (ref->node->proc == NULL) { ++ ref->death->work.type = BINDER_WORK_DEAD_BINDER; ++ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { ++ list_add_tail(&ref->death->work.entry, &thread->todo); ++ } else { ++ list_add_tail(&ref->death->work.entry, &proc->todo); ++ wake_up_interruptible(&proc->wait); ++ } ++ } ++ } else { ++ if (ref->death == NULL) { ++ binder_user_error("binder: %d:%" ++ "d BC_CLEAR_DEATH_NOTIFI" ++ "CATION death notificat" ++ "ion not active\n", ++ proc->pid, thread->pid); ++ break; ++ } ++ death = ref->death; ++ if (death->cookie != cookie) { ++ binder_user_error("binder: %d:%" ++ "d BC_CLEAR_DEATH_NOTIFI" ++ "CATION death notificat" ++ "ion cookie mismatch " ++ "%p != %p\n", ++ proc->pid, thread->pid, ++ death->cookie, cookie); ++ break; ++ } ++ ref->death = NULL; ++ if (list_empty(&death->work.entry)) { ++ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; ++ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { ++ list_add_tail(&death->work.entry, &thread->todo); ++ } else { ++ list_add_tail(&death->work.entry, &proc->todo); ++ wake_up_interruptible(&proc->wait); ++ } ++ } else { ++ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); ++ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; ++ } ++ } ++ } break; ++ case BC_DEAD_BINDER_DONE: { ++ struct binder_work *w; ++ void __user *cookie; ++ struct binder_ref_death *death = NULL; ++ if (get_user(cookie, (void __user * __user *)ptr)) ++ return -EFAULT; ++ ++ ptr += sizeof(void *); ++ list_for_each_entry(w, &proc->delivered_death, entry) { ++ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); ++ if (tmp_death->cookie == cookie) { ++ death = tmp_death; ++ break; ++ } ++ } ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n", ++ proc->pid, thread->pid, cookie, death); ++ if (death == NULL) { ++ binder_user_error("binder: %d:%d BC_DEAD" ++ "_BINDER_DONE %p not found\n", ++ proc->pid, thread->pid, cookie); ++ break; ++ } ++ ++ list_del_init(&death->work.entry); ++ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { ++ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; ++ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) { ++ list_add_tail(&death->work.entry, &thread->todo); ++ } else { ++ list_add_tail(&death->work.entry, &proc->todo); ++ wake_up_interruptible(&proc->wait); ++ } ++ } ++ } break; ++ ++ default: ++ printk(KERN_ERR "binder: %d:%d unknown command %d\n", ++ proc->pid, thread->pid, cmd); ++ return -EINVAL; ++ } ++ *consumed = ptr - buffer; ++ } ++ return 0; ++} ++ ++void binder_stat_br(struct binder_proc *proc, struct binder_thread 
*thread, ++ uint32_t cmd) ++{ ++ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { ++ binder_stats.br[_IOC_NR(cmd)]++; ++ proc->stats.br[_IOC_NR(cmd)]++; ++ thread->stats.br[_IOC_NR(cmd)]++; ++ } ++} ++ ++static int binder_has_proc_work(struct binder_proc *proc, ++ struct binder_thread *thread) ++{ ++ return !list_empty(&proc->todo) || ++ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); ++} ++ ++static int binder_has_thread_work(struct binder_thread *thread) ++{ ++ return !list_empty(&thread->todo) || thread->return_error != BR_OK || ++ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN); ++} ++ ++static int binder_thread_read(struct binder_proc *proc, ++ struct binder_thread *thread, ++ void __user *buffer, int size, ++ signed long *consumed, int non_block) ++{ ++ void __user *ptr = buffer + *consumed; ++ void __user *end = buffer + size; ++ ++ int ret = 0; ++ int wait_for_proc_work; ++ ++ if (*consumed == 0) { ++ if (put_user(BR_NOOP, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ } ++ ++retry: ++ wait_for_proc_work = thread->transaction_stack == NULL && ++ list_empty(&thread->todo); ++ ++ if (thread->return_error != BR_OK && ptr < end) { ++ if (thread->return_error2 != BR_OK) { ++ if (put_user(thread->return_error2, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (ptr == end) ++ goto done; ++ thread->return_error2 = BR_OK; ++ } ++ if (put_user(thread->return_error, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ thread->return_error = BR_OK; ++ goto done; ++ } ++ ++ ++ thread->looper |= BINDER_LOOPER_STATE_WAITING; ++ if (wait_for_proc_work) ++ proc->ready_threads++; ++ mutex_unlock(&binder_lock); ++ if (wait_for_proc_work) { ++ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | ++ BINDER_LOOPER_STATE_ENTERED))) { ++ binder_user_error("binder: %d:%d ERROR: Thread waiting " ++ "for process work before calling BC_REGISTER_" ++ "LOOPER or BC_ENTER_LOOPER (state %x)\n", ++ proc->pid, thread->pid, thread->looper); ++ wait_event_interruptible(binder_user_error_wait, ++ binder_stop_on_user_error < 2); ++ } ++ binder_set_nice(proc->default_priority); ++ if (non_block) { ++ if (!binder_has_proc_work(proc, thread)) ++ ret = -EAGAIN; ++ } else ++ ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread)); ++ } else { ++ if (non_block) { ++ if (!binder_has_thread_work(thread)) ++ ret = -EAGAIN; ++ } else ++ ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread)); ++ } ++ mutex_lock(&binder_lock); ++ if (wait_for_proc_work) ++ proc->ready_threads--; ++ thread->looper &= ~BINDER_LOOPER_STATE_WAITING; ++ ++ if (ret) ++ return ret; ++ ++ while (1) { ++ uint32_t cmd; ++ struct binder_transaction_data tr; ++ struct binder_work *w; ++ struct binder_transaction *t = NULL; ++ ++ if (!list_empty(&thread->todo)) ++ w = list_first_entry(&thread->todo, struct binder_work, entry); ++ else if (!list_empty(&proc->todo) && wait_for_proc_work) ++ w = list_first_entry(&proc->todo, struct binder_work, entry); ++ else { ++ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */ ++ goto retry; ++ break; ++ } ++ ++ if (end - ptr < sizeof(tr) + 4) ++ break; ++ ++ switch (w->type) { ++ case BINDER_WORK_TRANSACTION: { ++ t = container_of(w, struct binder_transaction, work); ++ } break; ++ case BINDER_WORK_TRANSACTION_COMPLETE: { ++ cmd = BR_TRANSACTION_COMPLETE; ++ if (put_user(cmd, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += 
sizeof(uint32_t); ++ ++ binder_stat_br(proc, thread, cmd); ++ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, ++ "binder: %d:%d BR_TRANSACTION_COMPLETE\n", ++ proc->pid, thread->pid); ++ ++ list_del(&w->entry); ++ kfree(w); ++ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); ++ } break; ++ case BINDER_WORK_NODE: { ++ struct binder_node *node = container_of(w, struct binder_node, work); ++ uint32_t cmd = BR_NOOP; ++ const char *cmd_name; ++ int strong = node->internal_strong_refs || node->local_strong_refs; ++ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong; ++ if (weak && !node->has_weak_ref) { ++ cmd = BR_INCREFS; ++ cmd_name = "BR_INCREFS"; ++ node->has_weak_ref = 1; ++ node->pending_weak_ref = 1; ++ node->local_weak_refs++; ++ } else if (strong && !node->has_strong_ref) { ++ cmd = BR_ACQUIRE; ++ cmd_name = "BR_ACQUIRE"; ++ node->has_strong_ref = 1; ++ node->pending_strong_ref = 1; ++ node->local_strong_refs++; ++ } else if (!strong && node->has_strong_ref) { ++ cmd = BR_RELEASE; ++ cmd_name = "BR_RELEASE"; ++ node->has_strong_ref = 0; ++ } else if (!weak && node->has_weak_ref) { ++ cmd = BR_DECREFS; ++ cmd_name = "BR_DECREFS"; ++ node->has_weak_ref = 0; ++ } ++ if (cmd != BR_NOOP) { ++ if (put_user(cmd, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (put_user(node->ptr, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ if (put_user(node->cookie, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ ++ binder_stat_br(proc, thread, cmd); ++ binder_debug(BINDER_DEBUG_USER_REFS, ++ "binder: %d:%d %s %d u%p c%p\n", ++ proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie); ++ } else { ++ list_del_init(&w->entry); ++ if (!weak && !strong) { ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d:%d node %d u%p c%p deleted\n", ++ proc->pid, thread->pid, node->debug_id, ++ node->ptr, node->cookie); ++ rb_erase(&node->rb_node, &proc->nodes); ++ kfree(node); ++ binder_stats_deleted(BINDER_STAT_NODE); ++ } else { ++ binder_debug(BINDER_DEBUG_INTERNAL_REFS, ++ "binder: %d:%d node %d u%p c%p state unchanged\n", ++ proc->pid, thread->pid, node->debug_id, node->ptr, ++ node->cookie); ++ } ++ } ++ } break; ++ case BINDER_WORK_DEAD_BINDER: ++ case BINDER_WORK_DEAD_BINDER_AND_CLEAR: ++ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { ++ struct binder_ref_death *death; ++ uint32_t cmd; ++ ++ death = container_of(w, struct binder_ref_death, work); ++ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) ++ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; ++ else ++ cmd = BR_DEAD_BINDER; ++ if (put_user(cmd, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (put_user(death->cookie, (void * __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(void *); ++ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, ++ "binder: %d:%d %s %p\n", ++ proc->pid, thread->pid, ++ cmd == BR_DEAD_BINDER ? 
++ "BR_DEAD_BINDER" : ++ "BR_CLEAR_DEATH_NOTIFICATION_DONE", ++ death->cookie); ++ ++ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { ++ list_del(&w->entry); ++ kfree(death); ++ binder_stats_deleted(BINDER_STAT_DEATH); ++ } else ++ list_move(&w->entry, &proc->delivered_death); ++ if (cmd == BR_DEAD_BINDER) ++ goto done; /* DEAD_BINDER notifications can cause transactions */ ++ } break; ++ } ++ ++ if (!t) ++ continue; ++ ++ BUG_ON(t->buffer == NULL); ++ if (t->buffer->target_node) { ++ struct binder_node *target_node = t->buffer->target_node; ++ tr.target.ptr = target_node->ptr; ++ tr.cookie = target_node->cookie; ++ t->saved_priority = task_nice(current); ++ if (t->priority < target_node->min_priority && ++ !(t->flags & TF_ONE_WAY)) ++ binder_set_nice(t->priority); ++ else if (!(t->flags & TF_ONE_WAY) || ++ t->saved_priority > target_node->min_priority) ++ binder_set_nice(target_node->min_priority); ++ cmd = BR_TRANSACTION; ++ } else { ++ tr.target.ptr = NULL; ++ tr.cookie = NULL; ++ cmd = BR_REPLY; ++ } ++ tr.code = t->code; ++ tr.flags = t->flags; ++ tr.sender_euid = t->sender_euid; ++ ++ if (t->from) { ++ struct task_struct *sender = t->from->proc->tsk; ++ tr.sender_pid = task_tgid_nr_ns(sender, ++ current->nsproxy->pid_ns); ++ } else { ++ tr.sender_pid = 0; ++ } ++ ++ tr.data_size = t->buffer->data_size; ++ tr.offsets_size = t->buffer->offsets_size; ++ tr.data.ptr.buffer = (void *)t->buffer->data + ++ proc->user_buffer_offset; ++ tr.data.ptr.offsets = tr.data.ptr.buffer + ++ ALIGN(t->buffer->data_size, ++ sizeof(void *)); ++ ++ if (put_user(cmd, (uint32_t __user *)ptr)) ++ return -EFAULT; ++ ptr += sizeof(uint32_t); ++ if (copy_to_user(ptr, &tr, sizeof(tr))) ++ return -EFAULT; ++ ptr += sizeof(tr); ++ ++ binder_stat_br(proc, thread, cmd); ++ binder_debug(BINDER_DEBUG_TRANSACTION, ++ "binder: %d:%d %s %d %d:%d, cmd %d" ++ "size %zd-%zd ptr %p-%p\n", ++ proc->pid, thread->pid, ++ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : ++ "BR_REPLY", ++ t->debug_id, t->from ? t->from->proc->pid : 0, ++ t->from ? 
t->from->pid : 0, cmd, ++ t->buffer->data_size, t->buffer->offsets_size, ++ tr.data.ptr.buffer, tr.data.ptr.offsets); ++ ++ list_del(&t->work.entry); ++ t->buffer->allow_user_free = 1; ++ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { ++ t->to_parent = thread->transaction_stack; ++ t->to_thread = thread; ++ thread->transaction_stack = t; ++ } else { ++ t->buffer->transaction = NULL; ++ kfree(t); ++ binder_stats_deleted(BINDER_STAT_TRANSACTION); ++ } ++ break; ++ } ++ ++done: ++ ++ *consumed = ptr - buffer; ++ if (proc->requested_threads + proc->ready_threads == 0 && ++ proc->requested_threads_started < proc->max_threads && ++ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | ++ BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */ ++ /*spawn a new thread if we leave this out */) { ++ proc->requested_threads++; ++ binder_debug(BINDER_DEBUG_THREADS, ++ "binder: %d:%d BR_SPAWN_LOOPER\n", ++ proc->pid, thread->pid); ++ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) ++ return -EFAULT; ++ } ++ return 0; ++} ++ ++static void binder_release_work(struct list_head *list) ++{ ++ struct binder_work *w; ++ while (!list_empty(list)) { ++ w = list_first_entry(list, struct binder_work, entry); ++ list_del_init(&w->entry); ++ switch (w->type) { ++ case BINDER_WORK_TRANSACTION: { ++ struct binder_transaction *t; ++ ++ t = container_of(w, struct binder_transaction, work); ++ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) ++ binder_send_failed_reply(t, BR_DEAD_REPLY); ++ } break; ++ case BINDER_WORK_TRANSACTION_COMPLETE: { ++ kfree(w); ++ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); ++ } break; ++ default: ++ break; ++ } ++ } ++ ++} ++ ++static struct binder_thread *binder_get_thread(struct binder_proc *proc) ++{ ++ struct binder_thread *thread = NULL; ++ struct rb_node *parent = NULL; ++ struct rb_node **p = &proc->threads.rb_node; ++ ++ while (*p) { ++ parent = *p; ++ thread = rb_entry(parent, struct binder_thread, rb_node); ++ ++ if (current->pid < thread->pid) ++ p = &(*p)->rb_left; ++ else if (current->pid > thread->pid) ++ p = &(*p)->rb_right; ++ else ++ break; ++ } ++ if (*p == NULL) { ++ thread = kzalloc(sizeof(*thread), GFP_KERNEL); ++ if (thread == NULL) ++ return NULL; ++ binder_stats_created(BINDER_STAT_THREAD); ++ thread->proc = proc; ++ thread->pid = current->pid; ++ init_waitqueue_head(&thread->wait); ++ INIT_LIST_HEAD(&thread->todo); ++ rb_link_node(&thread->rb_node, parent, p); ++ rb_insert_color(&thread->rb_node, &proc->threads); ++ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; ++ thread->return_error = BR_OK; ++ thread->return_error2 = BR_OK; ++ } ++ return thread; ++} ++ ++static int binder_free_thread(struct binder_proc *proc, ++ struct binder_thread *thread) ++{ ++ struct binder_transaction *t; ++ struct binder_transaction *send_reply = NULL; ++ int active_transactions = 0; ++ ++ rb_erase(&thread->rb_node, &proc->threads); ++ t = thread->transaction_stack; ++ if (t && t->to_thread == thread) ++ send_reply = t; ++ while (t) { ++ active_transactions++; ++ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, ++ "binder: release %d:%d transaction %d " ++ "%s, still active\n", proc->pid, thread->pid, ++ t->debug_id, ++ (t->to_thread == thread) ? 
"in" : "out"); ++ ++ if (t->to_thread == thread) { ++ t->to_proc = NULL; ++ t->to_thread = NULL; ++ if (t->buffer) { ++ t->buffer->transaction = NULL; ++ t->buffer = NULL; ++ } ++ t = t->to_parent; ++ } else if (t->from == thread) { ++ t->from = NULL; ++ t = t->from_parent; ++ } else ++ BUG(); ++ } ++ if (send_reply) ++ binder_send_failed_reply(send_reply, BR_DEAD_REPLY); ++ binder_release_work(&thread->todo); ++ kfree(thread); ++ binder_stats_deleted(BINDER_STAT_THREAD); ++ return active_transactions; ++} ++ ++static unsigned int binder_poll(struct file *filp, ++ struct poll_table_struct *wait) ++{ ++ struct binder_proc *proc = filp->private_data; ++ struct binder_thread *thread = NULL; ++ int wait_for_proc_work; ++ ++ mutex_lock(&binder_lock); ++ thread = binder_get_thread(proc); ++ ++ wait_for_proc_work = thread->transaction_stack == NULL && ++ list_empty(&thread->todo) && thread->return_error == BR_OK; ++ mutex_unlock(&binder_lock); ++ ++ if (wait_for_proc_work) { ++ if (binder_has_proc_work(proc, thread)) ++ return POLLIN; ++ poll_wait(filp, &proc->wait, wait); ++ if (binder_has_proc_work(proc, thread)) ++ return POLLIN; ++ } else { ++ if (binder_has_thread_work(thread)) ++ return POLLIN; ++ poll_wait(filp, &thread->wait, wait); ++ if (binder_has_thread_work(thread)) ++ return POLLIN; ++ } ++ return 0; ++} ++ ++static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) ++{ ++ int ret; ++ struct binder_proc *proc = filp->private_data; ++ struct binder_thread *thread; ++ unsigned int size = _IOC_SIZE(cmd); ++ void __user *ubuf = (void __user *)arg; ++ ++ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ ++ ++ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&binder_lock); ++ thread = binder_get_thread(proc); ++ if (thread == NULL) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ ++ switch (cmd) { ++ case BINDER_WRITE_READ: { ++ struct binder_write_read bwr; ++ if (size != sizeof(struct binder_write_read)) { ++ ret = -EINVAL; ++ goto err; ++ } ++ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ++ ret = -EFAULT; ++ goto err; ++ } ++ binder_debug(BINDER_DEBUG_READ_WRITE, ++ "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n", ++ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer, ++ bwr.read_size, bwr.read_buffer); ++ ++ if (bwr.write_size > 0) { ++ ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed); ++ if (ret < 0) { ++ bwr.read_consumed = 0; ++ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ++ ret = -EFAULT; ++ goto err; ++ } ++ } ++ if (bwr.read_size > 0) { ++ ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); ++ if (!list_empty(&proc->todo)) ++ wake_up_interruptible(&proc->wait); ++ if (ret < 0) { ++ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ++ ret = -EFAULT; ++ goto err; ++ } ++ } ++ binder_debug(BINDER_DEBUG_READ_WRITE, ++ "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n", ++ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size, ++ bwr.read_consumed, bwr.read_size); ++ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ++ ret = -EFAULT; ++ goto err; ++ } ++ break; ++ } ++ case BINDER_SET_MAX_THREADS: ++ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ++ ret = -EINVAL; ++ goto err; ++ } ++ break; ++ case BINDER_SET_CONTEXT_MGR: ++ if (binder_context_mgr_node != NULL) { 
++ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n"); ++ ret = -EBUSY; ++ goto err; ++ } ++ if (binder_context_mgr_uid != -1) { ++ if (binder_context_mgr_uid != current->cred->euid) { ++ printk(KERN_ERR "binder: BINDER_SET_" ++ "CONTEXT_MGR bad uid %d != %d\n", ++ current->cred->euid, ++ binder_context_mgr_uid); ++ ret = -EPERM; ++ goto err; ++ } ++ } else ++ binder_context_mgr_uid = current->cred->euid; ++ binder_context_mgr_node = binder_new_node(proc, NULL, NULL); ++ if (binder_context_mgr_node == NULL) { ++ ret = -ENOMEM; ++ goto err; ++ } ++ binder_context_mgr_node->local_weak_refs++; ++ binder_context_mgr_node->local_strong_refs++; ++ binder_context_mgr_node->has_strong_ref = 1; ++ binder_context_mgr_node->has_weak_ref = 1; ++ break; ++ case BINDER_THREAD_EXIT: ++ binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n", ++ proc->pid, thread->pid); ++ binder_free_thread(proc, thread); ++ thread = NULL; ++ break; ++ case BINDER_VERSION: ++ if (size != sizeof(struct binder_version)) { ++ ret = -EINVAL; ++ goto err; ++ } ++ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) { ++ ret = -EINVAL; ++ goto err; ++ } ++ break; ++ default: ++ ret = -EINVAL; ++ goto err; ++ } ++ ret = 0; ++err: ++ if (thread) ++ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN; ++ mutex_unlock(&binder_lock); ++ wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); ++ if (ret && ret != -ERESTARTSYS) ++ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); ++ return ret; ++} ++ ++static void binder_vma_open(struct vm_area_struct *vma) ++{ ++ struct binder_proc *proc = vma->vm_private_data; ++ binder_debug(BINDER_DEBUG_OPEN_CLOSE, ++ "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", ++ proc->pid, vma->vm_start, vma->vm_end, ++ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, ++ (unsigned long)pgprot_val(vma->vm_page_prot)); ++ dump_stack(); ++} ++ ++static void binder_vma_close(struct vm_area_struct *vma) ++{ ++ struct binder_proc *proc = vma->vm_private_data; ++ binder_debug(BINDER_DEBUG_OPEN_CLOSE, ++ "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", ++ proc->pid, vma->vm_start, vma->vm_end, ++ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, ++ (unsigned long)pgprot_val(vma->vm_page_prot)); ++ proc->vma = NULL; ++ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); ++} ++ ++static struct vm_operations_struct binder_vm_ops = { ++ .open = binder_vma_open, ++ .close = binder_vma_close, ++}; ++ ++static int binder_mmap(struct file *filp, struct vm_area_struct *vma) ++{ ++ int ret; ++ struct vm_struct *area; ++ struct binder_proc *proc = filp->private_data; ++ const char *failure_string; ++ struct binder_buffer *buffer; ++ ++ if ((vma->vm_end - vma->vm_start) > SZ_4M) ++ vma->vm_end = vma->vm_start + SZ_4M; ++ ++ binder_debug(BINDER_DEBUG_OPEN_CLOSE, ++ "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", ++ proc->pid, vma->vm_start, vma->vm_end, ++ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, ++ (unsigned long)pgprot_val(vma->vm_page_prot)); ++ ++ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { ++ ret = -EPERM; ++ failure_string = "bad vm_flags"; ++ goto err_bad_arg; ++ } ++ vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; ++ ++ if (proc->buffer) { ++ ret = -EBUSY; ++ failure_string = "already mapped"; ++ goto err_already_mapped; ++ } ++ ++ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP); ++ if (area == NULL) 
{ ++ ret = -ENOMEM; ++ failure_string = "get_vm_area"; ++ goto err_get_vm_area_failed; ++ } ++ proc->buffer = area->addr; ++ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; ++ ++#ifdef CONFIG_CPU_CACHE_VIPT ++ if (cache_is_vipt_aliasing()) { ++ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { ++ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); ++ vma->vm_start += PAGE_SIZE; ++ } ++ } ++#endif ++ proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL); ++ if (proc->pages == NULL) { ++ ret = -ENOMEM; ++ failure_string = "alloc page array"; ++ goto err_alloc_pages_failed; ++ } ++ proc->buffer_size = vma->vm_end - vma->vm_start; ++ ++ vma->vm_ops = &binder_vm_ops; ++ vma->vm_private_data = proc; ++ ++ if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) { ++ ret = -ENOMEM; ++ failure_string = "alloc small buf"; ++ goto err_alloc_small_buf_failed; ++ } ++ buffer = proc->buffer; ++ INIT_LIST_HEAD(&proc->buffers); ++ list_add(&buffer->entry, &proc->buffers); ++ buffer->free = 1; ++ binder_insert_free_buffer(proc, buffer); ++ proc->free_async_space = proc->buffer_size / 2; ++ barrier(); ++ proc->files = get_files_struct(current); ++ proc->vma = vma; ++ ++ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", ++ proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ ++ return 0; ++ ++err_alloc_small_buf_failed: ++ kfree(proc->pages); ++ proc->pages = NULL; ++err_alloc_pages_failed: ++ vfree(proc->buffer); ++ proc->buffer = NULL; ++err_get_vm_area_failed: ++err_already_mapped: ++err_bad_arg: ++ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", ++ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); ++ return ret; ++} ++ ++static int binder_open(struct inode *nodp, struct file *filp) ++{ ++ struct binder_proc *proc; ++ ++ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n", ++ current->group_leader->pid, current->pid); ++ ++ proc = kzalloc(sizeof(*proc), GFP_KERNEL); ++ if (proc == NULL) ++ return -ENOMEM; ++ get_task_struct(current); ++ proc->tsk = current; ++ INIT_LIST_HEAD(&proc->todo); ++ init_waitqueue_head(&proc->wait); ++ proc->default_priority = task_nice(current); ++ mutex_lock(&binder_lock); ++ binder_stats_created(BINDER_STAT_PROC); ++ hlist_add_head(&proc->proc_node, &binder_procs); ++ proc->pid = current->group_leader->pid; ++ INIT_LIST_HEAD(&proc->delivered_death); ++ filp->private_data = proc; ++ mutex_unlock(&binder_lock); ++ ++ if (binder_proc_dir_entry_proc) { ++ char strbuf[11]; ++ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); ++ remove_proc_entry(strbuf, binder_proc_dir_entry_proc); ++ create_proc_read_entry(strbuf, S_IRUGO, ++ binder_proc_dir_entry_proc, ++ binder_read_proc_proc, proc); ++ } ++ ++ return 0; ++} ++ ++static int binder_flush(struct file *filp, fl_owner_t id) ++{ ++ struct binder_proc *proc = filp->private_data; ++ ++ binder_defer_work(proc, BINDER_DEFERRED_FLUSH); ++ ++ return 0; ++} ++ ++static void binder_deferred_flush(struct binder_proc *proc) ++{ ++ struct rb_node *n; ++ int wake_count = 0; ++ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { ++ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); ++ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN; ++ if (thread->looper & BINDER_LOOPER_STATE_WAITING) { ++ wake_up_interruptible(&thread->wait); ++ wake_count++; ++ } ++ } ++ 
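binder_open() and binder_mmap() above are the first two operations every client performs. A minimal hypothetical userspace sequence, not part of this patch: binder_open_and_map() is an invented name, the mapping size is the caller's choice (binder_mmap() above caps it at 4M and clears VM_MAYWRITE, so the mapping is read-only), and "binder.h" stands for the header added later in this patch.

        #include <fcntl.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/mman.h>
        #include "binder.h"

        /* Open /dev/binder, verify the protocol version, and map the buffer
         * area that binder_mmap() backs with pages on demand. */
        static int binder_open_and_map(void **map, size_t map_size)
        {
                struct binder_version vers;
                int fd = open("/dev/binder", O_RDWR);

                if (fd < 0)
                        return -1;
                if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
                    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION) {
                        close(fd);
                        return -1;
                }
                *map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
                if (*map == MAP_FAILED) {
                        close(fd);
                        return -1;
                }
                return fd;
        }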
wake_up_interruptible_all(&proc->wait); ++ ++ binder_debug(BINDER_DEBUG_OPEN_CLOSE, ++ "binder_flush: %d woke %d threads\n", proc->pid, ++ wake_count); ++} ++ ++static int binder_release(struct inode *nodp, struct file *filp) ++{ ++ struct binder_proc *proc = filp->private_data; ++ if (binder_proc_dir_entry_proc) { ++ char strbuf[11]; ++ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); ++ remove_proc_entry(strbuf, binder_proc_dir_entry_proc); ++ } ++ ++ binder_defer_work(proc, BINDER_DEFERRED_RELEASE); ++ ++ return 0; ++} ++ ++static void binder_deferred_release(struct binder_proc *proc) ++{ ++ struct hlist_node *pos; ++ struct binder_transaction *t; ++ struct rb_node *n; ++ int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count; ++ ++ BUG_ON(proc->vma); ++ BUG_ON(proc->files); ++ ++ hlist_del(&proc->proc_node); ++ if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) { ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder_release: %d context_mgr_node gone\n", ++ proc->pid); ++ binder_context_mgr_node = NULL; ++ } ++ ++ threads = 0; ++ active_transactions = 0; ++ while ((n = rb_first(&proc->threads))) { ++ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); ++ threads++; ++ active_transactions += binder_free_thread(proc, thread); ++ } ++ nodes = 0; ++ incoming_refs = 0; ++ while ((n = rb_first(&proc->nodes))) { ++ struct binder_node *node = rb_entry(n, struct binder_node, rb_node); ++ ++ nodes++; ++ rb_erase(&node->rb_node, &proc->nodes); ++ list_del_init(&node->work.entry); ++ if (hlist_empty(&node->refs)) { ++ kfree(node); ++ binder_stats_deleted(BINDER_STAT_NODE); ++ } else { ++ struct binder_ref *ref; ++ int death = 0; ++ ++ node->proc = NULL; ++ node->local_strong_refs = 0; ++ node->local_weak_refs = 0; ++ hlist_add_head(&node->dead_node, &binder_dead_nodes); ++ ++ hlist_for_each_entry(ref, pos, &node->refs, node_entry) { ++ incoming_refs++; ++ if (ref->death) { ++ death++; ++ if (list_empty(&ref->death->work.entry)) { ++ ref->death->work.type = BINDER_WORK_DEAD_BINDER; ++ list_add_tail(&ref->death->work.entry, &ref->proc->todo); ++ wake_up_interruptible(&ref->proc->wait); ++ } else ++ BUG(); ++ } ++ } ++ binder_debug(BINDER_DEBUG_DEAD_BINDER, ++ "binder: node %d now dead, " ++ "refs %d, death %d\n", node->debug_id, ++ incoming_refs, death); ++ } ++ } ++ outgoing_refs = 0; ++ while ((n = rb_first(&proc->refs_by_desc))) { ++ struct binder_ref *ref = rb_entry(n, struct binder_ref, ++ rb_node_desc); ++ outgoing_refs++; ++ binder_delete_ref(ref); ++ } ++ binder_release_work(&proc->todo); ++ buffers = 0; ++ ++ while ((n = rb_first(&proc->allocated_buffers))) { ++ struct binder_buffer *buffer = rb_entry(n, struct binder_buffer, ++ rb_node); ++ t = buffer->transaction; ++ if (t) { ++ t->buffer = NULL; ++ buffer->transaction = NULL; ++ printk(KERN_ERR "binder: release proc %d, " ++ "transaction %d, not freed\n", ++ proc->pid, t->debug_id); ++ /*BUG();*/ ++ } ++ binder_free_buf(proc, buffer); ++ buffers++; ++ } ++ ++ binder_stats_deleted(BINDER_STAT_PROC); ++ ++ page_count = 0; ++ if (proc->pages) { ++ int i; ++ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { ++ if (proc->pages[i]) { ++ binder_debug(BINDER_DEBUG_BUFFER_ALLOC, ++ "binder_release: %d: " ++ "page %d at %p not freed\n", ++ proc->pid, i, ++ proc->buffer + i * PAGE_SIZE); ++ __free_page(proc->pages[i]); ++ page_count++; ++ } ++ } ++ kfree(proc->pages); ++ vfree(proc->buffer); ++ } ++ ++ put_task_struct(proc->tsk); ++ ++ 
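Taken together, binder_thread_write(), binder_thread_read() and the BINDER_WRITE_READ path of binder_ioctl() above are what a userspace service thread drives in a loop. A hypothetical minimal loop, not part of this patch: binder_loop() is an invented name, the 256-byte read buffer is arbitrary, reply and BC_FREE_BUFFER handling is only hinted at in a comment, and "binder.h" stands for the header added later in this patch.

        #include <stdint.h>
        #include <string.h>
        #include <sys/ioctl.h>
        #include "binder.h"

        static void binder_loop(int fd)
        {
                uint32_t enter = BC_ENTER_LOOPER;
                unsigned char rbuf[256];
                struct binder_write_read bwr;

                /* Tell the driver this thread is a looper thread. */
                memset(&bwr, 0, sizeof(bwr));
                bwr.write_buffer = (unsigned long)&enter;
                bwr.write_size = sizeof(enter);
                ioctl(fd, BINDER_WRITE_READ, &bwr);

                for (;;) {
                        memset(&bwr, 0, sizeof(bwr));
                        bwr.read_buffer = (unsigned long)rbuf;
                        bwr.read_size = sizeof(rbuf);
                        if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
                                break;                  /* see the EINTR note in binder.h */

                        unsigned char *p = rbuf;
                        unsigned char *end = rbuf + bwr.read_consumed;

                        while (p + sizeof(uint32_t) <= end) {
                                uint32_t cmd;

                                memcpy(&cmd, p, sizeof(cmd));
                                p += sizeof(cmd);
                                switch (cmd) {
                                case BR_NOOP:
                                case BR_TRANSACTION_COMPLETE:
                                        break;          /* no payload */
                                case BR_TRANSACTION: {
                                        struct binder_transaction_data txn;

                                        memcpy(&txn, p, sizeof(txn));
                                        p += sizeof(txn);
                                        /* dispatch txn, then send BC_REPLY and/or
                                         * BC_FREE_BUFFER for txn.data.ptr.buffer */
                                        break;
                                }
                                default:
                                        /* BR_* payload sizes are encoded in the
                                         * command number itself */
                                        p += _IOC_SIZE(cmd);
                                        break;
                                }
                        }
                }
        }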
binder_debug(BINDER_DEBUG_OPEN_CLOSE, ++ "binder_release: %d threads %d, nodes %d (ref %d), " ++ "refs %d, active transactions %d, buffers %d, " ++ "pages %d\n", ++ proc->pid, threads, nodes, incoming_refs, outgoing_refs, ++ active_transactions, buffers, page_count); ++ ++ kfree(proc); ++} ++ ++static void binder_deferred_func(struct work_struct *work) ++{ ++ struct binder_proc *proc; ++ struct files_struct *files; ++ ++ int defer; ++ do { ++ mutex_lock(&binder_lock); ++ mutex_lock(&binder_deferred_lock); ++ if (!hlist_empty(&binder_deferred_list)) { ++ proc = hlist_entry(binder_deferred_list.first, ++ struct binder_proc, deferred_work_node); ++ hlist_del_init(&proc->deferred_work_node); ++ defer = proc->deferred_work; ++ proc->deferred_work = 0; ++ } else { ++ proc = NULL; ++ defer = 0; ++ } ++ mutex_unlock(&binder_deferred_lock); ++ ++ files = NULL; ++ if (defer & BINDER_DEFERRED_PUT_FILES) { ++ files = proc->files; ++ if (files) ++ proc->files = NULL; ++ } ++ ++ if (defer & BINDER_DEFERRED_FLUSH) ++ binder_deferred_flush(proc); ++ ++ if (defer & BINDER_DEFERRED_RELEASE) ++ binder_deferred_release(proc); /* frees proc */ ++ ++ mutex_unlock(&binder_lock); ++ if (files) ++ put_files_struct(files); ++ } while (proc); ++} ++static DECLARE_WORK(binder_deferred_work, binder_deferred_func); ++ ++static void ++binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) ++{ ++ mutex_lock(&binder_deferred_lock); ++ proc->deferred_work |= defer; ++ if (hlist_unhashed(&proc->deferred_work_node)) { ++ hlist_add_head(&proc->deferred_work_node, ++ &binder_deferred_list); ++ schedule_work(&binder_deferred_work); ++ } ++ mutex_unlock(&binder_deferred_lock); ++} ++ ++static char *print_binder_transaction(char *buf, char *end, const char *prefix, ++ struct binder_transaction *t) ++{ ++ buf += snprintf(buf, end - buf, ++ "%s %d: %p from %d:%d to %d:%d code %x " ++ "flags %x pri %ld r%d", ++ prefix, t->debug_id, t, ++ t->from ? t->from->proc->pid : 0, ++ t->from ? t->from->pid : 0, ++ t->to_proc ? t->to_proc->pid : 0, ++ t->to_thread ? t->to_thread->pid : 0, ++ t->code, t->flags, t->priority, t->need_reply); ++ if (buf >= end) ++ return buf; ++ if (t->buffer == NULL) { ++ buf += snprintf(buf, end - buf, " buffer free\n"); ++ return buf; ++ } ++ if (t->buffer->target_node) { ++ buf += snprintf(buf, end - buf, " node %d", ++ t->buffer->target_node->debug_id); ++ if (buf >= end) ++ return buf; ++ } ++ buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n", ++ t->buffer->data_size, t->buffer->offsets_size, ++ t->buffer->data); ++ return buf; ++} ++ ++static char *print_binder_buffer(char *buf, char *end, const char *prefix, ++ struct binder_buffer *buffer) ++{ ++ buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n", ++ prefix, buffer->debug_id, buffer->data, ++ buffer->data_size, buffer->offsets_size, ++ buffer->transaction ? 
"active" : "delivered"); ++ return buf; ++} ++ ++static char *print_binder_work(char *buf, char *end, const char *prefix, ++ const char *transaction_prefix, ++ struct binder_work *w) ++{ ++ struct binder_node *node; ++ struct binder_transaction *t; ++ ++ switch (w->type) { ++ case BINDER_WORK_TRANSACTION: ++ t = container_of(w, struct binder_transaction, work); ++ buf = print_binder_transaction(buf, end, transaction_prefix, t); ++ break; ++ case BINDER_WORK_TRANSACTION_COMPLETE: ++ buf += snprintf(buf, end - buf, ++ "%stransaction complete\n", prefix); ++ break; ++ case BINDER_WORK_NODE: ++ node = container_of(w, struct binder_node, work); ++ buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n", ++ prefix, node->debug_id, node->ptr, ++ node->cookie); ++ break; ++ case BINDER_WORK_DEAD_BINDER: ++ buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix); ++ break; ++ case BINDER_WORK_DEAD_BINDER_AND_CLEAR: ++ buf += snprintf(buf, end - buf, ++ "%shas cleared dead binder\n", prefix); ++ break; ++ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: ++ buf += snprintf(buf, end - buf, ++ "%shas cleared death notification\n", prefix); ++ break; ++ default: ++ buf += snprintf(buf, end - buf, "%sunknown work: type %d\n", ++ prefix, w->type); ++ break; ++ } ++ return buf; ++} ++ ++static char *print_binder_thread(char *buf, char *end, ++ struct binder_thread *thread, ++ int print_always) ++{ ++ struct binder_transaction *t; ++ struct binder_work *w; ++ char *start_buf = buf; ++ char *header_buf; ++ ++ buf += snprintf(buf, end - buf, " thread %d: l %02x\n", ++ thread->pid, thread->looper); ++ header_buf = buf; ++ t = thread->transaction_stack; ++ while (t) { ++ if (buf >= end) ++ break; ++ if (t->from == thread) { ++ buf = print_binder_transaction(buf, end, ++ " outgoing transaction", t); ++ t = t->from_parent; ++ } else if (t->to_thread == thread) { ++ buf = print_binder_transaction(buf, end, ++ " incoming transaction", t); ++ t = t->to_parent; ++ } else { ++ buf = print_binder_transaction(buf, end, ++ " bad transaction", t); ++ t = NULL; ++ } ++ } ++ list_for_each_entry(w, &thread->todo, entry) { ++ if (buf >= end) ++ break; ++ buf = print_binder_work(buf, end, " ", ++ " pending transaction", w); ++ } ++ if (!print_always && buf == header_buf) ++ buf = start_buf; ++ return buf; ++} ++ ++static char *print_binder_node(char *buf, char *end, struct binder_node *node) ++{ ++ struct binder_ref *ref; ++ struct hlist_node *pos; ++ struct binder_work *w; ++ int count; ++ ++ count = 0; ++ hlist_for_each_entry(ref, pos, &node->refs, node_entry) ++ count++; ++ ++ buf += snprintf(buf, end - buf, ++ " node %d: u%p c%p hs %d hw %d ls %d lw %d " ++ "is %d iw %d", ++ node->debug_id, node->ptr, node->cookie, ++ node->has_strong_ref, node->has_weak_ref, ++ node->local_strong_refs, node->local_weak_refs, ++ node->internal_strong_refs, count); ++ if (buf >= end) ++ return buf; ++ if (count) { ++ buf += snprintf(buf, end - buf, " proc"); ++ if (buf >= end) ++ return buf; ++ hlist_for_each_entry(ref, pos, &node->refs, node_entry) { ++ buf += snprintf(buf, end - buf, " %d", ref->proc->pid); ++ if (buf >= end) ++ return buf; ++ } ++ } ++ buf += snprintf(buf, end - buf, "\n"); ++ list_for_each_entry(w, &node->async_todo, entry) { ++ if (buf >= end) ++ break; ++ buf = print_binder_work(buf, end, " ", ++ " pending async transaction", w); ++ } ++ return buf; ++} ++ ++static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref) ++{ ++ buf += snprintf(buf, end - buf, ++ " ref %d: desc %d %snode %d s %d w %d d 
%p\n", ++ ref->debug_id, ref->desc, ++ ref->node->proc ? "" : "dead ", ref->node->debug_id, ++ ref->strong, ref->weak, ref->death); ++ return buf; ++} ++ ++static char *print_binder_proc(char *buf, char *end, ++ struct binder_proc *proc, int print_all) ++{ ++ struct binder_work *w; ++ struct rb_node *n; ++ char *start_buf = buf; ++ char *header_buf; ++ ++ buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); ++ header_buf = buf; ++ ++ for (n = rb_first(&proc->threads); ++ n != NULL && buf < end; ++ n = rb_next(n)) ++ buf = print_binder_thread(buf, end, ++ rb_entry(n, struct binder_thread, ++ rb_node), print_all); ++ for (n = rb_first(&proc->nodes); ++ n != NULL && buf < end; ++ n = rb_next(n)) { ++ struct binder_node *node = rb_entry(n, struct binder_node, ++ rb_node); ++ if (print_all || node->has_async_transaction) ++ buf = print_binder_node(buf, end, node); ++ } ++ if (print_all) { ++ for (n = rb_first(&proc->refs_by_desc); ++ n != NULL && buf < end; ++ n = rb_next(n)) ++ buf = print_binder_ref(buf, end, ++ rb_entry(n, struct binder_ref, ++ rb_node_desc)); ++ } ++ for (n = rb_first(&proc->allocated_buffers); ++ n != NULL && buf < end; ++ n = rb_next(n)) ++ buf = print_binder_buffer(buf, end, " buffer", ++ rb_entry(n, struct binder_buffer, ++ rb_node)); ++ list_for_each_entry(w, &proc->todo, entry) { ++ if (buf >= end) ++ break; ++ buf = print_binder_work(buf, end, " ", ++ " pending transaction", w); ++ } ++ list_for_each_entry(w, &proc->delivered_death, entry) { ++ if (buf >= end) ++ break; ++ buf += snprintf(buf, end - buf, ++ " has delivered dead binder\n"); ++ break; ++ } ++ if (!print_all && buf == header_buf) ++ buf = start_buf; ++ return buf; ++} ++ ++static const char *binder_return_strings[] = { ++ "BR_ERROR", ++ "BR_OK", ++ "BR_TRANSACTION", ++ "BR_REPLY", ++ "BR_ACQUIRE_RESULT", ++ "BR_DEAD_REPLY", ++ "BR_TRANSACTION_COMPLETE", ++ "BR_INCREFS", ++ "BR_ACQUIRE", ++ "BR_RELEASE", ++ "BR_DECREFS", ++ "BR_ATTEMPT_ACQUIRE", ++ "BR_NOOP", ++ "BR_SPAWN_LOOPER", ++ "BR_FINISHED", ++ "BR_DEAD_BINDER", ++ "BR_CLEAR_DEATH_NOTIFICATION_DONE", ++ "BR_FAILED_REPLY" ++}; ++ ++static const char *binder_command_strings[] = { ++ "BC_TRANSACTION", ++ "BC_REPLY", ++ "BC_ACQUIRE_RESULT", ++ "BC_FREE_BUFFER", ++ "BC_INCREFS", ++ "BC_ACQUIRE", ++ "BC_RELEASE", ++ "BC_DECREFS", ++ "BC_INCREFS_DONE", ++ "BC_ACQUIRE_DONE", ++ "BC_ATTEMPT_ACQUIRE", ++ "BC_REGISTER_LOOPER", ++ "BC_ENTER_LOOPER", ++ "BC_EXIT_LOOPER", ++ "BC_REQUEST_DEATH_NOTIFICATION", ++ "BC_CLEAR_DEATH_NOTIFICATION", ++ "BC_DEAD_BINDER_DONE" ++}; ++ ++static const char *binder_objstat_strings[] = { ++ "proc", ++ "thread", ++ "node", ++ "ref", ++ "death", ++ "transaction", ++ "transaction_complete" ++}; ++ ++static char *print_binder_stats(char *buf, char *end, const char *prefix, ++ struct binder_stats *stats) ++{ ++ int i; ++ ++ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ++ ARRAY_SIZE(binder_command_strings)); ++ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { ++ if (stats->bc[i]) ++ buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, ++ binder_command_strings[i], ++ stats->bc[i]); ++ if (buf >= end) ++ return buf; ++ } ++ ++ BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ++ ARRAY_SIZE(binder_return_strings)); ++ for (i = 0; i < ARRAY_SIZE(stats->br); i++) { ++ if (stats->br[i]) ++ buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, ++ binder_return_strings[i], stats->br[i]); ++ if (buf >= end) ++ return buf; ++ } ++ ++ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ++ ARRAY_SIZE(binder_objstat_strings)); ++ 
BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ++ ARRAY_SIZE(stats->obj_deleted)); ++ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { ++ if (stats->obj_created[i] || stats->obj_deleted[i]) ++ buf += snprintf(buf, end - buf, ++ "%s%s: active %d total %d\n", prefix, ++ binder_objstat_strings[i], ++ stats->obj_created[i] - ++ stats->obj_deleted[i], ++ stats->obj_created[i]); ++ if (buf >= end) ++ return buf; ++ } ++ return buf; ++} ++ ++static char *print_binder_proc_stats(char *buf, char *end, ++ struct binder_proc *proc) ++{ ++ struct binder_work *w; ++ struct rb_node *n; ++ int count, strong, weak; ++ ++ buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); ++ if (buf >= end) ++ return buf; ++ count = 0; ++ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) ++ count++; ++ buf += snprintf(buf, end - buf, " threads: %d\n", count); ++ if (buf >= end) ++ return buf; ++ buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n" ++ " ready threads %d\n" ++ " free async space %zd\n", proc->requested_threads, ++ proc->requested_threads_started, proc->max_threads, ++ proc->ready_threads, proc->free_async_space); ++ if (buf >= end) ++ return buf; ++ count = 0; ++ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) ++ count++; ++ buf += snprintf(buf, end - buf, " nodes: %d\n", count); ++ if (buf >= end) ++ return buf; ++ count = 0; ++ strong = 0; ++ weak = 0; ++ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ++ struct binder_ref *ref = rb_entry(n, struct binder_ref, ++ rb_node_desc); ++ count++; ++ strong += ref->strong; ++ weak += ref->weak; ++ } ++ buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", ++ count, strong, weak); ++ if (buf >= end) ++ return buf; ++ ++ count = 0; ++ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) ++ count++; ++ buf += snprintf(buf, end - buf, " buffers: %d\n", count); ++ if (buf >= end) ++ return buf; ++ ++ count = 0; ++ list_for_each_entry(w, &proc->todo, entry) { ++ switch (w->type) { ++ case BINDER_WORK_TRANSACTION: ++ count++; ++ break; ++ default: ++ break; ++ } ++ } ++ buf += snprintf(buf, end - buf, " pending transactions: %d\n", count); ++ if (buf >= end) ++ return buf; ++ ++ buf = print_binder_stats(buf, end, " ", &proc->stats); ++ ++ return buf; ++} ++ ++ ++static int binder_read_proc_state(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct binder_proc *proc; ++ struct hlist_node *pos; ++ struct binder_node *node; ++ int len = 0; ++ char *buf = page; ++ char *end = page + PAGE_SIZE; ++ int do_lock = !binder_debug_no_lock; ++ ++ if (off) ++ return 0; ++ ++ if (do_lock) ++ mutex_lock(&binder_lock); ++ ++ buf += snprintf(buf, end - buf, "binder state:\n"); ++ ++ if (!hlist_empty(&binder_dead_nodes)) ++ buf += snprintf(buf, end - buf, "dead nodes:\n"); ++ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) { ++ if (buf >= end) ++ break; ++ buf = print_binder_node(buf, end, node); ++ } ++ ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { ++ if (buf >= end) ++ break; ++ buf = print_binder_proc(buf, end, proc, 1); ++ } ++ if (do_lock) ++ mutex_unlock(&binder_lock); ++ if (buf > page + PAGE_SIZE) ++ buf = page + PAGE_SIZE; ++ ++ *start = page + off; ++ ++ len = buf - page; ++ if (len > off) ++ len -= off; ++ else ++ len = 0; ++ ++ return len < count ? 
len : count; ++} ++ ++static int binder_read_proc_stats(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct binder_proc *proc; ++ struct hlist_node *pos; ++ int len = 0; ++ char *p = page; ++ int do_lock = !binder_debug_no_lock; ++ ++ if (off) ++ return 0; ++ ++ if (do_lock) ++ mutex_lock(&binder_lock); ++ ++ p += snprintf(p, PAGE_SIZE, "binder stats:\n"); ++ ++ p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats); ++ ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { ++ if (p >= page + PAGE_SIZE) ++ break; ++ p = print_binder_proc_stats(p, page + PAGE_SIZE, proc); ++ } ++ if (do_lock) ++ mutex_unlock(&binder_lock); ++ if (p > page + PAGE_SIZE) ++ p = page + PAGE_SIZE; ++ ++ *start = page + off; ++ ++ len = p - page; ++ if (len > off) ++ len -= off; ++ else ++ len = 0; ++ ++ return len < count ? len : count; ++} ++ ++static int binder_read_proc_transactions(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct binder_proc *proc; ++ struct hlist_node *pos; ++ int len = 0; ++ char *buf = page; ++ char *end = page + PAGE_SIZE; ++ int do_lock = !binder_debug_no_lock; ++ ++ if (off) ++ return 0; ++ ++ if (do_lock) ++ mutex_lock(&binder_lock); ++ ++ buf += snprintf(buf, end - buf, "binder transactions:\n"); ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { ++ if (buf >= end) ++ break; ++ buf = print_binder_proc(buf, end, proc, 0); ++ } ++ if (do_lock) ++ mutex_unlock(&binder_lock); ++ if (buf > page + PAGE_SIZE) ++ buf = page + PAGE_SIZE; ++ ++ *start = page + off; ++ ++ len = buf - page; ++ if (len > off) ++ len -= off; ++ else ++ len = 0; ++ ++ return len < count ? len : count; ++} ++ ++static int binder_read_proc_proc(char *page, char **start, off_t off, ++ int count, int *eof, void *data) ++{ ++ struct binder_proc *proc = data; ++ int len = 0; ++ char *p = page; ++ int do_lock = !binder_debug_no_lock; ++ ++ if (off) ++ return 0; ++ ++ if (do_lock) ++ mutex_lock(&binder_lock); ++ p += snprintf(p, PAGE_SIZE, "binder proc state:\n"); ++ p = print_binder_proc(p, page + PAGE_SIZE, proc, 1); ++ if (do_lock) ++ mutex_unlock(&binder_lock); ++ ++ if (p > page + PAGE_SIZE) ++ p = page + PAGE_SIZE; ++ *start = page + off; ++ ++ len = p - page; ++ if (len > off) ++ len -= off; ++ else ++ len = 0; ++ ++ return len < count ? len : count; ++} ++ ++static char *print_binder_transaction_log_entry(char *buf, char *end, ++ struct binder_transaction_log_entry *e) ++{ ++ buf += snprintf(buf, end - buf, ++ "%d: %s from %d:%d to %d:%d node %d handle %d " ++ "size %d:%d\n", ++ e->debug_id, (e->call_type == 2) ? "reply" : ++ ((e->call_type == 1) ? 
"async" : "call "), e->from_proc, ++ e->from_thread, e->to_proc, e->to_thread, e->to_node, ++ e->target_handle, e->data_size, e->offsets_size); ++ return buf; ++} ++ ++static int binder_read_proc_transaction_log( ++ char *page, char **start, off_t off, int count, int *eof, void *data) ++{ ++ struct binder_transaction_log *log = data; ++ int len = 0; ++ int i; ++ char *buf = page; ++ char *end = page + PAGE_SIZE; ++ ++ if (off) ++ return 0; ++ ++ if (log->full) { ++ for (i = log->next; i < ARRAY_SIZE(log->entry); i++) { ++ if (buf >= end) ++ break; ++ buf = print_binder_transaction_log_entry(buf, end, ++ &log->entry[i]); ++ } ++ } ++ for (i = 0; i < log->next; i++) { ++ if (buf >= end) ++ break; ++ buf = print_binder_transaction_log_entry(buf, end, ++ &log->entry[i]); ++ } ++ ++ *start = page + off; ++ ++ len = buf - page; ++ if (len > off) ++ len -= off; ++ else ++ len = 0; ++ ++ return len < count ? len : count; ++} ++ ++static const struct file_operations binder_fops = { ++ .owner = THIS_MODULE, ++ .poll = binder_poll, ++ .unlocked_ioctl = binder_ioctl, ++ .mmap = binder_mmap, ++ .open = binder_open, ++ .flush = binder_flush, ++ .release = binder_release, ++}; ++ ++static struct miscdevice binder_miscdev = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "binder", ++ .fops = &binder_fops ++}; ++ ++static int __init binder_init(void) ++{ ++ int ret; ++ ++ binder_proc_dir_entry_root = proc_mkdir("binder", NULL); ++ if (binder_proc_dir_entry_root) ++ binder_proc_dir_entry_proc = proc_mkdir("proc", ++ binder_proc_dir_entry_root); ++ ret = misc_register(&binder_miscdev); ++ if (binder_proc_dir_entry_root) { ++ create_proc_read_entry("state", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ binder_read_proc_state, ++ NULL); ++ create_proc_read_entry("stats", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ binder_read_proc_stats, ++ NULL); ++ create_proc_read_entry("transactions", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ binder_read_proc_transactions, ++ NULL); ++ create_proc_read_entry("transaction_log", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ binder_read_proc_transaction_log, ++ &binder_transaction_log); ++ create_proc_read_entry("failed_transaction_log", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ binder_read_proc_transaction_log, ++ &binder_transaction_log_failed); ++ } ++ return ret; ++} ++ ++device_initcall(binder_init); ++ ++MODULE_LICENSE("GPL v2"); +diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h +new file mode 100644 +index 0000000..863ae1a +--- /dev/null ++++ b/drivers/staging/android/binder.h +@@ -0,0 +1,330 @@ ++/* ++ * Copyright (C) 2008 Google, Inc. ++ * ++ * Based on, but no longer compatible with, the original ++ * OpenBinder.org binder driver interface, which is: ++ * ++ * Copyright (c) 2005 Palmsource, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#ifndef _LINUX_BINDER_H ++#define _LINUX_BINDER_H ++ ++#include <linux/ioctl.h> ++ ++#define B_PACK_CHARS(c1, c2, c3, c4) \ ++ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4)) ++#define B_TYPE_LARGE 0x85 ++ ++enum { ++ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE), ++ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE), ++ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE), ++ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE), ++ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE), ++}; ++ ++enum { ++ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, ++ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, ++}; ++ ++/* ++ * This is the flattened representation of a Binder object for transfer ++ * between processes. The 'offsets' supplied as part of a binder transaction ++ * contains offsets into the data where these structures occur. The Binder ++ * driver takes care of re-writing the structure type and data as it moves ++ * between processes. ++ */ ++struct flat_binder_object { ++ /* 8 bytes for large_flat_header. */ ++ unsigned long type; ++ unsigned long flags; ++ ++ /* 8 bytes of data. */ ++ union { ++ void *binder; /* local object */ ++ signed long handle; /* remote object */ ++ }; ++ ++ /* extra data associated with local object */ ++ void *cookie; ++}; ++ ++/* ++ * On 64-bit platforms where user code may run in 32-bits the driver must ++ * translate the buffer (and local binder) addresses apropriately. ++ */ ++ ++struct binder_write_read { ++ signed long write_size; /* bytes to write */ ++ signed long write_consumed; /* bytes consumed by driver */ ++ unsigned long write_buffer; ++ signed long read_size; /* bytes to read */ ++ signed long read_consumed; /* bytes consumed by driver */ ++ unsigned long read_buffer; ++}; ++ ++/* Use with BINDER_VERSION, driver fills in fields. */ ++struct binder_version { ++ /* driver protocol version -- increment with incompatible change */ ++ signed long protocol_version; ++}; ++ ++/* This is the current protocol version. */ ++#define BINDER_CURRENT_PROTOCOL_VERSION 7 ++ ++#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) ++#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t) ++#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t) ++#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int) ++#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int) ++#define BINDER_THREAD_EXIT _IOW('b', 8, int) ++#define BINDER_VERSION _IOWR('b', 9, struct binder_version) ++ ++/* ++ * NOTE: Two special error codes you should check for when calling ++ * in to the driver are: ++ * ++ * EINTR -- The operation has been interupted. This should be ++ * handled by retrying the ioctl() until a different error code ++ * is returned. ++ * ++ * ECONNREFUSED -- The driver is no longer accepting operations ++ * from your process. That is, the process is being destroyed. ++ * You should handle this by exiting from your process. Note ++ * that once this error code is returned, all further calls to ++ * the driver from any thread will return this same code. 
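The note above maps onto a small retry idiom on the userspace side. A hypothetical helper, not part of this patch: binder_ioctl_retry() is an invented name and "binder.h" stands for this header.

        #include <errno.h>
        #include <sys/ioctl.h>
        #include "binder.h"

        static int binder_ioctl_retry(int fd, struct binder_write_read *bwr)
        {
                int res;

                do {    /* EINTR: retry the same ioctl */
                        res = ioctl(fd, BINDER_WRITE_READ, bwr);
                } while (res < 0 && errno == EINTR);

                /* If res < 0 and errno == ECONNREFUSED, the driver has shut
                 * this process out for good; the caller should exit. */
                return res;
        }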
++ */ ++ ++enum transaction_flags { ++ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */ ++ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */ ++ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */ ++ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */ ++}; ++ ++struct binder_transaction_data { ++ /* The first two are only used for bcTRANSACTION and brTRANSACTION, ++ * identifying the target and contents of the transaction. ++ */ ++ union { ++ size_t handle; /* target descriptor of command transaction */ ++ void *ptr; /* target descriptor of return transaction */ ++ } target; ++ void *cookie; /* target object cookie */ ++ unsigned int code; /* transaction command */ ++ ++ /* General information about the transaction. */ ++ unsigned int flags; ++ pid_t sender_pid; ++ uid_t sender_euid; ++ size_t data_size; /* number of bytes of data */ ++ size_t offsets_size; /* number of bytes of offsets */ ++ ++ /* If this transaction is inline, the data immediately ++ * follows here; otherwise, it ends with a pointer to ++ * the data buffer. ++ */ ++ union { ++ struct { ++ /* transaction data */ ++ const void *buffer; ++ /* offsets from buffer to flat_binder_object structs */ ++ const void *offsets; ++ } ptr; ++ uint8_t buf[8]; ++ } data; ++}; ++ ++struct binder_ptr_cookie { ++ void *ptr; ++ void *cookie; ++}; ++ ++struct binder_pri_desc { ++ int priority; ++ int desc; ++}; ++ ++struct binder_pri_ptr_cookie { ++ int priority; ++ void *ptr; ++ void *cookie; ++}; ++ ++enum BinderDriverReturnProtocol { ++ BR_ERROR = _IOR('r', 0, int), ++ /* ++ * int: error code ++ */ ++ ++ BR_OK = _IO('r', 1), ++ /* No parameters! */ ++ ++ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), ++ BR_REPLY = _IOR('r', 3, struct binder_transaction_data), ++ /* ++ * binder_transaction_data: the received command. ++ */ ++ ++ BR_ACQUIRE_RESULT = _IOR('r', 4, int), ++ /* ++ * not currently supported ++ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful. ++ * Else the remote object has acquired a primary reference. ++ */ ++ ++ BR_DEAD_REPLY = _IO('r', 5), ++ /* ++ * The target of the last transaction (either a bcTRANSACTION or ++ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters. ++ */ ++ ++ BR_TRANSACTION_COMPLETE = _IO('r', 6), ++ /* ++ * No parameters... always refers to the last transaction requested ++ * (including replies). Note that this will be sent even for ++ * asynchronous transactions. ++ */ ++ ++ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie), ++ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie), ++ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie), ++ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie), ++ /* ++ * void *: ptr to binder ++ * void *: cookie for binder ++ */ ++ ++ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie), ++ /* ++ * not currently supported ++ * int: priority ++ * void *: ptr to binder ++ * void *: cookie for binder ++ */ ++ ++ BR_NOOP = _IO('r', 12), ++ /* ++ * No parameters. Do nothing and examine the next command. It exists ++ * primarily so that we can replace it with a BR_SPAWN_LOOPER command. ++ */ ++ ++ BR_SPAWN_LOOPER = _IO('r', 13), ++ /* ++ * No parameters. The driver has determined that a process has no ++ * threads waiting to service incomming transactions. When a process ++ * receives this command, it must spawn a new service thread and ++ * register it via bcENTER_LOOPER. 
++ */ ++ ++ BR_FINISHED = _IO('r', 14), ++ /* ++ * not currently supported ++ * stop threadpool thread ++ */ ++ ++ BR_DEAD_BINDER = _IOR('r', 15, void *), ++ /* ++ * void *: cookie ++ */ ++ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *), ++ /* ++ * void *: cookie ++ */ ++ ++ BR_FAILED_REPLY = _IO('r', 17), ++ /* ++ * The last transaction (either a bcTRANSACTION or ++ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters. ++ */ ++}; ++ ++enum BinderDriverCommandProtocol { ++ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data), ++ BC_REPLY = _IOW('c', 1, struct binder_transaction_data), ++ /* ++ * binder_transaction_data: the sent command. ++ */ ++ ++ BC_ACQUIRE_RESULT = _IOW('c', 2, int), ++ /* ++ * not currently supported ++ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful. ++ * Else you have acquired a primary reference on the object. ++ */ ++ ++ BC_FREE_BUFFER = _IOW('c', 3, int), ++ /* ++ * void *: ptr to transaction data received on a read ++ */ ++ ++ BC_INCREFS = _IOW('c', 4, int), ++ BC_ACQUIRE = _IOW('c', 5, int), ++ BC_RELEASE = _IOW('c', 6, int), ++ BC_DECREFS = _IOW('c', 7, int), ++ /* ++ * int: descriptor ++ */ ++ ++ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie), ++ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie), ++ /* ++ * void *: ptr to binder ++ * void *: cookie for binder ++ */ ++ ++ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc), ++ /* ++ * not currently supported ++ * int: priority ++ * int: descriptor ++ */ ++ ++ BC_REGISTER_LOOPER = _IO('c', 11), ++ /* ++ * No parameters. ++ * Register a spawned looper thread with the device. ++ */ ++ ++ BC_ENTER_LOOPER = _IO('c', 12), ++ BC_EXIT_LOOPER = _IO('c', 13), ++ /* ++ * No parameters. ++ * These two commands are sent as an application-level thread ++ * enters and exits the binder loop, respectively. They are ++ * used so the binder can have an accurate count of the number ++ * of looping threads it has available. ++ */ ++ ++ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie), ++ /* ++ * void *: ptr to binder ++ * void *: cookie ++ */ ++ ++ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie), ++ /* ++ * void *: ptr to binder ++ * void *: cookie ++ */ ++ ++ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *), ++ /* ++ * void *: cookie ++ */ ++}; ++ ++#endif /* _LINUX_BINDER_H */ ++ +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +new file mode 100644 +index 0000000..6c10b45 +--- /dev/null ++++ b/drivers/staging/android/logger.c +@@ -0,0 +1,607 @@ ++/* ++ * drivers/misc/logger.c ++ * ++ * A Logging Subsystem ++ * ++ * Copyright (C) 2007-2008 Google, Inc. ++ * ++ * Robert Love <rlove@google.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details.
++ */ ++ ++#include <linux/module.h> ++#include <linux/fs.h> ++#include <linux/miscdevice.h> ++#include <linux/uaccess.h> ++#include <linux/poll.h> ++#include <linux/time.h> ++#include "logger.h" ++ ++#include <asm/ioctls.h> ++ ++/* ++ * struct logger_log - represents a specific log, such as 'main' or 'radio' ++ * ++ * This structure lives from module insertion until module removal, so it does ++ * not need additional reference counting. The structure is protected by the ++ * mutex 'mutex'. ++ */ ++struct logger_log { ++ unsigned char *buffer;/* the ring buffer itself */ ++ struct miscdevice misc; /* misc device representing the log */ ++ wait_queue_head_t wq; /* wait queue for readers */ ++ struct list_head readers; /* this log's readers */ ++ struct mutex mutex; /* mutex protecting buffer */ ++ size_t w_off; /* current write head offset */ ++ size_t head; /* new readers start here */ ++ size_t size; /* size of the log */ ++}; ++ ++/* ++ * struct logger_reader - a logging device open for reading ++ * ++ * This object lives from open to release, so we don't need additional ++ * reference counting. The structure is protected by log->mutex. ++ */ ++struct logger_reader { ++ struct logger_log *log; /* associated log */ ++ struct list_head list; /* entry in logger_log's list */ ++ size_t r_off; /* current read head offset */ ++}; ++ ++/* logger_offset - returns index 'n' into the log via (optimized) modulus */ ++#define logger_offset(n) ((n) & (log->size - 1)) ++ ++/* ++ * file_get_log - Given a file structure, return the associated log ++ * ++ * This isn't aesthetic. We have several goals: ++ * ++ * 1) Need to quickly obtain the associated log during an I/O operation ++ * 2) Readers need to maintain state (logger_reader) ++ * 3) Writers need to be very fast (open() should be a near no-op) ++ * ++ * In the reader case, we can trivially go file->logger_reader->logger_log. ++ * For a writer, we don't want to maintain a logger_reader, so we just go ++ * file->logger_log. Thus what file->private_data points at depends on whether ++ * or not the file was opened for reading. This function hides that dirtiness. ++ */ ++static inline struct logger_log *file_get_log(struct file *file) ++{ ++ if (file->f_mode & FMODE_READ) { ++ struct logger_reader *reader = file->private_data; ++ return reader->log; ++ } else ++ return file->private_data; ++} ++ ++/* ++ * get_entry_len - Grabs the length of the payload of the next entry starting ++ * from 'off'. ++ * ++ * Caller needs to hold log->mutex. ++ */ ++static __u32 get_entry_len(struct logger_log *log, size_t off) ++{ ++ __u16 val; ++ ++ switch (log->size - off) { ++ case 1: ++ memcpy(&val, log->buffer + off, 1); ++ memcpy(((char *) &val) + 1, log->buffer, 1); ++ break; ++ default: ++ memcpy(&val, log->buffer + off, 2); ++ } ++ ++ return sizeof(struct logger_entry) + val; ++} ++ ++/* ++ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the ++ * user-space buffer 'buf'. Returns 'count' on success. ++ * ++ * Caller must hold log->mutex. ++ */ ++static ssize_t do_read_log_to_user(struct logger_log *log, ++ struct logger_reader *reader, ++ char __user *buf, ++ size_t count) ++{ ++ size_t len; ++ ++ /* ++ * We read from the log in two disjoint operations. First, we read from ++ * the current read head offset up to 'count' bytes or to the end of ++ * the log, whichever comes first. 
++ */ ++ len = min(count, log->size - reader->r_off); ++ if (copy_to_user(buf, log->buffer + reader->r_off, len)) ++ return -EFAULT; ++ ++ /* ++ * Second, we read any remaining bytes, starting back at the head of ++ * the log. ++ */ ++ if (count != len) ++ if (copy_to_user(buf + len, log->buffer, count - len)) ++ return -EFAULT; ++ ++ reader->r_off = logger_offset(reader->r_off + count); ++ ++ return count; ++} ++ ++/* ++ * logger_read - our log's read() method ++ * ++ * Behavior: ++ * ++ * - O_NONBLOCK works ++ * - If there are no log entries to read, blocks until log is written to ++ * - Atomically reads exactly one log entry ++ * ++ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read ++ * buffer is insufficient to hold next entry. ++ */ ++static ssize_t logger_read(struct file *file, char __user *buf, ++ size_t count, loff_t *pos) ++{ ++ struct logger_reader *reader = file->private_data; ++ struct logger_log *log = reader->log; ++ ssize_t ret; ++ DEFINE_WAIT(wait); ++ ++start: ++ while (1) { ++ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); ++ ++ mutex_lock(&log->mutex); ++ ret = (log->w_off == reader->r_off); ++ mutex_unlock(&log->mutex); ++ if (!ret) ++ break; ++ ++ if (file->f_flags & O_NONBLOCK) { ++ ret = -EAGAIN; ++ break; ++ } ++ ++ if (signal_pending(current)) { ++ ret = -EINTR; ++ break; ++ } ++ ++ schedule(); ++ } ++ ++ finish_wait(&log->wq, &wait); ++ if (ret) ++ return ret; ++ ++ mutex_lock(&log->mutex); ++ ++ /* is there still something to read or did we race? */ ++ if (unlikely(log->w_off == reader->r_off)) { ++ mutex_unlock(&log->mutex); ++ goto start; ++ } ++ ++ /* get the size of the next entry */ ++ ret = get_entry_len(log, reader->r_off); ++ if (count < ret) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* get exactly one entry from the log */ ++ ret = do_read_log_to_user(log, reader, buf, ret); ++ ++out: ++ mutex_unlock(&log->mutex); ++ ++ return ret; ++} ++ ++/* ++ * get_next_entry - return the offset of the first valid entry at least 'len' ++ * bytes after 'off'. ++ * ++ * Caller must hold log->mutex. ++ */ ++static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) ++{ ++ size_t count = 0; ++ ++ do { ++ size_t nr = get_entry_len(log, off); ++ off = logger_offset(off + nr); ++ count += nr; ++ } while (count < len); ++ ++ return off; ++} ++ ++/* ++ * clock_interval - is a < c < b in mod-space? Put another way, does the line ++ * from a to b cross c? ++ */ ++static inline int clock_interval(size_t a, size_t b, size_t c) ++{ ++ if (b < a) { ++ if (a < c || b >= c) ++ return 1; ++ } else { ++ if (a < c && b >= c) ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/* ++ * fix_up_readers - walk the list of all readers and "fix up" any who were ++ * lapped by the writer; also do the same for the default "start head". ++ * We do this by "pulling forward" the readers and start head to the first ++ * entry after the new write head. ++ * ++ * The caller needs to hold log->mutex. ++ */ ++static void fix_up_readers(struct logger_log *log, size_t len) ++{ ++ size_t old = log->w_off; ++ size_t new = logger_offset(old + len); ++ struct logger_reader *reader; ++ ++ if (clock_interval(old, new, log->head)) ++ log->head = get_next_entry(log, log->head, len); ++ ++ list_for_each_entry(reader, &log->readers, list) ++ if (clock_interval(old, new, reader->r_off)) ++ reader->r_off = get_next_entry(log, reader->r_off, len); ++} ++ ++/* ++ * do_write_log - writes 'len' bytes from 'buf' to 'log' ++ * ++ * The caller needs to hold log->mutex. 
++ */ ++static void do_write_log(struct logger_log *log, const void *buf, size_t count) ++{ ++ size_t len; ++ ++ len = min(count, log->size - log->w_off); ++ memcpy(log->buffer + log->w_off, buf, len); ++ ++ if (count != len) ++ memcpy(log->buffer, buf + len, count - len); ++ ++ log->w_off = logger_offset(log->w_off + count); ++ ++} ++ ++/* ++ * do_write_log_user - writes 'len' bytes from the user-space buffer 'buf' to ++ * the log 'log' ++ * ++ * The caller needs to hold log->mutex. ++ * ++ * Returns 'count' on success, negative error code on failure. ++ */ ++static ssize_t do_write_log_from_user(struct logger_log *log, ++ const void __user *buf, size_t count) ++{ ++ size_t len; ++ ++ len = min(count, log->size - log->w_off); ++ if (len && copy_from_user(log->buffer + log->w_off, buf, len)) ++ return -EFAULT; ++ ++ if (count != len) ++ if (copy_from_user(log->buffer, buf + len, count - len)) ++ return -EFAULT; ++ ++ log->w_off = logger_offset(log->w_off + count); ++ ++ return count; ++} ++ ++/* ++ * logger_aio_write - our write method, implementing support for write(), ++ * writev(), and aio_write(). Writes are our fast path, and we try to optimize ++ * them above all else. ++ */ ++ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, ++ unsigned long nr_segs, loff_t ppos) ++{ ++ struct logger_log *log = file_get_log(iocb->ki_filp); ++ size_t orig = log->w_off; ++ struct logger_entry header; ++ struct timespec now; ++ ssize_t ret = 0; ++ ++ now = current_kernel_time(); ++ ++ header.pid = current->tgid; ++ header.tid = current->pid; ++ header.sec = now.tv_sec; ++ header.nsec = now.tv_nsec; ++ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); ++ ++ /* null writes succeed, return zero */ ++ if (unlikely(!header.len)) ++ return 0; ++ ++ mutex_lock(&log->mutex); ++ ++ /* ++ * Fix up any readers, pulling them forward to the first readable ++ * entry after (what will be) the new write offset. We do this now ++ * because if we partially fail, we can end up with clobbered log ++ * entries that encroach on readable buffer. ++ */ ++ fix_up_readers(log, sizeof(struct logger_entry) + header.len); ++ ++ do_write_log(log, &header, sizeof(struct logger_entry)); ++ ++ while (nr_segs-- > 0) { ++ size_t len; ++ ssize_t nr; ++ ++ /* figure out how much of this vector we can keep */ ++ len = min_t(size_t, iov->iov_len, header.len - ret); ++ ++ /* write out this segment's payload */ ++ nr = do_write_log_from_user(log, iov->iov_base, len); ++ if (unlikely(nr < 0)) { ++ log->w_off = orig; ++ mutex_unlock(&log->mutex); ++ return nr; ++ } ++ ++ iov++; ++ ret += nr; ++ } ++ ++ mutex_unlock(&log->mutex); ++ ++ /* wake up any blocked readers */ ++ wake_up_interruptible(&log->wq); ++ ++ return ret; ++} ++ ++static struct logger_log *get_log_from_minor(int); ++ ++/* ++ * logger_open - the log's open() file operation ++ * ++ * Note how near a no-op this is in the write-only case. Keep it that way! 
++ */ ++static int logger_open(struct inode *inode, struct file *file) ++{ ++ struct logger_log *log; ++ int ret; ++ ++ ret = nonseekable_open(inode, file); ++ if (ret) ++ return ret; ++ ++ log = get_log_from_minor(MINOR(inode->i_rdev)); ++ if (!log) ++ return -ENODEV; ++ ++ if (file->f_mode & FMODE_READ) { ++ struct logger_reader *reader; ++ ++ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL); ++ if (!reader) ++ return -ENOMEM; ++ ++ reader->log = log; ++ INIT_LIST_HEAD(&reader->list); ++ ++ mutex_lock(&log->mutex); ++ reader->r_off = log->head; ++ list_add_tail(&reader->list, &log->readers); ++ mutex_unlock(&log->mutex); ++ ++ file->private_data = reader; ++ } else ++ file->private_data = log; ++ ++ return 0; ++} ++ ++/* ++ * logger_release - the log's release file operation ++ * ++ * Note this is a total no-op in the write-only case. Keep it that way! ++ */ ++static int logger_release(struct inode *ignored, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ) { ++ struct logger_reader *reader = file->private_data; ++ list_del(&reader->list); ++ kfree(reader); ++ } ++ ++ return 0; ++} ++ ++/* ++ * logger_poll - the log's poll file operation, for poll/select/epoll ++ * ++ * Note we always return POLLOUT, because you can always write() to the log. ++ * Note also that, strictly speaking, a return value of POLLIN does not ++ * guarantee that the log is readable without blocking, as there is a small ++ * chance that the writer can lap the reader in the interim between poll() ++ * returning and the read() request. ++ */ ++static unsigned int logger_poll(struct file *file, poll_table *wait) ++{ ++ struct logger_reader *reader; ++ struct logger_log *log; ++ unsigned int ret = POLLOUT | POLLWRNORM; ++ ++ if (!(file->f_mode & FMODE_READ)) ++ return ret; ++ ++ reader = file->private_data; ++ log = reader->log; ++ ++ poll_wait(file, &log->wq, wait); ++ ++ mutex_lock(&log->mutex); ++ if (log->w_off != reader->r_off) ++ ret |= POLLIN | POLLRDNORM; ++ mutex_unlock(&log->mutex); ++ ++ return ret; ++} ++ ++static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct logger_log *log = file_get_log(file); ++ struct logger_reader *reader; ++ long ret = -ENOTTY; ++ ++ mutex_lock(&log->mutex); ++ ++ switch (cmd) { ++ case LOGGER_GET_LOG_BUF_SIZE: ++ ret = log->size; ++ break; ++ case LOGGER_GET_LOG_LEN: ++ if (!(file->f_mode & FMODE_READ)) { ++ ret = -EBADF; ++ break; ++ } ++ reader = file->private_data; ++ if (log->w_off >= reader->r_off) ++ ret = log->w_off - reader->r_off; ++ else ++ ret = (log->size - reader->r_off) + log->w_off; ++ break; ++ case LOGGER_GET_NEXT_ENTRY_LEN: ++ if (!(file->f_mode & FMODE_READ)) { ++ ret = -EBADF; ++ break; ++ } ++ reader = file->private_data; ++ if (log->w_off != reader->r_off) ++ ret = get_entry_len(log, reader->r_off); ++ else ++ ret = 0; ++ break; ++ case LOGGER_FLUSH_LOG: ++ if (!(file->f_mode & FMODE_WRITE)) { ++ ret = -EBADF; ++ break; ++ } ++ list_for_each_entry(reader, &log->readers, list) ++ reader->r_off = log->w_off; ++ log->head = log->w_off; ++ ret = 0; ++ break; ++ } ++ ++ mutex_unlock(&log->mutex); ++ ++ return ret; ++} ++ ++static const struct file_operations logger_fops = { ++ .owner = THIS_MODULE, ++ .read = logger_read, ++ .aio_write = logger_aio_write, ++ .poll = logger_poll, ++ .unlocked_ioctl = logger_ioctl, ++ .compat_ioctl = logger_ioctl, ++ .open = logger_open, ++ .release = logger_release, ++}; ++ ++/* ++ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which ++ * must be a 
power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than ++ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN. ++ */ ++#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \ ++static unsigned char _buf_ ## VAR[SIZE]; \ ++static struct logger_log VAR = { \ ++ .buffer = _buf_ ## VAR, \ ++ .misc = { \ ++ .minor = MISC_DYNAMIC_MINOR, \ ++ .name = NAME, \ ++ .fops = &logger_fops, \ ++ .parent = NULL, \ ++ }, \ ++ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \ ++ .readers = LIST_HEAD_INIT(VAR .readers), \ ++ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \ ++ .w_off = 0, \ ++ .head = 0, \ ++ .size = SIZE, \ ++}; ++ ++DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) ++DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) ++DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) ++ ++static struct logger_log *get_log_from_minor(int minor) ++{ ++ if (log_main.misc.minor == minor) ++ return &log_main; ++ if (log_events.misc.minor == minor) ++ return &log_events; ++ if (log_radio.misc.minor == minor) ++ return &log_radio; ++ return NULL; ++} ++ ++static int __init init_log(struct logger_log *log) ++{ ++ int ret; ++ ++ ret = misc_register(&log->misc); ++ if (unlikely(ret)) { ++ printk(KERN_ERR "logger: failed to register misc " ++ "device for log '%s'!\n", log->misc.name); ++ return ret; ++ } ++ ++ printk(KERN_INFO "logger: created %luK log '%s'\n", ++ (unsigned long) log->size >> 10, log->misc.name); ++ ++ return 0; ++} ++ ++static int __init logger_init(void) ++{ ++ int ret; ++ ++ ret = init_log(&log_main); ++ if (unlikely(ret)) ++ goto out; ++ ++ ret = init_log(&log_events); ++ if (unlikely(ret)) ++ goto out; ++ ++ ret = init_log(&log_radio); ++ if (unlikely(ret)) ++ goto out; ++ ++out: ++ return ret; ++} ++device_initcall(logger_init); +diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h +new file mode 100644 +index 0000000..a562434 +--- /dev/null ++++ b/drivers/staging/android/logger.h +@@ -0,0 +1,48 @@ ++/* include/linux/logger.h ++ * ++ * Copyright (C) 2007-2008 Google, Inc. ++ * Author: Robert Love <rlove@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#ifndef _LINUX_LOGGER_H ++#define _LINUX_LOGGER_H ++ ++#include <linux/types.h> ++#include <linux/ioctl.h> ++ ++struct logger_entry { ++ __u16 len; /* length of the payload */ ++ __u16 __pad; /* no matter what, we get 2 bytes of padding */ ++ __s32 pid; /* generating process's pid */ ++ __s32 tid; /* generating process's tid */ ++ __s32 sec; /* seconds since Epoch */ ++ __s32 nsec; /* nanoseconds */ ++ char msg[0]; /* the entry's payload */ ++}; ++ ++#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ ++#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ ++#define LOGGER_LOG_MAIN "log_main" /* everything else */ ++ ++#define LOGGER_ENTRY_MAX_LEN (4*1024) ++#define LOGGER_ENTRY_MAX_PAYLOAD \ ++ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) ++ ++#define __LOGGERIO 0xAE ++ ++#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */ ++#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ ++#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ ++#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ ++ ++#endif /* _LINUX_LOGGER_H */ +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +new file mode 100644 +index 0000000..935d281 +--- /dev/null ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -0,0 +1,173 @@ ++/* drivers/misc/lowmemorykiller.c ++ * ++ * The lowmemorykiller driver lets user-space specify a set of memory thresholds ++ * where processes with a range of oom_adj values will get killed. Specify the ++ * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the ++ * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both ++ * files take a comma separated list of numbers in ascending order. ++ * ++ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and ++ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes ++ * with an oom_adj value of 8 or higher when the free memory drops below 4096 pages ++ * and kill processes with an oom_adj value of 0 or higher when the free memory ++ * drops below 1024 pages. ++ * ++ * The driver considers memory used for caches to be free, but if a large ++ * percentage of the cached memory is locked this can be very inaccurate ++ * and processes may not get killed until the normal oom killer is triggered. ++ * ++ * Copyright (C) 2007-2008 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include <linux/module.h> ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/oom.h> ++#include <linux/sched.h> ++ ++static uint32_t lowmem_debug_level = 2; ++static int lowmem_adj[6] = { ++ 0, ++ 1, ++ 6, ++ 12, ++}; ++static int lowmem_adj_size = 4; ++static size_t lowmem_minfree[6] = { ++ 3 * 512, /* 6MB */ ++ 2 * 1024, /* 8MB */ ++ 4 * 1024, /* 16MB */ ++ 16 * 1024, /* 64MB */ ++}; ++static int lowmem_minfree_size = 4; ++ ++#define lowmem_print(level, x...)
\ ++ do { \ ++ if (lowmem_debug_level >= (level)) \ ++ printk(x); \ ++ } while (0) ++ ++static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) ++{ ++ struct task_struct *p; ++ struct task_struct *selected = NULL; ++ int rem = 0; ++ int tasksize; ++ int i; ++ int min_adj = OOM_ADJUST_MAX + 1; ++ int selected_tasksize = 0; ++ int selected_oom_adj; ++ int array_size = ARRAY_SIZE(lowmem_adj); ++ int other_free = global_page_state(NR_FREE_PAGES); ++ int other_file = global_page_state(NR_FILE_PAGES); ++ ++ if (lowmem_adj_size < array_size) ++ array_size = lowmem_adj_size; ++ if (lowmem_minfree_size < array_size) ++ array_size = lowmem_minfree_size; ++ for (i = 0; i < array_size; i++) { ++ if (other_free < lowmem_minfree[i] && ++ other_file < lowmem_minfree[i]) { ++ min_adj = lowmem_adj[i]; ++ break; ++ } ++ } ++ if (nr_to_scan > 0) ++ lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n", ++ nr_to_scan, gfp_mask, other_free, other_file, ++ min_adj); ++ rem = global_page_state(NR_ACTIVE_ANON) + ++ global_page_state(NR_ACTIVE_FILE) + ++ global_page_state(NR_INACTIVE_ANON) + ++ global_page_state(NR_INACTIVE_FILE); ++ if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { ++ lowmem_print(5, "lowmem_shrink %d, %x, return %d\n", ++ nr_to_scan, gfp_mask, rem); ++ return rem; ++ } ++ selected_oom_adj = min_adj; ++ ++ read_lock(&tasklist_lock); ++ for_each_process(p) { ++ struct mm_struct *mm; ++ int oom_adj; ++ ++ task_lock(p); ++ mm = p->mm; ++ if (!mm) { ++ task_unlock(p); ++ continue; ++ } ++ oom_adj = mm->oom_adj; ++ if (oom_adj < min_adj) { ++ task_unlock(p); ++ continue; ++ } ++ tasksize = get_mm_rss(mm); ++ task_unlock(p); ++ if (tasksize <= 0) ++ continue; ++ if (selected) { ++ if (oom_adj < selected_oom_adj) ++ continue; ++ if (oom_adj == selected_oom_adj && ++ tasksize <= selected_tasksize) ++ continue; ++ } ++ selected = p; ++ selected_tasksize = tasksize; ++ selected_oom_adj = oom_adj; ++ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", ++ p->pid, p->comm, oom_adj, tasksize); ++ } ++ if (selected) { ++ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", ++ selected->pid, selected->comm, ++ selected_oom_adj, selected_tasksize); ++ force_sig(SIGKILL, selected); ++ rem -= selected_tasksize; ++ } ++ lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", ++ nr_to_scan, gfp_mask, rem); ++ read_unlock(&tasklist_lock); ++ return rem; ++} ++ ++static struct shrinker lowmem_shrinker = { ++ .shrink = lowmem_shrink, ++ .seeks = DEFAULT_SEEKS * 16 ++}; ++ ++static int __init lowmem_init(void) ++{ ++ register_shrinker(&lowmem_shrinker); ++ return 0; ++} ++ ++static void __exit lowmem_exit(void) ++{ ++ unregister_shrinker(&lowmem_shrinker); ++} ++ ++module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); ++module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, ++ S_IRUGO | S_IWUSR); ++module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, ++ S_IRUGO | S_IWUSR); ++module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); ++ ++module_init(lowmem_init); ++module_exit(lowmem_exit); ++ ++MODULE_LICENSE("GPL"); ++ +diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c +new file mode 100644 +index 0000000..8f18a59 +--- /dev/null ++++ b/drivers/staging/android/ram_console.c +@@ -0,0 +1,410 @@ ++/* drivers/android/ram_console.c ++ * ++ * Copyright (C) 2007-2008 Google, Inc. 
++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#include <linux/console.h> ++#include <linux/init.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/proc_fs.h> ++#include <linux/string.h> ++#include <linux/uaccess.h> ++#include <linux/io.h> ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++#include <linux/rslib.h> ++#endif ++ ++struct ram_console_buffer { ++ uint32_t sig; ++ uint32_t start; ++ uint32_t size; ++ uint8_t data[0]; ++}; ++ ++#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */ ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT ++static char __initdata ++ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE]; ++#endif ++static char *ram_console_old_log; ++static size_t ram_console_old_log_size; ++ ++static struct ram_console_buffer *ram_console_buffer; ++static size_t ram_console_buffer_size; ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++static char *ram_console_par_buffer; ++static struct rs_control *ram_console_rs_decoder; ++static int ram_console_corrected_bytes; ++static int ram_console_bad_blocks; ++#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE ++#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE ++#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE ++#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL ++#endif ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc) ++{ ++ int i; ++ uint16_t par[ECC_SIZE]; ++ /* Initialize the parity buffer */ ++ memset(par, 0, sizeof(par)); ++ encode_rs8(ram_console_rs_decoder, data, len, par, 0); ++ for (i = 0; i < ECC_SIZE; i++) ++ ecc[i] = par[i]; ++} ++ ++static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc) ++{ ++ int i; ++ uint16_t par[ECC_SIZE]; ++ for (i = 0; i < ECC_SIZE; i++) ++ par[i] = ecc[i]; ++ return decode_rs8(ram_console_rs_decoder, data, par, len, ++ NULL, 0, NULL, 0, NULL); ++} ++#endif ++ ++static void ram_console_update(const char *s, unsigned int count) ++{ ++ struct ram_console_buffer *buffer = ram_console_buffer; ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ uint8_t *buffer_end = buffer->data + ram_console_buffer_size; ++ uint8_t *block; ++ uint8_t *par; ++ int size = ECC_BLOCK_SIZE; ++#endif ++ memcpy(buffer->data + buffer->start, s, count); ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1)); ++ par = ram_console_par_buffer + ++ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE; ++ do { ++ if (block + ECC_BLOCK_SIZE > buffer_end) ++ size = buffer_end - block; ++ ram_console_encode_rs8(block, size, par); ++ block += ECC_BLOCK_SIZE; ++ par += ECC_SIZE; ++ } while (block < buffer->data + buffer->start + count); ++#endif ++} ++ ++static void ram_console_update_header(void) ++{ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ struct ram_console_buffer *buffer = ram_console_buffer; ++ uint8_t *par; ++ par = ram_console_par_buffer + ++ 
DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; ++ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par); ++#endif ++} ++ ++static void ++ram_console_write(struct console *console, const char *s, unsigned int count) ++{ ++ int rem; ++ struct ram_console_buffer *buffer = ram_console_buffer; ++ ++ if (count > ram_console_buffer_size) { ++ s += count - ram_console_buffer_size; ++ count = ram_console_buffer_size; ++ } ++ rem = ram_console_buffer_size - buffer->start; ++ if (rem < count) { ++ ram_console_update(s, rem); ++ s += rem; ++ count -= rem; ++ buffer->start = 0; ++ buffer->size = ram_console_buffer_size; ++ } ++ ram_console_update(s, count); ++ ++ buffer->start += count; ++ if (buffer->size < ram_console_buffer_size) ++ buffer->size += count; ++ ram_console_update_header(); ++} ++ ++static struct console ram_console = { ++ .name = "ram", ++ .write = ram_console_write, ++ .flags = CON_PRINTBUFFER | CON_ENABLED, ++ .index = -1, ++}; ++ ++static void __init ++ram_console_save_old(struct ram_console_buffer *buffer, char *dest) ++{ ++ size_t old_log_size = buffer->size; ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ uint8_t *block; ++ uint8_t *par; ++ char strbuf[80]; ++ int strbuf_len; ++ ++ block = buffer->data; ++ par = ram_console_par_buffer; ++ while (block < buffer->data + buffer->size) { ++ int numerr; ++ int size = ECC_BLOCK_SIZE; ++ if (block + size > buffer->data + ram_console_buffer_size) ++ size = buffer->data + ram_console_buffer_size - block; ++ numerr = ram_console_decode_rs8(block, size, par); ++ if (numerr > 0) { ++#if 0 ++ printk(KERN_INFO "ram_console: error in block %p, %d\n", ++ block, numerr); ++#endif ++ ram_console_corrected_bytes += numerr; ++ } else if (numerr < 0) { ++#if 0 ++ printk(KERN_INFO "ram_console: uncorrectable error in " ++ "block %p\n", block); ++#endif ++ ram_console_bad_blocks++; ++ } ++ block += ECC_BLOCK_SIZE; ++ par += ECC_SIZE; ++ } ++ if (ram_console_corrected_bytes || ram_console_bad_blocks) ++ strbuf_len = snprintf(strbuf, sizeof(strbuf), ++ "\n%d Corrected bytes, %d unrecoverable blocks\n", ++ ram_console_corrected_bytes, ram_console_bad_blocks); ++ else ++ strbuf_len = snprintf(strbuf, sizeof(strbuf), ++ "\nNo errors detected\n"); ++ if (strbuf_len >= sizeof(strbuf)) ++ strbuf_len = sizeof(strbuf) - 1; ++ old_log_size += strbuf_len; ++#endif ++ ++ if (dest == NULL) { ++ dest = kmalloc(old_log_size, GFP_KERNEL); ++ if (dest == NULL) { ++ printk(KERN_ERR ++ "ram_console: failed to allocate buffer\n"); ++ return; ++ } ++ } ++ ++ ram_console_old_log = dest; ++ ram_console_old_log_size = old_log_size; ++ memcpy(ram_console_old_log, ++ &buffer->data[buffer->start], buffer->size - buffer->start); ++ memcpy(ram_console_old_log + buffer->size - buffer->start, ++ &buffer->data[0], buffer->start); ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ memcpy(ram_console_old_log + old_log_size - strbuf_len, ++ strbuf, strbuf_len); ++#endif ++} ++ ++static int __init ram_console_init(struct ram_console_buffer *buffer, ++ size_t buffer_size, char *old_buf) ++{ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ int numerr; ++ uint8_t *par; ++#endif ++ ram_console_buffer = buffer; ++ ram_console_buffer_size = ++ buffer_size - sizeof(struct ram_console_buffer); ++ ++ if (ram_console_buffer_size > buffer_size) { ++ pr_err("ram_console: buffer %p, invalid size %zu, " ++ "datasize %zu\n", buffer, buffer_size, ++ ram_console_buffer_size); ++ return 0; ++ } ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION ++ 
ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size, ++ ECC_BLOCK_SIZE) + 1) * ECC_SIZE; ++ ++ if (ram_console_buffer_size > buffer_size) { ++ pr_err("ram_console: buffer %p, invalid size %zu, " ++ "non-ecc datasize %zu\n", ++ buffer, buffer_size, ram_console_buffer_size); ++ return 0; ++ } ++ ++ ram_console_par_buffer = buffer->data + ram_console_buffer_size; ++ ++ ++ /* first consecutive root is 0 ++ * primitive element to generate roots = 1 ++ */ ++ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE); ++ if (ram_console_rs_decoder == NULL) { ++ printk(KERN_INFO "ram_console: init_rs failed\n"); ++ return 0; ++ } ++ ++ ram_console_corrected_bytes = 0; ++ ram_console_bad_blocks = 0; ++ ++ par = ram_console_par_buffer + ++ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE; ++ ++ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par); ++ if (numerr > 0) { ++ printk(KERN_INFO "ram_console: error in header, %d\n", numerr); ++ ram_console_corrected_bytes += numerr; ++ } else if (numerr < 0) { ++ printk(KERN_INFO ++ "ram_console: uncorrectable error in header\n"); ++ ram_console_bad_blocks++; ++ } ++#endif ++ ++ if (buffer->sig == RAM_CONSOLE_SIG) { ++ if (buffer->size > ram_console_buffer_size ++ || buffer->start > buffer->size) ++ printk(KERN_INFO "ram_console: found existing invalid " ++ "buffer, size %d, start %d\n", ++ buffer->size, buffer->start); ++ else { ++ printk(KERN_INFO "ram_console: found existing buffer, " ++ "size %d, start %d\n", ++ buffer->size, buffer->start); ++ ram_console_save_old(buffer, old_buf); ++ } ++ } else { ++ printk(KERN_INFO "ram_console: no valid data in buffer " ++ "(sig = 0x%08x)\n", buffer->sig); ++ } ++ ++ buffer->sig = RAM_CONSOLE_SIG; ++ buffer->start = 0; ++ buffer->size = 0; ++ ++ register_console(&ram_console); ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE ++ console_verbose(); ++#endif ++ return 0; ++} ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT ++static int __init ram_console_early_init(void) ++{ ++ return ram_console_init((struct ram_console_buffer *) ++ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR, ++ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE, ++ ram_console_old_log_init_buffer); ++} ++#else ++static int ram_console_driver_probe(struct platform_device *pdev) ++{ ++ struct resource *res = pdev->resource; ++ size_t start; ++ size_t buffer_size; ++ void *buffer; ++ ++ if (res == NULL || pdev->num_resources != 1 || ++ !(res->flags & IORESOURCE_MEM)) { ++ printk(KERN_ERR "ram_console: invalid resource, %p %d flags " ++ "%lx\n", res, pdev->num_resources, res ? 
res->flags : 0); ++ return -ENXIO; ++ } ++ buffer_size = res->end - res->start + 1; ++ start = res->start; ++ printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n", ++ start, buffer_size); ++ buffer = ioremap(res->start, buffer_size); ++ if (buffer == NULL) { ++ printk(KERN_ERR "ram_console: failed to map memory\n"); ++ return -ENOMEM; ++ } ++ ++ return ram_console_init(buffer, buffer_size, NULL/* allocate */); ++} ++ ++static struct platform_driver ram_console_driver = { ++ .probe = ram_console_driver_probe, ++ .driver = { ++ .name = "ram_console", ++ }, ++}; ++ ++static int __init ram_console_module_init(void) ++{ ++ int err; ++ err = platform_driver_register(&ram_console_driver); ++ return err; ++} ++#endif ++ ++static ssize_t ram_console_read_old(struct file *file, char __user *buf, ++ size_t len, loff_t *offset) ++{ ++ loff_t pos = *offset; ++ ssize_t count; ++ ++ if (pos >= ram_console_old_log_size) ++ return 0; ++ ++ count = min(len, (size_t)(ram_console_old_log_size - pos)); ++ if (copy_to_user(buf, ram_console_old_log + pos, count)) ++ return -EFAULT; ++ ++ *offset += count; ++ return count; ++} ++ ++static const struct file_operations ram_console_file_ops = { ++ .owner = THIS_MODULE, ++ .read = ram_console_read_old, ++}; ++ ++static int __init ram_console_late_init(void) ++{ ++ struct proc_dir_entry *entry; ++ ++ if (ram_console_old_log == NULL) ++ return 0; ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT ++ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL); ++ if (ram_console_old_log == NULL) { ++ printk(KERN_ERR ++ "ram_console: failed to allocate buffer for old log\n"); ++ ram_console_old_log_size = 0; ++ return 0; ++ } ++ memcpy(ram_console_old_log, ++ ram_console_old_log_init_buffer, ram_console_old_log_size); ++#endif ++ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL); ++ if (!entry) { ++ printk(KERN_ERR "ram_console: failed to create proc entry\n"); ++ kfree(ram_console_old_log); ++ ram_console_old_log = NULL; ++ return 0; ++ } ++ ++ entry->proc_fops = &ram_console_file_ops; ++ entry->size = ram_console_old_log_size; ++ return 0; ++} ++ ++#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT ++console_initcall(ram_console_early_init); ++#else ++module_init(ram_console_module_init); ++#endif ++late_initcall(ram_console_late_init); ++ +diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c +new file mode 100644 +index 0000000..be7cdaa +--- /dev/null ++++ b/drivers/staging/android/timed_gpio.c +@@ -0,0 +1,166 @@ ++/* drivers/misc/timed_gpio.c ++ * ++ * Copyright (C) 2008 Google, Inc. ++ * Author: Mike Lockwood <lockwood@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/hrtimer.h> ++#include <linux/err.h> ++#include <linux/gpio.h> ++ ++#include "timed_output.h" ++#include "timed_gpio.h" ++ ++ ++struct timed_gpio_data { ++ struct timed_output_dev dev; ++ struct hrtimer timer; ++ spinlock_t lock; ++ unsigned gpio; ++ int max_timeout; ++ u8 active_low; ++}; ++ ++static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer) ++{ ++ struct timed_gpio_data *data = ++ container_of(timer, struct timed_gpio_data, timer); ++ ++ gpio_direction_output(data->gpio, data->active_low ? 1 : 0); ++ return HRTIMER_NORESTART; ++} ++ ++static int gpio_get_time(struct timed_output_dev *dev) ++{ ++ struct timed_gpio_data *data = ++ container_of(dev, struct timed_gpio_data, dev); ++ ++ if (hrtimer_active(&data->timer)) { ++ ktime_t r = hrtimer_get_remaining(&data->timer); ++ struct timeval t = ktime_to_timeval(r); ++ return t.tv_sec * 1000 + t.tv_usec / 1000; ++ } else ++ return 0; ++} ++ ++static void gpio_enable(struct timed_output_dev *dev, int value) ++{ ++ struct timed_gpio_data *data = ++ container_of(dev, struct timed_gpio_data, dev); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&data->lock, flags); ++ ++ /* cancel previous timer and set GPIO according to value */ ++ hrtimer_cancel(&data->timer); ++ gpio_direction_output(data->gpio, data->active_low ? !value : !!value); ++ ++ if (value > 0) { ++ if (value > data->max_timeout) ++ value = data->max_timeout; ++ ++ hrtimer_start(&data->timer, ++ ktime_set(value / 1000, (value % 1000) * 1000000), ++ HRTIMER_MODE_REL); ++ } ++ ++ spin_unlock_irqrestore(&data->lock, flags); ++} ++ ++static int timed_gpio_probe(struct platform_device *pdev) ++{ ++ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; ++ struct timed_gpio *cur_gpio; ++ struct timed_gpio_data *gpio_data, *gpio_dat; ++ int i, j, ret = 0; ++ ++ if (!pdata) ++ return -EBUSY; ++ ++ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios, ++ GFP_KERNEL); ++ if (!gpio_data) ++ return -ENOMEM; ++ ++ for (i = 0; i < pdata->num_gpios; i++) { ++ cur_gpio = &pdata->gpios[i]; ++ gpio_dat = &gpio_data[i]; ++ ++ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC, ++ HRTIMER_MODE_REL); ++ gpio_dat->timer.function = gpio_timer_func; ++ spin_lock_init(&gpio_dat->lock); ++ ++ gpio_dat->dev.name = cur_gpio->name; ++ gpio_dat->dev.get_time = gpio_get_time; ++ gpio_dat->dev.enable = gpio_enable; ++ ret = timed_output_dev_register(&gpio_dat->dev); ++ if (ret < 0) { ++ for (j = 0; j < i; j++) ++ timed_output_dev_unregister(&gpio_data[i].dev); ++ kfree(gpio_data); ++ return ret; ++ } ++ ++ gpio_dat->gpio = cur_gpio->gpio; ++ gpio_dat->max_timeout = cur_gpio->max_timeout; ++ gpio_dat->active_low = cur_gpio->active_low; ++ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low); ++ } ++ ++ platform_set_drvdata(pdev, gpio_data); ++ ++ return 0; ++} ++ ++static int timed_gpio_remove(struct platform_device *pdev) ++{ ++ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data; ++ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev); ++ int i; ++ ++ for (i = 0; i < pdata->num_gpios; i++) ++ timed_output_dev_unregister(&gpio_data[i].dev); ++ ++ kfree(gpio_data); ++ ++ return 0; ++} ++ ++static struct platform_driver timed_gpio_driver = { ++ .probe = timed_gpio_probe, ++ .remove = timed_gpio_remove, ++ .driver = { ++ .name = TIMED_GPIO_NAME, ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init timed_gpio_init(void) ++{ ++ return 
platform_driver_register(&timed_gpio_driver); ++} ++ ++static void __exit timed_gpio_exit(void) ++{ ++ platform_driver_unregister(&timed_gpio_driver); ++} ++ ++module_init(timed_gpio_init); ++module_exit(timed_gpio_exit); ++ ++MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); ++MODULE_DESCRIPTION("timed gpio driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h +new file mode 100644 +index 0000000..a0e15f8 +--- /dev/null ++++ b/drivers/staging/android/timed_gpio.h +@@ -0,0 +1,33 @@ ++/* include/linux/timed_gpio.h ++ * ++ * Copyright (C) 2008 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++*/ ++ ++#ifndef _LINUX_TIMED_GPIO_H ++#define _LINUX_TIMED_GPIO_H ++ ++#define TIMED_GPIO_NAME "timed-gpio" ++ ++struct timed_gpio { ++ const char *name; ++ unsigned gpio; ++ int max_timeout; ++ u8 active_low; ++}; ++ ++struct timed_gpio_platform_data { ++ int num_gpios; ++ struct timed_gpio *gpios; ++}; ++ ++#endif +diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c +new file mode 100644 +index 0000000..62e7918 +--- /dev/null ++++ b/drivers/staging/android/timed_output.c +@@ -0,0 +1,121 @@ ++/* drivers/misc/timed_output.c ++ * ++ * Copyright (C) 2009 Google, Inc. ++ * Author: Mike Lockwood <lockwood@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/device.h> ++#include <linux/fs.h> ++#include <linux/err.h> ++ ++#include "timed_output.h" ++ ++static struct class *timed_output_class; ++static atomic_t device_count; ++ ++static ssize_t enable_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct timed_output_dev *tdev = dev_get_drvdata(dev); ++ int remaining = tdev->get_time(tdev); ++ ++ return sprintf(buf, "%d\n", remaining); ++} ++ ++static ssize_t enable_store( ++ struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t size) ++{ ++ struct timed_output_dev *tdev = dev_get_drvdata(dev); ++ int value; ++ ++ sscanf(buf, "%d", &value); ++ tdev->enable(tdev, value); ++ ++ return size; ++} ++ ++static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store); ++ ++static int create_timed_output_class(void) ++{ ++ if (!timed_output_class) { ++ timed_output_class = class_create(THIS_MODULE, "timed_output"); ++ if (IS_ERR(timed_output_class)) ++ return PTR_ERR(timed_output_class); ++ atomic_set(&device_count, 0); ++ } ++ ++ return 0; ++} ++ ++int timed_output_dev_register(struct timed_output_dev *tdev) ++{ ++ int ret; ++ ++ if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time) ++ return -EINVAL; ++ ++ ret = create_timed_output_class(); ++ if (ret < 0) ++ return ret; ++ ++ tdev->index = atomic_inc_return(&device_count); ++ tdev->dev = device_create(timed_output_class, NULL, ++ MKDEV(0, tdev->index), NULL, tdev->name); ++ if (IS_ERR(tdev->dev)) ++ return PTR_ERR(tdev->dev); ++ ++ ret = device_create_file(tdev->dev, &dev_attr_enable); ++ if (ret < 0) ++ goto err_create_file; ++ ++ dev_set_drvdata(tdev->dev, tdev); ++ tdev->state = 0; ++ return 0; ++ ++err_create_file: ++ device_destroy(timed_output_class, MKDEV(0, tdev->index)); ++ printk(KERN_ERR "timed_output: Failed to register driver %s\n", ++ tdev->name); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(timed_output_dev_register); ++ ++void timed_output_dev_unregister(struct timed_output_dev *tdev) ++{ ++ device_remove_file(tdev->dev, &dev_attr_enable); ++ device_destroy(timed_output_class, MKDEV(0, tdev->index)); ++ dev_set_drvdata(tdev->dev, NULL); ++} ++EXPORT_SYMBOL_GPL(timed_output_dev_unregister); ++ ++static int __init timed_output_init(void) ++{ ++ return create_timed_output_class(); ++} ++ ++static void __exit timed_output_exit(void) ++{ ++ class_destroy(timed_output_class); ++} ++ ++module_init(timed_output_init); ++module_exit(timed_output_exit); ++ ++MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); ++MODULE_DESCRIPTION("timed output class driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h +new file mode 100644 +index 0000000..ec907ab +--- /dev/null ++++ b/drivers/staging/android/timed_output.h +@@ -0,0 +1,37 @@ ++/* include/linux/timed_output.h ++ * ++ * Copyright (C) 2008 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++*/ ++ ++#ifndef _LINUX_TIMED_OUTPUT_H ++#define _LINUX_TIMED_OUTPUT_H ++ ++struct timed_output_dev { ++ const char *name; ++ ++ /* enable the output and set the timer */ ++ void (*enable)(struct timed_output_dev *sdev, int timeout); ++ ++ /* returns the current number of milliseconds remaining on the timer */ ++ int (*get_time)(struct timed_output_dev *sdev); ++ ++ /* private data */ ++ struct device *dev; ++ int index; ++ int state; ++}; ++ ++extern int timed_output_dev_register(struct timed_output_dev *dev); ++extern void timed_output_dev_unregister(struct timed_output_dev *dev); ++ ++#endif diff --git a/patches.android/android-0002-staging-android-fix-build-issues.patch b/patches.android/android-0002-staging-android-fix-build-issues.patch new file mode 100644 index 0000000000000..7761ae26d5c3e --- /dev/null +++ b/patches.android/android-0002-staging-android-fix-build-issues.patch @@ -0,0 +1,46 @@ +From 23687af9317c9c01d4609667d3566af7f5e71e65 Mon Sep 17 00:00:00 2001 +From: Corentin Chary <corentincj@iksaif.net> +Date: Sat, 28 Nov 2009 09:45:14 +0100 +Subject: staging/android: fix build issues +Patch-mainline: HEAD +Git-commit: 23687af9317c9c01d4609667d3566af7f5e71e65 + +Signed-off-by: Corentin Chary <corentincj@iksaif.net> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +index 6c10b45..64cc2a1 100644 +--- a/drivers/staging/android/logger.c ++++ b/drivers/staging/android/logger.c +@@ -17,6 +17,7 @@ + * GNU General Public License for more details. + */ + ++#include <linux/sched.h> + #include <linux/module.h> + #include <linux/fs.h> + #include <linux/miscdevice.h> +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 935d281..32314e8 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -100,15 +100,17 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + read_lock(&tasklist_lock); + for_each_process(p) { + struct mm_struct *mm; ++ struct signal_struct *sig; + int oom_adj; + + task_lock(p); + mm = p->mm; +- if (!mm) { ++ sig = p->signal; ++ if (!mm || !sig) { + task_unlock(p); + continue; + } +- oom_adj = mm->oom_adj; ++ oom_adj = sig->oom_adj; + if (oom_adj < min_adj) { + task_unlock(p); + continue; diff --git a/patches.android/android-0003-android-common-include-linux-slab.h.patch b/patches.android/android-0003-android-common-include-linux-slab.h.patch new file mode 100644 index 0000000000000..b31913061f90f --- /dev/null +++ b/patches.android/android-0003-android-common-include-linux-slab.h.patch @@ -0,0 +1,21 @@ +From c1b197ae67a201e404df9084cfd544eb8cb2e8a6 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Tue, 20 Apr 2010 22:33:05 -0700 +Subject: android-common: include linux/slab.h +Patch-mainline: HEAD +Git-commit: c1b197ae67a201e404df9084cfd544eb8cb2e8a6 + +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c +index be7cdaa..3bfe6e0 100644 +--- a/drivers/staging/android/timed_gpio.c ++++ b/drivers/staging/android/timed_gpio.c +@@ -16,6 +16,7 @@ + + #include <linux/module.h> + #include <linux/platform_device.h> ++#include <linux/slab.h> + #include <linux/hrtimer.h> + #include <linux/err.h> + #include <linux/gpio.h> diff --git a/patches.android/android-0004-android-common-Fix-slab.h-includes-for-2.6.34-rc4.patch 
b/patches.android/android-0004-android-common-Fix-slab.h-includes-for-2.6.34-rc4.patch new file mode 100644 index 0000000000000..bad9b9bb105aa --- /dev/null +++ b/patches.android/android-0004-android-common-Fix-slab.h-includes-for-2.6.34-rc4.patch @@ -0,0 +1,33 @@ +From c11a166cd4c19664355e0e3d9c04cfa7ee4aa9f4 Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@android.com> +Date: Thu, 15 Apr 2010 15:21:51 -0700 +Subject: android-common: Fix slab.h includes for 2.6.34-rc4 +Patch-mainline: HEAD +Git-commit: c11a166cd4c19664355e0e3d9c04cfa7ee4aa9f4 + +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 99010d4..3aec4530 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -31,6 +31,7 @@ + #include <linux/sched.h> + #include <linux/uaccess.h> + #include <linux/vmalloc.h> ++#include <linux/slab.h> + + #include "binder.h" + +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +index 64cc2a1..dd76564 100644 +--- a/drivers/staging/android/logger.c ++++ b/drivers/staging/android/logger.c +@@ -23,6 +23,7 @@ + #include <linux/miscdevice.h> + #include <linux/uaccess.h> + #include <linux/poll.h> ++#include <linux/slab.h> + #include <linux/time.h> + #include "logger.h" + diff --git a/patches.android/android-0005-Revert-Staging-android-mark-subsystem-as-broken.patch b/patches.android/android-0005-Revert-Staging-android-mark-subsystem-as-broken.patch new file mode 100644 index 0000000000000..5fe607e7aee5d --- /dev/null +++ b/patches.android/android-0005-Revert-Staging-android-mark-subsystem-as-broken.patch @@ -0,0 +1,30 @@ +From e59bbb8ea3fb5794c09e48490a74b673aee4adc2 Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman <gregkh@suse.de> +Date: Wed, 30 Nov 2011 20:32:24 +0900 +Subject: Revert "Staging: android: mark subsystem as broken" +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: e59bbb8ea3fb5794c09e48490a74b673aee4adc2 + +This reverts commit 2cdf99ce2b9418c9d7c5f907195cfac421375520. + +It now builds, so this can be reverted. 
+ +Cc: Arve Hjønnevåg <arve@android.com> +Cc: Brian Swetland <swetland@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index eb67563..2471949 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -2,7 +2,6 @@ menu "Android" + + config ANDROID + bool "Android Drivers" +- depends on BROKEN + default N + ---help--- + Enable support for various drivers needed on the Android platform diff --git a/patches.android/android-0006-staging-android-ramconsole-Ensure-ramconsole-does-not-get-cl.patch b/patches.android/android-0006-staging-android-ramconsole-Ensure-ramconsole-does-not-get-cl.patch new file mode 100644 index 0000000000000..5583d85cd9996 --- /dev/null +++ b/patches.android/android-0006-staging-android-ramconsole-Ensure-ramconsole-does-not-get-cl.patch @@ -0,0 +1,35 @@ +From fdfc8089429b58b4ac901926fe83fa85b0b7bfc1 Mon Sep 17 00:00:00 2001 +From: San Mehat <san@google.com> +Date: Thu, 17 Sep 2009 14:27:41 -0700 +Subject: staging: android: ramconsole: Ensure ramconsole does not get + cluttered by apanic threads +Patch-mainline: HEAD +Git-commit: fdfc8089429b58b4ac901926fe83fa85b0b7bfc1 + +[Note, this is part of a patch from Sam, just the drivers/staging/ +portion, that adds a function that the apanic code calls, but the apanic +code isn't here, so just include part of this to make merges and diffs +easier and this keeps things self-contained - gregkh] + +Signed-off-by: San Mehat <san@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c +index 8f18a59..d735be4 100644 +--- a/drivers/staging/android/ram_console.c ++++ b/drivers/staging/android/ram_console.c +@@ -146,6 +146,14 @@ static struct console ram_console = { + .index = -1, + }; + ++void ram_console_enable_console(int enabled) ++{ ++ if (enabled) ++ ram_console.flags |= CON_ENABLED; ++ else ++ ram_console.flags &= ~CON_ENABLED; ++} ++ + static void __init + ram_console_save_old(struct ram_console_buffer *buffer, char *dest) + { diff --git a/patches.android/android-0007-Staging-android-ram_console-Start-ram-console-earlier.patch b/patches.android/android-0007-Staging-android-ram_console-Start-ram-console-earlier.patch new file mode 100644 index 0000000000000..196de3821f3f7 --- /dev/null +++ b/patches.android/android-0007-Staging-android-ram_console-Start-ram-console-earlier.patch @@ -0,0 +1,26 @@ +From 81057ec1ded5ddf15149c3b266f414c0fbde5530 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Thu, 17 Dec 2009 23:42:08 -0800 +Subject: Staging: android: ram_console: Start ram console earlier +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 81057ec1ded5ddf15149c3b266f414c0fbde5530 + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c +index d735be4..53f736b 100644 +--- a/drivers/staging/android/ram_console.c ++++ b/drivers/staging/android/ram_console.c +@@ -412,7 +412,7 @@ static int __init ram_console_late_init(void) + #ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT + console_initcall(ram_console_early_init); + #else +-module_init(ram_console_module_init); ++postcore_initcall(ram_console_module_init); + #endif + 
late_initcall(ram_console_late_init); + diff --git a/patches.android/android-0008-Staging-android-timed_gpio-Request-gpios.patch b/patches.android/android-0008-Staging-android-timed_gpio-Request-gpios.patch new file mode 100644 index 0000000000000..2ef7c1f6ee107 --- /dev/null +++ b/patches.android/android-0008-Staging-android-timed_gpio-Request-gpios.patch @@ -0,0 +1,49 @@ +From 0445f1548fc66a72f3b91cdbe8f26b120245efd1 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Wed, 6 Jan 2010 17:17:33 -0800 +Subject: Staging: android: timed_gpio: Request gpios. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 0445f1548fc66a72f3b91cdbe8f26b120245efd1 + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c +index 3bfe6e0..a64481c 100644 +--- a/drivers/staging/android/timed_gpio.c ++++ b/drivers/staging/android/timed_gpio.c +@@ -107,10 +107,17 @@ static int timed_gpio_probe(struct platform_device *pdev) + gpio_dat->dev.name = cur_gpio->name; + gpio_dat->dev.get_time = gpio_get_time; + gpio_dat->dev.enable = gpio_enable; +- ret = timed_output_dev_register(&gpio_dat->dev); ++ ret = gpio_request(cur_gpio->gpio, cur_gpio->name); ++ if (ret >= 0) { ++ ret = timed_output_dev_register(&gpio_dat->dev); ++ if (ret < 0) ++ gpio_free(cur_gpio->gpio); ++ } + if (ret < 0) { +- for (j = 0; j < i; j++) ++ for (j = 0; j < i; j++) { + timed_output_dev_unregister(&gpio_data[i].dev); ++ gpio_free(gpio_data[i].gpio); ++ } + kfree(gpio_data); + return ret; + } +@@ -132,8 +139,10 @@ static int timed_gpio_remove(struct platform_device *pdev) + struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev); + int i; + +- for (i = 0; i < pdata->num_gpios; i++) ++ for (i = 0; i < pdata->num_gpios; i++) { + timed_output_dev_unregister(&gpio_data[i].dev); ++ gpio_free(gpio_data[i].gpio); ++ } + + kfree(gpio_data); + diff --git a/patches.android/android-0009-android-logger-Add-new-system-log-for-framework-system-log-m.patch b/patches.android/android-0009-android-logger-Add-new-system-log-for-framework-system-log-m.patch new file mode 100644 index 0000000000000..c4e4cf3f4c228 --- /dev/null +++ b/patches.android/android-0009-android-logger-Add-new-system-log-for-framework-system-log-m.patch @@ -0,0 +1,55 @@ +From 3537cdaa16205d2af7e1195276c786f22bada574 Mon Sep 17 00:00:00 2001 +From: San Mehat <san@google.com> +Date: Tue, 23 Feb 2010 16:09:47 -0800 +Subject: android: logger: Add new system log for framework/system log + messages +Patch-mainline: HEAD +Git-commit: 3537cdaa16205d2af7e1195276c786f22bada574 + +Signed-off-by: San Mehat <san@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +index dd76564..531bdbe 100644 +--- a/drivers/staging/android/logger.c ++++ b/drivers/staging/android/logger.c +@@ -558,6 +558,7 @@ static struct logger_log VAR = { \ + DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) + DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) + DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) ++DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 64*1024) + + static struct logger_log *get_log_from_minor(int minor) + { +@@ -567,6 +568,8 @@ static struct logger_log *get_log_from_minor(int minor) + return &log_events; + if 
(log_radio.misc.minor == minor) + return &log_radio; ++ if (log_system.misc.minor == minor) ++ return &log_system; + return NULL; + } + +@@ -603,6 +606,10 @@ static int __init logger_init(void) + if (unlikely(ret)) + goto out; + ++ ret = init_log(&log_system); ++ if (unlikely(ret)) ++ goto out; ++ + out: + return ret; + } +diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h +index a562434..2cb06e9 100644 +--- a/drivers/staging/android/logger.h ++++ b/drivers/staging/android/logger.h +@@ -32,6 +32,7 @@ struct logger_entry { + + #define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ + #define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ ++#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */ + #define LOGGER_LOG_MAIN "log_main" /* everything else */ + + #define LOGGER_ENTRY_MAX_LEN (4*1024) diff --git a/patches.android/android-0010-binder-Use-seq_file-for-debug-interface.patch b/patches.android/android-0010-binder-Use-seq_file-for-debug-interface.patch new file mode 100644 index 0000000000000..a05bb2ceaa549 --- /dev/null +++ b/patches.android/android-0010-binder-Use-seq_file-for-debug-interface.patch @@ -0,0 +1,835 @@ +From 5249f4883045de494916db7b1a6d6e1e422e9a0b Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Tue, 28 Apr 2009 20:57:50 -0700 +Subject: binder: Use seq_file for debug interface. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 5249f4883045de494916db7b1a6d6e1e422e9a0b + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 3aec4530..371232b 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -29,6 +29,7 @@ + #include <linux/proc_fs.h> + #include <linux/rbtree.h> + #include <linux/sched.h> ++#include <linux/seq_file.h> + #include <linux/uaccess.h> + #include <linux/vmalloc.h> + #include <linux/slab.h> +@@ -48,8 +49,22 @@ static struct binder_node *binder_context_mgr_node; + static uid_t binder_context_mgr_uid = -1; + static int binder_last_id; + +-static int binder_read_proc_proc(char *page, char **start, off_t off, +- int count, int *eof, void *data); ++#define BINDER_DEBUG_ENTRY(name) \ ++static int binder_##name##_open(struct inode *inode, struct file *file) \ ++{ \ ++ return single_open(file, binder_##name##_show, PDE(inode)->data); \ ++} \ ++\ ++static const struct file_operations binder_##name##_fops = { \ ++ .owner = THIS_MODULE, \ ++ .open = binder_##name##_open, \ ++ .read = seq_read, \ ++ .llseek = seq_lseek, \ ++ .release = single_release, \ ++} ++ ++static int binder_proc_show(struct seq_file *m, void *unused); ++BINDER_DEBUG_ENTRY(proc); + + /* This is only defined in include/asm-arm/sizes.h */ + #ifndef SZ_1K +@@ -2880,9 +2895,9 @@ static int binder_open(struct inode *nodp, struct file *filp) + char strbuf[11]; + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); + remove_proc_entry(strbuf, binder_proc_dir_entry_proc); +- create_proc_read_entry(strbuf, S_IRUGO, +- binder_proc_dir_entry_proc, +- binder_read_proc_proc, proc); ++ proc_create_data(strbuf, S_IRUGO, ++ binder_proc_dir_entry_proc, ++ &binder_proc_fops, proc); + } + + return 0; +@@ -3105,49 +3120,41 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) + mutex_unlock(&binder_deferred_lock); + } + +-static 
char *print_binder_transaction(char *buf, char *end, const char *prefix, +- struct binder_transaction *t) +-{ +- buf += snprintf(buf, end - buf, +- "%s %d: %p from %d:%d to %d:%d code %x " +- "flags %x pri %ld r%d", +- prefix, t->debug_id, t, +- t->from ? t->from->proc->pid : 0, +- t->from ? t->from->pid : 0, +- t->to_proc ? t->to_proc->pid : 0, +- t->to_thread ? t->to_thread->pid : 0, +- t->code, t->flags, t->priority, t->need_reply); +- if (buf >= end) +- return buf; ++static void print_binder_transaction(struct seq_file *m, const char *prefix, ++ struct binder_transaction *t) ++{ ++ seq_printf(m, ++ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d", ++ prefix, t->debug_id, t, ++ t->from ? t->from->proc->pid : 0, ++ t->from ? t->from->pid : 0, ++ t->to_proc ? t->to_proc->pid : 0, ++ t->to_thread ? t->to_thread->pid : 0, ++ t->code, t->flags, t->priority, t->need_reply); + if (t->buffer == NULL) { +- buf += snprintf(buf, end - buf, " buffer free\n"); +- return buf; +- } +- if (t->buffer->target_node) { +- buf += snprintf(buf, end - buf, " node %d", +- t->buffer->target_node->debug_id); +- if (buf >= end) +- return buf; ++ seq_puts(m, " buffer free\n"); ++ return; + } +- buf += snprintf(buf, end - buf, " size %zd:%zd data %p\n", +- t->buffer->data_size, t->buffer->offsets_size, +- t->buffer->data); +- return buf; ++ if (t->buffer->target_node) ++ seq_printf(m, " node %d", ++ t->buffer->target_node->debug_id); ++ seq_printf(m, " size %zd:%zd data %p\n", ++ t->buffer->data_size, t->buffer->offsets_size, ++ t->buffer->data); + } + +-static char *print_binder_buffer(char *buf, char *end, const char *prefix, +- struct binder_buffer *buffer) ++static void print_binder_buffer(struct seq_file *m, const char *prefix, ++ struct binder_buffer *buffer) + { +- buf += snprintf(buf, end - buf, "%s %d: %p size %zd:%zd %s\n", +- prefix, buffer->debug_id, buffer->data, +- buffer->data_size, buffer->offsets_size, +- buffer->transaction ? "active" : "delivered"); +- return buf; ++ seq_printf(m, "%s %d: %p size %zd:%zd %s\n", ++ prefix, buffer->debug_id, buffer->data, ++ buffer->data_size, buffer->offsets_size, ++ buffer->transaction ? 
"active" : "delivered"); + } + +-static char *print_binder_work(char *buf, char *end, const char *prefix, +- const char *transaction_prefix, +- struct binder_work *w) ++static void print_binder_work(struct seq_file *m, const char *prefix, ++ const char *transaction_prefix, ++ struct binder_work *w) + { + struct binder_node *node; + struct binder_transaction *t; +@@ -3155,79 +3162,65 @@ static char *print_binder_work(char *buf, char *end, const char *prefix, + switch (w->type) { + case BINDER_WORK_TRANSACTION: + t = container_of(w, struct binder_transaction, work); +- buf = print_binder_transaction(buf, end, transaction_prefix, t); ++ print_binder_transaction(m, transaction_prefix, t); + break; + case BINDER_WORK_TRANSACTION_COMPLETE: +- buf += snprintf(buf, end - buf, +- "%stransaction complete\n", prefix); ++ seq_printf(m, "%stransaction complete\n", prefix); + break; + case BINDER_WORK_NODE: + node = container_of(w, struct binder_node, work); +- buf += snprintf(buf, end - buf, "%snode work %d: u%p c%p\n", +- prefix, node->debug_id, node->ptr, +- node->cookie); ++ seq_printf(m, "%snode work %d: u%p c%p\n", ++ prefix, node->debug_id, node->ptr, node->cookie); + break; + case BINDER_WORK_DEAD_BINDER: +- buf += snprintf(buf, end - buf, "%shas dead binder\n", prefix); ++ seq_printf(m, "%shas dead binder\n", prefix); + break; + case BINDER_WORK_DEAD_BINDER_AND_CLEAR: +- buf += snprintf(buf, end - buf, +- "%shas cleared dead binder\n", prefix); ++ seq_printf(m, "%shas cleared dead binder\n", prefix); + break; + case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: +- buf += snprintf(buf, end - buf, +- "%shas cleared death notification\n", prefix); ++ seq_printf(m, "%shas cleared death notification\n", prefix); + break; + default: +- buf += snprintf(buf, end - buf, "%sunknown work: type %d\n", +- prefix, w->type); ++ seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); + break; + } +- return buf; + } + +-static char *print_binder_thread(char *buf, char *end, +- struct binder_thread *thread, +- int print_always) ++static void print_binder_thread(struct seq_file *m, ++ struct binder_thread *thread, ++ int print_always) + { + struct binder_transaction *t; + struct binder_work *w; +- char *start_buf = buf; +- char *header_buf; ++ size_t start_pos = m->count; ++ size_t header_pos; + +- buf += snprintf(buf, end - buf, " thread %d: l %02x\n", +- thread->pid, thread->looper); +- header_buf = buf; ++ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper); ++ header_pos = m->count; + t = thread->transaction_stack; + while (t) { +- if (buf >= end) +- break; + if (t->from == thread) { +- buf = print_binder_transaction(buf, end, +- " outgoing transaction", t); ++ print_binder_transaction(m, ++ " outgoing transaction", t); + t = t->from_parent; + } else if (t->to_thread == thread) { +- buf = print_binder_transaction(buf, end, +- " incoming transaction", t); ++ print_binder_transaction(m, ++ " incoming transaction", t); + t = t->to_parent; + } else { +- buf = print_binder_transaction(buf, end, +- " bad transaction", t); ++ print_binder_transaction(m, " bad transaction", t); + t = NULL; + } + } + list_for_each_entry(w, &thread->todo, entry) { +- if (buf >= end) +- break; +- buf = print_binder_work(buf, end, " ", +- " pending transaction", w); ++ print_binder_work(m, " ", " pending transaction", w); + } +- if (!print_always && buf == header_buf) +- buf = start_buf; +- return buf; ++ if (!print_always && m->count == header_pos) ++ m->count = start_pos; + } + +-static char *print_binder_node(char *buf, 
char *end, struct binder_node *node) ++static void print_binder_node(struct seq_file *m, struct binder_node *node) + { + struct binder_ref *ref; + struct hlist_node *pos; +@@ -3238,100 +3231,67 @@ static char *print_binder_node(char *buf, char *end, struct binder_node *node) + hlist_for_each_entry(ref, pos, &node->refs, node_entry) + count++; + +- buf += snprintf(buf, end - buf, +- " node %d: u%p c%p hs %d hw %d ls %d lw %d " +- "is %d iw %d", +- node->debug_id, node->ptr, node->cookie, +- node->has_strong_ref, node->has_weak_ref, +- node->local_strong_refs, node->local_weak_refs, +- node->internal_strong_refs, count); +- if (buf >= end) +- return buf; ++ seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d", ++ node->debug_id, node->ptr, node->cookie, ++ node->has_strong_ref, node->has_weak_ref, ++ node->local_strong_refs, node->local_weak_refs, ++ node->internal_strong_refs, count); + if (count) { +- buf += snprintf(buf, end - buf, " proc"); +- if (buf >= end) +- return buf; +- hlist_for_each_entry(ref, pos, &node->refs, node_entry) { +- buf += snprintf(buf, end - buf, " %d", ref->proc->pid); +- if (buf >= end) +- return buf; +- } ++ seq_puts(m, " proc"); ++ hlist_for_each_entry(ref, pos, &node->refs, node_entry) ++ seq_printf(m, " %d", ref->proc->pid); + } +- buf += snprintf(buf, end - buf, "\n"); +- list_for_each_entry(w, &node->async_todo, entry) { +- if (buf >= end) +- break; +- buf = print_binder_work(buf, end, " ", +- " pending async transaction", w); +- } +- return buf; ++ seq_puts(m, "\n"); ++ list_for_each_entry(w, &node->async_todo, entry) ++ print_binder_work(m, " ", ++ " pending async transaction", w); + } + +-static char *print_binder_ref(char *buf, char *end, struct binder_ref *ref) ++static void print_binder_ref(struct seq_file *m, struct binder_ref *ref) + { +- buf += snprintf(buf, end - buf, +- " ref %d: desc %d %snode %d s %d w %d d %p\n", +- ref->debug_id, ref->desc, +- ref->node->proc ? "" : "dead ", ref->node->debug_id, +- ref->strong, ref->weak, ref->death); +- return buf; ++ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n", ++ ref->debug_id, ref->desc, ref->node->proc ? 
"" : "dead ", ++ ref->node->debug_id, ref->strong, ref->weak, ref->death); + } + +-static char *print_binder_proc(char *buf, char *end, +- struct binder_proc *proc, int print_all) ++static void print_binder_proc(struct seq_file *m, ++ struct binder_proc *proc, int print_all) + { + struct binder_work *w; + struct rb_node *n; +- char *start_buf = buf; +- char *header_buf; +- +- buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); +- header_buf = buf; +- +- for (n = rb_first(&proc->threads); +- n != NULL && buf < end; +- n = rb_next(n)) +- buf = print_binder_thread(buf, end, +- rb_entry(n, struct binder_thread, +- rb_node), print_all); +- for (n = rb_first(&proc->nodes); +- n != NULL && buf < end; +- n = rb_next(n)) { ++ size_t start_pos = m->count; ++ size_t header_pos; ++ ++ seq_printf(m, "proc %d\n", proc->pid); ++ header_pos = m->count; ++ ++ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) ++ print_binder_thread(m, rb_entry(n, struct binder_thread, ++ rb_node), print_all); ++ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { + struct binder_node *node = rb_entry(n, struct binder_node, + rb_node); + if (print_all || node->has_async_transaction) +- buf = print_binder_node(buf, end, node); ++ print_binder_node(m, node); + } + if (print_all) { + for (n = rb_first(&proc->refs_by_desc); +- n != NULL && buf < end; ++ n != NULL; + n = rb_next(n)) +- buf = print_binder_ref(buf, end, +- rb_entry(n, struct binder_ref, +- rb_node_desc)); +- } +- for (n = rb_first(&proc->allocated_buffers); +- n != NULL && buf < end; +- n = rb_next(n)) +- buf = print_binder_buffer(buf, end, " buffer", +- rb_entry(n, struct binder_buffer, +- rb_node)); +- list_for_each_entry(w, &proc->todo, entry) { +- if (buf >= end) +- break; +- buf = print_binder_work(buf, end, " ", +- " pending transaction", w); ++ print_binder_ref(m, rb_entry(n, struct binder_ref, ++ rb_node_desc)); + } ++ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) ++ print_binder_buffer(m, " buffer", ++ rb_entry(n, struct binder_buffer, rb_node)); ++ list_for_each_entry(w, &proc->todo, entry) ++ print_binder_work(m, " ", " pending transaction", w); + list_for_each_entry(w, &proc->delivered_death, entry) { +- if (buf >= end) +- break; +- buf += snprintf(buf, end - buf, +- " has delivered dead binder\n"); ++ seq_puts(m, " has delivered dead binder\n"); + break; + } +- if (!print_all && buf == header_buf) +- buf = start_buf; +- return buf; ++ if (!print_all && m->count == header_pos) ++ m->count = start_pos; + } + + static const char *binder_return_strings[] = { +@@ -3385,79 +3345,61 @@ static const char *binder_objstat_strings[] = { + "transaction_complete" + }; + +-static char *print_binder_stats(char *buf, char *end, const char *prefix, +- struct binder_stats *stats) ++static void print_binder_stats(struct seq_file *m, const char *prefix, ++ struct binder_stats *stats) + { + int i; + + BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != +- ARRAY_SIZE(binder_command_strings)); ++ ARRAY_SIZE(binder_command_strings)); + for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { + if (stats->bc[i]) +- buf += snprintf(buf, end - buf, "%s%s: %d\n", prefix, +- binder_command_strings[i], +- stats->bc[i]); +- if (buf >= end) +- return buf; ++ seq_printf(m, "%s%s: %d\n", prefix, ++ binder_command_strings[i], stats->bc[i]); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->br) != +- ARRAY_SIZE(binder_return_strings)); ++ ARRAY_SIZE(binder_return_strings)); + for (i = 0; i < ARRAY_SIZE(stats->br); i++) { + if (stats->br[i]) +- buf += snprintf(buf, end - 
buf, "%s%s: %d\n", prefix, +- binder_return_strings[i], stats->br[i]); +- if (buf >= end) +- return buf; ++ seq_printf(m, "%s%s: %d\n", prefix, ++ binder_return_strings[i], stats->br[i]); + } + + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != +- ARRAY_SIZE(binder_objstat_strings)); ++ ARRAY_SIZE(binder_objstat_strings)); + BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != +- ARRAY_SIZE(stats->obj_deleted)); ++ ARRAY_SIZE(stats->obj_deleted)); + for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { + if (stats->obj_created[i] || stats->obj_deleted[i]) +- buf += snprintf(buf, end - buf, +- "%s%s: active %d total %d\n", prefix, +- binder_objstat_strings[i], +- stats->obj_created[i] - +- stats->obj_deleted[i], +- stats->obj_created[i]); +- if (buf >= end) +- return buf; ++ seq_printf(m, "%s%s: active %d total %d\n", prefix, ++ binder_objstat_strings[i], ++ stats->obj_created[i] - stats->obj_deleted[i], ++ stats->obj_created[i]); + } +- return buf; + } + +-static char *print_binder_proc_stats(char *buf, char *end, +- struct binder_proc *proc) ++static void print_binder_proc_stats(struct seq_file *m, ++ struct binder_proc *proc) + { + struct binder_work *w; + struct rb_node *n; + int count, strong, weak; + +- buf += snprintf(buf, end - buf, "proc %d\n", proc->pid); +- if (buf >= end) +- return buf; ++ seq_printf(m, "proc %d\n", proc->pid); + count = 0; + for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) + count++; +- buf += snprintf(buf, end - buf, " threads: %d\n", count); +- if (buf >= end) +- return buf; +- buf += snprintf(buf, end - buf, " requested threads: %d+%d/%d\n" ++ seq_printf(m, " threads: %d\n", count); ++ seq_printf(m, " requested threads: %d+%d/%d\n" + " ready threads %d\n" + " free async space %zd\n", proc->requested_threads, + proc->requested_threads_started, proc->max_threads, + proc->ready_threads, proc->free_async_space); +- if (buf >= end) +- return buf; + count = 0; + for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) + count++; +- buf += snprintf(buf, end - buf, " nodes: %d\n", count); +- if (buf >= end) +- return buf; ++ seq_printf(m, " nodes: %d\n", count); + count = 0; + strong = 0; + weak = 0; +@@ -3468,17 +3410,12 @@ static char *print_binder_proc_stats(char *buf, char *end, + strong += ref->strong; + weak += ref->weak; + } +- buf += snprintf(buf, end - buf, " refs: %d s %d w %d\n", +- count, strong, weak); +- if (buf >= end) +- return buf; ++ seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); + + count = 0; + for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n)) + count++; +- buf += snprintf(buf, end - buf, " buffers: %d\n", count); +- if (buf >= end) +- return buf; ++ seq_printf(m, " buffers: %d\n", count); + + count = 0; + list_for_each_entry(w, &proc->todo, entry) { +@@ -3490,222 +3427,110 @@ static char *print_binder_proc_stats(char *buf, char *end, + break; + } + } +- buf += snprintf(buf, end - buf, " pending transactions: %d\n", count); +- if (buf >= end) +- return buf; +- +- buf = print_binder_stats(buf, end, " ", &proc->stats); ++ seq_printf(m, " pending transactions: %d\n", count); + +- return buf; ++ print_binder_stats(m, " ", &proc->stats); + } + + +-static int binder_read_proc_state(char *page, char **start, off_t off, +- int count, int *eof, void *data) ++static int binder_state_show(struct seq_file *m, void *unused) + { + struct binder_proc *proc; + struct hlist_node *pos; + struct binder_node *node; +- int len = 0; +- char *buf = page; +- char *end = page + PAGE_SIZE; + int do_lock = !binder_debug_no_lock; 
+ +- if (off) +- return 0; +- + if (do_lock) + mutex_lock(&binder_lock); + +- buf += snprintf(buf, end - buf, "binder state:\n"); ++ seq_puts(m, "binder state:\n"); + + if (!hlist_empty(&binder_dead_nodes)) +- buf += snprintf(buf, end - buf, "dead nodes:\n"); +- hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) { +- if (buf >= end) +- break; +- buf = print_binder_node(buf, end, node); +- } ++ seq_puts(m, "dead nodes:\n"); ++ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node) ++ print_binder_node(m, node); + +- hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { +- if (buf >= end) +- break; +- buf = print_binder_proc(buf, end, proc, 1); +- } ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) ++ print_binder_proc(m, proc, 1); + if (do_lock) + mutex_unlock(&binder_lock); +- if (buf > page + PAGE_SIZE) +- buf = page + PAGE_SIZE; +- +- *start = page + off; +- +- len = buf - page; +- if (len > off) +- len -= off; +- else +- len = 0; +- +- return len < count ? len : count; ++ return 0; + } + +-static int binder_read_proc_stats(char *page, char **start, off_t off, +- int count, int *eof, void *data) ++static int binder_stats_show(struct seq_file *m, void *unused) + { + struct binder_proc *proc; + struct hlist_node *pos; +- int len = 0; +- char *p = page; + int do_lock = !binder_debug_no_lock; + +- if (off) +- return 0; +- + if (do_lock) + mutex_lock(&binder_lock); + +- p += snprintf(p, PAGE_SIZE, "binder stats:\n"); ++ seq_puts(m, "binder stats:\n"); + +- p = print_binder_stats(p, page + PAGE_SIZE, "", &binder_stats); ++ print_binder_stats(m, "", &binder_stats); + +- hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { +- if (p >= page + PAGE_SIZE) +- break; +- p = print_binder_proc_stats(p, page + PAGE_SIZE, proc); +- } ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) ++ print_binder_proc_stats(m, proc); + if (do_lock) + mutex_unlock(&binder_lock); +- if (p > page + PAGE_SIZE) +- p = page + PAGE_SIZE; +- +- *start = page + off; +- +- len = p - page; +- if (len > off) +- len -= off; +- else +- len = 0; +- +- return len < count ? len : count; ++ return 0; + } + +-static int binder_read_proc_transactions(char *page, char **start, off_t off, +- int count, int *eof, void *data) ++static int binder_transactions_show(struct seq_file *m, void *unused) + { + struct binder_proc *proc; + struct hlist_node *pos; +- int len = 0; +- char *buf = page; +- char *end = page + PAGE_SIZE; + int do_lock = !binder_debug_no_lock; + +- if (off) +- return 0; +- + if (do_lock) + mutex_lock(&binder_lock); + +- buf += snprintf(buf, end - buf, "binder transactions:\n"); +- hlist_for_each_entry(proc, pos, &binder_procs, proc_node) { +- if (buf >= end) +- break; +- buf = print_binder_proc(buf, end, proc, 0); +- } ++ seq_puts(m, "binder transactions:\n"); ++ hlist_for_each_entry(proc, pos, &binder_procs, proc_node) ++ print_binder_proc(m, proc, 0); + if (do_lock) + mutex_unlock(&binder_lock); +- if (buf > page + PAGE_SIZE) +- buf = page + PAGE_SIZE; +- +- *start = page + off; +- +- len = buf - page; +- if (len > off) +- len -= off; +- else +- len = 0; +- +- return len < count ? 
len : count; ++ return 0; + } + +-static int binder_read_proc_proc(char *page, char **start, off_t off, +- int count, int *eof, void *data) ++static int binder_proc_show(struct seq_file *m, void *unused) + { +- struct binder_proc *proc = data; +- int len = 0; +- char *p = page; ++ struct binder_proc *proc = m->private; + int do_lock = !binder_debug_no_lock; + +- if (off) +- return 0; +- + if (do_lock) + mutex_lock(&binder_lock); +- p += snprintf(p, PAGE_SIZE, "binder proc state:\n"); +- p = print_binder_proc(p, page + PAGE_SIZE, proc, 1); ++ seq_puts(m, "binder proc state:\n"); ++ print_binder_proc(m, proc, 1); + if (do_lock) + mutex_unlock(&binder_lock); +- +- if (p > page + PAGE_SIZE) +- p = page + PAGE_SIZE; +- *start = page + off; +- +- len = p - page; +- if (len > off) +- len -= off; +- else +- len = 0; +- +- return len < count ? len : count; ++ return 0; + } + +-static char *print_binder_transaction_log_entry(char *buf, char *end, ++static void print_binder_transaction_log_entry(struct seq_file *m, + struct binder_transaction_log_entry *e) + { +- buf += snprintf(buf, end - buf, +- "%d: %s from %d:%d to %d:%d node %d handle %d " +- "size %d:%d\n", +- e->debug_id, (e->call_type == 2) ? "reply" : +- ((e->call_type == 1) ? "async" : "call "), e->from_proc, +- e->from_thread, e->to_proc, e->to_thread, e->to_node, +- e->target_handle, e->data_size, e->offsets_size); +- return buf; ++ seq_printf(m, ++ "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n", ++ e->debug_id, (e->call_type == 2) ? "reply" : ++ ((e->call_type == 1) ? "async" : "call "), e->from_proc, ++ e->from_thread, e->to_proc, e->to_thread, e->to_node, ++ e->target_handle, e->data_size, e->offsets_size); + } + +-static int binder_read_proc_transaction_log( +- char *page, char **start, off_t off, int count, int *eof, void *data) ++static int binder_transaction_log_show(struct seq_file *m, void *unused) + { +- struct binder_transaction_log *log = data; +- int len = 0; ++ struct binder_transaction_log *log = m->private; + int i; +- char *buf = page; +- char *end = page + PAGE_SIZE; +- +- if (off) +- return 0; + + if (log->full) { +- for (i = log->next; i < ARRAY_SIZE(log->entry); i++) { +- if (buf >= end) +- break; +- buf = print_binder_transaction_log_entry(buf, end, +- &log->entry[i]); +- } ++ for (i = log->next; i < ARRAY_SIZE(log->entry); i++) ++ print_binder_transaction_log_entry(m, &log->entry[i]); + } +- for (i = 0; i < log->next; i++) { +- if (buf >= end) +- break; +- buf = print_binder_transaction_log_entry(buf, end, +- &log->entry[i]); +- } +- +- *start = page + off; +- +- len = buf - page; +- if (len > off) +- len -= off; +- else +- len = 0; +- +- return len < count ? 
len : count; ++ for (i = 0; i < log->next; i++) ++ print_binder_transaction_log_entry(m, &log->entry[i]); ++ return 0; + } + + static const struct file_operations binder_fops = { +@@ -3724,6 +3549,11 @@ static struct miscdevice binder_miscdev = { + .fops = &binder_fops + }; + ++BINDER_DEBUG_ENTRY(state); ++BINDER_DEBUG_ENTRY(stats); ++BINDER_DEBUG_ENTRY(transactions); ++BINDER_DEBUG_ENTRY(transaction_log); ++ + static int __init binder_init(void) + { + int ret; +@@ -3734,31 +3564,28 @@ static int __init binder_init(void) + binder_proc_dir_entry_root); + ret = misc_register(&binder_miscdev); + if (binder_proc_dir_entry_root) { +- create_proc_read_entry("state", +- S_IRUGO, +- binder_proc_dir_entry_root, +- binder_read_proc_state, +- NULL); +- create_proc_read_entry("stats", +- S_IRUGO, +- binder_proc_dir_entry_root, +- binder_read_proc_stats, +- NULL); +- create_proc_read_entry("transactions", +- S_IRUGO, +- binder_proc_dir_entry_root, +- binder_read_proc_transactions, +- NULL); +- create_proc_read_entry("transaction_log", +- S_IRUGO, +- binder_proc_dir_entry_root, +- binder_read_proc_transaction_log, +- &binder_transaction_log); +- create_proc_read_entry("failed_transaction_log", +- S_IRUGO, +- binder_proc_dir_entry_root, +- binder_read_proc_transaction_log, +- &binder_transaction_log_failed); ++ proc_create("state", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ &binder_state_fops); ++ proc_create("stats", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ &binder_stats_fops); ++ proc_create("transactions", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ &binder_transactions_fops); ++ proc_create_data("transaction_log", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ &binder_transaction_log_fops, ++ &binder_transaction_log); ++ proc_create_data("failed_transaction_log", ++ S_IRUGO, ++ binder_proc_dir_entry_root, ++ &binder_transaction_log_fops, ++ &binder_transaction_log_failed); + } + return ret; + } diff --git a/patches.android/android-0011-staging-android-binder-Move-debugging-information-from-procf.patch b/patches.android/android-0011-staging-android-binder-Move-debugging-information-from-procf.patch new file mode 100644 index 0000000000000..961982ec39ea5 --- /dev/null +++ b/patches.android/android-0011-staging-android-binder-Move-debugging-information-from-procf.patch @@ -0,0 +1,151 @@ +From 16b665543864904714f028b1d349f5d905f39afb Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Tue, 28 Apr 2009 20:57:50 -0700 +Subject: staging: android: binder: Move debugging information from + procfs to debugfs +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 16b665543864904714f028b1d349f5d905f39afb + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 371232b..669e2a6 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -26,7 +26,7 @@ + #include <linux/mutex.h> + #include <linux/nsproxy.h> + #include <linux/poll.h> +-#include <linux/proc_fs.h> ++#include <linux/debugfs.h> + #include <linux/rbtree.h> + #include <linux/sched.h> + #include <linux/seq_file.h> +@@ -43,8 +43,8 @@ static HLIST_HEAD(binder_procs); + static HLIST_HEAD(binder_deferred_list); + static HLIST_HEAD(binder_dead_nodes); + +-static struct proc_dir_entry *binder_proc_dir_entry_root; +-static struct proc_dir_entry 
*binder_proc_dir_entry_proc; ++static struct dentry *binder_debugfs_dir_entry_root; ++static struct dentry *binder_debugfs_dir_entry_proc; + static struct binder_node *binder_context_mgr_node; + static uid_t binder_context_mgr_uid = -1; + static int binder_last_id; +@@ -52,7 +52,7 @@ static int binder_last_id; + #define BINDER_DEBUG_ENTRY(name) \ + static int binder_##name##_open(struct inode *inode, struct file *file) \ + { \ +- return single_open(file, binder_##name##_show, PDE(inode)->data); \ ++ return single_open(file, binder_##name##_show, inode->i_private); \ + } \ + \ + static const struct file_operations binder_##name##_fops = { \ +@@ -310,6 +310,7 @@ struct binder_proc { + int requested_threads_started; + int ready_threads; + long default_priority; ++ struct dentry *debugfs_entry; + }; + + enum { +@@ -2891,13 +2892,11 @@ static int binder_open(struct inode *nodp, struct file *filp) + filp->private_data = proc; + mutex_unlock(&binder_lock); + +- if (binder_proc_dir_entry_proc) { ++ if (binder_debugfs_dir_entry_proc) { + char strbuf[11]; + snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); +- remove_proc_entry(strbuf, binder_proc_dir_entry_proc); +- proc_create_data(strbuf, S_IRUGO, +- binder_proc_dir_entry_proc, +- &binder_proc_fops, proc); ++ proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO, ++ binder_debugfs_dir_entry_proc, proc, &binder_proc_fops); + } + + return 0; +@@ -2934,12 +2933,7 @@ static void binder_deferred_flush(struct binder_proc *proc) + static int binder_release(struct inode *nodp, struct file *filp) + { + struct binder_proc *proc = filp->private_data; +- if (binder_proc_dir_entry_proc) { +- char strbuf[11]; +- snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); +- remove_proc_entry(strbuf, binder_proc_dir_entry_proc); +- } +- ++ debugfs_remove(proc->debugfs_entry); + binder_defer_work(proc, BINDER_DEFERRED_RELEASE); + + return 0; +@@ -3558,34 +3552,37 @@ static int __init binder_init(void) + { + int ret; + +- binder_proc_dir_entry_root = proc_mkdir("binder", NULL); +- if (binder_proc_dir_entry_root) +- binder_proc_dir_entry_proc = proc_mkdir("proc", +- binder_proc_dir_entry_root); ++ binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); ++ if (binder_debugfs_dir_entry_root) ++ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", ++ binder_debugfs_dir_entry_root); + ret = misc_register(&binder_miscdev); +- if (binder_proc_dir_entry_root) { +- proc_create("state", +- S_IRUGO, +- binder_proc_dir_entry_root, +- &binder_state_fops); +- proc_create("stats", +- S_IRUGO, +- binder_proc_dir_entry_root, +- &binder_stats_fops); +- proc_create("transactions", +- S_IRUGO, +- binder_proc_dir_entry_root, +- &binder_transactions_fops); +- proc_create_data("transaction_log", +- S_IRUGO, +- binder_proc_dir_entry_root, +- &binder_transaction_log_fops, +- &binder_transaction_log); +- proc_create_data("failed_transaction_log", +- S_IRUGO, +- binder_proc_dir_entry_root, +- &binder_transaction_log_fops, +- &binder_transaction_log_failed); ++ if (binder_debugfs_dir_entry_root) { ++ debugfs_create_file("state", ++ S_IRUGO, ++ binder_debugfs_dir_entry_root, ++ NULL, ++ &binder_state_fops); ++ debugfs_create_file("stats", ++ S_IRUGO, ++ binder_debugfs_dir_entry_root, ++ NULL, ++ &binder_stats_fops); ++ debugfs_create_file("transactions", ++ S_IRUGO, ++ binder_debugfs_dir_entry_root, ++ NULL, ++ &binder_transactions_fops); ++ debugfs_create_file("transaction_log", ++ S_IRUGO, ++ binder_debugfs_dir_entry_root, ++ &binder_transaction_log, ++ 
&binder_transaction_log_fops); ++ debugfs_create_file("failed_transaction_log", ++ S_IRUGO, ++ binder_debugfs_dir_entry_root, ++ &binder_transaction_log_failed, ++ &binder_transaction_log_fops); + } + return ret; + } diff --git a/patches.android/android-0012-Staging-android-timed_gpio-Properly-discard-invalid-timeout-.patch b/patches.android/android-0012-Staging-android-timed_gpio-Properly-discard-invalid-timeout-.patch new file mode 100644 index 0000000000000..3fa2971af3860 --- /dev/null +++ b/patches.android/android-0012-Staging-android-timed_gpio-Properly-discard-invalid-timeout-.patch @@ -0,0 +1,30 @@ +From 8bfe15f3de0af92fd822b7475878bddc93fa490d Mon Sep 17 00:00:00 2001 +From: Mike Lockwood <lockwood@android.com> +Date: Sat, 17 Apr 2010 12:01:35 -0400 +Subject: Staging: android: timed_gpio: Properly discard invalid + timeout values. +Patch-mainline: HEAD +Git-commit: 8bfe15f3de0af92fd822b7475878bddc93fa490d + +The timed output device never previously checked the return value of sscanf, +resulting in an uninitialized int being passed to enable() if input value +was invalid. + +Signed-off-by: Mike Lockwood <lockwood@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c +index 62e7918..f373422 100644 +--- a/drivers/staging/android/timed_output.c ++++ b/drivers/staging/android/timed_output.c +@@ -41,7 +41,9 @@ static ssize_t enable_store( + struct timed_output_dev *tdev = dev_get_drvdata(dev); + int value; + +- sscanf(buf, "%d", &value); ++ if (sscanf(buf, "%d", &value) != 1) ++ return -EINVAL; ++ + tdev->enable(tdev, value); + + return size; diff --git a/patches.android/android-0013-Staging-android-binder-Create-dedicated-workqueue-for-binder.patch b/patches.android/android-0013-Staging-android-binder-Create-dedicated-workqueue-for-binder.patch new file mode 100644 index 0000000000000..69bb56fbb6c88 --- /dev/null +++ b/patches.android/android-0013-Staging-android-binder-Create-dedicated-workqueue-for-binder.patch @@ -0,0 +1,49 @@ +From 3c762a49b12073c96f6a55b4dfc8b6a2a45fedff Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Thu, 22 Apr 2010 15:53:23 -0700 +Subject: Staging: android: binder: Create dedicated workqueue for + binder deferred work +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 3c762a49b12073c96f6a55b4dfc8b6a2a45fedff + +Some drivers flush the global workqueue when closed. This would deadlock if +the last reference to the file was released from the binder. 
+ +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 669e2a6..6d6fe7b 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -48,6 +48,7 @@ static struct dentry *binder_debugfs_dir_entry_proc; + static struct binder_node *binder_context_mgr_node; + static uid_t binder_context_mgr_uid = -1; + static int binder_last_id; ++static struct workqueue_struct *binder_deferred_workqueue; + + #define BINDER_DEBUG_ENTRY(name) \ + static int binder_##name##_open(struct inode *inode, struct file *file) \ +@@ -3109,7 +3110,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer) + if (hlist_unhashed(&proc->deferred_work_node)) { + hlist_add_head(&proc->deferred_work_node, + &binder_deferred_list); +- schedule_work(&binder_deferred_work); ++ queue_work(binder_deferred_workqueue, &binder_deferred_work); + } + mutex_unlock(&binder_deferred_lock); + } +@@ -3552,6 +3553,10 @@ static int __init binder_init(void) + { + int ret; + ++ binder_deferred_workqueue = create_singlethread_workqueue("binder"); ++ if (!binder_deferred_workqueue) ++ return -ENOMEM; ++ + binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); + if (binder_debugfs_dir_entry_root) + binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", diff --git a/patches.android/android-0014-staging-android-lowmemorykiller-Don-t-try-to-kill-the-same-p.patch b/patches.android/android-0014-staging-android-lowmemorykiller-Don-t-try-to-kill-the-same-p.patch new file mode 100644 index 0000000000000..75be85a7c75dc --- /dev/null +++ b/patches.android/android-0014-staging-android-lowmemorykiller-Don-t-try-to-kill-the-same-p.patch @@ -0,0 +1,42 @@ +From 4964cd41cd966502c1e0c5bc929ed15c175f8218 Mon Sep 17 00:00:00 2001 +From: San Mehat <san@google.com> +Date: Mon, 26 Apr 2010 15:11:04 -0700 +Subject: staging: android: lowmemorykiller: Don't try to kill the + same pid over and over +Patch-mainline: HEAD +Git-commit: 4964cd41cd966502c1e0c5bc929ed15c175f8218 + + Under certain circumstances, a process can take awhile to +handle a sig-kill (especially if it's in a scheduler group with +a very low share ratio). When this occurs, lowmemkiller returns +to vmscan indicating the process memory has been freed - even +though the process is still waiting to die. Since the memory +hasn't actually freed, lowmemkiller is called again shortly after, +and picks the same process to die; regardless of the fact that +it has already been 'scheduled' to die and the memory has already +been reported to vmscan as having been freed. + + Solution is to check fatal_signal_pending() on the selected +task, and if it's already pending destruction return; indicating +to vmscan that no resources were freed on this pass. 
+ +Signed-off-by: San Mehat <san@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 32314e8..8372178 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -133,6 +133,12 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + p->pid, p->comm, oom_adj, tasksize); + } + if (selected) { ++ if (fatal_signal_pending(selected)) { ++ pr_warning("process %d is suffering a slow death\n", ++ selected->pid); ++ read_unlock(&tasklist_lock); ++ return rem; ++ } + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); diff --git a/patches.android/android-0015-staging-android-lowmemkiller-Substantially-reduce-overhead-d.patch b/patches.android/android-0015-staging-android-lowmemkiller-Substantially-reduce-overhead-d.patch new file mode 100644 index 0000000000000..5f80612815736 --- /dev/null +++ b/patches.android/android-0015-staging-android-lowmemkiller-Substantially-reduce-overhead-d.patch @@ -0,0 +1,99 @@ +From 4755b72e261478b48337e0e54c8448cbea32c5c8 Mon Sep 17 00:00:00 2001 +From: San Mehat <san@google.com> +Date: Wed, 5 May 2010 11:38:42 -0700 +Subject: staging: android: lowmemkiller: Substantially reduce + overhead during reclaim +Patch-mainline: HEAD +Git-commit: 4755b72e261478b48337e0e54c8448cbea32c5c8 + +This patch optimizes lowmemkiller to not do any work when it has an outstanding +kill-request. This greatly reduces the pressure on the task_list lock +(improving interactivity), as well as improving the vmscan performance +when under heavy memory pressure (by up to 20x in tests). + +Note: For this enhancement to work, you need CONFIG_PROFILING + +Signed-off-by: San Mehat <san@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 8372178..4523093 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -34,6 +34,8 @@ + #include <linux/mm.h> + #include <linux/oom.h> + #include <linux/sched.h> ++#include <linux/profile.h> ++#include <linux/notifier.h> + + static uint32_t lowmem_debug_level = 2; + static int lowmem_adj[6] = { +@@ -51,12 +53,32 @@ static size_t lowmem_minfree[6] = { + }; + static int lowmem_minfree_size = 4; + ++static struct task_struct *lowmem_deathpending; ++ + #define lowmem_print(level, x...) \ + do { \ + if (lowmem_debug_level >= (level)) \ + printk(x); \ + } while (0) + ++static int ++task_notify_func(struct notifier_block *self, unsigned long val, void *data); ++ ++static struct notifier_block task_nb = { ++ .notifier_call = task_notify_func, ++}; ++ ++static int ++task_notify_func(struct notifier_block *self, unsigned long val, void *data) ++{ ++ struct task_struct *task = data; ++ if (task == lowmem_deathpending) { ++ lowmem_deathpending = NULL; ++ task_handoff_unregister(&task_nb); ++ } ++ return NOTIFY_OK; ++} ++ + static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + { + struct task_struct *p; +@@ -71,6 +93,18 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + int other_free = global_page_state(NR_FREE_PAGES); + int other_file = global_page_state(NR_FILE_PAGES); + ++ /* ++ * If we already have a death outstanding, then ++ * bail out right away; indicating to vmscan ++ * that we have nothing further to offer on ++ * this pass. 
++ * ++ * Note: Currently you need CONFIG_PROFILING ++ * for this to work correctly. ++ */ ++ if (lowmem_deathpending) ++ return 0; ++ + if (lowmem_adj_size < array_size) + array_size = lowmem_adj_size; + if (lowmem_minfree_size < array_size) +@@ -142,6 +176,15 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); ++ /* ++ * If CONFIG_PROFILING is off, then task_handoff_register() ++ * is a nop. In that case we don't want to stall the killer ++ * by setting lowmem_deathpending. ++ */ ++#ifdef CONFIG_PROFILING ++ lowmem_deathpending = selected; ++ task_handoff_register(&task_nb); ++#endif + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } diff --git a/patches.android/android-0016-staging-binder-Fix-memory-corruption-via-page-aliasing.patch b/patches.android/android-0016-staging-binder-Fix-memory-corruption-via-page-aliasing.patch new file mode 100644 index 0000000000000..dc17772c2bdcc --- /dev/null +++ b/patches.android/android-0016-staging-binder-Fix-memory-corruption-via-page-aliasing.patch @@ -0,0 +1,48 @@ +From 58526090ece3582516e62779739a7d665a74708c Mon Sep 17 00:00:00 2001 +From: Christopher Lais <chris+android@zenthought.org> +Date: Sat, 1 May 2010 15:51:48 -0500 +Subject: staging: binder: Fix memory corruption via page aliasing +Patch-mainline: HEAD +Git-commit: 58526090ece3582516e62779739a7d665a74708c + +binder_deferred_release was not unmapping the page from the buffer +before freeing it, causing memory corruption. This only happened +when page(s) had not been freed by binder_update_page_range, which +properly unmaps the pages. + +This only happens on architectures with VIPT aliasing. + +To reproduce, create a program which opens, mmaps, munmaps, then closes +the binder very quickly. This should leave a page allocated when the +binder is released. When binder_deferrred_release is called on the +close, the page will remain mapped to the address in the linear +proc->buffer. Later, we may map the same physical page to a different +virtual address that has different coloring, and this may cause +aliasing to occur. + +PAGE_POISONING will greatly increase your chances of noticing any +problems. 
+ +Signed-off-by: Christopher Lais <chris+android@zenthought.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 6d6fe7b..7491801 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -3036,11 +3036,14 @@ static void binder_deferred_release(struct binder_proc *proc) + int i; + for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) { + if (proc->pages[i]) { ++ void *page_addr = proc->buffer + i * PAGE_SIZE; + binder_debug(BINDER_DEBUG_BUFFER_ALLOC, + "binder_release: %d: " + "page %d at %p not freed\n", + proc->pid, i, +- proc->buffer + i * PAGE_SIZE); ++ page_addr); ++ unmap_kernel_range((unsigned long)page_addr, ++ PAGE_SIZE); + __free_page(proc->pages[i]); + page_count++; + } diff --git a/patches.android/android-0017-staging-android-lowmemorykiller-Remove-bitrotted-codepath.patch b/patches.android/android-0017-staging-android-lowmemorykiller-Remove-bitrotted-codepath.patch new file mode 100644 index 0000000000000..290c30bf9a87b --- /dev/null +++ b/patches.android/android-0017-staging-android-lowmemorykiller-Remove-bitrotted-codepath.patch @@ -0,0 +1,32 @@ +From eb943f6be011b33455b767880e13c34a2bb96a5e Mon Sep 17 00:00:00 2001 +From: San Mehat <san@google.com> +Date: Thu, 6 May 2010 15:43:46 -0700 +Subject: staging: android: lowmemorykiller: Remove bitrotted codepath +Patch-mainline: HEAD +Git-commit: eb943f6be011b33455b767880e13c34a2bb96a5e + +Now that we're murder-synchronous, this code path will never be +called (and if it does, it doesn't tell us anything useful other +than we killed a task that was already being killed by somebody +else but hadn't gotten its' signal yet) + +Signed-off-by: San Mehat <san@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 4523093..cf79eb9 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -167,12 +167,6 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) + p->pid, p->comm, oom_adj, tasksize); + } + if (selected) { +- if (fatal_signal_pending(selected)) { +- pr_warning("process %d is suffering a slow death\n", +- selected->pid); +- read_unlock(&tasklist_lock); +- return rem; +- } + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); diff --git a/patches.android/android-0018-staging-android-lowmemorykiller-Update-arguments-of-shrinker.patch b/patches.android/android-0018-staging-android-lowmemorykiller-Update-arguments-of-shrinker.patch new file mode 100644 index 0000000000000..71bfef441c308 --- /dev/null +++ b/patches.android/android-0018-staging-android-lowmemorykiller-Update-arguments-of-shrinker.patch @@ -0,0 +1,24 @@ +From 06a1074e1c789a777732f8c432d913b0fedb8ff5 Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@google.com> +Date: Sat, 21 Aug 2010 17:25:42 -0700 +Subject: staging: android: lowmemorykiller: Update arguments of + shrinker for 2.6.35 +Patch-mainline: HEAD +Git-commit: 06a1074e1c789a777732f8c432d913b0fedb8ff5 + +Signed-off-by: Colin Cross <ccross@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index cf79eb9..93a1758 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ 
-79,7 +79,7 @@ task_notify_func(struct notifier_block *self, unsigned long val, void *data) + return NOTIFY_OK; + } + +-static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) ++static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + { + struct task_struct *p; + struct task_struct *selected = NULL; diff --git a/patches.android/android-0019-staging-android-lowmemorykiller-Ignore-shmem-pages-in-page-c.patch b/patches.android/android-0019-staging-android-lowmemorykiller-Ignore-shmem-pages-in-page-c.patch new file mode 100644 index 0000000000000..3d032787d3ee4 --- /dev/null +++ b/patches.android/android-0019-staging-android-lowmemorykiller-Ignore-shmem-pages-in-page-c.patch @@ -0,0 +1,28 @@ +From 71b2c82bdf67ab24716c536e10de436169d3574c Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Tue, 23 Nov 2010 17:29:04 -0800 +Subject: staging: android: lowmemorykiller: Ignore shmem pages in + page-cache +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 71b2c82bdf67ab24716c536e10de436169d3574c + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 93a1758..0ae08ad 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -91,7 +91,8 @@ static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + int selected_oom_adj; + int array_size = ARRAY_SIZE(lowmem_adj); + int other_free = global_page_state(NR_FREE_PAGES); +- int other_file = global_page_state(NR_FILE_PAGES); ++ int other_file = global_page_state(NR_FILE_PAGES) - ++ global_page_state(NR_SHMEM); + + /* + * If we already have a death outstanding, then diff --git a/patches.android/android-0020-android-lowmemorykiller-Fix-arguments-to-lowmem_shrink.patch b/patches.android/android-0020-android-lowmemorykiller-Fix-arguments-to-lowmem_shrink.patch new file mode 100644 index 0000000000000..0a31aeafa768f --- /dev/null +++ b/patches.android/android-0020-android-lowmemorykiller-Fix-arguments-to-lowmem_shrink.patch @@ -0,0 +1,61 @@ +From cae9bf11ef0d92875b222f994a2cfd723991435a Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@android.com> +Date: Wed, 22 Jun 2011 16:05:47 -0700 +Subject: android: lowmemorykiller: Fix arguments to lowmem_shrink +Patch-mainline: HEAD +Git-commit: cae9bf11ef0d92875b222f994a2cfd723991435a + +The arguments to shrink functions have changed, update +lowmem_shrink to match. 
+ +Signed-off-by: Colin Cross <ccross@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 0ae08ad..4098bbb 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -79,7 +79,7 @@ task_notify_func(struct notifier_block *self, unsigned long val, void *data) + return NOTIFY_OK; + } + +-static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) ++static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) + { + struct task_struct *p; + struct task_struct *selected = NULL; +@@ -117,17 +117,17 @@ static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + break; + } + } +- if (nr_to_scan > 0) +- lowmem_print(3, "lowmem_shrink %d, %x, ofree %d %d, ma %d\n", +- nr_to_scan, gfp_mask, other_free, other_file, ++ if (sc->nr_to_scan > 0) ++ lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n", ++ sc->nr_to_scan, sc->gfp_mask, other_free, other_file, + min_adj); + rem = global_page_state(NR_ACTIVE_ANON) + + global_page_state(NR_ACTIVE_FILE) + + global_page_state(NR_INACTIVE_ANON) + + global_page_state(NR_INACTIVE_FILE); +- if (nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { +- lowmem_print(5, "lowmem_shrink %d, %x, return %d\n", +- nr_to_scan, gfp_mask, rem); ++ if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) { ++ lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n", ++ sc->nr_to_scan, sc->gfp_mask, rem); + return rem; + } + selected_oom_adj = min_adj; +@@ -183,8 +183,8 @@ static int lowmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } +- lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", +- nr_to_scan, gfp_mask, rem); ++ lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n", ++ sc->nr_to_scan, sc->gfp_mask, rem); + read_unlock(&tasklist_lock); + return rem; + } diff --git a/patches.android/android-0021-android-logger-bump-up-the-logger-buffer-sizes.patch b/patches.android/android-0021-android-logger-bump-up-the-logger-buffer-sizes.patch new file mode 100644 index 0000000000000..d1442539984ba --- /dev/null +++ b/patches.android/android-0021-android-logger-bump-up-the-logger-buffer-sizes.patch @@ -0,0 +1,36 @@ +From 2b374956f3afee5857c85b073d726be11f4d2e9b Mon Sep 17 00:00:00 2001 +From: JP Abgrall <jpa@google.com> +Date: Thu, 11 Aug 2011 21:33:35 -0700 +Subject: android: logger: bump up the logger buffer sizes +Patch-mainline: HEAD +Git-commit: 2b374956f3afee5857c85b073d726be11f4d2e9b + +(port from common android-2.6.39 + commit: 11430f16545205c614dd5bd58e4a7ee630fc0f9f) + +events: (no change, 256) +main: 64 -> 256 +radio: 64 -> 256 +system: 64 -> 256 + +Signed-off-by: JP Abgrall <jpa@google.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +index 531bdbe..fa76ce7 100644 +--- a/drivers/staging/android/logger.c ++++ b/drivers/staging/android/logger.c +@@ -555,10 +555,10 @@ static struct logger_log VAR = { \ + .size = SIZE, \ + }; + +-DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 64*1024) ++DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024) + DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024) +-DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 64*1024) +-DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 64*1024) ++DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024) 
++DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024) + + static struct logger_log *get_log_from_minor(int minor) + { diff --git a/patches.android/android-0022-staging-android-ram_console-pass-in-a-boot-info-string.patch b/patches.android/android-0022-staging-android-ram_console-pass-in-a-boot-info-string.patch new file mode 100644 index 0000000000000..8d89238ab5ca3 --- /dev/null +++ b/patches.android/android-0022-staging-android-ram_console-pass-in-a-boot-info-string.patch @@ -0,0 +1,166 @@ +From a6707f830e39ab5ef285d9155525eac5e500e55d Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@android.com> +Date: Tue, 25 Oct 2011 14:31:58 -0700 +Subject: staging: android: ram_console: pass in a boot info string +Patch-mainline: HEAD +Git-commit: a6707f830e39ab5ef285d9155525eac5e500e55d + +Allow the board file to pass a boot info string through the +platform data that is appended to the /proc/last_kmsg file. + +[moved the .h file to drivers/staging/android/ to be self-contained - gregkh] + +Signed-off-by: Colin Cross <ccross@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c +index 53f736b..6d4d679 100644 +--- a/drivers/staging/android/ram_console.c ++++ b/drivers/staging/android/ram_console.c +@@ -21,6 +21,7 @@ + #include <linux/string.h> + #include <linux/uaccess.h> + #include <linux/io.h> ++#include "ram_console.h" + + #ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + #include <linux/rslib.h> +@@ -155,14 +156,20 @@ void ram_console_enable_console(int enabled) + } + + static void __init +-ram_console_save_old(struct ram_console_buffer *buffer, char *dest) ++ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo, ++ char *dest) + { + size_t old_log_size = buffer->size; ++ size_t bootinfo_size = 0; ++ size_t total_size = old_log_size; ++ char *ptr; ++ const char *bootinfo_label = "Boot info:\n"; ++ + #ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + uint8_t *block; + uint8_t *par; + char strbuf[80]; +- int strbuf_len; ++ int strbuf_len = 0; + + block = buffer->data; + par = ram_console_par_buffer; +@@ -197,11 +204,15 @@ ram_console_save_old(struct ram_console_buffer *buffer, char *dest) + "\nNo errors detected\n"); + if (strbuf_len >= sizeof(strbuf)) + strbuf_len = sizeof(strbuf) - 1; +- old_log_size += strbuf_len; ++ total_size += strbuf_len; + #endif + ++ if (bootinfo) ++ bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label); ++ total_size += bootinfo_size; ++ + if (dest == NULL) { +- dest = kmalloc(old_log_size, GFP_KERNEL); ++ dest = kmalloc(total_size, GFP_KERNEL); + if (dest == NULL) { + printk(KERN_ERR + "ram_console: failed to allocate buffer\n"); +@@ -210,19 +221,27 @@ ram_console_save_old(struct ram_console_buffer *buffer, char *dest) + } + + ram_console_old_log = dest; +- ram_console_old_log_size = old_log_size; ++ ram_console_old_log_size = total_size; + memcpy(ram_console_old_log, + &buffer->data[buffer->start], buffer->size - buffer->start); + memcpy(ram_console_old_log + buffer->size - buffer->start, + &buffer->data[0], buffer->start); ++ ptr = ram_console_old_log + old_log_size; + #ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION +- memcpy(ram_console_old_log + old_log_size - strbuf_len, +- strbuf, strbuf_len); ++ memcpy(ptr, strbuf, strbuf_len); ++ ptr += strbuf_len; + #endif ++ if (bootinfo) { ++ memcpy(ptr, bootinfo_label, strlen(bootinfo_label)); ++ ptr += strlen(bootinfo_label); ++ memcpy(ptr, bootinfo, bootinfo_size); ++ ptr 
+= bootinfo_size; ++ } + } + + static int __init ram_console_init(struct ram_console_buffer *buffer, +- size_t buffer_size, char *old_buf) ++ size_t buffer_size, const char *bootinfo, ++ char *old_buf) + { + #ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION + int numerr; +@@ -289,7 +308,7 @@ static int __init ram_console_init(struct ram_console_buffer *buffer, + printk(KERN_INFO "ram_console: found existing buffer, " + "size %d, start %d\n", + buffer->size, buffer->start); +- ram_console_save_old(buffer, old_buf); ++ ram_console_save_old(buffer, bootinfo, old_buf); + } + } else { + printk(KERN_INFO "ram_console: no valid data in buffer " +@@ -313,6 +332,7 @@ static int __init ram_console_early_init(void) + return ram_console_init((struct ram_console_buffer *) + CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR, + CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE, ++ NULL, + ram_console_old_log_init_buffer); + } + #else +@@ -322,6 +342,8 @@ static int ram_console_driver_probe(struct platform_device *pdev) + size_t start; + size_t buffer_size; + void *buffer; ++ const char *bootinfo = NULL; ++ struct ram_console_platform_data *pdata = pdev->dev.platform_data; + + if (res == NULL || pdev->num_resources != 1 || + !(res->flags & IORESOURCE_MEM)) { +@@ -339,7 +361,10 @@ static int ram_console_driver_probe(struct platform_device *pdev) + return -ENOMEM; + } + +- return ram_console_init(buffer, buffer_size, NULL/* allocate */); ++ if (pdata) ++ bootinfo = pdata->bootinfo; ++ ++ return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */); + } + + static struct platform_driver ram_console_driver = { +diff --git a/drivers/staging/android/ram_console.h b/drivers/staging/android/ram_console.h +new file mode 100644 +index 0000000..9f1125c +--- /dev/null ++++ b/drivers/staging/android/ram_console.h +@@ -0,0 +1,22 @@ ++/* ++ * Copyright (C) 2010 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ ++#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ ++ ++struct ram_console_platform_data { ++ const char *bootinfo; ++}; ++ ++#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */ diff --git a/patches.android/android-0023-Staging-android-fixed-white-spaces-coding-style-issue-in-log.patch b/patches.android/android-0023-Staging-android-fixed-white-spaces-coding-style-issue-in-log.patch new file mode 100644 index 0000000000000..c0e6e6c61a6ab --- /dev/null +++ b/patches.android/android-0023-Staging-android-fixed-white-spaces-coding-style-issue-in-log.patch @@ -0,0 +1,52 @@ +From 277cdd01d6e831e2168cb3f2c082eecd7fcaac06 Mon Sep 17 00:00:00 2001 +From: Marco Navarra <fromenglish@gmail.com> +Date: Thu, 15 Dec 2011 17:57:48 +0100 +Subject: Staging: android: fixed white spaces coding style issue in + logger.c +Patch-mainline: HEAD +Git-commit: 277cdd01d6e831e2168cb3f2c082eecd7fcaac06 + +This patch fixes some space-before-tabs warnings found by checkpatch tool on the staging android driver file logger.c + +Signed-off-by: Marco Navarra <fromenglish@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c +index fa76ce7..ffc2d04 100644 +--- a/drivers/staging/android/logger.c ++++ b/drivers/staging/android/logger.c +@@ -37,7 +37,7 @@ + * mutex 'mutex'. + */ + struct logger_log { +- unsigned char *buffer;/* the ring buffer itself */ ++ unsigned char *buffer;/* the ring buffer itself */ + struct miscdevice misc; /* misc device representing the log */ + wait_queue_head_t wq; /* wait queue for readers */ + struct list_head readers; /* this log's readers */ +@@ -67,9 +67,9 @@ struct logger_reader { + * + * This isn't aesthetic. We have several goals: + * +- * 1) Need to quickly obtain the associated log during an I/O operation +- * 2) Readers need to maintain state (logger_reader) +- * 3) Writers need to be very fast (open() should be a near no-op) ++ * 1) Need to quickly obtain the associated log during an I/O operation ++ * 2) Readers need to maintain state (logger_reader) ++ * 3) Writers need to be very fast (open() should be a near no-op) + * + * In the reader case, we can trivially go file->logger_reader->logger_log. + * For a writer, we don't want to maintain a logger_reader, so we just go +@@ -147,9 +147,9 @@ static ssize_t do_read_log_to_user(struct logger_log *log, + * + * Behavior: + * +- * - O_NONBLOCK works +- * - If there are no log entries to read, blocks until log is written to +- * - Atomically reads exactly one log entry ++ * - O_NONBLOCK works ++ * - If there are no log entries to read, blocks until log is written to ++ * - Atomically reads exactly one log entry + * + * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read + * buffer is insufficient to hold next entry. diff --git a/patches.android/android-0024-staging-android-switch-switch-class-and-GPIO-drivers.patch b/patches.android/android-0024-staging-android-switch-switch-class-and-GPIO-drivers.patch new file mode 100644 index 0000000000000..3dde18bae7df1 --- /dev/null +++ b/patches.android/android-0024-staging-android-switch-switch-class-and-GPIO-drivers.patch @@ -0,0 +1,499 @@ +From e0f5bb9b3850bdd7907eda9eb923cd3f9d4358b8 Mon Sep 17 00:00:00 2001 +From: Mike Lockwood <lockwood@android.com> +Date: Tue, 14 Oct 2008 12:50:16 -0400 +Subject: staging: android: switch: switch class and GPIO drivers. 
+MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: e0f5bb9b3850bdd7907eda9eb923cd3f9d4358b8 + +This adds the Android switch driver code to the staging tree. + +[Note, this code was located in drivers/switch/ in the Android kernel +releases, but as that api wasn't generally accepted, and the interface +is working toward changing to the newly proposed extcon inteface, this +driver was placed here until the extcon code is merged into mainline and +the Android userspace code is converted over to using it. - gregkh] + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Mike Lockwood <lockwood@android.com> +Cc: MyungJoo Ham <myungjoo.ham@samsung.com> +Cc: Kyungmin Park <kyungmin.park@samsung.com> +Cc: Donggeun Kim <dg77.kim@samsung.com> +Cc: Arnd Bergmann <arnd@arndb.de> +Cc: MyungJoo Ham <myungjoo.ham@gmail.com> +Cc: Linus Walleij <linus.walleij@linaro.org> +Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com> +Cc: NeilBrown <neilb@suse.de> +Cc: Morten CHRISTIANSEN <morten.christiansen@stericsson.com> +Cc: Mark Brown <broonie@opensource.wolfsonmicro.com> +Cc: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index 2471949..0712b18 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -90,6 +90,8 @@ config ANDROID_LOW_MEMORY_KILLER + ---help--- + Register processes to be killed when memory is low + ++source "drivers/staging/android/switch/Kconfig" ++ + endif # if ANDROID + + endmenu +diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile +index 8e057e6..36e32bc 100644 +--- a/drivers/staging/android/Makefile ++++ b/drivers/staging/android/Makefile +@@ -4,3 +4,4 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o + obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o + obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o + obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o ++obj-$(CONFIG_ANDROID_SWITCH) += switch/ +diff --git a/drivers/staging/android/switch/Kconfig b/drivers/staging/android/switch/Kconfig +new file mode 100644 +index 0000000..36846f6 +--- /dev/null ++++ b/drivers/staging/android/switch/Kconfig +@@ -0,0 +1,11 @@ ++menuconfig ANDROID_SWITCH ++ tristate "Android Switch class support" ++ help ++ Say Y here to enable Android switch class support. This allows ++ monitoring switches by userspace via sysfs and uevent. ++ ++config ANDROID_SWITCH_GPIO ++ tristate "Android GPIO Switch support" ++ depends on GENERIC_GPIO && ANDROID_SWITCH ++ help ++ Say Y here to enable GPIO based switch support. +diff --git a/drivers/staging/android/switch/Makefile b/drivers/staging/android/switch/Makefile +new file mode 100644 +index 0000000..d76bfdc +--- /dev/null ++++ b/drivers/staging/android/switch/Makefile +@@ -0,0 +1,4 @@ ++# Android Switch Class Driver ++obj-$(CONFIG_ANDROID_SWITCH) += switch_class.o ++obj-$(CONFIG_ANDROID_SWITCH_GPIO) += switch_gpio.o ++ +diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h +new file mode 100644 +index 0000000..3e4c748 +--- /dev/null ++++ b/drivers/staging/android/switch/switch.h +@@ -0,0 +1,53 @@ ++/* ++ * Switch class driver ++ * ++ * Copyright (C) 2008 Google, Inc. 
++ * Author: Mike Lockwood <lockwood@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++*/ ++ ++#ifndef __LINUX_SWITCH_H__ ++#define __LINUX_SWITCH_H__ ++ ++struct switch_dev { ++ const char *name; ++ struct device *dev; ++ int index; ++ int state; ++ ++ ssize_t (*print_name)(struct switch_dev *sdev, char *buf); ++ ssize_t (*print_state)(struct switch_dev *sdev, char *buf); ++}; ++ ++struct gpio_switch_platform_data { ++ const char *name; ++ unsigned gpio; ++ ++ /* if NULL, switch_dev.name will be printed */ ++ const char *name_on; ++ const char *name_off; ++ /* if NULL, "0" or "1" will be printed */ ++ const char *state_on; ++ const char *state_off; ++}; ++ ++extern int switch_dev_register(struct switch_dev *sdev); ++extern void switch_dev_unregister(struct switch_dev *sdev); ++ ++static inline int switch_get_state(struct switch_dev *sdev) ++{ ++ return sdev->state; ++} ++ ++extern void switch_set_state(struct switch_dev *sdev, int state); ++ ++#endif /* __LINUX_SWITCH_H__ */ +diff --git a/drivers/staging/android/switch/switch_class.c b/drivers/staging/android/switch/switch_class.c +new file mode 100644 +index 0000000..7468044 +--- /dev/null ++++ b/drivers/staging/android/switch/switch_class.c +@@ -0,0 +1,174 @@ ++/* ++ * switch_class.c ++ * ++ * Copyright (C) 2008 Google, Inc. ++ * Author: Mike Lockwood <lockwood@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++*/ ++ ++#include <linux/module.h> ++#include <linux/types.h> ++#include <linux/init.h> ++#include <linux/device.h> ++#include <linux/fs.h> ++#include <linux/err.h> ++#include "switch.h" ++ ++struct class *switch_class; ++static atomic_t device_count; ++ ++static ssize_t state_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct switch_dev *sdev = (struct switch_dev *) ++ dev_get_drvdata(dev); ++ ++ if (sdev->print_state) { ++ int ret = sdev->print_state(sdev, buf); ++ if (ret >= 0) ++ return ret; ++ } ++ return sprintf(buf, "%d\n", sdev->state); ++} ++ ++static ssize_t name_show(struct device *dev, struct device_attribute *attr, ++ char *buf) ++{ ++ struct switch_dev *sdev = (struct switch_dev *) ++ dev_get_drvdata(dev); ++ ++ if (sdev->print_name) { ++ int ret = sdev->print_name(sdev, buf); ++ if (ret >= 0) ++ return ret; ++ } ++ return sprintf(buf, "%s\n", sdev->name); ++} ++ ++static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, state_show, NULL); ++static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, name_show, NULL); ++ ++void switch_set_state(struct switch_dev *sdev, int state) ++{ ++ char name_buf[120]; ++ char state_buf[120]; ++ char *prop_buf; ++ char *envp[3]; ++ int env_offset = 0; ++ int length; ++ ++ if (sdev->state != state) { ++ sdev->state = state; ++ ++ prop_buf = (char *)get_zeroed_page(GFP_KERNEL); ++ if (prop_buf) { ++ length = name_show(sdev->dev, NULL, prop_buf); ++ if (length > 0) { ++ if (prop_buf[length - 1] == '\n') ++ prop_buf[length - 1] = 0; ++ snprintf(name_buf, sizeof(name_buf), ++ "SWITCH_NAME=%s", prop_buf); ++ envp[env_offset++] = name_buf; ++ } ++ length = state_show(sdev->dev, NULL, prop_buf); ++ if (length > 0) { ++ if (prop_buf[length - 1] == '\n') ++ prop_buf[length - 1] = 0; ++ snprintf(state_buf, sizeof(state_buf), ++ "SWITCH_STATE=%s", prop_buf); ++ envp[env_offset++] = state_buf; ++ } ++ envp[env_offset] = NULL; ++ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp); ++ free_page((unsigned long)prop_buf); ++ } else { ++ printk(KERN_ERR "out of memory in switch_set_state\n"); ++ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE); ++ } ++ } ++} ++EXPORT_SYMBOL_GPL(switch_set_state); ++ ++static int create_switch_class(void) ++{ ++ if (!switch_class) { ++ switch_class = class_create(THIS_MODULE, "switch"); ++ if (IS_ERR(switch_class)) ++ return PTR_ERR(switch_class); ++ atomic_set(&device_count, 0); ++ } ++ ++ return 0; ++} ++ ++int switch_dev_register(struct switch_dev *sdev) ++{ ++ int ret; ++ ++ if (!switch_class) { ++ ret = create_switch_class(); ++ if (ret < 0) ++ return ret; ++ } ++ ++ sdev->index = atomic_inc_return(&device_count); ++ sdev->dev = device_create(switch_class, NULL, ++ MKDEV(0, sdev->index), NULL, sdev->name); ++ if (IS_ERR(sdev->dev)) ++ return PTR_ERR(sdev->dev); ++ ++ ret = device_create_file(sdev->dev, &dev_attr_state); ++ if (ret < 0) ++ goto err_create_file_1; ++ ret = device_create_file(sdev->dev, &dev_attr_name); ++ if (ret < 0) ++ goto err_create_file_2; ++ ++ dev_set_drvdata(sdev->dev, sdev); ++ sdev->state = 0; ++ return 0; ++ ++err_create_file_2: ++ device_remove_file(sdev->dev, &dev_attr_state); ++err_create_file_1: ++ device_destroy(switch_class, MKDEV(0, sdev->index)); ++ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(switch_dev_register); ++ ++void switch_dev_unregister(struct switch_dev *sdev) ++{ ++ device_remove_file(sdev->dev, &dev_attr_name); ++ device_remove_file(sdev->dev, &dev_attr_state); ++ device_destroy(switch_class, 
MKDEV(0, sdev->index)); ++ dev_set_drvdata(sdev->dev, NULL); ++} ++EXPORT_SYMBOL_GPL(switch_dev_unregister); ++ ++static int __init switch_class_init(void) ++{ ++ return create_switch_class(); ++} ++ ++static void __exit switch_class_exit(void) ++{ ++ class_destroy(switch_class); ++} ++ ++module_init(switch_class_init); ++module_exit(switch_class_exit); ++ ++MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); ++MODULE_DESCRIPTION("Switch class driver"); ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c +new file mode 100644 +index 0000000..6ba8d97 +--- /dev/null ++++ b/drivers/staging/android/switch/switch_gpio.c +@@ -0,0 +1,172 @@ ++/* ++ * switch_gpio.c ++ * ++ * Copyright (C) 2008 Google, Inc. ++ * Author: Mike Lockwood <lockwood@android.com> ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++*/ ++ ++#include <linux/module.h> ++#include <linux/kernel.h> ++#include <linux/init.h> ++#include <linux/interrupt.h> ++#include <linux/platform_device.h> ++#include <linux/slab.h> ++#include <linux/workqueue.h> ++#include <linux/gpio.h> ++#include "switch.h" ++ ++struct gpio_switch_data { ++ struct switch_dev sdev; ++ unsigned gpio; ++ const char *name_on; ++ const char *name_off; ++ const char *state_on; ++ const char *state_off; ++ int irq; ++ struct work_struct work; ++}; ++ ++static void gpio_switch_work(struct work_struct *work) ++{ ++ int state; ++ struct gpio_switch_data *data = ++ container_of(work, struct gpio_switch_data, work); ++ ++ state = gpio_get_value(data->gpio); ++ switch_set_state(&data->sdev, state); ++} ++ ++static irqreturn_t gpio_irq_handler(int irq, void *dev_id) ++{ ++ struct gpio_switch_data *switch_data = ++ (struct gpio_switch_data *)dev_id; ++ ++ schedule_work(&switch_data->work); ++ return IRQ_HANDLED; ++} ++ ++static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf) ++{ ++ struct gpio_switch_data *switch_data = ++ container_of(sdev, struct gpio_switch_data, sdev); ++ const char *state; ++ if (switch_get_state(sdev)) ++ state = switch_data->state_on; ++ else ++ state = switch_data->state_off; ++ ++ if (state) ++ return sprintf(buf, "%s\n", state); ++ return -1; ++} ++ ++static int gpio_switch_probe(struct platform_device *pdev) ++{ ++ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data; ++ struct gpio_switch_data *switch_data; ++ int ret = 0; ++ ++ if (!pdata) ++ return -EBUSY; ++ ++ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL); ++ if (!switch_data) ++ return -ENOMEM; ++ ++ switch_data->sdev.name = pdata->name; ++ switch_data->gpio = pdata->gpio; ++ switch_data->name_on = pdata->name_on; ++ switch_data->name_off = pdata->name_off; ++ switch_data->state_on = pdata->state_on; ++ switch_data->state_off = pdata->state_off; ++ switch_data->sdev.print_state = switch_gpio_print_state; ++ ++ ret = switch_dev_register(&switch_data->sdev); ++ if (ret < 0) ++ goto err_switch_dev_register; ++ ++ ret = gpio_request(switch_data->gpio, pdev->name); ++ if (ret < 0) ++ goto err_request_gpio; ++ ++ ret = 
gpio_direction_input(switch_data->gpio); ++ if (ret < 0) ++ goto err_set_gpio_input; ++ ++ INIT_WORK(&switch_data->work, gpio_switch_work); ++ ++ switch_data->irq = gpio_to_irq(switch_data->gpio); ++ if (switch_data->irq < 0) { ++ ret = switch_data->irq; ++ goto err_detect_irq_num_failed; ++ } ++ ++ ret = request_irq(switch_data->irq, gpio_irq_handler, ++ IRQF_TRIGGER_LOW, pdev->name, switch_data); ++ if (ret < 0) ++ goto err_request_irq; ++ ++ /* Perform initial detection */ ++ gpio_switch_work(&switch_data->work); ++ ++ return 0; ++ ++err_request_irq: ++err_detect_irq_num_failed: ++err_set_gpio_input: ++ gpio_free(switch_data->gpio); ++err_request_gpio: ++ switch_dev_unregister(&switch_data->sdev); ++err_switch_dev_register: ++ kfree(switch_data); ++ ++ return ret; ++} ++ ++static int __devexit gpio_switch_remove(struct platform_device *pdev) ++{ ++ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev); ++ ++ cancel_work_sync(&switch_data->work); ++ gpio_free(switch_data->gpio); ++ switch_dev_unregister(&switch_data->sdev); ++ kfree(switch_data); ++ ++ return 0; ++} ++ ++static struct platform_driver gpio_switch_driver = { ++ .probe = gpio_switch_probe, ++ .remove = __devexit_p(gpio_switch_remove), ++ .driver = { ++ .name = "switch-gpio", ++ .owner = THIS_MODULE, ++ }, ++}; ++ ++static int __init gpio_switch_init(void) ++{ ++ return platform_driver_register(&gpio_switch_driver); ++} ++ ++static void __exit gpio_switch_exit(void) ++{ ++ platform_driver_unregister(&gpio_switch_driver); ++} ++ ++module_init(gpio_switch_init); ++module_exit(gpio_switch_exit); ++ ++MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>"); ++MODULE_DESCRIPTION("GPIO Switch driver"); ++MODULE_LICENSE("GPL"); diff --git a/patches.android/android-0025-staging-android-switch-minor-code-formatting-cleanups.patch b/patches.android/android-0025-staging-android-switch-minor-code-formatting-cleanups.patch new file mode 100644 index 0000000000000..97151bdb699c6 --- /dev/null +++ b/patches.android/android-0025-staging-android-switch-minor-code-formatting-cleanups.patch @@ -0,0 +1,55 @@ +From c001dff0f6715813bdb8e978dc8c05416c01962b Mon Sep 17 00:00:00 2001 +From: Greg Kroah-Hartman <gregkh@suse.de> +Date: Fri, 16 Dec 2011 13:41:37 -0800 +Subject: staging: android: switch: minor code formatting cleanups +Patch-mainline: HEAD +Git-commit: c001dff0f6715813bdb8e978dc8c05416c01962b + +This fixes a number of minor space issues in the Android switch code. 
+ +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h +index 3e4c748..4fcb310 100644 +--- a/drivers/staging/android/switch/switch.h ++++ b/drivers/staging/android/switch/switch.h +@@ -30,7 +30,7 @@ struct switch_dev { + + struct gpio_switch_platform_data { + const char *name; +- unsigned gpio; ++ unsigned gpio; + + /* if NULL, switch_dev.name will be printed */ + const char *name_on; +diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c +index 6ba8d97..38b2c2f 100644 +--- a/drivers/staging/android/switch/switch_gpio.c ++++ b/drivers/staging/android/switch/switch_gpio.c +@@ -91,7 +91,7 @@ static int gpio_switch_probe(struct platform_device *pdev) + switch_data->state_off = pdata->state_off; + switch_data->sdev.print_state = switch_gpio_print_state; + +- ret = switch_dev_register(&switch_data->sdev); ++ ret = switch_dev_register(&switch_data->sdev); + if (ret < 0) + goto err_switch_dev_register; + +@@ -126,7 +126,7 @@ err_detect_irq_num_failed: + err_set_gpio_input: + gpio_free(switch_data->gpio); + err_request_gpio: +- switch_dev_unregister(&switch_data->sdev); ++ switch_dev_unregister(&switch_data->sdev); + err_switch_dev_register: + kfree(switch_data); + +@@ -139,7 +139,7 @@ static int __devexit gpio_switch_remove(struct platform_device *pdev) + + cancel_work_sync(&switch_data->work); + gpio_free(switch_data->gpio); +- switch_dev_unregister(&switch_data->sdev); ++ switch_dev_unregister(&switch_data->sdev); + kfree(switch_data); + + return 0; diff --git a/patches.android/android-0026-staging-android-add-pmem-driver.patch b/patches.android/android-0026-staging-android-add-pmem-driver.patch new file mode 100644 index 0000000000000..756606ca5a16b --- /dev/null +++ b/patches.android/android-0026-staging-android-add-pmem-driver.patch @@ -0,0 +1,1499 @@ +From b6aba85c35baa7d08b7a601b30589bcae607d9e0 Mon Sep 17 00:00:00 2001 +From: Rebecca Schultz <rschultz@google.com> +Date: Thu, 24 Jul 2008 11:22:53 -0700 +Subject: staging: android: add pmem driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: b6aba85c35baa7d08b7a601b30589bcae607d9e0 + +This adds the Android pmem driver to the staging tree. + +[At this point in time, it is dependent on the ARM platform, due to some +build issues that require it. 
- gregkh] + +Signed-off-by: Rebecca Schultz <rschultz@google.com> +Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com> +Signed-off-by: Dima Zavin <dima@android.com> +Signed-off-by: Jamie Gennis <jgennis@google.com> +Cc: Brian Swetland <swetland@google.com> +Cc: Arve Hjønnevåg <arve@android.com> +Cc: Colin Cross <ccross@android.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index 0712b18..6094fd6 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -90,6 +90,10 @@ config ANDROID_LOW_MEMORY_KILLER + ---help--- + Register processes to be killed when memory is low + ++config ANDROID_PMEM ++ bool "Android pmem allocator" ++ depends on ARM ++ + source "drivers/staging/android/switch/Kconfig" + + endif # if ANDROID +diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile +index 36e32bc..8fd7391 100644 +--- a/drivers/staging/android/Makefile ++++ b/drivers/staging/android/Makefile +@@ -4,4 +4,5 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o + obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o + obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o + obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o ++obj-$(CONFIG_ANDROID_PMEM) += pmem.o + obj-$(CONFIG_ANDROID_SWITCH) += switch/ +diff --git a/drivers/staging/android/android_pmem.h b/drivers/staging/android/android_pmem.h +new file mode 100644 +index 0000000..f633621 +--- /dev/null ++++ b/drivers/staging/android/android_pmem.h +@@ -0,0 +1,93 @@ ++/* include/linux/android_pmem.h ++ * ++ * Copyright (C) 2007 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++ ++#ifndef _ANDROID_PMEM_H_ ++#define _ANDROID_PMEM_H_ ++ ++#define PMEM_IOCTL_MAGIC 'p' ++#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) ++#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) ++#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int) ++#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int) ++/* This ioctl will allocate pmem space, backing the file, it will fail ++ * if the file already has an allocation, pass it the len as the argument ++ * to the ioctl */ ++#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int) ++/* This will connect a one pmem file to another, pass the file that is already ++ * backed in memory as the argument to the ioctl ++ */ ++#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) ++/* Returns the total size of the pmem region it is sent to as a pmem_region ++ * struct (with offset set to 0). 
++ */ ++#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) ++#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) ++ ++struct android_pmem_platform_data ++{ ++ const char* name; ++ /* starting physical address of memory region */ ++ unsigned long start; ++ /* size of memory region */ ++ unsigned long size; ++ /* set to indicate the region should not be managed with an allocator */ ++ unsigned no_allocator; ++ /* set to indicate maps of this region should be cached, if a mix of ++ * cached and uncached is desired, set this and open the device with ++ * O_SYNC to get an uncached region */ ++ unsigned cached; ++ /* The MSM7k has bits to enable a write buffer in the bus controller*/ ++ unsigned buffered; ++}; ++ ++struct pmem_region { ++ unsigned long offset; ++ unsigned long len; ++}; ++ ++#ifdef CONFIG_ANDROID_PMEM ++int is_pmem_file(struct file *file); ++int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, ++ unsigned long *end, struct file **filp); ++int get_pmem_user_addr(struct file *file, unsigned long *start, ++ unsigned long *end); ++void put_pmem_file(struct file* file); ++void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); ++int pmem_setup(struct android_pmem_platform_data *pdata, ++ long (*ioctl)(struct file *, unsigned int, unsigned long), ++ int (*release)(struct inode *, struct file *)); ++int pmem_remap(struct pmem_region *region, struct file *file, ++ unsigned operation); ++ ++#else ++static inline int is_pmem_file(struct file *file) { return 0; } ++static inline int get_pmem_file(int fd, unsigned long *start, ++ unsigned long *vstart, unsigned long *end, ++ struct file **filp) { return -ENOSYS; } ++static inline int get_pmem_user_addr(struct file *file, unsigned long *start, ++ unsigned long *end) { return -ENOSYS; } ++static inline void put_pmem_file(struct file* file) { return; } ++static inline void flush_pmem_file(struct file *file, unsigned long start, ++ unsigned long len) { return; } ++static inline int pmem_setup(struct android_pmem_platform_data *pdata, ++ long (*ioctl)(struct file *, unsigned int, unsigned long), ++ int (*release)(struct inode *, struct file *)) { return -ENOSYS; } ++ ++static inline int pmem_remap(struct pmem_region *region, struct file *file, ++ unsigned operation) { return -ENOSYS; } ++#endif ++ ++#endif //_ANDROID_PPP_H_ ++ +diff --git a/drivers/staging/android/pmem.c b/drivers/staging/android/pmem.c +new file mode 100644 +index 0000000..7d97032 +--- /dev/null ++++ b/drivers/staging/android/pmem.c +@@ -0,0 +1,1345 @@ ++/* pmem.c ++ * ++ * Copyright (C) 2007 Google, Inc. ++ * ++ * This software is licensed under the terms of the GNU General Public ++ * License version 2, as published by the Free Software Foundation, and ++ * may be copied, distributed, and modified under those terms. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++ ++#include <linux/miscdevice.h> ++#include <linux/platform_device.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/mm.h> ++#include <linux/list.h> ++#include <linux/mutex.h> ++#include <linux/debugfs.h> ++#include <linux/mempolicy.h> ++#include <linux/sched.h> ++#include <asm/io.h> ++#include <asm/uaccess.h> ++#include <asm/cacheflush.h> ++#include "android_pmem.h" ++ ++#define PMEM_MAX_DEVICES 10 ++#define PMEM_MAX_ORDER 128 ++#define PMEM_MIN_ALLOC PAGE_SIZE ++ ++#define PMEM_DEBUG 1 ++ ++/* indicates that a refernce to this file has been taken via get_pmem_file, ++ * the file should not be released until put_pmem_file is called */ ++#define PMEM_FLAGS_BUSY 0x1 ++/* indicates that this is a suballocation of a larger master range */ ++#define PMEM_FLAGS_CONNECTED 0x1 << 1 ++/* indicates this is a master and not a sub allocation and that it is mmaped */ ++#define PMEM_FLAGS_MASTERMAP 0x1 << 2 ++/* submap and unsubmap flags indicate: ++ * 00: subregion has never been mmaped ++ * 10: subregion has been mmaped, reference to the mm was taken ++ * 11: subretion has ben released, refernece to the mm still held ++ * 01: subretion has been released, reference to the mm has been released ++ */ ++#define PMEM_FLAGS_SUBMAP 0x1 << 3 ++#define PMEM_FLAGS_UNSUBMAP 0x1 << 4 ++ ++ ++struct pmem_data { ++ /* in alloc mode: an index into the bitmap ++ * in no_alloc mode: the size of the allocation */ ++ int index; ++ /* see flags above for descriptions */ ++ unsigned int flags; ++ /* protects this data field, if the mm_mmap sem will be held at the ++ * same time as this sem, the mm sem must be taken first (as this is ++ * the order for vma_open and vma_close ops */ ++ struct rw_semaphore sem; ++ /* info about the mmaping process */ ++ struct vm_area_struct *vma; ++ /* task struct of the mapping process */ ++ struct task_struct *task; ++ /* process id of teh mapping process */ ++ pid_t pid; ++ /* file descriptor of the master */ ++ int master_fd; ++ /* file struct of the master */ ++ struct file *master_file; ++ /* a list of currently available regions if this is a suballocation */ ++ struct list_head region_list; ++ /* a linked list of data so we can access them for debugging */ ++ struct list_head list; ++#if PMEM_DEBUG ++ int ref; ++#endif ++}; ++ ++struct pmem_bits { ++ unsigned allocated:1; /* 1 if allocated, 0 if free */ ++ unsigned order:7; /* size of the region in pmem space */ ++}; ++ ++struct pmem_region_node { ++ struct pmem_region region; ++ struct list_head list; ++}; ++ ++#define PMEM_DEBUG_MSGS 0 ++#if PMEM_DEBUG_MSGS ++#define DLOG(fmt,args...) \ ++ do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ ++ ##args); } \ ++ while (0) ++#else ++#define DLOG(x...) 
do {} while (0) ++#endif ++ ++struct pmem_info { ++ struct miscdevice dev; ++ /* physical start address of the remaped pmem space */ ++ unsigned long base; ++ /* vitual start address of the remaped pmem space */ ++ unsigned char __iomem *vbase; ++ /* total size of the pmem space */ ++ unsigned long size; ++ /* number of entries in the pmem space */ ++ unsigned long num_entries; ++ /* pfn of the garbage page in memory */ ++ unsigned long garbage_pfn; ++ /* index of the garbage page in the pmem space */ ++ int garbage_index; ++ /* the bitmap for the region indicating which entries are allocated ++ * and which are free */ ++ struct pmem_bits *bitmap; ++ /* indicates the region should not be managed with an allocator */ ++ unsigned no_allocator; ++ /* indicates maps of this region should be cached, if a mix of ++ * cached and uncached is desired, set this and open the device with ++ * O_SYNC to get an uncached region */ ++ unsigned cached; ++ unsigned buffered; ++ /* in no_allocator mode the first mapper gets the whole space and sets ++ * this flag */ ++ unsigned allocated; ++ /* for debugging, creates a list of pmem file structs, the ++ * data_list_lock should be taken before pmem_data->sem if both are ++ * needed */ ++ struct mutex data_list_lock; ++ struct list_head data_list; ++ /* pmem_sem protects the bitmap array ++ * a write lock should be held when modifying entries in bitmap ++ * a read lock should be held when reading data from bits or ++ * dereferencing a pointer into bitmap ++ * ++ * pmem_data->sem protects the pmem data of a particular file ++ * Many of the function that require the pmem_data->sem have a non- ++ * locking version for when the caller is already holding that sem. ++ * ++ * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: ++ * down(pmem_data->sem) => down(bitmap_sem) ++ */ ++ struct rw_semaphore bitmap_sem; ++ ++ long (*ioctl)(struct file *, unsigned int, unsigned long); ++ int (*release)(struct inode *, struct file *); ++}; ++ ++static struct pmem_info pmem[PMEM_MAX_DEVICES]; ++static int id_count; ++ ++#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) ++#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order ++#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) ++#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) ++#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) ++#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) ++#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) ++#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ ++ PMEM_LEN(id, index)) ++#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) ++#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ ++ PMEM_LEN(id, index)) ++#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) ++#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) ++#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ ++ (!(data->flags & PMEM_FLAGS_UNSUBMAP))) ++ ++static int pmem_release(struct inode *, struct file *); ++static int pmem_mmap(struct file *, struct vm_area_struct *); ++static int pmem_open(struct inode *, struct file *); ++static long pmem_ioctl(struct file *, unsigned int, unsigned long); ++ ++struct file_operations pmem_fops = { ++ .release = pmem_release, ++ .mmap = pmem_mmap, ++ .open = pmem_open, ++ .unlocked_ioctl = pmem_ioctl, ++}; ++ ++static int get_id(struct file *file) ++{ ++ return MINOR(file->f_dentry->d_inode->i_rdev); 
++} ++ ++int is_pmem_file(struct file *file) ++{ ++ int id; ++ ++ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) ++ return 0; ++ id = get_id(file); ++ if (unlikely(id >= PMEM_MAX_DEVICES)) ++ return 0; ++ if (unlikely(file->f_dentry->d_inode->i_rdev != ++ MKDEV(MISC_MAJOR, pmem[id].dev.minor))) ++ return 0; ++ return 1; ++} ++ ++static int has_allocation(struct file *file) ++{ ++ struct pmem_data *data; ++ /* check is_pmem_file first if not accessed via pmem_file_ops */ ++ ++ if (unlikely(!file->private_data)) ++ return 0; ++ data = (struct pmem_data *)file->private_data; ++ if (unlikely(data->index < 0)) ++ return 0; ++ return 1; ++} ++ ++static int is_master_owner(struct file *file) ++{ ++ struct file *master_file; ++ struct pmem_data *data; ++ int put_needed, ret = 0; ++ ++ if (!is_pmem_file(file) || !has_allocation(file)) ++ return 0; ++ data = (struct pmem_data *)file->private_data; ++ if (PMEM_FLAGS_MASTERMAP & data->flags) ++ return 1; ++ master_file = fget_light(data->master_fd, &put_needed); ++ if (master_file && data->master_file == master_file) ++ ret = 1; ++ fput_light(master_file, put_needed); ++ return ret; ++} ++ ++static int pmem_free(int id, int index) ++{ ++ /* caller should hold the write lock on pmem_sem! */ ++ int buddy, curr = index; ++ DLOG("index %d\n", index); ++ ++ if (pmem[id].no_allocator) { ++ pmem[id].allocated = 0; ++ return 0; ++ } ++ /* clean up the bitmap, merging any buddies */ ++ pmem[id].bitmap[curr].allocated = 0; ++ /* find a slots buddy Buddy# = Slot# ^ (1 << order) ++ * if the buddy is also free merge them ++ * repeat until the buddy is not free or end of the bitmap is reached ++ */ ++ do { ++ buddy = PMEM_BUDDY_INDEX(id, curr); ++ if (PMEM_IS_FREE(id, buddy) && ++ PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { ++ PMEM_ORDER(id, buddy)++; ++ PMEM_ORDER(id, curr)++; ++ curr = min(buddy, curr); ++ } else { ++ break; ++ } ++ } while (curr < pmem[id].num_entries); ++ ++ return 0; ++} ++ ++static void pmem_revoke(struct file *file, struct pmem_data *data); ++ ++static int pmem_release(struct inode *inode, struct file *file) ++{ ++ struct pmem_data *data = (struct pmem_data *)file->private_data; ++ struct pmem_region_node *region_node; ++ struct list_head *elt, *elt2; ++ int id = get_id(file), ret = 0; ++ ++ ++ mutex_lock(&pmem[id].data_list_lock); ++ /* if this file is a master, revoke all the memory in the connected ++ * files */ ++ if (PMEM_FLAGS_MASTERMAP & data->flags) { ++ struct pmem_data *sub_data; ++ list_for_each(elt, &pmem[id].data_list) { ++ sub_data = list_entry(elt, struct pmem_data, list); ++ down_read(&sub_data->sem); ++ if (PMEM_IS_SUBMAP(sub_data) && ++ file == sub_data->master_file) { ++ up_read(&sub_data->sem); ++ pmem_revoke(file, sub_data); ++ } else ++ up_read(&sub_data->sem); ++ } ++ } ++ list_del(&data->list); ++ mutex_unlock(&pmem[id].data_list_lock); ++ ++ ++ down_write(&data->sem); ++ ++ /* if its not a conencted file and it has an allocation, free it */ ++ if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { ++ down_write(&pmem[id].bitmap_sem); ++ ret = pmem_free(id, data->index); ++ up_write(&pmem[id].bitmap_sem); ++ } ++ ++ /* if this file is a submap (mapped, connected file), downref the ++ * task struct */ ++ if (PMEM_FLAGS_SUBMAP & data->flags) ++ if (data->task) { ++ put_task_struct(data->task); ++ data->task = NULL; ++ } ++ ++ file->private_data = NULL; ++ ++ list_for_each_safe(elt, elt2, &data->region_list) { ++ region_node = list_entry(elt, struct pmem_region_node, list); ++ 
list_del(elt); ++ kfree(region_node); ++ } ++ BUG_ON(!list_empty(&data->region_list)); ++ ++ up_write(&data->sem); ++ kfree(data); ++ if (pmem[id].release) ++ ret = pmem[id].release(inode, file); ++ ++ return ret; ++} ++ ++static int pmem_open(struct inode *inode, struct file *file) ++{ ++ struct pmem_data *data; ++ int id = get_id(file); ++ int ret = 0; ++ ++ DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); ++ /* setup file->private_data to indicate its unmapped */ ++ /* you can only open a pmem device one time */ ++ if (file->private_data != NULL) ++ return -1; ++ data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); ++ if (!data) { ++ printk("pmem: unable to allocate memory for pmem metadata."); ++ return -1; ++ } ++ data->flags = 0; ++ data->index = -1; ++ data->task = NULL; ++ data->vma = NULL; ++ data->pid = 0; ++ data->master_file = NULL; ++#if PMEM_DEBUG ++ data->ref = 0; ++#endif ++ INIT_LIST_HEAD(&data->region_list); ++ init_rwsem(&data->sem); ++ ++ file->private_data = data; ++ INIT_LIST_HEAD(&data->list); ++ ++ mutex_lock(&pmem[id].data_list_lock); ++ list_add(&data->list, &pmem[id].data_list); ++ mutex_unlock(&pmem[id].data_list_lock); ++ return ret; ++} ++ ++static unsigned long pmem_order(unsigned long len) ++{ ++ int i; ++ ++ len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; ++ len--; ++ for (i = 0; i < sizeof(len)*8; i++) ++ if (len >> i == 0) ++ break; ++ return i; ++} ++ ++static int pmem_allocate(int id, unsigned long len) ++{ ++ /* caller should hold the write lock on pmem_sem! */ ++ /* return the corresponding pdata[] entry */ ++ int curr = 0; ++ int end = pmem[id].num_entries; ++ int best_fit = -1; ++ unsigned long order = pmem_order(len); ++ ++ if (pmem[id].no_allocator) { ++ DLOG("no allocator"); ++ if ((len > pmem[id].size) || pmem[id].allocated) ++ return -1; ++ pmem[id].allocated = 1; ++ return len; ++ } ++ ++ if (order > PMEM_MAX_ORDER) ++ return -1; ++ DLOG("order %lx\n", order); ++ ++ /* look through the bitmap: ++ * if you find a free slot of the correct order use it ++ * otherwise, use the best fit (smallest with size > order) slot ++ */ ++ while (curr < end) { ++ if (PMEM_IS_FREE(id, curr)) { ++ if (PMEM_ORDER(id, curr) == (unsigned char)order) { ++ /* set the not free bit and clear others */ ++ best_fit = curr; ++ break; ++ } ++ if (PMEM_ORDER(id, curr) > (unsigned char)order && ++ (best_fit < 0 || ++ PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) ++ best_fit = curr; ++ } ++ curr = PMEM_NEXT_INDEX(id, curr); ++ } ++ ++ /* if best_fit < 0, there are no suitable slots, ++ * return an error ++ */ ++ if (best_fit < 0) { ++ printk("pmem: no space left to allocate!\n"); ++ return -1; ++ } ++ ++ /* now partition the best fit: ++ * split the slot into 2 buddies of order - 1 ++ * repeat until the slot is of the correct order ++ */ ++ while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { ++ int buddy; ++ PMEM_ORDER(id, best_fit) -= 1; ++ buddy = PMEM_BUDDY_INDEX(id, best_fit); ++ PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); ++ } ++ pmem[id].bitmap[best_fit].allocated = 1; ++ return best_fit; ++} ++ ++static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot) ++{ ++ int id = get_id(file); ++#ifdef pgprot_noncached ++ if (pmem[id].cached == 0 || file->f_flags & O_SYNC) ++ return pgprot_noncached(vma_prot); ++#endif ++#ifdef pgprot_ext_buffered ++ else if (pmem[id].buffered) ++ return pgprot_ext_buffered(vma_prot); ++#endif ++ return vma_prot; ++} ++ ++static unsigned long pmem_start_addr(int id, struct pmem_data *data) 
++{ ++ if (pmem[id].no_allocator) ++ return PMEM_START_ADDR(id, 0); ++ else ++ return PMEM_START_ADDR(id, data->index); ++ ++} ++ ++static void *pmem_start_vaddr(int id, struct pmem_data *data) ++{ ++ return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; ++} ++ ++static unsigned long pmem_len(int id, struct pmem_data *data) ++{ ++ if (pmem[id].no_allocator) ++ return data->index; ++ else ++ return PMEM_LEN(id, data->index); ++} ++ ++static int pmem_map_garbage(int id, struct vm_area_struct *vma, ++ struct pmem_data *data, unsigned long offset, ++ unsigned long len) ++{ ++ int i, garbage_pages = len >> PAGE_SHIFT; ++ ++ vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; ++ for (i = 0; i < garbage_pages; i++) { ++ if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), ++ pmem[id].garbage_pfn)) ++ return -EAGAIN; ++ } ++ return 0; ++} ++ ++static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, ++ struct pmem_data *data, unsigned long offset, ++ unsigned long len) ++{ ++ int garbage_pages; ++ DLOG("unmap offset %lx len %lx\n", offset, len); ++ ++ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); ++ ++ garbage_pages = len >> PAGE_SHIFT; ++ zap_page_range(vma, vma->vm_start + offset, len, NULL); ++ pmem_map_garbage(id, vma, data, offset, len); ++ return 0; ++} ++ ++static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, ++ struct pmem_data *data, unsigned long offset, ++ unsigned long len) ++{ ++ DLOG("map offset %lx len %lx\n", offset, len); ++ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); ++ BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); ++ BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); ++ BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); ++ ++ if (io_remap_pfn_range(vma, vma->vm_start + offset, ++ (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, ++ len, vma->vm_page_prot)) { ++ return -EAGAIN; ++ } ++ return 0; ++} ++ ++static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, ++ struct pmem_data *data, unsigned long offset, ++ unsigned long len) ++{ ++ /* hold the mm semp for the vma you are modifying when you call this */ ++ BUG_ON(!vma); ++ zap_page_range(vma, vma->vm_start + offset, len, NULL); ++ return pmem_map_pfn_range(id, vma, data, offset, len); ++} ++ ++static void pmem_vma_open(struct vm_area_struct *vma) ++{ ++ struct file *file = vma->vm_file; ++ struct pmem_data *data = file->private_data; ++ int id = get_id(file); ++ /* this should never be called as we don't support copying pmem ++ * ranges via fork */ ++ BUG_ON(!has_allocation(file)); ++ down_write(&data->sem); ++ /* remap the garbage pages, forkers don't get access to the data */ ++ pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); ++ up_write(&data->sem); ++} ++ ++static void pmem_vma_close(struct vm_area_struct *vma) ++{ ++ struct file *file = vma->vm_file; ++ struct pmem_data *data = file->private_data; ++ ++ DLOG("current %u ppid %u file %p count %d\n", current->pid, ++ current->parent->pid, file, file_count(file)); ++ if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { ++ printk(KERN_WARNING "pmem: something is very wrong, you are " ++ "closing a vm backing an allocation that doesn't " ++ "exist!\n"); ++ return; ++ } ++ down_write(&data->sem); ++ if (data->vma == vma) { ++ data->vma = NULL; ++ if ((data->flags & PMEM_FLAGS_CONNECTED) && ++ (data->flags & PMEM_FLAGS_SUBMAP)) ++ data->flags |= PMEM_FLAGS_UNSUBMAP; ++ } ++ /* the kernel is going to free this vma now anyway */ ++ up_write(&data->sem); ++} ++ ++static struct vm_operations_struct vm_ops = 
{ ++ .open = pmem_vma_open, ++ .close = pmem_vma_close, ++}; ++ ++static int pmem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct pmem_data *data; ++ int index; ++ unsigned long vma_size = vma->vm_end - vma->vm_start; ++ int ret = 0, id = get_id(file); ++ ++ if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { ++#if PMEM_DEBUG ++ printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" ++ " and a multiple of pages_size.\n"); ++#endif ++ return -EINVAL; ++ } ++ ++ data = (struct pmem_data *)file->private_data; ++ down_write(&data->sem); ++ /* check this file isn't already mmaped, for submaps check this file ++ * has never been mmaped */ ++ if ((data->flags & PMEM_FLAGS_SUBMAP) || ++ (data->flags & PMEM_FLAGS_UNSUBMAP)) { ++#if PMEM_DEBUG ++ printk(KERN_ERR "pmem: you can only mmap a pmem file once, " ++ "this file is already mmaped. %x\n", data->flags); ++#endif ++ ret = -EINVAL; ++ goto error; ++ } ++ /* if file->private_data == unalloced, alloc*/ ++ if (data && data->index == -1) { ++ down_write(&pmem[id].bitmap_sem); ++ index = pmem_allocate(id, vma->vm_end - vma->vm_start); ++ up_write(&pmem[id].bitmap_sem); ++ data->index = index; ++ } ++ /* either no space was available or an error occured */ ++ if (!has_allocation(file)) { ++ ret = -EINVAL; ++ printk("pmem: could not find allocation for map.\n"); ++ goto error; ++ } ++ ++ if (pmem_len(id, data) < vma_size) { ++#if PMEM_DEBUG ++ printk(KERN_WARNING "pmem: mmap size [%lu] does not match" ++ "size of backing region [%lu].\n", vma_size, ++ pmem_len(id, data)); ++#endif ++ ret = -EINVAL; ++ goto error; ++ } ++ ++ vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; ++ vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot); ++ ++ if (data->flags & PMEM_FLAGS_CONNECTED) { ++ struct pmem_region_node *region_node; ++ struct list_head *elt; ++ if (pmem_map_garbage(id, vma, data, 0, vma_size)) { ++ printk("pmem: mmap failed in kernel!\n"); ++ ret = -EAGAIN; ++ goto error; ++ } ++ list_for_each(elt, &data->region_list) { ++ region_node = list_entry(elt, struct pmem_region_node, ++ list); ++ DLOG("remapping file: %p %lx %lx\n", file, ++ region_node->region.offset, ++ region_node->region.len); ++ if (pmem_remap_pfn_range(id, vma, data, ++ region_node->region.offset, ++ region_node->region.len)) { ++ ret = -EAGAIN; ++ goto error; ++ } ++ } ++ data->flags |= PMEM_FLAGS_SUBMAP; ++ get_task_struct(current->group_leader); ++ data->task = current->group_leader; ++ data->vma = vma; ++#if PMEM_DEBUG ++ data->pid = current->pid; ++#endif ++ DLOG("submmapped file %p vma %p pid %u\n", file, vma, ++ current->pid); ++ } else { ++ if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { ++ printk(KERN_INFO "pmem: mmap failed in kernel!\n"); ++ ret = -EAGAIN; ++ goto error; ++ } ++ data->flags |= PMEM_FLAGS_MASTERMAP; ++ data->pid = current->pid; ++ } ++ vma->vm_ops = &vm_ops; ++error: ++ up_write(&data->sem); ++ return ret; ++} ++ ++/* the following are the api for accessing pmem regions by other drivers ++ * from inside the kernel */ ++int get_pmem_user_addr(struct file *file, unsigned long *start, ++ unsigned long *len) ++{ ++ struct pmem_data *data; ++ if (!is_pmem_file(file) || !has_allocation(file)) { ++#if PMEM_DEBUG ++ printk(KERN_INFO "pmem: requested pmem data from invalid" ++ "file.\n"); ++#endif ++ return -1; ++ } ++ data = (struct pmem_data *)file->private_data; ++ down_read(&data->sem); ++ if (data->vma) { ++ *start = data->vma->vm_start; ++ *len = data->vma->vm_end - data->vma->vm_start; ++ } else { ++ *start = 0; 
++ *len = 0; ++ } ++ up_read(&data->sem); ++ return 0; ++} ++ ++int get_pmem_addr(struct file *file, unsigned long *start, ++ unsigned long *vstart, unsigned long *len) ++{ ++ struct pmem_data *data; ++ int id; ++ ++ if (!is_pmem_file(file) || !has_allocation(file)) { ++ return -1; ++ } ++ ++ data = (struct pmem_data *)file->private_data; ++ if (data->index == -1) { ++#if PMEM_DEBUG ++ printk(KERN_INFO "pmem: requested pmem data from file with no " ++ "allocation.\n"); ++ return -1; ++#endif ++ } ++ id = get_id(file); ++ ++ down_read(&data->sem); ++ *start = pmem_start_addr(id, data); ++ *len = pmem_len(id, data); ++ *vstart = (unsigned long)pmem_start_vaddr(id, data); ++ up_read(&data->sem); ++#if PMEM_DEBUG ++ down_write(&data->sem); ++ data->ref++; ++ up_write(&data->sem); ++#endif ++ return 0; ++} ++ ++int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, ++ unsigned long *len, struct file **filp) ++{ ++ struct file *file; ++ ++ file = fget(fd); ++ if (unlikely(file == NULL)) { ++ printk(KERN_INFO "pmem: requested data from file descriptor " ++ "that doesn't exist."); ++ return -1; ++ } ++ ++ if (get_pmem_addr(file, start, vstart, len)) ++ goto end; ++ ++ if (filp) ++ *filp = file; ++ return 0; ++end: ++ fput(file); ++ return -1; ++} ++ ++void put_pmem_file(struct file *file) ++{ ++ struct pmem_data *data; ++ int id; ++ ++ if (!is_pmem_file(file)) ++ return; ++ id = get_id(file); ++ data = (struct pmem_data *)file->private_data; ++#if PMEM_DEBUG ++ down_write(&data->sem); ++ if (data->ref == 0) { ++ printk("pmem: pmem_put > pmem_get %s (pid %d)\n", ++ pmem[id].dev.name, data->pid); ++ BUG(); ++ } ++ data->ref--; ++ up_write(&data->sem); ++#endif ++ fput(file); ++} ++ ++void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) ++{ ++ struct pmem_data *data; ++ int id; ++ void *vaddr; ++ struct pmem_region_node *region_node; ++ struct list_head *elt; ++ void *flush_start, *flush_end; ++ ++ if (!is_pmem_file(file) || !has_allocation(file)) { ++ return; ++ } ++ ++ id = get_id(file); ++ data = (struct pmem_data *)file->private_data; ++ if (!pmem[id].cached || file->f_flags & O_SYNC) ++ return; ++ ++ down_read(&data->sem); ++ vaddr = pmem_start_vaddr(id, data); ++ /* if this isn't a submmapped file, flush the whole thing */ ++ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { ++ dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); ++ goto end; ++ } ++ /* otherwise, flush the region of the file we are drawing */ ++ list_for_each(elt, &data->region_list) { ++ region_node = list_entry(elt, struct pmem_region_node, list); ++ if ((offset >= region_node->region.offset) && ++ ((offset + len) <= (region_node->region.offset + ++ region_node->region.len))) { ++ flush_start = vaddr + region_node->region.offset; ++ flush_end = flush_start + region_node->region.len; ++ dmac_flush_range(flush_start, flush_end); ++ break; ++ } ++ } ++end: ++ up_read(&data->sem); ++} ++ ++static int pmem_connect(unsigned long connect, struct file *file) ++{ ++ struct pmem_data *data = (struct pmem_data *)file->private_data; ++ struct pmem_data *src_data; ++ struct file *src_file; ++ int ret = 0, put_needed; ++ ++ down_write(&data->sem); ++ /* retrieve the src file and check it is a pmem file with an alloc */ ++ src_file = fget_light(connect, &put_needed); ++ DLOG("connect %p to %p\n", file, src_file); ++ if (!src_file) { ++ printk("pmem: src file not found!\n"); ++ ret = -EINVAL; ++ goto err_no_file; ++ } ++ if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { ++ 
printk(KERN_INFO "pmem: src file is not a pmem file or has no " ++ "alloc!\n"); ++ ret = -EINVAL; ++ goto err_bad_file; ++ } ++ src_data = (struct pmem_data *)src_file->private_data; ++ ++ if (has_allocation(file) && (data->index != src_data->index)) { ++ printk("pmem: file is already mapped but doesn't match this" ++ " src_file!\n"); ++ ret = -EINVAL; ++ goto err_bad_file; ++ } ++ data->index = src_data->index; ++ data->flags |= PMEM_FLAGS_CONNECTED; ++ data->master_fd = connect; ++ data->master_file = src_file; ++ ++err_bad_file: ++ fput_light(src_file, put_needed); ++err_no_file: ++ up_write(&data->sem); ++ return ret; ++} ++ ++static void pmem_unlock_data_and_mm(struct pmem_data *data, ++ struct mm_struct *mm) ++{ ++ up_write(&data->sem); ++ if (mm != NULL) { ++ up_write(&mm->mmap_sem); ++ mmput(mm); ++ } ++} ++ ++static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, ++ struct mm_struct **locked_mm) ++{ ++ int ret = 0; ++ struct mm_struct *mm = NULL; ++ *locked_mm = NULL; ++lock_mm: ++ down_read(&data->sem); ++ if (PMEM_IS_SUBMAP(data)) { ++ mm = get_task_mm(data->task); ++ if (!mm) { ++#if PMEM_DEBUG ++ printk("pmem: can't remap task is gone!\n"); ++#endif ++ up_read(&data->sem); ++ return -1; ++ } ++ } ++ up_read(&data->sem); ++ ++ if (mm) ++ down_write(&mm->mmap_sem); ++ ++ down_write(&data->sem); ++ /* check that the file didn't get mmaped before we could take the ++ * data sem, this should be safe b/c you can only submap each file ++ * once */ ++ if (PMEM_IS_SUBMAP(data) && !mm) { ++ pmem_unlock_data_and_mm(data, mm); ++ up_write(&data->sem); ++ goto lock_mm; ++ } ++ /* now check that vma.mm is still there, it could have been ++ * deleted by vma_close before we could get the data->sem */ ++ if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { ++ /* might as well release this */ ++ if (data->flags & PMEM_FLAGS_SUBMAP) { ++ put_task_struct(data->task); ++ data->task = NULL; ++ /* lower the submap flag to show the mm is gone */ ++ data->flags &= ~(PMEM_FLAGS_SUBMAP); ++ } ++ pmem_unlock_data_and_mm(data, mm); ++ return -1; ++ } ++ *locked_mm = mm; ++ return ret; ++} ++ ++int pmem_remap(struct pmem_region *region, struct file *file, ++ unsigned operation) ++{ ++ int ret; ++ struct pmem_region_node *region_node; ++ struct mm_struct *mm = NULL; ++ struct list_head *elt, *elt2; ++ int id = get_id(file); ++ struct pmem_data *data = (struct pmem_data *)file->private_data; ++ ++ /* pmem region must be aligned on a page boundry */ ++ if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || ++ !PMEM_IS_PAGE_ALIGNED(region->len))) { ++#if PMEM_DEBUG ++ printk("pmem: request for unaligned pmem suballocation " ++ "%lx %lx\n", region->offset, region->len); ++#endif ++ return -EINVAL; ++ } ++ ++ /* if userspace requests a region of len 0, there's nothing to do */ ++ if (region->len == 0) ++ return 0; ++ ++ /* lock the mm and data */ ++ ret = pmem_lock_data_and_mm(file, data, &mm); ++ if (ret) ++ return 0; ++ ++ /* only the owner of the master file can remap the client fds ++ * that back in it */ ++ if (!is_master_owner(file)) { ++#if PMEM_DEBUG ++ printk("pmem: remap requested from non-master process\n"); ++#endif ++ ret = -EINVAL; ++ goto err; ++ } ++ ++ /* check that the requested range is within the src allocation */ ++ if (unlikely((region->offset > pmem_len(id, data)) || ++ (region->len > pmem_len(id, data)) || ++ (region->offset + region->len > pmem_len(id, data)))) { ++#if PMEM_DEBUG ++ printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); ++#endif ++ ret = 
-EINVAL; ++ goto err; ++ } ++ ++ if (operation == PMEM_MAP) { ++ region_node = kmalloc(sizeof(struct pmem_region_node), ++ GFP_KERNEL); ++ if (!region_node) { ++ ret = -ENOMEM; ++#if PMEM_DEBUG ++ printk(KERN_INFO "No space to allocate metadata!"); ++#endif ++ goto err; ++ } ++ region_node->region = *region; ++ list_add(&region_node->list, &data->region_list); ++ } else if (operation == PMEM_UNMAP) { ++ int found = 0; ++ list_for_each_safe(elt, elt2, &data->region_list) { ++ region_node = list_entry(elt, struct pmem_region_node, ++ list); ++ if (region->len == 0 || ++ (region_node->region.offset == region->offset && ++ region_node->region.len == region->len)) { ++ list_del(elt); ++ kfree(region_node); ++ found = 1; ++ } ++ } ++ if (!found) { ++#if PMEM_DEBUG ++ printk("pmem: Unmap region does not map any mapped " ++ "region!"); ++#endif ++ ret = -EINVAL; ++ goto err; ++ } ++ } ++ ++ if (data->vma && PMEM_IS_SUBMAP(data)) { ++ if (operation == PMEM_MAP) ++ ret = pmem_remap_pfn_range(id, data->vma, data, ++ region->offset, region->len); ++ else if (operation == PMEM_UNMAP) ++ ret = pmem_unmap_pfn_range(id, data->vma, data, ++ region->offset, region->len); ++ } ++ ++err: ++ pmem_unlock_data_and_mm(data, mm); ++ return ret; ++} ++ ++static void pmem_revoke(struct file *file, struct pmem_data *data) ++{ ++ struct pmem_region_node *region_node; ++ struct list_head *elt, *elt2; ++ struct mm_struct *mm = NULL; ++ int id = get_id(file); ++ int ret = 0; ++ ++ data->master_file = NULL; ++ ret = pmem_lock_data_and_mm(file, data, &mm); ++ /* if lock_data_and_mm fails either the task that mapped the fd, or ++ * the vma that mapped it have already gone away, nothing more ++ * needs to be done */ ++ if (ret) ++ return; ++ /* unmap everything */ ++ /* delete the regions and region list nothing is mapped any more */ ++ if (data->vma) ++ list_for_each_safe(elt, elt2, &data->region_list) { ++ region_node = list_entry(elt, struct pmem_region_node, ++ list); ++ pmem_unmap_pfn_range(id, data->vma, data, ++ region_node->region.offset, ++ region_node->region.len); ++ list_del(elt); ++ kfree(region_node); ++ } ++ /* delete the master file */ ++ pmem_unlock_data_and_mm(data, mm); ++} ++ ++static void pmem_get_size(struct pmem_region *region, struct file *file) ++{ ++ struct pmem_data *data = (struct pmem_data *)file->private_data; ++ int id = get_id(file); ++ ++ if (!has_allocation(file)) { ++ region->offset = 0; ++ region->len = 0; ++ return; ++ } else { ++ region->offset = pmem_start_addr(id, data); ++ region->len = pmem_len(id, data); ++ } ++ DLOG("offset %lx len %lx\n", region->offset, region->len); ++} ++ ++ ++static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct pmem_data *data; ++ int id = get_id(file); ++ ++ switch (cmd) { ++ case PMEM_GET_PHYS: ++ { ++ struct pmem_region region; ++ DLOG("get_phys\n"); ++ if (!has_allocation(file)) { ++ region.offset = 0; ++ region.len = 0; ++ } else { ++ data = (struct pmem_data *)file->private_data; ++ region.offset = pmem_start_addr(id, data); ++ region.len = pmem_len(id, data); ++ } ++ printk(KERN_INFO "pmem: request for physical address of pmem region " ++ "from process %d.\n", current->pid); ++ if (copy_to_user((void __user *)arg, &region, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ break; ++ } ++ case PMEM_MAP: ++ { ++ struct pmem_region region; ++ if (copy_from_user(&region, (void __user *)arg, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ data = (struct pmem_data *)file->private_data; ++ return pmem_remap(&region, file, 
PMEM_MAP); ++ } ++ break; ++ case PMEM_UNMAP: ++ { ++ struct pmem_region region; ++ if (copy_from_user(&region, (void __user *)arg, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ data = (struct pmem_data *)file->private_data; ++ return pmem_remap(&region, file, PMEM_UNMAP); ++ break; ++ } ++ case PMEM_GET_SIZE: ++ { ++ struct pmem_region region; ++ DLOG("get_size\n"); ++ pmem_get_size(&region, file); ++ if (copy_to_user((void __user *)arg, &region, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ break; ++ } ++ case PMEM_GET_TOTAL_SIZE: ++ { ++ struct pmem_region region; ++ DLOG("get total size\n"); ++ region.offset = 0; ++ get_id(file); ++ region.len = pmem[id].size; ++ if (copy_to_user((void __user *)arg, &region, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ break; ++ } ++ case PMEM_ALLOCATE: ++ { ++ if (has_allocation(file)) ++ return -EINVAL; ++ data = (struct pmem_data *)file->private_data; ++ data->index = pmem_allocate(id, arg); ++ break; ++ } ++ case PMEM_CONNECT: ++ DLOG("connect\n"); ++ return pmem_connect(arg, file); ++ break; ++ case PMEM_CACHE_FLUSH: ++ { ++ struct pmem_region region; ++ DLOG("flush\n"); ++ if (copy_from_user(&region, (void __user *)arg, ++ sizeof(struct pmem_region))) ++ return -EFAULT; ++ flush_pmem_file(file, region.offset, region.len); ++ break; ++ } ++ default: ++ if (pmem[id].ioctl) ++ return pmem[id].ioctl(file, cmd, arg); ++ return -EINVAL; ++ } ++ return 0; ++} ++ ++#if PMEM_DEBUG ++static ssize_t debug_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = inode->i_private; ++ return 0; ++} ++ ++static ssize_t debug_read(struct file *file, char __user *buf, size_t count, ++ loff_t *ppos) ++{ ++ struct list_head *elt, *elt2; ++ struct pmem_data *data; ++ struct pmem_region_node *region_node; ++ int id = (int)file->private_data; ++ const int debug_bufmax = 4096; ++ static char buffer[4096]; ++ int n = 0; ++ ++ DLOG("debug open\n"); ++ n = scnprintf(buffer, debug_bufmax, ++ "pid #: mapped regions (offset, len) (offset,len)...\n"); ++ ++ mutex_lock(&pmem[id].data_list_lock); ++ list_for_each(elt, &pmem[id].data_list) { ++ data = list_entry(elt, struct pmem_data, list); ++ down_read(&data->sem); ++ n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", ++ data->pid); ++ list_for_each(elt2, &data->region_list) { ++ region_node = list_entry(elt2, struct pmem_region_node, ++ list); ++ n += scnprintf(buffer + n, debug_bufmax - n, ++ "(%lx,%lx) ", ++ region_node->region.offset, ++ region_node->region.len); ++ } ++ n += scnprintf(buffer + n, debug_bufmax - n, "\n"); ++ up_read(&data->sem); ++ } ++ mutex_unlock(&pmem[id].data_list_lock); ++ ++ n++; ++ buffer[n] = 0; ++ return simple_read_from_buffer(buf, count, ppos, buffer, n); ++} ++ ++static struct file_operations debug_fops = { ++ .read = debug_read, ++ .open = debug_open, ++}; ++#endif ++ ++#if 0 ++static struct miscdevice pmem_dev = { ++ .name = "pmem", ++ .fops = &pmem_fops, ++}; ++#endif ++ ++int pmem_setup(struct android_pmem_platform_data *pdata, ++ long (*ioctl)(struct file *, unsigned int, unsigned long), ++ int (*release)(struct inode *, struct file *)) ++{ ++ int err = 0; ++ int i, index = 0; ++ int id = id_count; ++ id_count++; ++ ++ pmem[id].no_allocator = pdata->no_allocator; ++ pmem[id].cached = pdata->cached; ++ pmem[id].buffered = pdata->buffered; ++ pmem[id].base = pdata->start; ++ pmem[id].size = pdata->size; ++ pmem[id].ioctl = ioctl; ++ pmem[id].release = release; ++ init_rwsem(&pmem[id].bitmap_sem); ++ mutex_init(&pmem[id].data_list_lock); ++ 
INIT_LIST_HEAD(&pmem[id].data_list); ++ pmem[id].dev.name = pdata->name; ++ pmem[id].dev.minor = id; ++ pmem[id].dev.fops = &pmem_fops; ++ printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); ++ ++ err = misc_register(&pmem[id].dev); ++ if (err) { ++ printk(KERN_ALERT "Unable to register pmem driver!\n"); ++ goto err_cant_register_device; ++ } ++ pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; ++ ++ pmem[id].bitmap = kmalloc(pmem[id].num_entries * ++ sizeof(struct pmem_bits), GFP_KERNEL); ++ if (!pmem[id].bitmap) ++ goto err_no_mem_for_metadata; ++ ++ memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * ++ pmem[id].num_entries); ++ ++ for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { ++ if ((pmem[id].num_entries) & 1<<i) { ++ PMEM_ORDER(id, index) = i; ++ index = PMEM_NEXT_INDEX(id, index); ++ } ++ } ++ ++ if (pmem[id].cached) ++ pmem[id].vbase = ioremap_cached(pmem[id].base, ++ pmem[id].size); ++#ifdef ioremap_ext_buffered ++ else if (pmem[id].buffered) ++ pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, ++ pmem[id].size); ++#endif ++ else ++ pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); ++ ++ if (pmem[id].vbase == 0) ++ goto error_cant_remap; ++ ++ pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); ++ if (pmem[id].no_allocator) ++ pmem[id].allocated = 0; ++ ++#if PMEM_DEBUG ++ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, ++ &debug_fops); ++#endif ++ return 0; ++error_cant_remap: ++ kfree(pmem[id].bitmap); ++err_no_mem_for_metadata: ++ misc_deregister(&pmem[id].dev); ++err_cant_register_device: ++ return -1; ++} ++ ++static int pmem_probe(struct platform_device *pdev) ++{ ++ struct android_pmem_platform_data *pdata; ++ ++ if (!pdev || !pdev->dev.platform_data) { ++ printk(KERN_ALERT "Unable to probe pmem!\n"); ++ return -1; ++ } ++ pdata = pdev->dev.platform_data; ++ return pmem_setup(pdata, NULL, NULL); ++} ++ ++ ++static int pmem_remove(struct platform_device *pdev) ++{ ++ int id = pdev->id; ++ __free_page(pfn_to_page(pmem[id].garbage_pfn)); ++ misc_deregister(&pmem[id].dev); ++ return 0; ++} ++ ++static struct platform_driver pmem_driver = { ++ .probe = pmem_probe, ++ .remove = pmem_remove, ++ .driver = { .name = "android_pmem" } ++}; ++ ++ ++static int __init pmem_init(void) ++{ ++ return platform_driver_register(&pmem_driver); ++} ++ ++static void __exit pmem_exit(void) ++{ ++ platform_driver_unregister(&pmem_driver); ++} ++ ++module_init(pmem_init); ++module_exit(pmem_exit); ++ diff --git a/patches.android/android-0027-ashmem-Anonymous-shared-memory-subsystem.patch b/patches.android/android-0027-ashmem-Anonymous-shared-memory-subsystem.patch new file mode 100644 index 0000000000000..1d3f83365c6f3 --- /dev/null +++ b/patches.android/android-0027-ashmem-Anonymous-shared-memory-subsystem.patch @@ -0,0 +1,829 @@ +From 11980c2ac4ccfad21a5f8ee9e12059f1e687bb40 Mon Sep 17 00:00:00 2001 +From: Robert Love <rlove@google.com> +Date: Tue, 20 Dec 2011 16:49:48 -0800 +Subject: ashmem: Anonymous shared memory subsystem +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 11980c2ac4ccfad21a5f8ee9e12059f1e687bb40 + +The anonymous shared memory (ashmem) subsystem provides a +Unix-y,file-based shared memory interface to user-space. It +works like anonymous memory (e.g. mmapping fd=0) except if +you share the file descriptor via the usual means, you will +share the mapping. The shared memory can be accessed via both +mmap or file I/O. 
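As a purely illustrative aside, a minimal hypothetical user-space sketch of this interface might look as follows; it assumes a /dev/ashmem device node and the ioctls from the ashmem.h added by this patch, uses the pin/unpin behaviour described in the following paragraphs, and omits error handling:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "ashmem.h"	/* ioctl numbers and struct ashmem_pin from this patch */

	int main(void)
	{
		int fd = open("/dev/ashmem", O_RDWR);
		char name[ASHMEM_NAME_LEN] = "example-region";
		struct ashmem_pin pin = { .offset = 0, .len = 4096 };
		void *p;

		ioctl(fd, ASHMEM_SET_NAME, name);	/* optional; must precede mmap */
		ioctl(fd, ASHMEM_SET_SIZE, 4096);	/* size must be set before mmap */
		p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		((char *)p)[0] = 42;			/* touch the shared page */

		ioctl(fd, ASHMEM_UNPIN, &pin);		/* pages become evictable under VM pressure */
		if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
			printf("region was purged while unpinned; regenerate its contents\n");
		return 0;
	}
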
The backing store is a simple shmem file. + +Additionally, ashmem introduces the concept of page pinning. +Pinned pages (the default) behave like any anonymous memory. +Unpinned pages are available to the kernel for eviction during +VM pressure. When repinning the pages, the return value +instructs user-space as to any eviction. In this manner, +user-space processes may implement caching and similar +resource management that efficiently integrates with kernel +memory management. + +Signed-off-by: Robert Love <rlove@google.com> + +ashmem: Don't install fault handler for private mmaps. + +Ashmem is used to create named private heaps. If this heap is backed +by a tmpfs file it will allocate two pages for every page touched. +In 2.6.27, the extra page would later be freed, but 2.6.29 does not +scan anonymous pages when running without swap so the memory is not +freed while the file is referenced. This change changes the behavior +of private ashmem mmaps to match /dev/zero instead tmpfs. + +Signed-off-by: Arve Hjønnevåg <arve@android.com> + +ashmem: Add common prefix to name reported in /proc/pid/maps + +Signed-off-by: Arve Hjønnevåg <arve@android.com> + +ashmem: don't require a page aligned size + +This makes ashmem more similar to shmem and mmap, by +not requiring the specified size to be page aligned, +instead rounding it internally as needed. + +Signed-off-by: Marco Nelissen <marcone@android.com> +[jstultz: Improved commit subject and included patch description +from rlove. Also moved ashmem files to staging dir, and reworked +code to avoid touching mm/shmem.c while we're in staging.] +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index 6094fd6..becf711 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -12,6 +12,15 @@ config ANDROID_BINDER_IPC + bool "Android Binder IPC Driver" + default n + ++config ASHMEM ++ bool "Enable the Anonymous Shared Memory Subsystem" ++ default n ++ depends on SHMEM || TINY_SHMEM ++ help ++ The ashmem subsystem is a new shared memory allocator, similar to ++ POSIX SHM but with different behavior and sporting a simpler ++ file-based API. ++ + config ANDROID_LOGGER + tristate "Android log driver" + default n +diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile +index 8fd7391..eaed1ff 100644 +--- a/drivers/staging/android/Makefile ++++ b/drivers/staging/android/Makefile +@@ -1,4 +1,5 @@ + obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o ++obj-$(CONFIG_ASHMEM) += ashmem.o + obj-$(CONFIG_ANDROID_LOGGER) += logger.o + obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o + obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +new file mode 100644 +index 0000000..5775c6c +--- /dev/null ++++ b/drivers/staging/android/ashmem.c +@@ -0,0 +1,678 @@ ++/* mm/ashmem.c ++** ++** Anonymous Shared Memory Subsystem, ashmem ++** ++** Copyright (C) 2008 Google, Inc. ++** ++** Robert Love <rlove@google.com> ++** ++** This software is licensed under the terms of the GNU General Public ++** License version 2, as published by the Free Software Foundation, and ++** may be copied, distributed, and modified under those terms. 
++** ++** This program is distributed in the hope that it will be useful, ++** but WITHOUT ANY WARRANTY; without even the implied warranty of ++** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++** GNU General Public License for more details. ++*/ ++ ++#include <linux/module.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/miscdevice.h> ++#include <linux/security.h> ++#include <linux/mm.h> ++#include <linux/mman.h> ++#include <linux/uaccess.h> ++#include <linux/personality.h> ++#include <linux/bitops.h> ++#include <linux/mutex.h> ++#include <linux/shmem_fs.h> ++#include "ashmem.h" ++ ++#define ASHMEM_NAME_PREFIX "dev/ashmem/" ++#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) ++#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) ++ ++/* ++ * ashmem_area - anonymous shared memory area ++ * Lifecycle: From our parent file's open() until its release() ++ * Locking: Protected by `ashmem_mutex' ++ * Big Note: Mappings do NOT pin this structure; it dies on close() ++ */ ++struct ashmem_area { ++ char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */ ++ struct list_head unpinned_list; /* list of all ashmem areas */ ++ struct file *file; /* the shmem-based backing file */ ++ size_t size; /* size of the mapping, in bytes */ ++ unsigned long prot_mask; /* allowed prot bits, as vm_flags */ ++}; ++ ++/* ++ * ashmem_range - represents an interval of unpinned (evictable) pages ++ * Lifecycle: From unpin to pin ++ * Locking: Protected by `ashmem_mutex' ++ */ ++struct ashmem_range { ++ struct list_head lru; /* entry in LRU list */ ++ struct list_head unpinned; /* entry in its area's unpinned list */ ++ struct ashmem_area *asma; /* associated area */ ++ size_t pgstart; /* starting page, inclusive */ ++ size_t pgend; /* ending page, inclusive */ ++ unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */ ++}; ++ ++/* LRU list of unpinned pages, protected by ashmem_mutex */ ++static LIST_HEAD(ashmem_lru_list); ++ ++/* Count of pages on our LRU list, protected by ashmem_mutex */ ++static unsigned long lru_count; ++ ++/* ++ * ashmem_mutex - protects the list of and each individual ashmem_area ++ * ++ * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem ++ */ ++static DEFINE_MUTEX(ashmem_mutex); ++ ++static struct kmem_cache *ashmem_area_cachep __read_mostly; ++static struct kmem_cache *ashmem_range_cachep __read_mostly; ++ ++#define range_size(range) \ ++ ((range)->pgend - (range)->pgstart + 1) ++ ++#define range_on_lru(range) \ ++ ((range)->purged == ASHMEM_NOT_PURGED) ++ ++#define page_range_subsumes_range(range, start, end) \ ++ (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) ++ ++#define page_range_subsumed_by_range(range, start, end) \ ++ (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) ++ ++#define page_in_range(range, page) \ ++ (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) ++ ++#define page_range_in_range(range, start, end) \ ++ (page_in_range(range, start) || page_in_range(range, end) || \ ++ page_range_subsumes_range(range, start, end)) ++ ++#define range_before_page(range, page) \ ++ ((range)->pgend < (page)) ++ ++#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) ++ ++static inline void lru_add(struct ashmem_range *range) ++{ ++ list_add_tail(&range->lru, &ashmem_lru_list); ++ lru_count += range_size(range); ++} ++ ++static inline void lru_del(struct ashmem_range *range) ++{ ++ list_del(&range->lru); ++ lru_count -= range_size(range); ++} ++ ++/* ++ * range_alloc 
- allocate and initialize a new ashmem_range structure ++ * ++ * 'asma' - associated ashmem_area ++ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list ++ * 'purged' - initial purge value (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED) ++ * 'start' - starting page, inclusive ++ * 'end' - ending page, inclusive ++ * ++ * Caller must hold ashmem_mutex. ++ */ ++static int range_alloc(struct ashmem_area *asma, ++ struct ashmem_range *prev_range, unsigned int purged, ++ size_t start, size_t end) ++{ ++ struct ashmem_range *range; ++ ++ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL); ++ if (unlikely(!range)) ++ return -ENOMEM; ++ ++ range->asma = asma; ++ range->pgstart = start; ++ range->pgend = end; ++ range->purged = purged; ++ ++ list_add_tail(&range->unpinned, &prev_range->unpinned); ++ ++ if (range_on_lru(range)) ++ lru_add(range); ++ ++ return 0; ++} ++ ++static void range_del(struct ashmem_range *range) ++{ ++ list_del(&range->unpinned); ++ if (range_on_lru(range)) ++ lru_del(range); ++ kmem_cache_free(ashmem_range_cachep, range); ++} ++ ++/* ++ * range_shrink - shrinks a range ++ * ++ * Caller must hold ashmem_mutex. ++ */ ++static inline void range_shrink(struct ashmem_range *range, ++ size_t start, size_t end) ++{ ++ size_t pre = range_size(range); ++ ++ range->pgstart = start; ++ range->pgend = end; ++ ++ if (range_on_lru(range)) ++ lru_count -= pre - range_size(range); ++} ++ ++static int ashmem_open(struct inode *inode, struct file *file) ++{ ++ struct ashmem_area *asma; ++ int ret; ++ ++ ret = nonseekable_open(inode, file); ++ if (unlikely(ret)) ++ return ret; ++ ++ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL); ++ if (unlikely(!asma)) ++ return -ENOMEM; ++ ++ INIT_LIST_HEAD(&asma->unpinned_list); ++ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); ++ asma->prot_mask = PROT_MASK; ++ file->private_data = asma; ++ ++ return 0; ++} ++ ++static int ashmem_release(struct inode *ignored, struct file *file) ++{ ++ struct ashmem_area *asma = file->private_data; ++ struct ashmem_range *range, *next; ++ ++ mutex_lock(&ashmem_mutex); ++ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) ++ range_del(range); ++ mutex_unlock(&ashmem_mutex); ++ ++ if (asma->file) ++ fput(asma->file); ++ kmem_cache_free(ashmem_area_cachep, asma); ++ ++ return 0; ++} ++ ++static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) ++{ ++ struct ashmem_area *asma = file->private_data; ++ int ret = 0; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ /* user needs to SET_SIZE before mapping */ ++ if (unlikely(!asma->size)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* requested protection bits must match our allowed protection mask */ ++ if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) { ++ ret = -EPERM; ++ goto out; ++ } ++ ++ if (!asma->file) { ++ char *name = ASHMEM_NAME_DEF; ++ struct file *vmfile; ++ ++ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') ++ name = asma->name; ++ ++ /* ... and allocate the backing shmem file */ ++ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags); ++ if (unlikely(IS_ERR(vmfile))) { ++ ret = PTR_ERR(vmfile); ++ goto out; ++ } ++ asma->file = vmfile; ++ } ++ get_file(asma->file); ++ ++ /* ++ * XXX - Reworked to use shmem_zero_setup() instead of ++ * shmem_set_file while we're in staging. 
-jstultz ++ */ ++ if (vma->vm_flags & VM_SHARED) { ++ ret = shmem_zero_setup(vma); ++ if (ret) { ++ fput(asma->file); ++ goto out; ++ } ++ } ++ ++ if (vma->vm_file) ++ fput(vma->vm_file); ++ vma->vm_file = asma->file; ++ vma->vm_flags |= VM_CAN_NONLINEAR; ++ ++out: ++ mutex_unlock(&ashmem_mutex); ++ return ret; ++} ++ ++/* ++ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab ++ * ++ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how ++ * many objects (pages) we have in total. ++ * ++ * 'gfp_mask' is the mask of the allocation that got us into this mess. ++ * ++ * Return value is the number of objects (pages) remaining, or -1 if we cannot ++ * proceed without risk of deadlock (due to gfp_mask). ++ * ++ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial ++ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' ++ * pages freed. ++ */ ++static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask) ++{ ++ struct ashmem_range *range, *next; ++ ++ /* We might recurse into filesystem code, so bail out if necessary */ ++ if (nr_to_scan && !(gfp_mask & __GFP_FS)) ++ return -1; ++ if (!nr_to_scan) ++ return lru_count; ++ ++ mutex_lock(&ashmem_mutex); ++ list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { ++ struct inode *inode = range->asma->file->f_dentry->d_inode; ++ loff_t start = range->pgstart * PAGE_SIZE; ++ loff_t end = (range->pgend + 1) * PAGE_SIZE - 1; ++ ++ vmtruncate_range(inode, start, end); ++ range->purged = ASHMEM_WAS_PURGED; ++ lru_del(range); ++ ++ nr_to_scan -= range_size(range); ++ if (nr_to_scan <= 0) ++ break; ++ } ++ mutex_unlock(&ashmem_mutex); ++ ++ return lru_count; ++} ++ ++static struct shrinker ashmem_shrinker = { ++ .shrink = ashmem_shrink, ++ .seeks = DEFAULT_SEEKS * 4, ++}; ++ ++static int set_prot_mask(struct ashmem_area *asma, unsigned long prot) ++{ ++ int ret = 0; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ /* the user can only remove, not add, protection bits */ ++ if (unlikely((asma->prot_mask & prot) != prot)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* does the application expect PROT_READ to imply PROT_EXEC? */ ++ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ prot |= PROT_EXEC; ++ ++ asma->prot_mask = prot; ++ ++out: ++ mutex_unlock(&ashmem_mutex); ++ return ret; ++} ++ ++static int set_name(struct ashmem_area *asma, void __user *name) ++{ ++ int ret = 0; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ /* cannot change an existing mapping's name */ ++ if (unlikely(asma->file)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN, ++ name, ASHMEM_NAME_LEN))) ++ ret = -EFAULT; ++ asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0'; ++ ++out: ++ mutex_unlock(&ashmem_mutex); ++ ++ return ret; ++} ++ ++static int get_name(struct ashmem_area *asma, void __user *name) ++{ ++ int ret = 0; ++ ++ mutex_lock(&ashmem_mutex); ++ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') { ++ size_t len; ++ ++ /* ++ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes ++ * prevents us from revealing one user's stack to another. 
++ */ ++ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1; ++ if (unlikely(copy_to_user(name, ++ asma->name + ASHMEM_NAME_PREFIX_LEN, len))) ++ ret = -EFAULT; ++ } else { ++ if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF, ++ sizeof(ASHMEM_NAME_DEF)))) ++ ret = -EFAULT; ++ } ++ mutex_unlock(&ashmem_mutex); ++ ++ return ret; ++} ++ ++/* ++ * ashmem_pin - pin the given ashmem region, returning whether it was ++ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED). ++ * ++ * Caller must hold ashmem_mutex. ++ */ ++static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend) ++{ ++ struct ashmem_range *range, *next; ++ int ret = ASHMEM_NOT_PURGED; ++ ++ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { ++ /* moved past last applicable page; we can short circuit */ ++ if (range_before_page(range, pgstart)) ++ break; ++ ++ /* ++ * The user can ask us to pin pages that span multiple ranges, ++ * or to pin pages that aren't even unpinned, so this is messy. ++ * ++ * Four cases: ++ * 1. The requested range subsumes an existing range, so we ++ * just remove the entire matching range. ++ * 2. The requested range overlaps the start of an existing ++ * range, so we just update that range. ++ * 3. The requested range overlaps the end of an existing ++ * range, so we just update that range. ++ * 4. The requested range punches a hole in an existing range, ++ * so we have to update one side of the range and then ++ * create a new range for the other side. ++ */ ++ if (page_range_in_range(range, pgstart, pgend)) { ++ ret |= range->purged; ++ ++ /* Case #1: Easy. Just nuke the whole thing. */ ++ if (page_range_subsumes_range(range, pgstart, pgend)) { ++ range_del(range); ++ continue; ++ } ++ ++ /* Case #2: We overlap from the start, so adjust it */ ++ if (range->pgstart >= pgstart) { ++ range_shrink(range, pgend + 1, range->pgend); ++ continue; ++ } ++ ++ /* Case #3: We overlap from the rear, so adjust it */ ++ if (range->pgend <= pgend) { ++ range_shrink(range, range->pgstart, pgstart-1); ++ continue; ++ } ++ ++ /* ++ * Case #4: We eat a chunk out of the middle. A bit ++ * more complicated, we allocate a new range for the ++ * second half and adjust the first chunk's endpoint. ++ */ ++ range_alloc(asma, range, range->purged, ++ pgend + 1, range->pgend); ++ range_shrink(range, range->pgstart, pgstart - 1); ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++/* ++ * ashmem_unpin - unpin the given range of pages. Returns zero on success. ++ * ++ * Caller must hold ashmem_mutex. ++ */ ++static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend) ++{ ++ struct ashmem_range *range, *next; ++ unsigned int purged = ASHMEM_NOT_PURGED; ++ ++restart: ++ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) { ++ /* short circuit: this is our insertion point */ ++ if (range_before_page(range, pgstart)) ++ break; ++ ++ /* ++ * The user can ask us to unpin pages that are already entirely ++ * or partially pinned. We handle those two cases here. 
++ */ ++ if (page_range_subsumed_by_range(range, pgstart, pgend)) ++ return 0; ++ if (page_range_in_range(range, pgstart, pgend)) { ++ pgstart = min_t(size_t, range->pgstart, pgstart), ++ pgend = max_t(size_t, range->pgend, pgend); ++ purged |= range->purged; ++ range_del(range); ++ goto restart; ++ } ++ } ++ ++ return range_alloc(asma, range, purged, pgstart, pgend); ++} ++ ++/* ++ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the ++ * given interval are unpinned and ASHMEM_IS_PINNED otherwise. ++ * ++ * Caller must hold ashmem_mutex. ++ */ ++static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart, ++ size_t pgend) ++{ ++ struct ashmem_range *range; ++ int ret = ASHMEM_IS_PINNED; ++ ++ list_for_each_entry(range, &asma->unpinned_list, unpinned) { ++ if (range_before_page(range, pgstart)) ++ break; ++ if (page_range_in_range(range, pgstart, pgend)) { ++ ret = ASHMEM_IS_UNPINNED; ++ break; ++ } ++ } ++ ++ return ret; ++} ++ ++static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, ++ void __user *p) ++{ ++ struct ashmem_pin pin; ++ size_t pgstart, pgend; ++ int ret = -EINVAL; ++ ++ if (unlikely(!asma->file)) ++ return -EINVAL; ++ ++ if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) ++ return -EFAULT; ++ ++ /* per custom, you can pass zero for len to mean "everything onward" */ ++ if (!pin.len) ++ pin.len = PAGE_ALIGN(asma->size) - pin.offset; ++ ++ if (unlikely((pin.offset | pin.len) & ~PAGE_MASK)) ++ return -EINVAL; ++ ++ if (unlikely(((__u32) -1) - pin.offset < pin.len)) ++ return -EINVAL; ++ ++ if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len)) ++ return -EINVAL; ++ ++ pgstart = pin.offset / PAGE_SIZE; ++ pgend = pgstart + (pin.len / PAGE_SIZE) - 1; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ switch (cmd) { ++ case ASHMEM_PIN: ++ ret = ashmem_pin(asma, pgstart, pgend); ++ break; ++ case ASHMEM_UNPIN: ++ ret = ashmem_unpin(asma, pgstart, pgend); ++ break; ++ case ASHMEM_GET_PIN_STATUS: ++ ret = ashmem_get_pin_status(asma, pgstart, pgend); ++ break; ++ } ++ ++ mutex_unlock(&ashmem_mutex); ++ ++ return ret; ++} ++ ++static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ++{ ++ struct ashmem_area *asma = file->private_data; ++ long ret = -ENOTTY; ++ ++ switch (cmd) { ++ case ASHMEM_SET_NAME: ++ ret = set_name(asma, (void __user *) arg); ++ break; ++ case ASHMEM_GET_NAME: ++ ret = get_name(asma, (void __user *) arg); ++ break; ++ case ASHMEM_SET_SIZE: ++ ret = -EINVAL; ++ if (!asma->file) { ++ ret = 0; ++ asma->size = (size_t) arg; ++ } ++ break; ++ case ASHMEM_GET_SIZE: ++ ret = asma->size; ++ break; ++ case ASHMEM_SET_PROT_MASK: ++ ret = set_prot_mask(asma, arg); ++ break; ++ case ASHMEM_GET_PROT_MASK: ++ ret = asma->prot_mask; ++ break; ++ case ASHMEM_PIN: ++ case ASHMEM_UNPIN: ++ case ASHMEM_GET_PIN_STATUS: ++ ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg); ++ break; ++ case ASHMEM_PURGE_ALL_CACHES: ++ ret = -EPERM; ++ if (capable(CAP_SYS_ADMIN)) { ++ ret = ashmem_shrink(0, GFP_KERNEL); ++ ashmem_shrink(ret, GFP_KERNEL); ++ } ++ break; ++ } ++ ++ return ret; ++} ++ ++static struct file_operations ashmem_fops = { ++ .owner = THIS_MODULE, ++ .open = ashmem_open, ++ .release = ashmem_release, ++ .mmap = ashmem_mmap, ++ .unlocked_ioctl = ashmem_ioctl, ++ .compat_ioctl = ashmem_ioctl, ++}; ++ ++static struct miscdevice ashmem_misc = { ++ .minor = MISC_DYNAMIC_MINOR, ++ .name = "ashmem", ++ .fops = &ashmem_fops, ++}; ++ ++static int __init ashmem_init(void) ++{ ++ int ret; ++ ++ 
ashmem_area_cachep = kmem_cache_create("ashmem_area_cache", ++ sizeof(struct ashmem_area), ++ 0, 0, NULL); ++ if (unlikely(!ashmem_area_cachep)) { ++ printk(KERN_ERR "ashmem: failed to create slab cache\n"); ++ return -ENOMEM; ++ } ++ ++ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache", ++ sizeof(struct ashmem_range), ++ 0, 0, NULL); ++ if (unlikely(!ashmem_range_cachep)) { ++ printk(KERN_ERR "ashmem: failed to create slab cache\n"); ++ return -ENOMEM; ++ } ++ ++ ret = misc_register(&ashmem_misc); ++ if (unlikely(ret)) { ++ printk(KERN_ERR "ashmem: failed to register misc device!\n"); ++ return ret; ++ } ++ ++ register_shrinker(&ashmem_shrinker); ++ ++ printk(KERN_INFO "ashmem: initialized\n"); ++ ++ return 0; ++} ++ ++static void __exit ashmem_exit(void) ++{ ++ int ret; ++ ++ unregister_shrinker(&ashmem_shrinker); ++ ++ ret = misc_deregister(&ashmem_misc); ++ if (unlikely(ret)) ++ printk(KERN_ERR "ashmem: failed to unregister misc device!\n"); ++ ++ kmem_cache_destroy(ashmem_range_cachep); ++ kmem_cache_destroy(ashmem_area_cachep); ++ ++ printk(KERN_INFO "ashmem: unloaded\n"); ++} ++ ++module_init(ashmem_init); ++module_exit(ashmem_exit); ++ ++MODULE_LICENSE("GPL"); +diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h +new file mode 100644 +index 0000000..1976b10 +--- /dev/null ++++ b/drivers/staging/android/ashmem.h +@@ -0,0 +1,48 @@ ++/* ++ * include/linux/ashmem.h ++ * ++ * Copyright 2008 Google Inc. ++ * Author: Robert Love ++ * ++ * This file is dual licensed. It may be redistributed and/or modified ++ * under the terms of the Apache 2.0 License OR version 2 of the GNU ++ * General Public License. ++ */ ++ ++#ifndef _LINUX_ASHMEM_H ++#define _LINUX_ASHMEM_H ++ ++#include <linux/limits.h> ++#include <linux/ioctl.h> ++ ++#define ASHMEM_NAME_LEN 256 ++ ++#define ASHMEM_NAME_DEF "dev/ashmem" ++ ++/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */ ++#define ASHMEM_NOT_PURGED 0 ++#define ASHMEM_WAS_PURGED 1 ++ ++/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? 
*/ ++#define ASHMEM_IS_UNPINNED 0 ++#define ASHMEM_IS_PINNED 1 ++ ++struct ashmem_pin { ++ __u32 offset; /* offset into region, in bytes, page-aligned */ ++ __u32 len; /* length forward from offset, in bytes, page-aligned */ ++}; ++ ++#define __ASHMEMIOC 0x77 ++ ++#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]) ++#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]) ++#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t) ++#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4) ++#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long) ++#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6) ++#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin) ++#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin) ++#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9) ++#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10) ++ ++#endif /* _LINUX_ASHMEM_H */ diff --git a/patches.android/android-0028-ashmem-Implement-read-2-in-ashmem-driver.patch b/patches.android/android-0028-ashmem-Implement-read-2-in-ashmem-driver.patch new file mode 100644 index 0000000000000..0fb2f666019e0 --- /dev/null +++ b/patches.android/android-0028-ashmem-Implement-read-2-in-ashmem-driver.patch @@ -0,0 +1,65 @@ +From 853ca7ae292f9c4809e1e42914e81453eaa15367 Mon Sep 17 00:00:00 2001 +From: Bjorn Bringert <bringert@android.com> +Date: Tue, 20 Dec 2011 16:49:49 -0800 +Subject: ashmem: Implement read(2) in ashmem driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 853ca7ae292f9c4809e1e42914e81453eaa15367 + +Signed-off-by: Bjorn Bringert <bringert@android.com> +[jstultz: Tweaked commit subject] +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 5775c6c..6f1a0bb 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -211,6 +211,32 @@ static int ashmem_release(struct inode *ignored, struct file *file) + return 0; + } + ++static ssize_t ashmem_read(struct file *file, char __user *buf, ++ size_t len, loff_t *pos) ++{ ++ struct ashmem_area *asma = file->private_data; ++ int ret = 0; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ /* If size is not set, or set to 0, always return EOF. 
*/ ++ if (asma->size == 0) { ++ goto out; ++ } ++ ++ if (!asma->file) { ++ ret = -EBADF; ++ goto out; ++ } ++ ++ ret = asma->file->f_op->read(asma->file, buf, len, pos); ++ ++out: ++ mutex_unlock(&ashmem_mutex); ++ return ret; ++} ++ ++ + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) + { + struct ashmem_area *asma = file->private_data; +@@ -612,6 +638,7 @@ static struct file_operations ashmem_fops = { + .owner = THIS_MODULE, + .open = ashmem_open, + .release = ashmem_release, ++ .read = ashmem_read, + .mmap = ashmem_mmap, + .unlocked_ioctl = ashmem_ioctl, + .compat_ioctl = ashmem_ioctl, diff --git a/patches.android/android-0029-ashmem-Fix-ASHMEM_SET_PROT_MASK.patch b/patches.android/android-0029-ashmem-Fix-ASHMEM_SET_PROT_MASK.patch new file mode 100644 index 0000000000000..d5b5e96f96fe9 --- /dev/null +++ b/patches.android/android-0029-ashmem-Fix-ASHMEM_SET_PROT_MASK.patch @@ -0,0 +1,51 @@ +From 56f76fc68492af718fff88927bc296635d634b78 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Tue, 20 Dec 2011 16:49:50 -0800 +Subject: ashmem: Fix ASHMEM_SET_PROT_MASK. +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 56f76fc68492af718fff88927bc296635d634b78 + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 6f1a0bb..0b923b0 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -236,6 +236,13 @@ out: + return ret; + } + ++static inline unsigned long ++calc_vm_may_flags(unsigned long prot) ++{ ++ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) | ++ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | ++ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); ++} + + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) + { +@@ -251,10 +258,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) + } + + /* requested protection bits must match our allowed protection mask */ +- if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) { ++ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & ++ calc_vm_prot_bits(PROT_MASK))) { + ret = -EPERM; + goto out; + } ++ vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); + + if (!asma->file) { + char *name = ASHMEM_NAME_DEF; diff --git a/patches.android/android-0030-ashmem-Update-arguments-of-shrinker-for-2.6.35.patch b/patches.android/android-0030-ashmem-Update-arguments-of-shrinker-for-2.6.35.patch new file mode 100644 index 0000000000000..2bab0eb486f28 --- /dev/null +++ b/patches.android/android-0030-ashmem-Update-arguments-of-shrinker-for-2.6.35.patch @@ -0,0 +1,43 @@ +From 1d3f8f2da1c28709a3c494f3872b89c871906b2d Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@google.com> +Date: Tue, 20 Dec 2011 16:49:51 -0800 +Subject: ashmem: Update arguments of shrinker for 2.6.35 +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 1d3f8f2da1c28709a3c494f3872b89c871906b2d + +Signed-off-by: Colin Cross <ccross@google.com> +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross 
<ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 0b923b0..40c3dc8 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -319,7 +319,7 @@ out: + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. + */ +-static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask) ++static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + { + struct ashmem_range *range, *next; + +@@ -634,8 +634,8 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + case ASHMEM_PURGE_ALL_CACHES: + ret = -EPERM; + if (capable(CAP_SYS_ADMIN)) { +- ret = ashmem_shrink(0, GFP_KERNEL); +- ashmem_shrink(ret, GFP_KERNEL); ++ ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); ++ ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); + } + break; + } diff --git a/patches.android/android-0031-ashmem-Support-lseek-2-in-ashmem-driver.patch b/patches.android/android-0031-ashmem-Support-lseek-2-in-ashmem-driver.patch new file mode 100644 index 0000000000000..024b4ad954a55 --- /dev/null +++ b/patches.android/android-0031-ashmem-Support-lseek-2-in-ashmem-driver.patch @@ -0,0 +1,84 @@ +From 5154b93b8eceb57bdab4e77030bf21ead15b42e4 Mon Sep 17 00:00:00 2001 +From: Bjorn Bringert <bringert@android.com> +Date: Tue, 20 Dec 2011 16:49:52 -0800 +Subject: ashmem: Support lseek(2) in ashmem driver +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 5154b93b8eceb57bdab4e77030bf21ead15b42e4 + +Signed-off-by: Bjorn Bringert <bringert@android.com> +[jstultz: tweaked commit subject] +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 40c3dc8..777e2b2 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -178,7 +178,7 @@ static int ashmem_open(struct inode *inode, struct file *file) + struct ashmem_area *asma; + int ret; + +- ret = nonseekable_open(inode, file); ++ ret = generic_file_open(inode, file); + if (unlikely(ret)) + return ret; + +@@ -230,6 +230,42 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, + } + + ret = asma->file->f_op->read(asma->file, buf, len, pos); ++ if (ret < 0) { ++ goto out; ++ } ++ ++ /** Update backing file pos, since f_ops->read() doesn't */ ++ asma->file->f_pos = *pos; ++ ++out: ++ mutex_unlock(&ashmem_mutex); ++ return ret; ++} ++ ++static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) ++{ ++ struct ashmem_area *asma = file->private_data; ++ int ret; ++ ++ mutex_lock(&ashmem_mutex); ++ ++ if (asma->size == 0) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (!asma->file) { ++ ret = -EBADF; ++ goto out; ++ } ++ ++ ret = asma->file->f_op->llseek(asma->file, offset, origin); ++ if (ret < 0) { ++ goto out; ++ } ++ ++ /** Copy f_pos from backing file, since f_ops->llseek() sets it */ ++ file->f_pos = asma->file->f_pos; + + out: + mutex_unlock(&ashmem_mutex); 
+@@ -648,6 +684,7 @@ static struct file_operations ashmem_fops = { + .open = ashmem_open, + .release = ashmem_release, + .read = ashmem_read, ++ .llseek = ashmem_llseek, + .mmap = ashmem_mmap, + .unlocked_ioctl = ashmem_ioctl, + .compat_ioctl = ashmem_ioctl, diff --git a/patches.android/android-0032-ashmem-Fix-arguments-to-ashmem_shrink.patch b/patches.android/android-0032-ashmem-Fix-arguments-to-ashmem_shrink.patch new file mode 100644 index 0000000000000..2403d5d450521 --- /dev/null +++ b/patches.android/android-0032-ashmem-Fix-arguments-to-ashmem_shrink.patch @@ -0,0 +1,72 @@ +From 33e8fc463eeec29227282e4bd2082f5928d629a5 Mon Sep 17 00:00:00 2001 +From: Colin Cross <ccross@android.com> +Date: Tue, 20 Dec 2011 16:49:53 -0800 +Subject: ashmem: Fix arguments to ashmem_shrink +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 33e8fc463eeec29227282e4bd2082f5928d629a5 + +The arguments to shrink functions have changed, update +ashmem_shrink to match. + +Signed-off-by: Colin Cross <ccross@android.com> +[jstultz: tweaked commit subject] +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index 777e2b2..a78ba21 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -355,14 +355,14 @@ out: + * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan' + * pages freed. + */ +-static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) ++static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc) + { + struct ashmem_range *range, *next; + + /* We might recurse into filesystem code, so bail out if necessary */ +- if (nr_to_scan && !(gfp_mask & __GFP_FS)) ++ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS)) + return -1; +- if (!nr_to_scan) ++ if (!sc->nr_to_scan) + return lru_count; + + mutex_lock(&ashmem_mutex); +@@ -375,8 +375,8 @@ static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) + range->purged = ASHMEM_WAS_PURGED; + lru_del(range); + +- nr_to_scan -= range_size(range); +- if (nr_to_scan <= 0) ++ sc->nr_to_scan -= range_size(range); ++ if (sc->nr_to_scan <= 0) + break; + } + mutex_unlock(&ashmem_mutex); +@@ -670,8 +670,13 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + case ASHMEM_PURGE_ALL_CACHES: + ret = -EPERM; + if (capable(CAP_SYS_ADMIN)) { +- ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL); +- ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); ++ struct shrink_control sc = { ++ .gfp_mask = GFP_KERNEL, ++ .nr_to_scan = 0, ++ }; ++ ret = ashmem_shrink(&ashmem_shrinker, &sc); ++ sc.nr_to_scan = ret; ++ ashmem_shrink(&ashmem_shrinker, &sc); + } + break; + } diff --git a/patches.android/android-0033-ashmem-Whitespace-cleanups.patch b/patches.android/android-0033-ashmem-Whitespace-cleanups.patch new file mode 100644 index 0000000000000..37b2a421710d1 --- /dev/null +++ b/patches.android/android-0033-ashmem-Whitespace-cleanups.patch @@ -0,0 +1,142 @@ +From 1efb34394a694b458d66f25072318c375e22afe2 Mon Sep 17 00:00:00 2001 +From: John Stultz <john.stultz@linaro.org> +Date: Tue, 20 Dec 2011 16:49:54 -0800 +Subject: ashmem: Whitespace cleanups +MIME-Version: 
1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 1efb34394a694b458d66f25072318c375e22afe2 + +Fixes checkpatch warnings with the ashmem.c file + +CC: Brian Swetland <swetland@google.com> +CC: Colin Cross <ccross@android.com> +CC: Arve Hjønnevåg <arve@android.com> +CC: Dima Zavin <dima@android.com> +CC: Robert Love <rlove@google.com> +Signed-off-by: John Stultz <john.stultz@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c +index a78ba21..99052bf 100644 +--- a/drivers/staging/android/ashmem.c ++++ b/drivers/staging/android/ashmem.c +@@ -41,11 +41,11 @@ + * Big Note: Mappings do NOT pin this structure; it dies on close() + */ + struct ashmem_area { +- char name[ASHMEM_FULL_NAME_LEN];/* optional name for /proc/pid/maps */ +- struct list_head unpinned_list; /* list of all ashmem areas */ +- struct file *file; /* the shmem-based backing file */ +- size_t size; /* size of the mapping, in bytes */ +- unsigned long prot_mask; /* allowed prot bits, as vm_flags */ ++ char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */ ++ struct list_head unpinned_list; /* list of all ashmem areas */ ++ struct file *file; /* the shmem-based backing file */ ++ size_t size; /* size of the mapping, in bytes */ ++ unsigned long prot_mask; /* allowed prot bits, as vm_flags */ + }; + + /* +@@ -79,26 +79,26 @@ static struct kmem_cache *ashmem_area_cachep __read_mostly; + static struct kmem_cache *ashmem_range_cachep __read_mostly; + + #define range_size(range) \ +- ((range)->pgend - (range)->pgstart + 1) ++ ((range)->pgend - (range)->pgstart + 1) + + #define range_on_lru(range) \ +- ((range)->purged == ASHMEM_NOT_PURGED) ++ ((range)->purged == ASHMEM_NOT_PURGED) + + #define page_range_subsumes_range(range, start, end) \ +- (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) ++ (((range)->pgstart >= (start)) && ((range)->pgend <= (end))) + + #define page_range_subsumed_by_range(range, start, end) \ +- (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) ++ (((range)->pgstart <= (start)) && ((range)->pgend >= (end))) + + #define page_in_range(range, page) \ +- (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) ++ (((range)->pgstart <= (page)) && ((range)->pgend >= (page))) + + #define page_range_in_range(range, start, end) \ +- (page_in_range(range, start) || page_in_range(range, end) || \ +- page_range_subsumes_range(range, start, end)) ++ (page_in_range(range, start) || page_in_range(range, end) || \ ++ page_range_subsumes_range(range, start, end)) + + #define range_before_page(range, page) \ +- ((range)->pgend < (page)) ++ ((range)->pgend < (page)) + + #define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE) + +@@ -220,9 +220,8 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. 
*/ +- if (asma->size == 0) { ++ if (asma->size == 0) + goto out; +- } + + if (!asma->file) { + ret = -EBADF; +@@ -230,9 +229,8 @@ static ssize_t ashmem_read(struct file *file, char __user *buf, + } + + ret = asma->file->f_op->read(asma->file, buf, len, pos); +- if (ret < 0) { ++ if (ret < 0) + goto out; +- } + + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; +@@ -260,9 +258,8 @@ static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) + } + + ret = asma->file->f_op->llseek(asma->file, offset, origin); +- if (ret < 0) { ++ if (ret < 0) + goto out; +- } + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; +@@ -272,10 +269,9 @@ out: + return ret; + } + +-static inline unsigned long +-calc_vm_may_flags(unsigned long prot) ++static inline unsigned long calc_vm_may_flags(unsigned long prot) + { +- return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) | ++ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) | + _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); + } +@@ -295,7 +291,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) + + /* requested protection bits must match our allowed protection mask */ + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & +- calc_vm_prot_bits(PROT_MASK))) { ++ calc_vm_prot_bits(PROT_MASK))) { + ret = -EPERM; + goto out; + } +@@ -688,8 +684,8 @@ static struct file_operations ashmem_fops = { + .owner = THIS_MODULE, + .open = ashmem_open, + .release = ashmem_release, +- .read = ashmem_read, +- .llseek = ashmem_llseek, ++ .read = ashmem_read, ++ .llseek = ashmem_llseek, + .mmap = ashmem_mmap, + .unlocked_ioctl = ashmem_ioctl, + .compat_ioctl = ashmem_ioctl, diff --git a/patches.android/android-0034-Staging-android-fixed-a-space-warning-in-binder.h.patch b/patches.android/android-0034-Staging-android-fixed-a-space-warning-in-binder.h.patch new file mode 100644 index 0000000000000..35c9eefb7875d --- /dev/null +++ b/patches.android/android-0034-Staging-android-fixed-a-space-warning-in-binder.h.patch @@ -0,0 +1,25 @@ +From 29b858471b266be7e56b69cfcee7ba94d9427dd3 Mon Sep 17 00:00:00 2001 +From: Marco Navarra <fromenglish@gmail.com> +Date: Thu, 22 Dec 2011 13:27:07 +0100 +Subject: Staging: android: fixed a space warning in binder.h +Patch-mainline: HEAD +Git-commit: 29b858471b266be7e56b69cfcee7ba94d9427dd3 + +This patch fixes a simple tab-space warning in binder.h found by checkpatch tool + +Signed-off-by: Marco Navarra <fromenglish@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h +index 863ae1a..25ab6f2 100644 +--- a/drivers/staging/android/binder.h ++++ b/drivers/staging/android/binder.h +@@ -84,7 +84,7 @@ struct binder_version { + /* This is the current protocol version. 
*/ + #define BINDER_CURRENT_PROTOCOL_VERSION 7 + +-#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) ++#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) + #define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t) + #define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t) + #define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int) diff --git a/patches.android/android-0035-Staging-android-fixed-80-characters-warnings-in-lowmemorykil.patch b/patches.android/android-0035-Staging-android-fixed-80-characters-warnings-in-lowmemorykil.patch new file mode 100644 index 0000000000000..1e595ca4516c7 --- /dev/null +++ b/patches.android/android-0035-Staging-android-fixed-80-characters-warnings-in-lowmemorykil.patch @@ -0,0 +1,43 @@ +From 3bf5d65f4324510231cf33e5d75654f4fb1d1892 Mon Sep 17 00:00:00 2001 +From: Marco Navarra <fromenglish@gmail.com> +Date: Thu, 22 Dec 2011 13:28:23 +0100 +Subject: Staging: android: fixed 80 characters warnings in + lowmemorykiller.c +Patch-mainline: HEAD +Git-commit: 3bf5d65f4324510231cf33e5d75654f4fb1d1892 + +This patch fixes some 80 chatacters limit warnings in the lowmemorykiller.c file + +Signed-off-by: Marco Navarra <fromenglish@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 4098bbb..2d8d2b7 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -7,10 +7,10 @@ + * files take a comma separated list of numbers in ascending order. + * + * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and +- * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill processes +- * with a oom_adj value of 8 or higher when the free memory drops below 4096 pages +- * and kill processes with a oom_adj value of 0 or higher when the free memory +- * drops below 1024 pages. ++ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill ++ * processes with a oom_adj value of 8 or higher when the free memory drops ++ * below 4096 pages and kill processes with a oom_adj value of 0 or higher ++ * when the free memory drops below 1024 pages. 
+ * + * The driver considers memory used for caches to be free, but if a large + * percentage of the cached memory is locked this can be very inaccurate +@@ -119,8 +119,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) + } + if (sc->nr_to_scan > 0) + lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n", +- sc->nr_to_scan, sc->gfp_mask, other_free, other_file, +- min_adj); ++ sc->nr_to_scan, sc->gfp_mask, other_free, ++ other_file, min_adj); + rem = global_page_state(NR_ACTIVE_ANON) + + global_page_state(NR_ACTIVE_FILE) + + global_page_state(NR_INACTIVE_ANON) + diff --git a/patches.android/android-0036-Staging-android-binder-Don-t-call-dump_stack-in-binder_vma_o.patch b/patches.android/android-0036-Staging-android-binder-Don-t-call-dump_stack-in-binder_vma_o.patch new file mode 100644 index 0000000000000..376f336fad16b --- /dev/null +++ b/patches.android/android-0036-Staging-android-binder-Don-t-call-dump_stack-in-binder_vma_o.patch @@ -0,0 +1,32 @@ +From 3c1b86f17068cf6476fb2d022b9c8b44dedea2e5 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Fri, 20 Jan 2012 19:56:21 -0800 +Subject: Staging: android: binder: Don't call dump_stack in + binder_vma_open +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: 3c1b86f17068cf6476fb2d022b9c8b44dedea2e5 + +If user-space partially unmaps the driver, binder_vma_open +would dump the kernel stack. This is not a kernel bug however +and will be treated as if the whole area was unmapped once +binder_vma_close gets called. + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Cc: stable <stable@vger.kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 7491801..48cf27c 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -2759,7 +2759,6 @@ static void binder_vma_open(struct vm_area_struct *vma) + proc->pid, vma->vm_start, vma->vm_end, + (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, + (unsigned long)pgprot_val(vma->vm_page_prot)); +- dump_stack(); + } + + static void binder_vma_close(struct vm_area_struct *vma) diff --git a/patches.android/android-0037-Staging-android-Remove-pmem-driver.patch b/patches.android/android-0037-Staging-android-Remove-pmem-driver.patch new file mode 100644 index 0000000000000..6c4e119fbafee --- /dev/null +++ b/patches.android/android-0037-Staging-android-Remove-pmem-driver.patch @@ -0,0 +1,1490 @@ +From b0d017e80e9f4e6b37e699b9a944646e64deb473 Mon Sep 17 00:00:00 2001 +From: Shuah Khan <shuahkhan@gmail.com> +Date: Fri, 27 Jan 2012 11:40:10 -0700 +Subject: Staging: android: Remove pmem driver +Patch-mainline: HEAD +Git-commit: b0d017e80e9f4e6b37e699b9a944646e64deb473 + +Addroid pmem driver is no longer used in any of the Android products. 
+This patch removes pmem driver from Android staging area + +Reference: https://lkml.org/lkml/2012/1/23/183 + +Signed-off-by: Shuah Khan <shuahkhan@gmail.com> +Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index becf711..94cb2ac 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -99,10 +99,6 @@ config ANDROID_LOW_MEMORY_KILLER + ---help--- + Register processes to be killed when memory is low + +-config ANDROID_PMEM +- bool "Android pmem allocator" +- depends on ARM +- + source "drivers/staging/android/switch/Kconfig" + + endif # if ANDROID +diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile +index eaed1ff..5fcc24f 100644 +--- a/drivers/staging/android/Makefile ++++ b/drivers/staging/android/Makefile +@@ -5,5 +5,4 @@ obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o + obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o + obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o + obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o +-obj-$(CONFIG_ANDROID_PMEM) += pmem.o + obj-$(CONFIG_ANDROID_SWITCH) += switch/ +diff --git a/drivers/staging/android/android_pmem.h b/drivers/staging/android/android_pmem.h +deleted file mode 100644 +index f633621..0000000 +--- a/drivers/staging/android/android_pmem.h ++++ /dev/null +@@ -1,93 +0,0 @@ +-/* include/linux/android_pmem.h +- * +- * Copyright (C) 2007 Google, Inc. +- * +- * This software is licensed under the terms of the GNU General Public +- * License version 2, as published by the Free Software Foundation, and +- * may be copied, distributed, and modified under those terms. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. +- * +- */ +- +-#ifndef _ANDROID_PMEM_H_ +-#define _ANDROID_PMEM_H_ +- +-#define PMEM_IOCTL_MAGIC 'p' +-#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) +-#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) +-#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int) +-#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int) +-/* This ioctl will allocate pmem space, backing the file, it will fail +- * if the file already has an allocation, pass it the len as the argument +- * to the ioctl */ +-#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int) +-/* This will connect a one pmem file to another, pass the file that is already +- * backed in memory as the argument to the ioctl +- */ +-#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) +-/* Returns the total size of the pmem region it is sent to as a pmem_region +- * struct (with offset set to 0). 
+- */ +-#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) +-#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) +- +-struct android_pmem_platform_data +-{ +- const char* name; +- /* starting physical address of memory region */ +- unsigned long start; +- /* size of memory region */ +- unsigned long size; +- /* set to indicate the region should not be managed with an allocator */ +- unsigned no_allocator; +- /* set to indicate maps of this region should be cached, if a mix of +- * cached and uncached is desired, set this and open the device with +- * O_SYNC to get an uncached region */ +- unsigned cached; +- /* The MSM7k has bits to enable a write buffer in the bus controller*/ +- unsigned buffered; +-}; +- +-struct pmem_region { +- unsigned long offset; +- unsigned long len; +-}; +- +-#ifdef CONFIG_ANDROID_PMEM +-int is_pmem_file(struct file *file); +-int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, +- unsigned long *end, struct file **filp); +-int get_pmem_user_addr(struct file *file, unsigned long *start, +- unsigned long *end); +-void put_pmem_file(struct file* file); +-void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); +-int pmem_setup(struct android_pmem_platform_data *pdata, +- long (*ioctl)(struct file *, unsigned int, unsigned long), +- int (*release)(struct inode *, struct file *)); +-int pmem_remap(struct pmem_region *region, struct file *file, +- unsigned operation); +- +-#else +-static inline int is_pmem_file(struct file *file) { return 0; } +-static inline int get_pmem_file(int fd, unsigned long *start, +- unsigned long *vstart, unsigned long *end, +- struct file **filp) { return -ENOSYS; } +-static inline int get_pmem_user_addr(struct file *file, unsigned long *start, +- unsigned long *end) { return -ENOSYS; } +-static inline void put_pmem_file(struct file* file) { return; } +-static inline void flush_pmem_file(struct file *file, unsigned long start, +- unsigned long len) { return; } +-static inline int pmem_setup(struct android_pmem_platform_data *pdata, +- long (*ioctl)(struct file *, unsigned int, unsigned long), +- int (*release)(struct inode *, struct file *)) { return -ENOSYS; } +- +-static inline int pmem_remap(struct pmem_region *region, struct file *file, +- unsigned operation) { return -ENOSYS; } +-#endif +- +-#endif //_ANDROID_PPP_H_ +- +diff --git a/drivers/staging/android/pmem.c b/drivers/staging/android/pmem.c +deleted file mode 100644 +index 7d97032..0000000 +--- a/drivers/staging/android/pmem.c ++++ /dev/null +@@ -1,1345 +0,0 @@ +-/* pmem.c +- * +- * Copyright (C) 2007 Google, Inc. +- * +- * This software is licensed under the terms of the GNU General Public +- * License version 2, as published by the Free Software Foundation, and +- * may be copied, distributed, and modified under those terms. +- * +- * This program is distributed in the hope that it will be useful, +- * but WITHOUT ANY WARRANTY; without even the implied warranty of +- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +- * GNU General Public License for more details. 
+- * +- */ +- +-#include <linux/miscdevice.h> +-#include <linux/platform_device.h> +-#include <linux/fs.h> +-#include <linux/file.h> +-#include <linux/mm.h> +-#include <linux/list.h> +-#include <linux/mutex.h> +-#include <linux/debugfs.h> +-#include <linux/mempolicy.h> +-#include <linux/sched.h> +-#include <asm/io.h> +-#include <asm/uaccess.h> +-#include <asm/cacheflush.h> +-#include "android_pmem.h" +- +-#define PMEM_MAX_DEVICES 10 +-#define PMEM_MAX_ORDER 128 +-#define PMEM_MIN_ALLOC PAGE_SIZE +- +-#define PMEM_DEBUG 1 +- +-/* indicates that a refernce to this file has been taken via get_pmem_file, +- * the file should not be released until put_pmem_file is called */ +-#define PMEM_FLAGS_BUSY 0x1 +-/* indicates that this is a suballocation of a larger master range */ +-#define PMEM_FLAGS_CONNECTED 0x1 << 1 +-/* indicates this is a master and not a sub allocation and that it is mmaped */ +-#define PMEM_FLAGS_MASTERMAP 0x1 << 2 +-/* submap and unsubmap flags indicate: +- * 00: subregion has never been mmaped +- * 10: subregion has been mmaped, reference to the mm was taken +- * 11: subretion has ben released, refernece to the mm still held +- * 01: subretion has been released, reference to the mm has been released +- */ +-#define PMEM_FLAGS_SUBMAP 0x1 << 3 +-#define PMEM_FLAGS_UNSUBMAP 0x1 << 4 +- +- +-struct pmem_data { +- /* in alloc mode: an index into the bitmap +- * in no_alloc mode: the size of the allocation */ +- int index; +- /* see flags above for descriptions */ +- unsigned int flags; +- /* protects this data field, if the mm_mmap sem will be held at the +- * same time as this sem, the mm sem must be taken first (as this is +- * the order for vma_open and vma_close ops */ +- struct rw_semaphore sem; +- /* info about the mmaping process */ +- struct vm_area_struct *vma; +- /* task struct of the mapping process */ +- struct task_struct *task; +- /* process id of teh mapping process */ +- pid_t pid; +- /* file descriptor of the master */ +- int master_fd; +- /* file struct of the master */ +- struct file *master_file; +- /* a list of currently available regions if this is a suballocation */ +- struct list_head region_list; +- /* a linked list of data so we can access them for debugging */ +- struct list_head list; +-#if PMEM_DEBUG +- int ref; +-#endif +-}; +- +-struct pmem_bits { +- unsigned allocated:1; /* 1 if allocated, 0 if free */ +- unsigned order:7; /* size of the region in pmem space */ +-}; +- +-struct pmem_region_node { +- struct pmem_region region; +- struct list_head list; +-}; +- +-#define PMEM_DEBUG_MSGS 0 +-#if PMEM_DEBUG_MSGS +-#define DLOG(fmt,args...) \ +- do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ +- ##args); } \ +- while (0) +-#else +-#define DLOG(x...) 
do {} while (0) +-#endif +- +-struct pmem_info { +- struct miscdevice dev; +- /* physical start address of the remaped pmem space */ +- unsigned long base; +- /* vitual start address of the remaped pmem space */ +- unsigned char __iomem *vbase; +- /* total size of the pmem space */ +- unsigned long size; +- /* number of entries in the pmem space */ +- unsigned long num_entries; +- /* pfn of the garbage page in memory */ +- unsigned long garbage_pfn; +- /* index of the garbage page in the pmem space */ +- int garbage_index; +- /* the bitmap for the region indicating which entries are allocated +- * and which are free */ +- struct pmem_bits *bitmap; +- /* indicates the region should not be managed with an allocator */ +- unsigned no_allocator; +- /* indicates maps of this region should be cached, if a mix of +- * cached and uncached is desired, set this and open the device with +- * O_SYNC to get an uncached region */ +- unsigned cached; +- unsigned buffered; +- /* in no_allocator mode the first mapper gets the whole space and sets +- * this flag */ +- unsigned allocated; +- /* for debugging, creates a list of pmem file structs, the +- * data_list_lock should be taken before pmem_data->sem if both are +- * needed */ +- struct mutex data_list_lock; +- struct list_head data_list; +- /* pmem_sem protects the bitmap array +- * a write lock should be held when modifying entries in bitmap +- * a read lock should be held when reading data from bits or +- * dereferencing a pointer into bitmap +- * +- * pmem_data->sem protects the pmem data of a particular file +- * Many of the function that require the pmem_data->sem have a non- +- * locking version for when the caller is already holding that sem. +- * +- * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: +- * down(pmem_data->sem) => down(bitmap_sem) +- */ +- struct rw_semaphore bitmap_sem; +- +- long (*ioctl)(struct file *, unsigned int, unsigned long); +- int (*release)(struct inode *, struct file *); +-}; +- +-static struct pmem_info pmem[PMEM_MAX_DEVICES]; +-static int id_count; +- +-#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) +-#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order +-#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) +-#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) +-#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) +-#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) +-#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) +-#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ +- PMEM_LEN(id, index)) +-#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) +-#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ +- PMEM_LEN(id, index)) +-#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) +-#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) +-#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ +- (!(data->flags & PMEM_FLAGS_UNSUBMAP))) +- +-static int pmem_release(struct inode *, struct file *); +-static int pmem_mmap(struct file *, struct vm_area_struct *); +-static int pmem_open(struct inode *, struct file *); +-static long pmem_ioctl(struct file *, unsigned int, unsigned long); +- +-struct file_operations pmem_fops = { +- .release = pmem_release, +- .mmap = pmem_mmap, +- .open = pmem_open, +- .unlocked_ioctl = pmem_ioctl, +-}; +- +-static int get_id(struct file *file) +-{ +- return MINOR(file->f_dentry->d_inode->i_rdev); 
+-} +- +-int is_pmem_file(struct file *file) +-{ +- int id; +- +- if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) +- return 0; +- id = get_id(file); +- if (unlikely(id >= PMEM_MAX_DEVICES)) +- return 0; +- if (unlikely(file->f_dentry->d_inode->i_rdev != +- MKDEV(MISC_MAJOR, pmem[id].dev.minor))) +- return 0; +- return 1; +-} +- +-static int has_allocation(struct file *file) +-{ +- struct pmem_data *data; +- /* check is_pmem_file first if not accessed via pmem_file_ops */ +- +- if (unlikely(!file->private_data)) +- return 0; +- data = (struct pmem_data *)file->private_data; +- if (unlikely(data->index < 0)) +- return 0; +- return 1; +-} +- +-static int is_master_owner(struct file *file) +-{ +- struct file *master_file; +- struct pmem_data *data; +- int put_needed, ret = 0; +- +- if (!is_pmem_file(file) || !has_allocation(file)) +- return 0; +- data = (struct pmem_data *)file->private_data; +- if (PMEM_FLAGS_MASTERMAP & data->flags) +- return 1; +- master_file = fget_light(data->master_fd, &put_needed); +- if (master_file && data->master_file == master_file) +- ret = 1; +- fput_light(master_file, put_needed); +- return ret; +-} +- +-static int pmem_free(int id, int index) +-{ +- /* caller should hold the write lock on pmem_sem! */ +- int buddy, curr = index; +- DLOG("index %d\n", index); +- +- if (pmem[id].no_allocator) { +- pmem[id].allocated = 0; +- return 0; +- } +- /* clean up the bitmap, merging any buddies */ +- pmem[id].bitmap[curr].allocated = 0; +- /* find a slots buddy Buddy# = Slot# ^ (1 << order) +- * if the buddy is also free merge them +- * repeat until the buddy is not free or end of the bitmap is reached +- */ +- do { +- buddy = PMEM_BUDDY_INDEX(id, curr); +- if (PMEM_IS_FREE(id, buddy) && +- PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { +- PMEM_ORDER(id, buddy)++; +- PMEM_ORDER(id, curr)++; +- curr = min(buddy, curr); +- } else { +- break; +- } +- } while (curr < pmem[id].num_entries); +- +- return 0; +-} +- +-static void pmem_revoke(struct file *file, struct pmem_data *data); +- +-static int pmem_release(struct inode *inode, struct file *file) +-{ +- struct pmem_data *data = (struct pmem_data *)file->private_data; +- struct pmem_region_node *region_node; +- struct list_head *elt, *elt2; +- int id = get_id(file), ret = 0; +- +- +- mutex_lock(&pmem[id].data_list_lock); +- /* if this file is a master, revoke all the memory in the connected +- * files */ +- if (PMEM_FLAGS_MASTERMAP & data->flags) { +- struct pmem_data *sub_data; +- list_for_each(elt, &pmem[id].data_list) { +- sub_data = list_entry(elt, struct pmem_data, list); +- down_read(&sub_data->sem); +- if (PMEM_IS_SUBMAP(sub_data) && +- file == sub_data->master_file) { +- up_read(&sub_data->sem); +- pmem_revoke(file, sub_data); +- } else +- up_read(&sub_data->sem); +- } +- } +- list_del(&data->list); +- mutex_unlock(&pmem[id].data_list_lock); +- +- +- down_write(&data->sem); +- +- /* if its not a conencted file and it has an allocation, free it */ +- if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { +- down_write(&pmem[id].bitmap_sem); +- ret = pmem_free(id, data->index); +- up_write(&pmem[id].bitmap_sem); +- } +- +- /* if this file is a submap (mapped, connected file), downref the +- * task struct */ +- if (PMEM_FLAGS_SUBMAP & data->flags) +- if (data->task) { +- put_task_struct(data->task); +- data->task = NULL; +- } +- +- file->private_data = NULL; +- +- list_for_each_safe(elt, elt2, &data->region_list) { +- region_node = list_entry(elt, struct pmem_region_node, list); +- 
list_del(elt); +- kfree(region_node); +- } +- BUG_ON(!list_empty(&data->region_list)); +- +- up_write(&data->sem); +- kfree(data); +- if (pmem[id].release) +- ret = pmem[id].release(inode, file); +- +- return ret; +-} +- +-static int pmem_open(struct inode *inode, struct file *file) +-{ +- struct pmem_data *data; +- int id = get_id(file); +- int ret = 0; +- +- DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); +- /* setup file->private_data to indicate its unmapped */ +- /* you can only open a pmem device one time */ +- if (file->private_data != NULL) +- return -1; +- data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); +- if (!data) { +- printk("pmem: unable to allocate memory for pmem metadata."); +- return -1; +- } +- data->flags = 0; +- data->index = -1; +- data->task = NULL; +- data->vma = NULL; +- data->pid = 0; +- data->master_file = NULL; +-#if PMEM_DEBUG +- data->ref = 0; +-#endif +- INIT_LIST_HEAD(&data->region_list); +- init_rwsem(&data->sem); +- +- file->private_data = data; +- INIT_LIST_HEAD(&data->list); +- +- mutex_lock(&pmem[id].data_list_lock); +- list_add(&data->list, &pmem[id].data_list); +- mutex_unlock(&pmem[id].data_list_lock); +- return ret; +-} +- +-static unsigned long pmem_order(unsigned long len) +-{ +- int i; +- +- len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; +- len--; +- for (i = 0; i < sizeof(len)*8; i++) +- if (len >> i == 0) +- break; +- return i; +-} +- +-static int pmem_allocate(int id, unsigned long len) +-{ +- /* caller should hold the write lock on pmem_sem! */ +- /* return the corresponding pdata[] entry */ +- int curr = 0; +- int end = pmem[id].num_entries; +- int best_fit = -1; +- unsigned long order = pmem_order(len); +- +- if (pmem[id].no_allocator) { +- DLOG("no allocator"); +- if ((len > pmem[id].size) || pmem[id].allocated) +- return -1; +- pmem[id].allocated = 1; +- return len; +- } +- +- if (order > PMEM_MAX_ORDER) +- return -1; +- DLOG("order %lx\n", order); +- +- /* look through the bitmap: +- * if you find a free slot of the correct order use it +- * otherwise, use the best fit (smallest with size > order) slot +- */ +- while (curr < end) { +- if (PMEM_IS_FREE(id, curr)) { +- if (PMEM_ORDER(id, curr) == (unsigned char)order) { +- /* set the not free bit and clear others */ +- best_fit = curr; +- break; +- } +- if (PMEM_ORDER(id, curr) > (unsigned char)order && +- (best_fit < 0 || +- PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) +- best_fit = curr; +- } +- curr = PMEM_NEXT_INDEX(id, curr); +- } +- +- /* if best_fit < 0, there are no suitable slots, +- * return an error +- */ +- if (best_fit < 0) { +- printk("pmem: no space left to allocate!\n"); +- return -1; +- } +- +- /* now partition the best fit: +- * split the slot into 2 buddies of order - 1 +- * repeat until the slot is of the correct order +- */ +- while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { +- int buddy; +- PMEM_ORDER(id, best_fit) -= 1; +- buddy = PMEM_BUDDY_INDEX(id, best_fit); +- PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); +- } +- pmem[id].bitmap[best_fit].allocated = 1; +- return best_fit; +-} +- +-static pgprot_t pmem_access_prot(struct file *file, pgprot_t vma_prot) +-{ +- int id = get_id(file); +-#ifdef pgprot_noncached +- if (pmem[id].cached == 0 || file->f_flags & O_SYNC) +- return pgprot_noncached(vma_prot); +-#endif +-#ifdef pgprot_ext_buffered +- else if (pmem[id].buffered) +- return pgprot_ext_buffered(vma_prot); +-#endif +- return vma_prot; +-} +- +-static unsigned long pmem_start_addr(int id, struct pmem_data *data) 
+-{ +- if (pmem[id].no_allocator) +- return PMEM_START_ADDR(id, 0); +- else +- return PMEM_START_ADDR(id, data->index); +- +-} +- +-static void *pmem_start_vaddr(int id, struct pmem_data *data) +-{ +- return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; +-} +- +-static unsigned long pmem_len(int id, struct pmem_data *data) +-{ +- if (pmem[id].no_allocator) +- return data->index; +- else +- return PMEM_LEN(id, data->index); +-} +- +-static int pmem_map_garbage(int id, struct vm_area_struct *vma, +- struct pmem_data *data, unsigned long offset, +- unsigned long len) +-{ +- int i, garbage_pages = len >> PAGE_SHIFT; +- +- vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; +- for (i = 0; i < garbage_pages; i++) { +- if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), +- pmem[id].garbage_pfn)) +- return -EAGAIN; +- } +- return 0; +-} +- +-static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, +- struct pmem_data *data, unsigned long offset, +- unsigned long len) +-{ +- int garbage_pages; +- DLOG("unmap offset %lx len %lx\n", offset, len); +- +- BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); +- +- garbage_pages = len >> PAGE_SHIFT; +- zap_page_range(vma, vma->vm_start + offset, len, NULL); +- pmem_map_garbage(id, vma, data, offset, len); +- return 0; +-} +- +-static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, +- struct pmem_data *data, unsigned long offset, +- unsigned long len) +-{ +- DLOG("map offset %lx len %lx\n", offset, len); +- BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); +- BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); +- BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); +- BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); +- +- if (io_remap_pfn_range(vma, vma->vm_start + offset, +- (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, +- len, vma->vm_page_prot)) { +- return -EAGAIN; +- } +- return 0; +-} +- +-static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, +- struct pmem_data *data, unsigned long offset, +- unsigned long len) +-{ +- /* hold the mm semp for the vma you are modifying when you call this */ +- BUG_ON(!vma); +- zap_page_range(vma, vma->vm_start + offset, len, NULL); +- return pmem_map_pfn_range(id, vma, data, offset, len); +-} +- +-static void pmem_vma_open(struct vm_area_struct *vma) +-{ +- struct file *file = vma->vm_file; +- struct pmem_data *data = file->private_data; +- int id = get_id(file); +- /* this should never be called as we don't support copying pmem +- * ranges via fork */ +- BUG_ON(!has_allocation(file)); +- down_write(&data->sem); +- /* remap the garbage pages, forkers don't get access to the data */ +- pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); +- up_write(&data->sem); +-} +- +-static void pmem_vma_close(struct vm_area_struct *vma) +-{ +- struct file *file = vma->vm_file; +- struct pmem_data *data = file->private_data; +- +- DLOG("current %u ppid %u file %p count %d\n", current->pid, +- current->parent->pid, file, file_count(file)); +- if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { +- printk(KERN_WARNING "pmem: something is very wrong, you are " +- "closing a vm backing an allocation that doesn't " +- "exist!\n"); +- return; +- } +- down_write(&data->sem); +- if (data->vma == vma) { +- data->vma = NULL; +- if ((data->flags & PMEM_FLAGS_CONNECTED) && +- (data->flags & PMEM_FLAGS_SUBMAP)) +- data->flags |= PMEM_FLAGS_UNSUBMAP; +- } +- /* the kernel is going to free this vma now anyway */ +- up_write(&data->sem); +-} +- +-static struct vm_operations_struct vm_ops = 
{ +- .open = pmem_vma_open, +- .close = pmem_vma_close, +-}; +- +-static int pmem_mmap(struct file *file, struct vm_area_struct *vma) +-{ +- struct pmem_data *data; +- int index; +- unsigned long vma_size = vma->vm_end - vma->vm_start; +- int ret = 0, id = get_id(file); +- +- if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { +-#if PMEM_DEBUG +- printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" +- " and a multiple of pages_size.\n"); +-#endif +- return -EINVAL; +- } +- +- data = (struct pmem_data *)file->private_data; +- down_write(&data->sem); +- /* check this file isn't already mmaped, for submaps check this file +- * has never been mmaped */ +- if ((data->flags & PMEM_FLAGS_SUBMAP) || +- (data->flags & PMEM_FLAGS_UNSUBMAP)) { +-#if PMEM_DEBUG +- printk(KERN_ERR "pmem: you can only mmap a pmem file once, " +- "this file is already mmaped. %x\n", data->flags); +-#endif +- ret = -EINVAL; +- goto error; +- } +- /* if file->private_data == unalloced, alloc*/ +- if (data && data->index == -1) { +- down_write(&pmem[id].bitmap_sem); +- index = pmem_allocate(id, vma->vm_end - vma->vm_start); +- up_write(&pmem[id].bitmap_sem); +- data->index = index; +- } +- /* either no space was available or an error occured */ +- if (!has_allocation(file)) { +- ret = -EINVAL; +- printk("pmem: could not find allocation for map.\n"); +- goto error; +- } +- +- if (pmem_len(id, data) < vma_size) { +-#if PMEM_DEBUG +- printk(KERN_WARNING "pmem: mmap size [%lu] does not match" +- "size of backing region [%lu].\n", vma_size, +- pmem_len(id, data)); +-#endif +- ret = -EINVAL; +- goto error; +- } +- +- vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; +- vma->vm_page_prot = pmem_access_prot(file, vma->vm_page_prot); +- +- if (data->flags & PMEM_FLAGS_CONNECTED) { +- struct pmem_region_node *region_node; +- struct list_head *elt; +- if (pmem_map_garbage(id, vma, data, 0, vma_size)) { +- printk("pmem: mmap failed in kernel!\n"); +- ret = -EAGAIN; +- goto error; +- } +- list_for_each(elt, &data->region_list) { +- region_node = list_entry(elt, struct pmem_region_node, +- list); +- DLOG("remapping file: %p %lx %lx\n", file, +- region_node->region.offset, +- region_node->region.len); +- if (pmem_remap_pfn_range(id, vma, data, +- region_node->region.offset, +- region_node->region.len)) { +- ret = -EAGAIN; +- goto error; +- } +- } +- data->flags |= PMEM_FLAGS_SUBMAP; +- get_task_struct(current->group_leader); +- data->task = current->group_leader; +- data->vma = vma; +-#if PMEM_DEBUG +- data->pid = current->pid; +-#endif +- DLOG("submmapped file %p vma %p pid %u\n", file, vma, +- current->pid); +- } else { +- if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) { +- printk(KERN_INFO "pmem: mmap failed in kernel!\n"); +- ret = -EAGAIN; +- goto error; +- } +- data->flags |= PMEM_FLAGS_MASTERMAP; +- data->pid = current->pid; +- } +- vma->vm_ops = &vm_ops; +-error: +- up_write(&data->sem); +- return ret; +-} +- +-/* the following are the api for accessing pmem regions by other drivers +- * from inside the kernel */ +-int get_pmem_user_addr(struct file *file, unsigned long *start, +- unsigned long *len) +-{ +- struct pmem_data *data; +- if (!is_pmem_file(file) || !has_allocation(file)) { +-#if PMEM_DEBUG +- printk(KERN_INFO "pmem: requested pmem data from invalid" +- "file.\n"); +-#endif +- return -1; +- } +- data = (struct pmem_data *)file->private_data; +- down_read(&data->sem); +- if (data->vma) { +- *start = data->vma->vm_start; +- *len = data->vma->vm_end - data->vma->vm_start; +- } else { +- *start = 0; 
+- *len = 0; +- } +- up_read(&data->sem); +- return 0; +-} +- +-int get_pmem_addr(struct file *file, unsigned long *start, +- unsigned long *vstart, unsigned long *len) +-{ +- struct pmem_data *data; +- int id; +- +- if (!is_pmem_file(file) || !has_allocation(file)) { +- return -1; +- } +- +- data = (struct pmem_data *)file->private_data; +- if (data->index == -1) { +-#if PMEM_DEBUG +- printk(KERN_INFO "pmem: requested pmem data from file with no " +- "allocation.\n"); +- return -1; +-#endif +- } +- id = get_id(file); +- +- down_read(&data->sem); +- *start = pmem_start_addr(id, data); +- *len = pmem_len(id, data); +- *vstart = (unsigned long)pmem_start_vaddr(id, data); +- up_read(&data->sem); +-#if PMEM_DEBUG +- down_write(&data->sem); +- data->ref++; +- up_write(&data->sem); +-#endif +- return 0; +-} +- +-int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, +- unsigned long *len, struct file **filp) +-{ +- struct file *file; +- +- file = fget(fd); +- if (unlikely(file == NULL)) { +- printk(KERN_INFO "pmem: requested data from file descriptor " +- "that doesn't exist."); +- return -1; +- } +- +- if (get_pmem_addr(file, start, vstart, len)) +- goto end; +- +- if (filp) +- *filp = file; +- return 0; +-end: +- fput(file); +- return -1; +-} +- +-void put_pmem_file(struct file *file) +-{ +- struct pmem_data *data; +- int id; +- +- if (!is_pmem_file(file)) +- return; +- id = get_id(file); +- data = (struct pmem_data *)file->private_data; +-#if PMEM_DEBUG +- down_write(&data->sem); +- if (data->ref == 0) { +- printk("pmem: pmem_put > pmem_get %s (pid %d)\n", +- pmem[id].dev.name, data->pid); +- BUG(); +- } +- data->ref--; +- up_write(&data->sem); +-#endif +- fput(file); +-} +- +-void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) +-{ +- struct pmem_data *data; +- int id; +- void *vaddr; +- struct pmem_region_node *region_node; +- struct list_head *elt; +- void *flush_start, *flush_end; +- +- if (!is_pmem_file(file) || !has_allocation(file)) { +- return; +- } +- +- id = get_id(file); +- data = (struct pmem_data *)file->private_data; +- if (!pmem[id].cached || file->f_flags & O_SYNC) +- return; +- +- down_read(&data->sem); +- vaddr = pmem_start_vaddr(id, data); +- /* if this isn't a submmapped file, flush the whole thing */ +- if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { +- dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); +- goto end; +- } +- /* otherwise, flush the region of the file we are drawing */ +- list_for_each(elt, &data->region_list) { +- region_node = list_entry(elt, struct pmem_region_node, list); +- if ((offset >= region_node->region.offset) && +- ((offset + len) <= (region_node->region.offset + +- region_node->region.len))) { +- flush_start = vaddr + region_node->region.offset; +- flush_end = flush_start + region_node->region.len; +- dmac_flush_range(flush_start, flush_end); +- break; +- } +- } +-end: +- up_read(&data->sem); +-} +- +-static int pmem_connect(unsigned long connect, struct file *file) +-{ +- struct pmem_data *data = (struct pmem_data *)file->private_data; +- struct pmem_data *src_data; +- struct file *src_file; +- int ret = 0, put_needed; +- +- down_write(&data->sem); +- /* retrieve the src file and check it is a pmem file with an alloc */ +- src_file = fget_light(connect, &put_needed); +- DLOG("connect %p to %p\n", file, src_file); +- if (!src_file) { +- printk("pmem: src file not found!\n"); +- ret = -EINVAL; +- goto err_no_file; +- } +- if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { +- 
printk(KERN_INFO "pmem: src file is not a pmem file or has no " +- "alloc!\n"); +- ret = -EINVAL; +- goto err_bad_file; +- } +- src_data = (struct pmem_data *)src_file->private_data; +- +- if (has_allocation(file) && (data->index != src_data->index)) { +- printk("pmem: file is already mapped but doesn't match this" +- " src_file!\n"); +- ret = -EINVAL; +- goto err_bad_file; +- } +- data->index = src_data->index; +- data->flags |= PMEM_FLAGS_CONNECTED; +- data->master_fd = connect; +- data->master_file = src_file; +- +-err_bad_file: +- fput_light(src_file, put_needed); +-err_no_file: +- up_write(&data->sem); +- return ret; +-} +- +-static void pmem_unlock_data_and_mm(struct pmem_data *data, +- struct mm_struct *mm) +-{ +- up_write(&data->sem); +- if (mm != NULL) { +- up_write(&mm->mmap_sem); +- mmput(mm); +- } +-} +- +-static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, +- struct mm_struct **locked_mm) +-{ +- int ret = 0; +- struct mm_struct *mm = NULL; +- *locked_mm = NULL; +-lock_mm: +- down_read(&data->sem); +- if (PMEM_IS_SUBMAP(data)) { +- mm = get_task_mm(data->task); +- if (!mm) { +-#if PMEM_DEBUG +- printk("pmem: can't remap task is gone!\n"); +-#endif +- up_read(&data->sem); +- return -1; +- } +- } +- up_read(&data->sem); +- +- if (mm) +- down_write(&mm->mmap_sem); +- +- down_write(&data->sem); +- /* check that the file didn't get mmaped before we could take the +- * data sem, this should be safe b/c you can only submap each file +- * once */ +- if (PMEM_IS_SUBMAP(data) && !mm) { +- pmem_unlock_data_and_mm(data, mm); +- up_write(&data->sem); +- goto lock_mm; +- } +- /* now check that vma.mm is still there, it could have been +- * deleted by vma_close before we could get the data->sem */ +- if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { +- /* might as well release this */ +- if (data->flags & PMEM_FLAGS_SUBMAP) { +- put_task_struct(data->task); +- data->task = NULL; +- /* lower the submap flag to show the mm is gone */ +- data->flags &= ~(PMEM_FLAGS_SUBMAP); +- } +- pmem_unlock_data_and_mm(data, mm); +- return -1; +- } +- *locked_mm = mm; +- return ret; +-} +- +-int pmem_remap(struct pmem_region *region, struct file *file, +- unsigned operation) +-{ +- int ret; +- struct pmem_region_node *region_node; +- struct mm_struct *mm = NULL; +- struct list_head *elt, *elt2; +- int id = get_id(file); +- struct pmem_data *data = (struct pmem_data *)file->private_data; +- +- /* pmem region must be aligned on a page boundry */ +- if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || +- !PMEM_IS_PAGE_ALIGNED(region->len))) { +-#if PMEM_DEBUG +- printk("pmem: request for unaligned pmem suballocation " +- "%lx %lx\n", region->offset, region->len); +-#endif +- return -EINVAL; +- } +- +- /* if userspace requests a region of len 0, there's nothing to do */ +- if (region->len == 0) +- return 0; +- +- /* lock the mm and data */ +- ret = pmem_lock_data_and_mm(file, data, &mm); +- if (ret) +- return 0; +- +- /* only the owner of the master file can remap the client fds +- * that back in it */ +- if (!is_master_owner(file)) { +-#if PMEM_DEBUG +- printk("pmem: remap requested from non-master process\n"); +-#endif +- ret = -EINVAL; +- goto err; +- } +- +- /* check that the requested range is within the src allocation */ +- if (unlikely((region->offset > pmem_len(id, data)) || +- (region->len > pmem_len(id, data)) || +- (region->offset + region->len > pmem_len(id, data)))) { +-#if PMEM_DEBUG +- printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); +-#endif +- ret = 
-EINVAL; +- goto err; +- } +- +- if (operation == PMEM_MAP) { +- region_node = kmalloc(sizeof(struct pmem_region_node), +- GFP_KERNEL); +- if (!region_node) { +- ret = -ENOMEM; +-#if PMEM_DEBUG +- printk(KERN_INFO "No space to allocate metadata!"); +-#endif +- goto err; +- } +- region_node->region = *region; +- list_add(®ion_node->list, &data->region_list); +- } else if (operation == PMEM_UNMAP) { +- int found = 0; +- list_for_each_safe(elt, elt2, &data->region_list) { +- region_node = list_entry(elt, struct pmem_region_node, +- list); +- if (region->len == 0 || +- (region_node->region.offset == region->offset && +- region_node->region.len == region->len)) { +- list_del(elt); +- kfree(region_node); +- found = 1; +- } +- } +- if (!found) { +-#if PMEM_DEBUG +- printk("pmem: Unmap region does not map any mapped " +- "region!"); +-#endif +- ret = -EINVAL; +- goto err; +- } +- } +- +- if (data->vma && PMEM_IS_SUBMAP(data)) { +- if (operation == PMEM_MAP) +- ret = pmem_remap_pfn_range(id, data->vma, data, +- region->offset, region->len); +- else if (operation == PMEM_UNMAP) +- ret = pmem_unmap_pfn_range(id, data->vma, data, +- region->offset, region->len); +- } +- +-err: +- pmem_unlock_data_and_mm(data, mm); +- return ret; +-} +- +-static void pmem_revoke(struct file *file, struct pmem_data *data) +-{ +- struct pmem_region_node *region_node; +- struct list_head *elt, *elt2; +- struct mm_struct *mm = NULL; +- int id = get_id(file); +- int ret = 0; +- +- data->master_file = NULL; +- ret = pmem_lock_data_and_mm(file, data, &mm); +- /* if lock_data_and_mm fails either the task that mapped the fd, or +- * the vma that mapped it have already gone away, nothing more +- * needs to be done */ +- if (ret) +- return; +- /* unmap everything */ +- /* delete the regions and region list nothing is mapped any more */ +- if (data->vma) +- list_for_each_safe(elt, elt2, &data->region_list) { +- region_node = list_entry(elt, struct pmem_region_node, +- list); +- pmem_unmap_pfn_range(id, data->vma, data, +- region_node->region.offset, +- region_node->region.len); +- list_del(elt); +- kfree(region_node); +- } +- /* delete the master file */ +- pmem_unlock_data_and_mm(data, mm); +-} +- +-static void pmem_get_size(struct pmem_region *region, struct file *file) +-{ +- struct pmem_data *data = (struct pmem_data *)file->private_data; +- int id = get_id(file); +- +- if (!has_allocation(file)) { +- region->offset = 0; +- region->len = 0; +- return; +- } else { +- region->offset = pmem_start_addr(id, data); +- region->len = pmem_len(id, data); +- } +- DLOG("offset %lx len %lx\n", region->offset, region->len); +-} +- +- +-static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +-{ +- struct pmem_data *data; +- int id = get_id(file); +- +- switch (cmd) { +- case PMEM_GET_PHYS: +- { +- struct pmem_region region; +- DLOG("get_phys\n"); +- if (!has_allocation(file)) { +- region.offset = 0; +- region.len = 0; +- } else { +- data = (struct pmem_data *)file->private_data; +- region.offset = pmem_start_addr(id, data); +- region.len = pmem_len(id, data); +- } +- printk(KERN_INFO "pmem: request for physical address of pmem region " +- "from process %d.\n", current->pid); +- if (copy_to_user((void __user *)arg, ®ion, +- sizeof(struct pmem_region))) +- return -EFAULT; +- break; +- } +- case PMEM_MAP: +- { +- struct pmem_region region; +- if (copy_from_user(®ion, (void __user *)arg, +- sizeof(struct pmem_region))) +- return -EFAULT; +- data = (struct pmem_data *)file->private_data; +- return pmem_remap(®ion, file, 
PMEM_MAP); +- } +- break; +- case PMEM_UNMAP: +- { +- struct pmem_region region; +- if (copy_from_user(®ion, (void __user *)arg, +- sizeof(struct pmem_region))) +- return -EFAULT; +- data = (struct pmem_data *)file->private_data; +- return pmem_remap(®ion, file, PMEM_UNMAP); +- break; +- } +- case PMEM_GET_SIZE: +- { +- struct pmem_region region; +- DLOG("get_size\n"); +- pmem_get_size(®ion, file); +- if (copy_to_user((void __user *)arg, ®ion, +- sizeof(struct pmem_region))) +- return -EFAULT; +- break; +- } +- case PMEM_GET_TOTAL_SIZE: +- { +- struct pmem_region region; +- DLOG("get total size\n"); +- region.offset = 0; +- get_id(file); +- region.len = pmem[id].size; +- if (copy_to_user((void __user *)arg, ®ion, +- sizeof(struct pmem_region))) +- return -EFAULT; +- break; +- } +- case PMEM_ALLOCATE: +- { +- if (has_allocation(file)) +- return -EINVAL; +- data = (struct pmem_data *)file->private_data; +- data->index = pmem_allocate(id, arg); +- break; +- } +- case PMEM_CONNECT: +- DLOG("connect\n"); +- return pmem_connect(arg, file); +- break; +- case PMEM_CACHE_FLUSH: +- { +- struct pmem_region region; +- DLOG("flush\n"); +- if (copy_from_user(®ion, (void __user *)arg, +- sizeof(struct pmem_region))) +- return -EFAULT; +- flush_pmem_file(file, region.offset, region.len); +- break; +- } +- default: +- if (pmem[id].ioctl) +- return pmem[id].ioctl(file, cmd, arg); +- return -EINVAL; +- } +- return 0; +-} +- +-#if PMEM_DEBUG +-static ssize_t debug_open(struct inode *inode, struct file *file) +-{ +- file->private_data = inode->i_private; +- return 0; +-} +- +-static ssize_t debug_read(struct file *file, char __user *buf, size_t count, +- loff_t *ppos) +-{ +- struct list_head *elt, *elt2; +- struct pmem_data *data; +- struct pmem_region_node *region_node; +- int id = (int)file->private_data; +- const int debug_bufmax = 4096; +- static char buffer[4096]; +- int n = 0; +- +- DLOG("debug open\n"); +- n = scnprintf(buffer, debug_bufmax, +- "pid #: mapped regions (offset, len) (offset,len)...\n"); +- +- mutex_lock(&pmem[id].data_list_lock); +- list_for_each(elt, &pmem[id].data_list) { +- data = list_entry(elt, struct pmem_data, list); +- down_read(&data->sem); +- n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", +- data->pid); +- list_for_each(elt2, &data->region_list) { +- region_node = list_entry(elt2, struct pmem_region_node, +- list); +- n += scnprintf(buffer + n, debug_bufmax - n, +- "(%lx,%lx) ", +- region_node->region.offset, +- region_node->region.len); +- } +- n += scnprintf(buffer + n, debug_bufmax - n, "\n"); +- up_read(&data->sem); +- } +- mutex_unlock(&pmem[id].data_list_lock); +- +- n++; +- buffer[n] = 0; +- return simple_read_from_buffer(buf, count, ppos, buffer, n); +-} +- +-static struct file_operations debug_fops = { +- .read = debug_read, +- .open = debug_open, +-}; +-#endif +- +-#if 0 +-static struct miscdevice pmem_dev = { +- .name = "pmem", +- .fops = &pmem_fops, +-}; +-#endif +- +-int pmem_setup(struct android_pmem_platform_data *pdata, +- long (*ioctl)(struct file *, unsigned int, unsigned long), +- int (*release)(struct inode *, struct file *)) +-{ +- int err = 0; +- int i, index = 0; +- int id = id_count; +- id_count++; +- +- pmem[id].no_allocator = pdata->no_allocator; +- pmem[id].cached = pdata->cached; +- pmem[id].buffered = pdata->buffered; +- pmem[id].base = pdata->start; +- pmem[id].size = pdata->size; +- pmem[id].ioctl = ioctl; +- pmem[id].release = release; +- init_rwsem(&pmem[id].bitmap_sem); +- mutex_init(&pmem[id].data_list_lock); +- 
INIT_LIST_HEAD(&pmem[id].data_list); +- pmem[id].dev.name = pdata->name; +- pmem[id].dev.minor = id; +- pmem[id].dev.fops = &pmem_fops; +- printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); +- +- err = misc_register(&pmem[id].dev); +- if (err) { +- printk(KERN_ALERT "Unable to register pmem driver!\n"); +- goto err_cant_register_device; +- } +- pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; +- +- pmem[id].bitmap = kmalloc(pmem[id].num_entries * +- sizeof(struct pmem_bits), GFP_KERNEL); +- if (!pmem[id].bitmap) +- goto err_no_mem_for_metadata; +- +- memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * +- pmem[id].num_entries); +- +- for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { +- if ((pmem[id].num_entries) & 1<<i) { +- PMEM_ORDER(id, index) = i; +- index = PMEM_NEXT_INDEX(id, index); +- } +- } +- +- if (pmem[id].cached) +- pmem[id].vbase = ioremap_cached(pmem[id].base, +- pmem[id].size); +-#ifdef ioremap_ext_buffered +- else if (pmem[id].buffered) +- pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, +- pmem[id].size); +-#endif +- else +- pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); +- +- if (pmem[id].vbase == 0) +- goto error_cant_remap; +- +- pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); +- if (pmem[id].no_allocator) +- pmem[id].allocated = 0; +- +-#if PMEM_DEBUG +- debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, +- &debug_fops); +-#endif +- return 0; +-error_cant_remap: +- kfree(pmem[id].bitmap); +-err_no_mem_for_metadata: +- misc_deregister(&pmem[id].dev); +-err_cant_register_device: +- return -1; +-} +- +-static int pmem_probe(struct platform_device *pdev) +-{ +- struct android_pmem_platform_data *pdata; +- +- if (!pdev || !pdev->dev.platform_data) { +- printk(KERN_ALERT "Unable to probe pmem!\n"); +- return -1; +- } +- pdata = pdev->dev.platform_data; +- return pmem_setup(pdata, NULL, NULL); +-} +- +- +-static int pmem_remove(struct platform_device *pdev) +-{ +- int id = pdev->id; +- __free_page(pfn_to_page(pmem[id].garbage_pfn)); +- misc_deregister(&pmem[id].dev); +- return 0; +-} +- +-static struct platform_driver pmem_driver = { +- .probe = pmem_probe, +- .remove = pmem_remove, +- .driver = { .name = "android_pmem" } +-}; +- +- +-static int __init pmem_init(void) +-{ +- return platform_driver_register(&pmem_driver); +-} +- +-static void __exit pmem_exit(void) +-{ +- platform_driver_unregister(&pmem_driver); +-} +- +-module_init(pmem_init); +-module_exit(pmem_exit); +- diff --git a/patches.android/android-0038-Staging-android-binder-Fix-crashes-when-sharing-a-binder-fil.patch b/patches.android/android-0038-Staging-android-binder-Fix-crashes-when-sharing-a-binder-fil.patch new file mode 100644 index 0000000000000..c4ce228223e80 --- /dev/null +++ b/patches.android/android-0038-Staging-android-binder-Fix-crashes-when-sharing-a-binder-fil.patch @@ -0,0 +1,87 @@ +From bd1eff9741af27378b241b347041c724bb28e857 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Wed, 1 Feb 2012 15:29:13 -0800 +Subject: Staging: android: binder: Fix crashes when sharing a binder + file between processes +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: bd1eff9741af27378b241b347041c724bb28e857 + +Opening the binder driver and sharing the file returned with +other processes (e.g. by calling fork) can crash the kernel. 
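As a rough userspace illustration of the sharing pattern this paragraph describes (not part of the patch itself): one process opens and mmaps the binder device, then fork() hands the same open file to a child. The /dev/binder path is the usual device node, but the map size below is an arbitrary illustrative value, and actually provoking the crash would additionally require binder ioctls from both processes, which are omitted here.

/*
 * Illustrative sketch only: open a binder fd, mmap it, then fork so the
 * parent and child share the same struct file.  The map size is
 * arbitrary; triggering the crash described above would also require
 * binder ioctls from both processes, which this sketch omits.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/binder", O_RDWR);
	if (fd < 0) {
		perror("open /dev/binder");
		return EXIT_FAILURE;
	}

	/* One process maps the driver; binder_proc is set up against its mm. */
	void *map = mmap(NULL, 128 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* fork() shares the same open binder file with a second process. */
	pid_t pid = fork();
	if (pid == 0) {
		/* Child: same struct file, different mm_struct. */
		_exit(0);
	}

	waitpid(pid, NULL, 0);
	munmap(map, 128 * 1024);
	close(fd);
	return 0;
}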
+Prevent these crashes with the following changes: +- Add a mutex to protect against two processes mmapping the + same binder_proc. +- After locking mmap_sem, check that the vma we want to access + (still) points to the same mm_struct. +- Use proc->tsk instead of current to get the files struct since + this is where we get the rlimit from. + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Cc: stable <stable@vger.kernel.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> + +diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c +index 48cf27c..f0b7e66 100644 +--- a/drivers/staging/android/binder.c ++++ b/drivers/staging/android/binder.c +@@ -38,6 +38,7 @@ + + static DEFINE_MUTEX(binder_lock); + static DEFINE_MUTEX(binder_deferred_lock); ++static DEFINE_MUTEX(binder_mmap_lock); + + static HLIST_HEAD(binder_procs); + static HLIST_HEAD(binder_deferred_list); +@@ -632,6 +633,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, + if (mm) { + down_write(&mm->mmap_sem); + vma = proc->vma; ++ if (vma && mm != vma->vm_mm) { ++ pr_err("binder: %d: vma mm and task mm mismatch\n", ++ proc->pid); ++ vma = NULL; ++ } + } + + if (allocate == 0) +@@ -2802,6 +2808,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + } + vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + ++ mutex_lock(&binder_mmap_lock); + if (proc->buffer) { + ret = -EBUSY; + failure_string = "already mapped"; +@@ -2816,6 +2823,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + } + proc->buffer = area->addr; + proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; ++ mutex_unlock(&binder_mmap_lock); + + #ifdef CONFIG_CPU_CACHE_VIPT + if (cache_is_vipt_aliasing()) { +@@ -2848,7 +2856,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) + binder_insert_free_buffer(proc, buffer); + proc->free_async_space = proc->buffer_size / 2; + barrier(); +- proc->files = get_files_struct(current); ++ proc->files = get_files_struct(proc->tsk); + proc->vma = vma; + + /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", +@@ -2859,10 +2867,12 @@ err_alloc_small_buf_failed: + kfree(proc->pages); + proc->pages = NULL; + err_alloc_pages_failed: ++ mutex_lock(&binder_mmap_lock); + vfree(proc->buffer); + proc->buffer = NULL; + err_get_vm_area_failed: + err_already_mapped: ++ mutex_unlock(&binder_mmap_lock); + err_bad_arg: + printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n", + proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); diff --git a/patches.android/android-0039-staging-android-lowmemorykiller-Don-t-wait-more-than-one-sec.patch b/patches.android/android-0039-staging-android-lowmemorykiller-Don-t-wait-more-than-one-sec.patch new file mode 100644 index 0000000000000..bbec644e3d58c --- /dev/null +++ b/patches.android/android-0039-staging-android-lowmemorykiller-Don-t-wait-more-than-one-sec.patch @@ -0,0 +1,49 @@ +From e5d7965f88a3755b2d0c54768a17032ab3a72819 Mon Sep 17 00:00:00 2001 +From: =?UTF-8?q?Arve=20Hj=C3=B8nnev=C3=A5g?= <arve@android.com> +Date: Fri, 13 Jan 2012 22:21:25 +0400 +Subject: staging: android: lowmemorykiller: Don't wait more than one + second for a process to die +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit +Patch-mainline: HEAD +Git-commit: e5d7965f88a3755b2d0c54768a17032ab3a72819 + +If a process forked and the child process was killed by the +lowmemorykiller, the lowmemory killer would be disabled until 
+the parent process reaped the child or it died itself. + +Signed-off-by: Arve Hjønnevåg <arve@android.com> +Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> + +diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c +index 2d8d2b7..efc7dc1 100644 +--- a/drivers/staging/android/lowmemorykiller.c ++++ b/drivers/staging/android/lowmemorykiller.c +@@ -54,6 +54,7 @@ static size_t lowmem_minfree[6] = { + static int lowmem_minfree_size = 4; + + static struct task_struct *lowmem_deathpending; ++static unsigned long lowmem_deathpending_timeout; + + #define lowmem_print(level, x...) \ + do { \ +@@ -103,7 +104,8 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) + * Note: Currently you need CONFIG_PROFILING + * for this to work correctly. + */ +- if (lowmem_deathpending) ++ if (lowmem_deathpending && ++ time_before_eq(jiffies, lowmem_deathpending_timeout)) + return 0; + + if (lowmem_adj_size < array_size) +@@ -178,6 +180,7 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc) + */ + #ifdef CONFIG_PROFILING + lowmem_deathpending = selected; ++ lowmem_deathpending_timeout = jiffies + HZ; + task_handoff_register(&task_nb); + #endif + force_sig(SIGKILL, selected); diff --git a/patches.android/android-0040-staging-android-ram_console-Don-t-build-on-arches-w-o-iorema.patch b/patches.android/android-0040-staging-android-ram_console-Don-t-build-on-arches-w-o-iorema.patch new file mode 100644 index 0000000000000..36ee8811930c7 --- /dev/null +++ b/patches.android/android-0040-staging-android-ram_console-Don-t-build-on-arches-w-o-iorema.patch @@ -0,0 +1,33 @@ +From 203209ef77e4d5f0ee729557b09770bce0c2d251 Mon Sep 17 00:00:00 2001 +From: Anton Vorontsov <anton.vorontsov@linaro.org> +Date: Tue, 7 Feb 2012 09:13:27 +0400 +Subject: staging: android/ram_console: Don't build on arches w/o + ioremap +Patch-mainline: HEAD +Git-commit: 203209ef77e4d5f0ee729557b09770bce0c2d251 + +This patch fixes UML build: + + CC drivers/staging/android/ram_console.o + drivers/staging/android/ram_console.c: In function + 'ram_console_driver_probe': + drivers/staging/android/ram_console.c:358:2: error: implicit declaration + of function 'ioremap' [-Werror=implicit-function-declaration] + cc1: some warnings being treated as errors + make[3]: *** [drivers/staging/android/ram_console.o] Error 1 + +Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org> +Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> + +diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig +index 94cb2ac..fef3580 100644 +--- a/drivers/staging/android/Kconfig ++++ b/drivers/staging/android/Kconfig +@@ -27,6 +27,7 @@ config ANDROID_LOGGER + + config ANDROID_RAM_CONSOLE + bool "Android RAM buffer console" ++ depends on !S390 && !UML + default n + + config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE @@ -0,0 +1,52 @@ +# +# LTSI patch queue series +# + + +ltsi-makefile-addition.patch + +# +# Android patches. 
+# +# 3.2.0 through 3.3-rc3 +patches.android/android-0001-Revert-Staging-android-delete-android-drivers.patch +patches.android/android-0002-staging-android-fix-build-issues.patch +patches.android/android-0003-android-common-include-linux-slab.h.patch +patches.android/android-0004-android-common-Fix-slab.h-includes-for-2.6.34-rc4.patch +patches.android/android-0005-Revert-Staging-android-mark-subsystem-as-broken.patch +patches.android/android-0006-staging-android-ramconsole-Ensure-ramconsole-does-not-get-cl.patch +patches.android/android-0007-Staging-android-ram_console-Start-ram-console-earlier.patch +patches.android/android-0008-Staging-android-timed_gpio-Request-gpios.patch +patches.android/android-0009-android-logger-Add-new-system-log-for-framework-system-log-m.patch +patches.android/android-0010-binder-Use-seq_file-for-debug-interface.patch +patches.android/android-0011-staging-android-binder-Move-debugging-information-from-procf.patch +patches.android/android-0012-Staging-android-timed_gpio-Properly-discard-invalid-timeout-.patch +patches.android/android-0013-Staging-android-binder-Create-dedicated-workqueue-for-binder.patch +patches.android/android-0014-staging-android-lowmemorykiller-Don-t-try-to-kill-the-same-p.patch +patches.android/android-0015-staging-android-lowmemkiller-Substantially-reduce-overhead-d.patch +patches.android/android-0016-staging-binder-Fix-memory-corruption-via-page-aliasing.patch +patches.android/android-0017-staging-android-lowmemorykiller-Remove-bitrotted-codepath.patch +patches.android/android-0018-staging-android-lowmemorykiller-Update-arguments-of-shrinker.patch +patches.android/android-0019-staging-android-lowmemorykiller-Ignore-shmem-pages-in-page-c.patch +patches.android/android-0020-android-lowmemorykiller-Fix-arguments-to-lowmem_shrink.patch +patches.android/android-0021-android-logger-bump-up-the-logger-buffer-sizes.patch +patches.android/android-0022-staging-android-ram_console-pass-in-a-boot-info-string.patch +patches.android/android-0023-Staging-android-fixed-white-spaces-coding-style-issue-in-log.patch +patches.android/android-0024-staging-android-switch-switch-class-and-GPIO-drivers.patch +patches.android/android-0025-staging-android-switch-minor-code-formatting-cleanups.patch +patches.android/android-0026-staging-android-add-pmem-driver.patch +patches.android/android-0027-ashmem-Anonymous-shared-memory-subsystem.patch +patches.android/android-0028-ashmem-Implement-read-2-in-ashmem-driver.patch +patches.android/android-0029-ashmem-Fix-ASHMEM_SET_PROT_MASK.patch +patches.android/android-0030-ashmem-Update-arguments-of-shrinker-for-2.6.35.patch +patches.android/android-0031-ashmem-Support-lseek-2-in-ashmem-driver.patch +patches.android/android-0032-ashmem-Fix-arguments-to-ashmem_shrink.patch +patches.android/android-0033-ashmem-Whitespace-cleanups.patch +patches.android/android-0034-Staging-android-fixed-a-space-warning-in-binder.h.patch +patches.android/android-0035-Staging-android-fixed-80-characters-warnings-in-lowmemorykil.patch +patches.android/android-0036-Staging-android-binder-Don-t-call-dump_stack-in-binder_vma_o.patch +patches.android/android-0037-Staging-android-Remove-pmem-driver.patch +patches.android/android-0038-Staging-android-binder-Fix-crashes-when-sharing-a-binder-fil.patch +patches.android/android-0039-staging-android-lowmemorykiller-Don-t-wait-more-than-one-sec.patch +patches.android/android-0040-staging-android-ram_console-Don-t-build-on-arches-w-o-iorema.patch + |