Merge branch 'android11-5.4' into 'android11-5.4-lts'
Sync up with android11-5.4 for the following commits:

b0f930a58e BACKPORT: f2fs: relocate inline conversion from mmap() to mkwrite()
4f4602b711 BACKPORT: f2fs: support RO feature
e6fd4c5c6a BACKPORT: f2fs: fix wrong total_sections check and fsmeta check
ce664fbdcc BACKPORT: FROMGIT: binder: fix freeze race
b1232b020f FROMGIT: binder: BINDER_GET_FROZEN_INFO ioctl
a02d0f685d FROMGIT: binder: use EINTR for interrupted wait for work
e658e9e4bc BACKPORT: FROMGIT: binder: BINDER_FREEZE ioctl
63ef444773 ANDROID: usb: gadget: f_accessory: Mitigate handling of non-existent USB request
50c9c8cb33 FROMGIT: binder: fix test regression due to sender_euid change
1c1f571840 BACKPORT: binder: use cred instead of task for getsecid
ea1a2391d5 BACKPORT: binder: use cred instead of task for selinux checks
1fe8a2bb64 BACKPORT: binder: use euid from cred instead of using task
a080050156 ANDROID: setlocalversion: make KMI_GENERATION optional
e785a25f52 Revert "ANDROID: GKI: Enable CHACHA20POLY1305 and XCBC"

Change-Id: Ica7d80e62dcd4b12f7294bcc4b5c9291491ad37b
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit b2d37d0916
@@ -557,9 +557,7 @@ CONFIG_STATIC_USERMODEHELPER_PATH=""
 CONFIG_SECURITY_SELINUX=y
 CONFIG_INIT_STACK_ALL_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
-CONFIG_CRYPTO_CHACHA20POLY1305=y
 CONFIG_CRYPTO_ADIANTUM=y
-CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_BLAKE2B=y
 CONFIG_CRYPTO_LZ4=y
 CONFIG_CRYPTO_ZSTD=y
@@ -485,9 +485,7 @@ CONFIG_STATIC_USERMODEHELPER_PATH=""
 CONFIG_SECURITY_SELINUX=y
 CONFIG_INIT_STACK_ALL_ZERO=y
 CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
-CONFIG_CRYPTO_CHACHA20POLY1305=y
 CONFIG_CRYPTO_ADIANTUM=y
-CONFIG_CRYPTO_XCBC=y
 CONFIG_CRYPTO_BLAKE2B=y
 CONFIG_CRYPTO_SHA256_SSSE3=y
 CONFIG_CRYPTO_AES_NI_INTEL=y
@@ -1628,6 +1628,12 @@ static void binder_free_transaction(struct binder_transaction *t)
 
 	if (target_proc) {
 		binder_inner_proc_lock(target_proc);
+		target_proc->outstanding_txns--;
+		if (target_proc->outstanding_txns < 0)
+			pr_warn("%s: Unexpected outstanding_txns %d\n",
+				__func__, target_proc->outstanding_txns);
+		if (!target_proc->outstanding_txns && target_proc->is_frozen)
+			wake_up_interruptible_all(&target_proc->freeze_wait);
 		if (t->buffer)
 			t->buffer->transaction = NULL;
 		binder_inner_proc_unlock(target_proc);
@@ -2168,7 +2174,8 @@ static int binder_translate_binder(struct flat_binder_object *fp,
 		ret = -EINVAL;
 		goto done;
 	}
-	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+	if (security_binder_transfer_binder(binder_get_cred(proc),
+					    binder_get_cred(target_proc))) {
 		ret = -EPERM;
 		goto done;
 	}
@@ -2214,7 +2221,8 @@ static int binder_translate_handle(struct flat_binder_object *fp,
 			proc->pid, thread->pid, fp->handle);
 		return -EINVAL;
 	}
-	if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+	if (security_binder_transfer_binder(binder_get_cred(proc),
+					    binder_get_cred(target_proc))) {
 		ret = -EPERM;
 		goto done;
 	}
@@ -2302,7 +2310,8 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
 		ret = -EBADF;
 		goto err_fget;
 	}
-	ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+	ret = security_binder_transfer_file(binder_get_cred(proc),
+					    binder_get_cred(target_proc), file);
 	if (ret < 0) {
 		ret = -EPERM;
 		goto err_security;
@@ -2458,10 +2467,11 @@ static int binder_fixup_parent(struct binder_transaction *t,
  * If the @thread parameter is not NULL, the transaction is always queued
  * to the waitlist of that specific thread.
  *
- * Return:	true if the transactions was successfully queued
- *		false if the target process or thread is dead
+ * Return:	0 if the transaction was successfully queued
+ *		BR_DEAD_REPLY if the target process or thread is dead
+ *		BR_FROZEN_REPLY if the target process or thread is frozen
  */
-static bool binder_proc_transaction(struct binder_transaction *t,
+static int binder_proc_transaction(struct binder_transaction *t,
 				    struct binder_proc *proc,
 				    struct binder_thread *thread)
 {
@@ -2485,11 +2495,16 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	}
 
 	binder_inner_proc_lock(proc);
+	if (proc->is_frozen) {
+		proc->sync_recv |= !oneway;
+		proc->async_recv |= oneway;
+	}
 
-	if (proc->is_dead || (thread && thread->is_dead)) {
+	if ((proc->is_frozen && !oneway) || proc->is_dead ||
+			(thread && thread->is_dead)) {
 		binder_inner_proc_unlock(proc);
 		binder_node_unlock(node);
-		return false;
+		return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
 	}
 
 	if (!thread && !pending_async)
@@ -2508,10 +2523,11 @@ static bool binder_proc_transaction(struct binder_transaction *t,
 	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
 
+	proc->outstanding_txns++;
 	binder_inner_proc_unlock(proc);
 	binder_node_unlock(node);
 
-	return true;
+	return 0;
 }
 
 /**
@@ -2701,8 +2717,8 @@ static void binder_transaction(struct binder_proc *proc,
 			goto err_dead_binder;
 		}
 		e->to_node = target_node->debug_id;
-		if (security_binder_transaction(proc->tsk,
-						target_proc->tsk) < 0) {
+		if (security_binder_transaction(binder_get_cred(proc),
+						binder_get_cred(target_proc)) < 0) {
 			return_error = BR_FAILED_REPLY;
 			return_error_param = -EPERM;
 			return_error_line = __LINE__;
@@ -2837,7 +2853,7 @@ static void binder_transaction(struct binder_proc *proc,
 		u32 secid;
 		size_t added_size;
 
-		security_task_getsecid(proc->tsk, &secid);
+		security_cred_getsecid(binder_get_cred(proc), &secid);
 		ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
 		if (ret) {
 			return_error = BR_FAILED_REPLY;
@@ -3151,12 +3167,14 @@ static void binder_transaction(struct binder_proc *proc,
 		binder_enqueue_thread_work(thread, tcomplete);
 		binder_inner_proc_lock(target_proc);
 		if (target_thread->is_dead) {
+			return_error = BR_DEAD_REPLY;
 			binder_inner_proc_unlock(target_proc);
 			goto err_dead_proc_or_thread;
 		}
 		BUG_ON(t->buffer->async_transaction != 0);
 		binder_pop_transaction_ilocked(target_thread, in_reply_to);
 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
+		target_proc->outstanding_txns++;
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
 		trace_android_vh_binder_restore_priority(in_reply_to, current);
@@ -3177,7 +3195,9 @@ static void binder_transaction(struct binder_proc *proc,
 		t->from_parent = thread->transaction_stack;
 		thread->transaction_stack = t;
 		binder_inner_proc_unlock(proc);
-		if (!binder_proc_transaction(t, target_proc, target_thread)) {
+		return_error = binder_proc_transaction(t,
+				target_proc, target_thread);
+		if (return_error) {
 			binder_inner_proc_lock(proc);
 			binder_pop_transaction_ilocked(thread, t);
 			binder_inner_proc_unlock(proc);
@@ -3187,7 +3207,8 @@ static void binder_transaction(struct binder_proc *proc,
 		BUG_ON(target_node == NULL);
 		BUG_ON(t->buffer->async_transaction != 1);
 		binder_enqueue_thread_work(thread, tcomplete);
-		if (!binder_proc_transaction(t, target_proc, NULL))
+		return_error = binder_proc_transaction(t, target_proc, NULL);
+		if (return_error)
 			goto err_dead_proc_or_thread;
 	}
 	if (target_thread)
@@ -3204,7 +3225,6 @@ static void binder_transaction(struct binder_proc *proc,
 	return;
 
 err_dead_proc_or_thread:
-	return_error = BR_DEAD_REPLY;
 	return_error_line = __LINE__;
 	binder_dequeue_work(proc, tcomplete);
 err_translate_failed:
@@ -3830,7 +3850,7 @@ static int binder_wait_for_work(struct binder_thread *thread,
 		binder_inner_proc_lock(proc);
 		list_del_init(&thread->waiting_thread_node);
 		if (signal_pending(current)) {
-			ret = -ERESTARTSYS;
+			ret = -EINTR;
 			break;
 		}
 	}
@@ -4430,9 +4450,14 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 static void binder_free_proc(struct binder_proc *proc)
 {
 	struct binder_device *device;
+	struct binder_proc_ext *eproc =
+		container_of(proc, struct binder_proc_ext, proc);
 
 	BUG_ON(!list_empty(&proc->todo));
 	BUG_ON(!list_empty(&proc->delivered_death));
+	if (proc->outstanding_txns)
+		pr_warn("%s: Unexpected outstanding_txns %d\n",
+			__func__, proc->outstanding_txns);
 	device = container_of(proc->context, struct binder_device, context);
 	if (refcount_dec_and_test(&device->ref)) {
 		kfree(proc->context->name);
@@ -4440,8 +4465,9 @@ static void binder_free_proc(struct binder_proc *proc)
 	}
 	binder_alloc_deferred_release(&proc->alloc);
 	put_task_struct(proc->tsk);
+	put_cred(eproc->cred);
 	binder_stats_deleted(BINDER_STAT_PROC);
-	kfree(proc);
+	kfree(eproc);
 }
 
 static void binder_free_thread(struct binder_thread *thread)
@@ -4495,6 +4521,7 @@ static int binder_thread_release(struct binder_proc *proc,
 			(t->to_thread == thread) ? "in" : "out");
 
 		if (t->to_thread == thread) {
+			thread->proc->outstanding_txns--;
 			t->to_proc = NULL;
 			t->to_thread = NULL;
 			if (t->buffer) {
@@ -4651,7 +4678,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
 		ret = -EBUSY;
 		goto out;
 	}
-	ret = security_binder_set_context_mgr(proc->tsk);
+	ret = security_binder_set_context_mgr(binder_get_cred(proc));
 	if (ret < 0)
 		goto out;
 	if (uid_valid(context->binder_context_mgr_uid)) {
@@ -4745,6 +4772,100 @@ static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
 	return 0;
 }
 
+static bool binder_txns_pending_ilocked(struct binder_proc *proc)
+{
+	struct rb_node *n;
+	struct binder_thread *thread;
+
+	if (proc->outstanding_txns > 0)
+		return true;
+
+	for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
+		thread = rb_entry(n, struct binder_thread, rb_node);
+		if (thread->transaction_stack)
+			return true;
+	}
+	return false;
+}
+
+static int binder_ioctl_freeze(struct binder_freeze_info *info,
+			       struct binder_proc *target_proc)
+{
+	int ret = 0;
+
+	if (!info->enable) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->sync_recv = false;
+		target_proc->async_recv = false;
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+		return 0;
+	}
+
+	/*
+	 * Freezing the target. Prevent new transactions by
+	 * setting frozen state. If timeout specified, wait
+	 * for transactions to drain.
+	 */
+	binder_inner_proc_lock(target_proc);
+	target_proc->sync_recv = false;
+	target_proc->async_recv = false;
+	target_proc->is_frozen = true;
+	binder_inner_proc_unlock(target_proc);
+
+	if (info->timeout_ms > 0)
+		ret = wait_event_interruptible_timeout(
+			target_proc->freeze_wait,
+			(!target_proc->outstanding_txns),
+			msecs_to_jiffies(info->timeout_ms));
+
+	/* Check pending transactions that wait for reply */
+	if (ret >= 0) {
+		binder_inner_proc_lock(target_proc);
+		if (binder_txns_pending_ilocked(target_proc))
+			ret = -EAGAIN;
+		binder_inner_proc_unlock(target_proc);
+	}
+
+	if (ret < 0) {
+		binder_inner_proc_lock(target_proc);
+		target_proc->is_frozen = false;
+		binder_inner_proc_unlock(target_proc);
+	}
+
+	return ret;
+}
+
+static int binder_ioctl_get_freezer_info(
+				struct binder_frozen_status_info *info)
+{
+	struct binder_proc *target_proc;
+	bool found = false;
+	__u32 txns_pending;
+
+	info->sync_recv = 0;
+	info->async_recv = 0;
+
+	mutex_lock(&binder_procs_lock);
+	hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+		if (target_proc->pid == info->pid) {
+			found = true;
+			binder_inner_proc_lock(target_proc);
+			txns_pending = binder_txns_pending_ilocked(target_proc);
+			info->sync_recv |= target_proc->sync_recv |
+					(txns_pending << 1);
+			info->async_recv |= target_proc->async_recv;
+			binder_inner_proc_unlock(target_proc);
+		}
+	}
+	mutex_unlock(&binder_procs_lock);
+
+	if (!found)
+		return -EINVAL;
+
+	return 0;
+}
+
 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	int ret;
@@ -4863,6 +4984,84 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 		break;
 	}
+	case BINDER_FREEZE: {
+		struct binder_freeze_info info;
+		struct binder_proc **target_procs = NULL, *target_proc;
+		int target_procs_count = 0, i = 0;
+
+		ret = 0;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		mutex_lock(&binder_procs_lock);
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid == info.pid)
+				target_procs_count++;
+		}
+
+		if (target_procs_count == 0) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -EINVAL;
+			goto err;
+		}
+
+		target_procs = kcalloc(target_procs_count,
+				       sizeof(struct binder_proc *),
+				       GFP_KERNEL);
+
+		if (!target_procs) {
+			mutex_unlock(&binder_procs_lock);
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
+			if (target_proc->pid != info.pid)
+				continue;
+
+			binder_inner_proc_lock(target_proc);
+			target_proc->tmp_ref++;
+			binder_inner_proc_unlock(target_proc);
+
+			target_procs[i++] = target_proc;
+		}
+		mutex_unlock(&binder_procs_lock);
+
+		for (i = 0; i < target_procs_count; i++) {
+			if (ret >= 0)
+				ret = binder_ioctl_freeze(&info,
+							  target_procs[i]);
+
+			binder_proc_dec_tmpref(target_procs[i]);
+		}
+
+		kfree(target_procs);
+
+		if (ret < 0)
+			goto err;
+		break;
+	}
+	case BINDER_GET_FROZEN_INFO: {
+		struct binder_frozen_status_info info;
+
+		if (copy_from_user(&info, ubuf, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+
+		ret = binder_ioctl_get_freezer_info(&info);
+		if (ret < 0)
+			goto err;
+
+		if (copy_to_user(ubuf, &info, sizeof(info))) {
+			ret = -EFAULT;
+			goto err;
+		}
+		break;
+	}
 	default:
 		ret = -EINVAL;
 		goto err;
@@ -4872,7 +5071,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (thread)
 		thread->looper_need_return = false;
 	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-	if (ret && ret != -ERESTARTSYS)
+	if (ret && ret != -EINTR)
 		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
 err_unlocked:
 	trace_binder_ioctl_done(ret);
@@ -4953,6 +5152,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 static int binder_open(struct inode *nodp, struct file *filp)
 {
 	struct binder_proc *proc, *itr;
+	struct binder_proc_ext *eproc;
 	struct binder_device *binder_dev;
 	struct binderfs_info *info;
 	struct dentry *binder_binderfs_dir_entry_proc = NULL;
@@ -4961,14 +5161,17 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
 		     current->group_leader->pid, current->pid);
 
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	eproc = kzalloc(sizeof(*eproc), GFP_KERNEL);
+	proc = &eproc->proc;
 	if (proc == NULL)
 		return -ENOMEM;
 	spin_lock_init(&proc->inner_lock);
 	spin_lock_init(&proc->outer_lock);
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
+	eproc->cred = get_cred(filp->f_cred);
 	INIT_LIST_HEAD(&proc->todo);
+	init_waitqueue_head(&proc->freeze_wait);
 	if (binder_supported_policy(current->policy)) {
 		proc->default_priority.sched_policy = current->policy;
 		proc->default_priority.prio = current->normal_prio;
@@ -5188,6 +5391,9 @@ static void binder_deferred_release(struct binder_proc *proc)
 	proc->tmp_ref++;
 
 	proc->is_dead = true;
+	proc->is_frozen = false;
+	proc->sync_recv = false;
+	proc->async_recv = false;
 	threads = 0;
 	active_transactions = 0;
 	while ((n = rb_first(&proc->threads))) {
@@ -388,9 +388,24 @@ struct binder_priority {
  *                        (protected by binder_deferred_lock)
  * @deferred_work:        bitmap of deferred work to perform
  *                        (protected by binder_deferred_lock)
+ * @outstanding_txns:     number of transactions to be transmitted before
+ *                        processes in freeze_wait are woken up
+ *                        (protected by @inner_lock)
  * @is_dead:              process is dead and awaiting free
  *                        when outstanding transactions are cleaned up
  *                        (protected by @inner_lock)
+ * @is_frozen:            process is frozen and unable to service
+ *                        binder transactions
+ *                        (protected by @inner_lock)
+ * @sync_recv:            process received sync transactions since last frozen
+ *                        bit 0: received sync transaction after being frozen
+ *                        bit 1: new pending sync transaction during freezing
+ *                        (protected by @inner_lock)
+ * @async_recv:           process received async transactions since last frozen
+ *                        (protected by @inner_lock)
+ * @freeze_wait:          waitqueue of processes waiting for all outstanding
+ *                        transactions to be processed
+ *                        (protected by @inner_lock)
  * @todo:                 list of work for this process
  *                        (protected by @inner_lock)
  * @stats:                per-process binder statistics
@@ -431,7 +446,12 @@ struct binder_proc {
	struct task_struct *tsk;
	struct hlist_node deferred_work_node;
	int deferred_work;
+	int outstanding_txns;
	bool is_dead;
+	bool is_frozen;
+	bool sync_recv;
+	bool async_recv;
+	wait_queue_head_t freeze_wait;

	struct list_head todo;
	struct binder_stats stats;
@@ -449,6 +469,29 @@ struct binder_proc {
	struct dentry *binderfs_entry;
 };
 
+/**
+ * struct binder_proc_ext - binder process bookkeeping
+ * @proc:                 element for binder_procs list
+ * @cred                  struct cred associated with the `struct file`
+ *                        in binder_open()
+ *                        (invariant after initialized)
+ *
+ * Extended binder_proc -- needed to add the "cred" field without
+ * changing the KMI for binder_proc.
+ */
+struct binder_proc_ext {
+	struct binder_proc proc;
+	const struct cred *cred;
+};
+
+static inline const struct cred *binder_get_cred(struct binder_proc *proc)
+{
+	struct binder_proc_ext *eproc;
+
+	eproc = container_of(proc, struct binder_proc_ext, proc);
+	return eproc->cred;
+}
+
 /**
  * struct binder_thread - binder thread bookkeeping
  * @proc:                 binder process for this thread
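The binder_proc_ext wrapper introduced above is the usual container_of pattern for extending a structure whose layout is frozen by the KMI: allocate the wrapper, hand out a pointer to the embedded original struct, and recover the wrapper only where the new field is needed (binder_get_cred()). A stripped-down userspace illustration of the same idea follows; the widget names are invented for the example and are not kernel API.

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct widget {                 /* stands in for the KMI-frozen struct (binder_proc) */
	int id;
};

struct widget_ext {             /* wrapper adding a field without touching widget */
	struct widget base;
	const char *owner;      /* stands in for the added cred pointer */
};

static struct widget *widget_alloc(const char *owner)
{
	struct widget_ext *ext = calloc(1, sizeof(*ext));

	if (!ext)
		return NULL;
	ext->owner = owner;
	return &ext->base;      /* existing callers keep seeing the old type */
}

static const char *widget_owner(struct widget *w)
{
	/* recover the wrapper, exactly like binder_get_cred() does */
	return container_of(w, struct widget_ext, base)->owner;
}

static void widget_free(struct widget *w)
{
	free(container_of(w, struct widget_ext, base));
}

int main(void)
{
	struct widget *w = widget_alloc("example");
	int ok = w && widget_owner(w) != NULL;

	widget_free(w);
	return ok ? 0 : 1;
}

Because the embedded struct is the first member, existing code that only knows the old type keeps working, and the wrapper is recovered at zero cost where the new field matters.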
@@ -601,8 +601,11 @@ static int create_bulk_endpoints(struct acc_dev *dev,
	pr_err("acc_bind() could not allocate requests\n");
	while ((req = req_get(dev, &dev->tx_idle)))
		acc_request_free(req, dev->ep_in);
-	for (i = 0; i < RX_REQ_MAX; i++)
+	for (i = 0; i < RX_REQ_MAX; i++) {
		acc_request_free(dev->rx_req[i], dev->ep_out);
+		dev->rx_req[i] = NULL;
+	}

	return -1;
 }
@@ -634,6 +637,12 @@ static ssize_t acc_read(struct file *fp, char __user *buf,
		goto done;
	}

+	if (!dev->rx_req[0]) {
+		pr_warn("acc_read: USB request already handled/freed");
+		r = -EINVAL;
+		goto done;
+	}
+
	/*
	 * Calculate the data length by considering termination character.
	 * Then compansite the difference of rounding up to
@@ -1098,8 +1107,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)

	while ((req = req_get(dev, &dev->tx_idle)))
		acc_request_free(req, dev->ep_in);
-	for (i = 0; i < RX_REQ_MAX; i++)
+	for (i = 0; i < RX_REQ_MAX; i++) {
		acc_request_free(dev->rx_req[i], dev->ep_out);
+		dev->rx_req[i] = NULL;
+	}

	acc_hid_unbind(dev);
 }
@@ -168,6 +168,7 @@ struct f2fs_mount_info {
 #define F2FS_FEATURE_SB_CHKSUM		0x0800
 #define F2FS_FEATURE_CASEFOLD		0x1000
 #define F2FS_FEATURE_COMPRESSION	0x2000
+#define F2FS_FEATURE_RO			0x4000
 
 #define __F2FS_HAS_FEATURE(raw_super, mask)	\
	((raw_super->feature & cpu_to_le32(mask)) != 0)
@@ -991,6 +992,7 @@ static inline void set_new_dnode(struct dnode_of_data *dn, struct inode *inode,
 */
 #define NR_CURSEG_DATA_TYPE	(3)
 #define NR_CURSEG_NODE_TYPE	(3)
+#define NR_CURSEG_RO_TYPE	(2)
 #define NR_CURSEG_TYPE	(NR_CURSEG_DATA_TYPE + NR_CURSEG_NODE_TYPE)
 
 enum {
@@ -3968,6 +3970,7 @@ F2FS_FEATURE_FUNCS(verity, VERITY);
 F2FS_FEATURE_FUNCS(sb_chksum, SB_CHKSUM);
 F2FS_FEATURE_FUNCS(casefold, CASEFOLD);
 F2FS_FEATURE_FUNCS(compression, COMPRESSION);
+F2FS_FEATURE_FUNCS(readonly, RO);
 
 #ifdef CONFIG_BLK_DEV_ZONED
 static inline bool f2fs_blkz_is_seq(struct f2fs_sb_info *sbi, int devi,
@@ -68,6 +68,10 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf)
		goto err;
	}

+	err = f2fs_convert_inline_inode(inode);
+	if (err)
+		goto err;
+
 #ifdef CONFIG_F2FS_FS_COMPRESSION
	if (f2fs_compressed_file(inode)) {
		int ret = f2fs_is_compressed_cluster(inode, page->index);
@@ -515,7 +519,6 @@ static loff_t f2fs_llseek(struct file *file, loff_t offset, int whence)
 static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
	struct inode *inode = file_inode(file);
-	int err;

	if (unlikely(f2fs_cp_error(F2FS_I_SB(inode))))
		return -EIO;
@@ -523,11 +526,6 @@ static int f2fs_file_mmap(struct file *file, struct vm_area_struct *vma)
	if (!f2fs_is_compress_backend_ready(inode))
		return -EOPNOTSUPP;

-	/* we don't need to use inline_data strictly */
-	err = f2fs_convert_inline_inode(inode);
-	if (err)
-		return err;
-
	file_accessed(file);
	vma->vm_ops = &f2fs_file_vm_ops;
	set_inode_flag(inode, FI_MMAP_FILE);
@@ -4372,6 +4372,10 @@ static int sanity_check_curseg(struct f2fs_sb_info *sbi)
		struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
		unsigned int blkofs = curseg->next_blkoff;

+		if (f2fs_sb_has_readonly(sbi) &&
+			i != CURSEG_HOT_DATA && i != CURSEG_HOT_NODE)
+			continue;
+
		if (f2fs_test_bit(blkofs, se->cur_valid_map))
			goto out;

@@ -16,6 +16,7 @@
 #define DEF_MAX_RECLAIM_PREFREE_SEGMENTS	4096	/* 8GB in maximum */
 
 #define F2FS_MIN_SEGMENTS	9 /* SB + 2 (CP + SIT + NAT) + SSA + MAIN */
+#define F2FS_MIN_META_SEGMENTS	8 /* SB + 2 (CP + SIT + NAT) + SSA */
 
 /* L: Logical segment # in volume, R: Relative segment # in main area */
 #define GET_L2R_SEGNO(free_i, segno)	((segno) - (free_i)->start_segno)
@@ -469,7 +469,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
	int ret;

	if (!options)
-		return 0;
+		goto default_check;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
@@ -932,6 +932,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
			return -EINVAL;
		}
	}
+default_check:
 #ifdef CONFIG_QUOTA
	if (f2fs_check_quota_options(sbi))
		return -EINVAL;
@@ -993,6 +994,11 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
	 */
	if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
		F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
+
+	if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
+		f2fs_err(sbi, "Allow to mount readonly mode only");
+		return -EROFS;
+	}
	return 0;
 }

@@ -1626,7 +1632,11 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
 static void default_options(struct f2fs_sb_info *sbi)
 {
	/* init some FS parameters */
-	F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
+	if (f2fs_sb_has_readonly(sbi))
+		F2FS_OPTION(sbi).active_logs = NR_CURSEG_RO_TYPE;
+	else
+		F2FS_OPTION(sbi).active_logs = NR_CURSEG_TYPE;
+
	F2FS_OPTION(sbi).inline_xattr_size = DEFAULT_INLINE_XATTR_ADDRS;
	F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
	F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
@@ -1805,6 +1815,11 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
	if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
		goto skip;

+	if (f2fs_sb_has_readonly(sbi) && !(*flags & SB_RDONLY)) {
+		err = -EROFS;
+		goto restore_opts;
+	}
+
 #ifdef CONFIG_QUOTA
	if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
		err = dquot_suspend(sb, -1);
@@ -2720,7 +2735,7 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
 static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
				struct buffer_head *bh)
 {
-	block_t segment_count, segs_per_sec, secs_per_zone;
+	block_t segment_count, segs_per_sec, secs_per_zone, segment_count_main;
	block_t total_sections, blocks_per_seg;
	struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
					(bh->b_data + F2FS_SUPER_OFFSET);
@@ -2790,6 +2805,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
	}

	segment_count = le32_to_cpu(raw_super->segment_count);
+	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
	segs_per_sec = le32_to_cpu(raw_super->segs_per_sec);
	secs_per_zone = le32_to_cpu(raw_super->secs_per_zone);
	total_sections = le32_to_cpu(raw_super->section_count);
@@ -2803,8 +2819,7 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
		return -EFSCORRUPTED;
	}

-	if (total_sections > segment_count ||
-			total_sections < F2FS_MIN_SEGMENTS ||
+	if (total_sections > segment_count_main || total_sections < 1 ||
			segs_per_sec > segment_count || !segs_per_sec) {
		f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
			  segment_count, total_sections, segs_per_sec);
@@ -2911,14 +2926,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
	ovp_segments = le32_to_cpu(ckpt->overprov_segment_count);
	reserved_segments = le32_to_cpu(ckpt->rsvd_segment_count);

-	if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
+	if (!f2fs_sb_has_readonly(sbi) &&
+		unlikely(fsmeta < F2FS_MIN_META_SEGMENTS ||
			ovp_segments == 0 || reserved_segments == 0)) {
		f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
		return 1;
	}

	user_block_count = le64_to_cpu(ckpt->user_block_count);
-	segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+	segment_count_main = le32_to_cpu(raw_super->segment_count_main) +
+			(f2fs_sb_has_readonly(sbi) ? 1 : 0);
	log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
	if (!user_block_count || user_block_count >=
			segment_count_main << log_blocks_per_seg) {
@@ -2949,6 +2965,10 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
		if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
			return 1;
+
+		if (f2fs_sb_has_readonly(sbi))
+			goto check_data;
+
		for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
				le32_to_cpu(ckpt->cur_node_segno[j])) {
@@ -2959,10 +2979,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
			}
		}
	}
+check_data:
	for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
		if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
			le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
			return 1;
+
+		if (f2fs_sb_has_readonly(sbi))
+			goto skip_cross;
+
		for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
			if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
				le32_to_cpu(ckpt->cur_data_segno[j])) {
@@ -2984,7 +3009,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
			}
		}
	}
-
+skip_cross:
	sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
	nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);

@@ -145,6 +145,9 @@ static ssize_t features_show(struct f2fs_attr *a,
	if (f2fs_sb_has_casefold(sbi))
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
				len ? ", " : "", "casefold");
+	if (f2fs_sb_has_readonly(sbi))
+		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
+				len ? ", " : "", "readonly");
	if (f2fs_sb_has_compression(sbi))
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%s",
				len ? ", " : "", "compression");
@@ -457,6 +460,7 @@ enum feat_id {
	FEAT_SB_CHECKSUM,
	FEAT_CASEFOLD,
	FEAT_COMPRESSION,
+	FEAT_RO,
	FEAT_TEST_DUMMY_ENCRYPTION_V2,
	FEAT_ENCRYPTED_CASEFOLD,
 };
@@ -479,6 +483,7 @@ static ssize_t f2fs_feature_show(struct f2fs_attr *a,
	case FEAT_SB_CHECKSUM:
	case FEAT_CASEFOLD:
	case FEAT_COMPRESSION:
+	case FEAT_RO:
	case FEAT_TEST_DUMMY_ENCRYPTION_V2:
	case FEAT_ENCRYPTED_CASEFOLD:
		return sprintf(buf, "supported\n");
@@ -604,6 +609,7 @@ F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
 #ifdef CONFIG_UNICODE
 F2FS_FEATURE_RO_ATTR(casefold, FEAT_CASEFOLD);
 #endif
+F2FS_FEATURE_RO_ATTR(readonly, FEAT_RO);
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 F2FS_FEATURE_RO_ATTR(compression, FEAT_COMPRESSION);
 #endif
@@ -697,6 +703,7 @@ static struct attribute *f2fs_feat_attrs[] = {
 #ifdef CONFIG_UNICODE
	ATTR_LIST(casefold),
 #endif
+	ATTR_LIST(readonly),
 #ifdef CONFIG_F2FS_FS_COMPRESSION
	ATTR_LIST(compression),
 #endif
@@ -1241,22 +1241,22 @@
 *
 * @binder_set_context_mgr:
 *	Check whether @mgr is allowed to be the binder context manager.
- *	@mgr contains the task_struct for the task being registered.
+ *	@mgr contains the struct cred for the current binder process.
 *	Return 0 if permission is granted.
 * @binder_transaction:
 *	Check whether @from is allowed to invoke a binder transaction call
 *	to @to.
- *	@from contains the task_struct for the sending task.
- *	@to contains the task_struct for the receiving task.
+ *	@from contains the struct cred for the sending process.
+ *	@to contains the struct cred for the receiving process.
 * @binder_transfer_binder:
 *	Check whether @from is allowed to transfer a binder reference to @to.
- *	@from contains the task_struct for the sending task.
- *	@to contains the task_struct for the receiving task.
+ *	@from contains the struct cred for the sending process.
+ *	@to contains the struct cred for the receiving process.
 * @binder_transfer_file:
 *	Check whether @from is allowed to transfer @file to @to.
- *	@from contains the task_struct for the sending task.
+ *	@from contains the struct cred for the sending process.
 *	@file contains the struct file being transferred.
- *	@to contains the task_struct for the receiving task.
+ *	@to contains the struct cred for the receiving process.
 *
 * @ptrace_access_check:
 *	Check permission before allowing the current process to trace the
@@ -1456,13 +1456,13 @@
 * @what: kernel feature being accessed
 */
 union security_list_options {
-	int (*binder_set_context_mgr)(struct task_struct *mgr);
-	int (*binder_transaction)(struct task_struct *from,
-					struct task_struct *to);
-	int (*binder_transfer_binder)(struct task_struct *from,
-					struct task_struct *to);
-	int (*binder_transfer_file)(struct task_struct *from,
-					struct task_struct *to,
+	int (*binder_set_context_mgr)(const struct cred *mgr);
+	int (*binder_transaction)(const struct cred *from,
+					const struct cred *to);
+	int (*binder_transfer_binder)(const struct cred *from,
+					const struct cred *to);
+	int (*binder_transfer_file)(const struct cred *from,
+					const struct cred *to,
					struct file *file);

	int (*ptrace_access_check)(struct task_struct *child,
@@ -249,13 +249,13 @@ extern int security_init(void);
 extern int early_security_init(void);
 
 /* Security operations */
-int security_binder_set_context_mgr(struct task_struct *mgr);
-int security_binder_transaction(struct task_struct *from,
-				struct task_struct *to);
-int security_binder_transfer_binder(struct task_struct *from,
-				    struct task_struct *to);
-int security_binder_transfer_file(struct task_struct *from,
-				  struct task_struct *to, struct file *file);
+int security_binder_set_context_mgr(const struct cred *mgr);
+int security_binder_transaction(const struct cred *from,
+				const struct cred *to);
+int security_binder_transfer_binder(const struct cred *from,
+				    const struct cred *to);
+int security_binder_transfer_file(const struct cred *from,
+				  const struct cred *to, struct file *file);
 int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
 int security_ptrace_traceme(struct task_struct *parent);
 int security_capget(struct task_struct *target,
@@ -481,25 +481,25 @@ static inline int early_security_init(void)
	return 0;
 }
 
-static inline int security_binder_set_context_mgr(struct task_struct *mgr)
+static inline int security_binder_set_context_mgr(const struct cred *mgr)
 {
	return 0;
 }
 
-static inline int security_binder_transaction(struct task_struct *from,
-					      struct task_struct *to)
+static inline int security_binder_transaction(const struct cred *from,
+					      const struct cred *to)
 {
	return 0;
 }
 
-static inline int security_binder_transfer_binder(struct task_struct *from,
-						  struct task_struct *to)
+static inline int security_binder_transfer_binder(const struct cred *from,
+						  const struct cred *to)
 {
	return 0;
 }
 
-static inline int security_binder_transfer_file(struct task_struct *from,
-						struct task_struct *to,
+static inline int security_binder_transfer_file(const struct cred *from,
+						const struct cred *to,
						struct file *file)
 {
	return 0;
@@ -985,6 +985,11 @@ static inline void security_transfer_creds(struct cred *new,
 {
 }
 
+static inline void security_cred_getsecid(const struct cred *c, u32 *secid)
+{
+	*secid = 0;
+}
+
 static inline int security_kernel_act_as(struct cred *cred, u32 secid)
 {
	return 0;
@@ -265,6 +265,25 @@ struct binder_node_info_for_ref {
	__u32            reserved3;
 };
 
+struct binder_freeze_info {
+	__u32            pid;
+	__u32            enable;
+	__u32            timeout_ms;
+};
+
+struct binder_frozen_status_info {
+	__u32            pid;
+
+	/* process received sync transactions since last frozen
+	 * bit 0: received sync transaction after being frozen
+	 * bit 1: new pending sync transaction during freezing
+	 */
+	__u32            sync_recv;
+
+	/* process received async transactions since last frozen */
+	__u32            async_recv;
+};
+
 #define BINDER_WRITE_READ		_IOWR('b', 1, struct binder_write_read)
 #define BINDER_SET_IDLE_TIMEOUT		_IOW('b', 3, __s64)
 #define BINDER_SET_MAX_THREADS		_IOW('b', 5, __u32)
@@ -275,6 +294,8 @@ struct binder_node_info_for_ref {
 #define BINDER_GET_NODE_DEBUG_INFO	_IOWR('b', 11, struct binder_node_debug_info)
 #define BINDER_GET_NODE_INFO_FOR_REF	_IOWR('b', 12, struct binder_node_info_for_ref)
 #define BINDER_SET_CONTEXT_MGR_EXT	_IOW('b', 13, struct flat_binder_object)
+#define BINDER_FREEZE			_IOW('b', 14, struct binder_freeze_info)
+#define BINDER_GET_FROZEN_INFO		_IOWR('b', 15, struct binder_frozen_status_info)
 
 /*
 * NOTE: Two special error codes you should check for when calling
@@ -456,6 +477,12 @@ enum binder_driver_return_protocol {
	 * The the last transaction (either a bcTRANSACTION or
	 * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
	 */
+
+	BR_FROZEN_REPLY = _IO('r', 18),
+	/*
+	 * The target of the last transaction (either a bcTRANSACTION or
+	 * a bcATTEMPT_ACQUIRE) is frozen. No parameters.
+	 */
 };
 
 enum binder_driver_command_protocol {
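The sync_recv field added above doubles as a small bit mask once the freeze-race fix is applied: bit 0 reports a synchronous transaction received while frozen, bit 1 reports a transaction that was still pending when the freeze completed. A minimal sketch of how a monitor might read and decode it follows; the struct layout and ioctl number come from the hunk above, while the helper name and reporting format are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Local copy of the uapi addition from this merge (binder_frozen_status_info). */
struct binder_frozen_status_info {
	uint32_t pid;
	uint32_t sync_recv;  /* bit 0: sync txn while frozen, bit 1: txn pending */
	uint32_t async_recv; /* async txn received since last frozen */
};

#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)

/* Hypothetical helper: report what a frozen process received since it was
 * last frozen. Returns 0 on success, -1 with errno set otherwise. */
int report_frozen_state(int binder_fd, uint32_t pid)
{
	struct binder_frozen_status_info info = { .pid = pid };

	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) < 0)
		return -1;

	printf("pid %u: sync=%u pending=%u async=%u\n", pid,
	       info.sync_recv & 0x1,        /* bit 0 */
	       (info.sync_recv >> 1) & 0x1, /* bit 1 */
	       info.async_recv & 0x1);
	return 0;
}

Called with a binder fd (for instance the one opened in the earlier sketch), this pairs naturally with a BINDER_FREEZE that returned -EAGAIN: bit 1 tells the caller whether the freeze raced with a transaction that still needs a reply.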
@@ -41,8 +41,6 @@ if test $# -gt 0; then
		kmi_generation=$1
		[ $(expr $kmi_generation : '^[0-9]\+$') -eq 0 ] && usage
		shift
-	else
-		usage
	fi
 fi
 if test $# -gt 0 -o ! -d "$srctree"; then
@@ -69,6 +67,8 @@ scm_version()
 
	if [ -n "$android_release" ] && [ -n "$kmi_generation" ]; then
		printf '%s' "-$android_release-$kmi_generation"
+	elif [ -n "$android_release" ]; then
+		printf '%s' "-$android_release"
	fi
 
	# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
@@ -670,25 +670,25 @@ static void __init lsm_early_task(struct task_struct *task)
 
 /* Security operations */
 
-int security_binder_set_context_mgr(struct task_struct *mgr)
+int security_binder_set_context_mgr(const struct cred *mgr)
 {
	return call_int_hook(binder_set_context_mgr, 0, mgr);
 }
 
-int security_binder_transaction(struct task_struct *from,
-				struct task_struct *to)
+int security_binder_transaction(const struct cred *from,
+				const struct cred *to)
 {
	return call_int_hook(binder_transaction, 0, from, to);
 }
 
-int security_binder_transfer_binder(struct task_struct *from,
-				    struct task_struct *to)
+int security_binder_transfer_binder(const struct cred *from,
+				    const struct cred *to)
 {
	return call_int_hook(binder_transfer_binder, 0, from, to);
 }
 
-int security_binder_transfer_file(struct task_struct *from,
-				  struct task_struct *to, struct file *file)
+int security_binder_transfer_file(const struct cred *from,
+				  const struct cred *to, struct file *file)
 {
	return call_int_hook(binder_transfer_file, 0, from, to, file);
 }
@@ -2055,22 +2055,19 @@ static inline u32 open_file_to_av(struct file *file)
 
 /* Hook functions begin here. */
 
-static int selinux_binder_set_context_mgr(struct task_struct *mgr)
+static int selinux_binder_set_context_mgr(const struct cred *mgr)
 {
-	u32 mysid = current_sid();
-	u32 mgrsid = task_sid(mgr);
-
	return avc_has_perm(&selinux_state,
-			    mysid, mgrsid, SECCLASS_BINDER,
+			    current_sid(), cred_sid(mgr), SECCLASS_BINDER,
			    BINDER__SET_CONTEXT_MGR, NULL);
 }
 
-static int selinux_binder_transaction(struct task_struct *from,
-				      struct task_struct *to)
+static int selinux_binder_transaction(const struct cred *from,
+				      const struct cred *to)
 {
	u32 mysid = current_sid();
-	u32 fromsid = task_sid(from);
-	u32 tosid = task_sid(to);
+	u32 fromsid = cred_sid(from);
+	u32 tosid = cred_sid(to);
	int rc;
 
	if (mysid != fromsid) {
@@ -2081,27 +2078,24 @@ static int selinux_binder_transaction(struct task_struct *from,
		return rc;
	}
 
-	return avc_has_perm(&selinux_state,
-			    fromsid, tosid, SECCLASS_BINDER, BINDER__CALL,
-			    NULL);
+	return avc_has_perm(&selinux_state, fromsid, tosid,
+			    SECCLASS_BINDER, BINDER__CALL, NULL);
 }
 
-static int selinux_binder_transfer_binder(struct task_struct *from,
-					  struct task_struct *to)
+static int selinux_binder_transfer_binder(const struct cred *from,
+					  const struct cred *to)
 {
-	u32 fromsid = task_sid(from);
-	u32 tosid = task_sid(to);
-
	return avc_has_perm(&selinux_state,
-			    fromsid, tosid, SECCLASS_BINDER, BINDER__TRANSFER,
+			    cred_sid(from), cred_sid(to),
+			    SECCLASS_BINDER, BINDER__TRANSFER,
			    NULL);
 }
 
-static int selinux_binder_transfer_file(struct task_struct *from,
-					struct task_struct *to,
+static int selinux_binder_transfer_file(const struct cred *from,
+					const struct cred *to,
					struct file *file)
 {
-	u32 sid = task_sid(to);
+	u32 sid = cred_sid(to);
	struct file_security_struct *fsec = selinux_file(file);
	struct dentry *dentry = file->f_path.dentry;
	struct inode_security_struct *isec;