author     Ben Fennema <fennema@google.com>                                  2016-11-23 00:07:54 +0000
committer  Android Partner Code Review <android-gerrit-partner@google.com>   2016-11-23 00:07:55 +0000
commit     11eaf99b7d9a15433b79931a9c34821656c062a2 (patch)
tree       64deaf23a3ca94a5af300ab4aee6e23145677216
parent     36b716a02663ec35d140ff28d4ec889cf65e3bf4 (diff)
parent     3b4330bfb0222fcca7bca4ff58c0cedd9036027a (diff)
Merge changes Ia2946d6b,I10a0c2aa,I0667f1f3,I548a200f,Iebc150f6, ... into android-mediatek-pike-3.10
* changes:
netfilter: Change %p to %pK in debug messages
ion: blacklist %p kptr_restrict
ASoC: check for null function pointer for dummy device read/write, take 2
drivers: video: Add bounds checking in fb_cmap_to_user
binder: blacklist %p kptr_restrict
net: ping: Fix stack buffer overflow in ping_common_sendmsg()
UPSTREAM: KEYS: Fix crash when attempt to garbage collect an uninstantiated keyring
UPSTREAM: KEYS: Fix race between key destruction and finding a keyring by name
perf: protect group_leader from races that cause ctx double-free
BACKPORT: perf: Fix event->ctx locking
UPSTREAM: staging/android/ion : fix a race condition in the ion driver
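Several of the merged changes replace %p with %pK in printk-style format strings. %pK honors the kptr_restrict sysctl: with kptr_restrict >= 1, readers without CAP_SYSLOG see zeroed addresses, so debugfs and kernel-log output stop leaking pointer values that would help defeat KASLR. Below is a minimal userspace sketch of the observable effect — the procfs paths are real, but the program itself is illustrative and not part of this merge:

/* Hypothetical demo, not part of this patch set: shows the effect of
 * kptr_restrict on %pK output as seen through /proc/kallsyms. */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("kptr_restrict = %s", line);
	if (f)
		fclose(f);

	/* With kptr_restrict >= 1, an unprivileged reader sees the
	 * %pK-formatted symbol addresses below as all zeros. */
	f = fopen("/proc/kallsyms", "r");
	if (f) {
		for (int i = 0; i < 3 && fgets(line, sizeof(line), f); i++)
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}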
-rw-r--r--              drivers/android/binder.c           |  36
-rwxr-xr-x[-rw-r--r--]  drivers/staging/android/ion/ion.c  |  81
-rw-r--r--              drivers/video/fbcmap.c             |   3
-rw-r--r--              include/linux/perf_event.h         |   6
-rw-r--r--              kernel/events/core.c               | 278
-rw-r--r--              net/ipv4/ping.c                    |   2
-rw-r--r--              net/netfilter/nf_conntrack_core.c  |  20
-rw-r--r--              security/keys/gc.c                 |  10
-rw-r--r--              sound/soc/soc-core.c               |   4
9 files changed, 341 insertions(+), 99 deletions(-)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c3e5e71793ac..a8ddaa0152ce 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -478,7 +478,7 @@ static void binder_insert_free_buffer(struct binder_proc *proc,
 	new_buffer_size = binder_buffer_size(proc, new_buffer);
 
 	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: add free buffer, size %zd, at %p\n",
+		     "%d: add free buffer, size %zd, at %pK\n",
 		      proc->pid, new_buffer_size, new_buffer);
 
 	while (*p) {
@@ -557,7 +557,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	struct mm_struct *mm;
 
 	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: %s pages %p-%p\n", proc->pid,
+		     "%d: %s pages %pK-%pK\n", proc->pid,
 		     allocate ? "allocate" : "free", start, end);
 
 	if (end <= start)
@@ -598,7 +598,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		BUG_ON(*page);
 		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
 		if (*page == NULL) {
-			pr_err("%d: binder_alloc_buf failed for page at %p\n",
+			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
 			       proc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
@@ -607,7 +607,7 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		page_array_ptr = page;
 		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
 		if (ret) {
-			pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
+			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
 			       proc->pid, page_addr);
 			goto err_map_kernel_failed;
 		}
@@ -711,7 +711,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 	}
 
 	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
+		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
 		      proc->pid, size, buffer, buffer_size);
 
 	has_page_addr =
@@ -741,7 +741,7 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 		binder_insert_free_buffer(proc, new_buffer);
 	}
 	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_alloc_buf size %zd got %p\n",
+		     "%d: binder_alloc_buf size %zd got %pK\n",
 		      proc->pid, size, buffer);
 	buffer->data_size = data_size;
 	buffer->offsets_size = offsets_size;
@@ -781,7 +781,7 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 		if (buffer_end_page(prev) == buffer_end_page(buffer))
 			free_page_end = 0;
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p share page with %p\n",
+			     "%d: merge free, buffer %pK share page with %pK\n",
 			      proc->pid, buffer, prev);
 	}
@@ -794,14 +794,14 @@ static void binder_delete_free_buffer(struct binder_proc *proc,
 			    buffer_start_page(buffer))
 				free_page_start = 0;
 			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%d: merge free, buffer %p share page with %p\n",
+				     "%d: merge free, buffer %pK share page with %pK\n",
 				      proc->pid, buffer, prev);
 		}
 	}
 	list_del(&buffer->entry);
 	if (free_page_start || free_page_end) {
 		binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-			     "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
+			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
 			      proc->pid, buffer, free_page_start ? "" : " end",
 			      free_page_end ? "" : " start", prev, next);
 		binder_update_page_range(proc, 0, free_page_start ?
@@ -822,7 +822,7 @@ static void binder_free_buf(struct binder_proc *proc,
 		ALIGN(buffer->offsets_size, sizeof(void *));
 
 	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
+		     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
 		      proc->pid, buffer, size, buffer_size);
 
 	BUG_ON(buffer->free);
@@ -1252,7 +1252,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 	int debug_id = buffer->debug_id;
 
 	binder_debug(BINDER_DEBUG_TRANSACTION,
-		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
+		     "%d buffer release %d, size %zd-%zd, failed at %pK\n",
 		     proc->pid, buffer->debug_id,
 		     buffer->data_size, buffer->offsets_size, failed_at);
@@ -2102,7 +2102,7 @@ static int binder_thread_write(struct binder_proc *proc,
 				}
 			}
 			binder_debug(BINDER_DEBUG_DEAD_BINDER,
-				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+				     "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
 				     proc->pid, thread->pid, (u64)cookie, death);
 			if (death == NULL) {
@@ -2905,7 +2905,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 #ifdef CONFIG_CPU_CACHE_VIPT
 	if (cache_is_vipt_aliasing()) {
 		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-			pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+			pr_info("binder_mmap: %d %lx-%lx maps %pK bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
 			vma->vm_start += PAGE_SIZE;
 		}
 	}
@@ -2937,7 +2937,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 	proc->vma = vma;
 	proc->vma_vm_mm = vma->vm_mm;
 
-	/*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
+	/*pr_info("binder_mmap: %d %lx-%lx maps %pK\n",
 		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
 
 	return 0;
@@ -3163,7 +3163,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 			page_addr = proc->buffer + i * PAGE_SIZE;
 			binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-				     "%s: %d: page %d at %p not freed\n",
+				     "%s: %d: page %d at %pK not freed\n",
 				     __func__, proc->pid, i, page_addr);
 			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
 			__free_page(proc->pages[i]);
@@ -3242,7 +3242,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
 				     struct binder_transaction *t)
 {
 	seq_printf(m,
-		   "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
@@ -3256,7 +3256,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
 	if (t->buffer->target_node)
 		seq_printf(m, " node %d", t->buffer->target_node->debug_id);
-	seq_printf(m, " size %zd:%zd data %p\n",
+	seq_printf(m, " size %zd:%zd data %pK\n",
 		   t->buffer->data_size, t->buffer->offsets_size,
 		   t->buffer->data);
 }
@@ -3264,7 +3264,7 @@ static void print_binder_transaction(struct seq_file *m, const char *prefix,
 static void print_binder_buffer(struct seq_file *m, const char *prefix,
 				struct binder_buffer *buffer)
 {
-	seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+	seq_printf(m, "%s %d: %pK size %zd:%zd %s\n",
 		   prefix, buffer->debug_id, buffer->data,
 		   buffer->data_size, buffer->offsets_size,
 		   buffer->transaction ? "active" : "delivered");
"active" : "delivered"); diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c index f4fa3c9fc09f..80b045c2dc84 100644..100755 --- a/drivers/staging/android/ion/ion.c +++ b/drivers/staging/android/ion/ion.c @@ -445,13 +445,22 @@ static void ion_handle_get(struct ion_handle *handle) kref_get(&handle->ref); } -static int ion_handle_put(struct ion_handle *handle) +static int ion_handle_put_nolock(struct ion_handle *handle) +{ + int ret; + + ret = kref_put(&handle->ref, ion_handle_destroy); + + return ret; +} + +int ion_handle_put(struct ion_handle *handle) { struct ion_client *client = handle->client; int ret; mutex_lock(&client->lock); - ret = kref_put(&handle->ref, ion_handle_destroy); + ret = ion_handle_put_nolock(handle); mutex_unlock(&client->lock); return ret; @@ -475,20 +484,30 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client, return ERR_PTR(-EINVAL); } -static struct ion_handle *ion_handle_get_by_id(struct ion_client *client, +static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, int id) { struct ion_handle *handle; - mutex_lock(&client->lock); handle = idr_find(&client->idr, id); if (handle) ion_handle_get(handle); - mutex_unlock(&client->lock); return handle ? handle : ERR_PTR(-EINVAL); } +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id) +{ + struct ion_handle *handle; + + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, id); + mutex_unlock(&client->lock); + + return handle; +} + static bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { @@ -614,26 +633,30 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len, } EXPORT_SYMBOL(ion_alloc); -void __ion_free(struct ion_client *client, struct ion_handle *handle, int from_kern) +static void __ion_free_nolock(struct ion_client *client, struct ion_handle *handle, int from_kern) { bool valid_handle; BUG_ON(client != handle->client); - mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); - mutex_unlock(&client->lock); return; } - mutex_unlock(&client->lock); - - if(from_kern) + if (from_kern) ion_debug_kern_rec(client, handle->buffer, NULL, ION_FUNCTION_FREE, 0, 0, 0, 0); + ion_handle_put_nolock(handle); +} + +static void __ion_free(struct ion_client *client, struct ion_handle *handle, int from_kern) +{ + BUG_ON(client != handle->client); - ion_handle_put(handle); + mutex_lock(&client->lock); + __ion_free_nolock(client, handle, from_kern); + mutex_unlock(&client->lock); } void ion_free(struct ion_client *client, struct ion_handle *handle) @@ -822,7 +845,7 @@ static int ion_debug_client_show(struct seq_file *s, void *unused) names[id] = buffer->heap->name; sizes[id] += buffer->size; - seq_printf(s, "%16.s %3d %8zu %3d %p %p\n", buffer->heap->name, + seq_printf(s, "%16.s %3d %8zu %3d %pK %pK\n", buffer->heap->name, client->pid, buffer->size, buffer->handle_count, handle, buffer); } mutex_unlock(&client->lock); @@ -988,7 +1011,7 @@ void __ion_client_destroy(struct ion_client *client, int from_kern) struct ion_handle *handle = rb_entry(n, struct ion_handle, node); mutex_lock(&client->lock); - IONMSG("warning: release handle @ client destory: handle=%p, buf=%p, ref=%d, size=%ld, kmap=%d\n", + IONMSG("warning: release handle @ client destory: handle=%pK, buf=%pK, ref=%d, size=%ld, kmap=%d\n", handle, handle->buffer, handle->buffer->ref, handle->buffer->size, 
handle->buffer->kmap_cnt); ion_handle_destroy(&handle->ref); mutex_unlock(&client->lock); @@ -1145,7 +1168,7 @@ static void ion_vm_open(struct vm_area_struct *vma) mutex_lock(&buffer->lock); list_add(&vma_list->list, &buffer->vmas); mutex_unlock(&buffer->lock); - pr_debug("%s: adding %p\n", __func__, vma); + pr_debug("%s: adding %pK\n", __func__, vma); } static void ion_vm_close(struct vm_area_struct *vma) @@ -1160,7 +1183,7 @@ static void ion_vm_close(struct vm_area_struct *vma) continue; list_del(&vma_list->list); kfree(vma_list); - pr_debug("%s: deleting %p\n", __func__, vma); + pr_debug("%s: deleting %pK\n", __func__, vma); break; } mutex_unlock(&buffer->lock); @@ -1341,7 +1364,7 @@ struct ion_handle *__ion_import_dma_buf(struct ion_client *client, int fd, int f dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) { - IONMSG("ion_import: dma_buf_get fail fd=%d ret=0x%p\n", fd, dmabuf); + IONMSG("ion_import: dma_buf_get fail fd=%d ret=0x%pK\n", fd, dmabuf); return ERR_PTR(PTR_ERR(dmabuf)); } /* if this memory came from ion */ @@ -1479,11 +1502,15 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_handle *handle; - handle = ion_handle_get_by_id(client, data.handle.handle); - if (IS_ERR(handle)) + mutex_lock(&client->lock); + handle = ion_handle_get_by_id_nolock(client, data.handle.handle); + if (IS_ERR(handle)) { + mutex_unlock(&client->lock); return PTR_ERR(handle); - __ion_free(client, handle, 0); - ion_handle_put(handle); + } + __ion_free_nolock(client, handle, 0); + ion_handle_put_nolock(handle); + mutex_unlock(&client->lock); break; } case ION_IOC_SHARE: @@ -1616,10 +1643,10 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused) char task_comm[TASK_COMM_LEN]; get_task_comm(task_comm, client->task); - seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%p\n", task_comm, + seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%pK\n", task_comm, client->dbg_name, client->pid, size, client); } else { - seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%p\n", client->name, + seq_printf(s, "%16.s(%16.s) %16u %16zu 0x%pK\n", client->name, "from_kernel", client->pid, size, client); } } @@ -2270,7 +2297,7 @@ static int ion_debug_dbcl_show(struct seq_file *s, void *unused) buffer_count++; if(buffer_count == 1) { - seq_printf(s, "%8s[%2d] buffer: 0x%p buffer structure adr: 0x%p size(%d)\n", "buffer", buffer_cnt++, buf_rec->buffer, buf_rec->buffer_address, buf_rec->buffer->size); + seq_printf(s, "%8s[%2d] buffer: 0x%pK buffer structure adr: 0x%pK size(%d)\n", "buffer", buffer_cnt++, buf_rec->buffer, buf_rec->buffer_address, buf_rec->buffer->size); } seq_printf(s, "%s\n"," <BUFFER_ALLOCATION_LIST>"); seq_printf(s, "%s [0x%x] %10s [%d] (%s [%d])\n"," client", @@ -2580,7 +2607,7 @@ static int ion_debug_dbis_show(struct seq_file *s, void *unused) /* All clients */ for (cn = rb_first(&dev->clients); cn; cn = rb_next(cn)) { client = rb_entry(cn, struct ion_client, node); - seq_printf(s, "\n%8s[%2d] 0x%p PID[%d]\n", "client", client_cnt++, client, client->pid); + seq_printf(s, "\n%8s[%2d] 0x%pK PID[%d]\n", "client", client_cnt++, client, client->pid); mutex_lock(&client->lock); /* All client-handles */ for (hn = rb_first(&client->handles); hn; hn = rb_next(hn)) { @@ -2601,7 +2628,7 @@ static int ion_debug_dbis_show(struct seq_file *s, void *unused) client_cnt = 0; while(client_rec != NULL) { - seq_printf(s, "\n[%2d]%s: fd[%d] 0x%p PID[%d] GROUP_PID[%d]\n",client_cnt++,"client",client_rec->fd, 
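The ion change above closes a race: ION_IOC_FREE used to drop client->lock between looking up the handle and freeing it, so a second, concurrent free of the same id could destroy the handle in the gap. The fix splits each helper into a locked wrapper and a _nolock body, and the ioctl path now holds the lock across the whole lookup/free/put sequence. A self-contained sketch of that pattern — pthreads and toy names, not the ion API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct handle { int refs; };

struct client {
	pthread_mutex_t lock;
	struct handle *h;		/* toy one-slot "handle table" */
};

/* _nolock convention: caller already holds client->lock. */
static struct handle *handle_get_nolock(struct client *c)
{
	if (c->h)
		c->h->refs++;
	return c->h;
}

static void handle_put_nolock(struct client *c)
{
	if (c->h && --c->h->refs == 0) {
		free(c->h);
		c->h = NULL;		/* destroyed; later lookups fail */
	}
}

/*
 * Mirrors the fixed ION_IOC_FREE path: one critical section covers
 * lookup, free and put, so a concurrent free of the same handle can
 * no longer destroy it between those steps.
 */
static void free_handle(struct client *c)
{
	pthread_mutex_lock(&c->lock);
	if (handle_get_nolock(c)) {
		handle_put_nolock(c);	/* drop the lookup reference */
		handle_put_nolock(c);	/* drop the original reference */
	}
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct client c = { PTHREAD_MUTEX_INITIALIZER, malloc(sizeof(struct handle)) };

	c.h->refs = 1;
	free_handle(&c);
	printf("slot after free: %p\n", (void *)c.h);	/* (nil) */
	return 0;
}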
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 5c3960da755a..5ac72814c8eb 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -190,6 +190,9 @@ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
 	int tooff = 0, fromoff = 0;
 	int size;
 
+	if ((int)(to->start) < 0)
+		return -EINVAL;
+
 	if (to->start > from->start)
 		fromoff = to->start - from->start;
 	else
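The fbcmap change is a bounds check: to->start arrives from userspace as an unsigned 32-bit value, and a huge value survives the existing comparisons only to poison the signed offset arithmetic that follows. Rejecting anything with the top bit set closes that off. A hypothetical userspace demo of the failing arithmetic (illustrative values, not kernel code):

/* Demo of why fb_cmap_to_user rejects huge cmap offsets: a 32-bit
 * start from userspace, stored into a signed int offset, goes
 * negative and corrupts the later size/offset math. */
#include <stdio.h>

int main(void)
{
	unsigned int from_start = 0;		/* kernel-side cmap */
	unsigned int to_start = 0xFFFFFF00u;	/* attacker-chosen */
	int fromoff = 0;

	if (to_start > from_start)
		fromoff = to_start - from_start;	/* wraps negative */

	printf("fromoff = %d\n", fromoff);	/* typically -256 */

	/* The merged check refuses such values up front: */
	if ((int)to_start < 0)
		puts("rejected with -EINVAL");
	return 0;
}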
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1a9b9118a05a..01a56a812bc1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -314,6 +314,12 @@ struct perf_event {
 	int				nr_siblings;
 	int				group_flags;
 	struct perf_event		*group_leader;
+
+	/*
+	 * Protect the pmu, attributes and context of a group leader.
+	 * Note: does not protect the pointer to the group_leader.
+	 */
+	struct mutex			group_leader_mutex;
 	struct pmu			*pmu;
 
 	enum perf_event_active_state	state;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 31ee544c50eb..6fdc987d1e12 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -796,6 +796,77 @@ static void put_ctx(struct perf_event_context *ctx)
 	}
 }
 
+/*
+ * Because of perf_event::ctx migration in sys_perf_event_open::move_group and
+ * perf_pmu_migrate_context() we need some magic.
+ *
+ * Those places that change perf_event::ctx will hold both
+ * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
+ *
+ * Lock ordering is by mutex address. There is one other site where
+ * perf_event_context::mutex nests and that is put_event(). But remember that
+ * that is a parent<->child context relation, and migration does not affect
+ * children, therefore these two orderings should not interact.
+ *
+ * The change in perf_event::ctx does not affect children (as claimed above)
+ * because the sys_perf_event_open() case will install a new event and break
+ * the ctx parent<->child relation, and perf_pmu_migrate_context() is only
+ * concerned with cpuctx and that doesn't have children.
+ *
+ * The places that change perf_event::ctx will issue:
+ *
+ *   perf_remove_from_context();
+ *   synchronize_rcu();
+ *   perf_install_in_context();
+ *
+ * to affect the change. The remove_from_context() + synchronize_rcu() should
+ * quiesce the event, after which we can install it in the new location. This
+ * means that only external vectors (perf_fops, prctl) can perturb the event
+ * while in transit. Therefore all such accessors should also acquire
+ * perf_event_context::mutex to serialize against this.
+ *
+ * However; because event->ctx can change while we're waiting to acquire
+ * ctx->mutex we must be careful and use the below perf_event_ctx_lock()
+ * function.
+ *
+ * Lock order:
+ *	task_struct::perf_event_mutex
+ *	  perf_event_context::mutex
+ *	    perf_event_context::lock
+ *	    perf_event::child_mutex;
+ *	    perf_event::mmap_mutex
+ *	    mmap_sem
+ */
+static struct perf_event_context *perf_event_ctx_lock(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+again:
+	rcu_read_lock();
+	ctx = ACCESS_ONCE(event->ctx);
+	if (!atomic_inc_not_zero(&ctx->refcount)) {
+		rcu_read_unlock();
+		goto again;
+	}
+	rcu_read_unlock();
+
+	mutex_lock(&ctx->mutex);
+	if (event->ctx != ctx) {
+		mutex_unlock(&ctx->mutex);
+		put_ctx(ctx);
+		goto again;
+	}
+
+	return ctx;
+}
+
+static void perf_event_ctx_unlock(struct perf_event *event,
+				  struct perf_event_context *ctx)
+{
+	mutex_unlock(&ctx->mutex);
+	put_ctx(ctx);
+}
+
 static void unclone_ctx(struct perf_event_context *ctx)
 {
 	if (ctx->parent_ctx) {
@@ -1475,7 +1546,7 @@ int __perf_event_disable(void *info)
  * is the current context on this CPU and preemption is disabled,
  * hence we can't get into perf_event_task_sched_out for this context.
  */
-void perf_event_disable(struct perf_event *event)
+static void _perf_event_disable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -1516,6 +1587,19 @@ retry:
 	}
 	raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * Strictly speaking kernel users cannot create groups and therefore this
+ * interface does not need the perf_event_ctx_lock() magic.
+ */
+void perf_event_disable(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+	ctx = perf_event_ctx_lock(event);
+	_perf_event_disable(event);
+	perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_disable);
 
 static void perf_set_shadow_time(struct perf_event *event,
@@ -1962,7 +2046,7 @@ unlock:
  * perf_event_for_each_child or perf_event_for_each as described
  * for perf_event_disable.
  */
-void perf_event_enable(struct perf_event *event)
+static void _perf_event_enable(struct perf_event *event)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
@@ -2018,9 +2102,21 @@ retry:
 out:
 	raw_spin_unlock_irq(&ctx->lock);
 }
+
+/*
+ * See perf_event_disable();
+ */
+void perf_event_enable(struct perf_event *event)
+{
+	struct perf_event_context *ctx;
+
+	ctx = perf_event_ctx_lock(event);
+	_perf_event_enable(event);
+	perf_event_ctx_unlock(event, ctx);
+}
 EXPORT_SYMBOL_GPL(perf_event_enable);
 
-int perf_event_refresh(struct perf_event *event, int refresh)
+static int _perf_event_refresh(struct perf_event *event, int refresh)
 {
 	/*
 	 * not supported on inherited events
@@ -2029,10 +2125,25 @@ int perf_event_refresh(struct perf_event *event, int refresh)
 		return -EINVAL;
 
 	atomic_add(refresh, &event->event_limit);
-	perf_event_enable(event);
+	_perf_event_enable(event);
 
 	return 0;
 }
+
+/*
+ * See perf_event_disable()
+ */
+int perf_event_refresh(struct perf_event *event, int refresh)
+{
+	struct perf_event_context *ctx;
+	int ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = _perf_event_refresh(event, refresh);
+	perf_event_ctx_unlock(event, ctx);
+
+	return ret;
+}
 EXPORT_SYMBOL_GPL(perf_event_refresh);
 
 static void ctx_sched_out(struct perf_event_context *ctx,
@@ -3167,7 +3278,16 @@ static void put_event(struct perf_event *event)
 	rcu_read_unlock();
 
 	if (owner) {
-		mutex_lock(&owner->perf_event_mutex);
+		/*
+		 * If we're here through perf_event_exit_task() we're already
+		 * holding ctx->mutex which would be an inversion wrt. the
+		 * normal lock order.
+		 *
+		 * However we can safely take this lock because its the child
+		 * ctx->mutex.
+		 */
+		mutex_lock_nested(&owner->perf_event_mutex, SINGLE_DEPTH_NESTING);
+
 		/*
 		 * We have to re-check the event->owner field, if it is cleared
 		 * we raced with perf_event_exit_task(), acquiring the mutex
@@ -3219,12 +3339,13 @@ static int perf_event_read_group(struct perf_event *event,
 				 u64 read_format, char __user *buf)
 {
 	struct perf_event *leader = event->group_leader, *sub;
-	int n = 0, size = 0, ret = -EFAULT;
 	struct perf_event_context *ctx = leader->ctx;
-	u64 values[5];
+	int n = 0, size = 0, ret;
 	u64 count, enabled, running;
+	u64 values[5];
+
+	lockdep_assert_held(&ctx->mutex);
 
-	mutex_lock(&ctx->mutex);
 	count = perf_event_read_value(leader, &enabled, &running);
 
 	values[n++] = 1 + leader->nr_siblings;
@@ -3239,7 +3360,7 @@ static int perf_event_read_group(struct perf_event *event,
 	size = n * sizeof(u64);
 
 	if (copy_to_user(buf, values, size))
-		goto unlock;
+		return -EFAULT;
 
 	ret = size;
 
@@ -3253,14 +3374,11 @@ static int perf_event_read_group(struct perf_event *event,
 		size = n * sizeof(u64);
 
 		if (copy_to_user(buf + ret, values, size)) {
-			ret = -EFAULT;
-			goto unlock;
+			return -EFAULT;
 		}
 
 		ret += size;
 	}
-unlock:
-	mutex_unlock(&ctx->mutex);
 
 	return ret;
 }
@@ -3319,8 +3437,14 @@ static ssize_t
 perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct perf_event *event = file->private_data;
+	struct perf_event_context *ctx;
+	int ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = perf_read_hw(event, buf, count);
+	perf_event_ctx_unlock(event, ctx);
 
-	return perf_read_hw(event, buf, count);
+	return ret;
 }
 
 static unsigned int perf_poll(struct file *file, poll_table *wait)
@@ -3344,7 +3468,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
 	return events;
 }
 
-static void perf_event_reset(struct perf_event *event)
+static void _perf_event_reset(struct perf_event *event)
 {
 	(void)perf_event_read(event);
 	local64_set(&event->count, 0);
@@ -3363,6 +3487,7 @@ static void perf_event_for_each_child(struct perf_event *event,
 	struct perf_event *child;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
+
 	mutex_lock(&event->child_mutex);
 	func(event);
 	list_for_each_entry(child, &event->child_list, child_list)
@@ -3376,14 +3501,13 @@ static void perf_event_for_each(struct perf_event *event,
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_event *sibling;
 
-	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
+	lockdep_assert_held(&ctx->mutex);
+
 	event = event->group_leader;
 
 	perf_event_for_each_child(event, func);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
 		perf_event_for_each_child(sibling, func);
-	mutex_unlock(&ctx->mutex);
 }
 
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
@@ -3439,25 +3563,24 @@ static int perf_event_set_output(struct perf_event *event,
 				 struct perf_event *output_event);
 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
 
-static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
 {
-	struct perf_event *event = file->private_data;
 	void (*func)(struct perf_event *);
 	u32 flags = arg;
 
 	switch (cmd) {
 	case PERF_EVENT_IOC_ENABLE:
-		func = perf_event_enable;
+		func = _perf_event_enable;
 		break;
 	case PERF_EVENT_IOC_DISABLE:
-		func = perf_event_disable;
+		func = _perf_event_disable;
 		break;
 	case PERF_EVENT_IOC_RESET:
-		func = perf_event_reset;
+		func = _perf_event_reset;
 		break;
 
 	case PERF_EVENT_IOC_REFRESH:
-		return perf_event_refresh(event, arg);
+		return _perf_event_refresh(event, arg);
 
 	case PERF_EVENT_IOC_PERIOD:
 		return perf_event_period(event, (u64 __user *)arg);
@@ -3495,13 +3618,49 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	return 0;
 }
 
+static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct perf_event *event = file->private_data;
+	struct perf_event_context *ctx;
+	long ret;
+
+	ctx = perf_event_ctx_lock(event);
+	ret = _perf_ioctl(event, cmd, arg);
+	perf_event_ctx_unlock(event, ctx);
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+			      unsigned long arg)
+{
+	switch (_IOC_NR(cmd)) {
+	case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+		/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
+		if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+			cmd &= ~IOCSIZE_MASK;
+			cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+		}
+		break;
+	}
+	return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
+	struct perf_event_context *ctx;
 	struct perf_event *event;
 
 	mutex_lock(&current->perf_event_mutex);
-	list_for_each_entry(event, &current->perf_event_list, owner_entry)
-		perf_event_for_each_child(event, perf_event_enable);
+	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+		ctx = perf_event_ctx_lock(event);
+		perf_event_for_each_child(event, _perf_event_enable);
+		perf_event_ctx_unlock(event, ctx);
+	}
 	mutex_unlock(&current->perf_event_mutex);
 
 	return 0;
@@ -3509,11 +3668,15 @@ int perf_event_task_enable(void)
 
 int perf_event_task_disable(void)
 {
+	struct perf_event_context *ctx;
 	struct perf_event *event;
 
 	mutex_lock(&current->perf_event_mutex);
-	list_for_each_entry(event, &current->perf_event_list, owner_entry)
-		perf_event_for_each_child(event, perf_event_disable);
+	list_for_each_entry(event, &current->perf_event_list, owner_entry) {
+		ctx = perf_event_ctx_lock(event);
+		perf_event_for_each_child(event, _perf_event_disable);
+		perf_event_ctx_unlock(event, ctx);
+	}
 	mutex_unlock(&current->perf_event_mutex);
 
 	return 0;
@@ -6345,6 +6508,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	if (!group_leader)
 		group_leader = event;
 
+	mutex_init(&event->group_leader_mutex);
 	mutex_init(&event->child_mutex);
 	INIT_LIST_HEAD(&event->child_list);
 
@@ -6650,6 +6814,15 @@ out:
 	return ret;
 }
 
+static void mutex_lock_double(struct mutex *a, struct mutex *b)
+{
+	if (b < a)
+		swap(a, b);
+
+	mutex_lock(a);
+	mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
+}
+
 /**
  * sys_perf_event_open - open a performance event, associate it to a task/cpu
  *
@@ -6665,7 +6838,7 @@ SYSCALL_DEFINE5(perf_event_open,
 	struct perf_event *group_leader = NULL, *output_event = NULL;
 	struct perf_event *event, *sibling;
 	struct perf_event_attr attr;
-	struct perf_event_context *ctx;
+	struct perf_event_context *ctx, *uninitialized_var(gctx);
 	struct file *event_file = NULL;
 	struct fd group = {NULL, 0};
 	struct task_struct *task = NULL;
@@ -6722,6 +6895,16 @@ SYSCALL_DEFINE5(perf_event_open,
 			group_leader = NULL;
 	}
 
+	/*
+	 * Take the group_leader's group_leader_mutex before observing
+	 * anything in the group leader that leads to changes in ctx,
+	 * many of which may be changing on another thread.
+	 * In particular, we want to take this lock before deciding
+	 * whether we need to move_group.
+	 */
+	if (group_leader)
+		mutex_lock(&group_leader->group_leader_mutex);
+
 	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
 		task = find_lively_task_by_vpid(pid);
 		if (IS_ERR(task)) {
@@ -6839,9 +7022,14 @@ SYSCALL_DEFINE5(perf_event_open,
 	}
 
 	if (move_group) {
-		struct perf_event_context *gctx = group_leader->ctx;
+		gctx = group_leader->ctx;
+
+		/*
+		 * See perf_event_ctx_lock() for comments on the details
+		 * of swizzling perf_event::ctx.
+		 */
+		mutex_lock_double(&gctx->mutex, &ctx->mutex);
 
-		mutex_lock(&gctx->mutex);
 		perf_remove_from_context(group_leader, false);
 
 		/*
@@ -6856,15 +7044,19 @@ SYSCALL_DEFINE5(perf_event_open,
 			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
-		mutex_unlock(&gctx->mutex);
-		put_ctx(gctx);
+	} else {
+		mutex_lock(&ctx->mutex);
 	}
 
 	WARN_ON_ONCE(ctx->parent_ctx);
-	mutex_lock(&ctx->mutex);
 
 	if (move_group) {
+		/*
+		 * Wait for everybody to stop referencing the events through
+		 * the old lists, before installing it on new lists.
+		 */
 		synchronize_rcu();
+
 		perf_install_in_context(ctx, group_leader, event->cpu);
 		get_ctx(ctx);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
@@ -6877,7 +7069,14 @@ SYSCALL_DEFINE5(perf_event_open,
 	perf_install_in_context(ctx, event, event->cpu);
 	++ctx->generation;
 	perf_unpin_context(ctx);
+
+	if (move_group) {
+		mutex_unlock(&gctx->mutex);
+		put_ctx(gctx);
+	}
 	mutex_unlock(&ctx->mutex);
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 
 	put_online_cpus();
 
@@ -6913,6 +7112,8 @@ err_task:
 	if (task)
 		put_task_struct(task);
 err_group_fd:
+	if (group_leader)
+		mutex_unlock(&group_leader->group_leader_mutex);
 	fdput(group);
 err_fd:
 	put_unused_fd(event_fd);
@@ -6979,18 +7180,20 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
 	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
 
-	mutex_lock(&src_ctx->mutex);
+	/*
+	 * See perf_event_ctx_lock() for comments on the details
+	 * of swizzling perf_event::ctx.
+	 */
+	mutex_lock_double(&src_ctx->mutex, &dst_ctx->mutex);
 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
 				 event_entry) {
 		perf_remove_from_context(event, false);
 		put_ctx(src_ctx);
 		list_add(&event->event_entry, &events);
 	}
-	mutex_unlock(&src_ctx->mutex);
 
 	synchronize_rcu();
 
-	mutex_lock(&dst_ctx->mutex);
 	list_for_each_entry_safe(event, tmp, &events, event_entry) {
 		list_del(&event->event_entry);
 		if (event->state >= PERF_EVENT_STATE_OFF)
@@ -6999,6 +7202,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 		get_ctx(dst_ctx);
 	}
 	mutex_unlock(&dst_ctx->mutex);
+	mutex_unlock(&src_ctx->mutex);
 }
 EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
 
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index e716ff9ee38a..20ed693c4b35 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -651,7 +651,7 @@ int ping_common_sendmsg(int family, struct msghdr *msg, size_t len,
 			void *user_icmph, size_t icmph_len)
 {
 	u8 type, code;
 
-	if (len > 0xFFFF)
+	if (len > 0xFFFF || len < icmph_len)
 		return -EMSGSIZE;
 
 	/*
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0283baedcdfb..fae881e8b8ef 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -188,7 +188,7 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
 static void
 clean_from_lists(struct nf_conn *ct)
 {
-	pr_debug("clean_from_lists(%p)\n", ct);
+	pr_debug("clean_from_lists(%pK)\n", ct);
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
 	hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);
@@ -203,7 +203,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	struct net *net = nf_ct_net(ct);
 	struct nf_conntrack_l4proto *l4proto;
 
-	pr_debug("destroy_conntrack(%p)\n", ct);
+	pr_debug("destroy_conntrack(%pK)\n", ct);
 	NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
 	NF_CT_ASSERT(!timer_pending(&ct->timeout));
@@ -234,7 +234,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	if (ct->master)
 		nf_ct_put(ct->master);
 
-	pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
+	pr_debug("destroy_conntrack: returning ct=%pK to slab\n", ct);
 	nf_conntrack_free(ct);
 }
@@ -496,7 +496,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	/* No external references means no one else could have confirmed us. */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
-	pr_debug("Confirming conntrack %p\n", ct);
+	pr_debug("Confirming conntrack %pK\n", ct);
 
 	spin_lock_bh(&nf_conntrack_lock);
@@ -823,7 +823,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	spin_lock_bh(&nf_conntrack_lock);
 	exp = nf_ct_find_expectation(net, zone, tuple);
 	if (exp) {
-		pr_debug("conntrack: expectation arrives ct=%p exp=%p\n",
+		pr_debug("conntrack: expectation arrives ct=%pK exp=%pK\n",
 			 ct, exp);
 		/* Welcome, Mr. Bond.  We've been expecting you... */
 		__set_bit(IPS_EXPECTED_BIT, &ct->status);
@@ -909,14 +909,14 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 	} else {
 		/* Once we've had two way comms, always ESTABLISHED. */
 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: normal packet for %p\n", ct);
+			pr_debug("nf_conntrack_in: normal packet for %pK\n", ct);
 			*ctinfo = IP_CT_ESTABLISHED;
 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
-			pr_debug("nf_conntrack_in: related packet for %p\n",
+			pr_debug("nf_conntrack_in: related packet for %pK\n",
 				 ct);
 			*ctinfo = IP_CT_RELATED;
 		} else {
-			pr_debug("nf_conntrack_in: new packet for %p\n", ct);
+			pr_debug("nf_conntrack_in: new packet for %pK\n", ct);
 			*ctinfo = IP_CT_NEW;
 		}
 		*set_reply = 0;
@@ -1058,7 +1058,7 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
 	/* Should be unconfirmed, so not in hash table yet */
 	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
 
-	pr_debug("Altering reply tuple of %p to ", ct);
+	pr_debug("Altering reply tuple of %pK to ", ct);
 	nf_ct_dump_tuple(newreply);
 
 	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
@@ -1628,7 +1628,7 @@ int nf_conntrack_init_net(struct net *net)
 		goto err_stat;
 	}
 
-	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+	net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%pK", net);
 	if (!net->ct.slabname) {
 		ret = -ENOMEM;
 		goto err_slabname;
 	}
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 797818695c87..de34c290bd6f 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -187,6 +187,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
 		kdebug("- %u", key->serial);
 		key_check(key);
 
+		/* Throw away the key data if the key is instantiated */
+		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) &&
+		    !test_bit(KEY_FLAG_NEGATIVE, &key->flags) &&
+		    key->type->destroy)
+			key->type->destroy(key);
+
 		security_key_free(key);
 
 		/* deal with the user's key tracking and quota */
@@ -201,10 +207,6 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
 		if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
 			atomic_dec(&key->user->nikeys);
 
-		/* now throw away the key memory */
-		if (key->type->destroy)
-			key->type->destroy(key);
-
 		key_user_put(key->user);
 
 		kfree(key->description);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index a862361f7782..c283a93c476e 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -2109,7 +2109,7 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg)
 		trace_snd_soc_reg_read(codec, reg, ret);
 	} else
-		ret = -EIO;
+		ret = -1;
 
 	return ret;
 }
@@ -2124,7 +2124,7 @@ unsigned int snd_soc_write(struct snd_soc_codec *codec,
 		return codec->write(codec, reg, val);
 	} else
-		return -EIO;
+		return -1;
 }
 EXPORT_SYMBOL_GPL(snd_soc_write);
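A recurring idiom in the perf portion of this merge is taking two context mutexes at once: both in sys_perf_event_open()'s move_group path and in perf_pmu_migrate_context(), mutex_lock_double() orders the two locks by address so that two racing threads can never acquire them in opposite orders (the classic ABBA deadlock). A standalone sketch of the same discipline — pthreads and hypothetical helper names; the kernel version additionally uses mutex_lock_nested() to keep lockdep happy, which has no pthreads analogue:

#include <pthread.h>

/*
 * Acquire two mutexes in a globally consistent order (by address) so
 * concurrent callers with swapped arguments cannot deadlock ABBA-style.
 * Note: relationally comparing unrelated pointers is technically
 * unspecified in C, but it is exactly what the kernel's
 * mutex_lock_double() relies on.
 */
static void lock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (b < a) {
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_double(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* Unlock order does not matter for deadlock avoidance. */
	pthread_mutex_unlock(b);
	pthread_mutex_unlock(a);
}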