VERSION = 4
PATCHLEVEL = 4
- SUBLEVEL = 189
+ SUBLEVEL = 190
EXTRAVERSION =
NAME = Blurry Fish Butt
# Make variables (CC, etc...)
AS = $(CROSS_COMPILE)as
LD = $(CROSS_COMPILE)ld
-CC = $(CROSS_COMPILE)gcc
+REAL_CC = $(CROSS_COMPILE)gcc
CPP = $(CC) -E
AR = $(CROSS_COMPILE)ar
NM = $(CROSS_COMPILE)nm
PYTHON = python
CHECK = sparse
+# Use the wrapper for the compiler. This wrapper scans for new
+# warnings and causes the build to stop upon encountering them.
+CC = $(PYTHON) $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
+
CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void $(CF)
CFLAGS_MODULE =
-Wno-format-security \
-std=gnu89 $(call cc-option,-fno-PIE)
-
+ifeq ($(TARGET_BOARD_TYPE),auto)
+KBUILD_CFLAGS += -DCONFIG_PLATFORM_AUTO
+endif
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE)
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
+export CFLAGS_KASAN CFLAGS_UBSAN CFLAGS_KASAN_NOSANITIZE
export CFLAGS_KCOV
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
+include scripts/Makefile.ubsan
# Add any arch overrides and user supplied CPPFLAGS, AFLAGS and CFLAGS as the
# last assignments
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
return val;
}
+NOKPROBE_SYMBOL(read_wb_reg);
static void write_wb_reg(int reg, int n, u64 val)
{
}
isb();
}
+NOKPROBE_SYMBOL(write_wb_reg);
/*
* Convert a breakpoint privilege level to the corresponding exception
return -EINVAL;
}
}
+NOKPROBE_SYMBOL(debug_exception_level);
enum hw_breakpoint_ops {
HW_BREAKPOINT_INSTALL,
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
return -EINVAL;
}
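
The reordered cases change which unaligned watchpoints pass validation: byte-sized watchpoints are now accepted at any offset 1..3 (falling through from cases 1 and 2 into case 3), whereas the old layout rejected, for example, a byte watchpoint at offset 3. A minimal sketch of the resulting fall-through, with offset = address & 3:

    /* sketch of the accepted (offset, length) combinations */
    static int check_wp_alignment(int offset, int len)
    {
        switch (offset) {
        case 0:
            break;                  /* aligned: any length */
        case 1:
        case 2:
            if (len == ARM_BREAKPOINT_LEN_2)
                break;              /* halfword at offset 1 or 2 */
            /* fallthrough */
        case 3:
            if (len == ARM_BREAKPOINT_LEN_1)
                break;              /* single byte at offset 1, 2 or 3 */
            /* fallthrough */
        default:
            return -EINVAL;         /* e.g. halfword at offset 3 */
        }
        return 0;
    }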
write_wb_reg(reg, i, ctrl);
}
}
+NOKPROBE_SYMBOL(toggle_bp_registers);
/*
* Debug exception handlers.
return 0;
}
+NOKPROBE_SYMBOL(breakpoint_handler);
/*
* Arm64 hardware does not always report a watchpoint hit address that matches
return 0;
}
+NOKPROBE_SYMBOL(watchpoint_handler);
/*
* Handle single-step exception.
return !handled_exception;
}
+NOKPROBE_SYMBOL(reinstall_suspended_bps);
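
For context: NOKPROBE_SYMBOL() blacklists a function for kprobes, which matters here because these helpers run in the debug exception path and probing them would recurse. Roughly (per include/linux/kprobes.h in this kernel), the annotation records the function's address in a dedicated section that the kprobes core checks before arming a probe; an approximate expansion:

    /* approximate expansion; see include/linux/kprobes.h */
    #define NOKPROBE_SYMBOL(fname)                          \
    static unsigned long __used                             \
        __attribute__((section("_kprobe_blacklist")))       \
        _kbl_addr_##fname = (unsigned long)fname;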
/*
* Context-switcher for restoring suspended breakpoints.
KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
+ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
+UBSAN_SANITIZE := n
LDFLAGS := -m elf_$(UTS_MACHINE)
ifeq ($(CONFIG_RELOCATABLE),y)
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86 && ACPI
+ depends on X86 && ISCSI_IBFT
default n
help
This option enables the kernel to find the region of memory
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
default n
help
This option enables support for detection and exposing of iSCSI
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
+source "drivers/firmware/qcom/Kconfig"
endmenu
spin_unlock_irq(&list->hiddev->list_lock);
mutex_lock(&hiddev->existancelock);
+ /*
+ * recheck ->exist with existancelock held to
+ * avoid opening a disconnected device
+ */
+ if (!list->hiddev->exist) {
+ res = -ENODEV;
+ goto bail_unlock;
+ }
if (!list->hiddev->open++)
if (list->hiddev->exist) {
struct hid_device *hid = hiddev->hid;
return 0;
bail_unlock:
mutex_unlock(&hiddev->existancelock);
+
+ spin_lock_irq(&list->hiddev->list_lock);
+ list_del(&list->node);
+ spin_unlock_irq(&list->hiddev->list_lock);
bail:
file->private_data = NULL;
vfree(list);
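
The recheck plus the new unwind close an open/disconnect race; schematically (a sketch, not verbatim kernel code):

    /*
     * CPU0: hiddev_open()                CPU1: hiddev_disconnect()
     *   list_add(node) under list_lock
     *                                      mutex_lock(&existancelock);
     *                                      hiddev->exist = 0;
     *                                      mutex_unlock(&existancelock);
     *   mutex_lock(&existancelock);
     *   ->exist recheck fails -> -ENODEV,
     *   and the node is removed again on
     *   the bail_unlock path above
     */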
report->maxfield);
field = report->field[uref->field_index];
+ }
- if (cmd == HIDIOCGCOLLECTIONINDEX) {
- if (uref->usage_index >= field->maxusage)
- goto inval;
- uref->usage_index =
- array_index_nospec(uref->usage_index,
- field->maxusage);
- } else if (uref->usage_index >= field->report_count)
+ if (cmd == HIDIOCGCOLLECTIONINDEX) {
+ if (uref->usage_index >= field->maxusage)
goto inval;
- }
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->maxusage);
+ } else if (uref->usage_index >= field->report_count)
+ goto inval;
if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor ss_bulk_comp_desc = {
+ .bLength = sizeof(ss_bulk_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
/* .bLength = DYNAMIC */
static inline struct usb_request *midi_alloc_ep_req(struct usb_ep *ep,
unsigned length)
{
- return alloc_ep_req(ep, length, length);
+ return alloc_ep_req(ep, length);
}
static const uint8_t f_midi_cin_length[] = {
req->complete = f_midi_complete;
err = usb_ep_queue(midi->out_ep, req, GFP_ATOMIC);
if (err) {
- ERROR(midi, "%s queue req: %d\n",
+ ERROR(midi, "%s: couldn't enqueue request: %d\n",
midi->out_ep->name, err);
- free_ep_req(midi->out_ep, req);
+ if (req->buf != NULL)
+ free_ep_req(midi->out_ep, req);
+ return err;
}
}
static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_descriptor_header **midi_function;
+ struct usb_descriptor_header **midi_ss_function;
struct usb_midi_in_jack_descriptor jack_in_ext_desc[MAX_PORTS];
struct usb_midi_in_jack_descriptor jack_in_emb_desc[MAX_PORTS];
struct usb_midi_out_jack_descriptor_1 jack_out_ext_desc[MAX_PORTS];
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
- int status, n, jack = 1, i = 0;
+ int status, n, jack = 1, i = 0, j = 0;
midi->gadget = cdev->gadget;
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
if (!midi->out_ep)
goto fail;
+ /* allocate temporary function list for ss */
+ midi_ss_function = kcalloc((MAX_PORTS * 4) + 11,
+ sizeof(*midi_ss_function), GFP_KERNEL);
+ if (!midi_ss_function) {
+ status = -ENOMEM;
+ goto fail;
+ }
+
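The slot counts follow from descriptor accounting: each port contributes four jack descriptors, the fixed full/high-speed part of the list is nine entries, and the SuperSpeed list adds one companion descriptor per bulk endpoint. As a sketch:

    /*
     * per port:    in_ext + out_emb + in_emb + out_ext       = 4
     * fixed FS/HS: ac_if, ac_hdr, ms_if, ms_hdr, bulk_out,
     *              ms_out, bulk_in, ms_in, NULL terminator    = 9
     * fixed SS:    the above + 2 x ss_bulk_comp_desc          = 11
     */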
/* allocate temporary function list */
midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
+ kfree(midi_ss_function);
goto fail;
}
midi_function[i++] = (struct usb_descriptor_header *) &ac_interface_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ac_header_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_interface_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ac_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_interface_desc;
/* calculate the header's wTotalLength */
n = USB_DT_MS_HEADER_SIZE
ms_header_desc.wTotalLength = cpu_to_le16(n);
midi_function[i++] = (struct usb_descriptor_header *) &ms_header_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ms_header_desc;
/* configure the external IN jacks, each linked to an embedded OUT jack */
for (n = 0; n < midi->in_ports; n++) {
in_ext->bJackID = jack++;
in_ext->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_ext;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_ext;
out_emb->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_emb->bDescriptorType = USB_DT_CS_INTERFACE;
out_emb->pins[0].baSourceID = in_ext->bJackID;
out_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) out_emb;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_emb;
/* link it to the endpoint */
ms_in_desc.baAssocJackID[n] = out_emb->bJackID;
in_emb->bJackID = jack++;
in_emb->iJack = 0;
midi_function[i++] = (struct usb_descriptor_header *) in_emb;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) in_emb;
out_ext->bLength = USB_DT_MIDI_OUT_SIZE(1);
out_ext->bDescriptorType = USB_DT_CS_INTERFACE;
out_ext->pins[0].baSourceID = in_emb->bJackID;
out_ext->pins[0].baSourcePin = 1;
midi_function[i++] = (struct usb_descriptor_header *) out_ext;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) out_ext;
/* link it to the endpoint */
ms_out_desc.baAssocJackID[n] = in_emb->bJackID;
midi_function[i++] = (struct usb_descriptor_header *) &ms_in_desc;
midi_function[i++] = NULL;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_out_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_out_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &bulk_in_desc;
+ midi_ss_function[j++] =
+ (struct usb_descriptor_header *) &ss_bulk_comp_desc;
+ midi_ss_function[j++] = (struct usb_descriptor_header *) &ms_in_desc;
+ midi_ss_function[j++] = NULL;
+
/*
* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
goto fail_f_midi;
}
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ f->ss_descriptors = usb_copy_descriptors(midi_ss_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
+ }
+
kfree(midi_function);
+ kfree(midi_ss_function);
return 0;
fail_f_midi:
kfree(midi_function);
usb_free_descriptors(f->hs_descriptors);
+ kfree(midi_ss_function);
fail:
f_midi_unregister_card(midi);
fail_register:
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
mutex_lock(&opts->lock);
for (i = opts->in_ports - 1; i >= 0; --i)
kfree(midi->in_port[i]);
kfree(midi);
opts->func_inst.f = NULL;
--opts->refcnt;
card = midi->card;
midi->card = NULL;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
usb_free_all_descriptors(f);
}
*
* @ep: the endpoint to allocate a usb_request
* @len: usb_requests's buffer suggested size
- * @default_len: used if @len is not provided, ie, is 0
*
* In case @ep direction is OUT, the @len will be aligned to ep's
* wMaxPacketSize. In order to avoid memory leaks or drops, *always* use
* usb_requests's length (req->length) to refer to the allocated buffer size.
* Requests allocated via alloc_ep_req() *must* be freed by free_ep_req().
*/
-struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len, int default_len);
+struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
/* Frees a usb_request previously allocated by alloc_ep_req() */
static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
{
+ WARN_ON(req->buf == NULL);
kfree(req->buf);
+ req->buf = NULL;
usb_ep_free_request(ep, req);
}
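
Nulling req->buf after the kfree() is what makes call-site guards such as the f_midi completion path above (`if (req->buf != NULL)`) meaningful: the first release is observable, and a stray second release trips the WARN_ON instead of silently double-freeing (kfree(NULL) is a no-op). The intended call-site pattern, as a sketch with ep/req from an earlier alloc_ep_req():

    if (req->buf != NULL)       /* buffer not already released */
        free_ep_req(ep, req);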
bool metadata_set;
bool next_track;
void *private_data;
+ struct snd_soc_pcm_runtime *be;
};
/**
* @get_params: retrieve the codec parameters, mandatory
* @set_metadata: Set the metadata values for a stream
* @get_metadata: retrieves the requested metadata values from stream
+ * @set_next_track_param: send codec specific data of subsequent track
+ * in gapless
* @trigger: Trigger operations like start, pause, resume, drain, stop.
* This callback is mandatory
* @pointer: Retrieve current h/w pointer information. Mandatory
struct snd_compr_metadata *metadata);
int (*get_metadata)(struct snd_compr_stream *stream,
struct snd_compr_metadata *metadata);
+ int (*set_next_track_param)(struct snd_compr_stream *stream,
+ union snd_codec_options *codec_options);
int (*trigger)(struct snd_compr_stream *stream, int cmd);
int (*pointer)(struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp);
int snd_compress_deregister(struct snd_compr *device);
int snd_compress_new(struct snd_card *card, int device,
int type, struct snd_compr *compr);
+void snd_compress_free(struct snd_card *card, struct snd_compr *compr);
/* dsp driver callback apis
* For playback: driver should call snd_compress_fragment_elapsed() to let the
if (snd_BUG_ON(!stream))
return;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
}
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(bool, is_idle);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
* 2 - disallow kernel profiling for unpriv
* 3 - disallow all unpriv perf event use
*/
-#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+#ifdef CONFIG_PERF_EVENTS_USERMODE
+int sysctl_perf_event_paranoid __read_mostly = -1;
+#elif defined CONFIG_SECURITY_PERF_EVENTS_RESTRICT
int sysctl_perf_event_paranoid __read_mostly = 3;
#else
int sysctl_perf_event_paranoid __read_mostly = 1;
* If this was a group event with sibling events then
* upgrade the siblings to singleton events by adding them
* to whatever list we are on.
+ * If this isn't on a list, make sure we still remove the sibling's
+ * group_entry from this sibling_list; otherwise, when that sibling
+ * is later deallocated, it will try to remove itself from this
+ * sibling_list, which may well have been deallocated already,
+ * resulting in a use-after-free.
*/
list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
if (list)
list_move_tail(&sibling->group_entry, list);
+ else
+ list_del_init(&sibling->group_entry);
sibling->group_leader = sibling;
/* Inherit group flags from the previous leader */
}
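
Why list_del_init() rather than plain removal: it leaves the node pointing at itself, so any later list operation on the sibling dereferences only the sibling's own memory. A minimal standalone illustration (names hypothetical):

    #include <linux/list.h>

    static LIST_HEAD(leader_list);          /* stands in for sibling_list */
    static struct list_head sibling_node;

    static void demo(void)
    {
        list_add(&sibling_node, &leader_list);
        list_del_init(&sibling_node);   /* node points back at itself */
        /*
         * Even if leader_list's containing object is freed later, a
         * further list_del(&sibling_node) touches only sibling_node
         * itself - no use-after-free.
         */
        list_del(&sibling_node);
    }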
-/*
+#ifdef CONFIG_SMP
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+ int up_ret;
+ /*
+ * CPU was offline. Bring it online so we can
+ * gracefully exit a perf context.
+ */
+ up_ret = cpu_up(event->cpu);
+ if (!up_ret)
+ /* Try the remove call once again. */
+ cpu_function_call(event->cpu, __perf_remove_from_context,
+ rep);
+ else
+ pr_err("Failed to bring up CPU: %d, ret: %d\n",
+ event->cpu, up_ret);
+}
+#else
+static void perf_retry_remove(struct perf_event *event,
+ struct remove_event *rep)
+{
+}
+#endif
+
+/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
-static void perf_remove_from_context(struct perf_event *event, bool detach_group)
+static void __ref perf_remove_from_context(struct perf_event *event,
+ bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
struct task_struct *task = ctx->task;
.event = event,
.detach_group = detach_group,
};
+ int ret;
lockdep_assert_held(&ctx->mutex);
* already called __perf_remove_from_context from
* perf_event_exit_cpu.
*/
- cpu_function_call(event->cpu, __perf_remove_from_context, &re);
+ ret = cpu_function_call(event->cpu, __perf_remove_from_context,
+ &re);
+ if (ret == -ENXIO)
+ perf_retry_remove(event, &re);
+
return;
}
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
- event->state = PERF_EVENT_STATE_ACTIVE;
- event->oncpu = smp_processor_id();
+ WRITE_ONCE(event->oncpu, smp_processor_id());
+ /*
+ * Order event::oncpu write to happen before the ACTIVE state
+ * is visible.
+ */
+ smp_wmb();
+ WRITE_ONCE(event->state, PERF_EVENT_STATE_ACTIVE);
/*
* Unthrottle events, since we scheduled we might have missed several
}
EXPORT_SYMBOL_GPL(perf_event_enable);
+static int __perf_event_stop(void *info)
+{
+ struct perf_event *event = info;
+
+ /* for AUX events, our job is done if the event is already inactive */
+ if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE)
+ return 0;
+
+ /* matches smp_wmb() in event_sched_in() */
+ smp_rmb();
+
+ /*
+ * There is a window with interrupts enabled before we get here,
+ * so we need to check again lest we try to stop another CPU's event.
+ */
+ if (READ_ONCE(event->oncpu) != smp_processor_id())
+ return -EAGAIN;
+
+ event->pmu->stop(event, PERF_EF_UPDATE);
+
+ return 0;
+}
+
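The smp_rmb() here pairs with the smp_wmb() added above when the event is scheduled in; the guarantee can be summarized as:

    /*
     * publisher (sched-in path)         consumer (__perf_event_stop)
     * -------------------------         ----------------------------
     * WRITE_ONCE(event->oncpu, cpu);    if (READ_ONCE(event->state) !=
     * smp_wmb();                                ACTIVE) return 0;
     * WRITE_ONCE(event->state,          smp_rmb();
     *            ACTIVE);               cpu = READ_ONCE(event->oncpu);
     *
     * A consumer that observes state == ACTIVE is therefore also
     * guaranteed to observe the oncpu store that preceded it.
     */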
static int _perf_event_refresh(struct perf_event *event, int refresh)
{
/*
static int perf_event_read(struct perf_event *event, bool group)
{
- int ret = 0;
+ int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
* value in the event structure:
*/
- if (event->state == PERF_EVENT_STATE_ACTIVE) {
+ event_cpu = READ_ONCE(event->oncpu);
+
+ if (event->state == PERF_EVENT_STATE_ACTIVE &&
+ !cpu_isolated(event_cpu)) {
struct perf_read_data data = {
.event = event,
.group = group,
.ret = 0,
};
- smp_call_function_single(event->oncpu,
- __perf_event_read, &data, 1);
- ret = data.ret;
+
+ if ((unsigned int)event_cpu >= nr_cpu_ids)
+ return 0;
+ if (!event->attr.exclude_idle ||
+ !per_cpu(is_idle, event_cpu)) {
+ smp_call_function_single(event_cpu,
+ __perf_event_read, &data, 1);
+ ret = data.ret;
+ }
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
if (!task) {
/* Must be root to operate on a CPU event: */
- if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ if (event->owner != EVENT_OWNER_KERNEL && perf_paranoid_cpu() &&
+ !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
/*
if (event->destroy)
event->destroy(event);
+ if (event->pmu->free_drv_configs)
+ event->pmu->free_drv_configs(event);
+
if (event->ctx)
put_ctx(event->ctx);
*/
static int perf_release(struct inode *inode, struct file *file)
{
+ struct perf_event *event = file->private_data;
+
+ /*
+ * Event can be in state OFF because of a constraint check.
+ * Change to ACTIVE so that it gets cleaned up correctly.
+ */
+ if ((event->state == PERF_EVENT_STATE_OFF) &&
+ event->attr.constraint_duplicate)
+ event->state = PERF_EVENT_STATE_ACTIVE;
put_event(file->private_data);
return 0;
}
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
static int perf_event_set_bpf_prog(struct perf_event *event, u32 prog_fd);
+static int perf_event_drv_configs(struct perf_event *event,
+ void __user *arg);
static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg)
{
case PERF_EVENT_IOC_SET_BPF:
return perf_event_set_bpf_prog(event, arg);
+ case PERF_EVENT_IOC_SET_DRV_CONFIGS:
+ return perf_event_drv_configs(event, (void __user *)arg);
+
default:
return -ENOTTY;
}
switch (_IOC_NR(cmd)) {
case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
case _IOC_NR(PERF_EVENT_IOC_ID):
+ case _IOC_NR(PERF_EVENT_IOC_SET_DRV_CONFIGS):
/* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case */
if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
cmd &= ~IOCSIZE_MASK;
event->pmu->event_mapped(event);
}
+static void perf_pmu_output_stop(struct perf_event *event);
+
/*
* A buffer can be mmap()ed multiple times; either directly through the same
* event, or through other events by use of perf_event_set_output().
*/
if (rb_has_aux(rb) && vma->vm_pgoff == rb->aux_pgoff &&
atomic_dec_and_mutex_lock(&rb->aux_mmap_count, &event->mmap_mutex)) {
+ /*
+ * Stop all AUX events that are writing to this buffer,
+ * so that we can free its AUX pages and corresponding PMU
+ * data. Note that after rb::aux_mmap_count dropped to zero,
+ * they won't start any more (see perf_aux_output_begin()).
+ */
+ perf_pmu_output_stop(event);
+
+ /* now it's safe to free the pages */
atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
vma->vm_mm->pinned_vm -= rb->aux_mmap_locked;
+ /* this has to be the last one */
rb_free_aux(rb);
+ WARN_ON_ONCE(atomic_read(&rb->aux_refcount));
+
mutex_unlock(&event->mmap_mutex);
}
rcu_read_unlock();
}
+struct remote_output {
+ struct ring_buffer *rb;
+ int err;
+};
+
+static void __perf_event_output_stop(struct perf_event *event, void *data)
+{
+ struct perf_event *parent = event->parent;
+ struct remote_output *ro = data;
+ struct ring_buffer *rb = ro->rb;
+
+ if (!has_aux(event))
+ return;
+
+ if (!parent)
+ parent = event;
+
+ /*
+ * In case of inheritance, it will be the parent that links to the
+ * ring-buffer, but it will be the child that's actually using it:
+ */
+ if (rcu_dereference(parent->rb) == rb)
+ ro->err = __perf_event_stop(event);
+}
+
+static int __perf_pmu_output_stop(void *info)
+{
+ struct perf_event *event = info;
+ struct pmu *pmu = event->pmu;
+ struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ struct remote_output ro = {
+ .rb = event->rb,
+ };
+
+ rcu_read_lock();
+ perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro);
+ if (cpuctx->task_ctx)
+ perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+ &ro);
+ rcu_read_unlock();
+
+ return ro.err;
+}
+
+static void perf_pmu_output_stop(struct perf_event *event)
+{
+ struct perf_event *iter;
+ int err, cpu;
+
+restart:
+ rcu_read_lock();
+ list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) {
+ /*
+ * For per-CPU events, we need to make sure that neither they
+ * nor their children are running; for cpu==-1 events it's
+ * sufficient to stop the event itself if it's active, since
+ * it can't have children.
+ */
+ cpu = iter->cpu;
+ if (cpu == -1)
+ cpu = READ_ONCE(iter->oncpu);
+
+ if (cpu == -1)
+ continue;
+
+ err = cpu_function_call(cpu, __perf_pmu_output_stop, event);
+ if (err == -EAGAIN) {
+ rcu_read_unlock();
+ goto restart;
+ }
+ }
+ rcu_read_unlock();
+}
+
/*
* task tracking -- fork/exit
*
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
.start = perf_swevent_start,
.stop = perf_swevent_stop,
.read = perf_swevent_read,
+
+ .events_across_hotplug = 1,
};
static inline void perf_tp_register(void)
}
#endif
+static int perf_event_drv_configs(struct perf_event *event,
+ void __user *arg)
+{
+ if (!event->pmu->get_drv_configs)
+ return -EINVAL;
+
+ return event->pmu->get_drv_configs(event, arg);
+}
+
/*
* hrtimer based swevent callback
*/
.start = cpu_clock_event_start,
.stop = cpu_clock_event_stop,
.read = cpu_clock_event_read,
+
+ .events_across_hotplug = 1,
};
/*
.start = task_clock_event_start,
.stop = task_clock_event_stop,
.read = task_clock_event_read,
+
+ .events_across_hotplug = 1,
};
static void perf_pmu_nop_void(struct pmu *pmu)
if (!group_leader)
group_leader = event;
+ mutex_init(&event->group_leader_mutex);
mutex_init(&event->child_mutex);
INIT_LIST_HEAD(&event->child_list);
INIT_LIST_HEAD(&event->sibling_list);
INIT_LIST_HEAD(&event->rb_entry);
INIT_LIST_HEAD(&event->active_entry);
+ INIT_LIST_HEAD(&event->drv_configs);
INIT_HLIST_NODE(&event->hlist_entry);
if (err)
return err;
+ if (attr.constraint_duplicate || attr.__reserved_1)
+ return -EINVAL;
+
if (!attr.exclude_kernel) {
if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
return -EACCES;
group_leader = NULL;
}
+ /*
+ * Take the group_leader's group_leader_mutex before observing
+ * anything in the group leader that leads to changes in ctx,
+ * many of which may be changing on another thread.
+ * In particular, we want to take this lock before deciding
+ * whether we need to move_group.
+ */
+ if (group_leader)
+ mutex_lock(&group_leader->group_leader_mutex);
+
if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
task = find_lively_task_by_vpid(pid);
if (IS_ERR(task)) {
if (move_group)
perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
+ if (group_leader)
+ mutex_unlock(&group_leader->group_leader_mutex);
if (task) {
mutex_unlock(&task->signal->cred_guard_mutex);
if (task)
put_task_struct(task);
err_group_fd:
+ if (group_leader)
+ mutex_unlock(&group_leader->group_leader_mutex);
fdput(group);
err_fd:
put_unused_fd(event_fd);
goto err_free;
}
- perf_install_in_context(ctx, event, cpu);
+ perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
rcu_read_unlock();
}
+static void __perf_event_stop_swclock(void *__info)
+{
+ struct perf_event_context *ctx = __info;
+ struct perf_event *event, *tmp;
+
+ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
+ if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_stop(event, 0);
+ }
+}
+
static void perf_event_exit_cpu_context(int cpu)
{
+ struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
+ unsigned long flags;
struct pmu *pmu;
int idx;
idx = srcu_read_lock(&pmus_srcu);
list_for_each_entry_rcu(pmu, &pmus, entry) {
- ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ ctx = &cpuctx->ctx;
+
+ /* Cancel the mux hrtimer to avoid CPU migration */
+ if (pmu->task_ctx_nr != perf_sw_context) {
+ raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+ hrtimer_cancel(&cpuctx->hrtimer);
+ cpuctx->hrtimer_active = 0;
+ raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+ flags);
+ }
mutex_lock(&ctx->mutex);
- smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
+ /*
+ * If keeping events across hotplugging is supported, do not
+ * remove the event list, but keep it alive across CPU hotplug.
+ * The context is exited via an fd close path when userspace
+ * is done and the target CPU is online. If a software clock
+ * event is active, stop the hrtimer associated with it; it is
+ * restarted when the CPU comes back online.
+ */
+ if (!pmu->events_across_hotplug)
+ smp_call_function_single(cpu, __perf_event_exit_context,
+ ctx, 1);
+ else
+ smp_call_function_single(cpu, __perf_event_stop_swclock,
+ ctx, 1);
mutex_unlock(&ctx->mutex);
}
srcu_read_unlock(&pmus_srcu, idx);
}
+static void perf_event_start_swclock(int cpu)
+{
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+ int idx;
+ struct perf_event *event, *tmp;
+
+ idx = srcu_read_lock(&pmus_srcu);
+ list_for_each_entry_rcu(pmu, &pmus, entry) {
+ if (pmu->events_across_hotplug) {
+ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+ list_for_each_entry_safe(event, tmp, &ctx->event_list,
+ event_entry) {
+ if (event->attr.config ==
+ PERF_COUNT_SW_CPU_CLOCK &&
+ event->attr.type == PERF_TYPE_SOFTWARE)
+ cpu_clock_event_start(event, 0);
+ }
+ }
+ }
+ srcu_read_unlock(&pmus_srcu, idx);
+}
+
static void perf_event_exit_cpu(int cpu)
{
perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
+static inline void perf_event_start_swclock(int cpu) { }
#endif
static int
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
+
+ case CPU_STARTING:
+ perf_event_start_swclock(cpu);
+ break;
+
default:
break;
}
return NOTIFY_OK;
}
+static int event_idle_notif(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ switch (action) {
+ case IDLE_START:
+ __this_cpu_write(is_idle, true);
+ break;
+ case IDLE_END:
+ __this_cpu_write(is_idle, false);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block perf_event_idle_nb = {
+ .notifier_call = event_idle_notif,
+};
+
void __init perf_event_init(void)
{
int ret;
perf_pmu_register(&perf_task_clock, NULL, -1);
perf_tp_register();
perf_cpu_notifier(perf_cpu_notify);
+ idle_notifier_register(&perf_event_idle_nb);
register_reboot_notifier(&perf_reboot_notifier);
ret = init_hw_breakpoint();
/*** Global kva allocator ***/
-#define VM_LAZY_FREE 0x01
-#define VM_LAZY_FREEING 0x02
#define VM_VM_AREA 0x04
static DEFINE_SPINLOCK(vmap_area_lock);
/* Export for kexec only */
LIST_HEAD(vmap_area_list);
+static LLIST_HEAD(vmap_purge_list);
static struct rb_root vmap_area_root = RB_ROOT;
/* The vmap cache globals are protected by vmap_area_lock */
static unsigned long vmap_area_pcpu_hole;
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+#define POSSIBLE_VMALLOC_START PAGE_OFFSET
+
+#define VMALLOC_BITMAP_SIZE ((VMALLOC_END - PAGE_OFFSET) >> \
+ PAGE_SHIFT)
+#define VMALLOC_TO_BIT(addr) ((addr - PAGE_OFFSET) >> PAGE_SHIFT)
+#define BIT_TO_VMALLOC(i) (PAGE_OFFSET + i * PAGE_SIZE)
+
+unsigned long total_vmalloc_size;
+unsigned long vmalloc_reserved;
+
+DECLARE_BITMAP(possible_areas, VMALLOC_BITMAP_SIZE);
+
+void mark_vmalloc_reserved_area(void *x, unsigned long size)
+{
+ unsigned long addr = (unsigned long)x;
+
+ bitmap_set(possible_areas, VMALLOC_TO_BIT(addr), size >> PAGE_SHIFT);
+ vmalloc_reserved += size;
+}
+
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ if (addr < POSSIBLE_VMALLOC_START || addr >= VMALLOC_END)
+ return 0;
+
+ if (test_bit(VMALLOC_TO_BIT(addr), possible_areas))
+ return 0;
+
+ return 1;
+}
+
+static void calc_total_vmalloc_size(void)
+{
+ total_vmalloc_size = VMALLOC_END - POSSIBLE_VMALLOC_START -
+ vmalloc_reserved;
+}
+#else
+int is_vmalloc_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+}
+
+static void calc_total_vmalloc_size(void) { }
+#endif
+EXPORT_SYMBOL(is_vmalloc_addr);
+
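With CONFIG_ENABLE_VMALLOC_SAVING the vmalloc range is interleaved with regions returned to the linear map, so is_vmalloc_addr() becomes "inside [PAGE_OFFSET, VMALLOC_END) and not marked reserved" rather than a plain range check. A usage sketch (lowmem_va is a hypothetical region handed back at boot):

    mark_vmalloc_reserved_area(lowmem_va, SZ_16M);

    WARN_ON(is_vmalloc_addr(lowmem_va));            /* reserved -> 0 */
    WARN_ON(!is_vmalloc_addr(vmalloc(PAGE_SIZE)));  /* in range -> 1 */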
static struct vmap_area *__find_vmap_area(unsigned long addr)
{
struct rb_node *n = vmap_area_root.rb_node;
BUG_ON(offset_in_page(size));
BUG_ON(!is_power_of_2(align));
+ might_sleep();
+
va = kmalloc_node(sizeof(struct vmap_area),
gfp_mask & GFP_RECLAIM_MASK, node);
if (unlikely(!va))
purged = 1;
goto retry;
}
- if (printk_ratelimit())
+ if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
size);
kfree(va);
log = fls(num_online_cpus());
- return log * (32UL * 1024 * 1024 / PAGE_SIZE);
+ return log * (1UL * CONFIG_VMAP_LAZY_PURGING_FACTOR *
+ 1024 * 1024 / PAGE_SIZE);
}
static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
+/*
+ * Serialize vmap purging. There is no actual critical section protected
+ * by this lock, but we want to avoid concurrent calls for performance
+ * reasons and to make pcpu_get_vm_areas() more deterministic.
+ */
+static DEFINE_MUTEX(vmap_purge_lock);
+
/* for per-CPU blocks */
static void purge_fragmented_blocks_allcpus(void);
/*
* Purges all lazily-freed vmap areas.
- *
- * If sync is 0 then don't purge if there is already a purge in progress.
- * If force_flush is 1, then flush kernel TLBs between *start and *end even
- * if we found no lazy vmap areas to unmap (callers can use this to optimise
- * their own TLB flushing).
- * Returns with *start = min(*start, lowest purged address)
- * *end = max(*end, highest purged address)
*/
-static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
- int sync, int force_flush)
+static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
{
- static DEFINE_SPINLOCK(purge_lock);
- LIST_HEAD(valist);
+ struct llist_node *valist;
struct vmap_area *va;
struct vmap_area *n_va;
- int nr = 0;
+ bool do_free = false;
- /*
- * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
- * should not expect such behaviour. This just simplifies locking for
- * the case that isn't actually used at the moment anyway.
- */
- if (!sync && !force_flush) {
- if (!spin_trylock(&purge_lock))
- return;
- } else
- spin_lock(&purge_lock);
+ lockdep_assert_held(&vmap_purge_lock);
- if (sync)
- purge_fragmented_blocks_allcpus();
-
- rcu_read_lock();
- list_for_each_entry_rcu(va, &vmap_area_list, list) {
- if (va->flags & VM_LAZY_FREE) {
- if (va->va_start < *start)
- *start = va->va_start;
- if (va->va_end > *end)
- *end = va->va_end;
- nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
- list_add_tail(&va->purge_list, &valist);
- va->flags |= VM_LAZY_FREEING;
- va->flags &= ~VM_LAZY_FREE;
- }
+ valist = llist_del_all(&vmap_purge_list);
+ llist_for_each_entry(va, valist, purge_list) {
+ if (va->va_start < start)
+ start = va->va_start;
+ if (va->va_end > end)
+ end = va->va_end;
+ do_free = true;
}
- rcu_read_unlock();
- if (nr)
- atomic_sub(nr, &vmap_lazy_nr);
+ if (!do_free)
+ return false;
- if (nr || force_flush)
- flush_tlb_kernel_range(*start, *end);
+ flush_tlb_kernel_range(start, end);
- if (nr) {
- spin_lock(&vmap_area_lock);
- list_for_each_entry_safe(va, n_va, &valist, purge_list)
- __free_vmap_area(va);
- spin_unlock(&vmap_area_lock);
+ spin_lock(&vmap_area_lock);
+ llist_for_each_entry_safe(va, n_va, valist, purge_list) {
+ int nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
+
+ __free_vmap_area(va);
+ atomic_sub(nr, &vmap_lazy_nr);
+ cond_resched_lock(&vmap_area_lock);
}
- spin_unlock(&purge_lock);
+ spin_unlock(&vmap_area_lock);
+ return true;
}
/*
*/
static void try_purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 0, 0);
+ if (mutex_trylock(&vmap_purge_lock)) {
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
+ }
}
/*
*/
static void purge_vmap_area_lazy(void)
{
- unsigned long start = ULONG_MAX, end = 0;
-
- __purge_vmap_area_lazy(&start, &end, 1, 0);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ __purge_vmap_area_lazy(ULONG_MAX, 0);
+ mutex_unlock(&vmap_purge_lock);
}
/*
*/
static void free_vmap_area_noflush(struct vmap_area *va)
{
- va->flags |= VM_LAZY_FREE;
- atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
- if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
- try_purge_vmap_area_lazy();
-}
+ int nr_lazy;
-/*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
- */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
-{
- unmap_vmap_area(va);
- free_vmap_area_noflush(va);
+ nr_lazy = atomic_add_return((va->va_end - va->va_start) >> PAGE_SHIFT,
+ &vmap_lazy_nr);
+
+ /* After this point, we may free va at any time */
+ llist_add(&va->purge_list, &vmap_purge_list);
+
+ if (unlikely(nr_lazy > lazy_max_pages()))
+ try_purge_vmap_area_lazy();
}
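
The llist handoff replaces the old VM_LAZY_FREE flag walk over vmap_area_list: producers publish lock-free from any context, and the purger detaches the entire pending list in one atomic operation. Schematically:

    /*
     * producer (free_vmap_area_noflush, any context):
     *     llist_add(&va->purge_list, &vmap_purge_list);
     *
     * consumer (__purge_vmap_area_lazy, under vmap_purge_lock):
     *     valist = llist_del_all(&vmap_purge_list);
     *     llist_for_each_entry_safe(va, n_va, valist, purge_list)
     *         __free_vmap_area(va);
     */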
/*
static void free_unmap_vmap_area(struct vmap_area *va)
{
flush_cache_vunmap(va->va_start, va->va_end);
- free_unmap_vmap_area_noflush(va);
+ unmap_vmap_area(va);
+ free_vmap_area_noflush(va);
}
static struct vmap_area *find_vmap_area(unsigned long addr)
return va;
}
-static void free_unmap_vmap_area_addr(unsigned long addr)
-{
- struct vmap_area *va;
-
- va = find_vmap_area(addr);
- BUG_ON(!va);
- free_unmap_vmap_area(va);
-}
-
-
/*** Per cpu kva allocator ***/
/*
if (unlikely(!vmap_initialized))
return;
+ might_sleep();
+
for_each_possible_cpu(cpu) {
struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
struct vmap_block *vb;
rcu_read_unlock();
}
- __purge_vmap_area_lazy(&start, &end, 1, flush);
+ mutex_lock(&vmap_purge_lock);
+ purge_fragmented_blocks_allcpus();
+ if (!__purge_vmap_area_lazy(start, end) && flush)
+ flush_tlb_kernel_range(start, end);
+ mutex_unlock(&vmap_purge_lock);
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
{
unsigned long size = count << PAGE_SHIFT;
unsigned long addr = (unsigned long)mem;
+ struct vmap_area *va;
+ might_sleep();
BUG_ON(!addr);
BUG_ON(addr < VMALLOC_START);
BUG_ON(addr > VMALLOC_END);
debug_check_no_locks_freed(mem, size);
vmap_debug_free_range(addr, addr+size);
- if (likely(count <= VMAP_MAX_ALLOC))
+ if (likely(count <= VMAP_MAX_ALLOC)) {
vb_free(mem, size);
- else
- free_unmap_vmap_area_addr(addr);
+ return;
+ }
+
+ va = find_vmap_area(addr);
+ BUG_ON(!va);
+ free_unmap_vmap_area(va);
}
EXPORT_SYMBOL(vm_unmap_ram);
EXPORT_SYMBOL(vm_map_ram);
static struct vm_struct *vmlist __initdata;
+
+/**
+ * vm_area_check_early - check if vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function is used to check if the vmap area has been
+ * mapped already. @vm->addr, @vm->size and @vm->flags should
+ * contain proper values.
+ *
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+ struct vm_struct *tmp, **p;
+
+ BUG_ON(vmap_initialized);
+ for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+ if (tmp->addr >= vm->addr) {
+ if (tmp->addr < vm->addr + vm->size)
+ return 1;
+ } else {
+ if (tmp->addr + tmp->size > vm->addr)
+ return 1;
+ }
+ }
+ return 0;
+}
+
/**
* vm_area_add_early - add vmap area early during boot
* @vm: vm_struct to add
}
vmap_area_pcpu_hole = VMALLOC_END;
-
+ calc_total_vmalloc_size();
vmap_initialized = true;
}
*/
struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL,
+ __builtin_return_address(0));
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL,
__builtin_return_address(0));
+#endif
}
struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
const void *caller)
{
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ return __get_vm_area_node(size, 1, flags, PAGE_OFFSET, VMALLOC_END,
+ NUMA_NO_NODE, GFP_KERNEL, caller);
+#else
return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
NUMA_NO_NODE, GFP_KERNEL, caller);
+#endif
}
/**
{
struct vmap_area *va;
+ might_sleep();
+
va = find_vmap_area((unsigned long)addr);
if (va && va->flags & VM_VM_AREA) {
struct vm_struct *vm = va->vm;
kfree(area);
return;
}
-
+
+static inline void __vfree_deferred(const void *addr)
+{
+ /*
+ * Use raw_cpu_ptr() because this can be called from preemptible
+ * context. Preemption is absolutely fine here, because the llist_add()
+ * implementation is lockless, so it works even if we are adding to
+ * another cpu's list. schedule_work() should be fine with this too.
+ */
+ struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
+
+ if (llist_add((struct llist_node *)addr, &p->list))
+ schedule_work(&p->wq);
+}
+
+/**
+ * vfree_atomic - release memory allocated by vmalloc()
+ * @addr: memory base address
+ *
+ * This one is just like vfree() but can be called in any atomic context
+ * except NMIs.
+ */
+void vfree_atomic(const void *addr)
+{
+ BUG_ON(in_nmi());
+
+ kmemleak_free(addr);
+
+ if (!addr)
+ return;
+ __vfree_deferred(addr);
+}
+
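A short usage sketch (stats_lock and old_buf are hypothetical): unlike vfree(), this variant is safe while atomic because the actual unmap is deferred to the vfree_deferred workqueue:

    spin_lock(&stats_lock);     /* atomic context */
    vfree_atomic(old_buf);      /* queued; real vunmap runs later */
    spin_unlock(&stats_lock);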
/**
* vfree - release memory allocated by vmalloc()
* @addr: memory base address
if (!addr)
return;
- if (unlikely(in_interrupt())) {
- struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
- if (llist_add((struct llist_node *)addr, &p->list))
- schedule_work(&p->wq);
- } else
+ if (unlikely(in_interrupt()))
+ __vfree_deferred(addr);
+ else
__vunmap(addr, 1);
}
EXPORT_SYMBOL(vfree);
return NULL;
/*
+ * First make sure the mappings are removed from all page-tables
+ * before they are freed.
+ */
+ vmalloc_sync_all();
+
+ /*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
* flag. It means that vm_struct is not fully initialized.
* Now, it is fully initialized, so remove this flag here.
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
*/
void __weak vmalloc_sync_all(void)
{
if (v->flags & VM_VPAGES)
seq_puts(m, " vpages");
+ if (v->flags & VM_LOWMEM)
+ seq_puts(m, " lowmem");
+
show_numa_info(m, v);
seq_putc(m, '\n');
return 0;
}
}
+ /* WMM specification requires all 4 ACIs. */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (params[ac].cw_min == 0) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (missing AC %d), using defaults\n",
+ ac);
+ return false;
+ }
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
mlme_dbg(sdata,
"WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
return -EOPNOTSUPP;
}
- auth_data = kzalloc(sizeof(*auth_data) + req->sae_data_len +
+ auth_data = kzalloc(sizeof(*auth_data) + req->auth_data_len +
req->ie_len, GFP_KERNEL);
if (!auth_data)
return -ENOMEM;
auth_data->bss = req->bss;
- if (req->sae_data_len >= 4) {
- __le16 *pos = (__le16 *) req->sae_data;
+ if (req->auth_data_len >= 4) {
+ __le16 *pos = (__le16 *) req->auth_data;
auth_data->sae_trans = le16_to_cpu(pos[0]);
auth_data->sae_status = le16_to_cpu(pos[1]);
- memcpy(auth_data->data, req->sae_data + 4,
- req->sae_data_len - 4);
- auth_data->data_len += req->sae_data_len - 4;
+ memcpy(auth_data->data, req->auth_data + 4,
+ req->auth_data_len - 4);
+ auth_data->data_len += req->auth_data_len - 4;
}
if (req->ie && req->ie_len) {
static int snd_compr_update_tstamp(struct snd_compr_stream *stream,
struct snd_compr_tstamp *tstamp)
{
+ int err = 0;
if (!stream->ops->pointer)
return -ENOTSUPP;
- stream->ops->pointer(stream, tstamp);
- pr_debug("dsp consumed till %d total %d bytes\n",
+ err = stream->ops->pointer(stream, tstamp);
+ if (err)
+ return err;
+ pr_debug("dsp consumed till %d total %llu bytes\n",
tstamp->byte_offset, tstamp->copied_total);
if (stream->direction == SND_COMPRESS_PLAYBACK)
stream->runtime->total_bytes_transferred = tstamp->copied_total;
(app_pointer * runtime->buffer_size);
dstn = runtime->buffer + app_pointer;
- pr_debug("copying %ld at %lld\n",
- (unsigned long)count, app_pointer);
+ pr_debug("copying %zu at %lld\n",
+ count, app_pointer);
if (count < runtime->buffer_size - app_pointer) {
if (copy_from_user(dstn, buf, count))
return -EFAULT;
}
avail = snd_compr_get_avail(stream);
- pr_debug("avail returned %ld\n", (unsigned long)avail);
+ pr_debug("avail returned %zu\n", avail);
/* calculate how much we can write to buffer */
if (avail > count)
avail = count;
}
avail = snd_compr_get_avail(stream);
- pr_debug("avail returned %ld\n", (unsigned long)avail);
+ pr_debug("avail returned %zu\n", avail);
/* calculate how much we can read from buffer */
if (avail > count)
avail = count;
poll_wait(f, &stream->runtime->sleep, wait);
avail = snd_compr_get_avail(stream);
- pr_debug("avail is %ld\n", (unsigned long)avail);
+ pr_debug("avail is %zu\n", avail);
/* check if we have at least one fragment to fill */
switch (stream->runtime->state) {
case SNDRV_PCM_STATE_DRAINING:
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > INT_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
params->buffer.fragments == 0)
return -EINVAL;
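
The guard rejects fragment geometries whose byte total would wrap a 32-bit value; a worked example:

    /*
     * fragment_size = 0x10000, fragments = 0x10001:
     *   true product = 0x100010000, which wraps to 0x10000 in 32 bits.
     * The check catches it first:
     *   U32_MAX / 0x10000 = 0xFFFF, and 0x10001 > 0xFFFF -> -EINVAL
     */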
stream->metadata_set = false;
stream->next_track = false;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
} else {
return -EPERM;
}
static inline int
snd_compr_tstamp(struct snd_compr_stream *stream, unsigned long arg)
{
- struct snd_compr_tstamp tstamp = {0};
+ struct snd_compr_tstamp tstamp;
int ret;
+ memset(&tstamp, 0, sizeof(tstamp));
ret = snd_compr_update_tstamp(stream, &tstamp);
if (ret == 0)
ret = copy_to_user((struct snd_compr_tstamp __user *)arg,
{
int retval;
- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_SETUP:
+ if (stream->direction != SND_COMPRESS_CAPTURE)
+ return -EPERM;
+ break;
+ case SNDRV_PCM_STATE_PREPARED:
+ break;
+ default:
return -EPERM;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
if (!retval)
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
return -EPERM;
+ default:
+ break;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
- snd_compr_drain_notify(stream);
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ wake_up(&stream->runtime->sleep);
stream->runtime->total_bytes_available = 0;
stream->runtime->total_bytes_transferred = 0;
}
return retval;
}
-static int snd_compress_wait_for_drain(struct snd_compr_stream *stream)
-{
- int ret;
-
- /*
- * We are called with lock held. So drop the lock while we wait for
- * drain complete notfication from the driver
- *
- * It is expected that driver will notify the drain completion and then
- * stream will be moved to SETUP state, even if draining resulted in an
- * error. We can trigger next track after this.
- */
- stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
- mutex_unlock(&stream->device->lock);
-
- /* we wait for drain to complete here, drain can return when
- * interruption occurred, wait returned error or success.
- * For the first two cases we don't do anything different here and
- * return after waking up
- */
-
- ret = wait_event_interruptible(stream->runtime->sleep,
- (stream->runtime->state != SNDRV_PCM_STATE_DRAINING));
- if (ret == -ERESTARTSYS)
- pr_debug("wait aborted by a signal");
- else if (ret)
- pr_debug("wait for drain failed with %d\n", ret);
-
-
- wake_up(&stream->runtime->sleep);
- mutex_lock(&stream->device->lock);
-
- return ret;
-}
-
+/* This function is called without the device lock held; we change stream
+ * states here, so acquire the lock while inspecting the stream state but
+ * release it before invoking the DSP, as that call may take a while.
+ */
static int snd_compr_drain(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+ mutex_lock(&stream->device->lock);
- return -EPERM;
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ retval = -EPERM;
+ goto ret;
+ case SNDRV_PCM_STATE_XRUN:
+ retval = -EPIPE;
+ goto ret;
+ default:
+ break;
}
+ mutex_unlock(&stream->device->lock);
+
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
- if (retval) {
- pr_debug("SND_COMPR_TRIGGER_DRAIN failed %d\n", retval);
+ mutex_lock(&stream->device->lock);
+ if (!retval) {
+ stream->runtime->state = SNDRV_PCM_STATE_DRAINING;
wake_up(&stream->runtime->sleep);
- return retval;
}
- return snd_compress_wait_for_drain(stream);
+ret:
+ mutex_unlock(&stream->device->lock);
+ return retval;
}
static int snd_compr_next_track(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+ mutex_lock(&stream->device->lock);
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ mutex_unlock(&stream->device->lock);
return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ mutex_unlock(&stream->device->lock);
+ return -EPIPE;
+ default:
+ break;
}
+ mutex_unlock(&stream->device->lock);
+
/* stream can be drained only when next track has been signalled */
if (stream->next_track == false)
return -EPERM;
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_PARTIAL_DRAIN);
- if (retval) {
- pr_debug("Partial drain returned failure\n");
- wake_up(&stream->runtime->sleep);
- return retval;
- }
stream->next_track = false;
- return snd_compress_wait_for_drain(stream);
+ return retval;
+}
+
+static int snd_compr_set_next_track_param(struct snd_compr_stream *stream,
+ unsigned long arg)
+{
+ union snd_codec_options codec_options;
+ int retval;
+
+ /* set next track params when stream is running or has been setup */
+ if (stream->runtime->state != SNDRV_PCM_STATE_SETUP &&
+ stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+ return -EPERM;
+
+ if (!stream->ops->set_next_track_param)
+ return -ENOTSUPP;
+
+ if (copy_from_user(&codec_options, (void __user *)arg,
+ sizeof(codec_options)))
+ return -EFAULT;
+
+ retval = stream->ops->set_next_track_param(stream, &codec_options);
+ return retval;
+}
+
+static int snd_compress_simple_ioctls(struct file *file,
+ struct snd_compr_stream *stream,
+ unsigned int cmd, unsigned long arg)
+{
+ int retval = -ENOTTY;
+
+ switch (_IOC_NR(cmd)) {
+ case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
+ retval = put_user(SNDRV_COMPRESS_VERSION,
+ (int __user *)arg) ? -EFAULT : 0;
+ break;
+
+ case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
+ retval = snd_compr_get_caps(stream, arg);
+ break;
+
+ case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
+ retval = snd_compr_get_codec_caps(stream, arg);
+ break;
+
+
+ case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
+ retval = snd_compr_tstamp(stream, arg);
+ break;
+
+ case _IOC_NR(SNDRV_COMPRESS_AVAIL):
+ retval = snd_compr_ioctl_avail(stream, arg);
+ break;
+
+ /* Drain and partial drain need special handling: the lock must be
+ * dropped here because the stream would otherwise block on the DSP
+ * while draining. Locking is handled inside the respective functions.
+ */
+ case _IOC_NR(SNDRV_COMPRESS_DRAIN):
+ retval = snd_compr_drain(stream);
+ break;
+
+ case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
+ retval = snd_compr_partial_drain(stream);
+ break;
+ }
+
+ return retval;
}
static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
stream = &data->stream;
if (snd_BUG_ON(!stream))
return -EFAULT;
+
mutex_lock(&stream->device->lock);
switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
- retval = put_user(SNDRV_COMPRESS_VERSION,
- (int __user *)arg) ? -EFAULT : 0;
- break;
- case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
- retval = snd_compr_get_caps(stream, arg);
- break;
#ifndef COMPR_CODEC_CAPS_OVERFLOW
case _IOC_NR(SNDRV_COMPRESS_GET_CODEC_CAPS):
retval = snd_compr_get_codec_caps(stream, arg);
case _IOC_NR(SNDRV_COMPRESS_SET_PARAMS):
retval = snd_compr_set_params(stream, arg);
break;
+
case _IOC_NR(SNDRV_COMPRESS_GET_PARAMS):
retval = snd_compr_get_params(stream, arg);
break;
+
case _IOC_NR(SNDRV_COMPRESS_SET_METADATA):
retval = snd_compr_set_metadata(stream, arg);
break;
+
case _IOC_NR(SNDRV_COMPRESS_GET_METADATA):
retval = snd_compr_get_metadata(stream, arg);
break;
- case _IOC_NR(SNDRV_COMPRESS_TSTAMP):
- retval = snd_compr_tstamp(stream, arg);
- break;
- case _IOC_NR(SNDRV_COMPRESS_AVAIL):
- retval = snd_compr_ioctl_avail(stream, arg);
- break;
+
case _IOC_NR(SNDRV_COMPRESS_PAUSE):
retval = snd_compr_pause(stream);
break;
+
case _IOC_NR(SNDRV_COMPRESS_RESUME):
retval = snd_compr_resume(stream);
break;
+
case _IOC_NR(SNDRV_COMPRESS_START):
retval = snd_compr_start(stream);
break;
+
case _IOC_NR(SNDRV_COMPRESS_STOP):
retval = snd_compr_stop(stream);
break;
- case _IOC_NR(SNDRV_COMPRESS_DRAIN):
- retval = snd_compr_drain(stream);
- break;
- case _IOC_NR(SNDRV_COMPRESS_PARTIAL_DRAIN):
- retval = snd_compr_partial_drain(stream);
- break;
+
case _IOC_NR(SNDRV_COMPRESS_NEXT_TRACK):
retval = snd_compr_next_track(stream);
break;
+ case _IOC_NR(SNDRV_COMPRESS_SET_NEXT_TRACK_PARAM):
+ retval = snd_compr_set_next_track_param(stream, arg);
+ break;
+
+ default:
+ mutex_unlock(&stream->device->lock);
+ return snd_compress_simple_ioctls(f, stream, cmd, arg);
+
}
+
mutex_unlock(&stream->device->lock);
return retval;
}
#endif
static const struct file_operations snd_compr_file_ops = {
- .owner = THIS_MODULE,
- .open = snd_compr_open,
- .release = snd_compr_free,
- .write = snd_compr_write,
- .read = snd_compr_read,
+ .owner = THIS_MODULE,
+ .open = snd_compr_open,
+ .release = snd_compr_free,
+ .write = snd_compr_write,
+ .read = snd_compr_read,
.unlocked_ioctl = snd_compr_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = snd_compr_ioctl_compat,
+#else
+ .compat_ioctl = snd_compr_ioctl,
#endif
- .mmap = snd_compr_mmap,
- .poll = snd_compr_poll,
+ .mmap = snd_compr_mmap,
+ .poll = snd_compr_poll,
};
static int snd_compress_dev_register(struct snd_device *device)
}
EXPORT_SYMBOL_GPL(snd_compress_new);
+/**
+ * snd_compress_free - free compress device
+ * @card: sound card pointer
+ * @compr: compress device pointer
+ */
+void snd_compress_free(struct snd_card *card, struct snd_compr *compr)
+{
+ snd_device_free(card, compr);
+}
+EXPORT_SYMBOL_GPL(snd_compress_free);
+
static int snd_compress_add_device(struct snd_compr *device)
{
int ret;