struct parse_events_error err;
int ret;
- bzero(&err, sizeof(err));
+ parse_events_error__init(&err);
ret = parse_events(evlist, str, &err);
if (err.str)
parse_events_error__print(&err, "tracepoint");
+ parse_events_error__exit(&err);
return ret;
}
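
For orientation, a minimal sketch of the lifecycle every call site below is converted to, using only the parse_events_error__init()/__exit() helpers defined further down in this patch (the wrapper shape is illustrative, not a specific hunk):

	struct parse_events_error err;
	int ret;

	parse_events_error__init(&err);           /* zero all fields */
	ret = parse_events(evlist, str, &err);
	if (ret)
		parse_events_error__print(&err, str); /* reads err.str/err.help */
	parse_events_error__exit(&err);           /* frees the error strings */
	return ret;
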
static struct evlist *bench__create_evlist(char *evstr)
{
- struct parse_events_error err = { .idx = 0, };
+ struct parse_events_error err;
struct evlist *evlist = evlist__new();
int ret;
return NULL;
}
+ parse_events_error__init(&err);
ret = parse_events(evlist, evstr, &err);
if (ret) {
parse_events_error__print(&err, evstr);
+ parse_events_error__exit(&err);
pr_err("Run 'perf list' for a list of valid events\n");
ret = 1;
goto out_delete_evlist;
}
-
+ parse_events_error__exit(&err);
ret = evlist__create_maps(evlist, &opts.target);
if (ret < 0) {
pr_err("Not enough memory to create thread/cpu maps\n");
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
- struct parse_events_error errinfo;
-
/* Set attrs if no event is selected and !null_run: */
if (stat_config.null_run)
return 0;
- bzero(&errinfo, sizeof(errinfo));
if (transaction_run) {
+ struct parse_events_error errinfo;
/* Handle -T as -M transaction. Once platform specific metrics
* support has been added to the json files, all architectures
* will use this approach. To determine transaction support
&stat_config.metric_events);
}
+ parse_events_error__init(&errinfo);
if (pmu_have_event("cpu", "cycles-ct") &&
pmu_have_event("cpu", "el-start"))
err = parse_events(evsel_list, transaction_attrs,
if (err) {
fprintf(stderr, "Cannot set up transaction events\n");
parse_events_error__print(&errinfo, transaction_attrs);
- return -1;
}
- return 0;
+ parse_events_error__exit(&errinfo);
+ return err ? -1 : 0;
}
if (smi_cost) {
+ struct parse_events_error errinfo;
int smi;
if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) {
smi_reset = true;
}
- if (pmu_have_event("msr", "aperf") &&
- pmu_have_event("msr", "smi")) {
- if (!force_metric_only)
- stat_config.metric_only = true;
- err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
- } else {
+ if (!pmu_have_event("msr", "aperf") ||
+ !pmu_have_event("msr", "smi")) {
fprintf(stderr, "To measure SMI cost, it needs "
"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
- parse_events_error__print(&errinfo, smi_cost_attrs);
return -1;
}
+ if (!force_metric_only)
+ stat_config.metric_only = true;
+
+ parse_events_error__init(&errinfo);
+ err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
if (err) {
parse_events_error__print(&errinfo, smi_cost_attrs);
fprintf(stderr, "Cannot set up SMI cost events\n");
- return -1;
}
- return 0;
+ parse_events_error__exit(&errinfo);
+ return err ? -1 : 0;
}
if (topdown_run) {
return -1;
}
if (topdown_attrs[0] && str) {
+ struct parse_events_error errinfo;
if (warn)
arch_topdown_group_warn();
setup_metrics:
+ parse_events_error__init(&errinfo);
err = parse_events(evsel_list, str, &errinfo);
if (err) {
fprintf(stderr,
"Cannot set up top down events %s: %d\n",
str, err);
parse_events_error__print(&errinfo, str);
+ parse_events_error__exit(&errinfo);
free(str);
return -1;
}
+ parse_events_error__exit(&errinfo);
} else {
fprintf(stderr, "System does not support topdown\n");
return -1;
if (!evsel_list->core.nr_entries) {
if (perf_pmu__has_hybrid()) {
+ struct parse_events_error errinfo;
const char *hybrid_str = "cycles,instructions,branches,branch-misses";
if (target__has_cpu(&target))
return -1;
}
+ parse_events_error__init(&errinfo);
err = parse_events(evsel_list, hybrid_str, &errinfo);
if (err) {
fprintf(stderr,
"Cannot set up hybrid events %s: %d\n",
hybrid_str, err);
parse_events_error__print(&errinfo, hybrid_str);
- return -1;
}
- return err;
+ parse_events_error__exit(&errinfo);
+ return err ? -1 : 0;
}
if (target__has_cpu(&target))
struct parse_events_error err;
int ret;
- bzero(&err, sizeof(err));
+ parse_events_error__init(&err);
ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (ret) {
- free(err.str);
- free(err.help);
- free(err.first_str);
- free(err.first_help);
+ parse_events_error__exit(&err);
+ if (ret)
return false;
- }
evlist__for_each_entry_safe(evlist, evsel, tmp) {
if (!strstarts(evsel__name(evsel), "probe:vfs_getname"))
if (trace.perfconfig_events != NULL) {
struct parse_events_error parse_err;
- bzero(&parse_err, sizeof(parse_err));
+ parse_events_error__init(&parse_err);
err = parse_events(trace.evlist, trace.perfconfig_events, &parse_err);
- if (err) {
+ if (err)
parse_events_error__print(&parse_err, trace.perfconfig_events);
+ parse_events_error__exit(&parse_err);
+ if (err)
goto out;
- }
}
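
Note the split into two if (err) checks above: parse_events_error__print() reads the strings that parse_events_error__exit() frees, so the print must come first, while the exit must run unconditionally, on the success path too, before any goto out.
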
if ((nr_cgroups || trace.cgroup) && !trace.opts.target.system_wide) {
goto out_delete_evlist;
}
- bzero(&parse_error, sizeof(parse_error));
+ parse_events_error__init(&parse_error);
/*
* Set backward bit, ring buffer should be writing from end. Record
* it in aux evlist
*/
err = parse_events(evlist, "syscalls:sys_enter_prctl/overwrite/", &parse_error);
+ parse_events_error__exit(&parse_error);
if (err) {
pr_debug("Failed to parse tracepoint event, try use root\n");
ret = TEST_SKIP;
struct parse_events_state parse_state;
struct parse_events_error parse_error;
- bzero(&parse_error, sizeof(parse_error));
+ parse_events_error__init(&parse_error);
bzero(&parse_state, sizeof(parse_state));
parse_state.error = &parse_error;
INIT_LIST_HEAD(&parse_state.list);
err = parse_events_load_bpf_obj(&parse_state, &parse_state.list, obj, NULL);
+ parse_events_error__exit(&parse_error);
if (err || list_empty(&parse_state.list)) {
pr_debug("Failed to add events selected by BPF\n");
return TEST_FAIL;
evlist = evlist__new();
TEST_ASSERT_VAL("failed to get evlist", evlist);
+ parse_events_error__init(&err);
ret = parse_events(evlist, event_str, &err);
if (ret < 0) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
rblist__init(&metric_events);
ret = test_expand_events(evlist, &metric_events);
out:
+ parse_events_error__exit(&err);
evlist__delete(evlist);
return ret;
}
struct evlist *evlist;
int ret;
- bzero(&err, sizeof(err));
if (e->valid && !e->valid()) {
pr_debug("... SKIP");
return 0;
if (evlist == NULL)
return -ENOMEM;
+ parse_events_error__init(&err);
ret = parse_events(evlist, e->name, &err);
if (ret) {
pr_debug("failed to parse event '%s', err %d, str '%s'\n",
} else {
ret = e->check(evlist);
}
-
+ parse_events_error__exit(&err);
evlist__delete(evlist);
return ret;
static int check_parse_cpu(const char *id, bool same_cpu, const struct pmu_event *pe)
{
- struct parse_events_error error = { .idx = 0, };
+ struct parse_events_error error;
+ int ret;
- int ret = check_parse_id(id, &error, NULL);
+ parse_events_error__init(&error);
+ ret = check_parse_id(id, &error, NULL);
if (ret && same_cpu) {
pr_warning("Parse event failed metric '%s' id '%s' expr '%s'\n",
pe->metric_name, id, pe->metric_expr);
id, pe->metric_name, pe->metric_expr);
ret = 0;
}
- free(error.str);
- free(error.help);
- free(error.first_str);
- free(error.first_help);
+ parse_events_error__exit(&error);
return ret;
}
static int check_parse_fake(const char *id)
{
- struct parse_events_error error = { .idx = 0, };
- int ret = check_parse_id(id, &error, &perf_pmu__fake);
+ struct parse_events_error error;
+ int ret;
- free(error.str);
- free(error.help);
- free(error.first_str);
- free(error.first_help);
+ parse_events_error__init(&error);
+ ret = check_parse_id(id, &error, &perf_pmu__fake);
+ parse_events_error__exit(&error);
return ret;
}
session->evlist = evlist__new();
TEST_ASSERT_VAL("can't get evlist", session->evlist);
+ parse_events_error__init(&err);
parse_events(session->evlist, "cpu_core/cycles/", &err);
+ parse_events_error__exit(&err);
}
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
goto err_out;
}
pr_debug("Parsing metric events '%s'\n", events.buf);
- bzero(&parse_error, sizeof(parse_error));
+ parse_events_error__init(&parse_error);
ret = __parse_events(parsed_evlist, events.buf, &parse_error, fake_pmu);
if (ret) {
parse_events_error__print(&parse_error, events.buf);
*out_evlist = parsed_evlist;
parsed_evlist = NULL;
err_out:
+ parse_events_error__exit(&parse_error);
evlist__delete(parsed_evlist);
strbuf_release(&events);
return ret;
return ret;
}
+void parse_events_error__init(struct parse_events_error *err)
+{
+ bzero(err, sizeof(*err));
+}
+
+void parse_events_error__exit(struct parse_events_error *err)
+{
+ zfree(&err->str);
+ zfree(&err->help);
+ zfree(&err->first_str);
+ zfree(&err->first_help);
+}
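
Because init leaves every pointer NULL and zfree() both tolerates a NULL pointer and resets it after freeing, parse_events_error__exit() is safe whether or not an error was ever recorded, and even if it is called twice. A sketch of the degenerate case, assuming no parse error occurred:

	struct parse_events_error err;

	parse_events_error__init(&err);  /* all string fields start out NULL */
	parse_events_error__exit(&err);  /* free(NULL) is a no-op */
	parse_events_error__exit(&err);  /* zfree() reset the fields, still safe */
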
+
void parse_events_error__handle(struct parse_events_error *err, int idx,
char *str, char *help)
{
return;
__parse_events_error__print(err->idx, err->str, err->help, event);
- zfree(&err->str);
- zfree(&err->help);
if (err->num_errors > 1) {
fputs("\nInitial error:\n", stderr);
__parse_events_error__print(err->first_idx, err->first_str,
err->first_help, event);
- zfree(&err->first_str);
- zfree(&err->first_help);
}
}
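
With the zfree() calls removed here, parse_events_error__print() no longer consumes the strings it prints; ownership stays with the caller until parse_events_error__exit(), which is why every call site above pairs the print with an explicit exit.
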
struct parse_events_error err;
int ret;
- bzero(&err, sizeof(err));
+ parse_events_error__init(&err);
ret = parse_events(evlist, str, &err);
if (ret) {
parse_events_error__print(&err, str);
fprintf(stderr, "Run 'perf list' for a list of valid events\n");
}
+ parse_events_error__exit(&err);
return ret;
}
int valid_event_mount(const char *eventfs);
char *parse_events_formats_error_string(char *additional_terms);
+void parse_events_error__init(struct parse_events_error *err);
+void parse_events_error__exit(struct parse_events_error *err);
void parse_events_error__handle(struct parse_events_error *err, int idx,
char *str, char *help);
void parse_events_error__print(struct parse_events_error *err,