#include "net/net.h"
#include "net/slirp.h"
#include "chardev/char-fe.h"
+#include "chardev/char-io.h"
+#include "chardev/char-mux.h"
#include "ui/qemu-spice.h"
#include "sysemu/numa.h"
#include "monitor/monitor.h"
#include "qemu/readline.h"
#include "ui/console.h"
#include "ui/input.h"
-#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"
#include "audio/audio.h"
#include "disas/disas.h"
#include "qapi/qmp/qjson.h"
#include "qapi/qmp/json-streamer.h"
#include "qapi/qmp/json-parser.h"
+#include "qapi/qmp/qlist.h"
#include "qom/object_interfaces.h"
#include "trace-root.h"
#include "trace/control.h"
#include "qapi/qapi-introspect.h"
#include "sysemu/qtest.h"
#include "sysemu/cpus.h"
+#include "sysemu/iothread.h"
#include "qemu/cutils.h"
#if defined(TARGET_S390X)
const char *args_type;
const char *params;
const char *help;
+ const char *flags; /* p=preconfig */
void (*cmd)(Monitor *mon, const QDict *qdict);
/* @sub_table is a list of 2nd level of commands. If it does not exist,
* cmd should be used. If it exists, sub_table[?].cmd should be
 * used instead.
*/
QmpCommandList *commands;
+ bool qmp_caps[QMP_CAPABILITY__MAX];
+ /*
+ * Protects qmp request/response queue. Please take monitor_lock
+ * first when used together.
+ */
+ QemuMutex qmp_queue_lock;
+ /* Input queue that holds all the parsed QMP requests */
+ GQueue *qmp_requests;
+ /* Output queue contains all the QMP responses in order */
+ GQueue *qmp_responses;
} MonitorQMP;
/*
CharBackend chr;
int reset_seen;
int flags;
- int suspend_cnt;
+ int suspend_cnt; /* Needs to be accessed atomically */
bool skip_flush;
+ bool use_io_thr;
- QemuMutex out_lock;
- QString *outbuf;
- guint out_watch;
-
- /* Read under either BQL or out_lock, written with BQL+out_lock. */
- int mux_out;
-
+ /*
+ * State used only in the thread "owning" the monitor.
+ * If @use_io_thr, this is mon_global.mon_iothread.
+ * Else, it's the main thread.
+ * These members can be safely accessed without locks.
+ */
ReadLineState *rs;
+
MonitorQMP qmp;
gchar *mon_cpu_path;
BlockCompletionFunc *password_completion_cb;
void *password_opaque;
mon_cmd_t *cmd_table;
- QLIST_HEAD(,mon_fd_t) fds;
- QLIST_ENTRY(Monitor) entry;
+ QTAILQ_ENTRY(Monitor) entry;
+
+ /*
+ * The per-monitor lock. We can't access guest memory when holding
+ * the lock.
+ */
+ QemuMutex mon_lock;
+
+ /*
+ * Fields that are protected by the per-monitor lock.
+ */
+ QLIST_HEAD(, mon_fd_t) fds;
+ QString *outbuf;
+ guint out_watch;
+ /* Read under either BQL or mon_lock, written with BQL+mon_lock. */
+ int mux_out;
+};
+
+/* Let's add monitor global variables to this struct. */
+static struct {
+ IOThread *mon_iothread;
+ /* Bottom half to dispatch the requests received from IO thread */
+ QEMUBH *qmp_dispatcher_bh;
+ /* Bottom half to deliver the responses back to clients */
+ QEMUBH *qmp_respond_bh;
+} mon_global;
+
+struct QMPRequest {
+ /* Owner of the request */
+ Monitor *mon;
+ /* "id" field of the request */
+ QObject *id;
+ /* Request object to be handled */
+ QObject *req;
+ /*
+ * Whether we need to resume the monitor afterward. This flag is
+ * used to emulate the old QMP server behavior that the current
+ * command must be completed before execution of the next one.
+ */
+ bool need_resume;
};
+typedef struct QMPRequest QMPRequest;
/* QMP checker flags */
#define QMP_ACCEPT_UNKNOWNS 1
-/* Protects mon_list, monitor_event_state. */
+/* Protects mon_list, monitor_qapi_event_state. */
static QemuMutex monitor_lock;
+static GHashTable *monitor_qapi_event_state;
+static QTAILQ_HEAD(mon_list, Monitor) mon_list;
-static QLIST_HEAD(mon_list, Monitor) mon_list;
+/* Protects mon_fdsets */
+static QemuMutex mon_fdsets_lock;
static QLIST_HEAD(mon_fdsets, MonFdset) mon_fdsets;
+
static int mon_refcount;
static mon_cmd_t mon_cmds[];
Monitor *cur_mon;
-static QEMUClockType event_clock_type = QEMU_CLOCK_REALTIME;
-
static void monitor_command_cb(void *opaque, const char *cmdline,
void *readline_opaque);
}
/**
+ * Return true if @mon reads its commands via readline.  Note: not
+ * all HMP monitors use readline, e.g., gdbserver has a
+ * non-interactive HMP monitor, so readline is not used there.
+ */
+static inline bool monitor_uses_readline(const Monitor *mon)
+{
+    return mon->flags & MONITOR_USE_READLINE;
+}
+
+/* Return true if @mon is an HMP monitor that is not using readline. */
+static inline bool monitor_is_hmp_non_interactive(const Monitor *mon)
+{
+    return !monitor_is_qmp(mon) && !monitor_uses_readline(mon);
+}
+
+/*
+ * Return the clock to use for recording an event's time.
+ * Beware: result is invalid before configure_accelerator().
+ */
+static inline QEMUClockType monitor_get_event_clock(void)
+{
+    /*
+     * Under qtest use the virtual clock, so tests can control time
+     * and thereby verify that the event rate limits are enforced;
+     * otherwise use real time.
+     */
+    return qtest_enabled() ? QEMU_CLOCK_VIRTUAL : QEMU_CLOCK_REALTIME;
+}
+
+/**
* Is the current monitor, if any, a QMP monitor?
*/
bool monitor_cur_is_qmp(void)
}
}
+/* Free @req, dropping the references it holds on its id and request. */
+static void qmp_request_free(QMPRequest *req)
+{
+    qobject_unref(req->id);
+    qobject_unref(req->req);
+    g_free(req);
+}
+
+/* Must be called with mon->qmp.qmp_queue_lock held */
+static void monitor_qmp_cleanup_req_queue_locked(Monitor *mon)
+{
+    /* Drop and free every request still pending on the input queue. */
+    while (!g_queue_is_empty(mon->qmp.qmp_requests)) {
+        qmp_request_free(g_queue_pop_head(mon->qmp.qmp_requests));
+    }
+}
+
+/* Must be called with mon->qmp.qmp_queue_lock held */
+static void monitor_qmp_cleanup_resp_queue_locked(Monitor *mon)
+{
+    /* Drop every queued response, releasing our reference on each. */
+    while (!g_queue_is_empty(mon->qmp.qmp_responses)) {
+        qobject_unref((QObject *)g_queue_pop_head(mon->qmp.qmp_responses));
+    }
+}
+
+/* Empty both the request and the response queue of @mon. */
+static void monitor_qmp_cleanup_queues(Monitor *mon)
+{
+    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    monitor_qmp_cleanup_req_queue_locked(mon);
+    monitor_qmp_cleanup_resp_queue_locked(mon);
+    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+}
+
+
static void monitor_flush_locked(Monitor *mon);
static gboolean monitor_unblocked(GIOChannel *chan, GIOCondition cond,
{
Monitor *mon = opaque;
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->out_watch = 0;
monitor_flush_locked(mon);
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
return FALSE;
}
-/* Called with mon->out_lock held. */
+/* Called with mon->mon_lock held. */
static void monitor_flush_locked(Monitor *mon)
{
int rc;
rc = qemu_chr_fe_write(&mon->chr, (const uint8_t *) buf, len);
if ((rc < 0 && errno != EAGAIN) || (rc == len)) {
/* all flushed or error */
- QDECREF(mon->outbuf);
+ qobject_unref(mon->outbuf);
mon->outbuf = qstring_new();
return;
}
if (rc > 0) {
/* partial write */
QString *tmp = qstring_from_str(buf + rc);
- QDECREF(mon->outbuf);
+ qobject_unref(mon->outbuf);
mon->outbuf = tmp;
}
if (mon->out_watch == 0) {
void monitor_flush(Monitor *mon)
{
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
monitor_flush_locked(mon);
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
}
/* flush at every end of line */
{
char c;
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
for(;;) {
c = *str++;
if (c == '\0')
monitor_flush_locked(mon);
}
}
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
}
void monitor_vprintf(Monitor *mon, const char *fmt, va_list ap)
return 0;
}
-static void monitor_json_emitter(Monitor *mon, const QObject *data)
+static void monitor_json_emitter_raw(Monitor *mon,
+ QObject *data)
{
QString *json;
qstring_append_chr(json, '\n');
monitor_puts(mon, qstring_get_str(json));
- QDECREF(json);
+ qobject_unref(json);
+}
+
+static void monitor_json_emitter(Monitor *mon, QObject *data)
+{
+ if (mon->use_io_thr) {
+ /*
+ * If using IO thread, we need to queue the item so that IO
+ * thread will do the rest for us. Take refcount so that
+ * caller won't free the data (which will be finally freed in
+ * responder thread).
+ */
+ qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+ g_queue_push_tail(mon->qmp.qmp_responses, qobject_ref(data));
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+ qemu_bh_schedule(mon_global.qmp_respond_bh);
+ } else {
+ /*
+ * If not using monitor IO thread, then we are in main thread.
+ * Do the emission right away.
+ */
+ monitor_json_emitter_raw(mon, data);
+ }
+}
+
+struct QMPResponse {
+ Monitor *mon;
+ QObject *data;
+};
+typedef struct QMPResponse QMPResponse;
+
+/*
+ * Pop the head of @mon's response queue.  Return NULL when the queue
+ * is empty; otherwise ownership of the reference passes to the caller.
+ */
+static QObject *monitor_qmp_response_pop_one(Monitor *mon)
+{
+    QObject *data;
+
+    qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+    data = g_queue_pop_head(mon->qmp.qmp_responses);
+    qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+
+    return data;
+}
+
+/* Emit all of @mon's pending responses synchronously. */
+static void monitor_qmp_response_flush(Monitor *mon)
+{
+    QObject *data;
+
+    while ((data = monitor_qmp_response_pop_one(mon))) {
+        monitor_json_emitter_raw(mon, data);
+        qobject_unref(data);
+    }
+}
+
+/*
+ * Pop a QMPResponse from any monitor's response queue into @response.
+ * Return false if all the queues are empty; else true.
+ */
+static bool monitor_qmp_response_pop_any(QMPResponse *response)
+{
+    Monitor *mon;
+    QObject *data = NULL;
+
+    /* monitor_lock protects mon_list while we scan it. */
+    qemu_mutex_lock(&monitor_lock);
+    QTAILQ_FOREACH(mon, &mon_list, entry) {
+        data = monitor_qmp_response_pop_one(mon);
+        if (data) {
+            response->mon = mon;
+            response->data = data;
+            break;
+        }
+    }
+    qemu_mutex_unlock(&monitor_lock);
+    return data != NULL;
+}
+
+/* Bottom half: drain every monitor's response queue onto the wire. */
+static void monitor_qmp_bh_responder(void *opaque)
+{
+    QMPResponse response;
+
+    while (monitor_qmp_response_pop_any(&response)) {
+        monitor_json_emitter_raw(response.mon, response.data);
+        qobject_unref(response.data);
+    }
}
static MonitorQAPIEventConf monitor_qapi_event_conf[QAPI_EVENT__MAX] = {
[QAPI_EVENT_VSERPORT_CHANGE] = { 1000 * SCALE_MS },
};
-GHashTable *monitor_qapi_event_state;
-
/*
* Emits the event to every monitor instance, @event is only used for trace
* Called with monitor_lock held.
Monitor *mon;
trace_monitor_protocol_event_emit(event, qdict);
- QLIST_FOREACH(mon, &mon_list, entry) {
+ QTAILQ_FOREACH(mon, &mon_list, entry) {
if (monitor_is_qmp(mon)
&& mon->qmp.commands != &qmp_cap_negotiation_commands) {
monitor_json_emitter(mon, QOBJECT(qdict));
/* Unthrottled event */
monitor_qapi_event_emit(event, qdict);
} else {
- QDict *data = qobject_to_qdict(qdict_get(qdict, "data"));
+ QDict *data = qobject_to(QDict, qdict_get(qdict, "data"));
MonitorQAPIEventState key = { .event = event, .data = data };
evstate = g_hash_table_lookup(monitor_qapi_event_state, &key);
* last send. Store event for sending when timer fires,
* replacing a prior stored event if any.
*/
- QDECREF(evstate->qdict);
- evstate->qdict = qdict;
- QINCREF(evstate->qdict);
+ qobject_unref(evstate->qdict);
+ evstate->qdict = qobject_ref(qdict);
} else {
/*
* Last send was (at least) evconf->rate ns ago.
* monitor_qapi_event_handler() in evconf->rate ns. Any
* events arriving before then will be delayed until then.
*/
- int64_t now = qemu_clock_get_ns(event_clock_type);
+ int64_t now = qemu_clock_get_ns(monitor_get_event_clock());
monitor_qapi_event_emit(event, qdict);
evstate = g_new(MonitorQAPIEventState, 1);
evstate->event = event;
- evstate->data = data;
- QINCREF(evstate->data);
+ evstate->data = qobject_ref(data);
evstate->qdict = NULL;
- evstate->timer = timer_new_ns(event_clock_type,
+ evstate->timer = timer_new_ns(monitor_get_event_clock(),
monitor_qapi_event_handler,
evstate);
g_hash_table_add(monitor_qapi_event_state, evstate);
qemu_mutex_lock(&monitor_lock);
if (evstate->qdict) {
- int64_t now = qemu_clock_get_ns(event_clock_type);
+ int64_t now = qemu_clock_get_ns(monitor_get_event_clock());
monitor_qapi_event_emit(evstate->event, evstate->qdict);
- QDECREF(evstate->qdict);
+ qobject_unref(evstate->qdict);
evstate->qdict = NULL;
timer_mod_ns(evstate->timer, now + evconf->rate);
} else {
g_hash_table_remove(monitor_qapi_event_state, evstate);
- QDECREF(evstate->data);
+ qobject_unref(evstate->data);
timer_free(evstate->timer);
g_free(evstate);
}
static void monitor_qapi_event_init(void)
{
- if (qtest_enabled()) {
- event_clock_type = QEMU_CLOCK_VIRTUAL;
- }
-
monitor_qapi_event_state = g_hash_table_new(qapi_event_throttle_hash,
qapi_event_throttle_equal);
qmp_event_set_func_emit(monitor_qapi_event_queue);
static void handle_hmp_command(Monitor *mon, const char *cmdline);
-static void monitor_data_init(Monitor *mon)
+static void monitor_data_init(Monitor *mon, bool skip_flush,
+ bool use_io_thr)
{
memset(mon, 0, sizeof(Monitor));
- qemu_mutex_init(&mon->out_lock);
+ qemu_mutex_init(&mon->mon_lock);
+ qemu_mutex_init(&mon->qmp.qmp_queue_lock);
mon->outbuf = qstring_new();
/* Use *mon_cmds by default. */
mon->cmd_table = mon_cmds;
+ mon->skip_flush = skip_flush;
+ mon->use_io_thr = use_io_thr;
+ mon->qmp.qmp_requests = g_queue_new();
+ mon->qmp.qmp_responses = g_queue_new();
}
static void monitor_data_destroy(Monitor *mon)
json_message_parser_destroy(&mon->qmp.parser);
}
readline_free(mon->rs);
- QDECREF(mon->outbuf);
- qemu_mutex_destroy(&mon->out_lock);
+ qobject_unref(mon->outbuf);
+ qemu_mutex_destroy(&mon->mon_lock);
+ qemu_mutex_destroy(&mon->qmp.qmp_queue_lock);
+ monitor_qmp_cleanup_req_queue_locked(mon);
+ monitor_qmp_cleanup_resp_queue_locked(mon);
+ g_queue_free(mon->qmp.qmp_requests);
+ g_queue_free(mon->qmp.qmp_responses);
}
char *qmp_human_monitor_command(const char *command_line, bool has_cpu_index,
char *output = NULL;
Monitor *old_mon, hmp;
- monitor_data_init(&hmp);
- hmp.skip_flush = true;
+ monitor_data_init(&hmp, true, false);
old_mon = cur_mon;
cur_mon = &hmp;
handle_hmp_command(&hmp, command_line);
cur_mon = old_mon;
- qemu_mutex_lock(&hmp.out_lock);
+ qemu_mutex_lock(&hmp.mon_lock);
if (qstring_get_length(hmp.outbuf) > 0) {
output = g_strdup(qstring_get_str(hmp.outbuf));
} else {
output = g_strdup("");
}
- qemu_mutex_unlock(&hmp.out_lock);
+ qemu_mutex_unlock(&hmp.mon_lock);
out:
monitor_data_destroy(&hmp);
p = list;
for(;;) {
pstart = p;
- p = strchr(p, '|');
- if (!p)
- p = pstart + strlen(pstart);
+ p = qemu_strchrnul(p, '|');
if ((p - pstart) == len && !memcmp(pstart, name, len))
return 1;
if (*p == '\0')
return -1;
}
+/*
+ * Returns true if the command can be executed in preconfig mode
+ * i.e. it has the 'p' flag.
+ */
+static bool cmd_can_preconfig(const mon_cmd_t *cmd)
+{
+    if (!cmd->flags) {
+        /* No flags at all implies the command is not preconfig-capable. */
+        return false;
+    }
+
+    return strchr(cmd->flags, 'p');
+}
+
static void help_cmd_dump_one(Monitor *mon,
const mon_cmd_t *cmd,
char **prefix_args,
{
int i;
+ if (runstate_check(RUN_STATE_PRECONFIG) && !cmd_can_preconfig(cmd)) {
+ return;
+ }
+
for (i = 0; i < prefix_args_nb; i++) {
monitor_printf(mon, "%s ", prefix_args[i]);
}
/* Find one entry to dump */
for (cmd = cmds; cmd->name != NULL; cmd++) {
- if (compare_cmd(args[arg_index], cmd->name)) {
+ if (compare_cmd(args[arg_index], cmd->name) &&
+ ((!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd)))) {
if (cmd->sub_table) {
/* continue with next arg */
help_cmd_dump(mon, cmd->sub_table,
static void qmp_query_qmp_schema(QDict *qdict, QObject **ret_data,
Error **errp)
{
- *ret_data = qobject_from_json(qmp_schema_json, &error_abort);
+ *ret_data = qobject_from_qlit(&qmp_schema_qlit);
}
/*
#endif
}
-void monitor_init_qmp_commands(void)
+static void monitor_init_qmp_commands(void)
{
/*
* Two command lists:
qmp_init_marshal(&qmp_commands);
qmp_register_command(&qmp_commands, "query-qmp-schema",
- qmp_query_qmp_schema,
- QCO_NO_OPTIONS);
+ qmp_query_qmp_schema, QCO_ALLOW_PRECONFIG);
qmp_register_command(&qmp_commands, "device_add", qmp_device_add,
QCO_NO_OPTIONS);
qmp_register_command(&qmp_commands, "netdev_add", qmp_netdev_add,
QTAILQ_INIT(&qmp_cap_negotiation_commands);
qmp_register_command(&qmp_cap_negotiation_commands, "qmp_capabilities",
- qmp_marshal_qmp_capabilities, QCO_NO_OPTIONS);
+ qmp_marshal_qmp_capabilities, QCO_ALLOW_PRECONFIG);
+}
+
+/* Return whether capability @cap has been negotiated on for @mon. */
+static bool qmp_cap_enabled(Monitor *mon, QMPCapability cap)
+{
+    return mon->qmp.qmp_caps[cap];
+}
+
+/* Return whether the Out-Of-Band capability is enabled on @mon. */
+static bool qmp_oob_enabled(Monitor *mon)
+{
+    return qmp_cap_enabled(mon, QMP_CAPABILITY_OOB);
+}
+
+/*
+ * Validate the capabilities requested in @list against what @mon
+ * supports.  On the first unsupported capability, set @errp and
+ * return without applying anything.
+ */
+static void qmp_caps_check(Monitor *mon, QMPCapabilityList *list,
+                           Error **errp)
+{
+    for (; list; list = list->next) {
+        assert(list->value < QMP_CAPABILITY__MAX);
+        switch (list->value) {
+        case QMP_CAPABILITY_OOB:
+            if (!mon->use_io_thr) {
+                /*
+                 * Out-Of-Band only works with monitors that are
+                 * running on dedicated IOThread.
+                 */
+                error_setg(errp, "This monitor does not support "
+                           "Out-Of-Band (OOB)");
+                return;
+            }
+            break;
+        default:
+            break;
+        }
+    }
+}
+
+/* This function should only be called after capabilities are checked. */
+static void qmp_caps_apply(Monitor *mon, QMPCapabilityList *list)
+{
+    /* Mark every requested capability as enabled on @mon. */
+    for (; list; list = list->next) {
+        mon->qmp.qmp_caps[list->value] = true;
+    }
+}
+
+/*
+ * Return true if check successful, or false otherwise. When false is
+ * returned, detailed error will be in errp if provided.
+ */
+static bool qmp_cmd_oob_check(Monitor *mon, QDict *req, Error **errp)
+{
+    const char *command;
+    QmpCommand *cmd;
+
+    command = qdict_get_try_str(req, "execute");
+    if (!command) {
+        error_setg(errp, "Command field 'execute' missing");
+        return false;
+    }
+
+    /* Look the command up in the monitor's current command table. */
+    cmd = qmp_find_command(mon->qmp.commands, command);
+    if (!cmd) {
+        if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
+            error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
+                      "Expecting capabilities negotiation "
+                      "with 'qmp_capabilities'");
+        } else {
+            error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
+                      "The command %s has not been found", command);
+        }
+        return false;
+    }
+
+    /* OOB requests need both the session and the command to allow OOB. */
+    if (qmp_is_oob(req)) {
+        if (!qmp_oob_enabled(mon)) {
+            error_setg(errp, "Please enable Out-Of-Band first "
+                       "for the session during capabilities negotiation");
+            return false;
+        }
+        if (!(cmd->options & QCO_ALLOW_OOB)) {
+            error_setg(errp, "The command %s does not support OOB",
+                       command);
+            return false;
+        }
+    }
+
+    return true;
}
-void qmp_qmp_capabilities(Error **errp)
+void qmp_qmp_capabilities(bool has_enable, QMPCapabilityList *enable,
+ Error **errp)
{
+ Error *local_err = NULL;
+
if (cur_mon->qmp.commands == &qmp_commands) {
error_set(errp, ERROR_CLASS_COMMAND_NOT_FOUND,
"Capabilities negotiation is already complete, command "
return;
}
+ /* Enable QMP capabilities provided by the client if applicable. */
+ if (has_enable) {
+ qmp_caps_check(cur_mon, enable, &local_err);
+ if (local_err) {
+ /*
+ * Failed check on any of the capabilities will fail the
+ * entire command (and thus not apply any of the other
+ * capabilities that were also requested).
+ */
+ error_propagate(errp, local_err);
+ return;
+ }
+ qmp_caps_apply(cur_mon, enable);
+ }
+
cur_mon->qmp.commands = &qmp_commands;
}
-/* set the current CPU defined by the user */
+/* Set the current CPU defined by the user. Callers must hold BQL. */
int monitor_set_cpu(int cpu_index)
{
CPUState *cpu;
return 0;
}
+/* Callers must hold BQL. */
static CPUState *mon_get_cpu_sync(bool synchronize)
{
CPUState *cpu;
{
bool flatview = qdict_get_try_bool(qdict, "flatview", false);
bool dispatch_tree = qdict_get_try_bool(qdict, "dispatch_tree", false);
+ bool owner = qdict_get_try_bool(qdict, "owner", false);
- mtree_info((fprintf_function)monitor_printf, mon, flatview, dispatch_tree);
+ mtree_info((fprintf_function)monitor_printf, mon, flatview, dispatch_tree,
+ owner);
}
static void hmp_info_numa(Monitor *mon, const QDict *qdict)
void qmp_getfd(const char *fdname, Error **errp)
{
mon_fd_t *monfd;
- int fd;
+ int fd, tmp_fd;
fd = qemu_chr_fe_get_msgfd(&cur_mon->chr);
if (fd == -1) {
return;
}
+ qemu_mutex_lock(&cur_mon->mon_lock);
QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
- close(monfd->fd);
+ tmp_fd = monfd->fd;
monfd->fd = fd;
+ qemu_mutex_unlock(&cur_mon->mon_lock);
+ /* Make sure close() is out of critical section */
+ close(tmp_fd);
return;
}
monfd->fd = fd;
QLIST_INSERT_HEAD(&cur_mon->fds, monfd, next);
+ qemu_mutex_unlock(&cur_mon->mon_lock);
}
void qmp_closefd(const char *fdname, Error **errp)
{
mon_fd_t *monfd;
+ int tmp_fd;
+ qemu_mutex_lock(&cur_mon->mon_lock);
QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
QLIST_REMOVE(monfd, next);
- close(monfd->fd);
+ tmp_fd = monfd->fd;
g_free(monfd->name);
g_free(monfd);
+ qemu_mutex_unlock(&cur_mon->mon_lock);
+ /* Make sure close() is out of critical section */
+ close(tmp_fd);
return;
}
+ qemu_mutex_unlock(&cur_mon->mon_lock);
error_setg(errp, QERR_FD_NOT_FOUND, fdname);
}
{
mon_fd_t *monfd;
+ qemu_mutex_lock(&mon->mon_lock);
QLIST_FOREACH(monfd, &mon->fds, next) {
int fd;
QLIST_REMOVE(monfd, next);
g_free(monfd->name);
g_free(monfd);
+ qemu_mutex_unlock(&mon->mon_lock);
return fd;
}
+ qemu_mutex_unlock(&mon->mon_lock);
error_setg(errp, "File descriptor named '%s' has not been found", fdname);
return -1;
}
MonFdset *mon_fdset;
MonFdset *mon_fdset_next;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH_SAFE(mon_fdset, &mon_fdsets, next, mon_fdset_next) {
monitor_fdset_cleanup(mon_fdset);
}
+ qemu_mutex_unlock(&mon_fdsets_lock);
}
AddfdInfo *qmp_add_fd(bool has_fdset_id, int64_t fdset_id, bool has_opaque,
MonFdsetFd *mon_fdset_fd;
char fd_str[60];
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
goto error;
}
monitor_fdset_cleanup(mon_fdset);
+ qemu_mutex_unlock(&mon_fdsets_lock);
return;
}
error:
+ qemu_mutex_unlock(&mon_fdsets_lock);
if (has_fd) {
snprintf(fd_str, sizeof(fd_str), "fdset-id:%" PRId64 ", fd:%" PRId64,
fdset_id, fd);
MonFdsetFd *mon_fdset_fd;
FdsetInfoList *fdset_list = NULL;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
FdsetInfoList *fdset_info = g_malloc0(sizeof(*fdset_info));
FdsetFdInfoList *fdsetfd_list = NULL;
fdset_info->next = fdset_list;
fdset_list = fdset_info;
}
+ qemu_mutex_unlock(&mon_fdsets_lock);
return fdset_list;
}
MonFdsetFd *mon_fdset_fd;
AddfdInfo *fdinfo;
+ qemu_mutex_lock(&mon_fdsets_lock);
if (has_fdset_id) {
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
/* Break if match found or match impossible due to ordering by ID */
if (fdset_id < 0) {
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "fdset-id",
"a non-negative value");
+ qemu_mutex_unlock(&mon_fdsets_lock);
return NULL;
}
/* Use specified fdset ID */
fdinfo->fdset_id = mon_fdset->id;
fdinfo->fd = mon_fdset_fd->fd;
+ qemu_mutex_unlock(&mon_fdsets_lock);
return fdinfo;
}
int monitor_fdset_get_fd(int64_t fdset_id, int flags)
{
-#ifndef _WIN32
+#ifdef _WIN32
+ return -ENOENT;
+#else
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd;
int mon_fd_flags;
+ int ret;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
QLIST_FOREACH(mon_fdset_fd, &mon_fdset->fds, next) {
mon_fd_flags = fcntl(mon_fdset_fd->fd, F_GETFL);
if (mon_fd_flags == -1) {
- return -1;
+ ret = -errno;
+ goto out;
}
if ((flags & O_ACCMODE) == (mon_fd_flags & O_ACCMODE)) {
- return mon_fdset_fd->fd;
+ ret = mon_fdset_fd->fd;
+ goto out;
}
}
- errno = EACCES;
- return -1;
+ ret = -EACCES;
+ goto out;
}
-#endif
+ ret = -ENOENT;
- errno = ENOENT;
- return -1;
+out:
+ qemu_mutex_unlock(&mon_fdsets_lock);
+ return ret;
+#endif
}
int monitor_fdset_dup_fd_add(int64_t fdset_id, int dup_fd)
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd_dup;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
if (mon_fdset->id != fdset_id) {
continue;
}
QLIST_FOREACH(mon_fdset_fd_dup, &mon_fdset->dup_fds, next) {
if (mon_fdset_fd_dup->fd == dup_fd) {
- return -1;
+ goto err;
}
}
mon_fdset_fd_dup = g_malloc0(sizeof(*mon_fdset_fd_dup));
mon_fdset_fd_dup->fd = dup_fd;
QLIST_INSERT_HEAD(&mon_fdset->dup_fds, mon_fdset_fd_dup, next);
+ qemu_mutex_unlock(&mon_fdsets_lock);
return 0;
}
+
+err:
+ qemu_mutex_unlock(&mon_fdsets_lock);
return -1;
}
MonFdset *mon_fdset;
MonFdsetFd *mon_fdset_fd_dup;
+ qemu_mutex_lock(&mon_fdsets_lock);
QLIST_FOREACH(mon_fdset, &mon_fdsets, next) {
QLIST_FOREACH(mon_fdset_fd_dup, &mon_fdset->dup_fds, next) {
if (mon_fdset_fd_dup->fd == dup_fd) {
if (QLIST_EMPTY(&mon_fdset->dup_fds)) {
monitor_fdset_cleanup(mon_fdset);
}
- return -1;
+ goto err;
} else {
+ qemu_mutex_unlock(&mon_fdsets_lock);
return mon_fdset->id;
}
}
}
}
+
+err:
+ qemu_mutex_unlock(&mon_fdsets_lock);
return -1;
}
(int)(p - cmdp_start), cmdp_start);
return NULL;
}
+ if (runstate_check(RUN_STATE_PRECONFIG) && !cmd_can_preconfig(cmd)) {
+ monitor_printf(mon, "Command '%.*s' not available with -preconfig "
+ "until after exit_preconfig.\n",
+ (int)(p - cmdp_start), cmdp_start);
+ return NULL;
+ }
/* filter out following useless space */
while (qemu_isspace(*p)) {
return qdict;
fail:
- QDECREF(qdict);
+ qobject_unref(qdict);
g_free(key);
return NULL;
}
{
QDict *qdict;
const mon_cmd_t *cmd;
+ const char *cmd_start = cmdline;
trace_handle_hmp_command(mon, cmdline);
qdict = monitor_parse_arguments(mon, &cmdline, cmd);
if (!qdict) {
- monitor_printf(mon, "Try \"help %s\" for more information\n",
- cmd->name);
+ while (cmdline > cmd_start && qemu_isspace(cmdline[-1])) {
+ cmdline--;
+ }
+ monitor_printf(mon, "Try \"help %.*s\" for more information\n",
+ (int)(cmdline - cmd_start), cmd_start);
return;
}
cmd->cmd(mon, qdict);
- QDECREF(qdict);
+ qobject_unref(qdict);
}
static void cmd_completion(Monitor *mon, const char *name, const char *list)
p = list;
for(;;) {
pstart = p;
- p = strchr(p, '|');
- if (!p)
- p = pstart + strlen(pstart);
+ p = qemu_strchrnul(p, '|');
len = p - pstart;
if (len > sizeof(cmd) - 2)
len = sizeof(cmd) - 2;
cmdname = args[0];
readline_set_completion_index(mon->rs, strlen(cmdname));
for (cmd = cmd_table; cmd->name != NULL; cmd++) {
- cmd_completion(mon, cmdname, cmd->name);
+ if (!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd)) {
+ cmd_completion(mon, cmdname, cmd->name);
+ }
}
} else {
/* find the command */
for (cmd = cmd_table; cmd->name != NULL; cmd++) {
- if (compare_cmd(args[0], cmd->name)) {
+ if (compare_cmd(args[0], cmd->name) &&
+ (!runstate_check(RUN_STATE_PRECONFIG) ||
+ cmd_can_preconfig(cmd))) {
break;
}
}
{
Monitor *mon = opaque;
- return (mon->suspend_cnt == 0) ? 1 : 0;
+ return !atomic_mb_read(&mon->suspend_cnt);
}
+/*
+ * 1. This function takes ownership of rsp, err, and id.
+ * 2. rsp, err, and id may be NULL.
+ * 3. If err != NULL then rsp must be NULL.
+ */
+static void monitor_qmp_respond(Monitor *mon, QObject *rsp,
+                                Error *err, QObject *id)
+{
+    QDict *qdict = NULL;
+
+    if (err) {
+        /* Build an error response object from @err. */
+        assert(!rsp);
+        qdict = qdict_new();
+        qdict_put_obj(qdict, "error", qmp_build_error_object(err));
+        error_free(err);
+        rsp = QOBJECT(qdict);
+    }
+
+    if (rsp) {
+        if (id) {
+            /* Attach the request's id so the client can match it up. */
+            qdict_put_obj(qobject_to(QDict, rsp), "id", qobject_ref(id));
+        }
+
+        monitor_json_emitter(mon, rsp);
+    }
+
+    qobject_unref(id);
+    qobject_unref(rsp);
+}
+
+/*
+ * Dispatch one single QMP request. The function will free the req_obj
+ * and objects inside it before return.
+ */
+static void monitor_qmp_dispatch_one(QMPRequest *req_obj)
+{
+    Monitor *mon, *old_mon;
+    QObject *req, *rsp = NULL, *id;
+    bool need_resume;
+
+    req = req_obj->req;
+    mon = req_obj->mon;
+    id = req_obj->id;
+    need_resume = req_obj->need_resume;
+
+    /* Ownership of req/id has moved to the locals; drop the wrapper. */
+    g_free(req_obj);
+
+    if (trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
+        QString *req_json = qobject_to_json(req);
+        trace_handle_qmp_command(mon, qstring_get_str(req_json));
+        qobject_unref(req_json);
+    }
+
+    /* Commands are dispatched against cur_mon; restore it afterwards. */
+    old_mon = cur_mon;
+    cur_mon = mon;
+
+    rsp = qmp_dispatch(mon->qmp.commands, req);
+
+    cur_mon = old_mon;
+
+    /* Respond if necessary */
+    monitor_qmp_respond(mon, rsp, NULL, id);
+
+    /* This pairs with the monitor_suspend() in handle_qmp_command(). */
+    if (need_resume) {
+        monitor_resume(mon);
+    }
+
+    qobject_unref(req);
+}
+
+/*
+ * Pop one QMP request from monitor queues, return NULL if not found.
+ * We are using round-robin fashion to pop the request, to avoid
+ * processing commands only on a very busy monitor. To achieve that,
+ * when we process one request on a specific monitor, we put that
+ * monitor to the end of mon_list queue.
+ */
+static QMPRequest *monitor_qmp_requests_pop_any(void)
+{
+    QMPRequest *req_obj = NULL;
+    Monitor *mon;
+
+    qemu_mutex_lock(&monitor_lock);
+
+    QTAILQ_FOREACH(mon, &mon_list, entry) {
+        qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
+        req_obj = g_queue_pop_head(mon->qmp.qmp_requests);
+        qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+        if (req_obj) {
+            /* Stop at the first monitor that has a pending request. */
+            break;
+        }
+    }
+
+    if (req_obj) {
+        /*
+         * We found one request on the monitor. Degrade this monitor's
+         * priority to lowest by re-inserting it to end of queue.
+         */
+        QTAILQ_REMOVE(&mon_list, mon, entry);
+        QTAILQ_INSERT_TAIL(&mon_list, mon, entry);
+    }
+
+    qemu_mutex_unlock(&monitor_lock);
+
+    return req_obj;
+}
+
+/* Bottom half: dispatch at most one queued in-band QMP request. */
+static void monitor_qmp_bh_dispatcher(void *data)
+{
+    QMPRequest *req_obj = monitor_qmp_requests_pop_any();
+
+    if (req_obj) {
+        trace_monitor_qmp_cmd_in_band(qobject_get_try_str(req_obj->id) ?: "");
+        monitor_qmp_dispatch_one(req_obj);
+        /* Reschedule instead of looping so the main loop stays responsive */
+        qemu_bh_schedule(mon_global.qmp_dispatcher_bh);
+    }
+}
+
+#define QMP_REQ_QUEUE_LEN_MAX (8)
+
static void handle_qmp_command(JSONMessageParser *parser, GQueue *tokens)
{
- QObject *req, *rsp = NULL, *id = NULL;
+ QObject *req, *id = NULL;
QDict *qdict = NULL;
- Monitor *mon = cur_mon;
+ MonitorQMP *mon_qmp = container_of(parser, MonitorQMP, parser);
+ Monitor *mon = container_of(mon_qmp, Monitor, qmp);
Error *err = NULL;
+ QMPRequest *req_obj;
req = json_parser_parse_err(tokens, NULL, &err);
if (!req && !err) {
error_setg(&err, QERR_JSON_PARSING);
}
if (err) {
- goto err_out;
+ goto err;
}
- qdict = qobject_to_qdict(req);
- if (qdict) {
- id = qdict_get(qdict, "id");
- qobject_incref(id);
- qdict_del(qdict, "id");
- } /* else will fail qmp_dispatch() */
+ /* Check against the request in general layout */
+ qdict = qmp_dispatch_check_obj(req, &err);
+ if (!qdict) {
+ goto err;
+ }
- if (trace_event_get_state_backends(TRACE_HANDLE_QMP_COMMAND)) {
- QString *req_json = qobject_to_json(req);
- trace_handle_qmp_command(mon, qstring_get_str(req_json));
- QDECREF(req_json);
+ /* Check against OOB specific */
+ if (!qmp_cmd_oob_check(mon, qdict, &err)) {
+ goto err;
}
- rsp = qmp_dispatch(cur_mon->qmp.commands, req);
+ id = qdict_get(qdict, "id");
- if (mon->qmp.commands == &qmp_cap_negotiation_commands) {
- qdict = qdict_get_qdict(qobject_to_qdict(rsp), "error");
- if (qdict
- && !g_strcmp0(qdict_get_try_str(qdict, "class"),
- QapiErrorClass_str(ERROR_CLASS_COMMAND_NOT_FOUND))) {
- /* Provide a more useful error message */
- qdict_del(qdict, "desc");
- qdict_put_str(qdict, "desc", "Expecting capabilities negotiation"
- " with 'qmp_capabilities'");
- }
+ /* When OOB is enabled, the "id" field is mandatory. */
+ if (qmp_oob_enabled(mon) && !id) {
+ error_setg(&err, "Out-Of-Band capability requires that "
+ "every command contains an 'id' field");
+ goto err;
}
-err_out:
- if (err) {
- qdict = qdict_new();
- qdict_put_obj(qdict, "error", qmp_build_error_object(err));
- error_free(err);
- rsp = QOBJECT(qdict);
+ req_obj = g_new0(QMPRequest, 1);
+ req_obj->mon = mon;
+ req_obj->id = qobject_ref(id);
+ req_obj->req = req;
+ req_obj->need_resume = false;
+
+ qdict_del(qdict, "id");
+
+ if (qmp_is_oob(qdict)) {
+ /* Out-Of-Band (OOB) requests are executed directly in parser. */
+ trace_monitor_qmp_cmd_out_of_band(qobject_get_try_str(req_obj->id)
+ ?: "");
+ monitor_qmp_dispatch_one(req_obj);
+ return;
}
- if (rsp) {
- if (id) {
- qdict_put_obj(qobject_to_qdict(rsp), "id", id);
- id = NULL;
- }
+ /* Protect qmp_requests and fetching its length. */
+ qemu_mutex_lock(&mon->qmp.qmp_queue_lock);
- monitor_json_emitter(mon, rsp);
+ /*
+ * If OOB is not enabled on the current monitor, we'll emulate the
+ * old behavior that we won't process the current monitor any more
+ * until it has responded. This helps make sure that as long as
+ * OOB is not enabled, the server will never drop any command.
+ */
+ if (!qmp_oob_enabled(mon)) {
+ monitor_suspend(mon);
+ req_obj->need_resume = true;
+ } else {
+ /* Drop the request if queue is full. */
+ if (mon->qmp.qmp_requests->length >= QMP_REQ_QUEUE_LEN_MAX) {
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+ qapi_event_send_command_dropped(id,
+ COMMAND_DROP_REASON_QUEUE_FULL,
+ &error_abort);
+ qmp_request_free(req_obj);
+ return;
+ }
}
- qobject_decref(id);
- qobject_decref(rsp);
- qobject_decref(req);
+ /*
+ * Put the request to the end of queue so that requests will be
+ * handled in time order. Ownership for req_obj, req, id,
+ * etc. will be delivered to the handler side.
+ */
+ g_queue_push_tail(mon->qmp.qmp_requests, req_obj);
+ qemu_mutex_unlock(&mon->qmp.qmp_queue_lock);
+
+ /* Kick the dispatcher routine */
+ qemu_bh_schedule(mon_global.qmp_dispatcher_bh);
+ return;
+
+err:
+ monitor_qmp_respond(mon, NULL, err, NULL);
+ qobject_unref(req);
}
static void monitor_qmp_read(void *opaque, const uint8_t *buf, int size)
{
- Monitor *old_mon = cur_mon;
-
- cur_mon = opaque;
-
- json_message_parser_feed(&cur_mon->qmp.parser, (const char *) buf, size);
+ Monitor *mon = opaque;
- cur_mon = old_mon;
+ json_message_parser_feed(&mon->qmp.parser, (const char *) buf, size);
}
static void monitor_read(void *opaque, const uint8_t *buf, int size)
int monitor_suspend(Monitor *mon)
{
- if (!mon->rs)
+ if (monitor_is_hmp_non_interactive(mon)) {
return -ENOTTY;
- mon->suspend_cnt++;
+ }
+
+ atomic_inc(&mon->suspend_cnt);
+
+ if (monitor_is_qmp(mon)) {
+ /*
+ * Kick iothread to make sure this takes effect. It'll be
+ * evaluated again in prepare() of the watch object.
+ */
+ aio_notify(iothread_get_aio_context(mon_global.mon_iothread));
+ }
+
+ trace_monitor_suspend(mon, 1);
return 0;
}
void monitor_resume(Monitor *mon)
{
- if (!mon->rs)
+ if (monitor_is_hmp_non_interactive(mon)) {
return;
- if (--mon->suspend_cnt == 0)
- readline_show_prompt(mon->rs);
+ }
+
+ if (atomic_dec_fetch(&mon->suspend_cnt) == 0) {
+ if (monitor_is_qmp(mon)) {
+ /*
+ * For QMP monitors that are running in IOThread, let's
+ * kick the thread in case it's sleeping.
+ */
+ if (mon->use_io_thr) {
+ aio_notify(iothread_get_aio_context(mon_global.mon_iothread));
+ }
+ } else {
+ assert(mon->rs);
+ readline_show_prompt(mon->rs);
+ }
+ }
+ trace_monitor_suspend(mon, -1);
}
-static QObject *get_qmp_greeting(void)
+static QObject *get_qmp_greeting(Monitor *mon)
{
+ QList *cap_list = qlist_new();
QObject *ver = NULL;
+ QMPCapability cap;
qmp_marshal_query_version(NULL, &ver, NULL);
- return qobject_from_jsonf("{'QMP': {'version': %p, 'capabilities': []}}",
- ver);
+ for (cap = 0; cap < QMP_CAPABILITY__MAX; cap++) {
+ if (!mon->use_io_thr && cap == QMP_CAPABILITY_OOB) {
+ /* Monitors that are not using IOThread won't support OOB */
+ continue;
+ }
+ qlist_append_str(cap_list, QMPCapability_str(cap));
+ }
+
+ return qobject_from_jsonf("{'QMP': {'version': %p, 'capabilities': %p}}",
+ ver, cap_list);
+}
+
+static void monitor_qmp_caps_reset(Monitor *mon)
+{
+ memset(mon->qmp.qmp_caps, 0, sizeof(mon->qmp.qmp_caps));
}
static void monitor_qmp_event(void *opaque, int event)
switch (event) {
case CHR_EVENT_OPENED:
mon->qmp.commands = &qmp_cap_negotiation_commands;
- data = get_qmp_greeting();
+ monitor_qmp_caps_reset(mon);
+ data = get_qmp_greeting(mon);
monitor_json_emitter(mon, data);
- qobject_decref(data);
+ qobject_unref(data);
mon_refcount++;
break;
case CHR_EVENT_CLOSED:
+ /*
+ * Note: this is only useful when the output of the chardev
+ * backend is still open. For example, when the backend is
+ * stdio, it's possible that stdout is still open when stdin
+ * is closed.
+ */
+ monitor_qmp_response_flush(mon);
+ monitor_qmp_cleanup_queues(mon);
json_message_parser_destroy(&mon->qmp.parser);
json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
mon_refcount--;
switch (event) {
case CHR_EVENT_MUX_IN:
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->mux_out = 0;
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
if (mon->reset_seen) {
readline_restart(mon->rs);
monitor_resume(mon);
monitor_flush(mon);
} else {
- mon->suspend_cnt = 0;
+ atomic_mb_set(&mon->suspend_cnt, 0);
}
break;
case CHR_EVENT_MUX_OUT:
if (mon->reset_seen) {
- if (mon->suspend_cnt == 0) {
+ if (atomic_mb_read(&mon->suspend_cnt) == 0) {
monitor_printf(mon, "\n");
}
monitor_flush(mon);
monitor_suspend(mon);
} else {
- mon->suspend_cnt++;
+ atomic_inc(&mon->suspend_cnt);
}
- qemu_mutex_lock(&mon->out_lock);
+ qemu_mutex_lock(&mon->mon_lock);
mon->mux_out = 1;
- qemu_mutex_unlock(&mon->out_lock);
+ qemu_mutex_unlock(&mon->mon_lock);
break;
case CHR_EVENT_OPENED:
qsort((void *)info_cmds, array_num, elem_size, compare_mon_cmd);
}
+static GMainContext *monitor_get_io_context(void)
+{
+ return iothread_get_g_main_context(mon_global.mon_iothread);
+}
+
+static AioContext *monitor_get_aio_context(void)
+{
+ return iothread_get_aio_context(mon_global.mon_iothread);
+}
+
+static void monitor_iothread_init(void)
+{
+ mon_global.mon_iothread = iothread_create("mon_iothread",
+ &error_abort);
+
+ /*
+ * This MUST be on the main loop thread, since we have commands
+ * that assume they run on the main loop thread. It would be
+ * nice to remove this assumption in the future.
+ */
+ mon_global.qmp_dispatcher_bh = aio_bh_new(iohandler_get_aio_context(),
+ monitor_qmp_bh_dispatcher,
+ NULL);
+
+ /*
+ * Unlike the dispatcher BH, this must be run on the monitor IO
+ * thread, so that monitors that are using IO thread will make
+ * sure read/write operations are all done on the IO thread.
+ */
+ mon_global.qmp_respond_bh = aio_bh_new(monitor_get_aio_context(),
+ monitor_qmp_bh_responder,
+ NULL);
+}
+
+void monitor_init_globals(void)
+{
+ monitor_init_qmp_commands();
+ monitor_qapi_event_init();
+ sortcmdlist();
+ qemu_mutex_init(&monitor_lock);
+ qemu_mutex_init(&mon_fdsets_lock);
+ monitor_iothread_init();
+}
+
/* These functions just adapt the readline interface in a typesafe way. We
* could cast function pointers but that discards compiler checks.
*/
}
}
-static void __attribute__((constructor)) monitor_lock_init(void)
+static void monitor_list_append(Monitor *mon)
{
- qemu_mutex_init(&monitor_lock);
+ qemu_mutex_lock(&monitor_lock);
+ QTAILQ_INSERT_HEAD(&mon_list, mon, entry);
+ qemu_mutex_unlock(&monitor_lock);
+}
+
+static void monitor_qmp_setup_handlers_bh(void *opaque)
+{
+ Monitor *mon = opaque;
+ GMainContext *context;
+
+ if (mon->use_io_thr) {
+ /*
+ * When use_io_thr is set, we use the global shared dedicated
+ * IO thread for this monitor to handle input/output.
+ */
+ context = monitor_get_io_context();
+ /* We should have inited globals before reaching here. */
+ assert(context);
+ } else {
+ /* The default main loop, which is the main thread */
+ context = NULL;
+ }
+
+ qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
+ monitor_qmp_event, NULL, mon, context, true);
+ monitor_list_append(mon);
}
void monitor_init(Chardev *chr, int flags)
{
- static int is_first_init = 1;
- Monitor *mon;
+ Monitor *mon = g_malloc(sizeof(*mon));
+ bool use_readline = flags & MONITOR_USE_READLINE;
+ bool use_oob = flags & MONITOR_USE_OOB;
- if (is_first_init) {
- monitor_qapi_event_init();
- sortcmdlist();
- is_first_init = 0;
+ if (use_oob) {
+ if (CHARDEV_IS_MUX(chr)) {
+ error_report("Monitor Out-Of-Band is not supported with "
+ "MUX typed chardev backend");
+ exit(1);
+ }
+ if (use_readline) {
+ error_report("Monitor Out-Of-Band is only supported by QMP");
+ exit(1);
+ }
}
- mon = g_malloc(sizeof(*mon));
- monitor_data_init(mon);
+ monitor_data_init(mon, false, use_oob);
qemu_chr_fe_init(&mon->chr, chr, &error_abort);
mon->flags = flags;
- if (flags & MONITOR_USE_READLINE) {
+ if (use_readline) {
mon->rs = readline_init(monitor_readline_printf,
monitor_readline_flush,
mon,
}
if (monitor_is_qmp(mon)) {
- qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_qmp_read,
- monitor_qmp_event, NULL, mon, NULL, true);
qemu_chr_fe_set_echo(&mon->chr, true);
json_message_parser_init(&mon->qmp.parser, handle_qmp_command);
+ if (mon->use_io_thr) {
+ /*
+ * Make sure the old iowatch is gone. It's possible when
+ * e.g. the chardev is in client mode, with wait=on.
+ */
+ remove_fd_in_watch(chr);
+ /*
+ * We can't call qemu_chr_fe_set_handlers() directly here,
+ * since doing so would make the chardev active in the
+ * monitor iothread while we still have work to do before
+ * returning from this function, which would also be a
+ * race. To avoid that, we schedule a BH to set up the
+ * handlers instead.
+ */
+ aio_bh_schedule_oneshot(monitor_get_aio_context(),
+ monitor_qmp_setup_handlers_bh, mon);
+ /* We'll add this to mon_list in the BH when setup done */
+ return;
+ } else {
+ qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read,
+ monitor_qmp_read, monitor_qmp_event,
+ NULL, mon, NULL, true);
+ }
} else {
qemu_chr_fe_set_handlers(&mon->chr, monitor_can_read, monitor_read,
monitor_event, NULL, mon, NULL, true);
}
- qemu_mutex_lock(&monitor_lock);
- QLIST_INSERT_HEAD(&mon_list, mon, entry);
- qemu_mutex_unlock(&monitor_lock);
+ monitor_list_append(mon);
}
void monitor_cleanup(void)
{
Monitor *mon, *next;
+ /*
+ * We need to explicitly stop the iothread (but not destroy it),
+ * cleanup the monitor resources, then destroy the iothread since
+ * we need to unregister from chardev below in
+ * monitor_data_destroy(), and chardev is not thread-safe yet
+ */
+ iothread_stop(mon_global.mon_iothread);
+
+ /*
+ * Now that responses are sent from an IOThread, it's possible
+ * that replies are still queued in the responder queue when we
+ * stop the IOThread. Flush all of them. Note that even after
+ * this flush the output buffer may still not be flushed; the
+ * monitor_flush() call below handles that as a last resort.
+ */
+ monitor_qmp_bh_responder(NULL);
+
qemu_mutex_lock(&monitor_lock);
- QLIST_FOREACH_SAFE(mon, &mon_list, entry, next) {
- QLIST_REMOVE(mon, entry);
+ QTAILQ_FOREACH_SAFE(mon, &mon_list, entry, next) {
+ QTAILQ_REMOVE(&mon_list, mon, entry);
+ monitor_flush(mon);
monitor_data_destroy(mon);
g_free(mon);
}
qemu_mutex_unlock(&monitor_lock);
+
+ /* QEMUBHs needs to be deleted before destroying the IOThread. */
+ qemu_bh_delete(mon_global.qmp_dispatcher_bh);
+ mon_global.qmp_dispatcher_bh = NULL;
+ qemu_bh_delete(mon_global.qmp_respond_bh);
+ mon_global.qmp_respond_bh = NULL;
+
+ iothread_destroy(mon_global.mon_iothread);
+ mon_global.mon_iothread = NULL;
}
QemuOptsList qemu_mon_opts = {
},{
.name = "pretty",
.type = QEMU_OPT_BOOL,
+ },{
+ .name = "x-oob",
+ .type = QEMU_OPT_BOOL,
},
{ /* end of list */ }
},