/* visorchipset_main.c
 *
 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 */

#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/nls.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/uuid.h>
#include <linux/crash_dump.h>

#include "channel_guid.h"
#include "controlvmchannel.h"
#include "controlvmcompletionstatus.h"
#include "guestlinuxdebug.h"
#include "periodic_work.h"
#include "version.h"
#include "visorbus.h"
#include "visorbus_private.h"
#include "vmcallinterface.h"

#define CURRENT_FILE_PC VISOR_CHIPSET_PC_visorchipset_main_c

#define MAX_NAME_SIZE 128
#define MAX_IP_SIZE   50
#define MAXOUTSTANDINGCHANNELCOMMAND 256
#define POLLJIFFIES_CONTROLVMCHANNEL_FAST   1
#define POLLJIFFIES_CONTROLVMCHANNEL_SLOW 100

#define MAX_CONTROLVM_PAYLOAD_BYTES (1024 * 128)

#define VISORCHIPSET_MMAP_CONTROLCHANOFFSET     0x00000000

#define UNISYS_SPAR_LEAF_ID 0x40000000

/* The s-Par leaf ID returns "UnisysSpar64" encoded across ebx, ecx, edx */
#define UNISYS_SPAR_ID_EBX 0x73696e55
#define UNISYS_SPAR_ID_ECX 0x70537379
#define UNISYS_SPAR_ID_EDX 0x34367261
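/*
 * Decoded little-endian, the three registers spell out the signature:
 * EBX 0x73696e55 = "Unis", ECX 0x70537379 = "ysSp", EDX 0x34367261 = "ar64".
 */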

/*
 * Module parameters
 */
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1;    /* default is on */
static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;

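/*
 * File operations for the /dev/visorchipset character device; only
 * minor 0 is supported, and no per-open state is kept.
 */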
static int
visorchipset_open(struct inode *inode, struct file *file)
{
        unsigned minor_number = iminor(inode);

        if (minor_number)
                return -ENODEV;
        file->private_data = NULL;
        return 0;
}

static int
visorchipset_release(struct inode *inode, struct file *file)
{
        return 0;
}

/* When the controlvm channel is idle for at least MIN_IDLE_SECONDS,
 * we switch to slow polling mode.  As soon as we get a controlvm
 * message, we switch back to fast polling mode.
 */
#define MIN_IDLE_SECONDS 10
static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
/* when we got our last controlvm message */
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;

#define MAX_CHIPSET_EVENTS 2
static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };

struct parser_context {
        unsigned long allocbytes;
        unsigned long param_bytes;
        u8 *curr;
        unsigned long bytes_remaining;
        bool byte_stream;
        char data[0];
};

static struct delayed_work periodic_controlvm_work;
static struct workqueue_struct *periodic_controlvm_workqueue;
static DEFINE_SEMAPHORE(notifier_lock);

static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;

static LIST_HEAD(bus_info_list);
static LIST_HEAD(dev_info_list);

static struct visorchannel *controlvm_channel;

/* Manages the request payload in the controlvm channel */
struct visor_controlvm_payload_info {
        u8 *ptr;                /* pointer to base address of payload pool */
        u64 offset;             /* offset from beginning of controlvm
                                 * channel to beginning of payload pool */
        u32 bytes;              /* number of bytes in payload pool */
};

static struct visor_controlvm_payload_info controlvm_payload_info;

/* The following globals are used to handle the scenario where we are unable to
 * offload the payload from a controlvm message due to memory requirements.  In
 * this scenario, we simply stash the controlvm message, then attempt to
 * process it again the next time controlvm_periodic_work() runs.
 */
static struct controlvm_message controlvm_pending_msg;
static bool controlvm_pending_msg_valid;

/* This identifies a data buffer that has been received via a controlvm
 * message in a remote --> local CONTROLVM_TRANSMIT_FILE conversation.
 */
struct putfile_buffer_entry {
        struct list_head next;  /* putfile_buffer_entry list */
        struct parser_context *parser_ctx; /* points to input data buffer */
};

/* List of struct putfile_request *, via next_putfile_request member.
 * Each entry in this list identifies an outstanding TRANSMIT_FILE
 * conversation.
 */
static LIST_HEAD(putfile_request_list);

/* This describes a buffer and its current state of transfer (e.g., how many
 * bytes have already been supplied as putfile data, and how many bytes are
 * remaining) for a putfile_request.
 */
struct putfile_active_buffer {
        /* a payload from a controlvm message, containing a file data buffer */
        struct parser_context *parser_ctx;
        /* points within data area of parser_ctx to next byte of data */
        u8 *pnext;
        /* # bytes left from <pnext> to the end of this data buffer */
        size_t bytes_remaining;
};

#define PUTFILE_REQUEST_SIG 0x0906101302281211
/* This identifies a single remote --> local CONTROLVM_TRANSMIT_FILE
 * conversation.  Structs of this type are dynamically linked into
 * <putfile_request_list>.
 */
struct putfile_request {
        u64 sig;                /* PUTFILE_REQUEST_SIG */

        /* header from original TransmitFile request */
        struct controlvm_message_header controlvm_header;
        u64 file_request_number;        /* from original TransmitFile request */

        /* link to next struct putfile_request */
        struct list_head next_putfile_request;

        /* most-recent sequence number supplied via a controlvm message */
        u64 data_sequence_number;

        /* head of putfile_buffer_entry list, which describes the data to be
         * supplied as putfile data;
         * - this list is added to when controlvm messages come in that supply
         * file data
         * - this list is removed from via the hotplug program that is actually
         * consuming these buffers to write as file data */
        struct list_head input_buffer_list;
        spinlock_t req_list_lock;       /* lock for input_buffer_list */

        /* waiters for input_buffer_list to go non-empty */
        wait_queue_head_t input_buffer_wq;

        /* data not yet read within current putfile_buffer_entry */
        struct putfile_active_buffer active_buf;

        /* <0 = failed, 0 = in-progress, >0 = successful; */
        /* note that this must be set with req_list_lock, and if you set <0, */
        /* it is your responsibility to also free up all of the other objects */
        /* in this struct (like input_buffer_list, active_buf.parser_ctx) */
        /* before releasing the lock */
        int completion_status;
};

struct parahotplug_request {
        struct list_head list;
        int id;
        unsigned long expiration;
        struct controlvm_message msg;
};

static LIST_HEAD(parahotplug_request_list);
static DEFINE_SPINLOCK(parahotplug_request_list_lock);  /* lock for above */
static void parahotplug_process_list(void);

/* The notifier callbacks registered by the visorbus driver via
 * visorchipset_register_busdev().
 */
static struct visorchipset_busdev_notifiers busdev_notifiers;

static void bus_create_response(struct visor_device *p, int response);
static void bus_destroy_response(struct visor_device *p, int response);
static void device_create_response(struct visor_device *p, int response);
static void device_destroy_response(struct visor_device *p, int response);
static void device_resume_response(struct visor_device *p, int response);

static void visorchipset_device_pause_response(struct visor_device *p,
                                               int response);

static struct visorchipset_busdev_responders busdev_responders = {
        .bus_create = bus_create_response,
        .bus_destroy = bus_destroy_response,
        .device_create = device_create_response,
        .device_destroy = device_destroy_response,
        .device_pause = visorchipset_device_pause_response,
        .device_resume = device_resume_response,
};

/* info for /dev/visorchipset */
static dev_t major_dev = -1; /**< indicates major num for device */

/* prototypes for attributes */
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count);
static DEVICE_ATTR_RW(toolaction);

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr, char *buf);
static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr, const char *buf,
                                size_t count);
static DEVICE_ATTR_RW(boottotool);

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf);
static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count);
static DEVICE_ATTR_RW(error);

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf);
static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count);
static DEVICE_ATTR_RW(textid);

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf);
static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);

static ssize_t chipsetready_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count);
static DEVICE_ATTR_WO(chipsetready);

static ssize_t devicedisabled_store(struct device *dev,
                                    struct device_attribute *attr,
                                    const char *buf, size_t count);
static DEVICE_ATTR_WO(devicedisabled);

static ssize_t deviceenabled_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count);
static DEVICE_ATTR_WO(deviceenabled);

static struct attribute *visorchipset_install_attrs[] = {
        &dev_attr_toolaction.attr,
        &dev_attr_boottotool.attr,
        &dev_attr_error.attr,
        &dev_attr_textid.attr,
        &dev_attr_remaining_steps.attr,
        NULL
};

static struct attribute_group visorchipset_install_group = {
        .name = "install",
        .attrs = visorchipset_install_attrs
};

static struct attribute *visorchipset_guest_attrs[] = {
        &dev_attr_chipsetready.attr,
        NULL
};

static struct attribute_group visorchipset_guest_group = {
        .name = "guest",
        .attrs = visorchipset_guest_attrs
};

static struct attribute *visorchipset_parahotplug_attrs[] = {
        &dev_attr_devicedisabled.attr,
        &dev_attr_deviceenabled.attr,
        NULL
};

static struct attribute_group visorchipset_parahotplug_group = {
        .name = "parahotplug",
        .attrs = visorchipset_parahotplug_attrs
};

static const struct attribute_group *visorchipset_dev_groups[] = {
        &visorchipset_install_group,
        &visorchipset_guest_group,
        &visorchipset_parahotplug_group,
        NULL
};

static void visorchipset_dev_release(struct device *dev)
{
}

/* /sys/devices/platform/visorchipset */
static struct platform_device visorchipset_platform_device = {
        .name = "visorchipset",
        .id = -1,
        .dev.groups = visorchipset_dev_groups,
        .dev.release = visorchipset_dev_release,
};

/* Function prototypes */
static void controlvm_respond(struct controlvm_message_header *msg_hdr,
                              int response);
static void controlvm_respond_chipset_init(
                struct controlvm_message_header *msg_hdr, int response,
                enum ultra_chipset_feature features);
static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state);

static void parser_done(struct parser_context *ctx);

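/*
 * Copy a controlvm message payload into a newly allocated parser_context.
 * When @local is set, @addr is a guest-physical address within RAM and is
 * copied via __va(); otherwise the region is memremap()ed first.  If the
 * allocation fails (or would exceed MAX_CONTROLVM_PAYLOAD_BYTES), *@retry
 * tells the caller whether trying again later might succeed.
 */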
static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
        int allocbytes = sizeof(struct parser_context) + bytes;
        struct parser_context *rc = NULL;
        struct parser_context *ctx = NULL;

        if (retry)
                *retry = false;

        /*
         * Allocate an extra byte so the payload is always
         * '\0'-terminated.
         */
        allocbytes++;
        if ((controlvm_payload_bytes_buffered + bytes)
            > MAX_CONTROLVM_PAYLOAD_BYTES) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }
        ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
        if (!ctx) {
                if (retry)
                        *retry = true;
                rc = NULL;
                goto cleanup;
        }

        ctx->allocbytes = allocbytes;
        ctx->param_bytes = bytes;
        ctx->curr = NULL;
        ctx->bytes_remaining = 0;
        ctx->byte_stream = false;
        if (local) {
                void *p;

                if (addr > virt_to_phys(high_memory - 1)) {
                        rc = NULL;
                        goto cleanup;
                }
                p = __va((unsigned long)(addr));
                memcpy(ctx->data, p, bytes);
        } else {
                void *mapping;

                if (!request_mem_region(addr, bytes, "visorchipset")) {
                        rc = NULL;
                        goto cleanup;
                }

                mapping = memremap(addr, bytes, MEMREMAP_WB);
                if (!mapping) {
                        release_mem_region(addr, bytes);
                        rc = NULL;
                        goto cleanup;
                }
                memcpy(ctx->data, mapping, bytes);
                release_mem_region(addr, bytes);
                memunmap(mapping);
        }

        ctx->byte_stream = true;
        rc = ctx;
cleanup:
        if (rc) {
                controlvm_payload_bytes_buffered += ctx->param_bytes;
        } else {
                if (ctx) {
                        parser_done(ctx);
                        ctx = NULL;
                }
        }
        return rc;
}

static uuid_le
parser_id_get(struct parser_context *ctx)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (ctx == NULL)
                return NULL_UUID_LE;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        return phdr->id;
}

/* Selects which of the embedded parameter strings parser_param_start()
 * positions the parser at.
 */
enum PARSER_WHICH_STRING {
        PARSERSTRING_INITIATOR,
        PARSERSTRING_TARGET,
        PARSERSTRING_CONNECTION,
        PARSERSTRING_NAME, /* TODO: only PARSERSTRING_NAME is used ? */
};

static void
parser_param_start(struct parser_context *ctx,
                   enum PARSER_WHICH_STRING which_string)
{
        struct spar_controlvm_parameters_header *phdr = NULL;

        if (ctx == NULL)
                goto Away;
        phdr = (struct spar_controlvm_parameters_header *)(ctx->data);
        switch (which_string) {
        case PARSERSTRING_INITIATOR:
                ctx->curr = ctx->data + phdr->initiator_offset;
                ctx->bytes_remaining = phdr->initiator_length;
                break;
        case PARSERSTRING_TARGET:
                ctx->curr = ctx->data + phdr->target_offset;
                ctx->bytes_remaining = phdr->target_length;
                break;
        case PARSERSTRING_CONNECTION:
                ctx->curr = ctx->data + phdr->connection_offset;
                ctx->bytes_remaining = phdr->connection_length;
                break;
        case PARSERSTRING_NAME:
                ctx->curr = ctx->data + phdr->name_offset;
                ctx->bytes_remaining = phdr->name_length;
                break;
        default:
                break;
        }

Away:
        return;
}

static void parser_done(struct parser_context *ctx)
{
        if (!ctx)
                return;
        controlvm_payload_bytes_buffered -= ctx->param_bytes;
        kfree(ctx);
}

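/*
 * Return a kmalloc()ed, '\0'-terminated copy of the parameter string
 * most recently selected with parser_param_start(); the caller is
 * responsible for kfree()ing the result.
 */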
static void *
parser_string_get(struct parser_context *ctx)
{
        u8 *pscan;
        unsigned long nscan;
        int value_length = -1;
        void *value = NULL;
        int i;

        if (!ctx)
                return NULL;
        pscan = ctx->curr;
        nscan = ctx->bytes_remaining;
        if (nscan == 0)
                return NULL;
        if (!pscan)
                return NULL;
        for (i = 0, value_length = -1; i < nscan; i++)
                if (pscan[i] == '\0') {
                        value_length = i;
                        break;
                }
        if (value_length < 0)   /* '\0' was not included in the length */
                value_length = nscan;
        value = kmalloc(value_length + 1, GFP_KERNEL | __GFP_NORETRY);
        if (value == NULL)
                return NULL;
        if (value_length > 0)
                memcpy(value, pscan, value_length);
        ((u8 *)(value))[value_length] = '\0';
        return value;
}

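/*
 * The sysfs attributes below read and write fields of the controlvm
 * channel directly: tool_action, efi_spar_ind, installation_error,
 * installation_text_id and installation_remaining_steps.
 */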
static ssize_t toolaction_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        u8 tool_action;

        visorchannel_read(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action), &tool_action, sizeof(u8));
        return scnprintf(buf, PAGE_SIZE, "%u\n", tool_action);
}

static ssize_t toolaction_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        u8 tool_action;
        int ret;

        if (kstrtou8(buf, 10, &tool_action))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         tool_action),
                &tool_action, sizeof(u8));

        if (ret)
                return ret;
        return count;
}

static ssize_t boottotool_show(struct device *dev,
                               struct device_attribute *attr,
                               char *buf)
{
        struct efi_spar_indication efi_spar_indication;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   efi_spar_ind), &efi_spar_indication,
                          sizeof(struct efi_spar_indication));
        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         efi_spar_indication.boot_to_tool);
}

static ssize_t boottotool_store(struct device *dev,
                                struct device_attribute *attr,
                                const char *buf, size_t count)
{
        int val, ret;
        struct efi_spar_indication efi_spar_indication;

        if (kstrtoint(buf, 10, &val))
                return -EINVAL;

        efi_spar_indication.boot_to_tool = val;
        ret = visorchannel_write(controlvm_channel,
                        offsetof(struct spar_controlvm_channel_protocol,
                                 efi_spar_ind), &(efi_spar_indication),
                                 sizeof(struct efi_spar_indication));

        if (ret)
                return ret;
        return count;
}

static ssize_t error_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        u32 error;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_error),
                          &error, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", error);
}

static ssize_t error_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
{
        u32 error;
        int ret;

        if (kstrtou32(buf, 10, &error))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_error),
                &error, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        u32 text_id;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_text_id),
                          &text_id, sizeof(u32));
        return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}

static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
                            const char *buf, size_t count)
{
        u32 text_id;
        int ret;

        if (kstrtou32(buf, 10, &text_id))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_text_id),
                &text_id, sizeof(u32));
        if (ret)
                return ret;
        return count;
}

static ssize_t remaining_steps_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        u16 remaining_steps;

        visorchannel_read(controlvm_channel,
                          offsetof(struct spar_controlvm_channel_protocol,
                                   installation_remaining_steps),
                          &remaining_steps, sizeof(u16));
        return scnprintf(buf, PAGE_SIZE, "%hu\n", remaining_steps);
}

static ssize_t remaining_steps_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        u16 remaining_steps;
        int ret;

        if (kstrtou16(buf, 10, &remaining_steps))
                return -EINVAL;

        ret = visorchannel_write(controlvm_channel,
                offsetof(struct spar_controlvm_channel_protocol,
                         installation_remaining_steps),
                &remaining_steps, sizeof(u16));
        if (ret)
                return ret;
        return count;
}

struct visor_busdev {
        u32 bus_no;
        u32 dev_no;
};

static int match_visorbus_dev_by_id(struct device *dev, void *data)
{
        struct visor_device *vdev = to_visor_device(dev);
        struct visor_busdev *id = (struct visor_busdev *)data;
        u32 bus_no = id->bus_no;
        u32 dev_no = id->dev_no;

        if ((vdev->chipset_bus_no == bus_no) &&
            (vdev->chipset_dev_no == dev_no))
                return 1;

        return 0;
}

struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
                                               struct visor_device *from)
{
        struct device *dev;
        struct device *dev_start = NULL;
        struct visor_device *vdev = NULL;
        struct visor_busdev id = {
                .bus_no = bus_no,
                .dev_no = dev_no
        };

        if (from)
                dev_start = &from->device;
        dev = bus_find_device(&visorbus_type, dev_start, (void *)&id,
                              match_visorbus_dev_by_id);
        if (dev)
                vdev = to_visor_device(dev);
        return vdev;
}
EXPORT_SYMBOL(visorbus_get_device_by_id);

static u8
check_chipset_events(void)
{
        int i;
        u8 send_msg = 1;

        /* Check events to determine if response should be sent */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                send_msg &= chipset_events[i];
        return send_msg;
}

static void
clear_chipset_events(void)
{
        int i;

        /* Clear chipset_events */
        for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
                chipset_events[i] = 0;
}

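/*
 * Called by the visorbus driver to register (or, with NULL @notifiers,
 * unregister) its notifier callbacks.  On return, @responders and
 * @driver_info are filled in for the caller when non-NULL.
 */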
void
visorchipset_register_busdev(
                        struct visorchipset_busdev_notifiers *notifiers,
                        struct visorchipset_busdev_responders *responders,
                        struct ultra_vbus_deviceinfo *driver_info)
{
        down(&notifier_lock);
        if (!notifiers) {
                memset(&busdev_notifiers, 0,
                       sizeof(busdev_notifiers));
                visorbusregistered = 0; /* clear flag */
        } else {
                busdev_notifiers = *notifiers;
                visorbusregistered = 1; /* set flag */
        }
        if (responders)
                *responders = busdev_responders;
        if (driver_info)
                bus_device_info_init(driver_info, "chipset", "visorchipset",
                                     VERSION, NULL);

        up(&notifier_lock);
}
EXPORT_SYMBOL_GPL(visorchipset_register_busdev);

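/*
 * Handle CONTROLVM_CHIPSET_INIT: mark the chipset initialized (at most
 * once) and negotiate the feature bits echoed back to Command.
 */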
static void
chipset_init(struct controlvm_message *inmsg)
{
        static int chipset_inited;
        enum ultra_chipset_feature features = 0;
        int rc = CONTROLVM_RESP_SUCCESS;

        POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
        if (chipset_inited) {
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        chipset_inited = 1;
        POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);

        /* Set features to indicate we support parahotplug (if Command
         * also supports it). */
        features = inmsg->cmd.init_chipset.features &
                   ULTRA_CHIPSET_FEATURE_PARA_HOTPLUG;

        /* Set the "reply" bit so Command knows this is a
         * features-aware driver. */
        features |= ULTRA_CHIPSET_FEATURE_REPLY;

cleanup:
        if (inmsg->hdr.flags.response_expected)
                controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}

static void
controlvm_init_response(struct controlvm_message *msg,
                        struct controlvm_message_header *msg_hdr, int response)
{
        memset(msg, 0, sizeof(struct controlvm_message));
        memcpy(&msg->hdr, msg_hdr, sizeof(struct controlvm_message_header));
        msg->hdr.payload_bytes = 0;
        msg->hdr.payload_vm_offset = 0;
        msg->hdr.payload_max_bytes = 0;
        if (response < 0) {
                msg->hdr.flags.failed = 1;
                msg->hdr.completion_status = (u32)(-response);
        }
}

static void
controlvm_respond(struct controlvm_message_header *msg_hdr, int response)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        if (outmsg.hdr.flags.test_message == 1)
                return;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

static void
controlvm_respond_chipset_init(struct controlvm_message_header *msg_hdr,
                               int response,
                               enum ultra_chipset_feature features)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.init_chipset.features = features;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

static void controlvm_respond_physdev_changestate(
                struct controlvm_message_header *msg_hdr, int response,
                struct spar_segment_state state)
{
        struct controlvm_message outmsg;

        controlvm_init_response(&outmsg, msg_hdr, response);
        outmsg.cmd.device_change_state.state = state;
        outmsg.cmd.device_change_state.flags.phys_device = 1;
        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg)) {
                return;
        }
}

enum crash_obj_type {
        CRASH_DEV,
        CRASH_BUS,
};

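/*
 * The responder helpers below push a response for a completed bus or
 * device operation back onto the controlvm request queue, but only when
 * the original message asked for one (pending_msg_hdr != NULL) and the
 * command ids match.
 */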
static void
bus_responder(enum controlvm_id cmd_id,
              struct controlvm_message_header *pending_msg_hdr,
              int response)
{
        if (pending_msg_hdr == NULL)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

static void
device_changestate_responder(enum controlvm_id cmd_id,
                             struct visor_device *p, int response,
                             struct spar_segment_state response_state)
{
        struct controlvm_message outmsg;
        u32 bus_no = p->chipset_bus_no;
        u32 dev_no = p->chipset_dev_no;

        if (p->pending_msg_hdr == NULL)
                return;         /* no controlvm response needed */
        if (p->pending_msg_hdr->id != cmd_id)
                return;

        controlvm_init_response(&outmsg, p->pending_msg_hdr, response);

        outmsg.cmd.device_change_state.bus_no = bus_no;
        outmsg.cmd.device_change_state.dev_no = dev_no;
        outmsg.cmd.device_change_state.state = response_state;

        if (!visorchannel_signalinsert(controlvm_channel,
                                       CONTROLVM_QUEUE_REQUEST, &outmsg))
                return;
}

static void
device_responder(enum controlvm_id cmd_id,
                 struct controlvm_message_header *pending_msg_hdr,
                 int response)
{
        if (pending_msg_hdr == NULL)
                return;         /* no controlvm response needed */

        if (pending_msg_hdr->id != (u32)cmd_id)
                return;

        controlvm_respond(pending_msg_hdr, response);
}

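/*
 * Common tail for every bus command: stash a copy of the message header
 * when a response is expected, dispatch to the registered visorbus
 * notifier under notifier_lock, and respond directly on any path where
 * no notifier ran (on the success path the notifier's responder callback
 * sends the response instead).
 */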
static void
bus_epilog(struct visor_device *bus_info,
           u32 cmd, struct controlvm_message_header *msg_hdr,
           int response, bool need_response)
{
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        if (!bus_info) {
                /* relying on a valid passed in response code */
                /* be lazy and re-use msg_hdr for this failure, is this ok?? */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (bus_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = bus_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                bus_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response == CONTROLVM_RESP_SUCCESS) {
                switch (cmd) {
                case CONTROLVM_BUS_CREATE:
                        if (busdev_notifiers.bus_create) {
                                (*busdev_notifiers.bus_create) (bus_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_BUS_DESTROY:
                        if (busdev_notifiers.bus_destroy) {
                                (*busdev_notifiers.bus_destroy) (bus_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call bus_responder()
                 */
                ;
        else
                /*
                 * Do not kfree(pmsg_hdr) as this is the failure path.
                 * The success path ('notified') will call the responder
                 * directly and kfree() there.
                 */
                bus_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

static void
device_epilog(struct visor_device *dev_info,
              struct spar_segment_state state, u32 cmd,
              struct controlvm_message_header *msg_hdr, int response,
              bool need_response, bool for_visorbus)
{
        struct visorchipset_busdev_notifiers *notifiers;
        bool notified = false;
        struct controlvm_message_header *pmsg_hdr = NULL;

        notifiers = &busdev_notifiers;

        if (!dev_info) {
                /* relying on a valid passed in response code */
                /* be lazy and re-use msg_hdr for this failure, is this ok?? */
                pmsg_hdr = msg_hdr;
                goto away;
        }

        if (dev_info->pending_msg_hdr) {
                /* only non-NULL if dev is still waiting on a response */
                response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
                pmsg_hdr = dev_info->pending_msg_hdr;
                goto away;
        }

        if (need_response) {
                pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
                if (!pmsg_hdr) {
                        response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                        goto away;
                }

                memcpy(pmsg_hdr, msg_hdr,
                       sizeof(struct controlvm_message_header));
                dev_info->pending_msg_hdr = pmsg_hdr;
        }

        down(&notifier_lock);
        if (response >= 0) {
                switch (cmd) {
                case CONTROLVM_DEVICE_CREATE:
                        if (notifiers->device_create) {
                                (*notifiers->device_create) (dev_info);
                                notified = true;
                        }
                        break;
                case CONTROLVM_DEVICE_CHANGESTATE:
                        /* ServerReady / ServerRunning / SegmentStateRunning */
                        if (state.alive == segment_state_running.alive &&
                            state.operating ==
                                segment_state_running.operating) {
                                if (notifiers->device_resume) {
                                        (*notifiers->device_resume) (dev_info);
                                        notified = true;
                                }
                        }
                        /* ServerNotReady / ServerLost / SegmentStateStandby */
                        else if (state.alive == segment_state_standby.alive &&
                                 state.operating ==
                                 segment_state_standby.operating) {
                                /* technically this is standby case
                                 * where server is lost
                                 */
                                if (notifiers->device_pause) {
                                        (*notifiers->device_pause) (dev_info);
                                        notified = true;
                                }
                        }
                        break;
                case CONTROLVM_DEVICE_DESTROY:
                        if (notifiers->device_destroy) {
                                (*notifiers->device_destroy) (dev_info);
                                notified = true;
                        }
                        break;
                }
        }
away:
        if (notified)
                /* The callback function just called above is responsible
                 * for calling the appropriate visorchipset_busdev_responders
                 * function, which will call device_responder()
                 */
                ;
        else
                /*
                 * Do not kfree(pmsg_hdr) as this is the failure path.
                 * The success path ('notified') will call the responder
                 * directly and kfree() there.
                 */
                device_responder(cmd, pmsg_hdr, response);
        up(&notifier_lock);
}

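/*
 * Handle CONTROLVM_BUS_CREATE: allocate a visor_device for the new bus
 * and create its visorchannel from the address supplied in the message;
 * bus_epilog() takes care of any required response.
 */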
static void
bus_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_bus.bus_no;
        int rc = CONTROLVM_RESP_SUCCESS;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (bus_info && (bus_info->state.created == 1)) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }
        bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        INIT_LIST_HEAD(&bus_info->list_all);
        bus_info->chipset_bus_no = bus_no;
        bus_info->chipset_dev_no = BUS_ROOT_DEVICE;

        POSTCODE_LINUX_3(BUS_CREATE_ENTRY_PC, bus_no, POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_bus.channel_addr,
                                           cmd->create_bus.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_bus.bus_data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(bus_info);
                bus_info = NULL;
                goto cleanup;
        }
        bus_info->visorchannel = visorchannel;

        POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);

cleanup:
        bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

static void
bus_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_bus.bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info)
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        else if (bus_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        bus_epilog(bus_info, CONTROLVM_BUS_DESTROY, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);

        /* bus_info is freed as part of the busdevice_release function */
}

static void
bus_configure(struct controlvm_message *inmsg,
              struct parser_context *parser_ctx)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no;
        struct visor_device *bus_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_no = cmd->configure_bus.bus_no;
        POSTCODE_LINUX_3(BUS_CONFIGURE_ENTRY_PC, bus_no,
                         POSTCODE_SEVERITY_INFO);

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->state.created == 0) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
        } else if (bus_info->pending_msg_hdr != NULL) {
                POSTCODE_LINUX_3(BUS_CONFIGURE_FAILURE_PC, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
        } else {
                visorchannel_set_clientpartition(bus_info->visorchannel,
                                cmd->configure_bus.guest_handle);
                bus_info->partition_uuid = parser_id_get(parser_ctx);
                parser_param_start(parser_ctx, PARSERSTRING_NAME);
                bus_info->name = parser_string_get(parser_ctx);

                POSTCODE_LINUX_3(BUS_CONFIGURE_EXIT_PC, bus_no,
                                 POSTCODE_SEVERITY_INFO);
        }
        bus_epilog(bus_info, CONTROLVM_BUS_CONFIGURE, &inmsg->hdr,
                   rc, inmsg->hdr.flags.response_expected == 1);
}

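/*
 * Handle CONTROLVM_DEVICE_CREATE: validate the owning bus, allocate the
 * visor_device and create the device's visorchannel; device_epilog()
 * then notifies visorbus and/or sends the response.
 */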
static void
my_device_create(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->create_device.bus_no;
        u32 dev_no = cmd->create_device.dev_no;
        struct visor_device *dev_info = NULL;
        struct visor_device *bus_info;
        struct visorchannel *visorchannel;
        int rc = CONTROLVM_RESP_SUCCESS;

        bus_info = visorbus_get_device_by_id(bus_no, BUS_ROOT_DEVICE, NULL);
        if (!bus_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        if (bus_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
                goto cleanup;
        }

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (dev_info && (dev_info->state.created == 1)) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
                goto cleanup;
        }

        dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                goto cleanup;
        }

        dev_info->chipset_bus_no = bus_no;
        dev_info->chipset_dev_no = dev_no;
        dev_info->inst = cmd->create_device.dev_inst_uuid;

        /* not sure where the best place is to set the 'parent' */
        dev_info->device.parent = &bus_info->device;

        POSTCODE_LINUX_4(DEVICE_CREATE_ENTRY_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);

        visorchannel = visorchannel_create(cmd->create_device.channel_addr,
                                           cmd->create_device.channel_bytes,
                                           GFP_KERNEL,
                                           cmd->create_device.data_type_uuid);

        if (!visorchannel) {
                POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
                kfree(dev_info);
                dev_info = NULL;
                goto cleanup;
        }
        dev_info->visorchannel = visorchannel;
        dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
        POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
                         POSTCODE_SEVERITY_INFO);
cleanup:
        device_epilog(dev_info, segment_state_running,
                      CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
                      inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_changestate(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->device_change_state.bus_no;
        u32 dev_no = cmd->device_change_state.dev_no;
        struct spar_segment_state state = cmd->device_change_state.state;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        } else if (dev_info->state.created == 0) {
                POSTCODE_LINUX_4(DEVICE_CHANGESTATE_FAILURE_PC, dev_no, bus_no,
                                 POSTCODE_SEVERITY_ERR);
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        }
        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, state,
                              CONTROLVM_DEVICE_CHANGESTATE, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

static void
my_device_destroy(struct controlvm_message *inmsg)
{
        struct controlvm_message_packet *cmd = &inmsg->cmd;
        u32 bus_no = cmd->destroy_device.bus_no;
        u32 dev_no = cmd->destroy_device.dev_no;
        struct visor_device *dev_info;
        int rc = CONTROLVM_RESP_SUCCESS;

        dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
        if (!dev_info)
                rc = -CONTROLVM_RESP_ERROR_DEVICE_INVALID;
        else if (dev_info->state.created == 0)
                rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;

        if ((rc >= CONTROLVM_RESP_SUCCESS) && dev_info)
                device_epilog(dev_info, segment_state_running,
                              CONTROLVM_DEVICE_DESTROY, &inmsg->hdr, rc,
                              inmsg->hdr.flags.response_expected == 1, 1);
}

/* When provided with the physical address of the controlvm channel
 * (phys_addr), the offset to the payload area we need to manage
 * (offset), and the size of this payload area (bytes), fills in the
 * controlvm_payload_info struct.  Returns CONTROLVM_RESP_SUCCESS on
 * success or a negative CONTROLVM_RESP_ERROR_* code on failure.
 */
static int
initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
                                  struct visor_controlvm_payload_info *info)
{
        u8 *payload = NULL;
        int rc = CONTROLVM_RESP_SUCCESS;

        if (!info) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
        if ((offset == 0) || (bytes == 0)) {
                rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
                goto cleanup;
        }
        payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
        if (!payload) {
                rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
                goto cleanup;
        }

        info->offset = offset;
        info->bytes = bytes;
        info->ptr = payload;

cleanup:
        if (rc < 0) {
                if (payload) {
                        memunmap(payload);
                        payload = NULL;
                }
        }
        return rc;
}

static void
destroy_controlvm_payload_info(struct visor_controlvm_payload_info *info)
{
        if (info->ptr) {
                memunmap(info->ptr);
                info->ptr = NULL;
        }
        memset(info, 0, sizeof(struct visor_controlvm_payload_info));
}

static void
initialize_controlvm_payload(void)
{
        u64 phys_addr = visorchannel_get_physaddr(controlvm_channel);
        u64 payload_offset = 0;
        u32 payload_bytes = 0;

        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_offset),
                              &payload_offset, sizeof(payload_offset)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        if (visorchannel_read(controlvm_channel,
                              offsetof(struct spar_controlvm_channel_protocol,
                                       request_payload_bytes),
                              &payload_bytes, sizeof(payload_bytes)) < 0) {
                POSTCODE_LINUX_2(CONTROLVM_INIT_FAILURE_PC,
                                 POSTCODE_SEVERITY_ERR);
                return;
        }
        initialize_controlvm_payload_info(phys_addr,
                                          payload_offset, payload_bytes,
                                          &controlvm_payload_info);
}

/*  Send ACTION=online for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_ready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_ONLINE);
        return CONTROLVM_RESP_SUCCESS;
}

static int
visorchipset_chipset_selftest(void)
{
        char env_selftest[20];
        char *envp[] = { env_selftest, NULL };

        sprintf(env_selftest, "SPARSP_SELFTEST=%d", 1);
        kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
                           envp);
        return CONTROLVM_RESP_SUCCESS;
}

/*  Send ACTION=offline for DEVPATH=/sys/devices/platform/visorchipset.
 *  Returns CONTROLVM_RESP_xxx code.
 */
static int
visorchipset_chipset_notready(void)
{
        kobject_uevent(&visorchipset_platform_device.dev.kobj, KOBJ_OFFLINE);
        return CONTROLVM_RESP_SUCCESS;
}

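/*
 * Handlers for CHIPSET_READY, CHIPSET_SELFTEST and CHIPSET_NOTREADY:
 * fire the corresponding uevent and respond.  With the
 * visorchipset_holdchipsetready module parameter set, the CHIPSET_READY
 * response is instead parked in g_chipset_msg_hdr and sent later, once
 * the rest of the partition reports ready (see the chipsetready
 * attribute).
 */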
1433 static void
1434 chipset_ready(struct controlvm_message_header *msg_hdr)
1435 {
1436         int rc = visorchipset_chipset_ready();
1437
1438         if (rc != CONTROLVM_RESP_SUCCESS)
1439                 rc = -rc;
1440         if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
1441                 controlvm_respond(msg_hdr, rc);
1442         if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
1443                 /* Send CHIPSET_READY response when all modules have been loaded
1444                  * and disks mounted for the partition
1445                  */
1446                 g_chipset_msg_hdr = *msg_hdr;
1447         }
1448 }
1449
1450 static void
1451 chipset_selftest(struct controlvm_message_header *msg_hdr)
1452 {
1453         int rc = visorchipset_chipset_selftest();
1454
1455         if (rc != CONTROLVM_RESP_SUCCESS)
1456                 rc = -rc;
1457         if (msg_hdr->flags.response_expected)
1458                 controlvm_respond(msg_hdr, rc);
1459 }
1460
1461 static void
1462 chipset_notready(struct controlvm_message_header *msg_hdr)
1463 {
1464         int rc = visorchipset_chipset_notready();
1465
1466         if (rc != CONTROLVM_RESP_SUCCESS)
1467                 rc = -rc;
1468         if (msg_hdr->flags.response_expected)
1469                 controlvm_respond(msg_hdr, rc);
1470 }
1471
1472 /* Grab the next message from the CONTROLVM_QUEUE_EVENT queue in the
1473  * controlvm channel; returns true only for a real (non-test) message.
1474  */
1475 static bool
1476 read_controlvm_event(struct controlvm_message *msg)
1477 {
1478         if (visorchannel_signalremove(controlvm_channel,
1479                                       CONTROLVM_QUEUE_EVENT, msg)) {
1480                 /* got a message */
1481                 if (msg->hdr.flags.test_message == 1)
1482                         return false;
1483                 return true;
1484         }
1485         return false;
1486 }
1487
1488 /*
1489  * The general parahotplug flow works as follows.  The visorchipset
1490  * driver receives a DEVICE_CHANGESTATE message from Command
1491  * specifying a physical device to enable or disable.  The CONTROLVM
1492  * message handler calls parahotplug_process_message, which then adds
1493  * the message to a global list and kicks off a udev event which
1494  * causes a user level script to enable or disable the specified
1495  * device.  The udev script then writes the request id back to the
1496  * parahotplug/deviceenabled or parahotplug/devicedisabled sysfs
1497  * attribute, causing parahotplug_request_complete() to be called, at
1498  * which point the matching CONTROLVM message is responded to.
1499  */
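
/* A minimal sketch of the userspace half of that handshake, assuming a
 * udev rule dispatches SPAR_PARAHOTPLUG=1 events to a script; the
 * script path and sysfs locations are assumptions for illustration:
 *
 *   #!/bin/sh
 *   [ "$SPAR_PARAHOTPLUG" = "1" ] || exit 0
 *   if [ "$SPAR_PARAHOTPLUG_STATE" = "0" ]; then
 *       # ... take down PCI function $SPAR_PARAHOTPLUG_BUS /
 *       # $SPAR_PARAHOTPLUG_DEVICE.$SPAR_PARAHOTPLUG_FUNCTION here ...
 *       echo "$SPAR_PARAHOTPLUG_ID" > \
 *           /sys/devices/platform/visorchipset/parahotplug/devicedisabled
 *   fi
 */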
1500
1501 #define PARAHOTPLUG_TIMEOUT_MS 2000
1502
1503 /*
1504  * Generate unique int to match an outstanding CONTROLVM message with a
1505  * udev script /proc response
1506  */
1507 static int
1508 parahotplug_next_id(void)
1509 {
1510         static atomic_t id = ATOMIC_INIT(0);
1511
1512         return atomic_inc_return(&id);
1513 }
1514
1515 /*
1516  * Returns the time (in jiffies) when a CONTROLVM message on the list
1517  * should expire -- PARAHOTPLUG_TIMEOUT_MS in the future
1518  */
1519 static unsigned long
1520 parahotplug_next_expiration(void)
1521 {
1522         return jiffies + msecs_to_jiffies(PARAHOTPLUG_TIMEOUT_MS);
1523 }
1524
1525 /*
1526  * Create a parahotplug_request, which is basically a wrapper for a
1527  * CONTROLVM_MESSAGE that we can stick on a list
1528  */
1529 static struct parahotplug_request *
1530 parahotplug_request_create(struct controlvm_message *msg)
1531 {
1532         struct parahotplug_request *req;
1533
1534         req = kmalloc(sizeof(*req), GFP_KERNEL | __GFP_NORETRY);
1535         if (!req)
1536                 return NULL;
1537
1538         req->id = parahotplug_next_id();
1539         req->expiration = parahotplug_next_expiration();
1540         req->msg = *msg;
1541
1542         return req;
1543 }
1544
1545 /*
1546  * Free a parahotplug_request.
1547  */
1548 static void
1549 parahotplug_request_destroy(struct parahotplug_request *req)
1550 {
1551         kfree(req);
1552 }
1553
1554 /*
1555  * Kick off a udev event so the user-level script performs the
1556  * enable/disable requested by the CONTROLVM message wrapped in the
1557  * given parahotplug_request
1558  */
1559 static void
1560 parahotplug_request_kickoff(struct parahotplug_request *req)
1561 {
1562         struct controlvm_message_packet *cmd = &req->msg.cmd;
1563         char env_cmd[40], env_id[40], env_state[40], env_bus[40], env_dev[40],
1564             env_func[40];
1565         char *envp[] = {
1566                 env_cmd, env_id, env_state, env_bus, env_dev, env_func, NULL
1567         };
1568
1569         sprintf(env_cmd, "SPAR_PARAHOTPLUG=1");
1570         sprintf(env_id, "SPAR_PARAHOTPLUG_ID=%d", req->id);
1571         sprintf(env_state, "SPAR_PARAHOTPLUG_STATE=%d",
1572                 cmd->device_change_state.state.active);
1573         sprintf(env_bus, "SPAR_PARAHOTPLUG_BUS=%d",
1574                 cmd->device_change_state.bus_no);
1575         sprintf(env_dev, "SPAR_PARAHOTPLUG_DEVICE=%d",
1576                 cmd->device_change_state.dev_no >> 3);
1577         sprintf(env_func, "SPAR_PARAHOTPLUG_FUNCTION=%d",
1578                 cmd->device_change_state.dev_no & 0x7);
1579
1580         kobject_uevent_env(&visorchipset_platform_device.dev.kobj, KOBJ_CHANGE,
1581                            envp);
1582 }
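
/* For example (purely illustrative values): a disable request with
 * id 7 for s-Par bus 1, dev_no 17 (device 2, function 1) produces:
 *
 *   SPAR_PARAHOTPLUG=1
 *   SPAR_PARAHOTPLUG_ID=7
 *   SPAR_PARAHOTPLUG_STATE=0
 *   SPAR_PARAHOTPLUG_BUS=1
 *   SPAR_PARAHOTPLUG_DEVICE=2        (17 >> 3)
 *   SPAR_PARAHOTPLUG_FUNCTION=1      (17 & 0x7)
 */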
1583
1584 /*
1585  * Remove any request from the list that's been on there too long and
1586  * respond with an error.
1587  */
1588 static void
1589 parahotplug_process_list(void)
1590 {
1591         struct list_head *pos;
1592         struct list_head *tmp;
1593
1594         spin_lock(&parahotplug_request_list_lock);
1595
1596         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1597                 struct parahotplug_request *req =
1598                     list_entry(pos, struct parahotplug_request, list);
1599
1600                 if (!time_after_eq(jiffies, req->expiration))
1601                         continue;
1602
1603                 list_del(pos);
1604                 if (req->msg.hdr.flags.response_expected)
1605                         controlvm_respond_physdev_changestate(
1606                                 &req->msg.hdr,
1607                                 CONTROLVM_RESP_ERROR_DEVICE_UDEV_TIMEOUT,
1608                                 req->msg.cmd.device_change_state.state);
1609                 parahotplug_request_destroy(req);
1610         }
1611
1612         spin_unlock(&parahotplug_request_list_lock);
1613 }
1614
1615 /*
1616  * Called from the sysfs store handlers, meaning the user script has
1617  * finished the enable/disable.  Find the matching identifier, and
1618  * respond to the CONTROLVM message with success.
1619  */
1620 static int
1621 parahotplug_request_complete(int id, u16 active)
1622 {
1623         struct list_head *pos;
1624         struct list_head *tmp;
1625
1626         spin_lock(&parahotplug_request_list_lock);
1627
1628         /* Look for a request matching "id". */
1629         list_for_each_safe(pos, tmp, &parahotplug_request_list) {
1630                 struct parahotplug_request *req =
1631                     list_entry(pos, struct parahotplug_request, list);
1632                 if (req->id == id) {
1633                         /* Found a match.  Remove it from the list and
1634                          * respond.
1635                          */
1636                         list_del(pos);
1637                         spin_unlock(&parahotplug_request_list_lock);
1638                         req->msg.cmd.device_change_state.state.active = active;
1639                         if (req->msg.hdr.flags.response_expected)
1640                                 controlvm_respond_physdev_changestate(
1641                                         &req->msg.hdr, CONTROLVM_RESP_SUCCESS,
1642                                         req->msg.cmd.device_change_state.state);
1643                         parahotplug_request_destroy(req);
1644                         return 0;
1645                 }
1646         }
1647
1648         spin_unlock(&parahotplug_request_list_lock);
1649         return -1;
1650 }
1651
1652 /*
1653  * Enables or disables a PCI device by kicking off a udev script
1654  */
1655 static void
1656 parahotplug_process_message(struct controlvm_message *inmsg)
1657 {
1658         struct parahotplug_request *req;
1659
1660         req = parahotplug_request_create(inmsg);
1661
1662         if (!req)
1663                 return;
1664
1665         if (inmsg->cmd.device_change_state.state.active) {
1666                 /* For enable messages, just respond with success
1667                  * right away.  This is a bit of a hack, but there are
1668                  * issues with the early enable messages we get (with
1669                  * either the udev script not detecting that the device
1670                  * is up, or not getting called at all).  Fortunately
1671                  * the messages that get lost don't matter anyway, as
1672                  * devices are automatically enabled at
1673                  * initialization.
1674                  */
1675                 parahotplug_request_kickoff(req);
1676                 controlvm_respond_physdev_changestate(&inmsg->hdr,
1677                         CONTROLVM_RESP_SUCCESS,
1678                         inmsg->cmd.device_change_state.state);
1679                 parahotplug_request_destroy(req);
1680         } else {
1681                 /* For disable messages, add the request to the
1682                  * request list before kicking off the udev script.  It
1683                  * won't get responded to until the script has
1684                  * indicated it's done.
1685                  */
1686                 spin_lock(&parahotplug_request_list_lock);
1687                 list_add_tail(&req->list, &parahotplug_request_list);
1688                 spin_unlock(&parahotplug_request_list_lock);
1689
1690                 parahotplug_request_kickoff(req);
1691         }
1692 }
1693
1694 /* Process a controlvm message.
1695  * Return result:
1696  *    false - this function will return false only in the case where the
1697  *            controlvm message was NOT processed, but processing must be
1698  *            retried before reading the next controlvm message; a
1699  *            scenario where this can occur is when we need to throttle
1700  *            the allocation of memory in which to copy out controlvm
1701  *            payload data
1702  *    true  - processing of the controlvm message completed,
1703  *            either successfully or with an error.
1704  */
1705 static bool
1706 handle_command(struct controlvm_message inmsg, u64 channel_addr)
1707 {
1708         struct controlvm_message_packet *cmd = &inmsg.cmd;
1709         u64 parm_addr;
1710         u32 parm_bytes;
1711         struct parser_context *parser_ctx = NULL;
1712         bool local_addr;
1713         struct controlvm_message ackmsg;
1714
1715         /* create parsing context if necessary */
1716         local_addr = (inmsg.hdr.flags.test_message == 1);
1717         if (channel_addr == 0)  /* should never happen for a live channel */
1718                 return true;
1719         parm_addr = channel_addr + inmsg.hdr.payload_vm_offset;
1720         parm_bytes = inmsg.hdr.payload_bytes;
1721
1722         /* Parameter and channel addresses within test messages actually lie
1723          * within our OS-controlled memory.  We need to know that, because it
1724          * makes a difference in how we compute the virtual address.
1725          */
1726         if (parm_addr && parm_bytes) {
1727                 bool retry = false;
1728
1729                 parser_ctx =
1730                     parser_init_byte_stream(parm_addr, parm_bytes,
1731                                             local_addr, &retry);
1732                 if (!parser_ctx && retry)
1733                         return false;
1734         }
1735
1736         if (!local_addr) {
1737                 controlvm_init_response(&ackmsg, &inmsg.hdr,
1738                                         CONTROLVM_RESP_SUCCESS);
1739                 if (controlvm_channel)
1740                         visorchannel_signalinsert(controlvm_channel,
1741                                                   CONTROLVM_QUEUE_ACK,
1742                                                   &ackmsg);
1743         }
1744         switch (inmsg.hdr.id) {
1745         case CONTROLVM_CHIPSET_INIT:
1746                 chipset_init(&inmsg);
1747                 break;
1748         case CONTROLVM_BUS_CREATE:
1749                 bus_create(&inmsg);
1750                 break;
1751         case CONTROLVM_BUS_DESTROY:
1752                 bus_destroy(&inmsg);
1753                 break;
1754         case CONTROLVM_BUS_CONFIGURE:
1755                 bus_configure(&inmsg, parser_ctx);
1756                 break;
1757         case CONTROLVM_DEVICE_CREATE:
1758                 my_device_create(&inmsg);
1759                 break;
1760         case CONTROLVM_DEVICE_CHANGESTATE:
1761                 if (cmd->device_change_state.flags.phys_device) {
1762                         parahotplug_process_message(&inmsg);
1763                 } else {
1764                         /* save the hdr and cmd structures for later use
1765                          * when sending back the response to Command
1766                          */
1767                         my_device_changestate(&inmsg);
1768                         g_devicechangestate_packet = inmsg.cmd;
1769                 }
1770                 break;
1771         case CONTROLVM_DEVICE_DESTROY:
1772                 my_device_destroy(&inmsg);
1773                 break;
1774         case CONTROLVM_DEVICE_CONFIGURE:
1775                 /* no-op for now; just respond that we passed */
1776                 if (inmsg.hdr.flags.response_expected)
1777                         controlvm_respond(&inmsg.hdr, CONTROLVM_RESP_SUCCESS);
1778                 break;
1779         case CONTROLVM_CHIPSET_READY:
1780                 chipset_ready(&inmsg.hdr);
1781                 break;
1782         case CONTROLVM_CHIPSET_SELFTEST:
1783                 chipset_selftest(&inmsg.hdr);
1784                 break;
1785         case CONTROLVM_CHIPSET_STOP:
1786                 chipset_notready(&inmsg.hdr);
1787                 break;
1788         default:
1789                 if (inmsg.hdr.flags.response_expected)
1790                         controlvm_respond(&inmsg.hdr,
1791                                 -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
1792                 break;
1793         }
1794
1795         if (parser_ctx) {
1796                 parser_done(parser_ctx);
1797                 parser_ctx = NULL;
1798         }
1799         return true;
1800 }
1801
1802 static inline unsigned int
1803 issue_vmcall_io_controlvm_addr(u64 *control_addr, u32 *control_bytes)
1804 {
1805         struct vmcall_io_controlvm_addr_params params;
1806         int result = VMCALL_SUCCESS;
1807         u64 physaddr;
1808         /* the hypervisor fills in params, addressed physically */
1809         physaddr = virt_to_phys(&params);
1810         ISSUE_IO_VMCALL(VMCALL_IO_CONTROLVM_ADDR, physaddr, result);
1811         if (VMCALL_SUCCESSFUL(result)) {
1812                 *control_addr = params.address;
1813                 *control_bytes = params.channel_bytes;
1814         }
1815         return result;
1816 }
1817
1818 static u64 controlvm_get_channel_address(void)
1819 {
1820         u64 addr = 0;
1821         u32 size = 0;
1822
1823         if (!VMCALL_SUCCESSFUL(issue_vmcall_io_controlvm_addr(&addr, &size)))
1824                 return 0;
1825
1826         return addr;
1827 }
1828
1829 static void
1830 controlvm_periodic_work(struct work_struct *work)
1831 {
1832         struct controlvm_message inmsg;
1833         bool got_command = false;
1834         bool handle_command_failed = false;
1835         static u64 poll_count;
1836
1837         /* make sure visorbus server is registered for controlvm callbacks */
1838         if (visorchipset_visorbusregwait && !visorbusregistered)
1839                 goto cleanup;
1840
1841         poll_count++;
1842         if (poll_count < 250)
1843                 goto cleanup;   /* wait for 250 polls before processing */
1846
1847         /* Check events to determine if response to CHIPSET_READY
1848          * should be sent
1849          */
1850         if (visorchipset_holdchipsetready &&
1851             (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
1852                 if (check_chipset_events() == 1) {
1853                         controlvm_respond(&g_chipset_msg_hdr,
1853                                           CONTROLVM_RESP_SUCCESS);
1854                         clear_chipset_events();
1855                         memset(&g_chipset_msg_hdr, 0,
1856                                sizeof(struct controlvm_message_header));
1857                 }
1858         }
1859         /* drain and discard everything on the RESPONSE queue */
1860         while (visorchannel_signalremove(controlvm_channel,
1861                                          CONTROLVM_QUEUE_RESPONSE,
1862                                          &inmsg))
1863                 ;
1864         if (!got_command) {
1865                 if (controlvm_pending_msg_valid) {
1866                         /* we throttled processing of a prior
1867                          * msg, so try to process it again
1868                          * rather than reading a new one
1869                          */
1870                         inmsg = controlvm_pending_msg;
1871                         controlvm_pending_msg_valid = false;
1872                         got_command = true;
1873                 } else {
1874                         got_command = read_controlvm_event(&inmsg);
1875                 }
1876         }
1877
1878         handle_command_failed = false;
1879         while (got_command && (!handle_command_failed)) {
1880                 most_recent_message_jiffies = jiffies;
1881                 if (handle_command(inmsg,
1882                                    visorchannel_get_physaddr
1883                                    (controlvm_channel)))
1884                         got_command = read_controlvm_event(&inmsg);
1885                 else {
1886                         /* throttling is required here, but it is
1887                          * probably NOT an error; stash the current
1888                          * controlvm msg so we can reprocess it on
1889                          * the next loop iteration
1890                          */
1892                         handle_command_failed = true;
1893                         controlvm_pending_msg = inmsg;
1894                         controlvm_pending_msg_valid = true;
1895                 }
1896         }
1897
1898         /* parahotplug_worker */
1899         parahotplug_process_list();
1900
1901 cleanup:
1902
1903         if (time_after(jiffies,
1904                        most_recent_message_jiffies + (HZ * MIN_IDLE_SECONDS))) {
1905                 /* it's been longer than MIN_IDLE_SECONDS since we
1906                  * processed our last controlvm message; slow down the
1907                  * polling
1908                  */
1909                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_SLOW)
1910                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
1911         } else {
1912                 if (poll_jiffies != POLLJIFFIES_CONTROLVMCHANNEL_FAST)
1913                         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
1914         }
1915
1916         queue_delayed_work(periodic_controlvm_workqueue,
1917                            &periodic_controlvm_work, poll_jiffies);
1918 }
1919
1920 static void
1921 setup_crash_devices_work_queue(struct work_struct *work)
1922 {
1923         struct controlvm_message local_crash_bus_msg;
1924         struct controlvm_message local_crash_dev_msg;
1925         struct controlvm_message msg;
1926         u32 local_crash_msg_offset;
1927         u16 local_crash_msg_count;
1928
1929         /* make sure visorbus is registered for controlvm callbacks */
1930         if (visorchipset_visorbusregwait && !visorbusregistered)
1931                 goto cleanup;
1932
1933         POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1934
1935         /* send init chipset msg; zero it so stack junk can't set hdr flags */
1935         memset(&msg, 0, sizeof(msg));
1936         msg.hdr.id = CONTROLVM_CHIPSET_INIT;
1937         msg.cmd.init_chipset.bus_count = 23;
1938         msg.cmd.init_chipset.switch_count = 0;
1939
1940         chipset_init(&msg);
1941
1942         /* get saved message count */
1943         if (visorchannel_read(controlvm_channel,
1944                               offsetof(struct spar_controlvm_channel_protocol,
1945                                        saved_crash_message_count),
1946                               &local_crash_msg_count, sizeof(u16)) < 0) {
1947                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1948                                  POSTCODE_SEVERITY_ERR);
1949                 return;
1950         }
1951
1952         if (local_crash_msg_count != CONTROLVM_CRASHMSG_MAX) {
1953                 POSTCODE_LINUX_3(CRASH_DEV_COUNT_FAILURE_PC,
1954                                  local_crash_msg_count,
1955                                  POSTCODE_SEVERITY_ERR);
1956                 return;
1957         }
1958
1959         /* get saved crash message offset */
1960         if (visorchannel_read(controlvm_channel,
1961                               offsetof(struct spar_controlvm_channel_protocol,
1962                                        saved_crash_message_offset),
1963                               &local_crash_msg_offset, sizeof(u32)) < 0) {
1964                 POSTCODE_LINUX_2(CRASH_DEV_CTRL_RD_FAILURE_PC,
1965                                  POSTCODE_SEVERITY_ERR);
1966                 return;
1967         }
1968
1969         /* read create device message for storage bus offset */
1970         if (visorchannel_read(controlvm_channel,
1971                               local_crash_msg_offset,
1972                               &local_crash_bus_msg,
1973                               sizeof(struct controlvm_message)) < 0) {
1974                 POSTCODE_LINUX_2(CRASH_DEV_RD_BUS_FAIULRE_PC,
1975                                  POSTCODE_SEVERITY_ERR);
1976                 return;
1977         }
1978
1979         /* read create device message for storage device */
1980         if (visorchannel_read(controlvm_channel,
1981                               local_crash_msg_offset +
1982                               sizeof(struct controlvm_message),
1983                               &local_crash_dev_msg,
1984                               sizeof(struct controlvm_message)) < 0) {
1985                 POSTCODE_LINUX_2(CRASH_DEV_RD_DEV_FAIULRE_PC,
1986                                  POSTCODE_SEVERITY_ERR);
1987                 return;
1988         }
1989
1990         /* reuse IOVM create bus message */
1991         if (local_crash_bus_msg.cmd.create_bus.channel_addr) {
1992                 bus_create(&local_crash_bus_msg);
1993         } else {
1994                 POSTCODE_LINUX_2(CRASH_DEV_BUS_NULL_FAILURE_PC,
1995                                  POSTCODE_SEVERITY_ERR);
1996                 return;
1997         }
1998
1999         /* reuse create device message for storage device */
2000         if (local_crash_dev_msg.cmd.create_device.channel_addr) {
2001                 my_device_create(&local_crash_dev_msg);
2002         } else {
2003                 POSTCODE_LINUX_2(CRASH_DEV_DEV_NULL_FAILURE_PC,
2004                                  POSTCODE_SEVERITY_ERR);
2005                 return;
2006         }
2007         POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
2008         return;
2009
2010 cleanup:
2011
2012         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
2013
2014         queue_delayed_work(periodic_controlvm_workqueue,
2015                            &periodic_controlvm_work, poll_jiffies);
2016 }
2017
2018 static void
2019 bus_create_response(struct visor_device *bus_info, int response)
2020 {
2021         if (response >= 0)
2022                 bus_info->state.created = 1;
2023
2024         bus_responder(CONTROLVM_BUS_CREATE, bus_info->pending_msg_hdr,
2025                       response);
2026
2027         kfree(bus_info->pending_msg_hdr);
2028         bus_info->pending_msg_hdr = NULL;
2029 }
2030
2031 static void
2032 bus_destroy_response(struct visor_device *bus_info, int response)
2033 {
2034         bus_responder(CONTROLVM_BUS_DESTROY, bus_info->pending_msg_hdr,
2035                       response);
2036
2037         kfree(bus_info->pending_msg_hdr);
2038         bus_info->pending_msg_hdr = NULL;
2039 }
2040
2041 static void
2042 device_create_response(struct visor_device *dev_info, int response)
2043 {
2044         if (response >= 0)
2045                 dev_info->state.created = 1;
2046
2047         device_responder(CONTROLVM_DEVICE_CREATE, dev_info->pending_msg_hdr,
2048                          response);
2049         kfree(dev_info->pending_msg_hdr);
2050         dev_info->pending_msg_hdr = NULL;
2051 }
2052
2053 static void
2054 device_destroy_response(struct visor_device *dev_info, int response)
2055 {
2056         device_responder(CONTROLVM_DEVICE_DESTROY, dev_info->pending_msg_hdr,
2057                          response);
2058
2059         kfree(dev_info->pending_msg_hdr);
2060         dev_info->pending_msg_hdr = NULL;
2061 }
2062
2063 static void
2064 visorchipset_device_pause_response(struct visor_device *dev_info,
2065                                    int response)
2066 {
2067         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2068                                      dev_info, response,
2069                                      segment_state_standby);
2070
2071         kfree(dev_info->pending_msg_hdr);
2072         dev_info->pending_msg_hdr = NULL;
2073 }
2074
2075 static void
2076 device_resume_response(struct visor_device *dev_info, int response)
2077 {
2078         device_changestate_responder(CONTROLVM_DEVICE_CHANGESTATE,
2079                                      dev_info, response,
2080                                      segment_state_running);
2081
2082         kfree(dev_info->pending_msg_hdr);
2083         dev_info->pending_msg_hdr = NULL;
2084 }
2085
2086 static ssize_t chipsetready_store(struct device *dev,
2087                                   struct device_attribute *attr,
2088                                   const char *buf, size_t count)
2089 {
2090         char msgtype[64];
2091
2092         if (sscanf(buf, "%63s", msgtype) != 1)
2093                 return -EINVAL;
2094
2095         if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
2096                 chipset_events[0] = 1;
2097                 return count;
2098         } else if (!strcmp(msgtype, "MODULES_LOADED")) {
2099                 chipset_events[1] = 1;
2100                 return count;
2101         }
2102         return -EINVAL;
2103 }
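
/* Illustrative usage from userspace; the sysfs path assumes the
 * "guest" attribute group this driver registers elsewhere in this
 * file:
 *
 *   echo MODULES_LOADED > \
 *       /sys/devices/platform/visorchipset/guest/chipsetready
 *   echo CALLHOMEDISK_MOUNTED > \
 *       /sys/devices/platform/visorchipset/guest/chipsetready
 *
 * Once both strings have been written, check_chipset_events() returns
 * 1 and controlvm_periodic_work() releases the held CHIPSET_READY
 * response.
 */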
2104
2105 /* The parahotplug/devicedisabled interface gets called by our support script
2106  * when an SR-IOV device has been shut down. The ID is passed to the script
2107  * and then passed back when the device has been removed.
2108  */
2109 static ssize_t devicedisabled_store(struct device *dev,
2110                                     struct device_attribute *attr,
2111                                     const char *buf, size_t count)
2112 {
2113         unsigned int id;
2114
2115         if (kstrtouint(buf, 10, &id))
2116                 return -EINVAL;
2117
2118         parahotplug_request_complete(id, 0);
2119         return count;
2120 }
2121
2122 /* The parahotplug/deviceenabled interface gets called by our support script
2123  * when an SR-IOV device has been recovered. The ID is passed to the script
2124  * and then passed back when the device has been brought back up.
2125  */
2126 static ssize_t deviceenabled_store(struct device *dev,
2127                                    struct device_attribute *attr,
2128                                    const char *buf, size_t count)
2129 {
2130         unsigned int id;
2131
2132         if (kstrtouint(buf, 10, &id))
2133                 return -EINVAL;
2134
2135         parahotplug_request_complete(id, 1);
2136         return count;
2137 }
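
/* Illustrative acknowledgement from the udev script once the device is
 * back up (sysfs path assumes the "parahotplug" attribute group
 * registered elsewhere in this file):
 *
 *   echo "$SPAR_PARAHOTPLUG_ID" > \
 *       /sys/devices/platform/visorchipset/parahotplug/deviceenabled
 */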
2138
2139 static int
2140 visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
2141 {
2142         unsigned long physaddr = 0;
2143         unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
2144         u64 addr = 0;
2145
2147         if (offset & (PAGE_SIZE - 1))
2148                 return -ENXIO;  /* need aligned offsets */
2149
2150         switch (offset) {
2151         case VISORCHIPSET_MMAP_CONTROLCHANOFFSET:
2152                 vma->vm_flags |= VM_IO;
2153                 if (!*file_controlvm_channel)
2154                         return -ENXIO;
2155
2156                 visorchannel_read(*file_controlvm_channel,
2157                         offsetof(struct spar_controlvm_channel_protocol,
2158                                  gp_control_channel),
2159                         &addr, sizeof(addr));
2160                 if (!addr)
2161                         return -ENXIO;
2162
2163                 physaddr = (unsigned long)addr;
2164                 if (remap_pfn_range(vma, vma->vm_start,
2165                                     physaddr >> PAGE_SHIFT,
2166                                     vma->vm_end - vma->vm_start,
2167                                     /*pgprot_noncached */
2168                                     (vma->vm_page_prot))) {
2169                         return -EAGAIN;
2170                 }
2171                 break;
2172         default:
2173                 return -ENXIO;
2174         }
2175         return 0;
2176 }
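
/* A minimal userspace sketch of the mmap interface above, assuming a
 * /dev/visorchipset node exists for this char device; illustrative
 * only, not shipped with the driver:
 */
#if 0
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/visorchipset", O_RDWR);
	void *chan;

	if (fd < 0)
		return 1;
	/* offset 0 == VISORCHIPSET_MMAP_CONTROLCHANOFFSET */
	chan = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);
	if (chan == MAP_FAILED)
		return 1;
	/* ... inspect the gp_control_channel memory here ... */
	munmap(chan, 4096);
	return 0;
}
#endif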
2177
2178 static inline s64 issue_vmcall_query_guest_virtual_time_offset(void)
2179 {
2180         u64 result = VMCALL_SUCCESS;
2181         u64 physaddr = 0;
2182
2183         ISSUE_IO_VMCALL(VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, physaddr,
2184                         result);
2185         return result;
2186 }
2187
2188 static inline int issue_vmcall_update_physical_time(u64 adjustment)
2189 {
2190         int result = VMCALL_SUCCESS;
2191
2192         ISSUE_IO_VMCALL(VMCALL_UPDATE_PHYSICAL_TIME, adjustment, result);
2193         return result;
2194 }
2195
2196 static long visorchipset_ioctl(struct file *file, unsigned int cmd,
2197                                unsigned long arg)
2198 {
2199         s64 adjustment;
2200         s64 vrtc_offset;
2201
2202         switch (cmd) {
2203         case VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET:
2204                 /* get the physical rtc offset */
2205                 vrtc_offset = issue_vmcall_query_guest_virtual_time_offset();
2206                 if (copy_to_user((void __user *)arg, &vrtc_offset,
2207                                  sizeof(vrtc_offset))) {
2208                         return -EFAULT;
2209                 }
2210                 return 0;
2211         case VMCALL_UPDATE_PHYSICAL_TIME:
2212                 if (copy_from_user(&adjustment, (void __user *)arg,
2213                                    sizeof(adjustment))) {
2214                         return -EFAULT;
2215                 }
2216                 return issue_vmcall_update_physical_time(adjustment);
2217         default:
2218                 return -ENOTTY; /* not a recognized ioctl */
2219         }
2220 }
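
/* Hypothetical userspace use of the two ioctls above.  Note that the
 * driver (unusually) uses the raw VMCALL_* numbers as ioctl cmd values,
 * so a caller must share those definitions with the kernel side:
 */
#if 0
#include <stdint.h>
#include <sys/ioctl.h>

static int adjust_guest_time(int fd)
{
	int64_t vrtc_offset;
	int64_t adjustment = 1000;	/* illustrative value */

	if (ioctl(fd, VMCALL_QUERY_GUEST_VIRTUAL_TIME_OFFSET, &vrtc_offset))
		return -1;
	return ioctl(fd, VMCALL_UPDATE_PHYSICAL_TIME, &adjustment);
}
#endif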
2221
2222 static const struct file_operations visorchipset_fops = {
2223         .owner = THIS_MODULE,
2224         .open = visorchipset_open,
2227         .unlocked_ioctl = visorchipset_ioctl,
2228         .release = visorchipset_release,
2229         .mmap = visorchipset_mmap,
2230 };
2231
2232 static int
2233 visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
2234 {
2235         int rc = 0;
2236
2237         file_controlvm_channel = controlvm_channel;
2238         cdev_init(&file_cdev, &visorchipset_fops);
2239         file_cdev.owner = THIS_MODULE;
2240         if (MAJOR(major_dev) == 0) {
2241                 rc = alloc_chrdev_region(&major_dev, 0, 1, "visorchipset");
2242                 /* dynamic major device number registration required */
2243                 if (rc < 0)
2244                         return rc;
2245         } else {
2246                 /* static major device number registration required */
2247                 rc = register_chrdev_region(major_dev, 1, "visorchipset");
2248                 if (rc < 0)
2249                         return rc;
2250         }
2251         rc = cdev_add(&file_cdev, MKDEV(MAJOR(major_dev), 0), 1);
2252         if (rc < 0) {
2253                 unregister_chrdev_region(major_dev, 1);
2254                 return rc;
2255         }
2256         return 0;
2257 }
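
/* With a dynamic major (module parameter "major" left at 0) the number
 * allocated above appears in /proc/devices; if no udev rule creates the
 * node, it can be made by hand (illustrative):
 *
 *   major=$(awk '$2 == "visorchipset" { print $1 }' /proc/devices)
 *   mknod /dev/visorchipset c "$major" 0
 */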
2258
2259 static int
2260 visorchipset_init(struct acpi_device *acpi_device)
2261 {
2262         int rc = 0;
2263         u64 addr;
2264         int tmp_sz = sizeof(struct spar_controlvm_channel_protocol);
2265         uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
2266
2267         addr = controlvm_get_channel_address();
2268         if (!addr)
2269                 return -ENODEV;
2270
2271         memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
2272         memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
2273
2274         controlvm_channel = visorchannel_create_with_lock(addr, tmp_sz,
2275                                                           GFP_KERNEL, uuid);
2276         if (!controlvm_channel)
2277                 return -ENODEV;
2278         if (!SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
2279                      visorchannel_get_header(controlvm_channel))) {
2280                 visorchannel_destroy(controlvm_channel);
2281                 controlvm_channel = NULL;
2282                 return -ENODEV;
2283         }
2283         initialize_controlvm_payload();
2284
2285         major_dev = MKDEV(visorchipset_major, 0);
2286         rc = visorchipset_file_init(major_dev, &controlvm_channel);
2287         if (rc < 0) {
2288                 POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
2289                 goto cleanup;
2290         }
2291
2292         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2293
2294         /* if booting in a crash kernel */
2295         if (is_kdump_kernel())
2296                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2297                                   setup_crash_devices_work_queue);
2298         else
2299                 INIT_DELAYED_WORK(&periodic_controlvm_work,
2300                                   controlvm_periodic_work);
2301         periodic_controlvm_workqueue =
2302             create_singlethread_workqueue("visorchipset_controlvm");
2303
2304         if (!periodic_controlvm_workqueue) {
2305                 POSTCODE_LINUX_2(CREATE_WORKQUEUE_FAILED_PC,
2306                                  DIAG_SEVERITY_ERR);
2307                 rc = -ENOMEM;
2308                 goto cleanup;
2309         }
2310         most_recent_message_jiffies = jiffies;
2311         poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
2312         /* queue_delayed_work() returns bool (it cannot fail for freshly
2313          * initialized work), so there is no error to check here
2314          */
2315         queue_delayed_work(periodic_controlvm_workqueue,
2316                            &periodic_controlvm_work, poll_jiffies);
2319
2320         visorchipset_platform_device.dev.devt = major_dev;
2321         if (platform_device_register(&visorchipset_platform_device) < 0) {
2322                 POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
2323                 rc = -1;
2324                 goto cleanup;
2325         }
2326         POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
2327
2328         rc = visorbus_init();
2329 cleanup:
2330         if (rc) {
2331                 POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
2332                                  POSTCODE_SEVERITY_ERR);
2333         }
2334         return rc;
2335 }
2336
2337 static void
2338 visorchipset_file_cleanup(dev_t major_dev)
2339 {
2340         if (file_cdev.ops)
2341                 cdev_del(&file_cdev);
2342         file_cdev.ops = NULL;
2343         unregister_chrdev_region(major_dev, 1);
2344 }
2345
2346 static int
2347 visorchipset_exit(struct acpi_device *acpi_device)
2348 {
2349         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2350
2351         visorbus_exit();
2352
2353         cancel_delayed_work(&periodic_controlvm_work);
2354         flush_workqueue(periodic_controlvm_workqueue);
2355         destroy_workqueue(periodic_controlvm_workqueue);
2356         periodic_controlvm_workqueue = NULL;
2357         destroy_controlvm_payload_info(&controlvm_payload_info);
2358
2359         memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
2360
2361         visorchannel_destroy(controlvm_channel);
2362
2363         visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
2364         platform_device_unregister(&visorchipset_platform_device);
2365         POSTCODE_LINUX_2(DRIVER_EXIT_PC, POSTCODE_SEVERITY_INFO);
2366
2367         return 0;
2368 }
2369
2370 static const struct acpi_device_id unisys_device_ids[] = {
2371         {"PNP0A07", 0},
2372         {"", 0},
2373 };
2374
2375 static struct acpi_driver unisys_acpi_driver = {
2376         .name = "unisys_acpi",
2377         .class = "unisys_acpi_class",
2378         .owner = THIS_MODULE,
2379         .ids = unisys_device_ids,
2380         .ops = {
2381                 .add = visorchipset_init,
2382                 .remove = visorchipset_exit,
2383                 },
2384 };
2385 static u32 __init visorutil_spar_detect(void)
2386 {
2387         unsigned int eax, ebx, ecx, edx;
2388
2389         if (cpu_has_hypervisor) {
2390                 /* check the ID */
2391                 cpuid(UNISYS_SPAR_LEAF_ID, &eax, &ebx, &ecx, &edx);
2392                 return  (ebx == UNISYS_SPAR_ID_EBX) &&
2393                         (ecx == UNISYS_SPAR_ID_ECX) &&
2394                         (edx == UNISYS_SPAR_ID_EDX);
2395         } else {
2396                 return 0;
2397         }
2398 }
2399
2400 static int init_unisys(void)
2401 {
2402         int result;
2403
2404         if (!visorutil_spar_detect())
2405                 return -ENODEV;
2406
2407         result = acpi_bus_register_driver(&unisys_acpi_driver);
2408         if (result)
2409                 return -ENODEV;
2410
2411         pr_info("Unisys Visorchipset Driver Loaded.\n");
2412         return 0;
2413 }
2414
2415 static void exit_unisys(void)
2416 {
2417         acpi_bus_unregister_driver(&unisys_acpi_driver);
2418 }
2419
2420 module_param_named(major, visorchipset_major, int, S_IRUGO);
2421 MODULE_PARM_DESC(visorchipset_major,
2422                  "major device number to use for the device node");
2423 module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
2424 MODULE_PARM_DESC(visorchipset_visorbusregwait,
2425                  "1 to have the module wait for the visor bus to register");
2426 module_param_named(holdchipsetready, visorchipset_holdchipsetready,
2427                    int, S_IRUGO);
2428 MODULE_PARM_DESC(visorchipset_holdchipsetready,
2429                  "1 to hold response to CHIPSET_READY");
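
/* Illustrative parameter usage at load time (module name as packaged
 * by the build that ships this file; shown here as visorchipset):
 *
 *   modprobe visorchipset major=0 visorbusregwait=1 holdchipsetready=1
 */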
2430
2431 module_init(init_unisys);
2432 module_exit(exit_unisys);
2433
2434 MODULE_AUTHOR("Unisys");
2435 MODULE_LICENSE("GPL");
2436 MODULE_DESCRIPTION("Supervisor chipset driver for service partition: ver "
2437                    VERSION);
2438 MODULE_VERSION(VERSION);