arch/riscv/kvm/aia_imsic.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *      Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/atomic.h>
#include <linux/bitmap.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
#include <asm/csr.h>
#include <asm/kvm_aia_imsic.h>

#define IMSIC_MAX_EIX   (IMSIC_MAX_ID / BITS_PER_TYPE(u64))

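/*
 * Each EIX pair tracks BITS_PER_TYPE(u64) == 64 interrupt identities:
 * one u64 worth of pending (EIP) and enable (EIE) bits, stored as a
 * single unsigned long on 64-bit hosts or as two on 32-bit hosts.
 */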
struct imsic_mrif_eix {
        unsigned long eip[BITS_PER_TYPE(u64) / BITS_PER_LONG];
        unsigned long eie[BITS_PER_TYPE(u64) / BITS_PER_LONG];
};

struct imsic_mrif {
        struct imsic_mrif_eix eix[IMSIC_MAX_EIX];
        unsigned long eithreshold;
        unsigned long eidelivery;
};

struct imsic {
        struct kvm_io_device iodev;

        u32 nr_msis;
        u32 nr_eix;
        u32 nr_hw_eix;

        /*
         * At any point in time, the register state is in
         * one of the following places:
         *
         * 1) Hardware: IMSIC VS-file (vsfile_cpu >= 0)
         * 2) Software: IMSIC SW-file (vsfile_cpu < 0)
         */

        /* IMSIC VS-file */
        rwlock_t vsfile_lock;
        int vsfile_cpu;
        int vsfile_hgei;
        void __iomem *vsfile_va;
        phys_addr_t vsfile_pa;

        /* IMSIC SW-file */
        struct imsic_mrif *swfile;
        phys_addr_t swfile_pa;
};
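
/*
 * The vsfile_lock rwlock protects the VS-file fields above: readers
 * (such as MSI injection) may consult the current VS-file location
 * concurrently, while writers (VS-file allocation, release, and
 * cleanup) move the register state between the two places.
 */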

#define imsic_vs_csr_read(__c)                  \
({                                              \
        unsigned long __r;                      \
        csr_write(CSR_VSISELECT, __c);          \
        __r = csr_read(CSR_VSIREG);             \
        __r;                                    \
})

#define imsic_read_switchcase(__ireg)                   \
        case __ireg:                                    \
                return imsic_vs_csr_read(__ireg);
#define imsic_read_switchcase_2(__ireg)                 \
        imsic_read_switchcase(__ireg + 0)               \
        imsic_read_switchcase(__ireg + 1)
#define imsic_read_switchcase_4(__ireg)                 \
        imsic_read_switchcase_2(__ireg + 0)             \
        imsic_read_switchcase_2(__ireg + 2)
#define imsic_read_switchcase_8(__ireg)                 \
        imsic_read_switchcase_4(__ireg + 0)             \
        imsic_read_switchcase_4(__ireg + 4)
#define imsic_read_switchcase_16(__ireg)                \
        imsic_read_switchcase_8(__ireg + 0)             \
        imsic_read_switchcase_8(__ireg + 8)
#define imsic_read_switchcase_32(__ireg)                \
        imsic_read_switchcase_16(__ireg + 0)            \
        imsic_read_switchcase_16(__ireg + 16)
#define imsic_read_switchcase_64(__ireg)                \
        imsic_read_switchcase_32(__ireg + 0)            \
        imsic_read_switchcase_32(__ireg + 32)

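/*
 * The ladders above expand to one case label per indirect register so
 * that every imsic_vs_csr_read() sees a compile-time constant register
 * number. For example, imsic_read_switchcase_2(IMSIC_EIP0) expands to:
 *
 *      case IMSIC_EIP0 + 0:
 *              return imsic_vs_csr_read(IMSIC_EIP0 + 0);
 *      case IMSIC_EIP0 + 1:
 *              return imsic_vs_csr_read(IMSIC_EIP0 + 1);
 */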
static unsigned long imsic_eix_read(int ireg)
{
        switch (ireg) {
        imsic_read_switchcase_64(IMSIC_EIP0)
        imsic_read_switchcase_64(IMSIC_EIE0)
        }

        return 0;
}

#define imsic_vs_csr_swap(__c, __v)             \
({                                              \
        unsigned long __r;                      \
        csr_write(CSR_VSISELECT, __c);          \
        __r = csr_swap(CSR_VSIREG, __v);        \
        __r;                                    \
})

#define imsic_swap_switchcase(__ireg, __v)              \
        case __ireg:                                    \
                return imsic_vs_csr_swap(__ireg, __v);
#define imsic_swap_switchcase_2(__ireg, __v)            \
        imsic_swap_switchcase(__ireg + 0, __v)          \
        imsic_swap_switchcase(__ireg + 1, __v)
#define imsic_swap_switchcase_4(__ireg, __v)            \
        imsic_swap_switchcase_2(__ireg + 0, __v)        \
        imsic_swap_switchcase_2(__ireg + 2, __v)
#define imsic_swap_switchcase_8(__ireg, __v)            \
        imsic_swap_switchcase_4(__ireg + 0, __v)        \
        imsic_swap_switchcase_4(__ireg + 4, __v)
#define imsic_swap_switchcase_16(__ireg, __v)           \
        imsic_swap_switchcase_8(__ireg + 0, __v)        \
        imsic_swap_switchcase_8(__ireg + 8, __v)
#define imsic_swap_switchcase_32(__ireg, __v)           \
        imsic_swap_switchcase_16(__ireg + 0, __v)       \
        imsic_swap_switchcase_16(__ireg + 16, __v)
#define imsic_swap_switchcase_64(__ireg, __v)           \
        imsic_swap_switchcase_32(__ireg + 0, __v)       \
        imsic_swap_switchcase_32(__ireg + 32, __v)

static unsigned long imsic_eix_swap(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_swap_switchcase_64(IMSIC_EIP0, val)
        imsic_swap_switchcase_64(IMSIC_EIE0, val)
        }

        return 0;
}

#define imsic_vs_csr_write(__c, __v)            \
do {                                            \
        csr_write(CSR_VSISELECT, __c);          \
        csr_write(CSR_VSIREG, __v);             \
} while (0)

#define imsic_write_switchcase(__ireg, __v)             \
        case __ireg:                                    \
                imsic_vs_csr_write(__ireg, __v);        \
                break;
#define imsic_write_switchcase_2(__ireg, __v)           \
        imsic_write_switchcase(__ireg + 0, __v)         \
        imsic_write_switchcase(__ireg + 1, __v)
#define imsic_write_switchcase_4(__ireg, __v)           \
        imsic_write_switchcase_2(__ireg + 0, __v)       \
        imsic_write_switchcase_2(__ireg + 2, __v)
#define imsic_write_switchcase_8(__ireg, __v)           \
        imsic_write_switchcase_4(__ireg + 0, __v)       \
        imsic_write_switchcase_4(__ireg + 4, __v)
#define imsic_write_switchcase_16(__ireg, __v)          \
        imsic_write_switchcase_8(__ireg + 0, __v)       \
        imsic_write_switchcase_8(__ireg + 8, __v)
#define imsic_write_switchcase_32(__ireg, __v)          \
        imsic_write_switchcase_16(__ireg + 0, __v)      \
        imsic_write_switchcase_16(__ireg + 16, __v)
#define imsic_write_switchcase_64(__ireg, __v)          \
        imsic_write_switchcase_32(__ireg + 0, __v)      \
        imsic_write_switchcase_32(__ireg + 32, __v)

static void imsic_eix_write(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_write_switchcase_64(IMSIC_EIP0, val)
        imsic_write_switchcase_64(IMSIC_EIE0, val)
        }
}

#define imsic_vs_csr_set(__c, __v)              \
do {                                            \
        csr_write(CSR_VSISELECT, __c);          \
        csr_set(CSR_VSIREG, __v);               \
} while (0)

#define imsic_set_switchcase(__ireg, __v)               \
        case __ireg:                                    \
                imsic_vs_csr_set(__ireg, __v);          \
                break;
#define imsic_set_switchcase_2(__ireg, __v)             \
        imsic_set_switchcase(__ireg + 0, __v)           \
        imsic_set_switchcase(__ireg + 1, __v)
#define imsic_set_switchcase_4(__ireg, __v)             \
        imsic_set_switchcase_2(__ireg + 0, __v)         \
        imsic_set_switchcase_2(__ireg + 2, __v)
#define imsic_set_switchcase_8(__ireg, __v)             \
        imsic_set_switchcase_4(__ireg + 0, __v)         \
        imsic_set_switchcase_4(__ireg + 4, __v)
#define imsic_set_switchcase_16(__ireg, __v)            \
        imsic_set_switchcase_8(__ireg + 0, __v)         \
        imsic_set_switchcase_8(__ireg + 8, __v)
#define imsic_set_switchcase_32(__ireg, __v)            \
        imsic_set_switchcase_16(__ireg + 0, __v)        \
        imsic_set_switchcase_16(__ireg + 16, __v)
#define imsic_set_switchcase_64(__ireg, __v)            \
        imsic_set_switchcase_32(__ireg + 0, __v)        \
        imsic_set_switchcase_32(__ireg + 32, __v)

static void imsic_eix_set(int ireg, unsigned long val)
{
        switch (ireg) {
        imsic_set_switchcase_64(IMSIC_EIP0, val)
        imsic_set_switchcase_64(IMSIC_EIE0, val)
        }
}

static unsigned long imsic_mrif_atomic_rmw(struct imsic_mrif *mrif,
                                           unsigned long *ptr,
                                           unsigned long new_val,
                                           unsigned long wr_mask)
{
        unsigned long old_val = 0, tmp = 0;

        __asm__ __volatile__ (
                "0:     lr.w.aq   %1, %0\n"
                "       and       %2, %1, %3\n"
                "       or        %2, %2, %4\n"
                "       sc.w.rl   %2, %2, %0\n"
                "       bnez      %2, 0b"
                : "+A" (*ptr), "+r" (old_val), "+r" (tmp)
                : "r" (~wr_mask), "r" (new_val & wr_mask)
                : "memory");

        return old_val;
}
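
/*
 * The LR/SC loop above is a masked read-modify-write; it atomically
 * performs the equivalent of:
 *
 *      old_val = *ptr;
 *      *ptr = (old_val & ~wr_mask) | (new_val & wr_mask);
 *      return old_val;
 */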

static unsigned long imsic_mrif_atomic_or(struct imsic_mrif *mrif,
                                          unsigned long *ptr,
                                          unsigned long val)
{
        return atomic_long_fetch_or(val, (atomic_long_t *)ptr);
}

#define imsic_mrif_atomic_write(__mrif, __ptr, __new_val)       \
                imsic_mrif_atomic_rmw(__mrif, __ptr, __new_val, -1UL)
#define imsic_mrif_atomic_read(__mrif, __ptr)                   \
                imsic_mrif_atomic_or(__mrif, __ptr, 0)

static u32 imsic_mrif_topei(struct imsic_mrif *mrif, u32 nr_eix, u32 nr_msis)
{
        struct imsic_mrif_eix *eix;
        u32 i, imin, imax, ei, max_msi;
        unsigned long eipend[BITS_PER_TYPE(u64) / BITS_PER_LONG];
        unsigned long eithreshold = imsic_mrif_atomic_read(mrif,
                                                        &mrif->eithreshold);

        max_msi = (eithreshold && (eithreshold <= nr_msis)) ?
                   eithreshold : nr_msis;
        for (ei = 0; ei < nr_eix; ei++) {
                eix = &mrif->eix[ei];
                eipend[0] = imsic_mrif_atomic_read(mrif, &eix->eie[0]) &
                            imsic_mrif_atomic_read(mrif, &eix->eip[0]);
#ifdef CONFIG_32BIT
                eipend[1] = imsic_mrif_atomic_read(mrif, &eix->eie[1]) &
                            imsic_mrif_atomic_read(mrif, &eix->eip[1]);
                if (!eipend[0] && !eipend[1])
#else
                if (!eipend[0])
#endif
                        continue;

                imin = ei * BITS_PER_TYPE(u64);
                imax = ((imin + BITS_PER_TYPE(u64)) < max_msi) ?
                        imin + BITS_PER_TYPE(u64) : max_msi;
                for (i = (!imin) ? 1 : imin; i < imax; i++) {
                        if (test_bit(i - imin, eipend))
                                return (i << TOPEI_ID_SHIFT) | i;
                }
        }

        return 0;
}
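
/*
 * Illustrative example: if the lowest pending-and-enabled identity
 * below the threshold is 7, the function returns
 * (7 << TOPEI_ID_SHIFT) | 7, i.e. the identity in the upper field
 * and, since an IMSIC interrupt's priority equals its identity, the
 * same number in the low priority field. Identity 0 is never reported
 * because the scan starts at 1.
 */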

static int imsic_mrif_isel_check(u32 nr_eix, unsigned long isel)
{
        u32 num = 0;

        switch (isel) {
        case IMSIC_EIDELIVERY:
        case IMSIC_EITHRESHOLD:
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
                num = isel - IMSIC_EIP0;
                break;
        case IMSIC_EIE0 ... IMSIC_EIE63:
                num = isel - IMSIC_EIE0;
                break;
        default:
                return -ENOENT;
        }
#ifndef CONFIG_32BIT
        if (num & 0x1)
                return -EINVAL;
#endif
        if ((num / 2) >= nr_eix)
                return -EINVAL;

        return 0;
}
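
/*
 * The num / 2 mapping works because EIP/EIE register numbers come in
 * pairs: pair n covers identities n * 64 through n * 64 + 63, i.e.
 * eix[n]. On 64-bit hosts only the even-numbered register of each
 * pair exists as a 64-bit register, hence the (num & 0x1) rejection
 * above.
 */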

static int imsic_mrif_rmw(struct imsic_mrif *mrif, u32 nr_eix,
                          unsigned long isel, unsigned long *val,
                          unsigned long new_val, unsigned long wr_mask)
{
        bool pend;
        struct imsic_mrif_eix *eix;
        unsigned long *ei, num, old_val = 0;

        switch (isel) {
        case IMSIC_EIDELIVERY:
                old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eidelivery,
                                                new_val, wr_mask & 0x1);
                break;
        case IMSIC_EITHRESHOLD:
                old_val = imsic_mrif_atomic_rmw(mrif, &mrif->eithreshold,
                                new_val, wr_mask & (IMSIC_MAX_ID - 1));
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
        case IMSIC_EIE0 ... IMSIC_EIE63:
                if (isel >= IMSIC_EIP0 && isel <= IMSIC_EIP63) {
                        pend = true;
                        num = isel - IMSIC_EIP0;
                } else {
                        pend = false;
                        num = isel - IMSIC_EIE0;
                }

                if ((num / 2) >= nr_eix)
                        return -EINVAL;
                eix = &mrif->eix[num / 2];

#ifndef CONFIG_32BIT
                if (num & 0x1)
                        return -EINVAL;
                ei = (pend) ? &eix->eip[0] : &eix->eie[0];
#else
                ei = (pend) ? &eix->eip[num & 0x1] : &eix->eie[num & 0x1];
#endif

                /* Bit0 of EIP0 or EIE0 is read-only */
                if (!num)
                        wr_mask &= ~BIT(0);

                old_val = imsic_mrif_atomic_rmw(mrif, ei, new_val, wr_mask);
                break;
        default:
                return -ENOENT;
        }

        if (val)
                *val = old_val;

        return 0;
}

struct imsic_vsfile_read_data {
        int hgei;
        u32 nr_eix;
        bool clear;
        struct imsic_mrif *mrif;
};

static void imsic_vsfile_local_read(void *data)
{
        u32 i;
        struct imsic_mrif_eix *eix;
        struct imsic_vsfile_read_data *idata = data;
        struct imsic_mrif *mrif = idata->mrif;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        /*
         * We don't use imsic_mrif_atomic_xyz() functions to store
         * values in MRIF because imsic_vsfile_read() is always called
         * with pointer to temporary MRIF on stack.
         */

        if (idata->clear) {
                mrif->eidelivery = imsic_vs_csr_swap(IMSIC_EIDELIVERY, 0);
                mrif->eithreshold = imsic_vs_csr_swap(IMSIC_EITHRESHOLD, 0);
                for (i = 0; i < idata->nr_eix; i++) {
                        eix = &mrif->eix[i];
                        eix->eip[0] = imsic_eix_swap(IMSIC_EIP0 + i * 2, 0);
                        eix->eie[0] = imsic_eix_swap(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
                        eix->eip[1] = imsic_eix_swap(IMSIC_EIP0 + i * 2 + 1, 0);
                        eix->eie[1] = imsic_eix_swap(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
                }
        } else {
                mrif->eidelivery = imsic_vs_csr_read(IMSIC_EIDELIVERY);
                mrif->eithreshold = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
                for (i = 0; i < idata->nr_eix; i++) {
                        eix = &mrif->eix[i];
                        eix->eip[0] = imsic_eix_read(IMSIC_EIP0 + i * 2);
                        eix->eie[0] = imsic_eix_read(IMSIC_EIE0 + i * 2);
#ifdef CONFIG_32BIT
                        eix->eip[1] = imsic_eix_read(IMSIC_EIP0 + i * 2 + 1);
                        eix->eie[1] = imsic_eix_read(IMSIC_EIE0 + i * 2 + 1);
#endif
                }
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_read(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
                              bool clear, struct imsic_mrif *mrif)
{
        struct imsic_vsfile_read_data idata;

        /* We can only read (and optionally clear) if we have an IMSIC VS-file */
        if (vsfile_cpu < 0 || vsfile_hgei <= 0)
                return;

        /* We can only read (and clear) on the local CPU */
        idata.hgei = vsfile_hgei;
        idata.nr_eix = nr_eix;
        idata.clear = clear;
        idata.mrif = mrif;
        on_each_cpu_mask(cpumask_of(vsfile_cpu),
                         imsic_vsfile_local_read, &idata, 1);
}
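
/*
 * The VSISELECT/VSIREG window and HSTATUS.VGEIN are per-CPU state, so
 * a VS-file can only be accessed from the CPU it lives on. The
 * on_each_cpu_mask() call above therefore runs the accessor on
 * vsfile_cpu and, with wait == 1, blocks until it has completed.
 */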

struct imsic_vsfile_rw_data {
        int hgei;
        int isel;
        bool write;
        unsigned long val;
};

static void imsic_vsfile_local_rw(void *data)
{
        struct imsic_vsfile_rw_data *idata = data;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)idata->hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        switch (idata->isel) {
        case IMSIC_EIDELIVERY:
                if (idata->write)
                        imsic_vs_csr_write(IMSIC_EIDELIVERY, idata->val);
                else
                        idata->val = imsic_vs_csr_read(IMSIC_EIDELIVERY);
                break;
        case IMSIC_EITHRESHOLD:
                if (idata->write)
                        imsic_vs_csr_write(IMSIC_EITHRESHOLD, idata->val);
                else
                        idata->val = imsic_vs_csr_read(IMSIC_EITHRESHOLD);
                break;
        case IMSIC_EIP0 ... IMSIC_EIP63:
        case IMSIC_EIE0 ... IMSIC_EIE63:
#ifndef CONFIG_32BIT
                if (idata->isel & 0x1)
                        break;
#endif
                if (idata->write)
                        imsic_eix_write(idata->isel, idata->val);
                else
                        idata->val = imsic_eix_read(idata->isel);
                break;
        default:
                break;
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static int imsic_vsfile_rw(int vsfile_hgei, int vsfile_cpu, u32 nr_eix,
                           unsigned long isel, bool write,
                           unsigned long *val)
{
        int rc;
        struct imsic_vsfile_rw_data rdata;

        /* We can only access the register if we have an IMSIC VS-file */
        if (vsfile_cpu < 0 || vsfile_hgei <= 0)
                return -EINVAL;

        /* Check IMSIC register iselect */
        rc = imsic_mrif_isel_check(nr_eix, isel);
        if (rc)
                return rc;

        /* We can only access the register on the local CPU */
        rdata.hgei = vsfile_hgei;
        rdata.isel = isel;
        rdata.write = write;
        rdata.val = (write) ? *val : 0;
        on_each_cpu_mask(cpumask_of(vsfile_cpu),
                         imsic_vsfile_local_rw, &rdata, 1);

        if (!write)
                *val = rdata.val;

        return 0;
}

static void imsic_vsfile_local_clear(int vsfile_hgei, u32 nr_eix)
{
        u32 i;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        /* We can only zero-out if we have an IMSIC VS-file */
        if (vsfile_hgei <= 0)
                return;

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        imsic_vs_csr_write(IMSIC_EIDELIVERY, 0);
        imsic_vs_csr_write(IMSIC_EITHRESHOLD, 0);
        for (i = 0; i < nr_eix; i++) {
                imsic_eix_write(IMSIC_EIP0 + i * 2, 0);
                imsic_eix_write(IMSIC_EIE0 + i * 2, 0);
#ifdef CONFIG_32BIT
                imsic_eix_write(IMSIC_EIP0 + i * 2 + 1, 0);
                imsic_eix_write(IMSIC_EIE0 + i * 2 + 1, 0);
#endif
        }

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_local_update(int vsfile_hgei, u32 nr_eix,
                                      struct imsic_mrif *mrif)
{
        u32 i;
        struct imsic_mrif_eix *eix;
        unsigned long new_hstatus, old_hstatus, old_vsiselect;

        /* We can only update if we have a HW IMSIC context */
        if (vsfile_hgei <= 0)
                return;

        /*
         * We don't use imsic_mrif_atomic_xyz() functions to read values
         * from MRIF in this function because it is always called with
         * pointer to temporary MRIF on stack.
         */

        old_vsiselect = csr_read(CSR_VSISELECT);
        old_hstatus = csr_read(CSR_HSTATUS);
        new_hstatus = old_hstatus & ~HSTATUS_VGEIN;
        new_hstatus |= ((unsigned long)vsfile_hgei) << HSTATUS_VGEIN_SHIFT;
        csr_write(CSR_HSTATUS, new_hstatus);

        for (i = 0; i < nr_eix; i++) {
                eix = &mrif->eix[i];
                imsic_eix_set(IMSIC_EIP0 + i * 2, eix->eip[0]);
                imsic_eix_set(IMSIC_EIE0 + i * 2, eix->eie[0]);
#ifdef CONFIG_32BIT
                imsic_eix_set(IMSIC_EIP0 + i * 2 + 1, eix->eip[1]);
                imsic_eix_set(IMSIC_EIE0 + i * 2 + 1, eix->eie[1]);
#endif
        }
        imsic_vs_csr_write(IMSIC_EITHRESHOLD, mrif->eithreshold);
        imsic_vs_csr_write(IMSIC_EIDELIVERY, mrif->eidelivery);

        csr_write(CSR_HSTATUS, old_hstatus);
        csr_write(CSR_VSISELECT, old_vsiselect);
}

static void imsic_vsfile_cleanup(struct imsic *imsic)
{
        int old_vsfile_hgei, old_vsfile_cpu;
        unsigned long flags;

        /*
         * We don't use imsic_mrif_atomic_xyz() functions to clear the
         * SW-file in this function because it is always called when the
         * VCPU is being destroyed.
         */

        write_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
        imsic->vsfile_va = NULL;
        imsic->vsfile_pa = 0;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        memset(imsic->swfile, 0, sizeof(*imsic->swfile));

        if (old_vsfile_cpu >= 0)
                kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}

static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
        struct imsic_mrif *mrif = imsic->swfile;

        if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
            imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
                kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
        else
                kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
}
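
/*
 * In other words: the VCPU sees a VS-level external interrupt from
 * the SW-file exactly when delivery is enabled and at least one
 * identity is pending, enabled, and not masked off by eithreshold,
 * which mirrors what a hardware VS-file would signal on its own.
 */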

static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
                              struct imsic_mrif *mrif)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /*
         * We don't use imsic_mrif_atomic_xyz() functions to read and
         * write SW-file and MRIF in this function because it is always
         * called when VCPU is not using SW-file and the MRIF points to
         * a temporary MRIF on stack.
         */

        memcpy(mrif, imsic->swfile, sizeof(*mrif));
        if (clear) {
                memset(imsic->swfile, 0, sizeof(*imsic->swfile));
                kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
        }
}

static void imsic_swfile_update(struct kvm_vcpu *vcpu,
                                struct imsic_mrif *mrif)
{
        u32 i;
        struct imsic_mrif_eix *seix, *eix;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
        struct imsic_mrif *smrif = imsic->swfile;

        imsic_mrif_atomic_write(smrif, &smrif->eidelivery, mrif->eidelivery);
        imsic_mrif_atomic_write(smrif, &smrif->eithreshold, mrif->eithreshold);
        for (i = 0; i < imsic->nr_eix; i++) {
                seix = &smrif->eix[i];
                eix = &mrif->eix[i];
                imsic_mrif_atomic_or(smrif, &seix->eip[0], eix->eip[0]);
                imsic_mrif_atomic_or(smrif, &seix->eie[0], eix->eie[0]);
#ifdef CONFIG_32BIT
                imsic_mrif_atomic_or(smrif, &seix->eip[1], eix->eip[1]);
                imsic_mrif_atomic_or(smrif, &seix->eie[1], eix->eie[1]);
#endif
        }

        imsic_swfile_extirq_update(vcpu);
}

void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        struct imsic_mrif tmrif;
        int old_vsfile_hgei, old_vsfile_cpu;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /* Read and clear IMSIC VS-file details */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        imsic->vsfile_cpu = imsic->vsfile_hgei = -1;
        imsic->vsfile_va = NULL;
        imsic->vsfile_pa = 0;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Do nothing if there is no IMSIC VS-file to release */
        if (old_vsfile_cpu < 0)
                return;

        /*
         * At this point, all interrupt producers are still using
         * the old IMSIC VS-file, so we first re-direct all interrupt
         * producers.
         */

        /* Purge the G-stage mapping */
        kvm_riscv_gstage_iounmap(vcpu->kvm,
                                 vcpu->arch.aia_context.imsic_addr,
                                 IMSIC_MMIO_PAGE_SZ);

        /* TODO: Purge the IOMMU mapping ??? */

        /*
         * At this point, all interrupt producers have been re-directed
         * to somewhere else, so we move the register state from the old
         * IMSIC VS-file to the IMSIC SW-file.
         */

        /* Read and clear register state from the old IMSIC VS-file */
        memset(&tmrif, 0, sizeof(tmrif));
        imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu, imsic->nr_hw_eix,
                          true, &tmrif);

        /* Update register state in the IMSIC SW-file */
        imsic_swfile_update(vcpu, &tmrif);

        /* Free-up the old IMSIC VS-file */
        kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
}

int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        phys_addr_t new_vsfile_pa;
        struct imsic_mrif tmrif;
        void __iomem *new_vsfile_va;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_run *run = vcpu->run;
        struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;
        struct imsic *imsic = vaia->imsic_state;
        int ret = 0, new_vsfile_hgei = -1, old_vsfile_hgei, old_vsfile_cpu;

        /* Do nothing for emulation mode */
        if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_EMUL)
                return 1;

        /* Read old IMSIC VS-file details */
        read_lock_irqsave(&imsic->vsfile_lock, flags);
        old_vsfile_hgei = imsic->vsfile_hgei;
        old_vsfile_cpu = imsic->vsfile_cpu;
        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /* Do nothing if we are continuing on the same CPU */
        if (old_vsfile_cpu == vcpu->cpu)
                return 1;

        /* Allocate a new IMSIC VS-file */
        ret = kvm_riscv_aia_alloc_hgei(vcpu->cpu, vcpu,
                                       &new_vsfile_va, &new_vsfile_pa);
        if (ret <= 0) {
                /* For HW acceleration mode, we can't continue */
                if (kvm->arch.aia.mode == KVM_DEV_RISCV_AIA_MODE_HWACCEL) {
                        run->fail_entry.hardware_entry_failure_reason =
                                                                CSR_HSTATUS;
                        run->fail_entry.cpu = vcpu->cpu;
                        run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                        return 0;
                }

                /* Release the old IMSIC VS-file */
                if (old_vsfile_cpu >= 0)
                        kvm_riscv_vcpu_aia_imsic_release(vcpu);

                /* For automatic mode, we continue */
                goto done;
        }
        new_vsfile_hgei = ret;

        /*
         * At this point, all interrupt producers are still using
         * the old IMSIC VS-file, so we first move all interrupt
         * producers to the new IMSIC VS-file.
         */

        /* Zero-out new IMSIC VS-file */
        imsic_vsfile_local_clear(new_vsfile_hgei, imsic->nr_hw_eix);

        /* Update G-stage mapping for the new IMSIC VS-file */
        ret = kvm_riscv_gstage_ioremap(kvm, vcpu->arch.aia_context.imsic_addr,
                                       new_vsfile_pa, IMSIC_MMIO_PAGE_SZ,
                                       true, true);
        if (ret)
                goto fail_free_vsfile_hgei;

        /* TODO: Update the IOMMU mapping ??? */

        /* Update new IMSIC VS-file details in IMSIC context */
        write_lock_irqsave(&imsic->vsfile_lock, flags);
        imsic->vsfile_hgei = new_vsfile_hgei;
        imsic->vsfile_cpu = vcpu->cpu;
        imsic->vsfile_va = new_vsfile_va;
        imsic->vsfile_pa = new_vsfile_pa;
        write_unlock_irqrestore(&imsic->vsfile_lock, flags);

        /*
         * At this point, all interrupt producers have been moved
         * to the new IMSIC VS-file so we move register state from
         * the old IMSIC VS/SW-file to the new IMSIC VS-file.
         */

        memset(&tmrif, 0, sizeof(tmrif));
        if (old_vsfile_cpu >= 0) {
                /* Read and clear register state from old IMSIC VS-file */
                imsic_vsfile_read(old_vsfile_hgei, old_vsfile_cpu,
                                  imsic->nr_hw_eix, true, &tmrif);

                /* Free-up old IMSIC VS-file */
                kvm_riscv_aia_free_hgei(old_vsfile_cpu, old_vsfile_hgei);
        } else {
                /* Read and clear register state from IMSIC SW-file */
                imsic_swfile_read(vcpu, true, &tmrif);
        }

        /* Restore register state in the new IMSIC VS-file */
        imsic_vsfile_local_update(new_vsfile_hgei, imsic->nr_hw_eix, &tmrif);

done:
        /* Set VCPU HSTATUS.VGEIN to new IMSIC VS-file */
        vcpu->arch.guest_context.hstatus &= ~HSTATUS_VGEIN;
        if (new_vsfile_hgei > 0)
                vcpu->arch.guest_context.hstatus |=
                        ((unsigned long)new_vsfile_hgei) << HSTATUS_VGEIN_SHIFT;

        /* Continue run-loop */
        return 1;

fail_free_vsfile_hgei:
        kvm_riscv_aia_free_hgei(vcpu->cpu, new_vsfile_hgei);
        return ret;
}

int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
                                 unsigned long *val, unsigned long new_val,
                                 unsigned long wr_mask)
{
        u32 topei;
        struct imsic_mrif_eix *eix;
        int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
                /* Read pending and enabled interrupt with highest priority */
                topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
                                         imsic->nr_msis);
                if (val)
                        *val = topei;

                /* Writes ignore value and clear top pending interrupt */
                if (topei && wr_mask) {
                        topei >>= TOPEI_ID_SHIFT;
                        if (topei) {
                                eix = &imsic->swfile->eix[topei /
                                                          BITS_PER_TYPE(u64)];
                                clear_bit(topei & (BITS_PER_TYPE(u64) - 1),
                                          eix->eip);
                        }
                }
        } else {
                r = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix, isel,
                                   val, new_val, wr_mask);
                /* Forward unknown IMSIC register to user-space */
                if (r)
                        rc = (r == -ENOENT) ? 0 : KVM_INSN_ILLEGAL_TRAP;
        }

        if (wr_mask)
                imsic_swfile_extirq_update(vcpu);

        return rc;
}

int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
                                bool write, unsigned long *val)
{
        u32 isel, vcpu_id;
        unsigned long flags;
        struct imsic *imsic;
        struct kvm_vcpu *vcpu;
        int rc, vsfile_hgei, vsfile_cpu;

        if (!kvm_riscv_aia_initialized(kvm))
                return -ENODEV;

        vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
        vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
        if (!vcpu)
                return -ENODEV;

        isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
        imsic = vcpu->arch.aia_context.imsic_state;

        read_lock_irqsave(&imsic->vsfile_lock, flags);

        rc = 0;
        vsfile_hgei = imsic->vsfile_hgei;
        vsfile_cpu = imsic->vsfile_cpu;
        if (vsfile_cpu < 0) {
                if (write) {
                        rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
                                            isel, NULL, *val, -1UL);
                        imsic_swfile_extirq_update(vcpu);
                } else {
                        rc = imsic_mrif_rmw(imsic->swfile, imsic->nr_eix,
                                            isel, val, 0, 0);
                }
        }

        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        if (!rc && vsfile_cpu >= 0)
                rc = imsic_vsfile_rw(vsfile_hgei, vsfile_cpu, imsic->nr_eix,
                                     isel, write, val);

        return rc;
}

int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type)
{
        u32 isel, vcpu_id;
        struct imsic *imsic;
        struct kvm_vcpu *vcpu;

        if (!kvm_riscv_aia_initialized(kvm))
                return -ENODEV;

        vcpu_id = KVM_DEV_RISCV_AIA_IMSIC_GET_VCPU(type);
        vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
        if (!vcpu)
                return -ENODEV;

        isel = KVM_DEV_RISCV_AIA_IMSIC_GET_ISEL(type);
        imsic = vcpu->arch.aia_context.imsic_state;
        return imsic_mrif_isel_check(imsic->nr_eix, isel);
}

void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu)
{
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (!imsic)
                return;

        kvm_riscv_vcpu_aia_imsic_release(vcpu);

        memset(imsic->swfile, 0, sizeof(*imsic->swfile));
}

int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
                                    u32 guest_index, u32 offset, u32 iid)
{
        unsigned long flags;
        struct imsic_mrif_eix *eix;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        /* We only emulate one IMSIC MMIO page for each Guest VCPU */
        if (!imsic || !iid || guest_index ||
            (offset != IMSIC_MMIO_SETIPNUM_LE &&
             offset != IMSIC_MMIO_SETIPNUM_BE))
                return -ENODEV;

        iid = (offset == IMSIC_MMIO_SETIPNUM_BE) ? __swab32(iid) : iid;
        if (imsic->nr_msis <= iid)
                return -EINVAL;

        read_lock_irqsave(&imsic->vsfile_lock, flags);

        if (imsic->vsfile_cpu >= 0) {
                writel(iid, imsic->vsfile_va + IMSIC_MMIO_SETIPNUM_LE);
                kvm_vcpu_kick(vcpu);
        } else {
                eix = &imsic->swfile->eix[iid / BITS_PER_TYPE(u64)];
                set_bit(iid & (BITS_PER_TYPE(u64) - 1), eix->eip);
                imsic_swfile_extirq_update(vcpu);
        }

        read_unlock_irqrestore(&imsic->vsfile_lock, flags);

        return 0;
}

static int imsic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                           gpa_t addr, int len, void *val)
{
        if (len != 4 || (addr & 0x3) != 0)
                return -EOPNOTSUPP;

        *((u32 *)val) = 0;

        return 0;
}

static int imsic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
                            gpa_t addr, int len, const void *val)
{
        struct kvm_msi msi = { 0 };

        if (len != 4 || (addr & 0x3) != 0)
                return -EOPNOTSUPP;

        msi.address_hi = addr >> 32;
        msi.address_lo = (u32)addr;
        msi.data = *((const u32 *)val);
        kvm_riscv_aia_inject_msi(vcpu->kvm, &msi);

        return 0;
}
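
/*
 * Trapped guest MMIO: a 4-byte store of an interrupt identity (say 23,
 * an illustrative value) to an emulated IMSIC page is repackaged as a
 * struct kvm_msi and re-injected via kvm_riscv_aia_inject_msi(), which
 * routes it by guest physical address to the target VCPU and sets the
 * corresponding pending bit in its VS-file or SW-file.
 */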

static struct kvm_io_device_ops imsic_iodoev_ops = {
        .read = imsic_mmio_read,
        .write = imsic_mmio_write,
};

int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
{
        int ret = 0;
        struct imsic *imsic;
        struct page *swfile_page;
        struct kvm *kvm = vcpu->kvm;

        /* Fail if we have zero IDs */
        if (!kvm->arch.aia.nr_ids)
                return -EINVAL;

        /* Allocate IMSIC context */
        imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
        if (!imsic)
                return -ENOMEM;
        vcpu->arch.aia_context.imsic_state = imsic;

        /* Setup IMSIC context */
        imsic->nr_msis = kvm->arch.aia.nr_ids + 1;
        rwlock_init(&imsic->vsfile_lock);
        imsic->nr_eix = BITS_TO_U64(imsic->nr_msis);
        imsic->nr_hw_eix = BITS_TO_U64(kvm_riscv_aia_max_ids);
        imsic->vsfile_hgei = imsic->vsfile_cpu = -1;
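
        /*
         * Example sizing (illustrative numbers): with
         * kvm->arch.aia.nr_ids == 63, nr_msis is 64 and one EIX pair
         * suffices, since BITS_TO_U64(64) == 1.
         */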

        /* Setup IMSIC SW-file */
        swfile_page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                  get_order(sizeof(*imsic->swfile)));
        if (!swfile_page) {
                ret = -ENOMEM;
                goto fail_free_imsic;
        }
        imsic->swfile = page_to_virt(swfile_page);
        imsic->swfile_pa = page_to_phys(swfile_page);

        /* Setup IO device */
        kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
                                      vcpu->arch.aia_context.imsic_addr,
                                      KVM_DEV_RISCV_IMSIC_SIZE,
                                      &imsic->iodev);
        mutex_unlock(&kvm->slots_lock);
        if (ret)
                goto fail_free_swfile;

        return 0;

fail_free_swfile:
        free_pages((unsigned long)imsic->swfile,
                   get_order(sizeof(*imsic->swfile)));
fail_free_imsic:
        vcpu->arch.aia_context.imsic_state = NULL;
        kfree(imsic);
        return ret;
}

void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        struct imsic *imsic = vcpu->arch.aia_context.imsic_state;

        if (!imsic)
                return;

        imsic_vsfile_cleanup(imsic);

        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &imsic->iodev);
        mutex_unlock(&kvm->slots_lock);

        free_pages((unsigned long)imsic->swfile,
                   get_order(sizeof(*imsic->swfile)));

        vcpu->arch.aia_context.imsic_state = NULL;
        kfree(imsic);
}